| id | content |
|---|---|
11517826
|
import os
import io
import json
import argparse
import time
import glob
import ntpath
from typing import List
class SppClient:
def process(self, input: str, output: str):
raise NotImplementedError
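# Hypothetical sketch (not part of the original service): a concrete client
# would subclass SppClient and write one JSON result per input PDF; the class
# name and the payload below are illustrative only.
class GlobSppClient(SppClient):
    def process(self, input: str, output: str):
        for pdf_path in glob.glob(os.path.join(input, "*.pdf")):
            name = ntpath.basename(pdf_path)
            result = {"source": name, "processed_at": time.time()}  # placeholder payload
            with io.open(os.path.join(output, name + ".json"), "w", encoding="utf-8") as fh:
                fh.write(json.dumps(result))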
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Client for ScienceParsePlus (SPP) services")
parser.add_argument("--input", default=None, help="path to the directory containing PDF to process")
parser.add_argument("--output", default=None, help="path to the directory where to put the results")
args = parser.parse_args()
input_path = args.input
output_path = args.output
client = SppClient()
start_time = time.time()
client.process(input_path, output_path)
runtime = round(time.time() - start_time, 3)
print("runtime: %s seconds " % (runtime))
|
11517856
|
import os
import tensorflow as tf
from riptide.binary import binary_layers as nn
def channel_shuffle(x, groups):
n, h, w, c = x.shape
channels_per_group = tf.math.floordiv(c, groups)
# reshape
x = tf.reshape(x, [n, h, w, groups, channels_per_group])
x = tf.transpose(x, [0, 1, 2, 4, 3])
x = tf.reshape(x, [n, h, w, c])
return x
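# Illustrative sanity check (assumes TensorFlow 2.x eager mode and a static
# batch size): channel_shuffle preserves the input shape on a dummy NHWC tensor.
if __name__ == "__main__":
    x = tf.reshape(tf.range(1 * 2 * 2 * 8, dtype=tf.float32), [1, 2, 2, 8])
    y = channel_shuffle(x, groups=2)
    assert y.shape == x.shape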
|
11517878
|
import argparse
import logging
from os import path
from kubebench.test import deploy_utils
from kubeflow.testing import test_helper
from kubeflow.testing import util # pylint: disable=no-name-in-module
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--namespace", default=None, type=str, help=("The namespace to use."))
parser.add_argument(
"--as_gcloud_user",
dest="as_gcloud_user",
action="store_true",
help=("Impersonate the user corresponding to the gcloud "
"command with kubectl and ks."))
parser.add_argument(
"--no-as_gcloud_user", dest="as_gcloud_user", action="store_false")
parser.set_defaults(as_gcloud_user=False)
parser.add_argument(
"--github_token",
default=None,
type=str,
help=("The GitHub API token to use. This is needed since ksonnet uses the "
"GitHub API and without it we get rate limited. For more info see: "
"https://github.com/ksonnet/ksonnet/blob/master/docs"
"/troubleshooting.md. Can also be set using environment variable "
"GITHUB_TOKEN."))
parser.add_argument(
"--src_root_dir",
default=None,
type=str,
help=("The source directory of all repositories.")
)
args, _ = parser.parse_known_args()
return args
def deploy_kubeflow(test_case): # pylint: disable=unused-argument
"""Deploy Kubeflow."""
args = parse_args()
src_root_dir = args.src_root_dir
namespace = args.namespace
api_client = deploy_utils.create_k8s_client()
manifest_repo_dir = path.join(src_root_dir, "kubeflow", "manifests")
argo_manifest_dir = path.join(manifest_repo_dir, "argo", "base")
tfoperator_manifest_dir = path.join(manifest_repo_dir, "tf-training",
"tf-job-operator", "base")
deploy_utils.setup_test(api_client, namespace)
apply_args = "-f -"
if args.as_gcloud_user:
account = deploy_utils.get_gcp_identity()
logging.info("Impersonate %s", account)
# If we don't use --as to impersonate the service account then we
# observe RBAC errors when doing certain operations. The problem appears
# to be that we end up using the in cluster config (e.g. pod service account)
# and not the GCP service account which has more privileges.
apply_args = " ".join(["--as=" + account, apply_args])
# Deploy argo
logging.info("Deploying argo")
util.run(["kustomize", "edit", "set", "namespace", namespace],
cwd=argo_manifest_dir)
util.run(["sh", "-c", "kustomize build | kubectl apply " + apply_args],
cwd=argo_manifest_dir)
# Deploy tf-job-operator
logging.info("Deploying tf-job-operator")
util.run(["kustomize", "edit", "set", "namespace", namespace],
cwd=tfoperator_manifest_dir)
util.run(["sh", "-c", "kustomize build | kubectl apply " + apply_args],
cwd=tfoperator_manifest_dir)
# Verify that the TfJob operator is actually deployed.
tf_job_deployment_name = "tf-job-operator"
logging.info("Verifying TfJob controller started.")
util.wait_for_deployment(api_client, namespace, tf_job_deployment_name)
# Verify that the Argo operator is deployed.
argo_deployment_name = "workflow-controller"
logging.info("Verifying Argo controller started.")
util.wait_for_deployment(api_client, namespace, argo_deployment_name)
deploy_utils.set_clusterrole(namespace)
def main():
test_case = test_helper.TestCase(
name='deploy_kubeflow', test_func=deploy_kubeflow)
test_suite = test_helper.init(
name='deploy_kubeflow', test_cases=[test_case])
test_suite.run()
if __name__ == "__main__":
main()
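# Invocation sketch (the script filename and flag values are placeholders):
# python deploy_kubeflow.py --namespace=kubeflow-test --src_root_dir=/src --as_gcloud_user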
|
11517896
|
from keras.models import Model
from keras.layers import Input, merge, ZeroPadding2D
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import AveragePooling2D, GlobalAveragePooling2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
import keras.backend as K
from custom_layers import Scale
def DenseNet(nb_dense_block=4, growth_rate=32, nb_filter=64, reduction=0.0, dropout_rate=0.0, weight_decay=1e-4, classes=1000, weights_path=None):
'''Instantiate the DenseNet architecture,
# Arguments
nb_dense_block: number of dense blocks to add to end
growth_rate: number of filters to add per dense block
nb_filter: initial number of filters
reduction: reduction factor of transition blocks.
dropout_rate: dropout rate
weight_decay: weight decay factor
classes: optional number of classes to classify images
weights_path: path to pre-trained weights
# Returns
A Keras model instance.
'''
eps = 1.1e-5
# compute compression factor
compression = 1.0 - reduction
# Handle Dimension Ordering for different backends
global concat_axis
if K.image_dim_ordering() == 'tf':
concat_axis = 3
img_input = Input(shape=(224, 224, 3), name='data')
else:
concat_axis = 1
img_input = Input(shape=(3, 224, 224), name='data')
# From architecture for ImageNet (Table 1 in the paper)
nb_filter = 64
nb_layers = [6,12,32,32] # For DenseNet-169
# Initial convolution
x = ZeroPadding2D((3, 3), name='conv1_zeropadding')(img_input)
x = Convolution2D(nb_filter, 7, 7, subsample=(2, 2), name='conv1', bias=False)(x)
x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv1_bn')(x)
x = Scale(axis=concat_axis, name='conv1_scale')(x)
x = Activation('relu', name='relu1')(x)
x = ZeroPadding2D((1, 1), name='pool1_zeropadding')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), name='pool1')(x)
# Add dense blocks
for block_idx in range(nb_dense_block - 1):
stage = block_idx+2
x, nb_filter = dense_block(x, stage, nb_layers[block_idx], nb_filter, growth_rate, dropout_rate=dropout_rate, weight_decay=weight_decay)
# Add transition_block
x = transition_block(x, stage, nb_filter, compression=compression, dropout_rate=dropout_rate, weight_decay=weight_decay)
nb_filter = int(nb_filter * compression)
final_stage = stage + 1
x, nb_filter = dense_block(x, final_stage, nb_layers[-1], nb_filter, growth_rate, dropout_rate=dropout_rate, weight_decay=weight_decay)
x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv'+str(final_stage)+'_blk_bn')(x)
x = Scale(axis=concat_axis, name='conv'+str(final_stage)+'_blk_scale')(x)
x = Activation('relu', name='relu'+str(final_stage)+'_blk')(x)
x = GlobalAveragePooling2D(name='pool'+str(final_stage))(x)
x = Dense(classes, name='fc6')(x)
x = Activation('softmax', name='prob')(x)
model = Model(img_input, x, name='densenet')
if weights_path is not None:
model.load_weights(weights_path)
return model
def conv_block(x, stage, branch, nb_filter, dropout_rate=None, weight_decay=1e-4):
    '''Apply BatchNorm, Relu, bottleneck 1x1 Conv2D, 3x3 Conv2D, and optional dropout
# Arguments
x: input tensor
stage: index for dense block
branch: layer index within each dense block
nb_filter: number of filters
dropout_rate: dropout rate
weight_decay: weight decay factor
'''
eps = 1.1e-5
conv_name_base = 'conv' + str(stage) + '_' + str(branch)
relu_name_base = 'relu' + str(stage) + '_' + str(branch)
# 1x1 Convolution (Bottleneck layer)
inter_channel = nb_filter * 4
x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_x1_bn')(x)
x = Scale(axis=concat_axis, name=conv_name_base+'_x1_scale')(x)
x = Activation('relu', name=relu_name_base+'_x1')(x)
x = Convolution2D(inter_channel, 1, 1, name=conv_name_base+'_x1', bias=False)(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
# 3x3 Convolution
x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_x2_bn')(x)
x = Scale(axis=concat_axis, name=conv_name_base+'_x2_scale')(x)
x = Activation('relu', name=relu_name_base+'_x2')(x)
x = ZeroPadding2D((1, 1), name=conv_name_base+'_x2_zeropadding')(x)
x = Convolution2D(nb_filter, 3, 3, name=conv_name_base+'_x2', bias=False)(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
return x
def transition_block(x, stage, nb_filter, compression=1.0, dropout_rate=None, weight_decay=1E-4):
''' Apply BatchNorm, 1x1 Convolution, averagePooling, optional compression, dropout
# Arguments
x: input tensor
stage: index for dense block
nb_filter: number of filters
compression: calculated as 1 - reduction. Reduces the number of feature maps in the transition block.
dropout_rate: dropout rate
weight_decay: weight decay factor
'''
eps = 1.1e-5
conv_name_base = 'conv' + str(stage) + '_blk'
relu_name_base = 'relu' + str(stage) + '_blk'
pool_name_base = 'pool' + str(stage)
x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_bn')(x)
x = Scale(axis=concat_axis, name=conv_name_base+'_scale')(x)
x = Activation('relu', name=relu_name_base)(x)
x = Convolution2D(int(nb_filter * compression), 1, 1, name=conv_name_base, bias=False)(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
x = AveragePooling2D((2, 2), strides=(2, 2), name=pool_name_base)(x)
return x
def dense_block(x, stage, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1e-4, grow_nb_filters=True):
''' Build a dense_block where the output of each conv_block is fed to subsequent ones
# Arguments
x: input tensor
stage: index for dense block
nb_layers: the number of layers of conv_block to append to the model.
nb_filter: number of filters
growth_rate: growth rate
dropout_rate: dropout rate
weight_decay: weight decay factor
grow_nb_filters: flag to decide to allow number of filters to grow
'''
eps = 1.1e-5
concat_feat = x
for i in range(nb_layers):
branch = i+1
x = conv_block(concat_feat, stage, branch, growth_rate, dropout_rate, weight_decay)
concat_feat = merge([concat_feat, x], mode='concat', concat_axis=concat_axis, name='concat_'+str(stage)+'_'+str(branch))
if grow_nb_filters:
nb_filter += growth_rate
return concat_feat, nb_filter
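# Usage sketch (assumes the legacy Keras 1.x API used above; the weights file
# name is illustrative, not a file shipped with this code):
# model = DenseNet(reduction=0.5, classes=1000,
#                  weights_path='densenet169_weights_tf.h5')
# model.compile(optimizer='sgd', loss='categorical_crossentropy',
#               metrics=['accuracy'])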
|
11517903
|
from conans import ConanFile, CMake, tools
import os
import subprocess
class EigenQLDTestConan(ConanFile):
requires = "eigen-qld/1.0.0@gergondet/stable"
settings = "os", "arch", "compiler", "build_type"
generators = "cmake"
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def test(self):
if not tools.cross_building(self.settings):
os.chdir("bin")
self.run(".%sexample" % os.sep)
subprocess.check_call(['python', os.path.join(os.path.dirname(__file__), 'test.py')])
|
11517919
|
import argparse
from model.utils.config import cfg, cfg_from_file, cfg_from_list
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--dataset', dest='dataset',
help='source training dataset',
default='pascal_voc_0712', type=str)
parser.add_argument('--dataset_t', dest='dataset_t',
help='target training dataset',
default='clipart', type=str)
parser.add_argument('--net', dest='net',
                        help='vgg16, res101, res50',
default='res101', type=str)
parser.add_argument('--start_epoch', dest='start_epoch',
help='starting epoch',
default=1, type=int)
parser.add_argument('--epochs', dest='max_epochs',
help='number of epochs to train',
default=20, type=int)
parser.add_argument('--gamma', dest='gamma',
help='value of gamma',
default=5, type=float)
parser.add_argument('--disp_interval', dest='disp_interval',
help='number of iterations to display',
default=100, type=int)
parser.add_argument('--checkpoint_interval', dest='checkpoint_interval',
                        help='number of iterations between checkpoints',
default=10000, type=int)
parser.add_argument('--save_dir', dest='save_dir',
help='directory to save models', default="models",
type=str)
parser.add_argument('--load_name', dest='load_name',
help='path to load models', default="models",
type=str)
parser.add_argument('--nw', dest='num_workers',
                        help='number of workers to load data',
default=0, type=int)
    parser.add_argument('--cuda', dest='cuda',
                        help='whether to use CUDA',
                        action='store_true')
    parser.add_argument('--detach', dest='detach',
                        help='whether to use detach',
                        action='store_false')
    parser.add_argument('--ef', dest='ef',
                        help='whether to use exponential focal loss',
                        action='store_true')
    parser.add_argument('--lc', dest='lc',
                        help='whether to use a context vector at the pixel level',
                        action='store_true')
    parser.add_argument('--gc', dest='gc',
                        help='whether to use a context vector at the global level',
                        action='store_true')
    parser.add_argument('--ls', dest='large_scale',
                        help='whether to use a large image scale',
                        action='store_true')
    parser.add_argument('--mGPUs', dest='mGPUs',
                        help='whether to use multiple GPUs',
                        action='store_true')
parser.add_argument('--bs', dest='batch_size',
help='batch_size',
default=1, type=int)
parser.add_argument('--cag', dest='class_agnostic',
help='whether perform class_agnostic bbox regression',
action='store_true')
# config optimization
parser.add_argument('--o', dest='optimizer',
help='training optimizer',
default="sgd", type=str)
parser.add_argument('--lr', dest='lr',
help='starting learning rate',
default=0.001, type=float)
parser.add_argument('--eta', dest='eta',
help='trade-off parameter between detection loss and domain-alignment loss. Used for Car datasets',
default=0.1, type=float)
parser.add_argument('--lr_decay_step', dest='lr_decay_step',
help='step to do learning rate decay, unit is epoch',
default=5, type=int)
parser.add_argument('--lr_decay_gamma', dest='lr_decay_gamma',
help='learning rate decay ratio',
default=0.1, type=float)
parser.add_argument('--s', dest='session',
help='training session',
default=1, type=int)
parser.add_argument('--r', dest='resume',
help='resume checkpoint or not',
default=False, type=bool)
parser.add_argument('--checksession', dest='checksession',
help='checksession to load model',
default=1, type=int)
parser.add_argument('--checkepoch', dest='checkepoch',
help='checkepoch to load model',
default=1, type=int)
parser.add_argument('--checkpoint', dest='checkpoint',
help='checkpoint to load model',
default=0, type=int)
    # log and display
    parser.add_argument('--use_tfb', dest='use_tfboard',
                        help='whether to use tensorboard',
                        action='store_true')
parser.add_argument('--image_dir', dest='image_dir',
help='directory to load images for demo',
default="images")
args = parser.parse_args()
return args
def set_dataset_args(args, test=False):
if not test:
if args.dataset == "pascal_voc":
args.imdb_name = "voc_2007_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
elif args.dataset == "pascal_voc_water":
args.imdb_name = "voc_water_2007_trainval+voc_water_2012_trainval"
args.imdbval_name = "voc_clipart_2007_trainval+voc_clipart_2012_trainval"
args.imdb_name_cycle = "voc_cyclewater_2007_trainval+voc_cyclewater_2012_trainval"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
elif args.dataset == "pascal_voc_cycleclipart":
args.imdb_name = "voc_cycleclipart_2007_trainval+voc_cycleclipart_2012_trainval"
args.imdbval_name = "voc_cycleclipart_2007_trainval+voc_cycleclipart_2012_trainval"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
elif args.dataset == "pascal_voc_cyclewater":
args.imdb_name = "voc_cyclewater_2007_trainval+voc_cyclewater_2012_trainval"
args.imdbval_name = "voc_cyclewater_2007_trainval+voc_cyclewater_2012_trainval"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
elif args.dataset == "pascal_voc_0712":
args.imdb_name = "voc_2007_trainval+voc_2012_trainval"
args.imdbval_name = "voc_2007_test"
args.imdb_name_cycle = "voc_cycleclipart_2007_trainval+voc_cycleclipart_2012_trainval"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
elif args.dataset == "foggy_cityscape":
args.imdb_name = "foggy_cityscape_trainval"
args.imdbval_name = "foggy_cityscape_trainval"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES',
'30']
elif args.dataset == "vg":
args.imdb_name = "vg_150-50-50_minitrain"
args.imdbval_name = "vg_150-50-50_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']
elif args.dataset == "cityscape":
args.imdb_name = "cityscape_trainval"
args.imdbval_name = "cityscape_trainval"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES',
'30']
elif args.dataset == "sim10k":
args.imdb_name = "sim10k_train"
args.imdbval_name = "sim10k_train"
args.imdb_name_cycle = "sim10k_cycle_train" # "voc_cyclewater_2007_trainval+voc_cyclewater_2012_trainval"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '30']
elif args.dataset == "sim10k_cycle":
args.imdb_name = "sim10k_cycle_train"
args.imdbval_name = "sim10k_cycle_train"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '30']
## cityscape dataset for only car classes.
# elif args.dataset == "cityscape_kitti":
# args.imdb_name = "cityscape_kitti_trainval"
# args.imdbval_name = "cityscape_kitti_trainval"
# args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES',
# '30']
if args.dataset_t == "water":
args.imdb_name_target = "water_train"
args.imdbval_name_target = "water_train"
args.set_cfgs_target = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES',
'20']
elif args.dataset_t == "clipart":
args.imdb_name_target = "clipart_trainval"
args.imdbval_name_target = "clipart_test"
args.set_cfgs_target = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES',
'20']
elif args.dataset_t == "cityscape":
args.imdb_name_target = "cityscape_trainval"
args.imdbval_name_target = "cityscape_trainval"
args.set_cfgs_target = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES',
'30']
## cityscape dataset for only car classes.
elif args.dataset_t == "cityscape_car":
args.imdb_name_target = "cityscape_car_trainval"
args.imdbval_name_target = "cityscape_car_trainval"
args.set_cfgs_target = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES',
'20']
# elif args.dataset_t == "kitti":
# args.imdb_name_target = "kitti_trainval"
# args.imdbval_name_target = "kitti_trainval"
# args.set_cfgs_target = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES',
# '20']
elif args.dataset_t == "foggy_cityscape":
args.imdb_name_target = "foggy_cityscape_trainval"
args.imdbval_name_target = "foggy_cityscape_trainval"
args.set_cfgs_target = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES',
'30']
else:
if args.dataset == "pascal_voc":
args.imdb_name = "voc_2007_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "pascal_voc_0712":
args.imdb_name = "voc_2007_trainval+voc_2012_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "sim10k":
args.imdb_name = "sim10k_val"
args.imdbval_name = "sim10k_val"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES',
'30']
elif args.dataset == "cityscape":
args.imdb_name = "cityscape_val"
args.imdbval_name = "cityscape_val"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '30']
elif args.dataset == "foggy_cityscape":
args.imdb_name = "foggy_cityscape_test"
args.imdbval_name = "foggy_cityscape_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '30']
elif args.dataset == "cityscape_kitti":
args.imdb_name = "cityscape_kitti_val"
args.imdbval_name = "cityscape_kitti_val"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '30']
elif args.dataset == "water":
args.imdb_name = "water_test"
args.imdbval_name = "water_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES',
'20']
elif args.dataset == "clipart":
args.imdb_name = "clipart_trainval"
args.imdbval_name = "clipart_trainval"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES',
'20']
elif args.dataset == "cityscape_car":
args.imdb_name = "cityscape_car_val"
args.imdbval_name = "cityscape_car_val"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES',
'20']
args.cfg_file = "cfgs/{}_ls.yml".format(args.net) if args.large_scale else "cfgs/{}.yml".format(args.net)
return args
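# Usage sketch (illustrative): typical call order in a training entry point,
# folding the dataset-specific settings into the global config.
# args = parse_args()
# args = set_dataset_args(args)
# if args.cfg_file is not None:
#     cfg_from_file(args.cfg_file)
# if args.set_cfgs is not None:
#     cfg_from_list(args.set_cfgs)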
|
11517922
|
from dataclasses import dataclass
@dataclass
class Person:
"""Person class creates an user object"""
_weight: float = 0.0
_height: float = 0.0
_age: int = 0
_gender: str = 'male'
_body_fat: float = 0.0
@property
def weight(self) -> float:
"""Weight in pounds"""
return self._weight
@weight.setter
def weight(self, weight: float):
self._weight = weight
@property
def height(self) -> float:
"""Height in feet"""
return self._height
@height.setter
def height(self, height):
self._height = height
@property
def age(self) -> int:
"""Age in years"""
return self._age
@age.setter
def age(self, age: int) -> None:
self._age = age
@property
def gender(self) -> str:
"""Gender of a person"""
return self._gender
@gender.setter
def gender(self, gender: str) -> None:
self._gender = gender
@property
def body_fat(self) -> float:
"""Body fat percentage"""
return self._body_fat
@body_fat.setter
def body_fat(self, body_fat: float):
if body_fat:
self._body_fat = body_fat
else:
            self.approximate_body_fat()
@property
def body_mass_index(self) -> float:
"""BMI is a measure of body fat based on height
and weight that applies to adult men & women."""
return round((self._weight / (self._height * 12) ** 2) * 7, 4)
def approximate_body_fat(self) -> None:
"""Approximates body fat % based on given weight, height, age."""
if self._gender == 'female':
            self._approximate_female_body_fat()
elif self._gender == 'male':
self._approximate_male_body_fat()
def _approximate_female_body_fat(self) -> None:
"""Calculates female's body fat % with rough estimation"""
self._body_fat = (1.2 * self.body_mass_index * 100) \
+ (0.23 * self._age) - 5.4
def _approximate_male_body_fat(self) -> None:
"""Calculates male's body fat % with rough estimation"""
self._body_fat = (1.2 * self.body_mass_index * 100) \
+ (0.23 * self._age) - 16.2
@dataclass
class Athlete(Person):
"""Inherits main measures from Person.
Adds activity variables to the pack"""
_exercise_freq: int = 3
_active_job: bool = False
_goal: str = 'Maintain Weight'
    @property
    def exercise_freq(self) -> int:
        return self._exercise_freq
    @exercise_freq.setter
    def exercise_freq(self, freq: int):
        self._exercise_freq = freq
    @property
    def active_job(self) -> bool:
        return self._active_job
    @active_job.setter
    def active_job(self, job: bool) -> None:
        self._active_job = job
@property
def goal(self) -> str:
return self._goal
@goal.setter
def goal(self, goal: str):
self._goal = goal
@dataclass
class Measurements():
"""Calculate body measurements and indices of a Person class"""
person: Person = Person()
def lean_body_mass(self) -> float:
"""
LBM is a part of body composition that is defined
as the difference between body weight and body fat weight."""
return self.person.weight * (1 - self.person.body_fat / 100)
def basal_metabolic_rate(self) -> float:
"""
BMR is calories required to keep your body functioning at rest.
BMR is also known as your body's metabolism; therefore, any increase
to your metabolic weight, such as exercise, will increase your BMR.
"""
if self.person.gender == 'male':
return 66 + (6.23 * self.person.weight) \
+ (12.7 * self.person.height * 12) \
- (6.8 * self.person.age)
elif self.person.gender == 'female':
return 665 + (4.35 * self.person.weight) \
+ (4.7 * self.person.height * 12) \
- (4.7 * self.person.age)
def protein_requirement(self) -> float:
"""Minimum protein amount (in grams) needed for your body weight"""
return self.lean_body_mass() / 2.20462 * 2.25
@dataclass
class DietMacros():
"""
Creates a dispersal of the macros based on a fitness goal.
Uses Person class to approximate various indexed and diet type.
"""
PROTEIN_KCAL = 4
CARBS_KCAL = 4
FATS_KCAL = 9
athlete: Athlete = Athlete()
protein: int = 0
carbs: int = 0
fats: int = 0
total: int = 0
@property
def set_protein(self) -> float:
"""Proteins in grams"""
return self.protein
@set_protein.setter
def set_protein(self, protein: float) -> None:
self.protein = protein
@property
def set_carbs(self) -> float:
"""Carbs in grams"""
return self.carbs
@set_carbs.setter
def set_carbs(self, carbs: float) -> None:
self.carbs = carbs
@property
def set_fats(self) -> float:
"""Fats in grams"""
return self.fats
@set_fats.setter
def set_fats(self, fats: float) -> None:
self.fats = fats
@property
def set_total(self) -> float:
"""Total of macros in grams"""
return self.total
@set_total.setter
def set_total(self, total: float) -> None:
self.total = total
def set_macros(self, goal: str, weight: float) -> None:
"""Asign diet macro values based on a goal"""
if goal == 'Gain Weight':
self.protein = weight * self.PROTEIN_KCAL
self.carbs = weight * 2 * self.CARBS_KCAL
self.fats = weight * 0.45 * self.FATS_KCAL
elif goal == 'Lose Weight':
self.protein = weight * 1.4 * self.PROTEIN_KCAL
self.carbs = weight * self.CARBS_KCAL
self.fats = weight * 0.25 * self.FATS_KCAL
elif goal == 'Maintain Weight':
self.protein = weight * self.PROTEIN_KCAL
self.carbs = weight * 1.6 * self.CARBS_KCAL
self.fats = weight * 0.35 * self.FATS_KCAL
self.total = sum([self.protein, self.carbs, self.fats])
def total_daily_energy_expenditure(self) -> float:
"""
TDEE is an estimation of calories burned per day,
when exercise and job activity is taken into account.
...
:return: BMR adjusted for the exercise amount.
"""
m = Measurements(self.athlete)
tdee = 0
if self.athlete.exercise_freq <= 1:
tdee = m.basal_metabolic_rate() * 1.2
elif self.athlete.exercise_freq <= 3:
tdee = m.basal_metabolic_rate() * 1.375
elif self.athlete.exercise_freq <= 5:
tdee = m.basal_metabolic_rate() * 1.55
elif self.athlete.exercise_freq > 5:
tdee = m.basal_metabolic_rate() * 1.725
# if the user has a physically active job.
if self.athlete.active_job:
return tdee * 1.15
return tdee
def calculate_macros_gain(self) -> dict:
"""
        Calculates macros (Proteins, Carbs, Fats) for muscle gain.
...
:return: protein, carbs, fats, totals: Returns macros as Kcal.
"""
if self.athlete.goal == 'Gain Weight':
tdee = self.total_daily_energy_expenditure()
if tdee > self.total:
diff = tdee - self.total
while self.total <= tdee + 500:
self.protein += diff * (self.protein / self.total)
self.carbs += diff * (self.carbs / self.total)
self.fats += diff * (self.fats / self.total)
self.total = sum([self.protein,
self.carbs,
self.fats])
return {
'protein': self.protein,
'carbs': self.carbs,
'fats': self.fats,
'total': self.total
}
else:
raise TypeError(
"This method is only for users who want to gain weight"
)
def calculate_macros_lose(self) -> dict:
"""
        Calculates macros (Proteins, Carbs, Fats) for weight loss.
"""
if self.athlete.goal == 'Lose Weight':
tdee = self.total_daily_energy_expenditure()
if tdee - self.total < 350:
diff = 350 - (tdee - self.total)
while self.total >= tdee - 350:
self.protein -= diff * (self.protein / self.total)
self.carbs -= diff * (self.carbs / self.total)
self.fats -= diff * (self.fats / self.total)
self.total = sum([self.protein,
self.carbs,
self.fats])
return {
'protein': self.protein,
'carbs': self.carbs,
'fats': self.fats,
'total': self.total
}
else:
raise TypeError(
"This method's only for users who want to lose weight"
)
def calculate_macros_maintain(self) -> dict:
"""
Calculates macros (Proteins, Carbs, Fats) to maintain weight.
...
:return: protein, carbs, fats, totals: Returns macros as Kcal.
"""
if self.athlete.goal == 'Maintain Weight':
tdee = self.total_daily_energy_expenditure()
if tdee > self.total:
while self.total < tdee:
self.protein += 1
self.carbs += 1.6
self.fats += 0.35
self.total = sum([self.protein,
self.carbs,
self.fats])
elif tdee < self.total:
while self.total > tdee:
self.protein -= 1
self.carbs -= 1.6
self.fats -= 0.35
self.total = sum([self.protein,
self.carbs,
self.fats])
return {
'protein': self.protein,
'carbs': self.carbs,
'fats': self.fats,
'total': self.total
}
else:
raise TypeError(
"This method's only for users who want to maintain weight"
)
if __name__ == '__main__':
a = DietMacros()
a.athlete.height=6.0
a.athlete.weight=175
a.athlete.age=33
a.athlete.gender='male'
a.athlete.approximate_body_fat()
a.athlete.goal = 'Gain Weight'
a.set_macros(a.athlete.goal, a.athlete.weight)
    print(a.calculate_macros_gain())
    a.athlete.goal = 'Maintain Weight'
    print(a.calculate_macros_maintain())
    print('------------------------')
print(a)
|
11517924
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.hub import load_state_dict_from_url
import torchvision
from functools import partial
from collections import OrderedDict
import math
import os,inspect,sys
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.insert(0,currentdir)
def convert_relu_to_swish(model):
for child_name, child in model.named_children():
if isinstance(child, nn.ReLU):
setattr(model, child_name, nn.SiLU(True))
# setattr(model, child_name, Swish())
else:
convert_relu_to_swish(child)
class Swish(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
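        # swish(x) = x * sigmoid(x); the in-place mul_ saves memory but mutates
        # its input, which can break autograd if x is needed elsewhere.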
return x.mul_(torch.sigmoid(x))
class r2plus1d_18(nn.Module):
def __init__(self, pretrained=True, num_classes=500, dropout_p=0.5):
super(r2plus1d_18, self).__init__()
self.pretrained = pretrained
self.num_classes = num_classes
model = torchvision.models.video.r2plus1d_18(pretrained=self.pretrained)
# delete the last fc layer
modules = list(model.children())[:-1]
# print(modules)
self.r2plus1d_18 = nn.Sequential(*modules)
convert_relu_to_swish(self.r2plus1d_18)
self.fc1 = nn.Linear(model.fc.in_features, self.num_classes)
self.dropout = nn.Dropout(dropout_p, inplace=True)
def forward(self, x):
out = self.r2plus1d_18(x)
# print(out.shape)
# Flatten the layer to fc
out = out.flatten(1)
out = self.dropout(out)
out = self.fc1(out)
return out
class flow_r2plus1d_18(nn.Module):
def __init__(self, pretrained=False, num_classes=500, dropout_p=0.5):
super(flow_r2plus1d_18, self).__init__()
self.pretrained = pretrained
self.num_classes = num_classes
model = torchvision.models.video.r2plus1d_18(pretrained=self.pretrained)
model.stem[0] = nn.Conv3d(2, 45, kernel_size=(1, 7, 7),
stride=(1, 2, 2), padding=(0, 3, 3),
bias=False)
# delete the last fc layer
modules = list(model.children())[:-1]
# print(modules)
self.r2plus1d_18 = nn.Sequential(*modules)
convert_relu_to_swish(self.r2plus1d_18)
self.fc1 = nn.Linear(model.fc.in_features, self.num_classes)
self.dropout = nn.Dropout(dropout_p, inplace=True)
def forward(self, x):
# print(x.size())
out = self.r2plus1d_18(x)
# print(out.shape)
# Flatten the layer to fc
out = out.flatten(1)
out = self.dropout(out)
out = self.fc1(out)
return out
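# Shape sketch (illustrative): torchvision's video models take clips as
# (N, C, T, H, W); RGB clips use C=3 for r2plus1d_18 and 2-channel optical
# flow uses C=2 for flow_r2plus1d_18.
# model = r2plus1d_18(pretrained=False, num_classes=500)
# logits = model(torch.randn(2, 3, 16, 112, 112))  # -> torch.Size([2, 500])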
|
11517974
|
sm.setSpeakerID(2159008)
sm.sendNext("Little rats. I say, how DARE you try to escape this place?")
sm.setPlayerAsSpeaker()
sm.sendSay("Shoot, we were spotted!")
sm.setSpeakerID(2159008)
sm.sendSay("Now, now, children. Don't make this harder than it needs to be. Just walk towards me, nice and easy... Wait, you're not one of the test subjects. You're one of the townspeople, aren't you?")
sm.setPlayerAsSpeaker()
sm.sendSay("That's right. I'm a resident of Edelstein, not a test subject. You can't boss ME around.")
sm.setSpeakerID(2159008)
sm.sendSay("Oh my, oh my. I told them to make sure the townspeople kept their kids away from the mines... Alas, it's too late now. I can't allow you to tell anyone about this laboratory, so I guess you'll just have to stay here and...help with the experiments. *snicker*")
sm.setPlayerAsSpeaker()
sm.sendSay("Hmph. Big words, but let's see if you can catch me first.")
sm.setSpeakerID(2159008)
sm.sendSay("Why, you insolent, little-- Ahem, ahem, ahem. Your words don't matter. Time for me to pull out the big guns. I do hope you're ready. If not, you will suffer.")
sm.setPlayerAsSpeaker()
sm.sendSay("#b(Oh no! Schiller's attack HALVED your HP! He's tougher than you anticipated.)#k")
sm.setSpeakerID(2159008)
sm.sendSay("I say, got any more big words, kiddo? I'll make sure Gelimer performs some especially atrocious experiments on you. But I'll be nice if you come with me quiet-like.")
sm.setSpeakerID(2159010)
sm.sendSay("Hold it right there!")
sm.warp(931000021, 1)
|
11518030
|
import ConfigParser
import logging
import os
import unittest
from impacket.examples.secretsdump import LocalOperations, RemoteOperations, SAMHashes, LSASecrets, NTDSHashes
from impacket.smbconnection import SMBConnection
def _print_helper(*args, **kwargs):
try:
print args[-1]
except UnicodeError:
pass
class DumpSecrets:
def __init__(self, remoteName, username='', password='', domain='', options=None):
self.__useVSSMethod = options.use_vss
self.__remoteName = remoteName
self.__remoteHost = options.target_ip
self.__username = username
self.__password = password
self.__domain = domain
self.__lmhash = ''
self.__nthash = ''
self.__aesKey = options.aesKey
self.__smbConnection = None
self.__remoteOps = None
self.__SAMHashes = None
self.__NTDSHashes = None
self.__LSASecrets = None
self.__systemHive = options.system
self.__bootkey = options.bootkey
self.__securityHive = options.security
self.__samHive = options.sam
self.__ntdsFile = options.ntds
self.__history = options.history
self.__noLMHash = True
self.__isRemote = True
self.__outputFileName = options.outputfile
self.__doKerberos = options.k
self.__justDC = options.just_dc
self.__justDCNTLM = options.just_dc_ntlm
self.__justUser = options.just_dc_user
self.__pwdLastSet = options.pwd_last_set
self.__printUserStatus= options.user_status
self.__resumeFileName = options.resumefile
self.__canProcessSAMLSA = True
self.__kdcHost = options.dc_ip
self.__options = options
if options.hashes is not None:
self.__lmhash, self.__nthash = options.hashes.split(':')
def connect(self):
self.__smbConnection = SMBConnection(self.__remoteName, self.__remoteHost)
if self.__doKerberos:
self.__smbConnection.kerberosLogin(self.__username, self.__password, self.__domain, self.__lmhash,
self.__nthash, self.__aesKey, self.__kdcHost)
else:
self.__smbConnection.login(self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash)
def dump(self):
try:
if self.__remoteName.upper() == 'LOCAL' and self.__username == '':
self.__isRemote = False
self.__useVSSMethod = True
if self.__systemHive:
localOperations = LocalOperations(self.__systemHive)
bootKey = localOperations.getBootKey()
if self.__ntdsFile is not None:
# Let's grab target's configuration about LM Hashes storage
self.__noLMHash = localOperations.checkNoLMHashPolicy()
else:
import binascii
bootKey = binascii.unhexlify(self.__bootkey)
else:
self.__isRemote = True
bootKey = None
try:
try:
self.connect()
except Exception, e:
if os.getenv('KRB5CCNAME') is not None and self.__doKerberos is True:
# SMBConnection failed. That might be because there was no way to log into the
# target system. We just have a last resort. Hope we have tickets cached and that they
# will work
logging.debug('SMBConnection didn\'t work, hoping Kerberos will help (%s)' % str(e))
pass
else:
raise
self.__remoteOps = RemoteOperations(self.__smbConnection, self.__doKerberos, self.__kdcHost)
self.__remoteOps.setExecMethod(self.__options.exec_method)
if self.__justDC is False and self.__justDCNTLM is False or self.__useVSSMethod is True:
self.__remoteOps.enableRegistry()
bootKey = self.__remoteOps.getBootKey()
# Let's check whether target system stores LM Hashes
self.__noLMHash = self.__remoteOps.checkNoLMHashPolicy()
except Exception, e:
self.__canProcessSAMLSA = False
                    if str(e).find('STATUS_USER_SESSION_DELETED') >= 0 and os.getenv('KRB5CCNAME') is not None \
                            and self.__doKerberos is True:
# Giving some hints here when SPN target name validation is set to something different to Off
# This will prevent establishing SMB connections using TGS for SPNs different to cifs/
logging.error('Policy SPN target name validation might be restricting full DRSUAPI dump. Try -just-dc-user')
else:
logging.error('RemoteOperations failed: %s' % str(e))
# If RemoteOperations succeeded, then we can extract SAM and LSA
if self.__justDC is False and self.__justDCNTLM is False and self.__canProcessSAMLSA:
try:
if self.__isRemote is True:
SAMFileName = self.__remoteOps.saveSAM()
else:
SAMFileName = self.__samHive
self.__SAMHashes = SAMHashes(SAMFileName, bootKey, isRemote = self.__isRemote, perSecretCallback=_print_helper)
self.__SAMHashes.dump()
if self.__outputFileName is not None:
self.__SAMHashes.export(self.__outputFileName)
except Exception, e:
logging.error('SAM hashes extraction failed: %s' % str(e))
try:
if self.__isRemote is True:
SECURITYFileName = self.__remoteOps.saveSECURITY()
else:
SECURITYFileName = self.__securityHive
self.__LSASecrets = LSASecrets(SECURITYFileName, bootKey, self.__remoteOps,
isRemote=self.__isRemote, history=self.__history, perSecretCallback=_print_helper)
self.__LSASecrets.dumpCachedHashes()
if self.__outputFileName is not None:
self.__LSASecrets.exportCached(self.__outputFileName)
self.__LSASecrets.dumpSecrets()
if self.__outputFileName is not None:
self.__LSASecrets.exportSecrets(self.__outputFileName)
except Exception, e:
if logging.getLogger().level == logging.DEBUG:
import traceback
traceback.print_exc()
logging.error('LSA hashes extraction failed: %s' % str(e))
# NTDS Extraction we can try regardless of RemoteOperations failing. It might still work
if self.__isRemote is True:
if self.__useVSSMethod and self.__remoteOps is not None:
NTDSFileName = self.__remoteOps.saveNTDS()
else:
NTDSFileName = None
else:
NTDSFileName = self.__ntdsFile
self.__NTDSHashes = NTDSHashes(NTDSFileName, bootKey, isRemote=self.__isRemote, history=self.__history,
noLMHash=self.__noLMHash, remoteOps=self.__remoteOps,
useVSSMethod=self.__useVSSMethod, justNTLM=self.__justDCNTLM,
pwdLastSet=self.__pwdLastSet, resumeSession=self.__resumeFileName,
outputFileName=self.__outputFileName, justUser=self.__justUser,
printUserStatus= self.__printUserStatus, perSecretCallback=_print_helper)
try:
self.__NTDSHashes.dump()
except Exception, e:
if logging.getLogger().level == logging.DEBUG:
import traceback
traceback.print_exc()
if str(e).find('ERROR_DS_DRA_BAD_DN') >= 0:
# We don't store the resume file if this error happened, since this error is related to lack
# of enough privileges to access DRSUAPI.
resumeFile = self.__NTDSHashes.getResumeSessionFile()
if resumeFile is not None:
os.unlink(resumeFile)
logging.error(e)
if self.__justUser and str(e).find("ERROR_DS_NAME_ERROR_NOT_UNIQUE") >=0:
logging.info("You just got that error because there might be some duplicates of the same name. "
"Try specifying the domain name for the user as well. It is important to specify it "
"in the form of NetBIOS domain name/user (e.g. contoso/Administratror).")
elif self.__useVSSMethod is False:
                    logging.info('Something went wrong with the DRSUAPI approach. Try again with -use-vss parameter')
self.cleanup()
except (Exception, KeyboardInterrupt), e:
if logging.getLogger().level == logging.DEBUG:
import traceback
traceback.print_exc()
logging.error(e)
if self.__NTDSHashes is not None:
if isinstance(e, KeyboardInterrupt):
while True:
answer = raw_input("Delete resume session file? [y/N] ")
if answer.upper() == '':
answer = 'N'
break
elif answer.upper() == 'Y':
answer = 'Y'
break
elif answer.upper() == 'N':
answer = 'N'
break
if answer == 'Y':
resumeFile = self.__NTDSHashes.getResumeSessionFile()
if resumeFile is not None:
os.unlink(resumeFile)
try:
self.cleanup()
except:
pass
def cleanup(self):
logging.info('Cleaning up... ')
if self.__remoteOps:
self.__remoteOps.finish()
if self.__SAMHashes:
self.__SAMHashes.finish()
if self.__LSASecrets:
self.__LSASecrets.finish()
if self.__NTDSHashes:
self.__NTDSHashes.finish()
class Options(object):
aesKey=None
bootkey=None
dc_ip=None
debug=False
exec_method='smbexec'
hashes=None
history=False
just_dc=False
just_dc_ntlm=False
just_dc_user=None
k=False
no_pass=False
ntds=None
outputfile=None
pwd_last_set=False
resumefile=None
sam=None
security=None
system=None
target=''
target_ip=''
use_vss=False
user_status=False
class SecretsDumpTests(unittest.TestCase):
def test_VSS_History(self):
options = Options()
options.target_ip = self.machine
options.use_vss = True
options.history = True
dumper = DumpSecrets(self.serverName, self.username, self.password, self.domain, options)
dumper.dump()
def test_VSS_WMI(self):
options = Options()
options.target_ip = self.machine
options.use_vss = True
options.exec_method='wmiexec'
dumper = DumpSecrets(self.serverName, self.username, self.password, self.domain, options)
dumper.dump()
def test_DRSUAPI_DC_USER(self):
options = Options()
options.target_ip = self.machine
options.use_vss = False
options.just_dc = True
options.just_dc_user = '%s/%s' % (self.domain.split('.')[0], 'Administrator')
dumper = DumpSecrets(self.serverName, self.username, self.password, self.domain, options)
dumper.dump()
def test_VSS_MMC(self):
options = Options()
options.target_ip = self.machine
options.use_vss = True
options.exec_method='mmcexec'
dumper = DumpSecrets(self.serverName, self.username, self.password, self.domain, options)
dumper.dump()
def test_DRSUAPI(self):
options = Options()
options.target_ip = self.machine
options.use_vss = False
dumper = DumpSecrets(self.serverName, self.username, self.password, self.domain, options)
dumper.dump()
class Tests(SecretsDumpTests):
def setUp(self):
SecretsDumpTests.setUp(self)
# Put specific configuration for target machine with SMB1
configFile = ConfigParser.ConfigParser()
configFile.read('dcetests.cfg')
self.username = configFile.get('SMBTransport', 'username')
self.domain = configFile.get('SMBTransport', 'domain')
self.serverName = configFile.get('SMBTransport', 'servername')
self.password = configFile.get('SMBTransport', 'password')
self.machine = configFile.get('SMBTransport', 'machine')
self.hashes = configFile.get('SMBTransport', 'hashes')
self.aesKey = configFile.get('SMBTransport', 'aesKey128')
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(Tests)
unittest.TextTestRunner(verbosity=1).run(suite)
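# Illustrative dcetests.cfg layout assumed by Tests.setUp (placeholder values,
# not real credentials):
#
# [SMBTransport]
# username = Administrator
# password = <PASSWORD>
# domain = CONTOSO.LOCAL
# servername = DC01
# machine = 192.168.1.10
# hashes = <LMHASH>:<NTHASH>
# aesKey128 = <AESKEY>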
|
11518032
|
from dataloading.dataloading import get_dataloader
from dataloading.configloading import load_config
|
11518058
|
from startX.serivce.v1 import StartXHandler, get_m2m_display, Option
from django.urls import reverse
from django.utils.safestring import mark_safe
from .base_promission import PermissionHandler
class CourseDetailHandler(PermissionHandler, StartXHandler):
    def display_outline(self, model=None, is_header=None, *args, **kwargs):
        if is_header:
            return 'Course Outline'
        record_url = reverse('startX:generic_courseoutline_list', kwargs={'course_id': model.pk})
        return mark_safe('<a target="_blank" href="%s">Course Outline</a>' % record_url)
    def display_chapter(self, model=None, is_header=None, *args, **kwargs):
        if is_header:
            return 'Course Chapters'
        record_url = reverse('startX:generic_coursechapter_list', kwargs={'course_id': model.pk})
        return mark_safe('<a target="_blank" href="%s">Course Chapters</a>' % record_url)
    def display_coupon(self, model=None, is_header=None, *args, **kwargs):
        if is_header:
            return 'Course Coupons'
        record_url = reverse('startX:generic_coupon_list', kwargs={'course_id': model.pk})
        return mark_safe('<a target="_blank" href="%s">Course Coupons</a>' % record_url)
    def display_price_policy(self, model=None, is_header=None, *args, **kwargs):
        if is_header:
            return 'Price Policy'
        record_url = reverse('startX:generic_pricepolicy_list', kwargs={'course_id': model.pk})
        return mark_safe('<a target="_blank" href="%s">Price Policy</a>' % record_url)
    list_display = ['course', 'brief', get_m2m_display('Course', 'teacher'), display_outline, display_chapter,
                    display_coupon, display_price_policy]
    # Frequently asked questions
search_list = ['course__contains']
search_group = [
Option('course'),
Option('teacher'),
]
|
11518094
|
import atexit
import sacred
import argparse
import time
import math
import subprocess
import shutil
import os
import json
import threading
import requests
import glob
from configs import fetch_model_params
import socket
import queue
import sys
import signal
parser = argparse.ArgumentParser()
parser.add_argument('--tpu', type=str, required=True) # Name of TPU to train on, if any
parser.add_argument('--model', type=str, required=True) # JSON file that contains model parameters
parser.add_argument('--experiment_name', type=str, required=True) # name of experiment (will show up in omniboard)
parser.add_argument('--steps_per_checkpoint', type=int, default=5000)
parser.add_argument('--autostack', action="store_false")
parser.add_argument('--auto_layout', action="store_true")
parser.add_argument('--auto_layout_and_mesh_shape', action="store_true")
parser.add_argument('--new', action='store_true')
parser.add_argument('--test', action='store_true')
parser.add_argument('--eval', action='store_true')
parser.add_argument('--predict', action='store_true')
parser.add_argument('--no_delete_tpu', action='store_true')
parser.add_argument('--initial_heartbeat_timeout', type=int, default=7200)
parser.add_argument('--heartbeat_timeout', type=int, default=1800) # kill and restart if nothing logged to tensorboard in this many seconds
args = parser.parse_args()
params = fetch_model_params(args.model)
ex = sacred.Experiment(args.experiment_name)
ex.observers.append(sacred.observers.QueuedMongoObserver(url='127.0.0.1:27017', db_name='db', username='user', password='password'))
def get_open_port(lo=8000, hi=8100):
for i in range(lo, hi):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
if s.connect_ex(('localhost', i)) != 0:
return i
def train_thread(args, tpu, id, q):
print('starting training on', tpu)
# pass binary flags through
opts = ''
for flag in ['auto_layout', 'auto_layout_and_mesh_shape', 'new', 'test', 'predict', 'eval', ]:
if args.__getattribute__(flag):
opts += ' --' + flag
for flag in ['autostack', ]:
if not args.__getattribute__(flag):
opts += ' --' + flag
cmd = "python3 main.py --tpu {tpu} --model run_configs/config_{id}.json --steps_per_checkpoint {steps_per_checkpoint} {opts} --sacred_id {run_id}".format(tpu=tpu, id=id, steps_per_checkpoint=args.steps_per_checkpoint, opts=opts, run_id=id)
print('Running:', cmd)
proc = subprocess.Popen(cmd, shell=True)
# poll until it's exited
while proc.poll() is None:
time.sleep(60)
try:
nq, *nargs = q.get_nowait()
if nq == 'kill':
                print('train thread received kill signal from logging thread')
# first send SIGTERM
proc.terminate()
time.sleep(60)
# if it still hasn't exited, we send SIGKILL
if proc.poll() is None:
print('SIGTERM not successful, sending SIGKILL')
proc.kill()
except queue.Empty:
pass
print('exited training!')
if proc.returncode == 0:
print('exited gracefully')
os.kill(os.getpid(), signal.SIGINT)
return
if args.no_delete_tpu:
print('recreate done, exiting train_thread - not killing tpu!')
return
print("Recreating {} in 60sec...".format(tpu))
time.sleep(60)
os.system("pu recreate {} --yes --retry 3600 --retry-randomness 1.5".format(tpu))
print('recreate done, exiting train_thread')
# clear out queue
while True:
try:
q.get_nowait()
print('dropped request in queue after pu recreate')
except queue.Empty:
break
def get_json(uri, params=None, timeout=15):
resp = requests.get(uri, params=params, timeout=timeout)
resp.raise_for_status()
return resp.json()
def get_tag_sets(base_uri):
j = get_json(f'{base_uri}/data/plugin/scalars/tags', {'experiment': ''})
assert isinstance(j, dict)
return {
run: j[run].keys()
for run in j.keys()
}
def get_scalar_data(base_uri, run, tag):
j = get_json(f'{base_uri}/data/plugin/scalars/scalars', {'experiment': '', 'run': run, 'tag': tag})
assert isinstance(j, list)
return j
def get_run_data(port):
base_uri = f'http://localhost:{port}/'
r = {}
try:
tag_sets = get_tag_sets(base_uri)
runs = tag_sets.keys()
if '.' in runs:
if 'loss' in tag_sets['.']:
r['loss'] = get_scalar_data(base_uri, '.', 'loss')
if 'eval' in runs:
if 'loss' in tag_sets['eval']:
r['val_loss'] = get_scalar_data(base_uri, 'eval', 'loss')
if 'eval_lambada' in runs:
if 'lambada_acc' in tag_sets['eval_lambada']:
r['lambada_acc'] = get_scalar_data(base_uri, 'eval_lambada', 'lambada_acc')
if 'lambada_log_ppl' in tag_sets['eval_lambada']:
r['lambada_ppl'] = [
[t, s, math.exp(lp)]
for [t, s, lp] in get_scalar_data(base_uri, 'eval_lambada', 'lambada_log_ppl')
]
except:
import traceback
traceback.print_exc()
return r
@ex.main
def main(_run):
print('Starting run', _run._id)
print('experiment main invoked with argv:', " ".join(sys.argv))
print('WARNING: please remember to remove old metric log files from the model directory.')
os.makedirs('run_configs', exist_ok=True)
shutil.copy(args.model if args.model.endswith('.json') else 'configs/{}.json'.format(args.model), 'run_configs/config_{}.json'.format(_run._id))
tensorboard_port = get_open_port()
print('Tensorboard at port:', tensorboard_port)
print('Tensorboard url: ', 'http://eleutherai.bmk.sh:'+ str(tensorboard_port))
os.system("screen -S tensorboard_{} -d -m bash -c 'tensorboard --logdir {} --port {} --bind_all --reload_multifile=true || tensorboard --logdir {} --port {} --reload_multifile=true'".format(_run._id, params["model_path"], tensorboard_port,params["model_path"], tensorboard_port,))
atexit.register(goodbye, _run._id)
curr_step = {}
seen_predictions = set()
heartbeat_timeout = args.initial_heartbeat_timeout * 2
while True:
last_tb_log_time = time.time()
start_time = time.time()
q = queue.Queue()
trainthd = threading.Thread(target=train_thread, args=(args, args.tpu, _run._id, q))
trainthd.start()
while trainthd.is_alive():
time.sleep(60)
if start_time + args.initial_heartbeat_timeout < time.time():
# after initial args.initial_heartbeat_timeout grace period, now we want to set the timeout threshold much lower
heartbeat_timeout = args.heartbeat_timeout
print('Polling tensorboard for metrics...')
data = get_run_data(tensorboard_port)
for k in data.keys():
for ts, step, val in data[k]:
if step <= curr_step.get(k, -1):
continue
_run.log_scalar(k, val, step)
if k == 'loss':
_run.log_scalar('tb_ts', ts, step)
print('Logged to sacred: step={},loss={},tb_ts={}'.format(step, val, ts))
# found something new, so logging!
last_tb_log_time = time.time()
curr_step[k] = step
for f in glob.glob('predictions_{}_*'.format(_run._id)):
if f in seen_predictions:
continue
print('collecting prediction file', f)
ex.add_artifact(f)
seen_predictions.add(f)
# collect eval metrics from jsonl
if os.path.exists(f'eval_{_run._id}.jsonl'):
with open(f'eval_{_run._id}.jsonl') as fh:
for line in fh:
ob = json.loads(line)
val_step = ob['global_step']
val_task = ob['task']
for metr in ob.keys():
k = 'fs.' + val_task + '.' + metr
if metr in ['task', 'global_step']: continue
if val_step <= curr_step.get(k, -1): continue
_run.log_scalar(k, ob[metr], val_step)
curr_step[k] = val_step
if time.time() - last_tb_log_time > heartbeat_timeout:
# the run hasn't logged in a while, so we restart it
q.put(('kill',))
# give training thread some time to do its thing and recreate tpu
while trainthd.is_alive():
print('logging thread waiting for killing stalled run and for tpu recreate to finish')
time.sleep(60)
# reset heartbeat timeout to initial
heartbeat_timeout = args.initial_heartbeat_timeout
last_tb_log_time = time.time()
if args.no_delete_tpu:
break
def goodbye(id):
print("You are now leaving the Python sector.")
print("Sie verlassen den pythonischen Sektor.")
os.system("screen -S tensorboard_{} -X quit".format(id))
if __name__ == '__main__':
for file in glob.glob("**/*", recursive=True):
if file.split('.')[-1] in ['py']:
print('Adding', file, 'to sacred')
ex.add_source_file(file)
ex.add_config({
'tpu_name': args.tpu,
**params
})
ex.run()
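# Invocation sketch (the script filename and flag values are placeholders):
# python3 run_experiment.py --tpu my-tpu --model configs/my_model.json \
#     --experiment_name my-run --steps_per_checkpoint 5000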
|
11518134
|
import time
import scrapely
from . import slybot_project
from . import kernel
from . import ptree
def generate_slybot_project(url, path='slybot-project', verbose=False):
def _print(s):
if verbose:
print s,
_print('Downloading URL...')
t1 = time.clock()
page = scrapely.htmlpage.url_to_page(url)
_print('done ({0}s)\n'.format(time.clock() - t1))
_print('Extracting items...')
t1 = time.clock()
ie = kernel.ItemExtract(ptree.PageTree(page), separate_descendants=True)
_print('done ({0}s)\n'.format(time.clock() - t1))
_print('Generating slybot project...')
t1 = time.clock()
slybot_project.generate(ie, path)
_print('done ({0}s)\n'.format(time.clock() - t1))
return ie
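# Usage sketch (URL and path are illustrative):
# ie = generate_slybot_project('http://example.com/listings',
#                              path='slybot-project', verbose=True)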
|
11518147
|
from flask import url_for
import pytest
from flexmeasures.data.services.users import find_user_by_email
from flexmeasures.ui.tests.utils import mock_user_response
"""
Testing if the UI crud views do auth checks and display answers.
Actual logic is tested in the API tests.
"""
@pytest.mark.parametrize("view", ["index", "get", "toggle_active"])
def test_user_crud_as_non_admin(client, as_prosumer_user1, view):
user_index = client.get(url_for("UserCrudUI:index"), follow_redirects=True)
assert user_index.status_code == 403
user2_id = find_user_by_email("<EMAIL>").id
user_page = client.get(
url_for(f"UserCrudUI:{view}", id=user2_id), follow_redirects=True
)
assert user_page.status_code == 403
def test_user_list(client, as_admin, requests_mock):
requests_mock.get(
"http://localhost//api/v2_0/users",
status_code=200,
json=mock_user_response(multiple=True),
)
user_index = client.get(url_for("UserCrudUI:index"), follow_redirects=True)
assert user_index.status_code == 200
assert b"All active users" in user_index.data
assert b"<EMAIL>" in user_index.data
assert b"<EMAIL>" in user_index.data
def test_user_page(client, as_admin, requests_mock):
mock_user = mock_user_response(as_list=False)
requests_mock.get(
"http://localhost//api/v2_0/user/2", status_code=200, json=mock_user
)
requests_mock.get(
"http://localhost//api/v2_0/assets",
status_code=200,
json=[{}, {}, {}], # we only care about the length
)
user_page = client.get(url_for("UserCrudUI:get", id=2), follow_redirects=True)
assert user_page.status_code == 200
assert ("Overview for user %s" % mock_user["username"]).encode() in user_page.data
assert (">3</a>").encode() in user_page.data # this is the asset count
assert mock_user["email"].encode() in user_page.data
def test_deactivate_user(client, as_admin, requests_mock):
"""Test it does not fail (logic is tested in API tests) and displays an answer."""
user2 = find_user_by_email("<EMAIL>", keep_in_session=False)
requests_mock.patch(
f"http://localhost//api/v2_0/user/{user2.id}",
status_code=200,
json={"active": False},
)
# de-activate
user_page = client.get(
url_for("UserCrudUI:toggle_active", id=user2.id), follow_redirects=True
)
assert user_page.status_code == 200
assert user2.username in str(user_page.data)
assert b"new activation status is now False" in user_page.data
def test_reset_password(client, as_admin, requests_mock):
"""Test it does not fail (logic is tested in API tests) and displays an answer."""
user2 = find_user_by_email("<EMAIL>", keep_in_session=False)
requests_mock.patch(
f"http://localhost//api/v2_0/user/{user2.id}/password-reset",
status_code=200,
)
user_page = client.get(
url_for("UserCrudUI:reset_password_for", id=user2.id),
follow_redirects=True,
)
assert user_page.status_code == 200
assert b"has been changed to a random password" in user_page.data
|
11518163
|
import numpy as np
from numpy import linalg as la, random as rnd, testing as np_testing
from scipy.linalg import eigvalsh, expm
from pymanopt.manifolds import SymmetricPositiveDefinite
from pymanopt.tools.multi import multiprod, multisym, multitransp
from .._test import TestCase
class TestSingleSymmetricPositiveDefiniteManifold(TestCase):
def setUp(self):
self.n = n = 15
self.man = SymmetricPositiveDefinite(n)
def test_rand(self):
# Just test that rand returns a point on the manifold and two
# different matrices generated by rand aren't too close together
n = self.n
man = self.man
x = man.rand()
assert np.shape(x) == (n, n)
# Check symmetry
np_testing.assert_allclose(x, multisym(x))
# Check positivity of eigenvalues
w = la.eigvalsh(x)
assert (w > [0]).all()
def test_dist(self):
man = self.man
x = man.rand()
y = man.rand()
# Test separability
np_testing.assert_almost_equal(man.dist(x, x), 0.)
# Test symmetry
np_testing.assert_almost_equal(man.dist(x, y), man.dist(y, x))
# Test alternative implementation
# from Eq 6.14 of "Positive definite matrices"
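        # scipy's eigvalsh(x, y) returns the generalized eigenvalues of the
        # pair (x, y), so d below is the affine-invariant Riemannian distance.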
d = np.sqrt((np.log(eigvalsh(x, y))**2).sum())
np_testing.assert_almost_equal(man.dist(x, y), d)
# check that dist is consistent with log
np_testing.assert_almost_equal(man.dist(x, y),
man.norm(x, man.log(x, y)))
# Test invariance under inversion
np_testing.assert_almost_equal(man.dist(x, y),
man.dist(la.inv(y), la.inv(x)))
# Test congruence-invariance
a = rnd.randn(self.n, self.n) # must be invertible
axa = multiprod(multiprod(a, x), multitransp(a))
aya = multiprod(multiprod(a, y), multitransp(a))
np_testing.assert_almost_equal(man.dist(x, y), man.dist(axa, aya))
def test_exp(self):
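        # Compare against the closed form multiprod(x, expm(solve(x, u))) and,
        # for small u, the first-order approximation exp(x, u) ~ x + u.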
man = self.man
x = man.rand()
u = man.randvec(x)
e = expm(la.solve(x, u))
np_testing.assert_allclose(multiprod(x, e), man.exp(x, u))
u = u * 1e-6
np_testing.assert_allclose(man.exp(x, u), x + u)
def test_randvec(self):
# Just test that randvec returns an element of the tangent space
# with norm 1 and that two randvecs are different.
man = self.man
x = man.rand()
u = man.randvec(x)
v = man.randvec(x)
np_testing.assert_allclose(multisym(u), u)
np_testing.assert_almost_equal(1, man.norm(x, u))
assert la.norm(u - v) > 1e-3
def test_norm(self):
man = self.man
x = man.rand()
np.testing.assert_almost_equal(man.norm(np.eye(self.n), x), la.norm(x))
def test_exp_log_inverse(self):
man = self.man
x = man.rand()
y = man.rand()
u = man.log(x, y)
np_testing.assert_allclose(man.exp(x, u), y)
def test_log_exp_inverse(self):
man = self.man
x = man.rand()
u = man.randvec(x)
y = man.exp(x, u)
np_testing.assert_allclose(man.log(x, y), u)
class TestMultiSymmetricPositiveDefiniteManifold(TestCase):
def setUp(self):
self.n = n = 10
self.k = k = 3
self.man = SymmetricPositiveDefinite(n, k)
def test_dim(self):
man = self.man
n = self.n
k = self.k
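        # A symmetric n x n matrix has n*(n+1)/2 free entries, so the product
        # manifold of k such matrices has dimension 0.5 * k * n * (n+1).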
np_testing.assert_equal(man.dim, 0.5 * k * n * (n+1))
def test_typicaldist(self):
man = self.man
np_testing.assert_equal(man.typicaldist, np.sqrt(man.dim))
def test_dist(self):
man = self.man
x = man.rand()
y = man.rand()
# Test separability
np_testing.assert_almost_equal(man.dist(x, x), 0.)
# Test symmetry
np_testing.assert_almost_equal(man.dist(x, y), man.dist(y, x))
def test_inner(self):
man = self.man
k = self.k
n = self.n
x = man.rand()
a, b = rnd.randn(2, k, n, n)
        np.testing.assert_almost_equal(
            np.tensordot(a, b.transpose((0, 2, 1)), axes=a.ndim),
            man.inner(x, multiprod(x, a), multiprod(x, b)))
def test_proj(self):
man = self.man
x = man.rand()
a = rnd.randn(self.k, self.n, self.n)
np.testing.assert_allclose(man.proj(x, a), multisym(a))
def test_egrad2rgrad(self):
man = self.man
x = man.rand()
u = rnd.randn(self.k, self.n, self.n)
np.testing.assert_allclose(man.egrad2rgrad(x, u),
multiprod(multiprod(x, multisym(u)), x))
def test_ehess2rhess(self):
# Use manopt's slow method
man = self.man
n = self.n
k = self.k
x = man.rand()
egrad, ehess = rnd.randn(2, k, n, n)
u = man.randvec(x)
Hess = (multiprod(multiprod(x, multisym(ehess)), x) +
2*multisym(multiprod(multiprod(u, multisym(egrad)), x)))
# Correction factor for the non-constant metric
Hess = Hess - multisym(multiprod(multiprod(u, multisym(egrad)), x))
np_testing.assert_almost_equal(Hess, man.ehess2rhess(x, egrad, ehess,
u))
def test_norm(self):
man = self.man
x = man.rand()
Id = np.array(self.k * [np.eye(self.n)])
np.testing.assert_almost_equal(man.norm(Id, x), la.norm(x))
def test_rand(self):
# Just test that rand returns a point on the manifold and two
# different matrices generated by rand aren't too close together
k = self.k
n = self.n
man = self.man
x = man.rand()
assert np.shape(x) == (k, n, n)
# Check symmetry
np_testing.assert_allclose(x, multisym(x))
# Check positivity of eigenvalues
w = la.eigvalsh(x)
assert (w > [[0]]).all()
def test_randvec(self):
# Just test that randvec returns an element of the tangent space
# with norm 1 and that two randvecs are different.
man = self.man
x = man.rand()
u = man.randvec(x)
v = man.randvec(x)
np_testing.assert_allclose(multisym(u), u)
np_testing.assert_almost_equal(1, man.norm(x, u))
assert la.norm(u - v) > 1e-3
def test_transp(self):
man = self.man
x = man.rand()
y = man.rand()
u = man.randvec(x)
np_testing.assert_allclose(man.transp(x, y, u), u)
def test_exp(self):
# Test against manopt implementation, test that for small vectors
# exp(x, u) = x + u.
man = self.man
x = man.rand()
u = man.randvec(x)
e = np.zeros((self.k, self.n, self.n))
for i in range(self.k):
e[i] = expm(la.solve(x[i], u[i]))
np_testing.assert_allclose(multiprod(x, e), man.exp(x, u))
u = u * 1e-6
np_testing.assert_allclose(man.exp(x, u), x + u)
def test_retr(self):
# Check that result is on manifold and for small vectors
# retr(x, u) = x + u.
man = self.man
x = man.rand()
u = man.randvec(x)
y = man.retr(x, u)
assert np.shape(y) == (self.k, self.n, self.n)
# Check symmetry
np_testing.assert_allclose(y, multisym(y))
# Check positivity of eigenvalues
w = la.eigvalsh(y)
assert (w > [[0]]).all()
u = u * 1e-6
np_testing.assert_allclose(man.retr(x, u), x + u)
def test_exp_log_inverse(self):
man = self.man
x = man.rand()
y = man.rand()
u = man.log(x, y)
np_testing.assert_allclose(man.exp(x, u), y)
def test_log_exp_inverse(self):
man = self.man
x = man.rand()
u = man.randvec(x)
y = man.exp(x, u)
np_testing.assert_allclose(man.log(x, y), u)
|
11518203
|
from . import dependency
import unittest
class DependencyGraphTestCase(unittest.TestCase):
def test_add_all(self):
graph = dependency.DependencyGraph()
graph.add_all({
'/content/test.yaml': ['/content/test1.yaml'],
'/content/test2.yaml': ['/content/test1.yaml'],
})
self.assertEqual(
{
'/content/test.yaml': ['/content/test1.yaml'],
'/content/test2.yaml': ['/content/test1.yaml'],
},
graph.export())
def test_add(self):
graph = dependency.DependencyGraph()
graph.add('/content/test.yaml', '/content/test1.yaml')
graph.add('/content/test.yaml', '/content/test2.yaml')
self.assertEqual(
{
'/content/test.yaml': [
'/content/test1.yaml',
'/content/test2.yaml',
],
},
graph.export())
def test_add_none(self):
graph = dependency.DependencyGraph()
graph.add('/content/test.yaml', '/content/test1.yaml')
graph.add('/content/test.yaml', None)
graph.add(None, '/content/test1.yaml')
self.assertEqual(
{
'/content/test.yaml': [
'/content/test1.yaml',
],
},
graph.export())
def test_add_normalize(self):
graph = dependency.DependencyGraph()
graph.add('/content/test.yaml', 'content/test1.yaml')
graph.add('content/test.yaml', '/content/test2.yaml')
self.assertEqual(
{
'/content/test.yaml': [
'/content/test1.yaml',
'/content/test2.yaml',
],
},
graph.export())
def test_export(self):
graph = dependency.DependencyGraph()
graph.add_references(
'/content/test.yaml',
set(['/content/test1.yaml']))
self.assertEqual(
{
'/content/test.yaml': ['/content/test1.yaml'],
},
graph.export())
def test_get_dependents(self):
graph = dependency.DependencyGraph()
graph.add_references(
'/content/test.yaml',
['/content/test1.yaml'])
self.assertEqual(
set(['/content/test.yaml', '/content/test1.yaml']),
graph.get_dependents('/content/test1.yaml'))
def test_get_dependents_collection(self):
graph = dependency.DependencyGraph()
graph.add_references(
'/content/test.yaml',
['/content/collection'])
graph.add_references(
'/content/test1.yaml',
['/content/collection/coll1.yaml'])
self.assertEqual(
set([
'/content/test.yaml',
'/content/test1.yaml',
'/content/collection/coll1.yaml',
]),
graph.get_dependents('/content/collection/coll1.yaml'))
def test_get_dependents_self(self):
graph = dependency.DependencyGraph()
self.assertEqual(
set(['/content/test.yaml']),
graph.get_dependents('/content/test.yaml'))
def test_get_dependencies(self):
graph = dependency.DependencyGraph()
graph.add_references(
'/content/test.yaml',
['/content/test1.yaml', '/content/test2.yaml'])
self.assertEqual(
set(['/content/test1.yaml', '/content/test2.yaml']),
graph.get_dependencies('/content/test.yaml'))
def test_empty_dependents(self):
graph = dependency.DependencyGraph()
self.assertEqual(set(['/content/test1.yaml']),
graph.get_dependents('/content/test1.yaml'))
def test_empty_dependencies(self):
graph = dependency.DependencyGraph()
self.assertEqual(set(), graph.get_dependencies('/content/test.yaml'))
def test_match_dependents(self):
graph = dependency.DependencyGraph()
graph.add_references(
'/content/ref.yaml',
['/content/test1.yaml'])
graph.add_references(
'/content/ref1.yaml',
['/content/test1.yaml'])
graph.add_references(
'/content/ref2.yaml',
['/content/test2.yaml'])
self.assertEqual(
set(['/content/ref.yaml']),
graph.match_dependents('/content/ref.yaml'))
self.assertEqual(
set(['/content/ref1.yaml']),
graph.match_dependents('/content/ref1.yaml'))
self.assertEqual(
set(['/content/ref2.yaml']),
graph.match_dependents('/content/ref2.yaml'))
self.assertEqual(
set(['/content/test1.yaml', '/content/ref.yaml', '/content/ref1.yaml']),
graph.match_dependents('/content/test1.yaml'))
self.assertEqual(
set(['/content/test1.yaml', '/content/test2.yaml',
'/content/ref.yaml', '/content/ref1.yaml', '/content/ref2.yaml']),
graph.match_dependents('/content/test*.yaml'))
def test_reset(self):
graph = dependency.DependencyGraph()
graph.add_references(
'/content/test.yaml',
['/content/test1.yaml', '/content/test2.yaml'])
self.assertEqual(
set(['/content/test1.yaml', '/content/test2.yaml']),
graph.get_dependencies('/content/test.yaml'))
graph.reset()
self.assertEqual(set(), graph.get_dependencies('/content/test.yaml'))
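# Minimal usage sketch inferred from the tests above (API behaviour assumed, not verified):
#   graph = dependency.DependencyGraph()
#   graph.add('/content/page.yaml', '/content/partial.yaml')
#   graph.get_dependents('/content/partial.yaml')
#   # -> {'/content/page.yaml', '/content/partial.yaml'}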
if __name__ == '__main__':
unittest.main()
|
11518230
|
import os
import javabridge as jv
from cellacdc import bioformats
# import bioformats
print(bioformats.__file__)
# path = r'"G:\My Drive\01_Postdoc_HMGU\Python_MyScripts\MIA\Git\Cell_ACDC\cellacdc\bioformats\jars\bioformats_package.jar"'
jars = bioformats.JARS
print(jars)
jv.start_vm(class_path=jars)
paths = jv.JClassWrapper('java.lang.System').getProperty('java.class.path').split(";")
for path in paths:
print("%s: %s" %("exists" if os.path.isfile(path) else "missing", path))
jv.kill_vm()
|
11518263
|
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils import simplejson
from django.contrib.auth.decorators import login_required
from django.core.mail import send_mail
from django.contrib.sites.models import get_current_site
from django.contrib import messages
from django.conf import settings
from django.db.models.aggregates import Min, Max, Avg
from datetime import datetime, timedelta
from util import *
import json
import pygeoip
from survey.models import *
def home(request):
if request.user.is_authenticated():
return HttpResponseRedirect('/account')
else:
return render_to_response('home.html', RequestContext(request))
@login_required()
def print_survey(request, view_key, *args, **kwargs):
try:
survey = Survey.objects.get(key=view_key)
except BaseException as e:
return error_jump(request)
if not survey.user.id == request.user.id and not survey.is_collaborator(request.user):
return error_jump(request,"unauthorized")
questions = survey.questions.order_by('id_in_survey')
request.session['dt_start'] = json.dumps(datetime.now(), default=date_handler)
dict = {'survey': survey, 'questions': questions, 'dt_start': datetime.now()}
return render_to_response('print.html', dict, RequestContext(request))
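# Python 2 comparator helpers (tuple-parameter unpacking) for use with sorted(cmp=...).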
def string_cmp((k1, v1), (k2, v2)):
return cmp(float(k1), float(k2))
def date_cmp((k1, v1), (k2, v2)):
return cmp(datetime.strptime(k1, '%d %B'), datetime.strptime(k2, '%d %B'))
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
@login_required()
def analyse(request, view_key=""):
try:
survey = Survey.objects.get(key=view_key)
except BaseException as e:
return error_jump(request)
if not survey.user.id == request.user.id and not survey.is_collaborator(request.user):
return error_jump(request,"unauthorized")
questions = survey.questions.order_by('id_in_survey')
raw_data = []
geo_data = []
geo_dict = {}
gi = pygeoip.GeoIP(settings.GEO_DATA_PATH, pygeoip.MEMORY_CACHE)
responses = Response.objects.filter(survey=survey)
# how many people skipped each question marked as not required
no_skipped = []
respondents_data = []
time_data = {}
time_in_seconds = []
date_dict = {}
# analyse geoip data of responses
for response in responses:
date_str = response.dt_end.strftime('%d %B')
time_in_seconds.append((response.dt_end - response.dt_start).seconds)
if date_str not in date_dict:
date_dict[date_str] = 1
else:
date_dict[date_str] += 1
ip = str(response.ip_address)
country = gi.country_name_by_addr(ip)
if country not in geo_dict:
geo_dict[country] = 1
else:
geo_dict[country] += 1
try:
time_data['seconds'] = time_in_seconds
time_data['min'] = min(time_in_seconds)
time_data['max'] = max(time_in_seconds)
time_data['avg'] = sum(time_in_seconds) / len(time_in_seconds)
except BaseException as e:
pass
# analyse number of daily responses
for key, value in sorted(date_dict.items(), cmp=date_cmp):
respondents_data.append([key, value])
for key, value in geo_dict.items():
geo_data.append([key, value])
# analyse each question
for question in questions:
type = question.type
answers = Answer.objects.filter(response__survey=survey, id_in_response=question.id_in_survey)
# calc number of skipped responses for this question
if not question.is_required:
no_skipped.append(answers.filter(value__exact='').count())
else:
no_skipped.append(0)
# after counting skipped questions, excluding all empty answers
answers = answers.exclude(value__exact='')
if type == 'paragraph' or type == 'text':
# for paragraph and text question, only display all responses
raw_data.append([answer.value for answer in answers])
elif type == 'multiplechoice' or type == 'checkbox':
# get choices
if type == 'multiplechoice':
choices = MultipleChoice.objects.filter(question=question)
else:
choices = CheckboxChoice.objects.filter(question=question)
# init list to [0,0,0,0,0...]
resp_count = [0] * len(choices)
# calc number of responses for each choice
for answer in answers:
for choice in answer.value.split(','):
resp_count[int(choice)] += 1
data_dict = []
for choice, count in zip(choices, resp_count):
data_dict.append([str(choice.label), int(count)])
raw_data.append(data_dict)
elif type == 'numeric':
num_dict = {}
num_dict['data'] = [float(answer.value) for answer in answers]
num_dict['min_value'] = answers.aggregate(Min('value'))['value__min']
num_dict['max_value'] = answers.aggregate(Max('value'))['value__max']
num_dict['avg'] = answers.aggregate(Avg('value'))['value__avg']
raw_data.append(num_dict)
elif type == 'scale':
data_dict = {}
for answer in answers:
if answer.value not in data_dict:
data_dict[answer.value] = 1
else:
data_dict[answer.value] += 1
tmp_list = []
# sort according to key
for key, value in sorted(data_dict.items(), cmp=string_cmp):
tmp_list.append([str(key), float(value)])
raw_data.append(tmp_list)
elif type == 'date':
            raw_data.append([str(answer.value) for answer in answers])
zipped = zip(questions, raw_data, no_skipped)
dict = {'survey': survey,
'questions': questions,
'dt_start': datetime.now(),
'responses': responses,
'zipped': zipped,
'time_data' : time_data,
'respondents_data': respondents_data,
'geo_data': geo_data}
return render_to_response('analyse.html', dict, RequestContext(request))
def about(request):
return render_to_response('about.html', RequestContext(request))
def signup(request):
return render_to_response('signup.html', RequestContext(request))
def test_view(request):
return render_to_response('cqx_test.html', RequestContext(request))
def publish(request, publish_key):
current_site = get_current_site(request)
dict = {"key": publish_key,
'SITE_URL' : current_site.domain
}
return render_to_response('publish.html', dict, RequestContext(request))
def create_response(request):
if request.POST:
resp = Response()
# get ip address
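        # Behind a proxy, the originating client IP is the first entry of X-Forwarded-For.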
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
resp.ip_address = x_forwarded_for.split(',')[0]
else:
resp.ip_address = request.META.get('REMOTE_ADDR')
resp.dt_start = request.session.get('dt_start')
resp.dt_end = datetime.now()
resp.survey = Survey.objects.get(id=int(request.POST.get("surveyID")))
resp.save()
dict = {"responseID": resp.id}
return HttpResponse(simplejson.dumps(dict), mimetype='application/javascript')
return error_jump(request)
def validate_answer(request):
if request.POST:
id_in_response = int(request.POST.get("id_in_response"))
type = request.POST.get("type")
value = request.POST.get("value")
surveyID = int(request.POST.get("surveyID"))
errors = ""
li = "<li>Q%d: %s</li>" % (id_in_response, "%s")
survey = Survey.objects.get(id=surveyID)
question = survey.questions.get(id_in_survey=id_in_response)
deadline = survey.deadline
now = datetime.now()
if question.is_required and value == "":
errors += li % "This question is required."
if now > deadline:
errors += "<li>This survey is expired already.</li>"
if type == 'paragraph':
max_no_characters = question.paragraphquestion.max_no_characters
length = len(value)
if length > max_no_characters:
errors += li % "Number of characters cannot exceed %d, %d characters are provided." % (
max_no_characters, length)
if type == 'text':
max_no_characters = question.textquestion.max_no_characters
length = len(value)
if length > max_no_characters:
errors += li % "Number of characters cannot exceed %d, %d characters are provided." % (
max_no_characters, length)
if type == "numeric":
max_value = question.numericquestion.max_value
min_value = question.numericquestion.min_value
value = value.strip()
if not isfloat(value):
errors += li % "Legal digits required."
else:
value = float(value)
if value > max_value or value < min_value:
errors += li % "Please enter in a number in [%f,%f], %f is provided." % (
min_value, max_value, value)
if type == "checkbox":
min_checked = question.checkboxquestion.min_checked
max_checked = question.checkboxquestion.max_checked
no_checked = len(value.split(","))
if value == "":
no_checked = 0
if no_checked > max_checked or no_checked < min_checked:
errors += li % "Please choose [%d,%d], %d choices are chosen." % (min_checked, max_checked, no_checked)
dict = {"errors": errors}
return HttpResponse(simplejson.dumps(dict), mimetype='application/javascript')
return error_jump(request)
def response_survey(request, responseID):
if request.POST:
answer = Answer()
answer.response = Response.objects.get(id=responseID)
answer.id_in_response = int(request.POST.get("id_in_response"))
answer.type = request.POST.get("type")
answer.value = request.POST.get("value")
answer.save()
dict = {}
return HttpResponse(simplejson.dumps(dict), mimetype='application/javascript')
return error_jump(request)
@login_required()
def account(request):
if not request.user.is_authenticated:
return HttpResponseRedirect('/login')
surveys = Survey.objects.filter(user=request.user).order_by('-last_modified')
titles = [str(survey.title) for survey in surveys]
if request.POST:
search_value = request.POST.get('search_value')
if search_value:
surveys = surveys.filter(title__icontains=search_value)
collaborations = Collaboration.objects.filter(user_id=request.user.id, is_active=1)
collaborated_surveys_ids = [c.survey_id for c in collaborations]
collaborated_surveys = Survey.objects.filter(id__in=collaborated_surveys_ids)
print collaborations
return render_to_response('account.html',
{'surveys': surveys, 'titles': titles, 'collaborated_surveys': collaborated_surveys}, RequestContext(request))
@login_required
def edit_account(request):
return render_to_response('edit_account.html', RequestContext(request))
def edit_survey(request, view_key=""):
if request.POST:
request.session['question_created_total'] = str(int(request.session['question_created_total']) + 1)
question_no = int(
request.session.get("question_created_total")) #This is used to group selections, not for the index.
question_description = 'Click here to change the description'
question_helptext = "Click here to add help text"
values = "sample1@#@sample2@#@sample3@#@sample4"
question = ""
if request.POST.get("questionID"):
questionID = int(request.POST.get("questionID"))
question = Question.objects.get(id=questionID)
question_description = question.title
question_helptext = question.help_text
if question.type in ("multiplechoice", "checkbox"):
if question.type == "multiplechoice":
choices = question.multiplechoicequestion.choices.all()
if question.type == "checkbox":
choices = question.checkboxquestion.choices.all()
value = ""
for choice in choices:
value += "%s@#@" % choice.label
values = value[0:-3]
html = "<div class='singleQuestionDiv'>"
html += "<span class='question_no'>Q:</span>"
html += "<span class='question_description editable'>%s</span><br />" % question_description
html += "<span class='question_helptext editable hideable'>%s</span><br />" % question_helptext
type = request.POST.get("question_type")
group_name = question_no
if request.POST.get("questionID"):
if type == "paragraph":
html += show_paragraph(question.paragraphquestion.max_no_characters, is_required=question.is_required)
elif type == "numeric":
html += show_numeric(question.numericquestion.max_value, question.numericquestion.min_value,
question.is_required)
elif type == "checkbox":
html += show_checkbox(group_name, values, question.checkboxquestion.max_checked,
question.checkboxquestion.min_checked, question.is_required)
elif type == "multiplechoice":
html += show_mcq(group_name, values, question.is_required)
elif type == "scale":
html += show_scale(max_value=question.scalequestion.max_value,
min_value=question.scalequestion.min_value, increment=question.scalequestion.increment,
is_required=question.is_required)
elif type == "date":
html += show_date(min_value=question.datequestion.min_value,
max_value=question.datequestion.max_value, start_value=question.datequestion.start_value,
is_required=question.is_required)
elif type == "text":
html += show_text(max_no_character=question.textquestion.max_no_characters,
is_required=question.is_required)
else:
if type == "paragraph":
html += show_paragraph()
elif type == "numeric":
html += show_numeric()
elif type == "checkbox":
html += show_checkbox(group_name, values)
elif type == "multiplechoice":
html += show_mcq(group_name, values)
elif type == "scale":
html += show_scale()
elif type == "date":
html += show_date()
elif type == "text":
html += show_text()
html += "</div>"
dict = {"content": html}
return HttpResponse(simplejson.dumps(dict), mimetype='application/javascript')
request.session['question_created_total'] = '0'
is_collaborator = False
collaborators = ""
survey = ""
title = "New Survey(Click to change)"
description = "Add description here"
deadline = datetime.now() + timedelta(days=7)
surveyID = 0
try:
if view_key != "":
surveyID = Survey.objects.get(key=view_key).id
except BaseException as e:
return error_jump(request)
if surveyID != 0:
survey = Survey.objects.get(id=surveyID)
is_collaborator = survey.is_collaborator(request.user)
collaborators = []
for collaborator in survey.collaboration_set.all():
collaborators.append(str(collaborator.user.id))
collaborators = ",".join(collaborators)
title = survey.title
description = survey.description
deadline = survey.deadline
deadline = deadline.strftime("%d/%m/%Y")
dict = {'surveyID': surveyID, 'survey': survey, "title": title, "description": description, "deadline": deadline,
'is_collaborator': is_collaborator, 'collaborators': collaborators}
template = "edit_survey.html"
return render_to_response(template, dict, RequestContext(request))
def validate_survey(request):
if request.POST:
question_type = request.POST.get("question_type")
question_no = int(request.POST.get("question_no"))
question_helptext = request.POST.get("question_helptext")
question_title = request.POST.get("question_title")
selections = request.POST.get("selections")
selections = selections.split("@#@")
attributes = request.POST.get("attributes")
attributes = attributes.split("@#@")
survey_title = request.POST.get("survey_title").strip()
survey_description = request.POST.get("survey_description")
survey_deadline = request.POST.get("survey_deadline")
survey_deadline = survey_deadline.strip()
survey_same_title = Survey.objects.filter(title=survey_title,user=request.user)
print survey_same_title
surveyID = int(request.POST.get("surveyID"))
errors = ""
li = "<li>Q%d: %s</li>" % (question_no, "%s")
if survey_title == "New Survey(Click to change)" or survey_title == "Click here to add...":
survey_title = ""
if len(survey_same_title)>0 and question_no==1 and (surveyID==0 or not Survey.objects.get(id=surveyID).title==survey_title):
errors += "<li>%s: %s</li>" % (
"Survey title", "You have already created a survey with the same title.")
if len(survey_title)==0 and question_no==1:
errors += "<li>%s: %s</li>" % (
"Survey title", "Please enter a title..")
if len(survey_title) > 128 and question_no == 1:
errors += "<li>%s: %s</li>" % (
"Survey title", "Number of characters cannot exceed %d, %d characters are provided.") % (
128, len(survey_title))
if len(survey_description) > 10000 and question_no == 1:
errors += "<li>%s: %s</li>" % (
"Survey description", "Number of characters cannot exceed %d, %d characters are provided.") % (
10000, len(survey_description))
try:
select_time = datetime.strptime(survey_deadline, "%d/%m/%Y")
if select_time < datetime.now():
errors += "<li>%s: %s</li>" % ("Survey deadline", "Please choose one after today's date.")
except BaseException as e:
errors += "<li>%s: %s</li>" % ("Survey deadline", "The date format is invalid, please select one.")
if len(question_title) > 500:
errors += li % "%s -- Cannot exceed 500 characters, %d characters provided." % (
"Question title", len(question_title))
if len(question_helptext) > 500:
errors += li % "%s -- Cannot exceed 500 characters, %d characters provided." % (
"Question help text", len(question_helptext))
if question_type == 'paragraph':
max_no_characters = attributes[0]
try:
max_no_characters = int(max_no_characters)
if max_no_characters > 10000 or max_no_characters < 0:
errors += li % "%s -- Please enter in an integer in [0,10000], %d provided." % (
"Max character", max_no_characters)
except BaseException as e:
errors += li % "%s -- Please enter in an integer in [0,10000]." % "Max character"
if question_type == 'text':
max_no_characters = attributes[0]
try:
max_no_characters = int(max_no_characters)
if max_no_characters > 255 or max_no_characters < 0:
errors += li % "%s -- Please enter in an integer in [0,255], %d provided." % (
"Max character", max_no_characters)
except BaseException as e:
errors += li % "%s -- Please enter in an integer in [0,255]." % "Max character"
if question_type == "numeric":
max_value = attributes[0]
min_value = attributes[1]
try:
max_value = float(max_value)
min_value = float(min_value)
if max_value > 10000 or max_value < -10000:
errors += li % "%s -- Please enter in a float in [-10000,10000], %f provided." % (
"Max value", max_value)
if min_value > 10000 or min_value < -10000:
errors += li % "%s -- Please enter in a float in [-10000,10000], %f provided." % (
"Min value", min_value)
if max_value < min_value:
errors += li % "Max value must be greater than min value."
except BaseException as e:
errors += li % "Max & Min value must be floats in [-10000, 10000]"
if question_type == "scale":
max_value = attributes[0]
min_value = attributes[1]
increment = attributes[2]
try:
max_value = float(max_value)
min_value = float(min_value)
increment = float(increment)
if max_value > 10000 or max_value < -10000:
errors += li % "%s -- Please enter in a float in [-10000,10000], %f provided." % (
"Max value", max_value)
if min_value > 10000 or min_value < -10000:
errors += li % "%s -- Please enter in a float in [-10000,10000], %f provided." % (
"Min value", min_value)
if increment > 10000 or increment <= 0:
errors += li % "%s -- Please enter in a float in (-0,10000], %f provided." % (
"Increment", increment)
if max_value < min_value:
errors += li % "Max value must be greater than min value."
except BaseException as e:
errors += li % "Max,Min and increment value must be invalid floats"
if question_type == "date":
min_value = attributes[0]
max_value = attributes[1]
start_value = attributes[2]
try:
max_value = datetime.strptime(max_value,"%d/%m/%Y")
min_value = datetime.strptime(min_value,"%d/%m/%Y")
start_value = datetime.strptime(start_value,"%d/%m/%Y")
if max_value < min_value:
errors += li % "Max value must be greater than min value."
if max_value<start_value or min_value>start_value:
errors += li % "Start value must be between max value and min value."
except BaseException as e:
errors += li % "Max,Min and start value must be invalid date"
if question_type == "multiplechoice":
pass
if question_type == "checkbox":
max_checked = attributes[0]
min_checked = attributes[1]
no_of_selections = len(selections) - 1
try:
max_checked = int(max_checked)
min_checked = int(min_checked)
if max_checked < 0 or max_checked > no_of_selections:
errors += li % "%s -- Please enter in an integer in [0,%d], %d provided." % (
"Max checked", no_of_selections, max_checked)
if min_checked < 0 or min_checked > no_of_selections:
errors += li % "%s -- Please enter in an integer in [0,%d], %d provided." % (
"Min checked", no_of_selections, min_checked)
if max_checked < min_checked:
errors += li % "Max checked must be greater than min checked."
except BaseException as e:
errors += li % "Max & Min checked must be integer in [0, %d]" % no_of_selections
dict = {"errors": errors}
return HttpResponse(simplejson.dumps(dict), mimetype='application/javascript')
return error_jump(request)
def create_survey(request):
if request.POST:
survey_title = request.POST.get("survey_title")
survey_description = request.POST.get("survey_description")
if survey_title == "New Survey(Click to change)":
survey_title = "No title"
if survey_description == "Add description here" or survey_description == "Click here to add...":
survey_description = ""
publishBool = request.POST.get("publishBool")
survey = Survey(title=survey_title)
survey.description = survey_description
creator = User.objects.get(id = int(request.POST.get( "creatorID")))
survey.user = creator
survey.theme_name = request.POST.get("theme_name")
deadline = request.POST.get("survey_deadline")
survey.deadline = datetime.strptime(deadline.strip(), "%d/%m/%Y")
survey.save()
collaborators = request.POST.get("collaborators")
collaborators = collaborators.split(",")
try:
collaborators.remove("")
except BaseException as e:
pass
for collaborator_id in collaborators:
collaboration = Collaboration()
collaboration.user = User.objects.get(id = int(collaborator_id))
collaboration.survey = survey
collaboration.is_active = True
collaboration.save()
if publishBool == 'true':
survey.is_published = True
survey.save()
surveyID = survey.id
dict = {"surveyID": surveyID, "survey_key": survey.key}
return HttpResponse(simplejson.dumps(dict), mimetype='application/javascript')
return error_jump(request)
from django.http import Http404
@login_required()
def delete_survey(request, survey_key=""):
if request.POST:
surveyID = int(request.POST.get("surveyID"))
survey = Survey.objects.get(id=surveyID)
survey.delete()
dict = {}
return HttpResponse(simplejson.dumps(dict),mimetype='application/javascript')
else:
try:
survey = Survey.objects.get(key=survey_key)
except Survey.DoesNotExist:
raise Http404
if request.user == survey.user or survey.is_collaborator(request.user):
survey_title = survey.title
survey.delete()
messages.success(request, 'Survey "%s deleted" successfully' % survey_title)
else:
return error_jump(request,"unauthorized")
return HttpResponseRedirect('/account')
return error_jump(request)
def save_survey(request, surveyID):
if request.POST:
question_type = request.POST.get("question_type")
question_no = request.POST.get("question_no")
question_helptext = request.POST.get("question_helptext")
is_required = request.POST.get("is_required")
if question_helptext == "Click here to add...":
question_helptext == ""
question_title = request.POST.get("question_title")
selections = request.POST.get("selections")
attributes = request.POST.get("attributes")
if int(surveyID) == 0:
survey = Survey(title="no title")
survey.save()
surveyID = survey.id
if question_type == "paragraph":
question = ParagraphQuestion()
elif question_type == "numeric":
question = NumericQuestion()
elif question_type == "multiplechoice":
question = MultipleChoiceQuestion()
elif question_type == "checkbox":
question = CheckboxQuestion()
elif question_type == "scale":
question = ScaleQuestion()
elif question_type == "text":
question = TextQuestion()
elif question_type == "date":
question = DateQuestion()
else:
return
question.survey = Survey.objects.get(id=surveyID)
question.id_in_survey = question_no
question.title = question_title.strip()
question.help_text = question_helptext
question.max_no_characters = 0
if is_required == 'true':
question.is_required = True
else:
question.is_required = False
question.save()
if question_type == "paragraph":
attributes_list = attributes.split("@#@")
question.max_no_characters = int(attributes_list[0])
elif question_type == "numeric":
attributes_list = attributes.split("@#@")
question.max_value = float(attributes_list[0])
question.min_value = float(attributes_list[1])
elif question_type == "multiplechoice":
choices = selections.split("@#@")
choices.pop()
count = 0
for choice_label in choices:
count += 1
choice = MultipleChoice()
choice.question = question
choice.label = choice_label
choice.id_in_question = count
choice.save()
elif question_type == "checkbox":
attributes_list = attributes.split("@#@")
question.max_checked = int(attributes_list[0])
question.min_checked = int(attributes_list[1])
choices = selections.split("@#@")
choices.pop()
count = 0
for choice_label in choices:
count += 1
choice = CheckboxChoice()
choice.question = question
choice.label = choice_label
choice.id_in_question = count
choice.save()
elif question_type == "scale":
attributes_list = attributes.split("@#@")
question.max_value = float(attributes_list[0])
question.min_value = float(attributes_list[1])
question.increment = float(attributes_list[2])
elif question_type == "date":
attributes_list = attributes.split("@#@")
question.min_value = datetime.strptime(attributes_list[0].strip(),"%d/%m/%Y")
question.max_value = datetime.strptime(attributes_list[1].strip(),"%d/%m/%Y")
question.start_value = datetime.strptime(attributes_list[2].strip(),"%d/%m/%Y")
elif question_type == "text":
attributes_list = attributes.split("@#@")
question.max_no_characters = int(attributes_list[0])
else:
return
question.save()
dict = {"surveyID": surveyID}
return HttpResponse(simplejson.dumps(dict), mimetype='application/javascript')
return error_jump(request)
def respondent(request):
return render_to_response('respondent.html', RequestContext(request))
from survey.models import Survey
def view_survey(request, view_key, *args, **kwargs):
survey = Survey.objects.get(key=view_key)
questions = survey.questions.order_by('id_in_survey')
request.session['dt_start'] = json.dumps(datetime.now(), default=date_handler)
theme_name = survey.theme_name
deadline = survey.deadline
now = datetime.now()
expired = False
if now > deadline:
expired = True
deadline = deadline.strftime("%A, %B %d, %Y %H:%M:%S")
if not theme_name:
theme_name = "grass"
dict = {'survey': survey, 'questions': questions, 'dt_start': datetime.now(), 'theme_name': theme_name,
'deadline': deadline, 'expired': expired}
return render_to_response('respondent.html', dict, RequestContext(request))
def error_jump(request, error_type="404"):
message = ""
next_link = ""
if error_type == "not_login":
message = "Sorry, please log in before do it."
next_link = "/account/login/"
if error_type == "unauthorized":
message = "Sorry, you are unauthorized to do this. Please check again."
next_link = "/"
if error_type == "404":
message = "Sorry, the page is not found. "
next_link = "/"
if error_type == "edit_published_survey":
message = "Sorry, you cannot edit a published survey"
next_link = "/"
if error_type == "response_unpublished_survey":
message = "Sorry, the survey hasn't be published."
next_link = "/"
dict = {'message': message, 'next_link': next_link}
return render_to_response('error.html', dict, RequestContext(request))
def complete(request, view_key=""):
return render_to_response('complete.html', {"view_key": view_key}, RequestContext(request))
# email_re is assumed to come from the legacy Django (<1.6) validators module.
from django.core.validators import email_re
def is_valid_email(email):
    return True if email_re.match(email) else False
def share_survey(request):
    survey_id = request.POST.get('survey_id')
    email_data = request.POST.get('collaborators')
    owner_message = request.POST.get('owner_message')
success_emails = []
fail_emails = []
emails = [e.strip() for e in email_data.split(',')]
for e in emails:
if is_valid_email(e):
success_emails.append(e)
else:
fail_emails.append(e)
survey = Survey.objects.get(id=survey_id)
survey_url = "http://%s%s" % (get_current_site(request).domain, survey.get_absolute_url())
if request.user.first_name and request.user.last_name:
message = "%s %s (%s) invites you to take the survey.\n\nFollow this link to open the survey:\n%s\n\n%s" % (
request.user.first_name, request.user.last_name, request.user.email, survey_url, owner_message)
else:
message = "%s (%s) invites you to take the survey.\n\nFollow this link to open the survey:\n%s\n\n%s" % (
request.user.username, request.user.email, survey_url, owner_message)
send_mail('You are invited to do a survey', message, "noreply@%s" % get_current_site(request).domain,
success_emails)
    if len(fail_emails) > 0:
        messages.error(request, "Invalid email address(es): %s" % ', '.join(fail_emails))
    if len(success_emails) > 0:
        messages.success(request, 'You have successfully shared with %s' % ', '.join(success_emails))
return HttpResponseRedirect("/account")
|
11518268
|
import io
import os
import json
import zipfile
import argparse
import urllib.request
import multiprocessing
from os.path import join as pjoin
import tqdm
import numpy as np
import spacy
import textworld
from textworld.logic import State, Rule, Proposition, Variable
from generic import preproc
from generic import process_facts, serialize_facts, gen_graph_commands
from generic import process_local_obs_facts, process_fully_obs_facts
ZIP_FILENAME = "TextWorld_CoG2019.zip"
GAMES_URL = "https://aka.ms/ftwp/dataset.zip"
def download(url, filename=None, force=False):
filename = filename or url.split('/')[-1]
if os.path.isfile(filename) and not force:
return filename
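    # urllib.request.urlretrieve invokes this reporthook as
    # reporthook(block_number, block_size, total_size).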
def _report_download_status(chunk_id, max_chunk_size, total_size):
size = chunk_id * max_chunk_size / 1024**2
size_total = total_size / 1024**2
unit = "Mb"
if size <= size_total:
print("{:.1f}{unit} / {:.1f}{unit}".format(size, size_total, unit=unit), end="\r")
filename, _ = urllib.request.urlretrieve(url, filename, _report_download_status)
return filename
def extract_games(zip_filename, dst):
zipped_file = zipfile.ZipFile(zip_filename)
filenames_to_extract = [f for f in zipped_file.namelist() if f.endswith(".z8") or f.endswith(".json")]
subdirs = {
"train": pjoin(dst, "train"),
"valid": pjoin(dst, "valid"),
"test": pjoin(dst, "test"),
}
for d in subdirs.values():
if not os.path.isdir(d):
os.makedirs(d)
print("Extracting...")
extracted_files = []
for filename in tqdm.tqdm(filenames_to_extract):
subdir = subdirs[os.path.basename(os.path.dirname(filename))]
out_file = pjoin(subdir, os.path.basename(filename))
extracted_files.append(out_file)
if os.path.isfile(out_file):
continue
data = zipped_file.read(filename)
with open(out_file, "wb") as f:
f.write(data)
return extracted_files
def collect_data_from_game(gamefile, seed, branching_depth):
tokenizer = spacy.load('en', disable=['ner', 'parser', 'tagger'])
rng = np.random.RandomState(seed)
# Ignore the following commands.
commands_to_ignore = ["look", "examine", "inventory"]
env_infos = textworld.EnvInfos(description=True, location=True, facts=True, last_action=True,
admissible_commands=True, game=True, extras=["walkthrough"])
env = textworld.start(gamefile, env_infos)
env = textworld.envs.wrappers.Filter(env)
obs, infos = env.reset()
walkthrough = infos["extra.walkthrough"]
# Make sure we start with listing the inventory.
if walkthrough[0] != "inventory":
walkthrough = ["inventory"] + walkthrough
# Add 'restart' command as a way to indicate the beginning of the game.
walkthrough = ["restart"] + walkthrough
dataset = []
done = False
facts_seen = set()
for i, cmd in enumerate(walkthrough):
last_facts = facts_seen
if i > 0: # != "restart"
obs, _, done, infos = env.step(cmd)
facts_seen = process_facts(last_facts, infos["game"], infos["facts"], infos["last_action"], cmd)
dataset += [{
"game": os.path.basename(gamefile),
"step": (i, 0),
"observation": preproc(obs, tokenizer=tokenizer),
"previous_action": cmd.lower(),
"target_commands": sorted(gen_graph_commands(facts_seen - last_facts, cmd="add")
+ gen_graph_commands(last_facts - facts_seen, cmd="delete")),
"previous_graph_seen": sorted(serialize_facts(last_facts)),
"graph_seen": sorted(serialize_facts(facts_seen)),
}]
if done:
break # Stop collecting data if game is done.
# Fork the current game & seen facts.
env_ = env.copy()
facts_seen_ = facts_seen
# Then, take N random actions.
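        # Candidate commands exclude look/examine/inventory and the next
        # walkthrough command, so each branch diverges from the golden path.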
for j in range(1, branching_depth + 1):
commands = [c for c in infos["admissible_commands"]
if ((c == "examine cookbook" or c.split()[0] not in commands_to_ignore)
and (i + 1) != len(walkthrough) and c != walkthrough[i + 1])]
if len(commands) == 0:
break
cmd_ = rng.choice(commands)
obs, _, done, infos = env_.step(cmd_)
if done:
break # Stop collecting data if game is done.
last_facts_ = facts_seen_
facts_seen_ = process_facts(last_facts_, infos["game"], infos["facts"], infos["last_action"], cmd_)
dataset += [{
"game": os.path.basename(gamefile),
"step": (i, j),
"observation": preproc(obs, tokenizer=tokenizer),
"previous_action": cmd_.lower(),
"target_commands": sorted(gen_graph_commands(facts_seen_ - last_facts_, cmd="add")
+ gen_graph_commands(last_facts_ - facts_seen_, cmd="delete")),
"previous_graph_seen": sorted(serialize_facts(last_facts_)),
"graph_seen": sorted(serialize_facts(facts_seen_)),
}]
return gamefile, dataset
def collect_data(gamefiles, args):
print("Using {} processes.".format(args.nb_processes))
desc = "Extracting data from {} games".format(len(gamefiles))
pbar = tqdm.tqdm(total=len(gamefiles), desc=desc)
outfile = open(args.output, "w")
outfile.write("[\n")
    def _assemble_results(result):
        gamefile, data = result
pbar.set_postfix_str(gamefile)
pbar.update()
outfile.write(",\n".join(json.dumps(d) for d in data) + ",\n")
if args.nb_processes > 1:
pool = multiprocessing.Pool(args.nb_processes)
results = []
for i, gamefile in enumerate(gamefiles):
seed = args.seed + i
result = pool.apply_async(collect_data_from_game, (gamefile, seed, args.branching_depth), callback=_assemble_results)
results.append(result)
for result in results:
result.get()
pool.close()
pool.join()
else:
for i, gamefile in enumerate(gamefiles):
seed = args.seed + i
data = collect_data_from_game(gamefile, seed, args.branching_depth)
_assemble_results(data)
pbar.close()
outfile.seek(outfile.tell() - 2, os.SEEK_SET) # Overwrite last comma.
outfile.write("\n]")
outfile.close()
def build_argparser():
parser = argparse.ArgumentParser()
parser.add_argument("--output", default="dataset.json",
help="Path where to save the dataset (.json)")
parser.add_argument("--nb-processes", type=int,
help="Number of CPUs to use. Default: all available.")
parser.add_argument("--seed", type=int, default=20190423,
help="Seed for the random exploration.")
parser.add_argument("--branching-depth", type=int, default=0,
help="Number of random commands for each transition in the walkthrough. Default: %(default)s.")
parser.add_argument("--games-dir", default="./games/",
help="Folder where to extract the downloaded games.")
parser.add_argument("-f", "--force", action="store_true",
help="Overwrite existing files.")
return parser
def main():
parser = build_argparser()
args = parser.parse_args()
args.nb_processes = args.nb_processes or multiprocessing.cpu_count()
if os.path.isfile(args.output) and not args.force:
parser.error("{} already exists. Use -f to overwrite.".format(args.output))
if not os.path.exists(args.games_dir):
filename = download(GAMES_URL, filename=ZIP_FILENAME, force=args.force)
extracted_files = extract_games(filename, dst=args.games_dir)
gamefiles = [f for f in extracted_files if f.endswith(".z8")]
else:
        gamefiles = [pjoin(args.games_dir, f) for f in os.listdir(args.games_dir) if f.endswith(".z8")]
collect_data(gamefiles, args)
if __name__ == "__main__":
main()
|
11518284
|
from typing import List
import gevent
import pytest
from eth_typing import Address, BlockNumber
from web3 import Web3
from raiden.constants import BLOCK_ID_LATEST, GENESIS_BLOCK_NUMBER
from raiden.exceptions import BrokenPreconditionError
from raiden.network.proxies.proxy_manager import ProxyManager, ProxyManagerMetadata
from raiden.network.rpc.client import JSONRPCClient
from raiden.tests.utils.smartcontracts import is_tx_hash_bytes
from raiden.utils.typing import PrivateKey, TokenAmount, UserDepositAddress
from raiden_contracts.contract_manager import ContractManager
@pytest.mark.parametrize("number_of_nodes", [1])
def test_user_deposit_proxy_withdraw(
private_keys: List[bytes],
web3: Web3,
contract_manager: ContractManager,
user_deposit_address: Address,
):
c0_client = JSONRPCClient(web3, PrivateKey(private_keys[0]))
c0_proxy_manager = ProxyManager(
rpc_client=c0_client,
contract_manager=contract_manager,
metadata=ProxyManagerMetadata(
token_network_registry_deployed_at=GENESIS_BLOCK_NUMBER,
filters_start_at=GENESIS_BLOCK_NUMBER,
),
)
c0_user_deposit_proxy = c0_proxy_manager.user_deposit(
UserDepositAddress(user_deposit_address), BLOCK_ID_LATEST
)
withdraw_plan = c0_user_deposit_proxy.get_withdraw_plan(c0_client.address, BLOCK_ID_LATEST)
# There should be no withdraw plan
assert withdraw_plan.withdraw_block == 0
assert withdraw_plan.withdraw_amount == 0
initial_deposit = c0_user_deposit_proxy.get_total_deposit(c0_client.address, BLOCK_ID_LATEST)
# None of these are valid plan_withdraw amounts
for value in [-1, 0, initial_deposit + 1]:
with pytest.raises(BrokenPreconditionError):
c0_user_deposit_proxy.plan_withdraw(TokenAmount(value), BLOCK_ID_LATEST)
# With no plan any withdraw must fail in the precondition check
with pytest.raises(BrokenPreconditionError):
c0_user_deposit_proxy.withdraw(TokenAmount(1), BLOCK_ID_LATEST)
withdraw_amount = TokenAmount(initial_deposit // 2)
transaction_hash, withdraw_block = c0_user_deposit_proxy.plan_withdraw(
withdraw_amount, BLOCK_ID_LATEST
)
assert is_tx_hash_bytes(transaction_hash)
# The effective balance must take the planned withdraw into account
effective_balance_after_withdraw_plan = c0_user_deposit_proxy.effective_balance(
c0_client.address, BLOCK_ID_LATEST
)
assert effective_balance_after_withdraw_plan == initial_deposit - withdraw_amount
# Wait until target block - 1.
# We set the retry timeout to 0.1 to make sure there is enough time for the failing case
# below.
c0_client.wait_until_block(BlockNumber(withdraw_block - 1), retry_timeout=0.1)
# Withdraw should still fail
with pytest.raises(BrokenPreconditionError):
c0_user_deposit_proxy.withdraw(TokenAmount(withdraw_amount), BLOCK_ID_LATEST)
# Wait the final block
c0_user_deposit_proxy.client.wait_until_block(withdraw_block)
# Now withdraw must succeed
transaction_hash = c0_user_deposit_proxy.withdraw(
TokenAmount(withdraw_amount), BLOCK_ID_LATEST
)
assert is_tx_hash_bytes(transaction_hash)
# The current balance must now match the reduced value
new_current_balance = c0_user_deposit_proxy.get_balance(c0_client.address, BLOCK_ID_LATEST)
assert new_current_balance == initial_deposit - withdraw_amount
# Deposit again after the funds were withdrawn
amount_to_deposit = 1
tasks = set()
# Force a race condition between deposits, letting successive, concurrent calls
# wait for the first inflight transaction
for _ in range(3):
task = gevent.spawn(
c0_user_deposit_proxy.approve_and_deposit,
beneficiary=c0_client.address,
# the total deposit needs to increase monotonically in the contract
total_deposit=initial_deposit + amount_to_deposit,
given_block_identifier=BLOCK_ID_LATEST,
)
tasks.add(task)
results = gevent.joinall(tasks, raise_error=True)
# All tx have the same deposit,
# so one of them should successfully transact,
# while all others should wait for the inflight transaction
# All calls should then be associated to the same on-chain transaction
tx_hashes = set(result.get() for result in results)
assert len(tx_hashes) == 1
assert is_tx_hash_bytes(tx_hashes.pop())
|
11518287
|
import maya.mel as mm
import maya.cmds as mc
import glTools.tools.mesh
import glTools.utils.base
import glTools.utils.component
import glTools.utils.mathUtils
import glTools.utils.mesh
import glTools.utils.shape
import glTools.utils.skinCluster
import glTools.utils.transform
def cutSkin(mesh,weightThreshold=0.25,reducePercent=None,parentShape=False):
    '''
    Extract a per influence proxy mesh from a skinned mesh based on influence weights.
    @param mesh: Mesh to extract faces from
    @type mesh: str
    @param weightThreshold: Weight threshold below which skinCluster weights are pruned before extraction
    @type weightThreshold: float
    @param reducePercent: Optional polyReduce percentage applied to each extracted influence mesh
    @type reducePercent: int or None
    @param parentShape: Parent each extracted mesh shape to its influence transform
    @type parentShape: bool
    '''
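    # Hypothetical usage (the mesh name is illustrative, not from this codebase):
    #   cutSkin('body_geo', weightThreshold=0.25, reducePercent=50, parentShape=True)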
# Initialize
startTime = mc.timerX()
mc.undoInfo(state=False)
# Get Skin Info
skin = glTools.utils.skinCluster.findRelatedSkinCluster(mesh)
if not skin:
print('Cut Skin: Mesh "" has no skinCluster! Skipping...')
return None
# Prune Weights
glTools.utils.skinCluster.lockSkinClusterWeights(skin,lock=False,lockAttr=False)
pruneWts = glTools.utils.mathUtils.distributeValue(10,rangeStart=0.001,rangeEnd=weightThreshold)
mc.select(mesh)
for wt in pruneWts:
try: mm.eval('doPruneSkinClusterWeightsArgList 1 {"'+str(wt)+'"}')
        except Exception as e:
print('Prune weight FAILED ('+mesh+')! '+str(e))
break
# Extract Influence Meshes
infMeshList = []
infList = mc.skinCluster(skin,q=True,inf=True)
for influence in infList:
infMesh = cutSkin_extractInfluenceMesh(mesh,influence)
if not infMesh: continue
if reducePercent != None:
try: cutSkin_reduce(infMesh,percent=reducePercent)
            except Exception as e: print('Error during reduce ('+infMesh+'): '+str(e))
if parentShape:
infMeshShape = cutSkin_parentShape(infMesh)
infMeshList.extend(infMeshShape)
else:
infMeshList.append(infMesh)
# Finalize
totalTime = mc.timerX(startTime=startTime)
print('CutSkin - Total Time: '+str(totalTime))
mc.undoInfo(state=True)
# Return Result
return infMeshList
def cutSkin_extractInfluenceMesh(mesh,influence):
'''
Extract new mesh from faces of original skinned mesh based on influence weights.
@param mesh: Mesh to extract faces from
@type mesh: str
@param influence: Influence to use to extract faces
@type influence: str
'''
# Check Mesh
if not glTools.utils.mesh.isMesh(mesh):
raise Exception('Object "'+mesh+'" is not a valid mesh! Unable to extract influence mesh...')
# Get SkinCluster
skin = glTools.utils.skinCluster.findRelatedSkinCluster(mesh)
if not skin:
raise Exception('Mesh "'+mesh+'" has no skinCluster! Unable to extract influence mesh...')
# Get Influence List
infList = mc.skinCluster(skin,q=True,inf=True)
if not influence in infList:
raise Exception('SkinCluster "'+skin+'" has no influence "'+influence+'"! Unable to extract influence mesh...')
# Get Influence Faces
mc.select(cl=True)
mc.skinCluster(skin,e=True,selectInfluenceVerts=influence)
infVtxList = mc.ls(sl=1)
if not infVtxList: return None
try:
infVtxList = glTools.utils.component.expandVertexSelection(infVtxList)
infVtxList = glTools.utils.component.shrinkVertexSelection(infVtxList)
except: pass
if not infVtxList: return None
infFaceList = mc.polyListComponentConversion(infVtxList,fv=True,tf=True,internal=True) or []
if not infFaceList: return None
if '*' in infFaceList[0]: return None
# Duplicate Mesh
infMesh = glTools.tools.mesh.reconstructMesh(mesh)
infMeshShape = mc.listRelatives(infMesh,s=True,ni=True)[0]
childNodes = mc.listRelatives(infMesh,c=True)
if infMeshShape in childNodes: childNodes.remove(infMeshShape)
if childNodes: mc.delete(childNodes)
try: mc.parent(infMesh,w=True)
except: pass
mc.setAttr(infMesh+'.overrideEnabled',0)
# Extract Influence Faces
infFaceList = [i.replace(mesh,infMesh) for i in infFaceList]
mc.select(infFaceList)
mm.eval('InvertSelection')
mc.delete()
mc.delete(infMesh,ch=True)
# Add Attributes
infProxyAttr = 'influenceProxy'
meshProxyAttr = 'meshProxy'
mc.addAttr(infMesh,ln=infProxyAttr,dt='string')
mc.setAttr(infMesh+'.'+infProxyAttr,influence,type='string',l=True)
mc.addAttr(infMesh,ln=meshProxyAttr,dt='string')
mc.setAttr(infMesh+'.'+meshProxyAttr,mesh,type='string',l=True)
# Return Result
return infMesh
def cutSkin_reduce(mesh,percent=50):
'''
Basic mesh cleanup and reduce.
@param mesh: Mesh to cleanup and reduce
@type mesh: str
@param percent: Poly reduce percent amount
@type percent: int or float
'''
# Get Influence Mesh Attributes
infProxy = None
infProxyAttr = 'influenceProxy'
if mc.objExists(mesh+'.'+infProxyAttr): infProxy = mc.getAttr(mesh+'.'+infProxyAttr)
meshProxy = None
meshProxyAttr = 'meshProxy'
if mc.objExists(mesh+'.'+meshProxyAttr): meshProxy = mc.getAttr(mesh+'.'+meshProxyAttr)
# Separate to Shells
meshItems = [mesh]
try: meshItems = mc.polySeparate(mesh,ch=False)
except: pass
# Clean Non-manifold Geometry
glTools.utils.mesh.polyCleanup( meshList = meshItems,
nonManifold = True,
keepHistory = False,
fix = True )
# Poly Reduce
for meshItem in meshItems:
try:
mc.polyReduce( meshItem,
version = 1, # New
termination = 0, # Percentage termination
percentage = percent,
sharpness = 1,
keepBorder = 1,
keepMapBorder = 1,
keepColorBorder = 0,
keepFaceGroupBorder = 0,
keepHardEdge = 0,
keepCreaseEdge = 0,
keepBorderWeight = 1,
keepMapBorderWeight = 1,
preserveTopology = 1,
keepQuadsWeight = 1,
replaceOriginal = 1,
cachingReduce = 0,
constructionHistory = 0 )
except: pass
# Cleanup
if len(meshItems) > 1:
meshResult = mc.polyUnite(meshItems,ch=False,mergeUVSets=True)
if mc.objExists(mesh): mc.delete(mesh)
mesh = mc.rename(meshResult,mesh)
# Rebuild Influence Mesh Attributes
if infProxy and not mc.objExists(mesh+'.'+infProxyAttr):
mc.addAttr(mesh,ln=infProxyAttr,dt='string')
mc.setAttr(mesh+'.'+infProxyAttr,infProxy,type='string',l=True)
if meshProxy and not mc.objExists(mesh+'.'+meshProxyAttr):
mc.addAttr(mesh,ln=meshProxyAttr,dt='string')
mc.setAttr(mesh+'.'+meshProxyAttr,meshProxy,type='string',l=True)
# Return Result
return mesh
def cutSkin_parentShape(infMesh):
'''
Parent influence mesh shape to influence transform.
@param infMesh: Influence mesh to parent to influence transform.
@type infMesh: str
'''
# Checks Influence Mesh
if not mc.objExists(infMesh):
raise Exception('Influence mesh "'+infMesh+'" does not exist!')
# Checks Influence Attr
infProxyAttr = 'influenceProxy'
if not mc.attributeQuery(infProxyAttr,n=infMesh,ex=True):
raise Exception('Influence mesh "'+infMesh+'" has no "'+infProxyAttr+'" attribute! Unable to parent influence shape...')
influence = mc.getAttr(infMesh+'.'+infProxyAttr)
if not mc.objExists(influence):
raise Exception('Influence does not exist! Unable to parent shape...')
# Get Shape(s)
infShapes = []
if glTools.utils.transform.isTransform(infMesh):
infShapes = mc.listRelatives(infMesh,s=True,ni=True,pa=True)
elif str(mc.objectType(infMesh)) in ['mesh','nurbsSurface']:
infShapes = [str(infMesh)]
# Parent Proxy Shapes to Joint
for i in range(len(infShapes)):
infShapesParent = mc.listRelatives(infShapes[i],p=True,pa=True)[0]
for at in ['tx','ty','tz','rx','ry','rz','sx','sy','sz']:
try: mc.setAttr(infShapesParent+'.'+at,l=False)
except: pass
infShapes[i] = glTools.utils.shape.parent(infShapes[i],influence)[0]
glTools.utils.base.displayOverride(infShapes[i],overrideEnable=1,overrideDisplay=2,overrideLOD=0)
# Delete Original
mc.delete(infMesh)
# Tag Shapes
proxyAttr = 'proxyJoint'
for shape in infShapes:
if not mc.objExists(shape+'.'+proxyAttr):
mc.addAttr(shape,ln=proxyAttr,dt='string')
mc.setAttr(shape+'.'+proxyAttr,influence,type='string',l=True)
# Return Result
return infShapes
|
11518293
|
from altfe.interface.root import interRoot
@interRoot.bind("api/biu/get/idworks/", "PLUGIN")
class getIDWorks(interRoot):
def run(self, cmd):
try:
args = self.STATIC.arg.getArgs(
"userWorks",
[
"userID=%s" % self.CORE.biu.apiAssist.user_id,
"type",
"&sortMode=0",
"&isSort=0",
"&totalPage=5",
"&groupIndex=0",
],
)
except:
return {"code": 0, "msg": "missing parameters"}
return {
"code": 1,
"msg": {
"way": "get",
"args": args,
"rst": self.gank(args["ops"].copy(), args["fun"].copy()),
},
}
def gank(self, opsArg, funArg):
self.STATIC.arg.argsPurer(funArg, {"userID": "user_id"})
status_arg = []
r = []
grpIdx = int(opsArg["groupIndex"])  # group index
ttlPage = int(opsArg["totalPage"])  # pages per group
for p in range(grpIdx * ttlPage, (grpIdx + 1) * ttlPage):
argg = funArg.copy()
argg["offset"] = p * 30
status_arg.append(argg)
for x in self.CORE.biu.pool_srh.map(self.__thread_gank, status_arg):
r += x
if int(opsArg["isSort"]) == 1:
if str(opsArg["sortMode"]) == "1":
r = sorted(r, key=lambda kv: kv["total_view"], reverse=True)
else:
r = sorted(r, key=lambda kv: kv["total_bookmarks"], reverse=True)
self.CORE.biu.appWorksPurer(r)
return {"api": "app", "data": r}
def __thread_gank(self, kw):
try:
data = self.CORE.biu.apiAssist.user_illusts(**kw)
except:
return []
if "illusts" in data and len(data["illusts"]) != 0:
return data["illusts"]
return []
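# Pagination sketch: with groupIndex=1 and totalPage=5, gank() requests
# user_illusts at offsets 150, 180, 210, 240 and 270 (30 works per page),
# one request per worker in the search thread pool.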
|
11518338
|
import pytest
from jumpscale.loader import j
from solutions_automation import deployer
from tests.sals.automated_chatflows.chatflows_base import ChatflowsBase
@pytest.mark.integration
class PoolChatflows(ChatflowsBase):
@classmethod
def setUpClass(cls):
super().setUpClass()
# Accept admin T&C for testing identity.
cls.accept_terms_conditions(type_="marketplace")
cls.solution_uuid = ""
@classmethod
def tearDownClass(cls):
# Remove userEntry for accepting T&C
cls.user_factory.delete(cls.user_entry_name)
super().tearDownClass()
def tearDown(self):
if self.solution_uuid:
j.sals.reservation_chatflow.solutions.cancel_solution_by_uuid(self.solution_uuid)
super().tearDown()
def test01_create_pool(self):
"""Test case for creating a pool.
**Test Scenario**
- Create a pool with some CU and SU units.
- Check that the pool has been created with the same units.
"""
self.info("Create a pool with some CU and SU units.")
name = self.random_name()
cu = j.data.idgenerator.random_int(0, 2)
su = j.data.idgenerator.random_int(1, 2)
time_unit = "Day"
time_to_live = j.data.idgenerator.random_int(1, 2)
farm = self.get_farm_name().capitalize()
pool = deployer.create_pool(
solution_name=name,
farm=farm,
cu=cu,
su=su,
time_unit=time_unit,
time_to_live=time_to_live,
wallet_name="demos_wallet",
)
self.info("Check that the pool has been created with the same units.")
reservation_id = pool.pool_data.reservation_id
pool_data = j.sals.zos.get().pools.get(reservation_id)
calculated_su = su * time_to_live * 60 * 60 * 24
calculated_cu = cu * time_to_live * 60 * 60 * 24
self.assertEqual(pool_data.cus, float(calculated_cu))
self.assertEqual(pool_data.sus, float(calculated_su))
def test02_extend_pool(self):
"""Test case for extending a pool.
**Test Scenario**
- Create a pool with some CU and SU units.
- Extend the pool that was created.
- Check that the pool has been extended with the same units.
"""
self.info("Create a pool with some CU and SU units.")
name = self.random_name()
farm = self.get_farm_name().capitalize()
pool = deployer.create_pool(solution_name=name, wallet_name="demos_wallet", farm=farm)
pool_id = pool.pool_data.reservation_id
self.info("Extend the pool has been created.")
cu = j.data.idgenerator.random_int(0, 2)
su = j.data.idgenerator.random_int(1, 2)
time_unit = "Day"
time_to_live = j.data.idgenerator.random_int(1, 2)
deployer.extend_pool(
pool_id=pool_id, wallet_name="demos_wallet", cu=cu, su=su, time_unit=time_unit, time_to_live=time_to_live,
)
self.info("Check that the pool has been extended with the same units.")
pool_data = j.sals.zos.get().pools.get(pool_id)
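# The first (1 * 1 * 60 * 60 * 24) term presumably accounts for the units of
# the pool as initially created (apparently 1 CU and 1 SU for one day by
# default); the second term is what the extension adds.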
calculated_cu = (1 * 1 * 60 * 60 * 24) + (cu * time_to_live * 60 * 60 * 24)
calculated_su = (1 * 1 * 60 * 60 * 24) + (su * time_to_live * 60 * 60 * 24)
self.assertEqual(pool_data.cus, float(calculated_cu))
self.assertEqual(pool_data.sus, float(calculated_su))
|
11518352
|
import json
from conftest import setup_dashboard
def test_freeform_mode_has_no_rows_or_cols(monkeypatch, ctx, client):
app, test = client
data = dict(
mode='freeform',
name='Some dashboard',
module_baz=json.dumps(
dict(name=1, width=400, height=112, dataSource='...')
),
module_foo=json.dumps(
dict(name=1, width=300, height=112, dataSource='...')
),
)
dom = setup_dashboard(monkeypatch, app, test, data)
container = dom.find('#container')
assert len(container.find('.grid-row')) == 0
|
11518375
|
from tests.integration.integration_test_case import IntegrationTestCase
class TestSessionExpired(IntegrationTestCase):
def test_session_expired_should_log_user_out(self):
self.launchSurvey('1', '0205')
start_page = self.last_url
self.post(url='/expire-session')
self.assertStatusOK()
self.get(url=start_page)
self.assertStatusUnauthorised()
|
11518376
|
import os
import sys
import argparse
from functools import partial
import time
import matplotlib.pyplot as plt
# import seaborn.apionly as sns
import seaborn as sns
import torch.nn as nn
import torch.nn.utils.spectral_norm as spectral_norm
from torch import autograd
from torch.autograd import Variable
import ray.tune as tune
from ray.tune.schedulers import ASHAScheduler
from residualblock import ResidualBlock
from gu import *
from util import *
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(curPath)
parser = argparse.ArgumentParser()
# action
parser.add_argument(
'--cuda',
type=int,
default=2,
help='Number of CUDA to use if available.')
# data
parser.add_argument('--seed', type=int, default=1, help='Random seed to use.')
parser.add_argument('--gu_num', type=int, default=8,
help='Components of GU clusters.')
# model parameters
parser.add_argument(
'--prior',
type=str,
choices=[
'uniform',
'gaussian'],
default='gaussian',
help='Distribution of prior.')
parser.add_argument(
'--prior_size',
type=int,
default=3,
help='Dimension of prior.')
parser.add_argument('--hidden_size', type=int, default=256,
help='Hidden layer size for GAN/WGAN.')
parser.add_argument(
'--n_hidden',
type=int,
default=3,
help='Number of hidden layers(Residual blocks) in GAN/WGAN.')
parser.add_argument(
'--activation_fn',
type=str,
choices=[
'relu',
'leakyrelu',
'tanh'],
default='leakyrelu',
help='What activation function to use in GAN/WGAN.')
parser.add_argument('--activation_slope', type=float, default=1e-2,
help='Negative slope of LeakyReLU activation function.')
parser.add_argument(
'--no_spectral_norm',
action='store_true',
help='Do not use spectral normalization in critic.')
# parser.add_argument('--no_batch_norm', action='store_true', help='Do not use batch norm')
parser.add_argument(
'--residual_block',
action='store_true',
help='Use residual block')
parser.add_argument('--dropout', action='store_true', help='Use dropout')
parser.add_argument(
'--norm',
type=str,
choices=[
'layer',
'batch',
None],
default='batch',
help='Which normaliztion to be used.')
parser.add_argument(
'--init_method',
type=str,
choices=[
'default',
'xav_u'],
default='default',
help='Use residual block')
# training params
parser.add_argument(
'--batch_size',
type=int,
default=2048,
help='Batch size in training.')
parser.add_argument('--niters', type=int, default=50000,
help='Total iteration numbers in training.')
parser.add_argument(
'--lr',
type=float,
default=1e-4,
help='Learning rate in Adam.')
parser.add_argument(
'--weight_decay',
type=float,
default=1e-6,
help='Weight decay in Adam.')
parser.add_argument('--beta1', type=float, default=0.9, help='Beta 1 in Adam.')
parser.add_argument(
'--beta2',
type=float,
default=0.999,
help='Beta 2 in Adam.')
parser.add_argument(
'--clr',
action='store_true',
help='Use cyclic LR in training.')
parser.add_argument(
'--clr_size_up',
type=int,
default=2000,
help='Size of up step in cyclic LR.')
parser.add_argument('--clr_scale', type=int, default=3,
help='Scale of base lr in cyclic LR.')
parser.add_argument(
'--k',
type=int,
default=5,
help='Update times of critic in each iterations.')
parser.add_argument(
'--l',
type=float,
default=0.1,
help='Coefficient for Gradient penalty.')
parser.add_argument(
'--auto',
action='store_true',
help='Using parameter searching to find the best result.')
parser.add_argument(
'--auto_full',
action='store_true',
help='Using parameter searching to find the best result.')
parser.add_argument(
'--eval_size',
type=int,
default=100000,
help='Sample size in evaluation.')
parser.add_argument(
'--exp_num',
type=int,
default=100,
help='Number of experiments.')
parser.add_argument(
'--eval_est',
action='store_true',
default=False,
help='use w_distance_estimated to choose best model.')
parser.add_argument(
'--log_interval',
type=int,
default=1000,
help='How often to show loss statistics and save models/samples.')
config = { # 'prior': tune.choice(['uniform', 'gaussian']),
'prior_size': tune.choice([1, 3, 5]), 'hidden_size': tune.choice([64, 128, 256]),
'n_hidden': tune.choice([1, 2, 3, 4]), 'activation_slope': 1e-2,
'activation_fn': tune.choice(['relu', 'leakyrelu', 'tanh']), 'init_method': tune.choice(['default', 'xav_u']),
'lr': tune.choice([1e-5, 5e-5, 1e-4, 5e-4, 1e-3]),
'weight_decay': tune.choice([0., 1e-6, 5e-6, 1e-5, 5e-5, 1e-4, 1e-3]),
'beta1': tune.choice([0.5, 0.6, 0.7, 0.8, 0.9]), 'beta2': tune.choice([0.7, 0.8, 0.9, 0.999]),
# In auto_full, these are not used
'clr_scale': tune.choice([2, 3, 4, 5]), 'clr_size_up': tune.choice([2000, 4000, 6000, 8000]),
'k': tune.choice([1, 5, 10, 50, 100]), 'l': tune.choice([0, 1e-2, 1e-1, 1, 10]),
'norm': tune.choice(['batch', None]), 'spect_norm': tune.choice([1, 0]),
# 'spect_norm': 1, # try enforcing spect_norm in critic.
# 'dropout': None,
# 'clr': None,
}
class Generator(nn.Module):
def __init__(
self,
input_size,
n_hidden,
hidden_size,
activation_fn,
activation_slope,
init_method,
norm='batch',
res_block=False,
dropout=False,
dropout_p=0.5):
super().__init__()
# Define activation function.
if activation_fn == 'relu':
activation = nn.ReLU(inplace=True)
elif activation_fn == 'leakyrelu':
activation = nn.LeakyReLU(
inplace=True, negative_slope=activation_slope)
elif activation_fn == 'tanh':
activation = nn.Tanh()
else:
raise NotImplementedError('Check activation_fn.')
if norm == 'batch':
norm = nn.BatchNorm1d
elif norm == 'layer':
norm = nn.LayerNorm
else:
norm = None
modules = [
nn.Linear(
input_size,
hidden_size),
norm(hidden_size)] if norm else [
nn.Linear(
input_size,
hidden_size)]
for _ in range(n_hidden):
# Add dropout.
if dropout:
modules += [nn.Dropout(dropout_p)]
# Add act and layer.
if res_block:
modules += [activation,
ResidualBlock(hidden_size,
hidden_size,
activation,
False,
norm)]
else:
modules += [activation, nn.Linear(hidden_size, hidden_size)]
if norm:
modules += [norm(hidden_size)]
if dropout:
modules += [nn.Dropout(dropout_p)]
modules += [activation, nn.Linear(hidden_size, 1)]
self.model = nn.Sequential(*modules)
self.init_method = init_method
self.model.apply(self.__init)
def forward(self, x):
return self.model(x)
def __init(self, m):
classname = m.__class__.__name__
if self.init_method == 'default':
return
elif self.init_method == 'xav_u':
if classname.find('Linear') != -1:
nn.init.xavier_uniform_(m.weight, gain=1)
else:
raise NotImplementedError('Check init_method')
class Critic(nn.Module):
def __init__(
self,
n_hidden,
hidden_size,
activation_fn,
activation_slope,
init_method,
spect_norm=True,
norm='layer',
res_block=False,
dropout=False,
dropout_p=0.5):
super().__init__()
# Define activation function.
if activation_fn == 'relu':
activation = nn.ReLU(inplace=True)
elif activation_fn == 'leakyrelu':
activation = nn.LeakyReLU(
inplace=True, negative_slope=activation_slope)
elif activation_fn == 'tanh':
activation = nn.Tanh()
else:
raise NotImplementedError('Check activation_fn.')
if norm == 'layer':
norm = nn.LayerNorm
else:
norm = None
modules = [
spectral_norm(
nn.Linear(
1,
hidden_size)) if spect_norm else nn.Linear(
1,
hidden_size)]
if norm:
modules += [norm(hidden_size)]
for _ in range(n_hidden):
# Add dropout.
if dropout:
modules += [nn.Dropout(dropout_p)]
# Add act and layer.
if res_block:
modules += [activation,
ResidualBlock(hidden_size,
hidden_size,
activation,
spect_norm,
norm)]
else:
modules += [activation,
spectral_norm(
nn.Linear(
hidden_size,
hidden_size)) if spect_norm else nn.Linear(
hidden_size,
hidden_size)]
if norm:
modules += [norm(hidden_size)]
if dropout:
modules += [nn.Dropout(dropout_p)]
modules += [activation]
modules += [spectral_norm(nn.Linear(hidden_size, 1))
if spect_norm else nn.Linear(hidden_size, 1)]
self.model = nn.Sequential(*modules)
self.init_method = init_method
self.model.apply(self.__init)
def forward(self, x):
return self.model(x)
def __init(self, m):
classname = m.__class__.__name__
if self.init_method == 'default':
return
elif self.init_method == 'xav_u':
if classname.find('Linear') != -1:
nn.init.xavier_uniform_(m.weight, gain=1)
else:
raise NotImplementedError('Check init_method')
class WGANTrainer(tune.Trainable):
def _setup(self, config):
self.config = config
# torch.rand draws the uniform prior; torch.normal draws the gaussian one.
self.prior = torch.rand if self.config['prior'] == 'uniform' else partial(
torch.normal, mean=0., std=1.)
self.i = 0
# model
self.generator = Generator(
input_size=config['prior_size'],
n_hidden=config['n_hidden'],
hidden_size=config['hidden_size'],
activation_slope=config['activation_slope'],
init_method=config['init_method'],
activation_fn=config['activation_fn'],
norm=config['norm'],
res_block=config['residual_block'],
dropout=config['dropout']).to(
config['device'])
self.critic = Critic(
n_hidden=config['n_hidden'],
hidden_size=config['hidden_size'],
activation_slope=config['activation_slope'],
init_method=config['init_method'],
activation_fn=config['activation_fn'],
norm=config['norm'],
res_block=config['residual_block'],
dropout=config['dropout'],
spect_norm=config['spect_norm']).to(
config['device'])
# data
if self.config['gu_num'] == 8:
self.dataloader = GausUniffMixture(
n_mixture=self.config['gu_num'],
mean_dist=10,
sigma=2,
unif_intsect=1.5,
unif_ratio=1.,
device=self.config['device'])
else:
self.dataloader = GausUniffMixture(
n_mixture=self.config['gu_num'],
mean_dist=5,
sigma=0.1,
unif_intsect=5,
unif_ratio=3,
device=self.config['device'])
# optimizer
self.optim_g = torch.optim.Adam(
[
p for p in self.generator.parameters() if p.requires_grad],
lr=config['lr'],
betas=(
config['beta1'],
config['beta2']),
weight_decay=config['weight_decay'])
self.optim_c = torch.optim.Adam(
[
p for p in self.critic.parameters() if p.requires_grad],
lr=config['lr'],
betas=(
config['beta1'],
config['beta2']),
weight_decay=config['weight_decay'])
if self.config['clr']:
self.sche_g = torch.optim.lr_scheduler.CyclicLR(
self.optim_g,
base_lr=config['lr'] /
config['clr_scale'],
max_lr=config['lr'],
step_size_up=config['clr_size_up'],
cycle_momentum=False)
self.sche_c = torch.optim.lr_scheduler.CyclicLR(
self.optim_c,
base_lr=config['lr'] /
config['clr_scale'],
max_lr=config['lr'],
step_size_up=config['clr_size_up'],
cycle_momentum=False)
else:
self.sche_g, self.sche_c = None, None
def _train(self):
if self.i == 0:
self.start = time.time()
self.i += 1
self.generator.train()
self.critic.train()
for k in range(self.config['k']):
real = self.dataloader.get_sample(self.config['batch_size'])
prior = self.prior(
size=(
self.config['batch_size'],
self.config['prior_size']),
device=self.config['device'])
fake = self.generator(prior)
loss_c = self.critic(fake.detach()).mean() - \
self.critic(real).mean()
loss_c += self.config["l"] * self._gradient_penalty(real, fake)
self.optim_c.zero_grad()
loss_c.backward()
self.optim_c.step()
if self.sche_c:
self.sche_c.step()
prior = self.prior(
size=(
self.config['batch_size'],
self.config['prior_size']),
device=self.config['device'])
fake = self.generator(prior)
loss_g = - self.critic(fake).mean()
self.optim_g.zero_grad()
loss_g.backward()
self.optim_g.step()
if self.sche_g:
self.sche_g.step()
if self.i % self.config['log_interval'] == 0 and not self.config['auto']:
cur_state_path = os.path.join(model_path, str(self.i))
torch.save(self.generator, cur_state_path + '_' + 'generator.pth')
torch.save(self.critic, cur_state_path + '_' + 'critic.pth')
w_distance_real, w_distance_est = self._evaluate(
display=True, niter=self.i)
logger.info(
f'Iter: {self.i} / {self.config["niters"]}, Time: {round(time.time() - self.start, 4)}, '
f'w_distance_real: {w_distance_real}, w_distance_estimated: {w_distance_est}')
self.start = time.time()
w_distance_real, w_distance_est = self._evaluate(
display=False, niter=self.config['niters'])
return {
'w_distance_estimated': w_distance_est,
'w_distance_real': w_distance_real,
'iteration': self.i}
def _save(self, tmp_checkpoint_dir):
generator_path = os.path.join(tmp_checkpoint_dir, 'generator.pth')
critic_path = os.path.join(tmp_checkpoint_dir, 'critic.pth')
torch.save(self.generator.state_dict(), generator_path)
torch.save(self.critic.state_dict(), critic_path)
return tmp_checkpoint_dir
def _save_whole(self, tmp_checkpoint_dir):
generator_path = os.path.join(tmp_checkpoint_dir, 'generator.pth')
critic_path = os.path.join(tmp_checkpoint_dir, 'critic.pth')
torch.save(self.generator.to('cpu'), generator_path)
torch.save(self.critic.to('cpu'), critic_path)
return tmp_checkpoint_dir
def _restore(self, checkpoint_dir):
generator_path = os.path.join(checkpoint_dir, 'generator.pth')
critic_path = os.path.join(checkpoint_dir, 'critic.pth')
self.generator.load_state_dict(torch.load(generator_path))
self.critic.load_state_dict(torch.load(critic_path))
def _evaluate(self, display, niter):
self.generator.eval()
self.critic.eval()
with torch.no_grad():
real = self.dataloader.get_sample(self.config['eval_size'])
prior = self.prior(
size=(
self.config['eval_size'],
self.config['prior_size']),
device=self.config['device'])
fake = self.generator(prior)
w_distance_est = self.critic(
real).mean() - self.critic(fake).mean()
w_distance_est = abs(round(w_distance_est.item(), 5))
w_distance_real = w_distance(real, fake)
if display:
# save images
real_sample = real.cpu().data.numpy().squeeze()
fake_sample = fake.cpu().data.numpy().squeeze()
plt.cla()
fig = plt.figure(figsize=(FIG_W, FIG_H))
ax = fig.add_subplot(111)
ax.set_facecolor('whitesmoke')
ax.grid(True, color='white', linewidth=2)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.get_xaxis().tick_bottom()
kde_num = 200
min_real, max_real = min(real_sample), max(real_sample)
kde_width_real = kde_num * \
(max_real - min_real) / args.eval_size
min_fake, max_fake = min(fake_sample), max(fake_sample)
kde_width_fake = kde_num * \
(max_fake - min_fake) / args.eval_size
sns.kdeplot(
real_sample,
bw=kde_width_real,
label='Data',
color='green',
shade=True,
linewidth=6)
sns.kdeplot(
fake_sample,
bw=kde_width_fake,
label='Model',
color='orange',
shade=True,
linewidth=6)
ax.set_title(
f'True EM Distance: {w_distance_real}, '
f'Est. EM Distance: {w_distance_est}.',
fontsize=FONTSIZE)
ax.legend(loc=2, fontsize=FONTSIZE)
ax.set_ylabel('Estimated Density by KDE', fontsize=FONTSIZE)
ax.tick_params(axis='x', labelsize=FONTSIZE * 0.7)
ax.tick_params(
axis='y',
labelsize=FONTSIZE * 0.5,
direction='in')
cur_img_path = os.path.join(image_path, str(niter) + '.jpg')
plt.tight_layout()
plt.savefig(cur_img_path)
plt.close()
return w_distance_real, w_distance_est
def _gradient_penalty(self, real, fake):
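# WGAN-GP penalty: sample interpolates x_hat = alpha*real + (1 - alpha)*fake
# and penalize E[(||grad_{x_hat} critic(x_hat)||_2 - 1)^2].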
batch_size = fake.size(0)
alpha = torch.rand(size=(batch_size, 1), device=self.config['device'])
alpha = alpha.expand_as(real)
interpolated = alpha * real + (1 - alpha) * fake
interpolated = Variable(
interpolated,
requires_grad=True).to(
self.config['device'])
interpolation_loss = self.critic(interpolated)
gradients = autograd.grad(
outputs=interpolation_loss,
inputs=interpolated,
grad_outputs=torch.ones(
interpolation_loss.size(),
device=self.config['device']),
create_graph=True,
retain_graph=True)[0]
gradients = gradients.view(gradients.size(0), -1)
return ((gradients.norm(2, dim=1) - 1.) ** 2).mean()
if __name__ == '__main__':
args = parser.parse_args()
args.spect_norm = not args.no_spectral_norm
args.eval_real = not args.eval_est
if args.auto or args.auto_full:
args.device = torch.device(
'cuda' if torch.cuda.is_available() else 'cpu')
else:
args.device = torch.device(
f'cuda:{args.cuda}' if torch.cuda.is_available() else 'cpu')
if args.auto_full:
# Search over all tweaks, but don't search over clr parameters.
# Further, since ResNet doesn't improve results, don't search over it either.
config = {'prior': tune.choice(['uniform', 'gaussian']), 'prior_size': tune.choice([1, 3, 5]),
'hidden_size': tune.choice([64, 128, 256]), 'n_hidden': tune.choice([1, 2, 3, 4]),
'activation_slope': 1e-2, 'activation_fn': tune.choice(['relu', 'leakyrelu', 'tanh']),
'init_method': tune.choice(['default', 'xav_u']),
'lr': tune.choice([1e-5, 5e-5, 1e-4, 5e-4, 1e-3]),
'weight_decay': tune.choice([0., 1e-6, 5e-6, 1e-5, 5e-5, 1e-4, 1e-3]),
'beta1': tune.choice([0.5, 0.6, 0.7, 0.8, 0.9]), 'beta2': tune.choice([0.7, 0.8, 0.9, 0.999]),
'k': tune.choice([1, 5, 10, 50, 100]), 'l': tune.choice([0, 1e-2, 1e-1, 1, 10]),
'spect_norm': tune.choice([1, 0]),
'norm': tune.choice(['batch', None]), 'dropout': tune.choice([1, 0]), 'clr': tune.choice([1, 0]),
}
# Add constant params in args to config.
dict_args = vars(args)
for key in dict_args:
if key in ['no_batch_norm', 'no_spectral_norm', 'batch_norm']:
# redundant args.
continue
if key in config:
if not args.auto:
# In a manual experiment: overwrite existing config settings.
config[key] = dict_args[key]
else:
config[key] = dict_args[key]
if args.auto:
if not args.clr:
# Reset hyperparameter choices in clr if it is not a tuning field.
config["clr_scale"] = 2
config["clr_size_up"] = 2000
if args.residual_block:
# Set deeper depth of Resnet.
config["n_hidden"] = tune.choice([1, 3, 5, 7])
config['device'] = args.device
# save path
search_type = 'automatic' if args.auto else 'manual'
experiment = f'gu{args.gu_num}/wgan/{args.niters}|' + args.eval_real * 'w_distance_real|' + (
args.eval_est) * 'w_distance_estimated|' + 'resnt|' * args.residual_block + 'fcnet|' * (
not args.residual_block) + f'{args.prior}|' + f'clr|' * args.clr + f'dropout|' * args.dropout + f'{args.activation_fn}|' * (
not args.auto) + f'{args.norm}_norm|' * (not args.auto) + (
'no_' * args.no_spectral_norm + 'spect_norm|') * (not args.auto) + (
'no_' * (args.l == 0) + 'gradient_penalty|') * (
not args.auto) + f'{args.init_method}_init|{args.k}_updates' * (not args.auto)
model_path = os.path.join(curPath, search_type, 'models', experiment)
image_path = os.path.join(curPath, search_type, 'images', experiment)
if args.auto_full:
model_path = os.path.join(
curPath,
search_type,
f'models/gu{args.gu_num}/wgan/{args.niters}|full_new')
image_path = os.path.join(
curPath,
search_type,
f'images/gu{args.gu_num}/wgan/{args.niters}|full_new')
makedirs(model_path, image_path)
log_path = model_path + '/logs'
logger = get_logger(log_path)
logger.info('Trained model will save to: ' + model_path)
logger.info('Result plot will save to : ' + image_path)
logger.info('Search space: ')
logger.info(config)
logger.info(SEP)
logger.info('Start training...')
if args.auto:
if args.eval_est:
sched = ASHAScheduler(
metric='w_distance_estimated',
mode='min',
grace_period=args.niters // 10,
max_t=args.niters,
time_attr="iteration")
else:
sched = ASHAScheduler(
metric='w_distance_real',
mode='min',
grace_period=args.niters // 10,
max_t=args.niters,
time_attr="iteration")
analysis = tune.run(WGANTrainer, name=experiment, scheduler=sched, # search_alg=algo,
stop={"iteration": args.niters}, resources_per_trial={"cpu": 3, "gpu": 1},
num_samples=args.exp_num, checkpoint_at_end=True, config=config)
if args.eval_real:
best_config = analysis.get_best_config(
metric='w_distance_real', mode='min')
best_path = analysis.get_best_logdir(
metric='w_distance_real', mode='min')
else:
best_config = analysis.get_best_config(
metric='w_distance_estimated', mode='min')
best_path = analysis.get_best_logdir(
metric='w_distance_estimated', mode='min')
results = analysis.dataframe()
results.to_csv(model_path + 'results.csv')
logger.info(f'Best config is: {best_config}')
best_model_dir = retrieve_best_result_from_tune(best_path)
else:
trainer = WGANTrainer(config)
for _ in range(1, config['niters'] + 1):
_ = trainer._train()
best_config = config
best_model_dir = model_path
logger.info(f'Saving to {model_path}')
trainer._save(model_path)
logger.info('Start evaluation...')
eval_trainer = WGANTrainer(best_config)
eval_trainer._restore(best_model_dir)
eval_trainer._evaluate(display=True, niter=args.niters)
logger.info('Saving to: ' + model_path)
eval_trainer._save_whole(model_path)
logger.info('Finish All...')
logger.info(SEP)
|
11518412
|
from os import remove
import shlex
from os.path import isfile, join, split, splitext
from prody.tests import TestCase, skipIf, skipUnless
from numpy.testing import *
try:
import numpy.testing.decorators as dec
except ImportError:
from numpy.testing import dec
from prody import parsePDB, DCDFile, parseDCD
from prody.tests.datafiles import TEMPDIR, pathDatafile
from prody.apps import prody_parser
from prody.tests import MATPLOTLIB, NOPRODYCMD, WINDOWS
class TestCatdcdCommand(TestCase):
def setUp(self):
self.output = join(TEMPDIR, 'test_prody_catdcd.dcd')
self.dcdpath = pathDatafile('dcd')
self.pdbpath = pathDatafile('multi_model_truncated')
self.dcd = DCDFile(self.dcdpath)
self.ag = parsePDB(self.pdbpath, model=1)
self.command = 'catdcd -o ' + self.output
self.tearDown()
@dec.slow
@skipIf(NOPRODYCMD, 'prody command not found')
@skipIf(WINDOWS, 'command tests are not run on Windows')
def testSimpleConcat(self):
command = self.command + ' {0:s} {0:s} {0:s}'.format(self.dcdpath)
namespace = prody_parser.parse_args(shlex.split(command))
namespace.func(namespace)
coords = self.dcd[:]._getCoordsets()
concat = parseDCD(self.output)._getCoordsets()
assert_equal(coords, concat[:3])
assert_equal(coords, concat[3:6])
assert_equal(coords, concat[6:])
@dec.slow
@skipIf(NOPRODYCMD, 'prody command not found')
@skipIf(WINDOWS, 'command tests are not run on Windows')
def testSelectConcat(self):
command = self.command + ' -s ca --pdb {1:s} {0:s} {0:s}'.format(
self.dcdpath, self.pdbpath)
namespace = prody_parser.parse_args(shlex.split(command))
namespace.func(namespace)
select = self.ag.ca
assert_equal(select.numAtoms(), 10)
coords = self.dcd[:]
coords.setAtoms(select)
coords = coords._getCoordsets()
concat = parseDCD(self.output)
assert_equal(concat.numAtoms(), select.numAtoms())
concat = concat._getCoordsets()
assert_equal(select.numAtoms(), coords.shape[1])
assert_equal(select.numAtoms(), concat.shape[1])
assert_equal(coords, concat[:3])
assert_equal(coords, concat[3:])
@dec.slow
@skipIf(NOPRODYCMD, 'prody command not found')
@skipIf(WINDOWS, 'command tests are not run on Windows')
def testAlignConcat(self):
command = self.command + ' --align ca --pdb {1:s} {0:s} {0:s}'.format(
self.dcdpath, self.pdbpath)
namespace = prody_parser.parse_args(shlex.split(command))
namespace.func(namespace)
select = self.ag.ca
coords = self.dcd[:]
concat = parseDCD(self.output)
assert_equal(concat.numAtoms(), coords.numAtoms())
coords.setCoords(self.ag.getCoords())
coords.setAtoms(select)
coords.superpose()
coords.setAtoms(None)
coords = coords._getCoordsets()
concat = concat._getCoordsets()
assert_equal(coords, concat[:3])
assert_equal(coords, concat[3:])
@dec.slow
@skipIf(NOPRODYCMD, 'prody command is not found')
@skipIf(WINDOWS, 'command tests are not run on Windows')
def testSelectException(self):
command = self.command + ' -s ca {0:s} {0:s}'.format(
self.dcdpath)
namespace = prody_parser.parse_args(shlex.split(command))
self.assertRaises(ValueError, namespace.func, namespace)
@dec.slow
@skipIf(NOPRODYCMD, 'prody command is not found')
@skipIf(WINDOWS, 'command tests are not run on Windows')
def testAlignException(self):
command = self.command + ' --align ca {0:s} {0:s}'.format(
self.dcdpath)
namespace = prody_parser.parse_args(shlex.split(command))
self.assertRaises(ValueError, namespace.func, namespace)
@dec.slow
@skipIf(NOPRODYCMD, 'prody command is not found')
@skipIf(WINDOWS, 'command tests are not run on Windows')
def testIOException(self):
command = self.command + ' {0:s} {0:s}'.format('deneme.dcd')
namespace = prody_parser.parse_args(shlex.split(command))
self.assertRaises(IOError, namespace.func, namespace)
@dec.slow
@skipIf(NOPRODYCMD, 'prody command is not found')
@skipIf(WINDOWS, 'command tests are not run on Windows')
def testSelectException2(self):
command = self.command + ' -s None {0:s} {0:s}'.format(self.dcdpath)
namespace = prody_parser.parse_args(shlex.split(command))
self.assertRaises(ValueError, namespace.func, namespace)
def tearDown(self):
if isfile(self.output): remove(self.output)
|
11518426
|
import os
import torch
import torch.optim as optim
from imbalanceddl.utils.utils import AverageMeter, save_checkpoint, collect_result
from imbalanceddl.utils.metrics import accuracy
from .base import BaseTrainer
class Trainer(BaseTrainer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.model = kwargs.pop('model', None)
if self.model is None:
raise TypeError(
"__init__() missing required keyward-only argument: 'model' !")
else:
print("=> Model = {}".format(self.model))
self.strategy = kwargs.pop('strategy', None)
if self.strategy is None:
raise TypeError("__init__() missing required keyward-only \
argument: 'strategy' !")
else:
print("=> Strategy = {}".format(self.strategy))
self.optimizer = self._init_optimizer()
self.cls_num_list = self.cfg.cls_num_list
self.best_acc1 = 0.
def get_criterion(self):
return NotImplemented
def train_one_epoch(self):
return NotImplemented
def _init_optimizer(self):
if self.cfg.optimizer == 'sgd':
print("=> Initialize optimizer {}".format(self.cfg.optimizer))
optimizer = optim.SGD(self.model.parameters(),
self.cfg.learning_rate,
momentum=self.cfg.momentum,
weight_decay=self.cfg.weight_decay)
return optimizer
else:
raise ValueError("[Warning] Selected Optimizer not supported !")
def adjust_learning_rate(self):
"""Sets the learning rate"""
# total 200 epochs scheme
if self.cfg.epochs == 200:
epoch = self.epoch + 1
if epoch <= 5:
lr = self.cfg.learning_rate * epoch / 5
elif epoch > 180:
lr = self.cfg.learning_rate * 0.0001
elif epoch > 160:
lr = self.cfg.learning_rate * 0.01
else:
lr = self.cfg.learning_rate
# total 300 epochs scheme
elif self.cfg.epochs == 300:
epoch = self.epoch + 1
if epoch <= 5:
lr = self.cfg.learning_rate * epoch / 5
elif epoch > 250:
lr = self.cfg.learning_rate * 0.01
elif epoch > 150:
lr = self.cfg.learning_rate * 0.1
else:
lr = self.cfg.learning_rate
else:
raise ValueError(
"[Warning] Total epochs {} not supported !".format(
self.cfg.epochs))
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
def do_train_val(self):
for epoch in range(self.cfg.start_epoch, self.cfg.epochs):
self.epoch = epoch
# learning rate control
self.adjust_learning_rate()
# criterion
self.get_criterion()
assert self.criterion is not None, "No criterion !"
self.train_one_epoch()
acc1 = self.validate()
# remember best acc@1 and save checkpoint
is_best = acc1 > self.best_acc1
self.best_acc1 = max(acc1, self.best_acc1)
self.tf_writer.add_scalar('acc/test_top1_best', self.best_acc1,
self.epoch)
output_best = 'Best Prec@1: %.3f\n' % (self.best_acc1)
print(output_best)
self.log_testing.write(output_best + '\n')
self.log_testing.flush()
if epoch == self.cfg.epochs - 1:
collect_result(self.cfg, output_best)
save_checkpoint(
self.cfg, {
'epoch': self.epoch + 1,
'backbone': self.cfg.backbone,
'classifier': self.cfg.classifier,
'state_dict': self.model.state_dict(),
'best_acc1': self.best_acc1,
'optimizer': self.optimizer.state_dict(),
}, is_best, self.epoch)
def eval_best_model(self):
assert self.cfg.best_model is not None, "[Warning] Best Model \
must be loaded !"
assert 'best' in self.cfg.best_model, "[Need Best Model]"
if os.path.isfile(self.cfg.best_model):
print("=> [Loading Best Model] '{}'".format(self.cfg.best_model))
checkpoint = torch.load(self.cfg.best_model, map_location='cuda:0')
self.epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if self.cfg.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(self.cfg.gpu)
self.model.load_state_dict(checkpoint['state_dict'])
# optimizer.load_state_dict(checkpoint['optimizer'])
print("=> [Loaded Best Model] '{}' (epoch {})".format(
self.cfg.best_model, checkpoint['epoch']))
else:
print("=> [No Trained Model Path found at '{}'".format(
self.cfg.best_model))
raise ValueError("[Warning] No Trained Model Path Found !!!")
self.get_criterion()
assert self.criterion is not None, "No criterion !"
acc1, cls_acc_string = self.validate()
output_best = 'Best Prec@1: %.3f' % (acc1)
print(output_best)
print(cls_acc_string)
print("[Done] with evaluating with best model of {}".format(
self.cfg.best_model))
return
def validate(self):
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
# switch to evaluate mode
self.model.eval()
all_preds = list()
all_targets = list()
with torch.no_grad():
for i, (_input, target) in enumerate(self.val_loader):
_input = _input.cuda(self.cfg.gpu, non_blocking=True)
target = target.cuda(self.cfg.gpu, non_blocking=True)
# compute output
output, _ = self.model(_input)
loss = self.criterion(output, target).mean()
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), _input.size(0))
top1.update(acc1[0], _input.size(0))
top5.update(acc5[0], _input.size(0))
_, pred = torch.max(output, 1)
all_preds.extend(pred.cpu().numpy())
all_targets.extend(target.cpu().numpy())
if i % self.cfg.print_freq == 0:
output = ('Test: [{0}/{1}]\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i,
len(self.val_loader),
loss=losses,
top1=top1,
top5=top5))
print(output)
cls_acc_string = self.compute_metrics_and_record(all_preds,
all_targets,
losses,
top1,
top5,
flag='Testing')
if cls_acc_string is not None:
return top1.avg, cls_acc_string
else:
return top1.avg
|
11518435
|
import utils
import random
import argparse
import tsp_ga as ga
from datetime import datetime
def run(args):
genes = utils.get_genes_from(args.cities_fn)
if args.verbose:
print("-- Running TSP-GA with {} cities --".format(len(genes)))
history = ga.run_ga(genes, args.pop_size, args.n_gen,
args.tourn_size, args.mut_rate, args.verbose)
if args.verbose:
print("-- Drawing Route --")
utils.plot(history['cost'], history['route'])
if args.verbose:
print("-- Done --")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', type=int, default=1)
parser.add_argument('--pop_size', type=int, default=500, help='Population size')
parser.add_argument('--tourn_size', type=int, default=50, help='Tournament size')
parser.add_argument('--mut_rate', type=float, default=0.02, help='Mutation rate')
parser.add_argument('--n_gen', type=int, default=20, help='Number of equal generations before stopping')
parser.add_argument('--cities_fn', type=str, default="data/cities.csv", help='Data containing the geographical coordinates of cities')
random.seed(datetime.now())
args = parser.parse_args()
if args.tourn_size > args.pop_size:
raise argparse.ArgumentTypeError('Tournament size cannot be bigger than population size.')
run(args)
|
11518440
|
from __future__ import unicode_literals
from abc import ABCMeta
from future.utils import with_metaclass
from .base import Base
class Message(with_metaclass(ABCMeta, Base)):
"""Abstract Base Class of Message."""
def __init__(self, id=None, mid=None, seq=None, is_echo=None, app_id=None, metadata=None, **kwargs):
super(Message, self).__init__(**kwargs)
self.id = id
self.mid = mid
self.seq = seq
class TextMessage(Message):
def __init__(self, id=None, mid=None, seq=None, text=None, **kwargs):
super(TextMessage, self).__init__(id=id, mid=mid, seq=seq, **kwargs)
self.text = text
class QuickReplyMessage(Message):
def __init__(self, id=None, mid=None, seq=None, text=None, quick_reply=None, **kwargs):
super(QuickReplyMessage, self).__init__(id=id, mid=mid, seq=seq, text=text, quick_reply=quick_reply, **kwargs)
self.text = text
self.quick_reply = self.get_or_new_from_json_dict(quick_reply, QuickReply)
class AttachmentMessage(Message):
def __init__(self, id=None, mid=None, seq=None, attachments=None, **kwargs):
super(AttachmentMessage, self).__init__(id=id, mid=mid, seq=seq, attachments=attachments, **kwargs)
# Parse each attachment payload into its typed message object; collecting
# into a list keeps every attachment rather than only the last one.
self.attachments = [
self.get_or_new_from_json_dict_with_types(
attachment, {
'image': ImageMessage,
'video': VideoMessage,
'audio': AudioMessage,
'file': FileMessage,
'template': TemplateMessage,
'location': LocationMessage,
'fallback': FallbackMessage
}
)
for attachment in attachments
]
## Payload
class ImageMessage(Message):
def __init__(self, id=None, payload=None, **kwargs):
super(ImageMessage, self).__init__(id=id, **kwargs)
self.type = 'image'
self.payload = self.get_or_new_from_json_dict(payload, Payload)
class VideoMessage(Message):
def __init__(self, id=None, payload=None, **kwargs):
super(VideoMessage, self).__init__(id=id, **kwargs)
self.type = 'video'
self.payload = self.get_or_new_from_json_dict(payload, Payload)
class AudioMessage(Message):
def __init__(self, id=None, payload=None, **kwargs):
super(AudioMessage, self).__init__(id=id, **kwargs)
self.type = 'audio'
self.payload = self.get_or_new_from_json_dict(payload, Payload)
class FileMessage(Message):
def __init__(self, id=None, payload=None, **kwargs):
super(FileMessage, self).__init__(id=id, **kwargs)
self.type = 'file'
self.payload = self.get_or_new_from_json_dict(payload, Payload)
class TemplateMessage(Message):
def __init__(self, id=None, payload=None, **kwargs):
super(TemplateMessage, self).__init__(id=id, **kwargs)
self.type = 'template'
self.payload = self.get_or_new_from_json_dict(payload, Payload)
class LocationMessage(Message):
def __init__(self, id=None, payload=None, **kwargs):
super(LocationMessage, self).__init__(id=id, **kwargs)
self.type = 'location'
self.payload = self.get_or_new_from_json_dict(payload, Payload)
class FallbackMessage(Message):
def __init__(self, id=None, title=None, url=None, payload=None, **kwargs):
super(FallbackMessage, self).__init__(id=id, **kwargs)
self.type = 'fallback'
self.title = title
self.url = url
self.payload = self.get_or_new_from_json_dict(payload, Payload)
class Coordinates(with_metaclass(ABCMeta, Base)):
def __init__(self, lat, long, **kwargs):
super(Coordinates, self).__init__(**kwargs)
self.lat = lat
self.long = long
class Payload(with_metaclass(ABCMeta, Base)):
def __init__(self, id=None, url=None, coordinates=None, **kwargs):
super(Payload, self).__init__(**kwargs)
self.url = url
self.coordinates = self.get_or_new_from_json_dict(coordinates, Coordinates)
class QuickReply(with_metaclass(ABCMeta, Base)):
def __init__(self, payload=None, **kwargs):
super(QuickReply, self).__init__(**kwargs)
self.payload = payload
|
11518497
|
from django.shortcuts import render
import re
from punctuation.removePunctuations import Punctuation
def indexView(request):
if request.method=="POST":
string = request.POST["string"]
removePunc = request.POST.get("removePunc",False)
removeDigit = request.POST.get("removeDigit", False)
try:
if removeDigit == "on":
result = re.sub(r"\d", "", string)
else:
result = string
if removePunc == "on":
result = Punctuation(result)
context = {"result": result}
else:
context = {"result": result}
except:
context = {"result": string}
return render(request, "punctuation/predict.html", context)
return render(request, "punctuation/index.html")
#https://stackoverflow.com/questions/5895588/django-multivaluedictkeyerror-error-how-do-i-deal-with-it
|
11518498
|
from modelchimp.tests import BaseTest
from modelchimp.factories.factory_user import UserFactory
from modelchimp.models.user import User
class UserModelTest(BaseTest):
model_class = User
factory_class = UserFactory
data = {
'username' : 'modelchimp_admin',
'email':'<EMAIL>',
'password': '<PASSWORD>',
}
def setUp(self):
super().setUp()
self.obj1 = self.factory_class()
def test_create(self):
instance = self.model_class.objects.create(**self.data)
self.assertEqual(instance.email, self.data['email'])
self.assertEqual(instance.username, self.data['username'])
def test_retrieve(self):
instance = self.model_class.objects.get(username=self.obj1.username)
self.assertEqual(instance.email, self.obj1.email)
self.assertEqual(instance.username, self.obj1.username)
def test_update(self):
instance = self.model_class.objects.get(username=self.obj1.username)
instance.username = 'admin'
instance.save()
self.assertEqual(instance.username, 'admin')
def test_delete(self):
instance = self.model_class.objects.get(username=self.obj1.username)
instance.delete()
delete_flag = False
try:
self.model_class.objects.get(username=self.obj1.username)
except User.DoesNotExist:
delete_flag = True
self.assertTrue(delete_flag)
|
11518542
|
from math import sqrt
from tessagon.core.tile import Tile
from tessagon.core.tessagon import Tessagon
from tessagon.core.tessagon_metadata import TessagonMetadata
# TODO: gulp, 'octagon' does not begin with 'octo'
metadata = TessagonMetadata(name='Octagons and Squares',
classification='archimedean',
shapes=['octagons', 'squares'],
sides=[8, 4])
class OctoTile(Tile):
# ^ ..o-o..
# | ./...\.
# | o.....o
# | |.....|
# | o.....o
# | .\.../.
# ..o-o..
# V
# U ---->
CORNER_TO_VERT_RATIO = 1.0 / (2.0 + sqrt(2))
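# Derivation: across a unit tile, an octagon side s plus its two corner
# projections s/sqrt(2) span the tile, so s*(1 + sqrt(2)) = 1 and the corner
# offset s/sqrt(2) works out to 1/(2 + sqrt(2)).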
def __init__(self, tessagon, **kwargs):
super().__init__(tessagon, **kwargs)
self.u_symmetric = True
self.v_symmetric = True
def init_verts(self):
return {'left': {'top': {'u_boundary': None,
'v_boundary': None},
'bottom': {'u_boundary': None,
'v_boundary': None}},
'right': {'top': {'u_boundary': None,
'v_boundary': None},
'bottom': {'u_boundary': None,
'v_boundary': None}}}
def init_faces(self):
return {'middle': None,
'left': {'top': None,
'bottom': None},
'right': {'top': None,
'bottom': None}}
def calculate_verts(self):
self.add_vert(['left', 'top', 'v_boundary'],
self.CORNER_TO_VERT_RATIO, 1, v_boundary=True)
self.add_vert(['left', 'top', 'u_boundary'],
0, 1.0 - self.CORNER_TO_VERT_RATIO, u_boundary=True)
def calculate_faces(self):
# Middle interior face
self.add_face('middle', [['left', 'top', 'v_boundary'],
['left', 'top', 'u_boundary'],
['left', 'bottom', 'u_boundary'],
['left', 'bottom', 'v_boundary'],
['right', 'bottom', 'v_boundary'],
['right', 'bottom', 'u_boundary'],
['right', 'top', 'u_boundary'],
['right', 'top', 'v_boundary']])
# Four faces, define top left corner, others via symmetry
self.add_face(['left', 'top'],
[['left', 'top', 'v_boundary'],
['left', 'top', 'u_boundary'],
# Verts on neighbor tiles
[['left'], ['right', 'top', 'v_boundary']],
[['top'], ['left', 'bottom', 'u_boundary']]],
corner=True)
class OctoTessagon(Tessagon):
tile_class = OctoTile
metadata = metadata
|
11518548
|
import os
import random
from avel.video_lib import drawtext, create_drawtext_dict
"""
create_hiit_video takes in a video file (e.g. a TV show) and outputs a HIIT workout video.
It employs a routine of 15s of high-intensity exercise followed by 45s of low-intensity rest.
It displays text to inform me when to work out and when to rest.
todo: mix in EDM music in the background, with louder volume for the high-intensity portion
"""
def get_countdown_str(seconds):
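# ffmpeg drawtext text expansion: %{eif\:EXPR\:d\:2} evaluates EXPR per frame
# and renders it as a zero-padded 2-digit integer, giving a seconds countdown
# that wraps every minute.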
return r'%{eif\:trunc(mod(' + str(seconds) + r'-t\,60))\:d\:2}'
def get_rand_exercise():
return random.choice(["jump rope", "jumping jacks", "squats", "jumping squats", "sprint in place", "push ups"])
def create_hiit_overlays(initial_time, hi_time, low_time, num_sets):
"""
Creates a list of overlay dictionaries
fontsize is hardcoded for 720p video resolution
"""
hiit_overlay = []
exercise = get_rand_exercise()
for i in range(num_sets):
t1 = initial_time + i*(hi_time+low_time)
hiit_overlay.append(
create_drawtext_dict(exercise.capitalize(), "mid_x", "top", 90, box='1:boxcolor=black@0.5:boxborderw=5:',
enable=f"between(t,{t1},{t1 + hi_time})" ))
hiit_overlay.append(
create_drawtext_dict(get_countdown_str(t1 + hi_time), "right", "top", 75, box='1:boxcolor=black@0.5:boxborderw=5:',
enable=f"between(t,{t1},{t1 + hi_time})" ))
hiit_overlay.append(
create_drawtext_dict("rest", "mid_x", "top", 75, box='1:boxcolor=black@0.5:boxborderw=5:',
enable=f"between(t,{t1 + hi_time},{t1 + hi_time + 5})"))
hiit_overlay.append(
create_drawtext_dict("rest " + get_countdown_str(t1 + hi_time + low_time), "right", "top", 75, box='1:boxcolor=black@0.5:boxborderw=5:',
enable=f"between(t,{t1 + hi_time + 5},{t1 + hi_time + low_time - 5})" ))
exercise = get_rand_exercise()
hiit_overlay.append(
create_drawtext_dict(f"next is {exercise} {get_countdown_str(t1 + hi_time + low_time)}", "right", "top", 75, box='1:boxcolor=black@0.5:boxborderw=5:',
enable=f"between(t,{t1 + hi_time + low_time - 5},{t1 + hi_time + low_time})" ))
return hiit_overlay
def create_hiit_video(input_video, output_dir=None):
warmup_time, cooldown_time = 4 * 60, 2 * 60
hi_time, low_time = 15, 45
exercise_time = 15 * 60
overlay = [
create_drawtext_dict("warmup", "mid_x", "top", 50, enable=f"between(t,0,{warmup_time - 5})", box='1:boxcolor=black@0.5:boxborderw=5:'),
create_drawtext_dict("GET READY", "mid_x", "mid_y", 100, enable=f"between(t,{warmup_time - 5},{warmup_time})", box='1:boxcolor=black@0.5:boxborderw=5:') ]
overlay.extend(create_hiit_overlays(warmup_time, hi_time, low_time, exercise_time // (hi_time + low_time)))
# burn mode
overlay.append(create_drawtext_dict("cool down!", "mid_x", "mid_y", 80, enable=f"between(t,{warmup_time + exercise_time},{warmup_time + exercise_time + cooldown_time})"))
f,_,ext = os.path.basename(input_video).rpartition('.')
new_filename = f"{f}_cardio.{ext}"
output_dir = os.path.dirname(input_video) if output_dir is None else output_dir
output_video = os.path.join(output_dir, new_filename)
drawtext(input_video, output_video, overlay)
return output_video
if __name__ == '__main__':
create_hiit_video("D:\\TV\\Hunter X Hunter 2011\\Season 2\\Hunter.X.Hunter.2011.S02E58.mkv", "D:\\workout_vids\\")
|
11518565
|
from .aes import InvalidPadding
from .keys import verify_signed_text, Key, PublicKey, PrivateKey
from .transaction import TxInput, TxOutput, Transaction, Unspent, InsufficientFunds
from .wallet import Wallet
__version__ = '0.8.0'
|
11518566
|
from __future__ import print_function, division
import os
import shutil
import tempfile
import pytest
import numpy as np
from numpy.testing import assert_array_almost_equal_nulp
from astropy import units as u
from .. import Model
from ..image import Image
from ...util.functions import random_id
from .test_helpers import get_test_dust, get_highly_reflective_dust
class TestFilters(object):
def setup_class(self):
m = Model()
m.set_cartesian_grid([-1., 1.],
[-1., 1.],
[-1., 1.])
m.add_density_grid(np.array([[[1.]]]), get_test_dust())
s = m.add_point_source()
s.name = 'first'
s.luminosity = 1.
s.temperature = 6000.
s = m.add_point_source()
s.name = 'second'
s.luminosity = 1.
s.temperature = 6000.
i = m.add_peeled_images(sed=True, image=True)
i.set_viewing_angles([1., 2., 3.], [1., 2., 3.])
i.set_image_limits(-1., 1., -1., 1.)
i.set_image_size(10, 20)
f1 = i.add_filter()
f1.name = 'F1'
f1.spectral_coord = [1, 1.1, 1.2, 1.3] * u.micron
f1.transmission = [0., 100., 50, 0.] * u.percent
f1.detector_type = 'photons'
f1.alpha = 0.
f1.central_spectral_coord = 1.15 * u.micron
f2 = i.add_filter()
f2.name = 'F2'
f2.spectral_coord = [2, 2.1, 2.2, 2.3, 2.4] * u.micron
f2.transmission = [0., 50, 100, 60, 0.] * u.percent
f2.detector_type = 'energy'
f2.alpha = 1.
f2.central_spectral_coord = 2.15 * u.micron
m.set_n_initial_iterations(0)
m.set_n_photons(imaging=1000)
self.tmpdir = tempfile.mkdtemp()
m.write(os.path.join(self.tmpdir, random_id()))
self.m = m.run()
def teardown_class(self):
shutil.rmtree(self.tmpdir)
def test_image_wav(self):
image = self.m.get_image()
np.testing.assert_allclose(image.nu, [2.60689094e+14, 1.39438353e+14])
np.testing.assert_allclose(image.wav, [1.15, 2.15])
def test_sed_wav(self):
sed = self.m.get_sed()
np.testing.assert_allclose(sed.nu, [2.60689094e+14, 1.39438353e+14])
np.testing.assert_allclose(sed.wav, [1.15, 2.15])
def test_image_shape(self):
image = self.m.get_image()
assert image.val.shape == (3, 20, 10, 2)
def test_sed_shape(self):
sed = self.m.get_sed()
assert sed.val.shape == (3, 1, 2)
def test_image_values(self):
image = self.m.get_image(units='MJy/sr', distance=1)
np.testing.assert_allclose(np.sum(image.val[:, :, :, 0]), 3438.059082285024, rtol=0.1)
np.testing.assert_allclose(np.sum(image.val[:, :, :, 1]), 2396.4803378036186, rtol=0.1)
|
11518571
|
import os.path as osp
from torch_geometric.datasets import Planetoid
import torch_geometric.transforms as T
def get_planetoid_dataset(name, normalize_features=False, transform=None):
path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', name)
dataset = Planetoid(path, name)
if transform is not None and normalize_features:
dataset.transform = T.Compose([T.NormalizeFeatures(), transform])
elif normalize_features:
dataset.transform = T.NormalizeFeatures()
elif transform is not None:
dataset.transform = transform
return dataset
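# Minimal usage sketch ('Cora' is one of the standard Planetoid dataset
# names; the files are downloaded on first use):
# dataset = get_planetoid_dataset('Cora', normalize_features=True)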
|
11518580
|
from apple.util.ints import uint64
# The actual space in bytes of a plot is _expected_plot_size(k) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
# This is not used in consensus, only for display purposes
UI_ACTUAL_SPACE_CONSTANT_FACTOR = 0.762
def _expected_plot_size(k: int) -> uint64:
"""
Given the plot size parameter k (which is between 32 and 59), computes the
expected size of the plot in bytes (times a constant factor). This is based on efficient encoding
of the plot, and aims to be scale agnostic, so larger plots don't
necessarily get more rewards per byte. The +1 is added to give half a bit more space per entry, which
is necessary to store the entries in the plot.
"""
return ((2 * k) + 1) * (2 ** (k - 1))
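# Worked example (illustrative, not part of the original module): for the
# minimum k=32, _expected_plot_size(32) == 65 * 2**31 == 139,586,437,120
# bytes (~139.6 GB), or roughly 106.4 GB after applying
# UI_ACTUAL_SPACE_CONSTANT_FACTOR for display.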
|
11518598
|
from BaseController import BaseController
import tornado.ioloop
import tornado.web
import dateutil.parser
import datetime
class TopCommandsController(BaseController):
def get(self):
return_data = dict(data=[],
timestamp=datetime.datetime.now().isoformat())
server = self.get_argument("server")
from_date = self.get_argument("from", None)
to_date = self.get_argument("to", None)
if not from_date or not to_date:
end = datetime.datetime.now()
delta = datetime.timedelta(seconds=120)
start = end - delta
else:
start = dateutil.parser.parse(from_date)
end = dateutil.parser.parse(to_date)
for data in self.stats_provider.get_top_commands_stats(server, start,
end):
return_data['data'].append([data[0], data[1]])
self.write(return_data)
|
11518609
|
import os
import glob
import numpy as np
import nibabel as nib
import pandas as pd
from glmsingle.design.make_design_matrix import make_design
from glmsingle.glmsingle import GLM_single
import time
sub = 2
ses = 1
stimdur = 0.5
tr = 2
proj_path = os.path.join(
'/home',
'adf',
'charesti',
'data',
'arsa-fmri',
'BIDS')
data_path = os.path.join(
proj_path,
'derivatives',
'fmriprep',
'sub-{}',
'ses-{}',
'func')
design_path = os.path.join(
proj_path,
'sub-{}',
'ses-{}',
'func')
runs = glob.glob(
os.path.join(data_path.format(sub, ses), '*preproc*nii.gz'))
runs.sort()
runs = runs[:-1]
eventfs = glob.glob(
os.path.join(design_path.format(sub, ses), '*events.tsv'))
eventfs.sort()
runs = runs[:3]
eventfs = eventfs[:3]
data = []
design = []
for i, (run, eventf) in enumerate(zip(runs, eventfs)):
print(f'run {i}')
y = nib.load(run).get_fdata().astype(np.float32)
dims = y.shape
# y = np.moveaxis(y, -1, 0)
# y = y.reshape([y.shape[0], -1])
n_volumes = y.shape[-1]
# Load onsets and item presented
onsets = pd.read_csv(eventf, sep='\t')["onset"].values
items = pd.read_csv(eventf, sep='\t')["stimnumber"].values
n_events = len(onsets)
# Create design matrix
events = pd.DataFrame()
events["duration"] = [stimdur] * n_events
events["onset"] = np.round(onsets)
events["trial_type"] = items
# pass in the events data frame. the convolving of the HRF now
# happens internally
design.append(
make_design(events, tr, n_volumes)
)
data.append(y)
opt = {'wantlss': 0}
outputdir = 'GLMestimatesingletrialoutputs'
start_time = time.time()
gst = GLM_single(opt)
results = gst.fit(
design,
data,
stimdur,
tr,
outputdir=outputdir)
elapsed_time = time.time() - start_time
print(
'elapsedtime: ',
f'{time.strftime("%H:%M:%S", time.gmtime(elapsed_time))}'
)
|
11518640
|
from unittest import mock, skipUnless
from django.db import connection
from django.db.backends.mysql.features import DatabaseFeatures
from django.test import TestCase
@skipUnless(connection.vendor == 'mysql', 'MySQL tests')
class TestFeatures(TestCase):
def test_supports_transactions(self):
"""
All storage engines except MyISAM support transactions.
"""
with mock.patch('django.db.connection.features._mysql_storage_engine', 'InnoDB'):
self.assertTrue(connection.features.supports_transactions)
del connection.features.supports_transactions
with mock.patch('django.db.connection.features._mysql_storage_engine', 'MyISAM'):
self.assertFalse(connection.features.supports_transactions)
del connection.features.supports_transactions
def test_skip_locked_no_wait(self):
with mock.MagicMock() as _connection:
_connection.mysql_version = (8, 0, 1)
_connection.mysql_is_mariadb = False
database_features = DatabaseFeatures(_connection)
self.assertTrue(database_features.has_select_for_update_skip_locked)
self.assertTrue(database_features.has_select_for_update_nowait)
with mock.MagicMock() as _connection:
_connection.mysql_version = (8, 0, 0)
_connection.mysql_is_mariadb = False
database_features = DatabaseFeatures(_connection)
self.assertFalse(database_features.has_select_for_update_skip_locked)
self.assertFalse(database_features.has_select_for_update_nowait)
|
11518657
|
from boa3.builtin import public
@public
def Main(condition: bool) -> int:
a = 0
if condition:
a = a + 2
else:
a = 10
return a
|
11518679
|
import os
import sys
import acconeer.exptool as et
def main():
parser = et.utils.ExampleArgumentParser()
parser.add_argument("-o", "--output-dir", type=str, required=True)
parser.add_argument("--file-format", type=str, default="h5")
parser.add_argument("--frames-per-file", type=int, default=10000)
args = parser.parse_args()
et.utils.config_logging(args)
if os.path.exists(args.output_dir):
print("Directory '{}' already exists, won't overwrite".format(args.output_dir))
sys.exit(1)
file_format = args.file_format.lower()
if file_format == "np":
file_format = "npz"
if file_format not in ["h5", "npz"]:
print("Unknown format '{}'".format(args.file_format))
sys.exit(1)
if args.frames_per_file < 10:
print("Frames per file must be at least 10")
sys.exit(1)
if args.socket_addr:
client = et.SocketClient(args.socket_addr)
elif args.spi:
client = et.SPIClient()
else:
port = args.serial_port or et.utils.autodetect_serial_port()
client = et.UARTClient(port)
config = et.configs.EnvelopeServiceConfig()
config.sensor = args.sensors
config.update_rate = 30
session_info = client.start_session(config)
os.makedirs(args.output_dir)
interrupt_handler = et.utils.ExampleInterruptHandler()
print("Press Ctrl-C to end session")
total_num_frames = 0
while not interrupt_handler.got_signal:
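# Rotate output files: a fresh Recorder is started at the first frame of
# each file and saved to disk once frames_per_file frames are collected.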
record_count, num_frames_in_record = divmod(total_num_frames, args.frames_per_file)
if num_frames_in_record == 0:
recorder = et.recording.Recorder(sensor_config=config, session_info=session_info)
data_info, data = client.get_next()
recorder.sample(data_info, data)
if num_frames_in_record + 1 == args.frames_per_file:
record = recorder.close()
filename = os.path.join(
args.output_dir, "{:04}.{}".format(record_count + 1, file_format)
)
print("Saved", filename)
et.recording.save(filename, record)
total_num_frames += 1
print("Sampled {:>5}".format(total_num_frames), end="\r", flush=True)
try:
client.disconnect()
except Exception:
pass
record_count, num_frames_in_record = divmod(total_num_frames, args.frames_per_file)
if num_frames_in_record > 0:
record = recorder.close()
filename = os.path.join(args.output_dir, "{:04}.{}".format(record_count + 1, file_format))
print("Saved", filename)
et.recording.save(filename, record)
if __name__ == "__main__":
main()
|
11518684
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class CRF(nn.Module):
"""
Class for learning and inference in conditional random field model using mean field approximation
and convolutional approximation in pairwise potentials term.
Parameters
----------
n_spatial_dims : int
Number of spatial dimensions of input tensors.
filter_size : int or sequence of ints
Size of the gaussian filters in message passing.
If it is a sequence its length must be equal to ``n_spatial_dims``.
n_iter : int
Number of iterations in mean field approximation.
requires_grad : bool
Whether or not to train CRF's parameters.
returns : str
Can be 'logits', 'proba', 'log-proba'.
smoothness_weight : float
Initial weight of smoothness kernel.
smoothness_theta : float or sequence of floats
Initial bandwidths for each spatial feature in the gaussian smoothness kernel.
If it is a sequence its length must be equal to ``n_spatial_dims``.
"""
def __init__(self, n_spatial_dims=2, filter_size=11, n_iter=5, requires_grad=True,
returns='logits', smoothness_weight=1, smoothness_theta=1):
super().__init__()
self.n_spatial_dims = n_spatial_dims
self.n_iter = n_iter
self.filter_size = np.broadcast_to(filter_size, n_spatial_dims)
self.returns = returns
self.requires_grad = requires_grad
self._set_param('smoothness_weight', smoothness_weight)
self._set_param('inv_smoothness_theta', 1 / np.broadcast_to(smoothness_theta, n_spatial_dims))
def _set_param(self, name, init_value):
setattr(self, name, nn.Parameter(torch.tensor(init_value, dtype=torch.float, requires_grad=self.requires_grad)))
def forward(self, x, spatial_spacings=None, verbose=False):
"""
Parameters
----------
x : torch.tensor
Tensor of shape ``(batch_size, n_classes, *spatial)`` with negative unary potentials, e.g. the CNN's output.
spatial_spacings : array of floats or None
Array of shape ``(batch_size, len(spatial))`` with spatial spacings of tensors in batch ``x``.
None is equivalent to all ones. Used to adapt spatial gaussian filters to different inputs' resolutions.
verbose : bool
Whether to display the iterations using tqdm-bar.
Returns
-------
output : torch.tensor
Tensor of shape ``(batch_size, n_classes, *spatial)``
with logits or (log-)probabilities of assignment to each class.
"""
batch_size, n_classes, *spatial = x.shape
assert len(spatial) == self.n_spatial_dims
# binary segmentation case
if n_classes == 1:
x = torch.cat([x, torch.zeros(x.shape).to(x)], dim=1)
if spatial_spacings is None:
spatial_spacings = np.ones((batch_size, self.n_spatial_dims))
negative_unary = x.clone()
for i in range(self.n_iter):
# normalizing
x = F.softmax(x, dim=1)
# message passing
x = self.smoothness_weight * self._smoothing_filter(x, spatial_spacings)
# compatibility transform
x = self._compatibility_transform(x)
# adding unary potentials
x = negative_unary - x
if self.returns == 'logits':
output = x
elif self.returns == 'proba':
output = F.softmax(x, dim=1)
elif self.returns == 'log-proba':
output = F.log_softmax(x, dim=1)
else:
raise ValueError("Attribute ``returns`` must be 'logits', 'proba' or 'log-proba'.")
if n_classes == 1:
output = output[:, 0] - output[:, 1] if self.returns == 'logits' else output[:, 0]
output.unsqueeze_(1)
return output
def _smoothing_filter(self, x, spatial_spacings):
"""
Parameters
----------
x : torch.tensor
Tensor of shape ``(batch_size, n_classes, *spatial)`` with negative unary potentials, e.g. logits.
spatial_spacings : torch.tensor or None
Tensor of shape ``(batch_size, len(spatial))`` with spatial spacings of tensors in batch ``x``.
Returns
-------
output : torch.tensor
Tensor of shape ``(batch_size, n_classes, *spatial)``.
"""
return torch.stack([self._single_smoothing_filter(x[i], spatial_spacings[i]) for i in range(x.shape[0])])
@staticmethod
def _pad(x, filter_size):
padding = []
for fs in filter_size:
padding += 2 * [fs // 2]
return F.pad(x, list(reversed(padding))) # F.pad pads from the end
def _single_smoothing_filter(self, x, spatial_spacing):
"""
Parameters
----------
x : torch.tensor
Tensor of shape ``(n, *spatial)``.
spatial_spacing : sequence of len(spatial) floats
Returns
-------
output : torch.tensor
Tensor of shape ``(n, *spatial)``.
"""
x = self._pad(x, self.filter_size)
for i, dim in enumerate(range(1, x.ndim)):
# reshape to (-1, 1, x.shape[dim])
x = x.transpose(dim, -1)
shape_before_flatten = x.shape[:-1]
x = x.flatten(0, -2).unsqueeze(1)
# 1d gaussian filtering
kernel = self._create_gaussian_kernel1d(self.inv_smoothness_theta[i], spatial_spacing[i],
self.filter_size[i]).view(1, 1, -1).to(x)
x = F.conv1d(x, kernel)
# reshape back to (n, *spatial)
x = x.squeeze(1).view(*shape_before_flatten, x.shape[-1]).transpose(-1, dim)
return x
@staticmethod
def _create_gaussian_kernel1d(inverse_theta, spacing, filter_size):
"""
Parameters
----------
inverse_theta : torch.tensor
            Tensor of shape ``()``, i.e. a scalar.
spacing : float
filter_size : int
Returns
-------
kernel : torch.tensor
Tensor of shape ``(filter_size,)``.
"""
distances = spacing * torch.arange(-(filter_size // 2), filter_size // 2 + 1).to(inverse_theta)
kernel = torch.exp(-(distances * inverse_theta) ** 2 / 2)
zero_center = torch.ones(filter_size).to(kernel)
zero_center[filter_size // 2] = 0
return kernel * zero_center
def _compatibility_transform(self, x):
"""
Parameters
----------
x : torch.Tensor of shape ``(batch_size, n_classes, *spatial)``.
Returns
-------
output : torch.tensor of shape ``(batch_size, n_classes, *spatial)``.
"""
labels = torch.arange(x.shape[1])
compatibility_matrix = self._compatibility_function(labels, labels.unsqueeze(1)).to(x)
return torch.einsum('ij..., jk -> ik...', x, compatibility_matrix)
@staticmethod
def _compatibility_function(label1, label2):
"""
Input tensors must be broadcastable.
Parameters
----------
label1 : torch.Tensor
label2 : torch.Tensor
Returns
-------
compatibility : torch.Tensor
"""
return -(label1 == label2).float()
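# Usage sketch added for illustration (not part of the original module); the
# input shape and values below are assumptions about a typical CNN output.
if __name__ == '__main__':
    crf = CRF(n_spatial_dims=2, n_iter=5, returns='proba')
    logits = torch.randn(2, 3, 32, 32)  # (batch_size, n_classes, H, W)
    proba = crf(logits)                 # refined per-pixel class probabilities
    print(proba.shape)                  # torch.Size([2, 3, 32, 32])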
|
11518686
|
import argparse
import os
import ubiq_internal_tasks as tasks
from Ity.Tokenizers import RegexTokenizer
from collections import defaultdict
import sys
import csv
import math
from operator import itemgetter
__author__ = 'wintere'
parser = argparse.ArgumentParser(description='Test Ubiqu+Ity tag_corpus task')
parser.add_argument('corpus_path', help='path to corpus to tag relative to the location of this script')
parser.add_argument('output_dir', help='path to output folder relative to the location of this script')
parser.add_argument('ngram_count', help='order of n-grams to generate, an integer between 1 and 3')
parser.add_argument('--ngram_pun', help='flag for including punctuation characters in ngrams', action='store_true')
parser.add_argument('--per_doc', help='generate per document ngrams', action='store_true')
#companion to tagCorpus.py
#uses the Ubiqu+Ity standard tokenizer to tokenize and n-gram a provided folder of texts
#faster than Ubiqu+Ity if only ngrams are desired
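#example invocation (hypothetical paths): python this_script.py ./my_corpus ./ngram_output 2 --ngram_pun --per_doc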
def ngramCorpus(args):
corpus_path = args.corpus_path
if not os.path.exists(corpus_path):
raise ValueError("Invalid input corpus input path.", corpus_path, "does not exist on disk.")
ncount = int(args.ngram_count)
if ncount > 3 or ncount < 1:
raise ValueError("Invalid parameter: ngram count must be between 1 and 3.")
#instantiate tokenizer
tokenizer = RegexTokenizer()
tokens = []
bad_files = []
#traverse files and tokenize
documentNgramCounts = defaultdict(int) # to count number of documents ngrams appear in
corpusNgramCounts = defaultdict(int)
per_doc_path = None
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
if args.per_doc:
per_doc_path = os.path.join(args.output_dir, 'perDocNgrams')
if not os.path.exists(per_doc_path):
os.mkdir(per_doc_path)
c = 0
for dirpath, subdirs, files in os.walk(corpus_path):
for file in files:
if '.txt' in file:
print(c)
c+=1
filepath = os.path.join(dirpath, file)
try:
#tokenize
tokens = tasks.tokenizeText(filepath, False, None, None, tokenizer)
#process out punctuation
tokens = tasks.ngramProcess(tokens, args.ngram_pun)
# update corpus dictionaries
docCounts = tasks.ngramUpdate(tokens, documentNgramCounts, corpusNgramCounts, ncount, args.ngram_pun)
if args.per_doc:
docName = os.path.splitext(os.path.basename(filepath))[0]
ngramCSV(documentNgramCounts=None, corpusNgramCounts=docCounts, maxN=ncount, output_dir=per_doc_path, name=docName, doc=True)
except NotImplementedError:
bad_files.append(filepath)
ngramCSV(documentNgramCounts=documentNgramCounts, corpusNgramCounts=corpusNgramCounts, maxN=ncount, output_dir=args.output_dir, name=os.path.basename(corpus_path), doc=False)
print("Completed ngram processing of corpus " + os.path.basename(corpus_path))
if bad_files != []:
print("Unable to ngram the following files" + str(bad_files))
#given dictionaries representing corpus and document frequencies, outputs ngram statistics to csv
#one csv for each k-gram
def ngramCSV(corpusNgramCounts, documentNgramCounts, maxN, output_dir, name, doc):
fds = []
writers = []
rank = [0] * maxN
prevCount = [sys.maxsize] * maxN
# open and initialize ngram csvs
for i in range(1, maxN + 1):
path = os.path.join(output_dir, name + '-' + str(i) + 'grams.csv')
        f = open(path, 'w', newline='')
        w = csv.writer(f, delimiter=',', quoting=csv.QUOTE_MINIMAL)
if doc:
w.writerow(["ngram", "document frequency", "rank in document"])
else:
w.writerow(["ngram", "corpus frequency", "document frequency", "rank in corpus"])
fds.append(f)
writers.append(w)
# rank and print ngrams
    # sort by descending frequency, breaking ties by the ngram itself
    for key, value in sorted(corpusNgramCounts.items(), key=lambda kv: (-kv[1], kv[0])):
n = len(key) - 1
if value < prevCount[n]:
rank[n] += 1
prevCount[n] = value
if doc:
row = [' '.join(key), value, rank[n]]
else:
row = [' '.join(key), value, documentNgramCounts[key], rank[n]]
writers[n].writerow(row)
for fd in fds:
fd.close()
if __name__ == '__main__':
args = parser.parse_args()
ngramCorpus(args)
|
11518708
|
import datetime as dt
import pytest
import pytz
import stix2
from .constants import CAMPAIGN_ID, CAMPAIGN_MORE_KWARGS, IDENTITY_ID
EXPECTED = """{
"type": "campaign",
"id": "campaign--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
"created_by_ref": "identity--311b2d2d-f010-4473-83ec-1edf84858f4c",
"created": "2016-04-06T20:03:00.000Z",
"modified": "2016-04-06T20:03:00.000Z",
"name": "Green Group Attacks Against Finance",
"description": "Campaign by Green Group against a series of targets in the financial services sector."
}"""
def test_campaign_example():
campaign = stix2.v20.Campaign(**CAMPAIGN_MORE_KWARGS)
assert campaign.serialize(pretty=True) == EXPECTED
@pytest.mark.parametrize(
"data", [
EXPECTED,
{
"type": "campaign",
"id": CAMPAIGN_ID,
"created": "2016-04-06T20:03:00Z",
"modified": "2016-04-06T20:03:00Z",
"created_by_ref": IDENTITY_ID,
"description": "Campaign by Green Group against a series of targets in the financial services sector.",
"name": "Green Group Attacks Against Finance",
},
],
)
def test_parse_campaign(data):
cmpn = stix2.parse(data, version="2.0")
assert cmpn.type == 'campaign'
assert cmpn.id == CAMPAIGN_ID
assert cmpn.created == dt.datetime(2016, 4, 6, 20, 3, 0, tzinfo=pytz.utc)
assert cmpn.modified == dt.datetime(2016, 4, 6, 20, 3, 0, tzinfo=pytz.utc)
assert cmpn.created_by_ref == IDENTITY_ID
assert cmpn.description == "Campaign by Green Group against a series of targets in the financial services sector."
assert cmpn.name == "Green Group Attacks Against Finance"
# TODO: Add other examples
|
11518713
|
import sys
import os
import numpy as np
import time
from PIL import Image
APS = 100
TileFolder = sys.argv[1] + '/'
heat_map_out = 'patch-level-color.txt'
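# Tile filenames are assumed to look like "<x_off>_<y_off>_<svs_pw>_<png_pw>.png",
# as inferred from the parsing in load_data() below.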
def whiteness(png):
    wh = (np.std(png[:,:,0].flatten()) + np.std(png[:,:,1].flatten()) + np.std(png[:,:,2].flatten())) / 3.0
    return wh
def blackness(png):
    bk = np.mean(png)
    return bk
def redness(png):
    rd = np.mean((png[:,:,0] >= 190) * (png[:,:,1] <= 100) * (png[:,:,2] <= 100))
    return rd
def load_data():
    X = np.zeros(shape=(1000000, 3), dtype=np.float32)
    coor = np.zeros(shape=(1000000, 2), dtype=np.int32)
    ind = 0
    for fn in os.listdir(TileFolder):
        full_fn = TileFolder + '/' + fn
        if not os.path.isfile(full_fn):
            continue
        if len(fn.split('_')) < 4:
            continue
        x_off = float(fn.split('_')[0])
        y_off = float(fn.split('_')[1])
        svs_pw = float(fn.split('_')[2])
        png_pw = float(fn.split('_')[3].split('.png')[0])
        png = np.array(Image.open(full_fn).convert('RGB'))
        for x in range(0, png.shape[1], APS):
            if x + APS > png.shape[1]:
                continue
            for y in range(0, png.shape[0], APS):
                if y + APS > png.shape[0]:
                    continue
                X[ind, 0] = whiteness(png[y:y+APS, x:x+APS, :])
                X[ind, 1] = blackness(png[y:y+APS, x:x+APS, :])
                X[ind, 2] = redness(png[y:y+APS, x:x+APS, :])
                coor[ind, 0] = np.int32(x_off + (x + APS/2) * svs_pw / png_pw)
                coor[ind, 1] = np.int32(y_off + (y + APS/2) * svs_pw / png_pw)
                ind += 1
    X = X[0:ind]
    coor = coor[0:ind]
    return X, coor
def split_validation():
    Wh, coor = load_data()
    fid = open(TileFolder + '/' + heat_map_out, 'w')
    for idx in range(0, Wh.shape[0]):
        fid.write('{} {} {} {} {}\n'.format(coor[idx][0], coor[idx][1], Wh[idx][0], Wh[idx][1], Wh[idx][2]))
    fid.close()
def main():
    split_validation()
if __name__ == "__main__":
    main()
|
11518734
|
from jikji import getview
getview('myview.home').url_rule = '/'
getview('myview.profile').url_rule = '/$1/'
getview('myview.requirements').url_rule = '/requirements.txt'
|
11518749
|
number1 = input('Enter first number: ')
number2 = input('Enter second number: ')
number3 = input('Enter third number: ')
total = int(number1) + int(number2) + int(number3)
print(total)
|
11518758
|
import itertools
from array import array
from collections import Counter
import pytest
from hypothesis import given, strategies
from anonlink.solving import (
greedy_solve, greedy_solve_python, greedy_solve_native, pairs_from_groups,
probabilistic_greedy_solve, probabilistic_greedy_solve_native,
probabilistic_greedy_solve_python)
from tests import UINT_MAX
def _zip_candidates(candidates):
candidates = tuple(candidates)
sims = array('d')
dset_is0 = array('I')
dset_is1 = array('I')
rec_is0 = array('I')
rec_is1 = array('I')
for sim, ((dset_i0, rec_i0), (dset_i1, rec_i1)) in candidates:
sims.append(sim)
dset_is0.append(dset_i0)
dset_is1.append(dset_i1)
rec_is0.append(rec_i0)
rec_is1.append(rec_i1)
return sims, (dset_is0, dset_is1), (rec_is0, rec_is1)
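# For example, _zip_candidates([(.8, ((0, 0), (1, 0)))]) returns
# (array('d', [0.8]), (array('I', [0]), array('I', [1])), (array('I', [0]), array('I', [0]))).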
def _compare_matching(result, truth):
result_set = set(map(frozenset, result))
assert len(result) == len(result_set)
truth_set = set(map(frozenset, truth))
assert len(truth) == len(truth_set)
assert result_set == truth_set
@pytest.mark.parametrize("greedy_solve", [greedy_solve_native, greedy_solve_python])
def test_greedy_twoparty(greedy_solve):
candidates = [(.8, ((0, 0), (1, 0)))]
result = greedy_solve(_zip_candidates(candidates))
_compare_matching(result, [{(0, 0), (1, 0)}])
candidates = [(.8, ((0, 0), (1, 0))),
(.7, ((0, 1), (1, 0)))]
result = greedy_solve(_zip_candidates(candidates))
_compare_matching(result, [{(0,0), (1,0)}])
candidates = []
result = greedy_solve(_zip_candidates(candidates))
_compare_matching(result, [])
candidates = [(.8, ((0, 0), (1, 0))),
(.7, ((0, 0), (1, 1))),
(.7, ((0, 1), (1, 0))),
(.6, ((0, 1), (1, 1)))]
result = greedy_solve(_zip_candidates(candidates))
_compare_matching(result, [{(0, 0), (1, 0)},
{(0, 1), (1, 1)}])
@pytest.mark.parametrize("greedy_solve", [greedy_solve_native, greedy_solve_python])
def test_greedy_threeparty(greedy_solve):
candidates = [(.9, ((1, 0), (2, 0))),
(.8, ((0, 0), (1, 1))),
(.8, ((0, 0), (2, 1))),
(.8, ((1, 1), (2, 1))),
(.7, ((0, 0), (1, 0))),
(.7, ((0, 0), (2, 0)))]
result = greedy_solve(_zip_candidates(candidates))
_compare_matching(result, [{(0,0), (1,1), (2,1)},
{(1,0), (2,0)}])
candidates = [(.8, ((0, 0), (1, 0))),
(.8, ((0, 1), (2, 1))),
(.8, ((1, 1), (2, 1))),
(.7, ((0, 0), (2, 0))),
(.7, ((0, 1), (1, 1)))]
result = greedy_solve(_zip_candidates(candidates))
_compare_matching(result, [{(0,0), (1,0)},
{(0,1), (1,1), (2,1)}])
candidates = [(1., ((0, 0), (1, 0))),
(1., ((0, 0), (2, 0))),
(1., ((2, 0), (2, 1)))]
result = greedy_solve(_zip_candidates(candidates))
_compare_matching(result, [{(0,0), (1,0)}, {(2,0), (2,1)}])
candidates = [(1., ((0, 0), (1, 0))),
(1., ((2, 0), (3, 0))),
(1., ((2, 0), (4, 0))),
(1., ((3, 0), (4, 0))),
(1., ((0, 0), (2, 0))),
(1., ((0, 0), (3, 0))),
(1., ((0, 0), (4, 0))),
(1., ((1, 0), (2, 0))),
(1., ((1, 0), (3, 0))),
(1., ((1, 0), (4, 0)))]
result = greedy_solve(_zip_candidates(candidates))
_compare_matching(result, [{(0,0), (1,0), (2,0), (3,0), (4,0)}])
@pytest.mark.parametrize("greedy_solve", [greedy_solve_native, greedy_solve_python])
def test_greedy_fourparty(greedy_solve):
candidates = [(.9, ((0, 0), (1, 0))),
(.9, ((2, 0), (3, 0))),
(.7, ((0, 0), (2, 0))),
(.7, ((1, 0), (3, 0))),
(.7, ((0, 0), (3, 0))),
(.7, ((1, 0), (2, 0)))]
result = greedy_solve(_zip_candidates(candidates))
_compare_matching(result, [{(0,0), (1,0), (2,0), (3,0)}])
@pytest.mark.parametrize("greedy_solve", [greedy_solve_native, greedy_solve_python])
def test_inconsistent_dataset_number(greedy_solve):
candidates = (
array('d', [.5]),
(array('I', [3]), array('I', [4])),
(array('I', [2]), array('I', [6]), array('I', [7])))
with pytest.raises(ValueError):
greedy_solve(candidates)
@pytest.mark.parametrize("prob_greedy_solve", [probabilistic_greedy_solve_native, probabilistic_greedy_solve_python])
def test_wrong_merge_threshold(prob_greedy_solve):
candidates = [(.8, ((0, 0), (1, 0)))]
with pytest.raises(ValueError):
prob_greedy_solve(_zip_candidates(candidates), merge_threshold=-0.1)
with pytest.raises(ValueError):
prob_greedy_solve(_zip_candidates(candidates), merge_threshold=1.01)
@pytest.mark.parametrize('datasets_n', [0, 1, 3, 5])
def test_unsupported_shape(datasets_n):
candidates = (
array('d', [.5]),
tuple(array('I', [3]) for _ in range(datasets_n)),
tuple(array('I', [2]) for _ in range(datasets_n)))
with pytest.raises(NotImplementedError):
greedy_solve(candidates)
@pytest.mark.parametrize("greedy_solve", [greedy_solve_native, greedy_solve_python])
def test_inconsistent_entry_number(greedy_solve):
candidates = (
array('d', [.5, .3]),
(array('I', [3]), array('I', [4])),
(array('I', [2]), array('I', [6])))
with pytest.raises(ValueError):
greedy_solve(candidates)
candidates = (
array('d', [.5]),
(array('I', [3, 3]), array('I', [4])),
(array('I', [2]), array('I', [6])))
with pytest.raises(ValueError):
greedy_solve(candidates)
candidates = (
array('d', [.5]),
(array('I', [3, 3]), array('I', [4, 6])),
(array('I', [2]), array('I', [6])))
with pytest.raises(ValueError):
greedy_solve(candidates)
candidates = (
array('d', [.5]),
(array('I', [3]), array('I', [4, 6])),
(array('I', [2]), array('I', [6])))
with pytest.raises(ValueError):
greedy_solve(candidates)
candidates = (
array('d', [.5]),
(array('I', [3]), array('I', [4])),
(array('I', [2]), array('I', [6, 3])))
with pytest.raises(ValueError):
greedy_solve(candidates)
candidates = (
array('d', [.5]),
(array('I', [3]), array('I', [4])),
(array('I', [2, 1]), array('I', [6, 3])))
with pytest.raises(ValueError):
greedy_solve(candidates)
candidates = (
array('d', [.5]),
(array('I', [3]), array('I', [4])),
(array('I', [2, 1]), array('I', [6])))
with pytest.raises(ValueError):
greedy_solve(candidates)
# === HYPOTHESIS TESTS ===
def dict_to_candidate_pairs(candidate_dict):
candidates = map(tuple, map(reversed, candidate_dict.items()))
return sorted(candidates, key=lambda x: (-x[0],) + x[1:])
indices_np = strategies.tuples(
strategies.integers(min_value=0, max_value=UINT_MAX),
strategies.integers(min_value=0, max_value=UINT_MAX))
index_pair_np = strategies.tuples(
indices_np, indices_np
).filter(
lambda x: x[0] != x[1]
).map(lambda x: tuple(sorted(x)))
candidate_pairs_np = strategies.dictionaries(
index_pair_np,
strategies.floats(min_value=0, max_value=1)
).map(dict_to_candidate_pairs)
index_pair_np_ndedup = strategies.tuples(
indices_np, indices_np
).map(lambda x: tuple(sorted(x)))
candidate_pairs_np_ndedup = strategies.dictionaries(
    index_pair_np_ndedup,
    strategies.floats(min_value=0, max_value=1)
).map(dict_to_candidate_pairs)
@given(candidate_pairs_np)
def test_greedy_np(candidate_pairs):
candidates = _zip_candidates(candidate_pairs)
all_candidate_pairs = {x for _, x in candidate_pairs}
all_records = set(itertools.chain.from_iterable(all_candidate_pairs))
solution = list(greedy_solve(candidates))
matched = Counter(itertools.chain.from_iterable(solution))
# Every record is in at most one group
assert all(matched[i] <= 1 and matched[j] <= 1
for _, (i, j) in candidate_pairs)
# Include singleton groups
all_groups = list(solution)
all_groups.extend([x] for x in all_records - matched.keys())
# All groups that can be merged have been merged.
for g1, g2 in itertools.combinations(all_groups, 2):
assert any(tuple(sorted((r1, r2))) not in all_candidate_pairs
for r1 in g1 for r2 in g2)
indices0_2p = strategies.tuples(strategies.just(0),
strategies.integers(min_value=0, max_value=UINT_MAX))
indices1_2p = strategies.tuples(strategies.just(1),
strategies.integers(min_value=0, max_value=UINT_MAX))
index_pair_2p = strategies.tuples(indices0_2p, indices1_2p)
candidate_pairs_2p = strategies.dictionaries(
index_pair_2p,
strategies.floats(min_value=0, max_value=1)
).map(dict_to_candidate_pairs)
@given(candidate_pairs_2p)
def test_greedy_2p(candidate_pairs):
candidates = _zip_candidates(candidate_pairs)
solution = greedy_solve(candidates)
assert all(len(group) <= 2 for group in solution)
similarity_map = dict(map(reversed, candidate_pairs))
matches = {records: similarity_map[records]
for records in map(tuple, map(sorted, solution))}
# Every record that could have a match does have a match
matched = set(itertools.chain.from_iterable(solution))
assert all(i in matched or j in matched for _, (i, j) in candidate_pairs)
# Every pair is taken unless either of the candidates have a better match
match_similarities = {i: sim for recs, sim in matches.items() for i in recs}
for sim, (i, j) in candidate_pairs:
assert ((i, j) in matches
or match_similarities.get(i, float('-inf')) >= sim
or match_similarities.get(j, float('-inf')) >= sim)
@given(candidate_pairs_2p)
def test_python_native_match_2p(candidate_pairs):
candidates = _zip_candidates(candidate_pairs)
solution_python = greedy_solve_python(candidates)
solution_native = greedy_solve_native(candidates)
# We don't care about the order
solution_python = frozenset(map(frozenset, solution_python))
solution_native = frozenset(map(frozenset, solution_native))
assert solution_python == solution_native
@given(candidate_pairs_np)
def test_python_native_match_np(candidate_pairs):
candidates = _zip_candidates(candidate_pairs)
solution_python = greedy_solve_python(candidates)
solution_native = greedy_solve_native(candidates)
# We don't care about the order
solution_python = frozenset(map(frozenset, solution_python))
solution_native = frozenset(map(frozenset, solution_native))
assert solution_python == solution_native
def _all_indices_unique(groups):
seen0 = set()
seen1 = set()
for group in groups:
(dset_i0, rec_i0), (dset_i1, rec_i1) = group
assert dset_i0 == 0
assert dset_i1 == 1
if rec_i0 in seen0 or rec_i1 in seen1:
return False
seen0.add(rec_i0)
seen1.add(rec_i1)
return True
groups_space_2p = strategies.lists(index_pair_2p).filter(_all_indices_unique)
def _groups_from_pairs(pairs):
return [((0, i), (1, j)) for i, j in pairs]
@given(groups_space_2p)
def test_pairs_from_groups(groups):
assert groups == _groups_from_pairs(pairs_from_groups(groups))
@given(candidate_pairs_2p,
strategies.floats(min_value=0, max_value=1),
strategies.booleans())
def test_probabilistic_python_native_match_2p(
candidate_pairs,
merge_threshold,
deduplicated
):
candidates = _zip_candidates(candidate_pairs)
solution_python = probabilistic_greedy_solve_python(
candidates, merge_threshold=merge_threshold, deduplicated=deduplicated)
solution_native = probabilistic_greedy_solve_native(
candidates, merge_threshold=merge_threshold, deduplicated=deduplicated)
# We don't care about the order
solution_python = frozenset(map(frozenset, solution_python))
solution_native = frozenset(map(frozenset, solution_native))
assert solution_python == solution_native
@given(candidate_pairs_np,
strategies.floats(min_value=0, max_value=1),
strategies.booleans())
def test_probabilistic_python_native_match_np(
candidate_pairs,
merge_threshold,
deduplicated
):
candidates = _zip_candidates(candidate_pairs)
solution_python = probabilistic_greedy_solve_python(
candidates, merge_threshold=merge_threshold, deduplicated=deduplicated)
solution_native = probabilistic_greedy_solve_native(
candidates, merge_threshold=merge_threshold, deduplicated=deduplicated)
# We don't care about the order
solution_python = frozenset(map(frozenset, solution_python))
solution_native = frozenset(map(frozenset, solution_native))
assert solution_python == solution_native
@given(candidate_pairs_np_ndedup,
strategies.floats(min_value=0, max_value=1),
strategies.booleans())
def test_probabilistic_python_native_match_np_ndedup(
candidate_pairs,
merge_threshold,
deduplicated
):
candidates = _zip_candidates(candidate_pairs)
solution_python = probabilistic_greedy_solve_python(
candidates, merge_threshold=merge_threshold, deduplicated=deduplicated)
solution_native = probabilistic_greedy_solve_native(
candidates, merge_threshold=merge_threshold, deduplicated=deduplicated)
# We don't care about the order
solution_python = frozenset(map(frozenset, solution_python))
solution_native = frozenset(map(frozenset, solution_native))
assert solution_python == solution_native
@given(candidate_pairs_np)
def test_probabilistic_nonprobabilistic_match(candidate_pairs):
candidates = _zip_candidates(candidate_pairs)
solution_probabilistic = probabilistic_greedy_solve(
candidates, merge_threshold=1, deduplicated=False)
solution_nonprobabilistic = greedy_solve(candidates)
# We don't care about the order
solution_probabilistic = frozenset(map(frozenset, solution_probabilistic))
solution_nonprobabilistic = frozenset(map(frozenset,
solution_nonprobabilistic))
assert solution_probabilistic == solution_nonprobabilistic
@given(candidate_pairs_np_ndedup)
def test_probabilistic_nonprobabilistic_match_ndedup(candidate_pairs):
candidates = _zip_candidates(candidate_pairs)
solution_probabilistic = probabilistic_greedy_solve(
candidates, merge_threshold=1, deduplicated=False)
solution_nonprobabilistic = greedy_solve(candidates)
# We don't care about the order
solution_probabilistic = frozenset(map(frozenset, solution_probabilistic))
solution_nonprobabilistic = frozenset(map(frozenset,
solution_nonprobabilistic))
assert solution_probabilistic == solution_nonprobabilistic
def test_probabilistic_greedy():
candidates = [(.9, ((0, 0), (0, 1))),
(.8, ((1, 0), (1, 1))),
(.7, ((0, 0), (1, 0))),
(.6, ((0, 0), (1, 1))),
(.5, ((0, 1), (1, 0)))]
result = probabilistic_greedy_solve(
_zip_candidates(candidates), merge_threshold=0., deduplicated=True)
_compare_matching(result, [{(0,0), (1,0)}])
result = probabilistic_greedy_solve(
_zip_candidates(candidates), merge_threshold=.75, deduplicated=True)
_compare_matching(result, [{(0,0), (1,0)}])
result = probabilistic_greedy_solve(
_zip_candidates(candidates), merge_threshold=.76, deduplicated=True)
_compare_matching(result, [{(0,0), (1,0)}])
result = probabilistic_greedy_solve(
_zip_candidates(candidates), merge_threshold=1., deduplicated=True)
_compare_matching(result, [{(0,0), (1,0)}])
result = probabilistic_greedy_solve(
_zip_candidates(candidates), merge_threshold=0.0, deduplicated=False)
_compare_matching(result, [{(0,0), (1,0), (0,1), (1,1)}])
result = probabilistic_greedy_solve(
_zip_candidates(candidates), merge_threshold=0.75, deduplicated=False)
_compare_matching(result, [{(0,0), (1,0), (0,1), (1,1)}])
result = probabilistic_greedy_solve(
_zip_candidates(candidates), merge_threshold=0.76, deduplicated=False)
_compare_matching(result, [{(0,0), (0,1)}, {(1,0), (1,1)}])
result = probabilistic_greedy_solve(
_zip_candidates(candidates), merge_threshold=1, deduplicated=False)
_compare_matching(result, [{(0,0), (0,1)}, {(1,0), (1,1)}])
|
11518769
|
import boto3
import os
import time
import json
'''
SAMPLE EVENTS
####
Import Events from config.json file
####
{
"LanguageCodes": [
"en",
"fr",
"es"
]
}
####
Insert or overwrite messages using events
####
{
"LanguageCodes": [
"en",
"fr",
"es"
],
"Configs": [
{
"DefaultResponse": "Hello. Thank you for calling the Amazon Connect Command Center hotline",
"ConfigType": "MESSAGE",
"CollectionId": "ENTRY_FLOW",
"ConfigId": "GREETING"
}
]
}
'''
ddb = boto3.resource('dynamodb')
tb_name = os.environ['ConfigTable']
translate = boto3.client('translate')
primary_key = os.environ['TablePrimaryKey']
sort_key = os.environ['TableSortKey']
table = ddb.Table(tb_name)
def parse_parameters(params):
if "LanguageCodes" in params:
language_codes = params['LanguageCodes']
else:
language_codes = ['en']
return language_codes
def translate_text(message, language_code):
try:
resp = translate.translate_text(
Text=message,
SourceLanguageCode='en',
TargetLanguageCode=language_code
)
new_message = resp['TranslatedText']
return new_message, language_code
except Exception as e:
print(e)
return message, 'en'
def build_translation_dict(message, language_codes, perform_translation=True):
translations = {
'en': message
}
for code in language_codes:
if perform_translation:
temp_txt, temp_code = translate_text(message, code)
translations[temp_code] = temp_txt
else:
translations[code] = message
return translations
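# For illustration (the translated text is hypothetical Translate output):
#   build_translation_dict("Hello", ["fr"]) -> {"en": "Hello", "fr": "Bonjour"}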
def process_config(item, language_codes):
try:
collection_id = item["CollectionId"]
config_id = item["ConfigId"]
config_type = item["ConfigType"]
default_response = item["DefaultResponse"]
if item["ConfigType"] == "STATIC_ROUTING":
return item
elif item["ConfigType"] == "LANGUAGE_ROUTING":
translations = build_translation_dict(item["DefaultResponse"], language_codes, False)
item.update(translations)
return item
elif item["ConfigType"] == "MESSAGE":
translations = build_translation_dict(item["DefaultResponse"], language_codes)
item.update(translations)
return item
except Exception as e:
print(e)
print("Failed to load config ", item)
def lambda_handler(event, context):
try:
language_codes = parse_parameters(event)
if "Configs" in event:
raw_configs = event["Configs"]
else:
with open('configs.json') as f:
data = json.loads(f.read())
raw_configs = data['Configs']
with table.batch_writer() as batch:
for config in raw_configs:
processed = process_config(config, language_codes)
batch.put_item(Item=processed)
return "success!"
except Exception as e:
print(e)
return "failed"
|
11518832
|
from subprocess import Popen, PIPE
class HighlightPipe:
    """ This Python package serves as an interface to the highlight utility.
    Input and output streams are handled with pipes.
    Command line parameter length is validated before use."""
    def __init__(self):
        self.cmd = 'highlight'
        self.src = ''
        self.options = dict()
        self.success = False
    def getResult(self):
        cmd = self.cmd
        for k, v in self.options.items():
            option = " --%s" % k
            if v != '1':
                option += "=%s" % v
            if len(option) < 50:
                cmd += option
        # universal_newlines=True lets us pass str (not bytes) through the pipes
        p = Popen(cmd, shell=True, bufsize=512, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True, universal_newlines=True)
        (child_stdin, child_stdout, child_stderr) = (p.stdin, p.stdout, p.stderr)
        child_stdin.write(self.src)
        child_stdin.close()
        err_msg = child_stderr.readlines()
        if len(err_msg) > 0:
            return err_msg
        self.success = True
        return child_stdout.readlines()
###############################################################################
def main():
    gen = HighlightPipe()
    gen.options['syntax'] = 'c'
    gen.options['style'] = 'vim'
    gen.options['enclose-pre'] = '1'
    gen.options['fragment'] = '1'
    gen.options['inline-css'] = '1'
    gen.src = 'int main ()\n{ return 0; }'
    print(gen.getResult())
    if not gen.success:
        print("Execution failed.")
if __name__=="__main__":
    main()
|
11518862
|
from time import time
from urllib.parse import parse_qs, urlsplit
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.db import IntegrityError
from django.test import RequestFactory
from django.test.utils import override_settings
from django.urls import reverse
from django_otp.tests import TestCase, ThrottlingTestMixin
from .admin import TOTPDeviceAdmin
from .models import TOTPDevice
class TOTPDeviceMixin:
"""
A TestCase helper that gives us a TOTPDevice to work with.
"""
# The next ten tokens
tokens = [<PASSWORD>, 6<PASSWORD>, 8<PASSWORD>, <PASSWORD>, <PASSWORD>, <PASSWORD>, 45675, 101397, 491039, 784503]
def setUp(self):
"""
Create a device at the fourth time step. The current token is <PASSWORD>.
"""
try:
self.alice = self.create_user(
'alice', 'password', email='<EMAIL>')
except IntegrityError:
self.skipTest("Unable to create the test user.")
else:
self.device = self.alice.totpdevice_set.create(
key='<KEY>', step=30,
t0=int(time() - (30 * 3)), digits=6, tolerance=0, drift=0
)
@override_settings(
OTP_TOTP_SYNC=False,
OTP_TOTP_THROTTLE_FACTOR=0,
)
class TOTPTest(TOTPDeviceMixin, TestCase):
def test_default_key(self):
device = self.alice.totpdevice_set.create()
# Make sure we can decode the key.
device.bin_key
def test_single(self):
results = [self.device.verify_token(token) for token in self.tokens]
self.assertEqual(results, [False] * 3 + [True] + [False] * 6)
def test_tolerance(self):
self.device.tolerance = 1
results = [self.device.verify_token(token) for token in self.tokens]
self.assertEqual(results, [False] * 2 + [True] * 3 + [False] * 5)
def test_drift(self):
self.device.tolerance = 1
self.device.drift = -1
results = [self.device.verify_token(token) for token in self.tokens]
self.assertEqual(results, [False] * 1 + [True] * 3 + [False] * 6)
def test_sync_drift(self):
self.device.tolerance = 2
with self.settings(OTP_TOTP_SYNC=True):
ok = self.device.verify_token(self.tokens[5])
self.assertTrue(ok)
self.assertEqual(self.device.drift, 2)
def test_no_reuse(self):
verified1 = self.device.verify_token(self.tokens[3])
verified2 = self.device.verify_token(self.tokens[3])
self.assertEqual(self.device.last_t, 3)
self.assertTrue(verified1)
self.assertFalse(verified2)
def test_config_url(self):
with override_settings(OTP_TOTP_ISSUER=None):
url = self.device.config_url
parsed = urlsplit(url)
params = parse_qs(parsed.query)
self.assertEqual(parsed.scheme, 'otpauth')
self.assertEqual(parsed.netloc, 'totp')
self.assertEqual(parsed.path, '/alice')
self.assertIn('secret', params)
self.assertNotIn('issuer', params)
def test_config_url_issuer(self):
with override_settings(OTP_TOTP_ISSUER='example.com'):
url = self.device.config_url
parsed = urlsplit(url)
params = parse_qs(parsed.query)
self.assertEqual(parsed.scheme, 'otpauth')
self.assertEqual(parsed.netloc, 'totp')
self.assertEqual(parsed.path, '/example.com%3Aalice')
self.assertIn('secret', params)
self.assertIn('issuer', params)
self.assertEqual(params['issuer'][0], 'example.com')
def test_config_url_issuer_spaces(self):
with override_settings(OTP_TOTP_ISSUER='Very Trustworthy Source'):
url = self.device.config_url
parsed = urlsplit(url)
params = parse_qs(parsed.query)
self.assertEqual(parsed.scheme, 'otpauth')
self.assertEqual(parsed.netloc, 'totp')
self.assertEqual(parsed.path, '/Very%20Trustworthy%20Source%3Aalice')
self.assertIn('secret', params)
self.assertIn('issuer', params)
self.assertEqual(params['issuer'][0], 'Very Trustworthy Source')
def test_config_url_issuer_method(self):
with override_settings(OTP_TOTP_ISSUER=lambda d: d.user.email):
url = self.device.config_url
parsed = urlsplit(url)
params = parse_qs(parsed.query)
self.assertEqual(parsed.scheme, 'otpauth')
self.assertEqual(parsed.netloc, 'totp')
self.assertEqual(parsed.path, '/alice%40example.com%3Aalice')
self.assertIn('secret', params)
self.assertIn('issuer', params)
self.assertEqual(params['issuer'][0], '<EMAIL>')
class TOTPAdminTest(TestCase):
def setUp(self):
"""
Create a device at the fourth time step. The current token is <PASSWORD>.
"""
try:
self.admin = self.create_user(
'admin', 'password', email='<EMAIL>',
is_staff=True
)
except IntegrityError:
self.skipTest("Unable to create the test user.")
else:
self.device = self.admin.totpdevice_set.create(
key='2a2bbba1092ffdd25a328ad1a0a5f5d61d7aacc4', step=30,
t0=int(time() - (30 * 3)), digits=6, tolerance=0, drift=0
)
self.device_admin = TOTPDeviceAdmin(TOTPDevice, AdminSite())
self.get_request = RequestFactory().get('/')
self.get_request.user = self.admin
def test_anonymous(self):
for suffix in ['config', 'qrcode']:
with self.subTest(view=suffix):
url = reverse('admin:otp_totp_totpdevice_' + suffix, kwargs={'pk': self.device.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
def test_unauthorized(self):
self.client.login(username='admin', password='password')
for suffix in ['config', 'qrcode']:
with self.subTest(view=suffix):
url = reverse('admin:otp_totp_totpdevice_' + suffix, kwargs={'pk': self.device.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
def test_view_perm(self):
self._add_device_perms('view_totpdevice')
self.client.login(username='admin', password='password')
for suffix in ['config', 'qrcode']:
with self.subTest(view=suffix):
url = reverse('admin:otp_totp_totpdevice_' + suffix, kwargs={'pk': self.device.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_change_perm(self):
self._add_device_perms('change_totpdevice')
self.client.login(username='admin', password='password')
for suffix in ['config', 'qrcode']:
with self.subTest(view=suffix):
url = reverse('admin:otp_totp_totpdevice_' + suffix, kwargs={'pk': self.device.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
@override_settings(OTP_ADMIN_HIDE_SENSITIVE_DATA=True)
def test_sensitive_information_hidden_while_adding_device(self):
fields = self._get_fields(device=None)
self.assertIn('key', fields)
self.assertNotIn('qrcode_link', fields)
@override_settings(OTP_ADMIN_HIDE_SENSITIVE_DATA=True)
def test_sensitive_information_hidden_while_changing_device(self):
fields = self._get_fields(device=self.device)
self.assertNotIn('key', fields)
self.assertNotIn('qrcode_link', fields)
@override_settings(OTP_ADMIN_HIDE_SENSITIVE_DATA=False)
def test_sensitive_information_shown_while_adding_device(self):
fields = self._get_fields(device=None)
self.assertIn('key', fields)
self.assertNotIn('qrcode_link', fields)
@override_settings(OTP_ADMIN_HIDE_SENSITIVE_DATA=False)
def test_sensitive_information_shown_while_changing_device(self):
fields = self._get_fields(device=self.device)
self.assertIn('key', fields)
self.assertIn('qrcode_link', fields)
@override_settings(OTP_ADMIN_HIDE_SENSITIVE_DATA=True)
def test_list_display_when_sensitive_information_hidden(self):
self.assertEqual(
self.device_admin.get_list_display(self.get_request),
['user', 'name', 'confirmed'],
)
@override_settings(OTP_ADMIN_HIDE_SENSITIVE_DATA=False)
def test_list_display_when_sensitive_information_shown(self):
self.assertEqual(
self.device_admin.get_list_display(self.get_request),
['user', 'name', 'confirmed', 'qrcode_link'],
)
@override_settings(OTP_ADMIN_HIDE_SENSITIVE_DATA=True)
def test_config_view_when_sensitive_information_hidden(self):
self._add_device_perms('change_totpdevice')
with self.assertRaises(PermissionDenied):
self.device_admin.config_view(self.get_request, self.device.id)
@override_settings(OTP_ADMIN_HIDE_SENSITIVE_DATA=False)
def test_config_view_when_sensitive_information_shown(self):
self._add_device_perms('change_totpdevice')
response = self.device_admin.config_view(self.get_request, self.device.id)
self.assertEqual(response.status_code, 200)
@override_settings(OTP_ADMIN_HIDE_SENSITIVE_DATA=True)
def test_qrcode_view_when_sensitive_information_hidden(self):
self._add_device_perms('change_totpdevice')
with self.assertRaises(PermissionDenied):
self.device_admin.qrcode_view(self.get_request, self.device.id)
@override_settings(OTP_ADMIN_HIDE_SENSITIVE_DATA=False)
def test_qrcode_view_when_sensitive_information_shown(self):
self._add_device_perms('change_totpdevice')
response = self.device_admin.qrcode_view(self.get_request, self.device.id)
self.assertEqual(response.status_code, 200)
#
# Helpers
#
def _add_device_perms(self, *codenames):
ct = ContentType.objects.get_for_model(TOTPDevice)
perms = [
Permission.objects.get(content_type=ct, codename=codename)
for codename in codenames
]
self.admin.user_permissions.add(*perms)
def _get_fields(self, device):
return {
field
for fieldset in self.device_admin.get_fieldsets(self.get_request, obj=device)
for field in fieldset[1]['fields']
}
@override_settings(
OTP_TOTP_THROTTLE_FACTOR=1,
)
class ThrottlingTestCase(TOTPDeviceMixin, ThrottlingTestMixin, TestCase):
def valid_token(self):
return self.tokens[3]
def invalid_token(self):
return -1
|
11518947
|
from __future__ import print_function
import unittest
import numpy
import irbasis
from irbasis_util.internal import *
class TestMethods(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestMethods, self).__init__(*args, **kwargs)
def test_o_to_matsubara(self):
for n in range(-10,10):
assert o_to_matsubara_idx_f(2*n+1) == n
assert o_to_matsubara_idx_b(2*n) == n
if __name__ == '__main__':
unittest.main()
|
11518964
|
import sublime
import traceback, threading, os, sys, imp, tarfile, zipfile, urllib.request, json, shutil
import node_variables
from animation_loader import AnimationLoader
from repeated_timer import RepeatedTimer
from main import NodeJS
from main import NPM
def check_thread_is_alive(thread_name) :
for thread in threading.enumerate() :
if thread.getName() == thread_name and thread.is_alive() :
return True
return False
def create_and_start_thread(target, thread_name, args=()) :
if not check_thread_is_alive(thread_name) :
thread = threading.Thread(target=target, name=thread_name, args=args)
thread.setDaemon(True)
thread.start()
return thread
return None
class DownloadNodeJS(object):
def __init__(self, node_version):
self.NODE_JS_VERSION = node_version
self.NODE_JS_TAR_EXTENSION = ".zip" if node_variables.NODE_JS_OS == "win" else ".tar.gz"
self.NODE_JS_BINARY_URL = "https://nodejs.org/dist/"+self.NODE_JS_VERSION+"/node-"+self.NODE_JS_VERSION+"-"+node_variables.NODE_JS_OS+"-"+node_variables.NODE_JS_ARCHITECTURE+self.NODE_JS_TAR_EXTENSION
self.NODE_JS_BINARY_TARFILE_NAME = self.NODE_JS_BINARY_URL.split('/')[-1]
self.NODE_JS_BINARY_TARFILE_FULL_PATH = os.path.join(node_variables.NODE_JS_BINARIES_FOLDER_PLATFORM, self.NODE_JS_BINARY_TARFILE_NAME)
self.animation_loader = AnimationLoader(["[= ]", "[ = ]", "[ = ]", "[ = ]", "[ =]", "[ = ]", "[ = ]", "[ = ]"], 0.067, "Downloading: "+self.NODE_JS_BINARY_URL+" ")
self.interval_animation = None
self.thread = None
def download(self):
try :
if os.path.exists(node_variables.NODE_JS_BINARIES_FOLDER_PLATFORM):
self.rmtree(node_variables.NODE_JS_BINARIES_FOLDER_PLATFORM)
os.makedirs(node_variables.NODE_JS_BINARIES_FOLDER_PLATFORM)
else :
os.makedirs(node_variables.NODE_JS_BINARIES_FOLDER_PLATFORM)
if os.path.exists(node_variables.NODE_MODULES_PATH):
self.rmtree(node_variables.NODE_MODULES_PATH)
request = urllib.request.Request(self.NODE_JS_BINARY_URL)
request.add_header('User-agent', r'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1')
with urllib.request.urlopen(request) as response :
with open(self.NODE_JS_BINARY_TARFILE_FULL_PATH, 'wb') as out_file :
shutil.copyfileobj(response, out_file)
except Exception as err :
traceback.print_exc()
self.on_error(err)
return
self.extract()
self.on_complete()
def start(self):
self.thread = create_and_start_thread(self.download, "DownloadNodeJS")
if self.animation_loader :
self.interval_animation = RepeatedTimer(self.animation_loader.sec, self.animation_loader.animate)
def extract(self):
sep = os.sep
if self.NODE_JS_TAR_EXTENSION != ".zip" :
with tarfile.open(self.NODE_JS_BINARY_TARFILE_FULL_PATH, "r:gz") as tar :
for member in tar.getmembers() :
member.name = sep.join(member.name.split(sep)[1:])
tar.extract(member, node_variables.NODE_JS_BINARIES_FOLDER_PLATFORM)
else :
if node_variables.NODE_JS_OS == "win" :
import string
from ctypes import windll, c_int, c_wchar_p
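                # Map an unused drive letter onto the deep extraction folder via
                # DefineDosDevice, so extracted paths stay below the Windows MAX_PATH
                # limit (a reading of the original intent; the mapping is undone in
                # the finally block).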
UNUSUED_DRIVE_LETTER = ""
for letter in string.ascii_uppercase:
if not os.path.exists(letter+":") :
UNUSUED_DRIVE_LETTER = letter+":"
break
if not UNUSUED_DRIVE_LETTER :
sublime.message_dialog("Can't install node.js and npm! UNUSUED_DRIVE_LETTER not found.")
return
DefineDosDevice = windll.kernel32.DefineDosDeviceW
DefineDosDevice.argtypes = [ c_int, c_wchar_p, c_wchar_p ]
DefineDosDevice(0, UNUSUED_DRIVE_LETTER, node_variables.NODE_JS_BINARIES_FOLDER_PLATFORM)
try:
with zipfile.ZipFile(self.NODE_JS_BINARY_TARFILE_FULL_PATH, "r") as zip_file :
for member in zip_file.namelist() :
if not member.endswith("/") :
with zip_file.open(member) as node_file:
with open(UNUSUED_DRIVE_LETTER + "\\"+ member.replace("node-"+self.NODE_JS_VERSION+"-"+node_variables.NODE_JS_OS+"-"+node_variables.NODE_JS_ARCHITECTURE+"/", ""), "wb+") as target :
shutil.copyfileobj(node_file, target)
elif not member.endswith("node-"+self.NODE_JS_VERSION+"-"+node_variables.NODE_JS_OS+"-"+node_variables.NODE_JS_ARCHITECTURE+"/"):
os.mkdir(UNUSUED_DRIVE_LETTER + "\\"+ member.replace("node-"+self.NODE_JS_VERSION+"-"+node_variables.NODE_JS_OS+"-"+node_variables.NODE_JS_ARCHITECTURE+"/", ""))
except Exception as e:
print("Error: "+traceback.format_exc())
finally:
DefineDosDevice(2, UNUSUED_DRIVE_LETTER, node_variables.NODE_JS_BINARIES_FOLDER_PLATFORM)
def rmtree(self, path) :
if node_variables.NODE_JS_OS == "win" :
import string
from ctypes import windll, c_int, c_wchar_p
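            # Same unused-drive-letter mapping trick as in extract(), so shutil.rmtree
            # can delete paths deeper than MAX_PATH (assumed intent).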
UNUSUED_DRIVE_LETTER = ""
for letter in string.ascii_uppercase:
if not os.path.exists(letter+":") :
UNUSUED_DRIVE_LETTER = letter+":"
break
if not UNUSUED_DRIVE_LETTER :
sublime.message_dialog("Can't remove node.js! UNUSUED_DRIVE_LETTER not found.")
return
DefineDosDevice = windll.kernel32.DefineDosDeviceW
DefineDosDevice.argtypes = [ c_int, c_wchar_p, c_wchar_p ]
DefineDosDevice(0, UNUSUED_DRIVE_LETTER, path)
try:
shutil.rmtree(UNUSUED_DRIVE_LETTER)
except Exception as e:
print("Error: "+traceback.format_exc())
finally:
DefineDosDevice(2, UNUSUED_DRIVE_LETTER, path)
else :
shutil.rmtree(path)
def on_error(self, err):
self.animation_loader.on_complete()
self.interval_animation.stop()
sublime.active_window().status_message("Can't install Node.js! Check your internet connection!")
def on_complete(self):
self.animation_loader.on_complete()
self.interval_animation.stop()
if os.path.isfile(self.NODE_JS_BINARY_TARFILE_FULL_PATH) :
os.remove(self.NODE_JS_BINARY_TARFILE_FULL_PATH)
node_js = NodeJS()
npm = NPM()
self.animation_loader = AnimationLoader(["[= ]", "[ = ]", "[ = ]", "[ = ]", "[ =]", "[ = ]", "[ = ]", "[ = ]"], 0.067, "Installing npm dependencies ")
self.interval_animation = RepeatedTimer(self.animation_loader.sec, self.animation_loader.animate)
try :
npm.getCurrentNPMVersion(True)
except Exception as e:
print("Error: "+traceback.format_exc())
try :
npm.install_all()
except Exception as e :
#print("Error: "+traceback.format_exc())
pass
self.animation_loader.on_complete()
self.interval_animation.stop()
if node_js.getCurrentNodeJSVersion(True) == self.NODE_JS_VERSION :
sublime.active_window().status_message("Node.js "+self.NODE_JS_VERSION+" installed correctly! NPM version: "+npm.getCurrentNPMVersion(True))
else :
sublime.active_window().status_message("Can't install Node.js! Something went wrong during installation.")
# def checkUpgrade():
# updateNPMDependencies()
# try :
# response = urllib.request.urlopen(node_variables.NODE_JS_VERSION_URL_LIST_ONLINE)
# data = json.loads(response.read().decode("utf-8"))
# nodejs_latest_version = data[0]["version"]
# node_js = NodeJS()
# if node_js.getCurrentNodeJSVersion(True) != nodejs_latest_version :
# sublime.active_window().status_message("There is a new version ( "+nodejs_latest_version+" ) of Node.js available! Change your settings to download this version.")
# else :
# try :
# npm = NPM()
# npm_version = npm.getCurrentNPMVersion(True)
# sublime.active_window().status_message("No need to update Node.js. Current version: "+node_js.getCurrentNodeJSVersion(True)+", npm: "+npm_version)
# except Exception as e:
# sublime.active_window().status_message("No need to update Node.js. Current version: "+node_js.getCurrentNodeJSVersion(True)+", npm not installed!")
# except Exception as err :
# traceback.print_exc()
def updateNPMDependencies():
npm = NPM()
try :
npm.getCurrentNPMVersion(True)
except Exception as e:
print("Error: "+traceback.format_exc())
return
#animation_loader = AnimationLoader(["[= ]", "[ = ]", "[ = ]", "[ = ]", "[ =]", "[ = ]", "[ = ]", "[ = ]"], 0.067, "Updating npm dependencies ")
#interval_animation = RepeatedTimer(animation_loader.sec, animation_loader.animate)
try :
npm.update_all(False)
except Exception as e:
pass
#animation_loader.on_complete()
#interval_animation.stop()
def already_installed():
return os.path.isfile(node_variables.NODE_JS_PATH_EXECUTABLE)
def can_start_download():
for thread in threading.enumerate() :
if thread.getName() == "DownloadNodeJS" and thread.is_alive() :
return False
return True
def install(node_version=""):
if node_version == "" :
node_version = node_variables.NODE_JS_VERSION
nodejs_can_start_download = can_start_download()
nodejs_already_installed = already_installed()
if nodejs_can_start_download and not nodejs_already_installed :
DownloadNodeJS( node_version ).start()
return
elif nodejs_can_start_download and nodejs_already_installed :
node_js = NodeJS()
if node_version != node_js.getCurrentNodeJSVersion(True) :
DownloadNodeJS( node_version ).start()
return
if nodejs_already_installed :
create_and_start_thread(updateNPMDependencies, "updateNPMDependencies")
|
11518992
|
import numpy as np
from sklearn.metrics import confusion_matrix
# y_test and y_pred are assumed to be defined by the surrounding context.
cf_10_3 = confusion_matrix(y_test, np.rint(y_pred))
print(cf_10_3)
|
11519005
|
import os
from conans import ConanFile, CMake
def get_version():
with open(os.path.join(os.path.dirname(__file__), 'version'), 'r') as f:
content = f.read()
try:
content = content.decode()
except AttributeError:
pass
return content.strip()
class Bip39Conan(ConanFile):
name = "bip39"
version = get_version()
license = "MIT"
author = "<NAME> <EMAIL>"
url = "https://github.com/edwardstock/bip3x"
description = "Bip39 mnemonic C++ implementation. Contains java and pure C bindings."
topics = ("bip39", "bip39-mnemonic", "bip44", "bip39-java")
settings = "os", "compiler", "build_type", "arch"
options = {
"shared": [True, False],
"enableJNI": [True, False],
"enableC": [True, False],
"with_openssl_rand": [True, False]
}
default_options = {
"shared": False,
"with_openssl_rand": False,
"enableJNI": False,
"enableC": False,
"toolbox:shared": False,
}
exports = "version"
exports_sources = (
"modules/*",
"include/*",
"cfg/*",
"tests/*",
"src/*",
"libs/*",
"CMakeLists.txt",
"conanfile.py",
"LICENSE",
"README.md",
)
generators = "cmake"
default_user = "edwardstock"
default_channel = "latest"
requires = (
"toolbox/3.2.3@edwardstock/latest"
)
build_requires = (
"gtest/1.8.1",
)
def source(self):
if "CONAN_LOCAL" not in os.environ:
self.run("rm -rf *")
self.run("git clone --recursive https://github.com/edwardstock/bip3x.git .")
def configure(self):
if self.settings.compiler == "Visual Studio":
del self.settings.compiler.runtime
if self.options.with_openssl_rand:
self.requires.add("openssl/1.1.1k")
def build(self):
cmake = CMake(self)
opts = {
'ENABLE_TEST': 'Off',
'CMAKE_BUILD_TYPE': 'Release',
'ENABLE_BIP39_C': 'Off',
'ENABLE_BIP39_JNI': 'Off',
'ENABLE_SHARED': 'Off',
'USE_OPENSSL_RANDOM': 'Off'
}
if self.options.shared:
opts['ENABLE_SHARED'] = 'On'
if self.options.enableJNI:
opts['ENABLE_BIP39_JNI'] = 'On'
if self.options.enableC:
opts['ENABLE_BIP39_C'] = 'On'
if self.options.with_openssl_rand:
opts["USE_OPENSSL_RANDOM"] = 'On'
opts['CMAKE_BUILD_TYPE'] = self.settings.get_safe("build_type")
cmake.configure(defs=opts)
if self.settings.compiler == "Visual Studio":
cmake.build(args=['--config', self.settings.get_safe("build_type")])
else:
cmake.build()
def package(self):
self.copy("*", dst="include", src="include", keep_path=True)
if self.options.enableC:
self.copy("*.h", dst="include", src="src/bindings", keep_path=True)
self.copy("*.hpp", dst="include", src="src/bindings", keep_path=True)
dir_types = ['bin', 'lib', 'Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel']
file_types = ['lib', 'dll', 'dll.a', 'a', 'so', 'exp', 'pdb', 'ilk', 'dylib']
for dirname in dir_types:
for ftype in file_types:
self.copy("*." + ftype, src=dirname, dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.includedirs = ['include']
self.cpp_info.libs = ['bip39']
if self.options.enableC:
self.cpp_info.libs.append('cbip39')
if self.options.enableJNI:
self.cpp_info.libs.append('bip39_jni')
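# Illustrative local build command (assumes Conan 1.x, which this recipe targets):
#   conan create . edwardstock/latest -o bip39:shared=True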
|
11519035
|
from roiextractors import Suite2pSegmentationExtractor
from ..basesegmentationextractorinterface import BaseSegmentationExtractorInterface
from ....utils.json_schema import FilePathType, IntType
class Suite2pSegmentationInterface(BaseSegmentationExtractorInterface):
"""Data interface for Suite2pSegmentationExtractor."""
SegX = Suite2pSegmentationExtractor
def __init__(self, file_path: FilePathType, combined: bool = False, plane_no: IntType = 0):
super().__init__(file_path=file_path, combined=combined, plane_no=plane_no)
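# Example (illustrative; the path below is a placeholder, not a real file):
#   interface = Suite2pSegmentationInterface(file_path="path/to/suite2p/plane0", plane_no=0)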
|
11519047
|
from default_config import basic_cfg
cfg = basic_cfg
cfg.train_df = cfg.data_dir + "train_meta_4folded_v1.csv"
cfg.val_df = cfg.data_dir + "train_soundscape_labels_v2.csv"
# dataset
cfg.min_rating = 2.0
cfg.wav_crop_len = 30 # seconds
cfg.lr = 0.0001
cfg.epochs = 20
cfg.batch_size = 32
cfg.batch_size_val = 1
cfg.dataset = "ps_ds_2_inf"
cfg.model = "ps_model_3_inf2"
cfg.backbone = "resnet34"
cfg.num_workers = 32
cfg.save_val_data = True
cfg.mixed_precision = True
cfg.mixup = True
cfg.mix_beta = 1
|
11519061
|
import logging
from texts.models import Subscriber
from twilio import TwilioRestException
from util.showerthoughts import get_todays_thought
from util.texter import DuplicateTextException, Texter
def subscribe(sms_number):
if not sms_number:
return 'You sent nothing yo.'
    sms_number = ''.join(filter(str.isdigit, str(sms_number)))
subscriber, created = Subscriber.objects.get_or_create(sms_number=sms_number)
texter = Texter()
if not created:
if subscriber.expired:
# yay! a renewal
subscriber.renew()
subscriber.save()
thought = get_todays_thought()
try:
texter.send_text(subscriber, thought.thought_text, thought.post_id)
except TwilioRestException as e:
subscriber.active = False
subscriber.save()
logging.error('Exception sending number to: ' + subscriber.sms_number + ' - ' + str(e))
return 'I couldn\'t send a text to that number! (' + str(e.msg) + ')'
except DuplicateTextException:
# no prob, they already got todays message
pass
return 'Welcome back! Check your phone!'
elif not subscriber.active:
        # technically they could be blacklisted, but I can't do anything about that
return 'Did you reply STOP? Reply START and try again.'
else:
return 'You\'re already subscribed, yo.'
try:
message = "Cool! Welcome to ShowerTexts.com! You'll start receiving Shower Texts daily. " \
"Reply STOP at any time if you get sick of them. " \
"Your first one will follow..."
texter.send_text(subscriber, message, 'initial')
except TwilioRestException as e:
logging.error('Exception sending number to: ' + subscriber.sms_number + ' - ' + str(e))
return 'I couldn\'t send a text to that number! (' + str(e.msg) + ')'
except DuplicateTextException:
logging.warning('Duplicate welcome text.')
thought = get_todays_thought()
try:
texter.send_text(subscriber, thought.thought_text, thought.post_id)
except TwilioRestException as e:
logging.error('Exception sending number to: ' + subscriber.sms_number + ' - ' + str(e))
return 'I couldn\'t send a text to that number! (' + str(e.msg) + ')'
except DuplicateTextException:
logging.error('Duplicate initial thought. Shouldn\'t happen - odd.')
return 'Cool! Check your phone!'
|
11519091
|
import unittest
from carbon.aggregator.rules import AggregationRule
class AggregationRuleTest(unittest.TestCase):
def test_inclusive_regexes(self):
"""
Test case for https://github.com/graphite-project/carbon/pull/120
Consider the two rules:
aggregated.hist.p99 (10) = avg hosts.*.hist.p99
aggregated.hist.p999 (10) = avg hosts.*.hist.p999
Before the abovementioned patch the second rule would be treated as
expected but the first rule would lead to an aggregated metric
aggregated.hist.p99 which would in fact be equivalent to
avgSeries(hosts.*.hist.p99,hosts.*.hist.p999).
"""
method = 'avg'
frequency = 10
input_pattern = 'hosts.*.hist.p99'
output_pattern = 'aggregated.hist.p99'
rule99 = AggregationRule(input_pattern, output_pattern,
method, frequency)
input_pattern = 'hosts.*.hist.p999'
output_pattern = 'aggregated.hist.p999'
rule999 = AggregationRule(input_pattern, output_pattern,
method, frequency)
self.assertEqual(rule99.get_aggregate_metric('hosts.abc.hist.p99'),
'aggregated.hist.p99')
self.assertEqual(rule99.get_aggregate_metric('hosts.abc.hist.p999'),
None)
self.assertEqual(rule999.get_aggregate_metric('hosts.abc.hist.p99'),
None)
self.assertEqual(rule999.get_aggregate_metric('hosts.abc.hist.p999'),
'aggregated.hist.p999')
|
11519092
|
import unittest
import mock
from rollingpin.args import (
parse_args,
make_profile_parser,
construct_canonical_commandline,
)
class TestArgumentParsing(unittest.TestCase):
def setUp(self):
self.config = {
"deploy": {
"default-parallel": 5,
"default-sleeptime": 2,
"execution-timeout": 60,
},
"harold": {
"base-url": "http://example.com",
"secret": None,
},
}
# -h
def test_no_args(self):
with self.assertRaises(SystemExit):
parse_args(self.config, [])
def test_host_list(self):
args = parse_args(self.config, ["-h", "a", "b", "c"])
self.assertEqual(args.host_refs, ["a", "b", "c"])
def test_multiple_host_lists(self):
args = parse_args(self.config, ["-h", "a", "-h", "b"])
self.assertEqual(args.host_refs, ["a", "b"])
# --parallel
def test_parallel_default(self):
args = parse_args(self.config, ["-h", "a"])
self.assertEqual(args.parallel, 5)
def test_parallel_override(self):
args = parse_args(self.config, ["-h", "a", "--parallel", "3"])
self.assertEqual(args.parallel, 3)
# --sleeptime
def test_sleeptime_default(self):
args = parse_args(self.config, ["-h", "a"])
self.assertEqual(args.sleeptime, 2)
def test_sleeptime_override(self):
args = parse_args(self.config, ["-h", "a", "--sleeptime", "1"])
self.assertEqual(args.sleeptime, 1)
# --list
def test_list_default(self):
args = parse_args(self.config, ["-h", "a"])
self.assertFalse(args.list_hosts)
def test_list_flagged(self):
args = parse_args(self.config, ["-h", "a", "--list"])
self.assertTrue(args.list_hosts)
# --no-harold
def test_harold_default(self):
args = parse_args(self.config, ["-h", "a"])
self.assertTrue(args.notify_harold)
def test_harold_disabled(self):
args = parse_args(self.config, ["-h", "a", "--really-no-harold"])
self.assertFalse(args.notify_harold)
# -v / --verbose
def test_verbose_default(self):
args = parse_args(self.config, ["-h", "a"])
self.assertFalse(args.verbose_logging)
def test_verbose_flagged_short(self):
args = parse_args(self.config, ["-h", "a", "-v"])
self.assertTrue(args.verbose_logging)
def test_verbose_flagged_long(self):
args = parse_args(self.config, ["-h", "a", "--verbose"])
self.assertTrue(args.verbose_logging)
# --dangerously-fast
def test_dangerously_fast_default(self):
args = parse_args(self.config, ["-h", "a"])
self.assertFalse(args.dangerously_fast)
def test_dangerously_fast_on(self):
args = parse_args(self.config, ["-h", "a", "--dangerously-fast"])
self.assertTrue(args.dangerously_fast)
# -d
def test_empty_deploys(self):
args = parse_args(self.config, ["-h", "a"])
self.assertEqual(args.components, [])
def test_one_deploy(self):
args = parse_args(self.config, ["-h", "a", "-d", "comp"])
self.assertEqual(args.components, ["comp"])
def test_double_deploy(self):
args = parse_args(self.config, ["-h", "a", "-d", "comp", "comp2"])
self.assertEqual(args.components, ["comp", "comp2"])
def test_multiple_deploys(self):
args = parse_args(self.config,
["-h", "a", "-d", "comp", "-d", "comp2"])
self.assertEqual(args.components, ["comp", "comp2"])
# -r
def test_one_restart(self):
args = parse_args(self.config, ["-h", "a", "-r", "all"])
self.assertEqual(cmdline(args.commands), [["restart", "all"]])
def test_multi_restart(self):
args = parse_args(self.config, ["-h", "a", "-r", "all", "-r", "more"])
self.assertEqual(
cmdline(args.commands), [["restart", "all"], ["restart", "more"]])
# -c
def test_no_commands(self):
args = parse_args(self.config, ["-h", "a"])
self.assertEqual(cmdline(args.commands), [])
def test_simple_command(self):
args = parse_args(self.config, ["-h", "a", "-c", "test"])
self.assertEqual(cmdline(args.commands), [["test"]])
def test_command_with_args(self):
args = parse_args(self.config, ["-h", "a", "-c", "test", "args"])
self.assertEqual(cmdline(args.commands), [["test", "args"]])
# mixup
def test_commands_together(self):
args = parse_args(
self.config, ["-h", "a", "-c", "test", "args", "-r", "all"])
self.assertEqual(cmdline(args.commands), [["test", "args"], ["restart", "all"]])
class TestProfileArguments(unittest.TestCase):
def setUp(self):
self.config = {
"deploy": {
"default-parallel": 5,
"default-sleeptime": 2,
"execution-timeout": 60,
},
"harold": {
"base-url": "http://example.com",
"secret": None,
},
}
self.profiles = ["foo", "bar", "baz"]
with mock.patch('rollingpin.args._get_available_profiles', return_value=self.profiles):
self.profile_parser = make_profile_parser()
def test_profiles_arg(self):
args = ["foo", "-h", "a", "-c", "test"]
profile_info, args = self.profile_parser.parse_known_args(args=args)
full_args = parse_args(self.config, args)
self.assertEqual(profile_info.profile, "foo")
self.assertEqual(cmdline(full_args.commands), [["test"]])
def test_invalid_profile(self):
args = ["bad", "-h", "a"]
with self.assertRaises(SystemExit):
profile_info, _ = self.profile_parser.parse_known_args(args=args)
class TestArgumentReconstruction(unittest.TestCase):
def setUp(self):
self.config = {
"deploy": {
"default-parallel": 5,
"default-sleeptime": 2,
"execution-timeout": 60,
},
"harold": {
"base-url": "http://example.com",
"secret": None,
},
}
def test_single_host(self):
args = parse_args(self.config, ["-h", "host"])
canonical = construct_canonical_commandline(self.config, args)
self.assertEqual("-h host --parallel=5 --timeout=60", canonical)
def test_multiple_hosts(self):
args = parse_args(self.config, ["-h", "host", "host2"])
canonical = construct_canonical_commandline(self.config, args)
self.assertEqual("-h host host2 --parallel=5 --timeout=60", canonical)
def test_multiple_dash_hs(self):
args = parse_args(self.config, ["-h", "host", "-h", "host2"])
canonical = construct_canonical_commandline(self.config, args)
self.assertEqual("-h host host2 --parallel=5 --timeout=60", canonical)
def test_parallel(self):
args = parse_args(self.config, ["-h", "host", "--parallel", "1"])
canonical = construct_canonical_commandline(self.config, args)
self.assertEqual("-h host --parallel=1 --timeout=60", canonical)
def test_sleeptime(self):
args = parse_args(self.config, ["-h", "host", "--sleeptime", "5"])
canonical = construct_canonical_commandline(self.config, args)
self.assertEqual(
"-h host --parallel=5 --sleeptime=5 --timeout=60", canonical)
def test_no_harold(self):
args = parse_args(self.config, ["-h", "host", "--really-no-harold"])
canonical = construct_canonical_commandline(self.config, args)
self.assertEqual(
"-h host --parallel=5 --timeout=60 --really-no-harold", canonical)
def test_single_deploy(self):
args = parse_args(self.config, ["-h", "host", "-d", "component"])
canonical = construct_canonical_commandline(self.config, args)
self.assertEqual(
"-h host --parallel=5 --timeout=60 -d component", canonical)
def test_multiple_deploys(self):
args = parse_args(self.config,
["-h", "host", "-d", "component", "component2"])
canonical = construct_canonical_commandline(self.config, args)
self.assertEqual(
"-h host --parallel=5 --timeout=60 -d component component2",
canonical
)
def test_multiple_dash_ds(self):
args = parse_args(
self.config, ["-h", "host", "-d", "component", "-d", "component2"])
canonical = construct_canonical_commandline(self.config, args)
self.assertEqual(
"-h host --parallel=5 --timeout=60 -d component component2",
canonical
)
def test_restart(self):
args = parse_args(self.config, ["-h", "host", "-r", "component"])
canonical = construct_canonical_commandline(self.config, args)
self.assertEqual(
"-h host --parallel=5 --timeout=60 -r component", canonical)
def test_multi_restart(self):
args = parse_args(self.config,
["-h", "host", "-r", "com1", "-r", "com2"])
canonical = construct_canonical_commandline(self.config, args)
self.assertEqual(
"-h host --parallel=5 --timeout=60 -r com1 -r com2", canonical)
def test_simple_command(self):
args = parse_args(self.config, ["-h", "host", "-c", "cmd"])
canonical = construct_canonical_commandline(self.config, args)
self.assertEqual("-h host --parallel=5 --timeout=60 -c cmd", canonical)
def test_command_with_args(self):
args = parse_args(self.config, ["-h", "host", "-c", "cmd", "arg"])
canonical = construct_canonical_commandline(self.config, args)
self.assertEqual(
"-h host --parallel=5 --timeout=60 -c cmd arg", canonical)
def test_multiple_commands(self):
args = parse_args(self.config,
["-h", "host", "-c", "cmd1", "-c", "cmd2"])
canonical = construct_canonical_commandline(self.config, args)
self.assertEqual(
"-h host --parallel=5 --timeout=60 -c cmd1 -c cmd2", canonical)
def test_verbose(self):
args = parse_args(self.config, ["-h", "host", "-v"])
canonical = construct_canonical_commandline(self.config, args)
self.assertEqual(
"-h host --parallel=5 --timeout=60 --verbose", canonical)
# Helper
def cmdline(cmds):
return [cmd.cmdline() for cmd in cmds]
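# Illustrative sketch (not part of the original tests): the canonical command
# line is a normalized re-serialization of the parsed args, with defaults made
# explicit, e.g.
#
#     args = parse_args(config, ["-h", "host", "-d", "comp"])
#     construct_canonical_commandline(config, args)
#     # -> "-h host --parallel=5 --timeout=60 -d comp"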
|
11519094
|
import os
import sys
import io
from mock import *
from .gp_unittest import *
from gppylib.programs.clsAddMirrors import GpAddMirrorsProgram, ProgramArgumentValidationException
from gparray import Segment, GpArray
from gppylib.system.environment import GpCoordinatorEnvironment
from gppylib.system.configurationInterface import GpConfigurationProvider
class GpAddMirrorsTest(GpTestCase):
def setUp(self):
# gpaddmirrors itself has no .py extension, so these tests exercise
# the underlying GpAddMirrorsProgram class imported from
# gppylib.programs.clsAddMirrors instead of importing the script.
self.subject = GpAddMirrorsProgram(None)
self.gparrayMock = self._createGpArrayWith2Primary2Mirrors()
self.gparray_get_segments_by_hostname = dict(sdw1=[self.primary0])
self.apply_patches([
patch('builtins.input'),
patch('gppylib.programs.clsAddMirrors.base.WorkerPool'),
patch('gppylib.programs.clsAddMirrors.logger', return_value=Mock(spec=['log', 'info', 'debug', 'error'])),
patch('gppylib.programs.clsAddMirrors.log_to_file_only', return_value=Mock()),
patch('gppylib.programs.clsAddMirrors.GpCoordinatorEnvironment', return_value=Mock(), spec=GpCoordinatorEnvironment),
patch('gppylib.system.faultProberInterface.getFaultProber'),
patch('gppylib.programs.clsAddMirrors.configInterface.getConfigurationProvider', return_value=Mock()),
patch('gppylib.programs.clsAddMirrors.heapchecksum.HeapChecksum'),
patch('gppylib.gparray.GpArray.getSegmentsByHostName', return_value=self.gparray_get_segments_by_hostname),
])
self.input_mock = self.get_mock_from_apply_patch("input")
self.mock_logger = self.get_mock_from_apply_patch('logger')
self.gpCoordinatorEnvironmentMock = self.get_mock_from_apply_patch("GpCoordinatorEnvironment")
self.gpCoordinatorEnvironmentMock.return_value.getCoordinatorPort.return_value = 123456
self.gpCoordinatorEnvironmentMock.return_value.getCoordinatorDataDir.return_value = "/data/coordinator/gpseg-1"
self.getConfigProviderFunctionMock = self.get_mock_from_apply_patch('getConfigurationProvider')
self.config_provider_mock = Mock(spec=GpConfigurationProvider)
self.getConfigProviderFunctionMock.return_value = self.config_provider_mock
self.config_provider_mock.initializeProvider.return_value = self.config_provider_mock
self.config_provider_mock.loadSystemConfig.return_value = self.gparrayMock
self.mock_heap_checksum = self.get_mock_from_apply_patch('HeapChecksum')
self.mock_heap_checksum.return_value.get_coordinator_value.return_value = 1
self.mock_heap_checksum.return_value.get_segments_checksum_settings.return_value = ([1], [0])
self.mock_heap_checksum.return_value.are_segments_consistent.return_value = False
self.coordinator.heap_checksum = 1
self.mock_heap_checksum.return_value.check_segment_consistency.return_value = (
[self.primary0], [], self.coordinator.heap_checksum)
self.cdd = os.getenv("COORDINATOR_DATA_DIRECTORY")
if not self.cdd:
self.cdd = "/Users/pivotal/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1"
os.environ["COORDINATOR_DATA_DIRECTORY"] = self.cdd
self.parser = GpAddMirrorsProgram.createParser()
def tearDown(self):
super(GpAddMirrorsTest, self).tearDown()
def test_validate_heap_checksum_succeeds_if_cluster_consistent(self):
sys.argv = ['gpaddmirrors', '-a']
options, _ = self.parser.parse_args()
self.subject = GpAddMirrorsProgram(options)
self.mock_heap_checksum.return_value.get_segments_checksum_settings.return_value = ([1], [1])
self.mock_heap_checksum.return_value.are_segments_consistent.return_value = True
self.mock_heap_checksum.return_value.check_segment_consistency.return_value = ([2], [], 1)
self.subject.validate_heap_checksums(self.gparrayMock)
self.mock_logger.info.assert_any_call("Heap checksum setting consistent across cluster")
def test_run_calls_validate_heap_checksum(self):
self.primary0.heap_checksum = 1
self.primary1.heap_checksum = 0
self.coordinator.heap_checksum = 1
self.mock_heap_checksum.return_value.check_segment_consistency.return_value = (
[self.primary0], [self.primary1], self.coordinator.heap_checksum)
sys.argv = ['gpaddmirrors', '-a']
options, args = self.parser.parse_args()
command_obj = self.subject.createProgram(options, args)
with self.assertRaisesRegex(Exception, 'Segments have heap_checksum set inconsistently to coordinator'):
command_obj.run()
def test_option_batch_of_size_0_will_raise(self):
sys.argv = ['gpaddmirrors', '-B', '0']
options, _ = self.parser.parse_args()
self.subject = GpAddMirrorsProgram(options)
with self.assertRaises(ProgramArgumentValidationException):
self.subject.run()
@patch('sys.stdout', new_callable=io.StringIO)
def test_option_version(self, mock_stdout):
sys.argv = ['gpaddmirrors', '--version']
with self.assertRaises(SystemExit) as cm:
options, _ = self.parser.parse_args()
self.assertIn("gpaddmirrors version $Revision$", mock_stdout.getvalue())
self.assertEqual(cm.exception.code, 0)
def test_generated_file_contains_default_port_offsets(self):
datadir_config = _write_datadir_config(self.cdd)
mirror_config_output_file = "/tmp/test_gpaddmirrors.config"
sys.argv = ['gpaddmirrors', '-o', mirror_config_output_file, '-m', datadir_config]
self.config_provider_mock.loadSystemConfig.return_value = GpArray([self.coordinator, self.primary0, self.primary1])
options, _ = self.parser.parse_args()
self.subject = GpAddMirrorsProgram(options)
self.subject.run()
with open(mirror_config_output_file, 'r') as fp:
result = fp.readlines()
self.assertIn("41000", result[0])
def test_generated_file_contains_port_offsets(self):
datadir_config = _write_datadir_config(self.cdd)
mirror_config_output_file = "/tmp/test_gpaddmirrors.config"
sys.argv = ['gpaddmirrors', '-p', '5000', '-o', mirror_config_output_file, '-m', datadir_config]
options, _ = self.parser.parse_args()
self.config_provider_mock.loadSystemConfig.return_value = GpArray([self.coordinator, self.primary0, self.primary1])
self.subject = GpAddMirrorsProgram(options)
self.subject.run()
with open(mirror_config_output_file, 'r') as fp:
result = fp.readlines()
self.assertIn("45000", result[0])
def test_datadir_interview(self):
self.input_mock.side_effect = ["/tmp/datadirs/mirror1", "/tmp/datadirs/mirror2", "/tmp/datadirs/mirror3"]
sys.argv = ['gpaddmirrors', '-p', '5000']
options, _ = self.parser.parse_args()
self.config_provider_mock.loadSystemConfig.return_value = GpArray([self.coordinator, self.primary0, self.primary1])
self.subject = GpAddMirrorsProgram(options)
directories = self.subject._GpAddMirrorsProgram__getDataDirectoriesForMirrors(3, None)
self.assertEqual(len(directories), 3)
def _createGpArrayWith2Primary2Mirrors(self):
self.coordinator = Segment.initFromString(
"1|-1|p|p|s|u|cdw|cdw|5432|/data/coordinator")
self.primary0 = Segment.initFromString(
"2|0|p|p|s|u|sdw1|sdw1|40000|/Users/pivotal/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1")
self.primary1 = Segment.initFromString(
"3|1|p|p|s|u|sdw2|sdw2|40001|/data/primary1")
mirror0 = Segment.initFromString(
"4|0|m|m|s|u|sdw2|sdw2|50000|/data/mirror0")
mirror1 = Segment.initFromString(
"5|1|m|m|s|u|sdw1|sdw1|50001|/data/mirror1")
return GpArray([self.coordinator, self.primary0, self.primary1, mirror0, mirror1])
def _write_datadir_config(cdd):
cdd_parent_parent = os.path.realpath(os.path.join(cdd, "..", ".."))
mirror_data_dir = os.path.join(cdd_parent_parent, 'mirror')
if not os.path.exists(mirror_data_dir):
os.mkdir(mirror_data_dir)
datadir_config = '/tmp/gpaddmirrors_datadir_config'
contents = \
"""
{0}
""".format(mirror_data_dir)
with open(datadir_config, 'w') as fp:
fp.write(contents)
return datadir_config
if __name__ == '__main__':
run_tests()
|
11519115
|
StringType = getmeta("")
ListType = getmeta([])
DictType = getmeta({})
class Exception:
def __init__(self, *args):
self.args = args
def __repr__(self):
return str(self.args)
class ImportError(Exception):
pass
def startswith(self, prefix):
return self.find(prefix) == 0
StringType['startswith'] = startswith
def format(s, d):
r = []
i = 0
j = 0
n = len(s)
while i < n:
if s[i] == '{':
r.append(s[j:i])
j = i
while j < n:
if s[j] == '}':
j = j + 1
break
j = j + 1
spec = s[i+1:j-1]
#name, fmt = spec.split(':')
foo = d[spec] # eval requires the compiler: eval(spec, d)
#foo = spec
# print('foo', foo, spec, d)
r.append(str(foo))
i = j - 1
i = i + 1
r.append(s[j:i])
return ''.join(r)
StringType['format'] = format
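# Illustrative sketch (not in the original): format() above is plain Python,
# so its substitution behavior can be checked directly, independent of the
# metatable installation:
#
#     format('hello {name}', {'name': 'world'})   # -> 'hello world'
#     format('{a} + {b}', {'a': 1, 'b': 2})       # -> '1 + 2'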
|
11519125
|
from tdw.controller import Controller
from tdw.tdw_utils import TDWUtils
from tdw.version import __version__
import docker
import time
import os
import subprocess
"""
Using NVIDIA Flex in a docker container, drape a cloth over an object.
"""
class DockerController(Controller):
def __init__(self):
client = docker.from_env()
client.images.pull(f"tdw:{__version__}")
# This method is preferred to subprocess, but the gpu flag is currently
# not supported by docker-py (see PR: #2419 in the docker-py repo)
# client.containers.run("tdw:v1.6.0",
# network="host",
# environment=["DISPLAY=$DISPLAY"],
# gpus="all")
subprocess.Popen(["docker", "run",
"--rm",
"--gpus", "all",
"-v", "/tmp/.X11-unix:/tmp/.X11-unix",
"-e", "DISPLAY={}".format(os.environ["DISPLAY"]),
"--network", "host",
"-t", f"tdw:{__version__}"],
shell=False)
super().__init__()
def run(self):
# Load procedurally generated room
self.start()
self.communicate(TDWUtils.create_empty_room(12, 12))
# Create the Flex container.
self.communicate({"$type": "create_flex_container",
"collision_distance": 0.001,
"static_friction": 1.0,
"dynamic_friction": 1.0,
"iteration_count": 3,
"substep_count": 8,
"radius": 0.1875,
"damping": 0,
"drag": 0})
# Create the avatar.
# Teleport the avatar.
# Look at the target position.
self.communicate(TDWUtils.create_avatar(position={"x": 2.0, "y": 1, "z": 1},
look_at={"x": -1.2, "y": 0.5, "z": 0}))
# Create the solid object.
solid_id = self.add_object("rh10",
position={"x": -1.2, "y": 0, "z": 0},
rotation={"x": 0.0, "y": -90.0, "z": 0.0})
# Make the object kinematic.
self.communicate({"$type": "set_kinematic_state",
"id": solid_id})
# Assign the object a FlexActor.
# Assign the object a Flex container.
self.communicate([{"$type": "set_flex_solid_actor",
"id": solid_id,
"mass_scale": 100.0,
"particle_spacing": 0.035},
{"$type": "assign_flex_container",
"id": solid_id,
"container_id": 0}])
# Set the Flex scale.
self.communicate({"$type": "set_flex_object_scale",
"id": solid_id,
"scale": {"x": 1, "y": 1, "z": 1}})
# Create the cloth.
cloth_id = self.add_object("cloth_square",
position={"x": -1.2, "y": 1.0, "z": 0},
library="models_special.json")
# Make the cloth kinematic.
self.communicate({"$type": "set_kinematic_state",
"id": cloth_id})
# Assign the cloth a FlexActor.
# Assign the cloth a Flex container.
self.communicate([{"$type": "set_flex_cloth_actor",
"id": cloth_id,
"mass_scale": 1,
"mesh_tesselation": 1,
"tether_stiffness": 0.5,
"bend_stiffness": 1.0,
"self_collide": False,
"stretch_stiffness": 1.0},
{"$type": "assign_flex_container",
"id": cloth_id,
"container_id": 0}
])
# Set the Flex scale.
self.communicate({"$type": "set_flex_object_scale",
"id": cloth_id,
"scale": {"x": 1, "y": 1, "z": 1}})
# Iterate for 500 frames.
for i in range(500):
self.communicate({"$type": "step_physics", "frames": 0})
if __name__ == "__main__":
DockerController().run()
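# Hedged note (not in the original): docker-py merged PR #2419 in release 4.3,
# which exposes GPU access through device_requests. With a new enough
# docker-py, the subprocess call above could become something like:
#
#     client.containers.run(f"tdw:{__version__}",
#                           network="host",
#                           environment=[f"DISPLAY={os.environ['DISPLAY']}"],
#                           volumes={"/tmp/.X11-unix": {"bind": "/tmp/.X11-unix"}},
#                           device_requests=[docker.types.DeviceRequest(
#                               count=-1, capabilities=[["gpu"]])])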
|
11519145
|
from .code import CodeUtil
from .mol import MolUtil
from .tud import TUUtil
DATASET_UTILS = {
'ogbg-code': CodeUtil,
'ogbg-code2': CodeUtil,
'ogbg-molhiv': MolUtil,
'ogbg-molpcba': MolUtil,
'NCI1': TUUtil,
'NCI109': TUUtil,
}
|
11519209
|
import torch
from torch import nn
from torch.cuda import amp
from quickvision import utils
from tqdm import tqdm
import time
from collections import OrderedDict
from quickvision.models.detection.utils import _evaluate_iou, _evaluate_giou
__all__ = ["train_step", "val_step", "fit", "train_sanity_fit",
"val_sanity_fit", "sanity_fit", ]
def train_step(model: nn.Module, train_loader, device: str, optimizer,
scheduler=None, num_batches: int = None,
log_interval: int = 100, scaler=None,):
"""
Performs one step of training. Calculates loss, forward pass, computes gradient and returns metrics.
Args:
model : PyTorch RetinaNet Model.
train_loader : Train loader.
device : "cuda" or "cpu"
optimizer : Torch optimizer to train.
scheduler : Learning rate scheduler.
num_batches : (optional) Integer To limit training to certain number of batches.
log_interval : (optional) Default 100. Log progress after this many batches.
scaler : (optional) Pass torch.cuda.amp.GradScaler() for fp16 mixed-precision training.
"""
model = model.to(device)
start_train_step = time.time()
model.train()
last_idx = len(train_loader) - 1
batch_time_m = utils.AverageMeter()
cnt = 0
batch_start = time.time()
metrics = OrderedDict()
total_loss = utils.AverageMeter()
loss_classifier = utils.AverageMeter()
loss_box_reg = utils.AverageMeter()
for batch_idx, (inputs, targets) in enumerate(train_loader):
last_batch = batch_idx == last_idx
images = list(image.to(device) for image in inputs)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
# zero the parameter gradients
optimizer.zero_grad()
if scaler is not None:
with amp.autocast():
loss_dict = model(images, targets)
loss = sum(loss_v for loss_v in loss_dict.values())
scaler.scale(loss).backward()
# Step using scaler.step()
scaler.step(optimizer)
# Update for next iteration
scaler.update()
else:
loss_dict = model(images, targets)
loss = sum(loss_v for loss_v in loss_dict.values())
loss.backward()
optimizer.step()
if scheduler is not None:
scheduler.step()
cnt += 1
total_loss.update(loss.item())
loss_classifier.update(loss_dict["classification"].item())
loss_box_reg.update(loss_dict["bbox_regression"].item())
batch_time_m.update(time.time() - batch_start)
batch_start = time.time()
if last_batch or batch_idx % log_interval == 0:  # if we reach the log interval
print("Batch Train Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) ".format(
batch_time=batch_time_m,))
# "loss classifier: {loss_d.loss_classifier:>7.4f} "
# "loss box_reg: {loss_d.loss_box_reg:>7.4f} "
# "loss objectness: {loss_d.loss_objectness:>7.4f} "
# "loss rpn box reg: {loss_d.loss_rpn_box_reg:>7.4f}"
if num_batches is not None:
if cnt >= num_batches:
end_train_step = time.time()
metrics["total_loss"] = total_loss.avg
metrics["loss_classifier"] = loss_classifier.avg
metrics["loss_box_reg"] = loss_box_reg.avg
print(f"Done till {num_batches} train batches")
print(f"Time taken for Training step = {end_train_step - start_train_step} sec")
return metrics
end_train_step = time.time()
metrics["total_loss"] = total_loss.avg
metrics["loss_classifier"] = loss_classifier.avg
metrics["loss_box_reg"] = loss_box_reg.avg
print(f"Time taken for Training step = {end_train_step - start_train_step} sec")
return metrics
def val_step(model: nn.Module, val_loader, device: str,
num_batches=None, log_interval: int = 100):
"""
Performs one step of validation. Calculates loss, forward pass and returns metrics.
Args:
model : PyTorch RetinaNet Model.
val_loader : Validation loader.
device : "cuda" or "cpu"
num_batches : (optional) Integer To limit validation to certain number of batches.
log_interval : (optional) Default 100. Log progress after this many batches.
"""
model = model.to(device)
start_val_step = time.time()
last_idx = len(val_loader) - 1
batch_time_m = utils.AverageMeter()
cnt = 0
model.eval()
batch_start = time.time()
metrics = OrderedDict()
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(val_loader):
last_batch = batch_idx == last_idx
images = list(image.to(device) for image in inputs)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
out = model(images)
iou = torch.stack([_evaluate_iou(t, o) for t, o in zip(targets, out)]).mean()
giou = torch.stack([_evaluate_giou(t, o) for t, o in zip(targets, out)]).mean()
cnt += 1
batch_time_m.update(time.time() - batch_start)
batch_start = time.time()
if last_batch or batch_idx % log_interval == 0:  # if we reach the log interval
print("Batch Validation Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) ".format(
batch_time=batch_time_m,))
if num_batches is not None:
if cnt >= num_batches:
avg_iou = torch.stack([iou]).mean()
avg_giou = torch.stack([giou]).mean()
metrics["iou"] = avg_iou
metrics["giou"] = avg_giou
print(f"Done till {num_batches} Validation batches")
end_val_step = time.time()
print(f"Time taken for validation step = {end_val_step - start_val_step} sec")
return metrics
avg_iou = torch.stack([iou]).mean()
avg_giou = torch.stack([giou]).mean()
metrics["iou"] = avg_iou
metrics["giou"] = avg_giou
end_val_step = time.time()
print(f"Time taken for validation step = {end_val_step - start_val_step} sec")
return metrics
def fit(model: nn.Module, epochs: int, train_loader, val_loader,
device: str, optimizer, scheduler=None,
num_batches: int = None, log_interval: int = 100,
fp16: bool = False, ):
"""
A fit function that performs training for certain number of epochs.
Args:
model : A pytorch RetinaNet Model.
epochs: Number of epochs to train.
train_loader : Train loader.
val_loader : Validation loader.
device : "cuda" or "cpu"
optimizer : PyTorch optimizer.
scheduler : (optional) Learning Rate scheduler.
num_batches : (optional) Integer to limit training and validation to a certain number of batches.
log_interval : (optional) Default 100. Log progress after this many batches.
fp16 : (optional) If True, uses mixed precision training with the float16 dtype.
"""
history = {}
train_loss = []
val_iou = []
val_giou = []
if fp16 is True:
print("Training with Mixed precision fp16 scaler")
scaler = amp.GradScaler()
else:
scaler = None
for epoch in tqdm(range(epochs)):
print()
print(f"Training Epoch = {epoch}")
train_metrics = train_step(model, train_loader, device, optimizer,
scheduler, num_batches, log_interval, scaler)
val_metrics = val_step(model, val_loader, device, num_batches, log_interval)
# Possibly we can use individual losses
train_loss.append(train_metrics["total_loss"])
avg_iou = val_metrics["iou"]
avg_giou = val_metrics["giou"]
val_iou.append(avg_iou)
val_giou.append(avg_giou)
history = {"train": {"train_loss": train_loss},
"val": {"val_iou": val_iou, "val_giou": val_giou}}
return history
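# Hedged usage sketch (the loader names below are assumptions, not defined
# here; the model is any torchvision-style detector returning a dict with
# "classification" and "bbox_regression" losses):
#
#     import torchvision
#     model = torchvision.models.detection.retinanet_resnet50_fpn(num_classes=3)
#     optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)
#     history = fit(model, epochs=2, train_loader=train_loader,
#                   val_loader=val_loader, device="cuda",
#                   optimizer=optimizer, fp16=True)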
def train_sanity_fit(model: nn.Module, train_loader,
device: str, num_batches: int = None, log_interval: int = 100,
fp16: bool = False,):
"""
Performs a sanity fit over the train loader.
Use this to dummy-check your fit function. It does not calculate metrics or timing, and does no checkpointing.
It iterates over train_loader for the given number of batches.
Note: it does not call loss.backward().
Args:
model : A pytorch Faster RCNN Model.
train_loader : Train loader.
device : "cuda" or "cpu"
num_batches : (optional) Integer To limit sanity fit over certain batches.
Useful if data is too big even for a sanity check.
log_interval : (optional) Default 100. Log progress after this many batches.
fp16 : (optional) If True, uses PyTorch native mixed precision training.
"""
model = model.to(device)
model.train()
cnt = 0
last_idx = len(train_loader) - 1
train_sanity_start = time.time()
if fp16 is True:
scaler = amp.GradScaler()
for batch_idx, (inputs, targets) in enumerate(train_loader):
last_batch = batch_idx == last_idx
images = list(image.to(device) for image in inputs)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
if fp16 is True:
with amp.autocast():
loss_dict = model(images, targets)
else:
loss_dict = model(images, targets)
cnt += 1
if last_batch or (batch_idx % log_interval) == 0:
print(f"Train sanity check passed for batch till {batch_idx} batches")
if num_batches is not None:
if cnt >= num_batches:
print(f"Done till {num_batches} train batches")
print("All specified batches done")
train_sanity_end = time.time()
print(f"Train sanity fit check passed in time {train_sanity_end-train_sanity_start}")
return True
train_sanity_end = time.time()
print("All specified batches done")
print(f"Train sanity fit check passed in time {train_sanity_end-train_sanity_start}")
return True
def val_sanity_fit(model: nn.Module, val_loader,
device: str, num_batches: int = None,
log_interval: int = 100,):
"""
Performs a sanity fit over the validation loader.
Use this to dummy-check your fit function. It does not calculate metrics or timing, and does no checkpointing.
It iterates over val_loader for the given number of batches.
Note: it does not call loss.backward().
Args:
model : A pytorch Faster RCNN Model.
val_loader : Validation loader.
device : "cuda" or "cpu"
num_batches : (optional) Integer To limit sanity fit over certain batches.
Useful if data is too big even for a sanity check.
log_interval : (optional) Default 100. Log progress after this many batches.
"""
model = model.to(device)
model.eval()
cnt = 0
val_sanity_start = time.time()
last_idx = len(val_loader) - 1
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(val_loader):
last_batch = batch_idx == last_idx
images = list(image.to(device) for image in inputs)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
out = model(images)
cnt += 1
if last_batch or (batch_idx % log_interval) == 0:
print(f"Val sanity check passed for batch till {batch_idx} batches")
if num_batches is not None:
if cnt >= num_batches:
print(f"Done till {num_batches} validation batches")
print("All specified batches done")
val_sanity_end = time.time()
print(f"Val sanity fit check passed in time {val_sanity_end-val_sanity_start}")
return True
val_sanity_end = time.time()
print("All specified batches done")
print(f"Validation sanity check pased in time {val_sanity_end-val_sanity_start}")
return True
def sanity_fit(model: nn.Module, train_loader, val_loader,
device: str, num_batches: int = None,
log_interval: int = 100, fp16: bool = False,):
"""
Performs a sanity fit over both the train loader and the validation loader.
Use this to dummy-check your fit function. It does not calculate metrics or timing, and does no checkpointing.
It iterates over both train_loader and val_loader for the given number of batches.
Note: the train pass does not call loss.backward().
Args:
model : A pytorch Faster RCNN Model.
train_loader : Training loader.
val_loader : Validation loader.
device : "cuda" or "cpu"
num_batches : (optional) Integer To limit sanity fit over certain batches.
Useful if data is too big even for a sanity check.
log_interval : (optional) Default 100. Log progress after this many batches.
fp16 : (optional) If True, uses PyTorch native mixed precision training for the train pass.
"""
sanity_train = train_sanity_fit(model, train_loader, device, num_batches, log_interval, fp16)
sanity_val = val_sanity_fit(model, val_loader, device, num_batches, log_interval)
return sanity_train and sanity_val
|
11519220
|
from packetbeat import (BaseTest, FLOWS_REQUIRED_FIELDS)
from pprint import PrettyPrinter
import six
def pprint(x): return PrettyPrinter().pprint(x)
def check_fields(flow, fields):
for k, v in six.iteritems(fields):
assert flow[k] == v
class Test(BaseTest):
def test_mysql_flow(self):
self.render_config_template(
flows=True,
shutdown_timeout="1s",
)
self.run_packetbeat(
pcap="mysql_long.pcap",
debug_selectors=["*"])
objs = self.read_output(
types=["flow"],
required_fields=FLOWS_REQUIRED_FIELDS)
pprint(objs)
assert len(objs) == 1
check_fields(objs[0], {
'final': True,
'source.mac': '0a:00:27:00:00:00',
'dest.mac': '08:00:27:76:d7:41',
'dest.ip': '192.168.33.14',
'source.ip': '192.168.33.1',
'transport': 'tcp',
'source.port': 60137,
'dest.port': 3306,
'source.stats.net_packets_total': 22,
'source.stats.net_bytes_total': 1480,
'dest.stats.net_packets_total': 10,
'dest.stats.net_bytes_total': 181133,
})
def test_memcache_udp_flow(self):
self.render_config_template(
flows=True,
shutdown_timeout="1s",
)
self.run_packetbeat(
pcap="memcache/memcache_bin_udp_counter_ops.pcap",
debug_selectors=["*"])
objs = self.read_output(
types=["flow"],
required_fields=FLOWS_REQUIRED_FIELDS)
pprint(objs)
assert len(objs) == 1
check_fields(objs[0], {
'final': True,
'source.mac': 'ac:bc:32:77:41:0b',
'dest.mac': '08:00:27:dd:3b:28',
'source.ip': '192.168.188.37',
'dest.ip': '192.168.188.38',
'transport': 'udp',
'source.port': 63888,
'dest.port': 11211,
'source.stats.net_packets_total': 3,
'source.stats.net_bytes_total': 280,
})
def test_icmp4_ping(self):
self.render_config_template(
flows=True,
shutdown_timeout="1s",
)
self.run_packetbeat(
pcap="icmp/icmp4_ping_over_vlan.pcap",
debug_selectors=["*"])
objs = self.read_output(
types=["flow"],
required_fields=FLOWS_REQUIRED_FIELDS)
pprint(objs)
assert len(objs) == 1
check_fields(objs[0], {
'final': True,
'source.mac': '00:00:00:00:00:01',
'dest.mac': '00:00:00:00:00:02',
'vlan': 10,
'source.ip': '10.0.0.1',
'dest.ip': '10.0.0.2',
'icmp_id': 5,
'source.stats.net_bytes_total': 50,
'source.stats.net_packets_total': 1,
'dest.stats.net_bytes_total': 50,
'dest.stats.net_packets_total': 1,
})
def test_icmp6_ping(self):
self.render_config_template(
flows=True,
shutdown_timeout="1s",
)
self.run_packetbeat(
pcap="icmp/icmp6_ping_over_vlan.pcap",
debug_selectors=["*"])
objs = self.read_output(
types=["flow"],
required_fields=FLOWS_REQUIRED_FIELDS)
pprint(objs)
assert len(objs) == 1
check_fields(objs[0], {
'final': True,
'source.mac': '00:00:00:00:00:01',
'dest.mac': '00:00:00:00:00:02',
'vlan': 10,
'source.ipv6': '::1',
'dest.ipv6': '::2',
'icmp_id': 5,
'source.stats.net_bytes_total': 70,
'source.stats.net_packets_total': 1,
'dest.stats.net_bytes_total': 70,
'dest.stats.net_packets_total': 1,
})
|
11519229
|
from metrics.metric import Metric
from typing import Dict, Union
import torch
class PiBehaviorCloning(Metric):
"""
Behavior cloning loss for training graph traversal policy.
"""
def __init__(self, args: Dict):
self.name = 'pi_bc'
def compute(self, predictions: Dict, ground_truth: Union[torch.Tensor, Dict]) -> torch.Tensor:
"""
Compute negative log likelihood of ground truth traversed edges under learned policy.
:param predictions: Dictionary with 'pi': policy for lane graph traversal (log probabilities)
:param ground_truth: Dictionary with 'evf_gt': Look up table with visited edges
"""
# Unpack arguments
pi = predictions['pi']
evf_gt = ground_truth['evf_gt']
loss = -torch.sum(pi[evf_gt.bool()]) / pi.shape[0]
return loss
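# Worked sketch (hypothetical values, not from the original): for one graph
# with two candidate edges,
#
#     pi     = torch.log(torch.tensor([[0.6, 0.4]]))  # traversal policy
#     evf_gt = torch.tensor([[1, 0]])                 # only edge 0 visited
#
# the loss is -log(0.6) / 1 ≈ 0.511, i.e. the negative log likelihood of the
# visited edge, averaged over the batch dimension pi.shape[0].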
|
11519246
|
import math
import torch
import torch.nn as nn
from kornia.geometry.subpix import dsnt
from kornia.utils.grid import create_meshgrid
class FineMatching(nn.Module):
"""FineMatching with s2d paradigm."""
def __init__(self):
super().__init__()
def forward(self, feat_f0, feat_f1, data):
"""
Args:
feat_f0 (torch.Tensor): [M, WW, C]
feat_f1 (torch.Tensor): [M, WW, C]
data (dict)
Update:
data (dict):{
'expec_f' (torch.Tensor): [M, 3],
'mkpts0_f' (torch.Tensor): [M, 2],
'mkpts1_f' (torch.Tensor): [M, 2]}
"""
M, WW, C = feat_f0.shape
W = int(math.sqrt(WW))
scale = data['hw0_i'][0] / data['hw0_f'][0]
self.M, self.W, self.WW, self.C, self.scale = M, W, WW, C, scale
# corner case: if no coarse matches found
if M == 0:
if self.training:
raise ValueError("M > 0 expected during training; see coarse_matching.py")
# logger.warning('No matches found in coarse-level.')
data.update({
'expec_f': torch.empty(0, 3, device=feat_f0.device),
'mkpts0_f': data['mkpts0_c'],
'mkpts1_f': data['mkpts1_c'],
})
return
feat_f0_picked = feat_f0[:, WW // 2, :]
sim_matrix = torch.einsum('mc,mrc->mr', feat_f0_picked, feat_f1)
softmax_temp = 1. / C**.5
heatmap = torch.softmax(softmax_temp * sim_matrix, dim=1).view(-1, W, W)
# compute coordinates from heatmap
coords_normalized = dsnt.spatial_expectation2d(heatmap[None], True)[0] # [M, 2]
grid_normalized = create_meshgrid(W, W, True, heatmap.device).reshape(1, -1, 2) # [1, WW, 2]
# compute std over <x, y>
var = torch.sum(grid_normalized**2 * heatmap.view(-1, WW, 1), dim=1) - coords_normalized**2 # [M, 2]
std = torch.sum(torch.sqrt(torch.clamp(var, min=1e-10)), -1) # [M] clamp needed for numerical stability
# for fine-level supervision
data.update({'expec_f': torch.cat([coords_normalized, std.unsqueeze(1)], -1)})
# compute absolute kpt coords
self.get_fine_match(coords_normalized, data)
@torch.no_grad()
def get_fine_match(self, coords_normed, data):
W, WW, C, scale = self.W, self.WW, self.C, self.scale
# mkpts0_f and mkpts1_f
mkpts0_f = data['mkpts0_c']
scale1 = scale * data['scale1'][data['b_ids']] if 'scale0' in data else scale
mkpts1_f = data['mkpts1_c'] + (coords_normed * (W // 2) * scale1)[:len(data['mconf'])]
data.update({
"mkpts0_f": mkpts0_f,
"mkpts1_f": mkpts1_f
})
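# Note (sketch, not in the original): the uncertainty term above is the
# soft-argmax variance identity Var[g] = E[g^2] - (E[g])^2, taken under the
# heatmap distribution; `std` then sums sqrt(var) over the x and y axes, so
# each of the M matches carries a scalar spread estimate in expec_f[:, 2].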
|
11519250
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
hltMuonValidator = DQMEDAnalyzer('HLTMuonValidator',
hltProcessName = cms.string("HLT"),
hltPathsToCheck = cms.vstring(
"HLT_(L[12])?(Iso)?(Tk)?Mu[0-9]*(Open)?(_NoVertex)?(_eta2p1)?(_v[0-9]*)?$",
"HLT_Mu17_NoFilters?(_v[0-9]*)?$",
"HLT_Dimuon0_Jpsi_v10",
"HLT_Dimuon13_Jpsi_Barrel_v5",
),
genParticleLabel = cms.string("genParticles" ),
recMuonLabel = cms.string("muons" ),
parametersTurnOn = cms.vdouble(0,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
22, 24, 26, 28, 30, 32, 34, 36, 38, 40,
45, 50, 55, 60, 65, 70,
80, 100, 200, 500, 1000, 2000,
),
parametersEta = cms.vdouble(48, -2.400, 2.400),
parametersPhi = cms.vdouble(50, -3.142, 3.142),
# set criteria for matching at L1, L2, L3
cutsDr = cms.vdouble(0.4, 0.4, 0.015),
# parameters for attempting an L1 match using a propagator
maxDeltaPhi = cms.double(0.4),
maxDeltaR = cms.double(0.4),
useSimpleGeometry = cms.bool(True),
useTrack = cms.string("none"),
useState = cms.string("atVertex"),
# set cuts on generated and reconstructed muons
genMuonCut = cms.string("abs(pdgId) == 13 && status == 1"),
recMuonCut = cms.string("isGlobalMuon"),
)
|
11519255
|
import asyncio
import logging
from caldera_agent import agent_protocol
from caldera_agent import foster3 as foster
from caldera_agent import utils
import win32ts
import win32security
import re
# Add module name to log messages
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
log.setLevel(logging.WARNING)
class Interface(object):
def __init__(self, open_connections, server_api):
self.open_connections = open_connections
self.server_api = server_api
async def run(self, action, args):
raise NotImplementedError()
def clients(self):
raise NotImplementedError()
class NoClientError(Exception):
pass
class InterfaceError(Exception):
pass
class LocalInterface(Interface):
def clients(self):
# return objects that represent commanders
return [{'pid': x,
'elevated': self.open_connections[x].is_elevated,
'executable_path': self.open_connections[x].executable_path,
'username': self.open_connections[x].user_name} for x in self.open_connections.keys()]
async def _implant(self, function, pid, params: dict): # runs in implant
# pid = int(args.pop(0)) # pid at beginning of list
if pid not in self.open_connections:
raise NoClientError()
return_params = await self.open_connections[pid].run_function(function, params)
log.debug("implant received : '{}'".format(return_params))
return return_params
async def run(self, action, args):
status = False
try:
if action == 'execute':
output = await self._run_in_caldera_subprocess(args['command_line'])
status = True
elif action == 'clients':
output = self.clients()
status = True
elif action == 'write_commander':
output = await self._write_commander(args['path'])
status = True
elif action == 'rats':
pattern = re.compile(r'\[\[[-.\w]*\]\]')
if 'function' in args:
parameters = args.get('parameters', {})
for key, val in parameters.items():
matches = re.findall(pattern, val)
for match in list(set(matches)):
filler = await self.server_api.get_macro(match[2:-2])
fill_string = filler.decode()
parameters[key] = val.replace(match, fill_string)
pid = int(args.get('name'))
output = await self._implant(args['function'], pid, parameters)
status = True
else:
output = "{'action': 'rats'} must contain a 'function'."
elif action == 'create_process':
output = self._create_process(args['process_args'], parent=args.get('parent', None),
hide=args.get('hide', True), output=args.get('output', False))
status = True
elif action == 'create_process_as_user':
output = self._create_process_as_user(args['process_args'], args['user_domain'],
args['user_name'], args['user_pass'],
parent=args.get('parent', None), hide=args.get('hide', True),
output=args.get('output', False))
status = True
elif action == 'create_process_as_active_user':
output = self._create_process_as_active_user(args['process_args'], parent=args.get('parent', None),
hide=args.get('hide', True), output=args.get('output', False))
status = True
else:
output = "'{}' is not a recognized action.".format(action)
except agent_protocol.DisconnectedError:
output = "Rat crashed during execution"
return status, output
async def _write_commander(self, path):
# download commander
commander = await self.server_api.get_commander()
# TODO could use ThreadPoolExecutor here for better performance
with open(path, 'wb') as f:
f.write(commander)
return await self._run_in_caldera_subprocess("takeown /F {} /A".format(path))
async def _run_in_caldera_subprocess(self, cmd_str=None):
if cmd_str is None:
raise InterfaceError('LocalInterface._run_in_caldera_subprocess called with no cmd_args')
# Runs shell commands
proc = await asyncio.create_subprocess_shell(cmd_str, stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.STDOUT)
stdout, _ = await proc.communicate()
return stdout.decode()
def _create_process_as_user(self, process_args, user_domain, user_name, user_pass, parent=None, hide=True,
output=False):
hUser = win32security.LogonUser(user_name, user_domain, user_pass, win32security.LOGON32_LOGON_INTERACTIVE,
win32security.LOGON32_PROVIDER_DEFAULT)
return self._create_process(process_args, user_handle=hUser.handle, parent=parent, hide=hide, output=output)
def _create_process_as_active_user(self, process_args, parent=None, hide=True, output=False):
WTS_CURRENT_SERVER_HANDLE = 0
sessions = win32ts.WTSEnumerateSessions(WTS_CURRENT_SERVER_HANDLE)
log.debug('{}'.format(sessions))
active_session = None
for session in sessions:
if session['State'] == utils.WTS_CONNECTSTATE_CLASS.WTSActive:
active_session = session
break
if active_session is None:
raise Exception("Could not find an active session")
# active_session = win32ts.WTSGetActiveConsoleSessionId()
log.debug("Active Session is: {}".format(active_session))
hUser = win32ts.WTSQueryUserToken(active_session['SessionId'])
return self._create_process(process_args, user_handle=hUser.handle, parent=parent, hide=hide, output=output)
def _create_process(self, process_args, user_handle=None, parent=None, hide=True, output=False):
if parent:
PROCESS_CREATE_PROCESS = 0x80
PROCESS_DUP_HANDLE = 0x40
handle = utils.get_process_handle(parent, PROCESS_CREATE_PROCESS | PROCESS_DUP_HANDLE)
parent = handle.handle
else:
parent = None
stdout = None
stderr = None
if output:
stdout = foster.PIPE
stderr = foster.STDOUT
process = foster.Popen(process_args, hide=hide, user_token=user_handle, parent=parent, stdout=stdout, stderr=stderr)
if output:
stdout, stderr = process.communicate()
if stdout:
return stdout.decode()
else:
return ""
return process.pid
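# Hedged usage sketch (assumes an asyncio event loop and a populated
# open_connections map / server_api object, neither constructed here; the
# rat function name 'get_clients' is hypothetical):
#
#     iface = LocalInterface(open_connections, server_api)
#     status, output = await iface.run('execute', {'command_line': 'whoami'})
#     status, output = await iface.run('rats', {'name': '1234',
#                                               'function': 'get_clients',
#                                               'parameters': {}})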
|
11519289
|
from os import makedirs
from os.path import exists, join
from typing import Any, Sequence, Tuple
import imageio
from arbol.arbol import aprint, asection
from joblib import Parallel, delayed
from dexp.datasets import BaseDataset
from dexp.processing.color.projection import project_image
from dexp.utils.backends import Backend, BestBackend
def dataset_projection_rendering(
input_dataset: BaseDataset,
output_path: str = None,
channels: Sequence[str] = None,
slicing: Any = None,
overwrite: bool = False,
axis: int = 0,
dir: int = -1,
mode: str = "colormax",
clim: Tuple[float, float] = None,
attenuation: float = 0.05,
gamma: float = 1.0,
dlim: Tuple[float, float] = None,
colormap: str = None,
rgbgamma: float = 1.0,
transparency: bool = False,
legendsize: float = 1.0,
legendscale: float = 1.0,
legendtitle: str = "color-coded depth (voxels)",
legendtitlecolor: Tuple[float, float, float, float] = (1, 1, 1, 1),
legendposition: str = "bottom_left",
legendalpha: float = 1.0,
step: int = 1,
workers: int = -1,
workersbackend: str = "threading",
devices: Tuple[int, ...] = (0,),
stop_at_exception=True,
):
for channel in channels:
# Ensures that the output folder exists per channel:
if len(channels) == 1:
channel_output_path = output_path
else:
channel_output_path = output_path + f"_{channel}"
makedirs(channel_output_path, exist_ok=True)
with asection(f"Channel '{channel}' shape: {input_dataset.shape(channel)}:"):
aprint(input_dataset.info(channel))
array = input_dataset.get_array(channel, wrap_with_dask=True)
if slicing:
array = array[slicing]
aprint(f"Rendering array of shape={array.shape} and dtype={array.dtype} for channel '{channel}'.")
nbframes = array.shape[0]
with asection("Rendering:"):
def process(tp, _clim, device):
try:
with asection(f"Rendering Frame : {tp:05}"):
filename = join(channel_output_path, f"frame_{tp:05}.png")
if overwrite or not exists(filename):
with asection("Loading stack..."):
stack = array[tp].compute()
with BestBackend(device, exclusive=True, enable_unified_memory=True):
if _clim is not None:
aprint(f"Using provided min and max for contrast limits: {_clim}")
min_value, max_value = (float(strvalue) for strvalue in _clim.split(","))
_clim = (min_value, max_value)
with asection(f"Projecting image of shape: {stack.shape} "):
projection = project_image(
stack,
axis=axis,
dir=dir,
mode=mode,
attenuation=attenuation,
attenuation_min_density=0.002,
attenuation_filtering=4,
gamma=gamma,
clim=_clim,
cmap=colormap,
dlim=dlim,
rgb_gamma=rgbgamma,
transparency=transparency,
legend_size=legendsize,
legend_scale=legendscale,
legend_title=legendtitle,
legend_title_color=legendtitlecolor,
legend_position=legendposition,
legend_alpha=legendalpha,
)
with asection(f"Saving frame {tp} as: {filename}"):
imageio.imwrite(filename, Backend.to_numpy(projection), compress_level=1)
except Exception as error:
aprint(error)
aprint(f"Error occurred while processing time point {tp} !")
import traceback
traceback.print_exc()
if stop_at_exception:
raise error
if workers == -1:
workers = len(devices)
aprint(f"Number of workers: {workers}")
if workers > 1:
Parallel(n_jobs=workers, backend=workersbackend)(
delayed(process)(tp, clim, devices[tp % len(devices)]) for tp in range(0, nbframes, step)
)
else:
for tp in range(0, nbframes, step):
process(tp, clim, devices[0])
|
11519309
|
import warnings
from inspect import isclass
import numpy as np
from UQpy.RunModel import RunModel
from UQpy.SampleMethods import *
########################################################################################################################
########################################################################################################################
# Subset Simulation
########################################################################################################################
class SubsetSimulation:
"""
Perform Subset Simulation to estimate probability of failure.
This class estimates probability of failure for a user-defined model using Subset Simulation. The class can
use one of several MCMC algorithms to draw conditional samples.
**Input:**
* **runmodel_object** (``RunModel`` object):
The computational model. It should be of type `RunModel` (see ``RunModel`` class).
* **mcmc_class** (Class of type ``SampleMethods.MCMC``)
Specifies the MCMC algorithm.
Must be a child class of the ``SampleMethods.MCMC`` parent class. Note: this is `not` an object of the class.
This input specifies the class itself.
* **samples_init** (`ndarray`)
A set of samples from the specified probability distribution. These are the samples from the original
distribution. They are not conditional samples. The samples must be an array of size
`nsamples_per_ss x dimension`.
If `samples_init` is not specified, the ``SubsetSimulation`` class will use the `mcmc_class` to draw the initial
samples.
* **p_cond** (`float`):
Conditional probability for each conditional level.
* **nsamples_per_ss** (`int`)
Number of samples to draw in each conditional level.
* **max_level** (`int`)
Maximum number of allowable conditional levels.
* **verbose** (Boolean):
A boolean declaring whether to write text to the terminal.
* **mcmc_kwargs** (`dict`)
Any additional keyword arguments needed for the specific ``MCMC`` class.
**Attributes:**
* **samples** (`list` of `ndarrays`)
A list of arrays containing the samples in each conditional level.
* **g** (`list` of `ndarrays`)
A list of arrays containing the evaluation of the performance function at each sample in each conditional level.
* **g_level** (`list`)
Threshold value of the performance function for each conditional level
* **pf** (`float`)
Probability of failure estimate
* **cov1** (`float`)
Coefficient of variation of the probability of failure estimate assuming independent chains
* **cov2** (`float`)
Coefficient of variation of the probability of failure estimate with dependent chains. From [4]_
**Methods:**
"""
def __init__(self, runmodel_object, mcmc_class=MMH, samples_init=None, p_cond=0.1, nsamples_per_ss=1000,
max_level=10, verbose=False, **mcmc_kwargs):
# Store the MCMC object to create a new object of this type for each subset
self.mcmc_kwargs = mcmc_kwargs
self.mcmc_class = mcmc_class
# Initialize other attributes
self.runmodel_object = runmodel_object
self.samples_init = samples_init
self.p_cond = p_cond
self.nsamples_per_ss = nsamples_per_ss
self.max_level = max_level
self.verbose = verbose
# Check that a RunModel object is being passed in.
if not isinstance(self.runmodel_object, RunModel):
raise AttributeError(
'UQpy: Subset simulation requires the user to pass a RunModel object')
if 'random_state' in self.mcmc_kwargs:
self.random_state = self.mcmc_kwargs['random_state']
if isinstance(self.random_state, int):
self.random_state = np.random.RandomState(self.random_state)
elif not isinstance(self.random_state, (type(None), np.random.RandomState)):
raise TypeError('UQpy: random_state must be None, an int or an np.random.RandomState object.')
else:
self.random_state = None
# Perform initial error checks
self._init_sus()
# Initialize the mcmc_object from the specified class.
mcmc_object = self.mcmc_class(**self.mcmc_kwargs)
self.mcmc_objects = [mcmc_object]
# Initialize new attributes/variables
self.samples = list()
self.g = list()
self.g_level = list()
if self.verbose:
print('UQpy: Running Subset Simulation with MCMC of type: ' + str(type(mcmc_object)))
[self.pf, self.cov1, self.cov2] = self.run()
if self.verbose:
print('UQpy: Subset Simulation Complete!')
# -----------------------------------------------------------------------------------------------------------------------
# The run function executes the chosen subset simulation algorithm
def run(self):
"""
Execute subset simulation
This is an instance method that runs subset simulation. It is automatically called when the SubsetSimulation
class is instantiated.
**Output/Returns:**
* **pf** (`float`)
Probability of failure estimate
* **cov1** (`float`)
Coefficient of variation of the probability of failure estimate assuming independent chains
* **cov2** (`float`)
Coefficient of variation of the probability of failure estimate with dependent chains. From [4]_
"""
step = 0
n_keep = int(self.p_cond * self.nsamples_per_ss)
d12 = list()
d22 = list()
# Generate the initial samples - Level 0
# Here we need to make sure that we have good initial samples from the target joint density.
if self.samples_init is None:
warnings.warn('UQpy: You have not provided initial samples.\n Subset simulation is highly sensitive to the '
'initial sample set. It is recommended that the user either:\n'
'- Provide an initial set of samples (samples_init) known to follow the distribution; or\n'
'- Provide a robust MCMC object that will draw independent initial samples from the '
'distribution.')
self.mcmc_objects[0].run(nsamples=self.nsamples_per_ss)
self.samples.append(self.mcmc_objects[0].samples)
else:
self.samples.append(self.samples_init)
# Run the model for the initial samples, sort them by their performance function, and identify the
# conditional level
self.runmodel_object.run(samples=np.atleast_2d(self.samples[step]))
self.g.append(np.squeeze(self.runmodel_object.qoi_list))
g_ind = np.argsort(self.g[step])
self.g_level.append(self.g[step][g_ind[n_keep - 1]])
# Estimate coefficient of variation of conditional probability of first level
d1, d2 = self._cov_sus(step)
d12.append(d1 ** 2)
d22.append(d2 ** 2)
if self.verbose:
print('UQpy: Subset Simulation, conditional level 0 complete.')
while self.g_level[step] > 0 and step < self.max_level:
# Increment the conditional level
step = step + 1
# Initialize the samples and the performance function at the next conditional level
self.samples.append(np.zeros_like(self.samples[step - 1]))
self.samples[step][:n_keep] = self.samples[step - 1][g_ind[0:n_keep], :]
self.g.append(np.zeros_like(self.g[step - 1]))
self.g[step][:n_keep] = self.g[step - 1][g_ind[:n_keep]]
# Unpack the attributes
# Initialize a new MCMC object for each conditional level
self.mcmc_kwargs['seed'] = np.atleast_2d(self.samples[step][:n_keep, :])
self.mcmc_kwargs['random_state'] = self.random_state
new_mcmc_object = self.mcmc_class(**self.mcmc_kwargs)
self.mcmc_objects.append(new_mcmc_object)
# Set the number of samples to propagate each chain (n_prop) in the conditional level
n_prop_test = self.nsamples_per_ss / self.mcmc_objects[step].nchains
if n_prop_test.is_integer():
n_prop = self.nsamples_per_ss // self.mcmc_objects[step].nchains
else:
raise AttributeError(
'UQpy: The number of samples per subset (nsamples_per_ss) must be an integer multiple of '
'the number of MCMC chains.')
# Propagate each chain n_prop times and evaluate the model to accept or reject.
for i in range(n_prop - 1):
# Propagate each chain
if i == 0:
self.mcmc_objects[step].run(nsamples=2 * self.mcmc_objects[step].nchains)
else:
self.mcmc_objects[step].run(nsamples=self.mcmc_objects[step].nchains)
# Decide whether a new simulation is needed for each proposed state
a = self.mcmc_objects[step].samples[i * n_keep:(i + 1) * n_keep, :]
b = self.mcmc_objects[step].samples[(i + 1) * n_keep:(i + 2) * n_keep, :]
test1 = np.equal(a, b)
test = np.logical_and(test1[:, 0], test1[:, 1])
# Pull out the indices of the false values in the test list
ind_false = [i for i, val in enumerate(test) if not val]
# Pull out the indices of the true values in the test list
ind_true = [i for i, val in enumerate(test) if val]
# Do not run the model for those samples where the MCMC state remains unchanged.
self.samples[step][[x + (i + 1) * n_keep for x in ind_true], :] = \
self.mcmc_objects[step].samples[ind_true, :]
self.g[step][[x + (i + 1) * n_keep for x in ind_true]] = self.g[step][ind_true]
# Run the model at each of the new sample points
x_run = self.mcmc_objects[step].samples[[x + (i + 1) * n_keep for x in ind_false], :]
if x_run.size != 0:
self.runmodel_object.run(samples=x_run)
# Temporarily save the latest model runs
g_temp = np.asarray(self.runmodel_object.qoi_list[-len(x_run):])
# Accept the states with g <= g_level
ind_accept = np.where(g_temp <= self.g_level[step - 1])[0]
for ii in ind_accept:
self.samples[step][(i + 1) * n_keep + ind_false[ii]] = x_run[ii]
self.g[step][(i + 1) * n_keep + ind_false[ii]] = g_temp[ii]
# Reject the states with g > g_level
ind_reject = np.where(g_temp > self.g_level[step - 1])[0]
for ii in ind_reject:
self.samples[step][(i + 1) * n_keep + ind_false[ii]] = \
self.samples[step][i * n_keep + ind_false[ii]]
self.g[step][(i + 1) * n_keep + ind_false[ii]] = self.g[step][i * n_keep + ind_false[ii]]
g_ind = np.argsort(self.g[step])
self.g_level.append(self.g[step][g_ind[n_keep]])
# Estimate coefficient of variation of conditional probability of first level
d1, d2 = self._cov_sus(step)
d12.append(d1 ** 2)
d22.append(d2 ** 2)
if self.verbose:
print('UQpy: Subset Simulation, conditional level ' + str(step) + ' complete.')
n_fail = len([value for value in self.g[step] if value < 0])
pf = self.p_cond ** step * n_fail / self.nsamples_per_ss
cov1 = np.sqrt(np.sum(d12))
cov2 = np.sqrt(np.sum(d22))
return pf, cov1, cov2
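# Worked sketch (hypothetical numbers, not from the original): with
# p_cond = 0.1 and nsamples_per_ss = 1000, reaching step = 3 with
# n_fail = 200 failing samples in the final level gives
#
#     pf = 0.1 ** 3 * 200 / 1000 = 2e-4
#
# i.e. each conditional level contributes a factor p_cond and the last
# level contributes its raw failure fraction.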
# -----------------------------------------------------------------------------------------------------------------------
# Support functions for subset simulation
def _init_sus(self):
"""
Check for errors in the SubsetSimulation class input
This is an instance method that checks for errors in the input to the SubsetSimulation class. It is
automatically called when the SubsetSimulation class is instantiated.
No inputs or returns.
"""
# Check that an MCMC class is being passed in.
if not isclass(self.mcmc_class):
raise ValueError('UQpy: mcmc_class must be a child class of MCMC. Note it is not an instance of the class.')
if not issubclass(self.mcmc_class, MCMC):
raise ValueError('UQpy: mcmc_class must be a child class of MCMC.')
# Check that a RunModel object is being passed in.
if not isinstance(self.runmodel_object, RunModel):
raise AttributeError(
'UQpy: Subset simulation requires the user to pass a RunModel object')
# Check that a valid conditional probability is specified.
        if not isinstance(self.p_cond, float):
            raise AttributeError('UQpy: Invalid conditional probability. p_cond must be of float type.')
        elif self.p_cond <= 0. or self.p_cond >= 1.:
            raise AttributeError('UQpy: Invalid conditional probability. p_cond must be in (0, 1).')
        # Check that the number of samples per subset is properly defined.
        if not isinstance(self.nsamples_per_ss, int):
            raise AttributeError('UQpy: Number of samples per subset (nsamples_per_ss) must be integer valued.')
        # Check that max_level is an integer
        if not isinstance(self.max_level, int):
            raise AttributeError('UQpy: The maximum subset level (max_level) must be integer valued.')
def _cov_sus(self, step):
"""
Compute the coefficient of variation of the samples in a conditional level
This is an instance method that is called after each conditional level is complete to compute the coefficient
of variation of the conditional probability in that level.
**Input:**
:param step: Specifies the conditional level
:type step: int
**Output/Returns:**
:param d1: Coefficient of variation in conditional level assuming independent chains
:type d1: float
:param d2: Coefficient of variation in conditional level with dependent chains
:type d2: float
"""
# Here, we assume that the initial samples are drawn to be uncorrelated such that the correction factors do not
# need to be computed.
if step == 0:
d1 = np.sqrt((1 - self.p_cond) / (self.p_cond * self.nsamples_per_ss))
d2 = np.sqrt((1 - self.p_cond) / (self.p_cond * self.nsamples_per_ss))
return d1, d2
else:
n_c = int(self.p_cond * self.nsamples_per_ss)
n_s = int(1 / self.p_cond)
indicator = np.reshape(self.g[step] < self.g_level[step], (n_s, n_c))
gamma = self._corr_factor_gamma(indicator, n_s, n_c)
g_temp = np.reshape(self.g[step], (n_s, n_c))
beta_hat = self._corr_factor_beta(g_temp, step)
d1 = np.sqrt(((1 - self.p_cond) / (self.p_cond * self.nsamples_per_ss)) * (1 + gamma))
d2 = np.sqrt(((1 - self.p_cond) / (self.p_cond * self.nsamples_per_ss)) * (1 + gamma + beta_hat))
return d1, d2
# Computes the conventional correlation factor gamma from Au and Beck
def _corr_factor_gamma(self, indicator, n_s, n_c):
"""
Compute the conventional correlation factor gamma from Au and Beck (Reference [1])
This is an instance method that computes the correlation factor gamma used to estimate the coefficient of
variation of the conditional probability estimate from a given conditional level. This method is called
automatically within the _cov_sus method.
**Input:**
:param indicator: An array of booleans indicating whether the performance function is below the threshold for
the conditional probability.
:type indicator: boolean array
:param n_s: Number of samples drawn from each Markov chain in each conditional level
:type n_s: int
:param n_c: Number of Markov chains in each conditional level
:type n_c: int
**Output/Returns:**
:param gam: Gamma factor in coefficient of variation estimate
:type gam: float
"""
gam = np.zeros(n_s - 1)
r = np.zeros(n_s)
ii = indicator * 1
r_ = ii @ ii.T / n_c - self.p_cond ** 2
for i in range(r_.shape[0]):
r[i] = np.sum(np.diag(r_, i)) / (r_.shape[0] - i)
        r0 = self.p_cond * (1 - self.p_cond)  # variance of the indicator; the original hardcoded 0.1 * (1 - 0.1)
        r = r / r0
for i in range(n_s - 1):
gam[i] = (1 - ((i + 1) / n_s)) * r[i + 1]
gam = 2 * np.sum(gam)
return gam
# Computes the updated correlation factor beta from Shields et al.
def _corr_factor_beta(self, g, step):
"""
Compute the additional correlation factor beta from Shields et al. (Reference [2])
This is an instance method that computes the correlation factor beta used to estimate the coefficient of
variation of the conditional probability estimate from a given conditional level. This method is called
automatically within the _cov_sus method.
**Input:**
:param g: An array containing the performance function evaluation at all points in the current conditional
level.
:type g: numpy array
:param step: Current conditional level
:type step: int
**Output/Returns:**
:param beta: Beta factor in coefficient of variation estimate
:type beta: float
"""
beta = 0
for i in range(np.shape(g)[1]):
for j in range(i + 1, np.shape(g)[1]):
if g[0, i] == g[0, j]:
beta = beta + 1
beta = beta * 2
ar = np.asarray(self.mcmc_objects[step].acceptance_rate)
ar_mean = np.mean(ar)
factor = 0
for i in range(np.shape(g)[0] - 1):
factor = factor + (1 - (i + 1) * np.shape(g)[0] / np.shape(g)[1]) * (1 - ar_mean)
factor = factor * 2 + 1
beta = beta / np.shape(g)[1] * factor
return beta
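# A minimal numeric sketch (not part of the class above) of the level-0
# coefficient-of-variation formula used in _cov_sus: with independent samples,
# cov = sqrt((1 - p_cond) / (p_cond * nsamples_per_ss)).
def _demo_cov_level0():
    import numpy as np
    p_cond, nsamples_per_ss = 0.1, 1000
    d = np.sqrt((1 - p_cond) / (p_cond * nsamples_per_ss))
    assert round(d, 4) == 0.0949  # sqrt(0.9 / 100)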
|
11519343
|
import json
import os
import random
from glob import glob
import numpy as np
import pandas as pd
# from preprocessing.CredbankProcessor import preprocessing_tweet_text
from CredbankProcessor import preprocessing_tweet_text
from src.data_loader import load_files_from_dataset_dir, load_matrix_from_csv
from pprint import pprint as pp
'''
This is a utility script that:
1) generates training data for the RPDNN model from the PHEME dataset.
   Link to the original dataset:
   https://figshare.com/articles/PHEME_dataset_for_Rumour_Detection_and_Veracity_Classification/6392078
2) generates an independent test set from the PHEME dataset for the CREDBANK fine-tuned ELMo.
'''
# Path to the pheme dataset (all-rnr-annotated-threads)
my_path = os.path.join(os.path.dirname(__file__),'..', '..', '..', '..', 'Data/all-rnr-annotated-threads')
train_set = {"charliehebdo-all-rnr-threads", "ebola-essien-all-rnr-threads",
"ferguson-all-rnr-threads", "germanwings-crash-all-rnr-threads",
"ottawashooting-all-rnr-threads",
"prince-toronto-all-rnr-threads", "putinmissing-all-rnr-threads",
"sydneysiege-all-rnr-threads"}
test_set = {"gurlitt-all-rnr-threads"}
def generate_development_set(my_path, output_dir="aug_rnr_training"):
for folders in glob(os.path.join(my_path, '*')):
event_name = os.path.basename(folders) ## Get event name
print("Event: ", event_name)
event_types = glob(os.path.join(folders, '*')) # folders named rumours or non-rumours
# output_df = pd.DataFrame(columns=['tweet_id', 'created_at', 'text', 'retweet_count', 'user_name', 'user_verified', 'follower'])
# output_df = pd.DataFrame(columns=['tweet_id', 'created_at', 'text', 'label']) # create a dataframe to store output
output_df = pd.DataFrame(columns=['tweet_id', 'created_at', 'text', 'label', 'user_id', 'user_name']) # create a dataframe to store output
for event_t in event_types:
category = os.path.basename(event_t) # rumours or non-rumours
print("category: ")
print(event_name, category)
idfiles = glob(os.path.join(event_t, '*'))
print(idfiles[:3])
print("")
for id_f in idfiles: # visit each tweet folder
source_tweet = glob(os.path.join(id_f, 'source-tweets/*')) # consider source tweets only
# print(source_tweet)
# print("id_f: ", id_f)
tweet_file_name = os.path.basename(id_f)
# print("tweet_file_name: ", tweet_file_name)
assert len(source_tweet)==1
with open(source_tweet[0], 'r') as f:
tweet = json.load(f)
# if is_retweet(tweet):
# tweet = get_original_source_tweet(tweet)
timestamp = pd.DatetimeIndex([tweet['created_at']])
tweet_id = tweet["id_str"]
# print("tweet id inside: ", tweet_id)
if int(tweet_id) != int(tweet_file_name):
print("warning: data set problem. check mismatched json name and tweet id inside.")
continue
if category == 'rumours':
#print("saving this rumour source tweet: ")
output_df.loc[len(output_df)] = [tweet['id_str'], timestamp[0], tweet['full_text'] if "full_text" in tweet else tweet["text"], 1, tweet['user']['id_str'], tweet['user']['screen_name']]
elif category == "non-rumours":
#print("saving this non-rumours source tweet: ")
output_df.loc[len(output_df)] = [tweet['id_str'], timestamp[0], tweet['full_text'] if "full_text" in tweet else tweet["text"], 0, tweet['user']['id_str'], tweet['user']['screen_name']]
#print(output_df)
filename = os.path.join(os.path.dirname(my_path), output_dir+'/{}.csv'.format(event_name))
os.makedirs(os.path.dirname(filename), exist_ok=True)
output_df.to_csv(filename, index=None, encoding='utf-8')
def is_retweet(source_tweet_json : dict):
"""
a temporary solution to load actual source tweet data from "retweeted_status" in our augmented dataset
:param source_tweet_json:
:return:
"""
return "retweeted_status" in source_tweet_json
def get_original_source_tweet (source_tweet_json : dict):
if "retweeted_status" in source_tweet_json:
return source_tweet_json["retweeted_status"]
def oversampling_pos(data_X, shuffling_options, X, train_diffs):
"""
oversampling on positive train examples when dataset is few
Oversampling should only be applied to train set before the split
class imbalance like 4:1 above can cause problems
:param data_X:
:param shuffling_options:
:param X:
:param train_diffs:
:return:
"""
print("oversampling %s positive examples ... " % train_diffs)
random_option = random.randint(0,4)
data_indices = shuffling_options[random_option][0]
oversampled_size = 0
for data_index in data_indices:
if oversampled_size >= train_diffs:
break
dev_data_example = X[data_index]
# if current instance is positive example
if dev_data_example[3] == 1:
data_X = np.append(data_X, [dev_data_example], axis=0)
oversampled_size +=1
return data_X
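# A minimal sketch (not part of the original script) of the oversampling idea
# above: duplicate positive rows until the requested count is reached. As in
# the data above, the 4th column (index 3) is assumed to hold the label.
def _demo_oversampling():
    rows = [[1, 'a', 't1', 1], [2, 'b', 't2', 0], [3, 'c', 't3', 0]]
    need, added = 1, 0
    for row in list(rows):
        if added >= need:
            break
        if row[3] == 1:  # duplicate only positive examples
            rows.append(row)
            added += 1
    assert sum(1 for r in rows if r[3] == 1) == 2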
def undersampling_neg(data_X):
"""
undersampling on negative train examples when dataset is few
Oversampling should only be applied to train set before the split
class imbalance like 4:1 above can cause problems
:param data_X:
:param shuffling_options:
:param X:
:param train_diffs:
:return:
"""
pos_samples, neg_samples = separate_pos_neg(data_X)
train_diffs = len(neg_samples) - len(pos_samples)
print("undersampling %s negative examples ... " % train_diffs)
    # sample indices across all negative examples (the original started the
    # range at 1, which meant the negative example at index 0 was never removed)
    undersampled_indices = random.sample(range(len(neg_samples)), train_diffs)
print("total %s undersampled examples" % len(undersampled_indices))
neg_samples = np.delete(neg_samples, undersampled_indices, axis=0)
pos_samples = np.array(pos_samples)
print("pos_samples.shape: ", pos_samples.shape)
print("neg_samples.shape: ", neg_samples.shape)
return np.concatenate([pos_samples, neg_samples])
def separate_pos_neg(data_X):
pos_samples = []
neg_samples = []
for i in range(0, len(data_X)):
dev_data_example_i = data_X[i]
if dev_data_example_i[3] == 1:
pos_samples.append(dev_data_example_i)
else:
neg_samples.append(dev_data_example_i)
return pos_samples, neg_samples
def export_data(train_X, test_event, file_name):
"""
:param train_X:
:param file_name: pheme_6392078_train_set_combined.csv or pheme_6392078_heldout_set_combined.csv
:return:
"""
# train_set_file = os.path.join(os.path.dirname(__file__), '..', '..', "data", "train", test_event, file_name)
# train_set_file = os.path.join(os.path.dirname(__file__), '..', '..', "data", "Multitask", "aug-boston", test_event, file_name)
train_set_file = os.path.join(os.path.dirname(__file__), '..', '..', "data", "test-balance", test_event, file_name)
os.makedirs(os.path.dirname(train_set_file), exist_ok=True)
output_df = pd.DataFrame(train_X)
# output_df.to_csv(train_set_file, header=['tweet_id', 'created_at', 'text', 'label'], index=None)
output_df.to_csv(train_set_file, header=['tweet_id', 'created_at', 'text', 'label', 'user_id', 'user_name'], index=None, encoding="utf-8")
print("export %s to %s" % (output_df.shape, train_set_file))
def generate_combined_dev_set(training_data_dir_path, test_set_dir_path):
"""
combine every individual development set generated from generate_development_set(),
shuffle the dataset,
and split into train and validation set
and save into a single csv file
The development set are generated from PHE RNR-annotated threads directory and saved into 4-columns csv files
We use oversampling of positive examples for original PHEME 6392078 training set
and use undersampling of negative examples for augmented dataset
:return:
"""
# dataset_dir = "C:\\Data\\NLP-corpus\\PHEME-dataset\\pheme_training"
# dataset_dir = os.path.join(os.path.dirname(__file__), '..', '..', '..', '..', 'Data/pheme_training')
dataset_dir = training_data_dir_path
test_dataset_dir = test_set_dir_path
print(os.path.exists(dataset_dir))
all_train_dataset_path = load_files_from_dataset_dir(dataset_dir)
print("all_train_dataset_path: ", all_train_dataset_path)
dev_set_events = {'bostonbombings','charliehebdo', 'ebola-essien', 'ferguson', 'germanwings', 'gurlitt',
'ottawashooting', 'prince-toronto', 'sydneysiege'}
test_set_events = {'putinmissing'}
dev_set_path = list(filter(None, [os.path.join(dataset_dir, individual_train_set_path) if individual_train_set_path.replace('.csv','').split('-')[0]
in dev_set_events else ''
for individual_train_set_path in all_train_dataset_path]))
test_set_path = list(filter(None, [os.path.join(test_dataset_dir, individual_train_set_path) if individual_train_set_path.replace('.csv','').split('-')[0]
in test_set_events else ''
for individual_train_set_path in all_train_dataset_path]))
print("dev_set_path: ")
pp(dev_set_path)
# assert len(dev_set_path)==5
print("test_set_path: ")
pp( test_set_path)
X = None
for dev_set_file in dev_set_path:
df = load_matrix_from_csv(dev_set_file, header=0, start_col_index=0, end_col_index=6)
print("dataset loaded from %s" % dev_set_file)
print("current event set size: ", df[:].shape)
if X is None:
X = df[:]
else:
X = np.append(X, df[:], axis=0)
pos_inst, neg_inst = check_dataset_balance(df)
test_X = None
for test_set_file in test_set_path:
df = load_matrix_from_csv(test_set_file, header=0, start_col_index=0, end_col_index=6)
print("dataset loaded from %s" % test_set_file)
print("current event set size: ", df[:].shape)
if test_X is None:
test_X = df[:]
else:
test_X = np.append(test_X, df[:], axis=0)
pos_inst, neg_inst = check_dataset_balance(df)
print("final total size: ", test_X.shape)
print("")
print("undersampling test set ...")
test_X = undersampling_neg(test_X)
from sklearn.model_selection import ShuffleSplit
rs = ShuffleSplit(n_splits=5, random_state=random.randint(1,10), test_size=0.10, train_size=None)
train_X = np.empty(shape=(0, 6), dtype=object)
# train_X = np.empty(shape=(0, 4), dtype=object)
validation_X = np.empty(shape=(0, 6), dtype=object)
# validation_X = np.empty(shape=(0, 4), dtype=object)
shuffling_options = list(rs.split(X))
random_option = random.randint(0,4)
train_indices = shuffling_options[random_option][0]
test_indices = shuffling_options[random_option][1]
print("done. TRAIN:", train_indices, ", size: ", len(train_indices), "TEST:", test_indices, ", size: ", len(test_indices))
for train_index in train_indices:
train_X = np.append(train_X, [X[train_index]], axis=0)
for heldout_index in test_indices:
validation_X = np.append(validation_X, [X[heldout_index]], axis=0)
print("train size: ", train_X.shape)
print("validation set size: ", validation_X.shape)
print("")
print("* check balance of train set ... ")
train_pos_inst, train_neg_inst = check_dataset_balance(train_X)
print("* check balance of held-out set ... ")
val_pos_inst, val_neg_inst = check_dataset_balance(validation_X)
print("")
# oversampling positive instances
train_diffs = train_neg_inst - train_pos_inst
print("difference btw positive examples and negative examples in train set: ", train_diffs)
val_diffs = val_neg_inst - val_pos_inst
print("difference btw positive examples and negative examples in validation set: ", val_diffs)
#print("oversampling (only applied to) train set... ")
#train_X = oversampling_pos(train_X, shuffling_options, X, train_diffs)
#print("train side after oversampling positive examples, ", train_X.shape)
print("undersampling train set ...")
train_X = undersampling_neg(train_X)
print("train size after undersampling negative examples, ", train_X.shape)
print("undersampling heldout set ...")
validation_X = undersampling_neg(validation_X)
print("checking dataset balance ...")
#print("balance after oversampling train set: ")
print("balance after undersampling train set: ")
check_dataset_balance(train_X)
print("balance after undersampling held-out set: ")
check_dataset_balance(validation_X)
print("balance after undersampling test set: ")
check_dataset_balance(test_X)
#export_data(train_X, "pheme_6392078_train_set_combined.csv")
#export_data(validation_X, "pheme_6392078_heldout_set_combined.csv")
# export_data(train_X, "{}".format(list(test_set_events)[0]), "aug_rnr_train_set_combined.csv")
# export_data(validation_X, "{}".format(list(test_set_events)[0]), "aug_rnr_heldout_set_combined.csv")
# export_data(test_X, "{}".format(list(test_set_events)[0]), "aug_rnr_test_set_combined.csv")
def check_test_set():
print("check data balance in test set: ")
test_set_path = os.path.join(os.path.dirname(__file__), '..', "..", "data","test","putinmissing-all-rnr-threads.csv")
df = load_matrix_from_csv(test_set_path, header=0, start_col_index=0, end_col_index=4)
# print(df)
check_dataset_balance(df)
# -> positive instances: 126 , negative instances: 112
def check_dataset_balance(df):
pos_inst = 0
neg_inst = 0
for row in df[:]:
if row[3] == 1:
pos_inst += 1
else:
neg_inst += 1
print("positive instances: %s , negative instances: %s" %(pos_inst, neg_inst))
print("")
return pos_inst, neg_inst
def generate_corpus_4_elmo_test():
"""
    We fine-tuned the pre-trained ELMo model on CREDBANK, on the assumption that
    the CREDBANK corpus is a credibility-focused Twitter dataset and can therefore
    serve as a representative corpus for the rumour detection domain/task.
:return:
"""
print("")
# source tweet dataset processed from PHEME dataset covering 9 events
dataset_dir = os.path.join('C:\\Data\\NLP-corpus\\PHEME-dataset', 'pheme_training')
print(os.path.exists(dataset_dir))
all_test_dataset_path = load_files_from_dataset_dir(dataset_dir)
all_events = {'charliehebdo', 'ebola-essien', 'ferguson', 'germanwings', 'gurlitt',
'ottawashooting', 'prince-toronto', 'sydneysiege', 'putinmissing'}
all_set_path = list(filter(None, [os.path.join(dataset_dir, individual_data_set_path) if individual_data_set_path.split('-')[0]
in all_events else ''
for individual_data_set_path in all_test_dataset_path]))
print("all event data set_path: ", all_set_path)
X = None
for event_set_file in all_set_path:
df = load_matrix_from_csv(event_set_file, header=0, start_col_index=0, end_col_index=4)
print("dataset loaded from %s" % event_set_file)
print("current event set size: ", df[:].shape)
if X is None:
X = df[:]
else:
X = np.append(X, df[:], axis=0)
# have 6178 source tweets available in PHEME dataset (6392078)
print("all PHEME source tweets are loaded: ", X.shape)
print("Export PHEME corpus: ")
pheme_data_output_dir = os.path.join(os.path.dirname(__file__), '..', '..', "output", "elmo")
    os.makedirs(pheme_data_output_dir, exist_ok=True)
pheme_source_tweet_corpus_path = os.path.join(pheme_data_output_dir, "pheme_source_tweet_corpus.txt")
with open(pheme_source_tweet_corpus_path, mode='w', encoding='utf-8') as outputfile:
for row in X[:]:
normed_text_tokens = preprocessing_tweet_text(row[2])
if len(normed_text_tokens) > 0:
outputfile.write("%s\n" % " ".join(normed_text_tokens))
print("done")
if __name__ == '__main__':
#generate_corpus_4_elmo_test()
#my_path = os.path.join('c:\\','Data', 'NLP-corpus', 'aug_rnr', 'aug-rnr-merge-data-14052019-balance')
# my_path = os.path.join('..','..','data','social_context','aug-rnr-merge-data-23052019-balance')
# my_path = os.path.join('..','..','data','social_context','aug-rnr-merge-data-13062019')
# my_path = os.path.join('..','..','data','social_context','pheme-early')
# C:\Data\NLP-corpus\aug_rnr\test_aug_data
# my_path = os.path.join('c:\\','Data', 'NLP-corpus', 'aug_rnr', 'test_aug_data')
# generate_development_set(my_path, output_dir='baseline')
# development_set_path = os.path.join('c:\\','Data', 'NLP-corpus', 'aug_rnr', 'r')
development_set_path = os.path.join('..','..','data','social_context','aug_rnr_training')
# development_set_path = os.path.join('..','..','data','social_context','all_rnr_training')
test_set_path = os.path.join('..','..','data','social_context','all_rnr_training')
# test_set_path = os.path.join('..','..','data','social_context','aug_rnr_training')
generate_combined_dev_set(development_set_path, test_set_path)
# check_test_set()
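# A minimal sketch (not part of the original) of the ShuffleSplit pattern used
# in generate_combined_dev_set: precompute several splits, then pick one pair
# of train/heldout index arrays.
def _demo_shuffle_split():
    import numpy as np
    from sklearn.model_selection import ShuffleSplit
    X = np.arange(20).reshape(10, 2)
    rs = ShuffleSplit(n_splits=5, test_size=0.10, random_state=3)
    train_idx, test_idx = list(rs.split(X))[0]
    assert len(train_idx) == 9 and len(test_idx) == 1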
|
11519361
|
from click.testing import CliRunner
from healthkit_to_sqlite import cli, utils
import io
import pathlib
import pytest
import sqlite_utils
from sqlite_utils.db import ForeignKey
import zipfile
@pytest.fixture
def xml_path():
return pathlib.Path(__file__).parent / "export.xml"
@pytest.fixture
def xml_fp(xml_path):
    # yield instead of return so the file handle is closed after the test runs
    with open(xml_path, "r") as fp:
        yield fp
@pytest.fixture
def converted(xml_fp):
db = sqlite_utils.Database(":memory:")
utils.convert_xml_to_sqlite(xml_fp, db)
return db
@pytest.fixture(params=["export.xml", "exportar.xml"])
def zip_file_with_gpx(request, tmpdir):
export_xml_filename = request.param
zip_contents_path = pathlib.Path(__file__).parent / "zip_contents"
archive = str(tmpdir / "export.zip")
buf = io.BytesIO()
zf = zipfile.ZipFile(buf, "w")
for filepath in zip_contents_path.glob("**/*"):
if filepath.is_file():
arcname = filepath.relative_to(zip_contents_path)
if arcname.name == "export.xml":
arcname = arcname.parent / export_xml_filename
zf.write(filepath, str(arcname))
zf.close()
with open(archive, "wb") as fp:
fp.write(buf.getbuffer())
return tmpdir, archive
def test_help():
result = CliRunner().invoke(cli.cli, ["--help"])
assert result.output.startswith("Usage: cli")
def test_fixture(xml_fp):
assert xml_fp.read().startswith("<Health")
def test_find_all_tags(xml_fp):
findings = list(
utils.find_all_tags(xml_fp, {"Record", "Workout", "ActivitySummary"})
)
assert ["Record", "Record", "Workout", "ActivitySummary", "ActivitySummary"] == [
f[0] for f in findings
]
def test_converted_activity_summaries(converted):
actual = list(converted["activity_summary"].rows)
assert [
{
"dateComponents": "2016-11-15",
"activeEnergyBurned": "590.252",
"activeEnergyBurnedGoal": "630",
"activeEnergyBurnedUnit": "kcal",
"appleExerciseTime": "68",
"appleExerciseTimeGoal": "30",
"appleStandHours": "13",
"appleStandHoursGoal": "12",
},
{
"dateComponents": "2016-11-16",
"activeEnergyBurned": "323.513",
"activeEnergyBurnedGoal": "630",
"activeEnergyBurnedUnit": "kcal",
"appleExerciseTime": "39",
"appleExerciseTimeGoal": "30",
"appleStandHours": "9",
"appleStandHoursGoal": "12",
},
] == actual
def test_converted_workouts(converted):
actual = list(converted["workouts"].rows)
assert [
{
"id": "e615a9651eab4d95debed14c2c2f7cce0c31feed",
"workoutActivityType": "HKWorkoutActivityTypeRunning",
"duration": "5.19412346680959",
"durationUnit": "min",
"totalDistance": "0.4971749504535062",
"totalDistanceUnit": "mi",
"totalEnergyBurned": "48.74499999999999",
"totalEnergyBurnedUnit": "kcal",
"sourceName": "Apple\xa0Watch",
"sourceVersion": "3.1",
"creationDate": "2016-11-14 07:33:49 -0700",
"startDate": "2016-11-14 07:25:41 -0700",
"endDate": "2016-11-14 07:30:52 -0700",
"metadata_HKTimeZone": "America/Los_Angeles",
"metadata_HKWeatherTemperature": "56 degF",
"metadata_HKWeatherHumidity": "96 %",
"workout_events": "[]",
}
] == actual
assert [
ForeignKey(
table="workout_points",
column="workout_id",
other_table="workouts",
other_column="id",
)
] == converted["workout_points"].foreign_keys
actual_points = list(converted["workout_points"].rows)
assert [
{
"date": "2016-11-14 07:25:44 -0700",
"latitude": 37.7777,
"longitude": -122.426,
"altitude": 21.2694,
"horizontalAccuracy": 2.40948,
"verticalAccuracy": 1.67859,
"course": -1.0,
"speed": 2.48034,
"workout_id": "e615a9651eab4d95debed14c2c2f7cce0c31feed",
},
{
"date": "2016-11-14 07:25:44 -0700",
"latitude": 37.7777,
"longitude": -122.426,
"altitude": 21.2677,
"horizontalAccuracy": 2.40059,
"verticalAccuracy": 1.67236,
"course": -1.0,
"speed": 2.48034,
"workout_id": "e615a9651eab4d95debed14c2c2f7cce0c31feed",
},
] == actual_points
def test_converted_records(converted):
# These should have been recorded in rBodyMassIndex and rHeartRate
body_mass_actual = list(converted["rBodyMassIndex"].rows)
assert [
{
"sourceName": "Health Mate",
"sourceVersion": "2160040",
"unit": "count",
"creationDate": "2016-11-20 17:57:19 -0700",
"startDate": "2016-04-18 08:25:32 -0700",
"endDate": "2016-04-18 08:25:32 -0700",
"value": "22.5312",
"metadata_Health Mate App Version": "2.16.0",
"metadata_Withings User Identifier": "12345",
"metadata_Modified Date": "2016-04-18 15:56:05 +0000",
"metadata_Withings Link": "withings-bd2://timeline/measure?userid=12345&date=482685932&type=1",
"metadata_HKWasUserEntered": "0",
}
] == body_mass_actual
heart_rate_actual = list(converted["rHeartRate"].rows)
assert [
{
"sourceName": "Apple\xa0Watch",
"sourceVersion": "4.3.1",
"unit": "count/min",
"creationDate": "2018-09-10 02:47:35 -0700",
"startDate": "2018-09-10 02:28:55 -0700",
"endDate": "2018-09-10 02:28:55 -0700",
"value": "72",
"device": "<<HKDevice: 0x282a45810>, name:Apple Watch, manufacturer:Apple, model:Watch, hardware:Watch2,4, software:4.3.1>",
"metadata_HKMetadataKeyHeartRateMotionContext": "0",
}
] == heart_rate_actual
def test_cli_rejects_non_zip(xml_path, tmpdir):
result = CliRunner().invoke(cli.cli, [str(xml_path), str(tmpdir / "output.db")])
assert 1 == result.exit_code
assert (
"Error: File is not a zip file. Use --xml if you are "
"running against an XML file."
) == result.output.strip()
def test_cli_parses_xml_file(xml_path, tmpdir):
output = str(tmpdir / "output.db")
result = CliRunner().invoke(cli.cli, [str(xml_path), output, "--xml"])
assert 0 == result.exit_code
db = sqlite_utils.Database(output)
assert {
"workouts",
"workout_points",
"activity_summary",
"rHeartRate",
"rBodyMassIndex",
} == set(db.table_names())
def test_zip_file_with_gpx(zip_file_with_gpx):
tmpdir, export = zip_file_with_gpx
output = str(tmpdir / "output.db")
result = CliRunner().invoke(cli.cli, [export, output])
assert result.exit_code == 0, result.output
db = sqlite_utils.Database(output)
assert {
"workouts",
"workout_points",
"activity_summary",
"rHeartRate",
"rBodyMassIndex",
} == set(db.table_names())
# Confirm workout points from GPX were correctly imported
assert [
{
"date": "2019-06-11T22:00:42Z",
"latitude": 37.781672,
"longitude": -122.396397,
"altitude": 4.076904,
"horizontalAccuracy": 8.063116,
"verticalAccuracy": 6.428697,
"course": 206.252884,
"speed": 0.180883,
"workout_id": "e615a9651eab4d95debed14c2c2f7cce0c31feed",
},
{
"date": "2019-06-11T22:00:42Z",
"latitude": 37.78167,
"longitude": -122.396396,
"altitude": 4.083609,
"horizontalAccuracy": 8.29291,
"verticalAccuracy": 6.481525,
"course": 206.252884,
"speed": 0.116181,
"workout_id": "e615a9651eab4d95debed14c2c2f7cce0c31feed",
},
{
"date": "2019-06-11T22:00:43Z",
"latitude": 37.78167,
"longitude": -122.396394,
"altitude": 4.085232,
"horizontalAccuracy": 8.453521,
"verticalAccuracy": 6.549587,
"course": 206.252884,
"speed": 0.054395,
"workout_id": "e615a9651eab4d95debed14c2c2f7cce0c31feed",
},
] == list(db["workout_points"].rows)
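# A hedged sketch (an assumption, not the library's actual implementation) of
# the streaming behaviour test_find_all_tags exercises above: scan a large
# export with ElementTree.iterparse and yield only the tags of interest.
def _demo_find_tags(path="export.xml", tags=("Record", "Workout", "ActivitySummary")):
    from xml.etree import ElementTree
    for _, el in ElementTree.iterparse(path):
        if el.tag in tags:
            yield el.tag, dict(el.attrib)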
|
11519414
|
from .serving import run_simple as run_simple
from .test import Client as Client
from .wrappers import Request as Request
from .wrappers import Response as Response
__version__ = "2.1.0.dev0"
|
11519433
|
import shortuuid
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from urllib.parse import urljoin
from app.files import RandomFileName
from app.models import DefaultQuerySet, TimestampedModel, models
from app.tasks import send_mail
from users.models import User
class Languages(models.TextChoices):
RU = 'RU', _('Russian')
EN = 'EN', _('English')
class DiplomaQuerySet(DefaultQuerySet):
def for_viewset(self):
return self.select_related('study', 'study__student', 'study__course')
def for_user(self, user):
return self.filter(study__student=user)
class Diploma(TimestampedModel):
objects: DiplomaQuerySet = DiplomaQuerySet.as_manager()
study = models.ForeignKey('studying.Study', on_delete=models.CASCADE)
slug = models.CharField(max_length=32, db_index=True, unique=True, default=shortuuid.uuid)
language = models.CharField(max_length=3, choices=Languages.choices, db_index=True)
image = models.ImageField(upload_to=RandomFileName('diplomas'))
class Meta:
constraints = [
models.UniqueConstraint(fields=['study', 'language'], name='unique_study'),
]
indexes = [
models.Index(fields=['study', 'language']),
]
ordering = ['-id']
permissions = [
('access_all_diplomas', _('May access diplomas of all students')),
]
verbose_name = _('Diploma')
verbose_name_plural = _('Diplomas')
def get_other_languages(self) -> DiplomaQuerySet:
return self.__class__.objects.filter(study=self.study).exclude(pk=self.pk)
def get_absolute_url(self) -> str:
return urljoin(settings.DIPLOMA_FRONTEND_URL, f'/{self.slug}/')
def send_to_student(self):
send_mail.delay(
to=self.study.student.email,
template_id='new-diploma',
ctx=dict(
course_name=self.study.course.full_name,
diploma_url=self.get_absolute_url(),
),
disable_antispam=True,
)
def regenerate(self) -> int:
"""Regenerate diploma for self and all other diplomas that match given course"""
count = 0
for template in DiplomaTemplate.objects.filter(course=self.study.course):
template.generate_diploma(student=self.study.student)
count += 1
return count
class DiplomaTemplate(TimestampedModel):
course = models.ForeignKey('products.Course', on_delete=models.CASCADE)
slug = models.CharField(max_length=32, help_text=_('Check out https://is.gd/eutOYr for available templates'))
language = models.CharField(max_length=3, choices=Languages.choices, db_index=True)
class Meta:
verbose_name = _('Diploma template')
verbose_name_plural = _('Diploma templates')
constraints = [
models.UniqueConstraint(fields=['course', 'language'], name='single diploma per course option'),
]
indexes = [
models.Index(fields=['course', 'language']),
]
def generate_diploma(self, student: User):
from diplomas.tasks import generate_diploma
generate_diploma.delay(
student_id=student.pk,
course_id=self.course.pk,
language=self.language,
)
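# A hedged usage sketch (not part of the original module) of the flow the
# models above describe: a template queues an asynchronous generation task for
# one student, and Diploma.regenerate() re-queues one task per template of the
# same course.
def _demo_generate(template: DiplomaTemplate, student: User) -> None:
    template.generate_diploma(student=student)  # queues the celery task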
|
11519436
|
from socket import socket, AF_INET, SOCK_STREAM
serverName = 'localhost'
serverPort = 3001
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.connect((serverName, serverPort))
# send the script to the server, then read back up to 1KB of response
with open('testcommand2.py', 'rb') as script:
    clientSocket.sendall(script.read())
response = clientSocket.recv(1024)
clientSocket.close()
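# A hedged sketch (an assumption, not part of the original) of the server side
# this client talks to: accept one connection, read the script bytes, reply.
def _demo_server(port=3001):
    from socket import socket, AF_INET, SOCK_STREAM
    with socket(AF_INET, SOCK_STREAM) as listener:
        listener.bind(('localhost', port))
        listener.listen(1)
        conn, _ = listener.accept()
        with conn:
            data = conn.recv(65536)  # the script sent by the client
            conn.sendall(b'received %d bytes' % len(data))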
|
11519463
|
from __future__ import print_function, division
import os
import sys
import json
import pandas as pd
def convert_csv_to_dict(csv_path, subset, labels):
data = pd.read_csv(csv_path, delimiter=' ', header=None)
keys = []
key_labels = []
key_start_frame = []
key_end_frame = []
for i in range(data.shape[0]):
        row = data.iloc[i, :]  # pandas removed .ix; use positional .iloc
class_name = labels[row[1]-1]
basename = str(row[0])
start_frame = str(row[2])
end_frame = str(row[3])
keys.append(basename)
key_labels.append(class_name)
key_start_frame.append(start_frame)
key_end_frame.append(end_frame)
database = {}
for i in range(len(keys)):
key = keys[i]
if key in database: # need this because I have the same folder 3 times
key = key + '^' + str(i)
database[key] = {}
database[key]['subset'] = subset
label = key_labels[i]
start_frame = key_start_frame[i]
end_frame = key_end_frame[i]
database[key]['annotations'] = {'label': label, 'start_frame':start_frame, 'end_frame':end_frame}
return database
def load_labels(label_csv_path):
data = pd.read_csv(label_csv_path, delimiter=' ', header=None)
labels = []
for i in range(data.shape[0]):
        labels.append(str(data.iloc[i, 1]))
return labels
def convert_nv_csv_to_activitynet_json(label_csv_path, train_csv_path,
val_csv_path, dst_json_path):
labels = load_labels(label_csv_path)
train_database = convert_csv_to_dict(train_csv_path, 'training', labels)
val_database = convert_csv_to_dict(val_csv_path, 'validation', labels)
dst_data = {}
dst_data['labels'] = labels
dst_data['database'] = {}
dst_data['database'].update(train_database)
dst_data['database'].update(val_database)
with open(dst_json_path, 'w') as dst_file:
json.dump(dst_data, dst_file)
if __name__ == '__main__':
csv_dir_path = sys.argv[1]
for class_type in ['all', 'all_but_None', 'binary']:
if class_type == 'all':
class_ind_file = 'classIndAll.txt'
elif class_type == 'all_but_None':
class_ind_file = 'classIndAllbutNone.txt'
elif class_type == 'binary':
class_ind_file = 'classIndBinary.txt'
label_csv_path = os.path.join(csv_dir_path, class_ind_file)
train_csv_path = os.path.join(csv_dir_path, 'trainlist'+ class_type + '.txt')
val_csv_path = os.path.join(csv_dir_path, 'vallist'+ class_type + '.txt')
dst_json_path = os.path.join(csv_dir_path, 'nv' + class_type + '.json')
convert_nv_csv_to_activitynet_json(label_csv_path, train_csv_path,
val_csv_path, dst_json_path)
print('Successfully wrote to json : ', dst_json_path)
# HOW TO RUN:
# python nv_json.py '../annotation_nvGesture'
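# A hedged sketch of the JSON layout this script writes (keys taken from
# convert_csv_to_dict above; the values shown are illustrative only):
# {
#   "labels": ["<class 1>", "..."],
#   "database": {
#     "<video basename>": {
#       "subset": "training",
#       "annotations": {"label": "<class>", "start_frame": "1", "end_frame": "80"}
#     }
#   }
# }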
|
11519465
|
import logging
from typing import Any, Dict, List, Optional
import torch
from torch.autograd import Variable
from torch.nn import Linear, ReLU
from torch.nn.functional import nll_loss
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Highway, MatrixAttention
from allennlp.modules import Seq2SeqEncoder, SimilarityFunction, TimeDistributed, TextFieldEmbedder, FeedForward
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from allennlp.training.metrics import BooleanAccuracy, CategoricalAccuracy, SquadEmAndF1
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class FC3(torch.nn.Module):
    def __init__(self, dim):
        super(FC3, self).__init__()  # required so the submodules register correctly
        self.fc1 = Linear(dim, dim)
        self.fc2 = Linear(dim, dim)
        self.fc3 = Linear(dim, dim)
        self.relu = ReLU()
def forward(self, x):
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.relu(x)
x = self.fc3(x)
x = self.relu(x)
return x
@Model.register("bidaf")
class BidirectionalAttentionFlow(Model):
"""
This class implements <NAME>'s `Bidirectional Attention Flow model
<https://www.semanticscholar.org/paper/Bidirectional-Attention-Flow-for-Machine-Seo-Kembhavi/7586b7cca1deba124af80609327395e613a20e9d>`_
for answering reading comprehension questions (ICLR 2017).
The basic layout is pretty simple: encode words as a combination of word embeddings and a
character-level encoder, pass the word representations through a bi-LSTM/GRU, use a matrix of
attentions to put question information into the passage word representations (this is the only
part that is at all non-standard), pass this through another few layers of bi-LSTMs/GRUs, and
do a softmax over span start and span end.
Parameters
----------
vocab : ``Vocabulary``
text_field_embedder : ``TextFieldEmbedder``
Used to embed the ``question`` and ``passage`` ``TextFields`` we get as input to the model.
num_highway_layers : ``int``
The number of highway layers to use in between embedding the input and passing it through
the phrase layer.
phrase_layer : ``Seq2SeqEncoder``
The encoder (with its own internal stacking) that we will use in between embedding tokens
and doing the bidirectional attention.
attention_similarity_function : ``SimilarityFunction``
The similarity function that we will use when comparing encoded passage and question
representations.
modeling_layer : ``Seq2SeqEncoder``
The encoder (with its own internal stacking) that we will use in between the bidirectional
attention and predicting span start and end.
span_end_encoder : ``Seq2SeqEncoder``
The encoder that we will use to incorporate span start predictions into the passage state
before predicting span end.
dropout : ``float``, optional (default=0.2)
If greater than 0, we will apply dropout with this probability after all encoders (pytorch
LSTMs do not apply dropout to their last layer).
mask_lstms : ``bool``, optional (default=True)
If ``False``, we will skip passing the mask to the LSTM layers. This gives a ~2x speedup,
with only a slight performance decrease, if any. We haven't experimented much with this
yet, but have confirmed that we still get very similar performance with much faster
training times. We still use the mask for all softmaxes, but avoid the shuffling that's
required when using masking with pytorch LSTMs.
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
"""
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
num_highway_layers: int,
phrase_layer: Seq2SeqEncoder,
attention_similarity_function: SimilarityFunction,
modeling_layer: Seq2SeqEncoder,
span_end_encoder: Seq2SeqEncoder,
dropout: float = 0.2,
mask_lstms: bool = True,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super(BidirectionalAttentionFlow, self).__init__(vocab, regularizer)
self._text_field_embedder = text_field_embedder
self._highway_layer = TimeDistributed(Highway(text_field_embedder.get_output_dim(),
num_highway_layers))
self._phrase_layer = phrase_layer
self._matrix_attention = MatrixAttention(attention_similarity_function)
self._modeling_layer = modeling_layer
self._span_end_encoder = span_end_encoder
encoding_dim = phrase_layer.get_output_dim()
modeling_dim = modeling_layer.get_output_dim()
self._compat_layer = FC3(encoding_dim*4+modeling_dim)
self._compat_pred_layer = Linear(encoding_dim*4+modeling_dim,2)
span_start_input_dim = encoding_dim * 4 + modeling_dim
self._span_start_predictor = TimeDistributed(torch.nn.Linear(span_start_input_dim, 1))
span_end_encoding_dim = span_end_encoder.get_output_dim()
span_end_input_dim = encoding_dim * 4 + span_end_encoding_dim
self._span_end_predictor = TimeDistributed(torch.nn.Linear(span_end_input_dim, 1))
# Bidaf has lots of layer dimensions which need to match up - these
# aren't necessarily obvious from the configuration files, so we check
# here.
if modeling_layer.get_input_dim() != 4 * encoding_dim:
raise ConfigurationError("The input dimension to the modeling_layer must be "
"equal to 4 times the encoding dimension of the phrase_layer. "
"Found {} and 4 * {} respectively.".format(modeling_layer.get_input_dim(),
encoding_dim))
if text_field_embedder.get_output_dim() != phrase_layer.get_input_dim():
raise ConfigurationError("The output dimension of the text_field_embedder (embedding_dim + "
"char_cnn) must match the input dimension of the phrase_encoder. "
"Found {} and {}, respectively.".format(text_field_embedder.get_output_dim(),
phrase_layer.get_input_dim()))
if span_end_encoder.get_input_dim() != encoding_dim * 4 + modeling_dim * 3:
raise ConfigurationError("The input dimension of the span_end_encoder should be equal to "
"4 * phrase_layer.output_dim + 3 * modeling_layer.output_dim. "
"Found {} and (4 * {} + 3 * {}) "
"respectively.".format(span_end_encoder.get_input_dim(),
encoding_dim,
modeling_dim))
self._span_start_accuracy = CategoricalAccuracy()
self._span_end_accuracy = CategoricalAccuracy()
self._span_accuracy = BooleanAccuracy()
self._squad_metrics = SquadEmAndF1()
self._compat_accuracy = BooleanAccuracy()
if dropout > 0:
self._dropout = torch.nn.Dropout(p=dropout)
else:
self._dropout = lambda x: x
self._mask_lstms = mask_lstms
initializer(self)
def forward(self, # type: ignore
question: Dict[str, torch.LongTensor],
passage: Dict[str, torch.LongTensor],
span_start: torch.IntTensor = None,
span_end: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ
"""
Parameters
----------
question : Dict[str, torch.LongTensor]
From a ``TextField``.
passage : Dict[str, torch.LongTensor]
From a ``TextField``. The model assumes that this passage contains the answer to the
question, and predicts the beginning and ending positions of the answer within the
passage.
span_start : ``torch.IntTensor``, optional
From an ``IndexField``. This is one of the things we are trying to predict - the
beginning position of the answer with the passage. This is an `inclusive` index. If
this is given, we will compute a loss that gets included in the output dictionary.
span_end : ``torch.IntTensor``, optional
From an ``IndexField``. This is one of the things we are trying to predict - the
ending position of the answer with the passage. This is an `inclusive` index. If
this is given, we will compute a loss that gets included in the output dictionary.
metadata : ``List[Dict[str, Any]]``, optional
If present, this should contain the question ID, original passage text, and token
offsets into the passage for each instance in the batch. We use this for computing
official metrics using the official SQuAD evaluation script. The length of this list
should be the batch size, and each dictionary should have the keys ``id``,
``original_passage``, and ``token_offsets``. If you only want the best span string and
don't care about official metrics, you can omit the ``id`` key.
Returns
-------
An output dictionary consisting of:
span_start_logits : torch.FloatTensor
A tensor of shape ``(batch_size, passage_length)`` representing unnormalised log
probabilities of the span start position.
span_start_probs : torch.FloatTensor
The result of ``softmax(span_start_logits)``.
span_end_logits : torch.FloatTensor
A tensor of shape ``(batch_size, passage_length)`` representing unnormalised log
probabilities of the span end position (inclusive).
span_end_probs : torch.FloatTensor
The result of ``softmax(span_end_logits)``.
best_span : torch.IntTensor
The result of a constrained inference over ``span_start_logits`` and
``span_end_logits`` to find the most probable span. Shape is ``(batch_size, 2)``.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
best_span_str : List[str]
If sufficient metadata was provided for the instances in the batch, we also return the
string from the original passage that the model thinks is the best answer to the
question.
"""
embedded_question = self._highway_layer(self._text_field_embedder(question))
embedded_passage = self._highway_layer(self._text_field_embedder(passage))
batch_size = embedded_question.size(0)
passage_length = embedded_passage.size(1)
question_mask = util.get_text_field_mask(question).float()
passage_mask = util.get_text_field_mask(passage).float()
question_lstm_mask = question_mask if self._mask_lstms else None
passage_lstm_mask = passage_mask if self._mask_lstms else None
encoded_question = self._dropout(self._phrase_layer(embedded_question, question_lstm_mask))
encoded_passage = self._dropout(self._phrase_layer(embedded_passage, passage_lstm_mask))
encoding_dim = encoded_question.size(-1)
# Shape: (batch_size, passage_length, question_length)
passage_question_similarity = self._matrix_attention(encoded_passage, encoded_question)
# Shape: (batch_size, passage_length, question_length)
passage_question_attention = util.last_dim_softmax(passage_question_similarity, question_mask)
# Shape: (batch_size, passage_length, encoding_dim)
passage_question_vectors = util.weighted_sum(encoded_question, passage_question_attention)
# We replace masked values with something really negative here, so they don't affect the
# max below.
masked_similarity = util.replace_masked_values(passage_question_similarity,
question_mask.unsqueeze(1),
-1e7)
# Shape: (batch_size, passage_length)
question_passage_similarity = masked_similarity.max(dim=-1)[0].squeeze(-1)
# Shape: (batch_size, passage_length)
question_passage_attention = util.masked_softmax(question_passage_similarity, passage_mask)
# Shape: (batch_size, encoding_dim)
question_passage_vector = util.weighted_sum(encoded_passage, question_passage_attention)
# Shape: (batch_size, passage_length, encoding_dim)
tiled_question_passage_vector = question_passage_vector.unsqueeze(1).expand(batch_size,
passage_length,
encoding_dim)
# Shape: (batch_size, passage_length, encoding_dim * 4)
final_merged_passage = torch.cat([encoded_passage,
passage_question_vectors,
encoded_passage * passage_question_vectors,
encoded_passage * tiled_question_passage_vector],
dim=-1)
modeled_passage = self._dropout(self._modeling_layer(final_merged_passage, passage_lstm_mask))
modeling_dim = modeled_passage.size(-1)
        # Compatibility head: the original lines called self._compat_layer()
        # with no input and re-created the prediction layer inside forward(),
        # which would fail at runtime; applying the layers built in __init__
        # to the merged passage representation appears to be the intent.
        compat_input = torch.cat([final_merged_passage, modeled_passage], dim=-1)
        self._compat_logits = self._compat_pred_layer(self._compat_layer(compat_input))
# Shape: (batch_size, passage_length, encoding_dim * 4 + modeling_dim))
span_start_input = self._dropout(torch.cat([final_merged_passage, modeled_passage], dim=-1))
# Shape: (batch_size, passage_length)
span_start_logits = self._span_start_predictor(span_start_input).squeeze(-1)
# Shape: (batch_size, passage_length)
span_start_probs = util.masked_softmax(span_start_logits, passage_mask)
# Shape: (batch_size, modeling_dim)
span_start_representation = util.weighted_sum(modeled_passage, span_start_probs)
# Shape: (batch_size, passage_length, modeling_dim)
tiled_start_representation = span_start_representation.unsqueeze(1).expand(batch_size,
passage_length,
modeling_dim)
# Shape: (batch_size, passage_length, encoding_dim * 4 + modeling_dim * 3)
span_end_representation = torch.cat([final_merged_passage,
modeled_passage,
tiled_start_representation,
modeled_passage * tiled_start_representation],
dim=-1)
# Shape: (batch_size, passage_length, encoding_dim)
encoded_span_end = self._dropout(self._span_end_encoder(span_end_representation,
passage_lstm_mask))
# Shape: (batch_size, passage_length, encoding_dim * 4 + span_end_encoding_dim)
span_end_input = self._dropout(torch.cat([final_merged_passage, encoded_span_end], dim=-1))
span_end_logits = self._span_end_predictor(span_end_input).squeeze(-1)
span_end_probs = util.masked_softmax(span_end_logits, passage_mask)
span_start_logits = util.replace_masked_values(span_start_logits, passage_mask, -1e7)
span_end_logits = util.replace_masked_values(span_end_logits, passage_mask, -1e7)
best_span = self._get_best_span(span_start_logits, span_end_logits)
output_dict = {"span_start_logits": span_start_logits,
"span_start_probs": span_start_probs,
"span_end_logits": span_end_logits,
"span_end_probs": span_end_probs,
"best_span": best_span}
if span_start is not None:
loss = nll_loss(util.masked_log_softmax(span_start_logits, passage_mask), span_start.squeeze(-1))
self._span_start_accuracy(span_start_logits, span_start.squeeze(-1))
loss += nll_loss(util.masked_log_softmax(span_end_logits, passage_mask), span_end.squeeze(-1))
self._span_end_accuracy(span_end_logits, span_end.squeeze(-1))
self._span_accuracy(best_span, torch.stack([span_start, span_end], -1))
output_dict["loss"] = loss
if metadata is not None:
output_dict['best_span_str'] = []
for i in range(batch_size):
passage_str = metadata[i]['original_passage']
offsets = metadata[i]['token_offsets']
predicted_span = tuple(best_span[i].data.cpu().numpy())
start_offset = offsets[predicted_span[0]][0]
end_offset = offsets[predicted_span[1]][1]
best_span_string = passage_str[start_offset:end_offset]
output_dict['best_span_str'].append(best_span_string)
answer_texts = metadata[i].get('answer_texts', [])
if answer_texts:
self._squad_metrics(best_span_string, answer_texts)
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
exact_match, f1_score = self._squad_metrics.get_metric(reset)
return {
'start_acc': self._span_start_accuracy.get_metric(reset),
'end_acc': self._span_end_accuracy.get_metric(reset),
'span_acc': self._span_accuracy.get_metric(reset),
'em': exact_match,
'f1': f1_score,
}
@staticmethod
def _get_best_span(span_start_logits: Variable, span_end_logits: Variable) -> Variable:
if span_start_logits.dim() != 2 or span_end_logits.dim() != 2:
raise ValueError("Input shapes must be (batch_size, passage_length)")
batch_size, passage_length = span_start_logits.size()
max_span_log_prob = [-1e20] * batch_size
span_start_argmax = [0] * batch_size
best_word_span = Variable(span_start_logits.data.new()
.resize_(batch_size, 2).fill_(0)).long()
span_start_logits = span_start_logits.data.cpu().numpy()
span_end_logits = span_end_logits.data.cpu().numpy()
for b in range(batch_size): # pylint: disable=invalid-name
for j in range(passage_length):
val1 = span_start_logits[b, span_start_argmax[b]]
if val1 < span_start_logits[b, j]:
span_start_argmax[b] = j
val1 = span_start_logits[b, j]
val2 = span_end_logits[b, j]
if val1 + val2 > max_span_log_prob[b]:
best_word_span[b, 0] = span_start_argmax[b]
best_word_span[b, 1] = j
max_span_log_prob[b] = val1 + val2
return best_word_span
@classmethod
def from_params(cls, vocab: Vocabulary, params: Params) -> 'BidirectionalAttentionFlow':
embedder_params = params.pop("text_field_embedder")
text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
num_highway_layers = params.pop("num_highway_layers")
phrase_layer = Seq2SeqEncoder.from_params(params.pop("phrase_layer"))
similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
modeling_layer = Seq2SeqEncoder.from_params(params.pop("modeling_layer"))
span_end_encoder = Seq2SeqEncoder.from_params(params.pop("span_end_encoder"))
dropout = params.pop('dropout', 0.2)
initializer = InitializerApplicator.from_params(params.pop('initializer', []))
regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
mask_lstms = params.pop('mask_lstms', True)
params.assert_empty(cls.__name__)
return cls(vocab=vocab,
text_field_embedder=text_field_embedder,
num_highway_layers=num_highway_layers,
phrase_layer=phrase_layer,
attention_similarity_function=similarity_function,
modeling_layer=modeling_layer,
span_end_encoder=span_end_encoder,
dropout=dropout,
mask_lstms=mask_lstms,
initializer=initializer,
regularizer=regularizer)
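# A minimal, self-contained sketch (not part of the model above) of the
# passage-to-question attention step described in the class docstring:
# similarity scores -> softmax over question words -> weighted sum.
def _demo_c2q_attention():
    import torch
    torch.manual_seed(0)
    batch, p_len, q_len, dim = 2, 5, 4, 8
    passage = torch.randn(batch, p_len, dim)
    question = torch.randn(batch, q_len, dim)
    similarity = passage @ question.transpose(1, 2)  # (batch, p_len, q_len)
    attention = torch.softmax(similarity, dim=-1)    # normalise over question words
    attended = attention @ question                  # (batch, p_len, dim)
    assert attended.shape == (batch, p_len, dim)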
|
11519475
|
from pymongo import MongoClient
import app
import settings
def connect(collection):
client = MongoClient()
d = client[settings.MONGO_DATABASE]
return d[collection]
def bake():
    for route in ['index']:
        with app.app.test_request_context(path="/%s.html" % route):
            view = globals()['app'].__dict__[route]
            file_path = "www/%s.html" % route
            html = view().encode('utf-8')
            # html is utf-8 bytes, so the output file must be opened in binary mode
            with open(file_path, "wb") as writefile:
                writefile.write(html)
            print("Wrote %s" % file_path)
|
11519488
|
from typing import List
class Solution:
def sumSubseqWidths(self, A: List[int]) -> int:
MOD = 10 ** 9 + 7
A.sort()
total = 0
c = 1
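        # After sorting, A[i] is the maximum of 2**i subsequences and the
        # minimum of 2**(n-1-i) of them, so the answer is the sum of
        # A[i] * (2**i - 2**(n-1-i)); the loop accumulates both terms with a
        # single running power c = 2**i (mod 1e9+7).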
for i in range(len(A)):
total = (total + A[i] * c - A[len(A) - i - 1] * c) % MOD
c = (c * 2) % MOD
return total
|
11519517
|
from base import BaseModel
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
from itertools import chain
from math import ceil
class SegNet(BaseModel):
    def __init__(self, num_classes, in_channels=3, pretrained=True, freeze_bn=False, freeze_backbone=False, **_):
super(SegNet, self).__init__()
vgg_bn = models.vgg16_bn(pretrained= pretrained)
encoder = list(vgg_bn.features.children())
# Adjust the input size
if in_channels != 3:
encoder[0] = nn.Conv2d(in_channels, 64, kernel_size=3, stride=1, padding=1)
# Encoder, VGG without any maxpooling
self.stage1_encoder = nn.Sequential(*encoder[:6])
self.stage2_encoder = nn.Sequential(*encoder[7:13])
self.stage3_encoder = nn.Sequential(*encoder[14:23])
self.stage4_encoder = nn.Sequential(*encoder[24:33])
self.stage5_encoder = nn.Sequential(*encoder[34:-1])
self.pool = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
# Decoder, same as the encoder but reversed, maxpool will not be used
decoder = encoder
decoder = [i for i in list(reversed(decoder)) if not isinstance(i, nn.MaxPool2d)]
# Replace the last conv layer
decoder[-1] = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
# When reversing, we also reversed conv->batchN->relu, correct it
decoder = [item for i in range(0, len(decoder), 3) for item in decoder[i:i+3][::-1]]
# Replace some conv layers & batchN after them
for i, module in enumerate(decoder):
if isinstance(module, nn.Conv2d):
if module.in_channels != module.out_channels:
decoder[i+1] = nn.BatchNorm2d(module.in_channels)
decoder[i] = nn.Conv2d(module.out_channels, module.in_channels, kernel_size=3, stride=1, padding=1)
self.stage1_decoder = nn.Sequential(*decoder[0:9])
self.stage2_decoder = nn.Sequential(*decoder[9:18])
self.stage3_decoder = nn.Sequential(*decoder[18:27])
self.stage4_decoder = nn.Sequential(*decoder[27:33])
self.stage5_decoder = nn.Sequential(*decoder[33:],
nn.Conv2d(64, num_classes, kernel_size=3, stride=1, padding=1)
)
self.unpool = nn.MaxUnpool2d(kernel_size=2, stride=2)
self._initialize_weights(self.stage1_decoder, self.stage2_decoder, self.stage3_decoder,
self.stage4_decoder, self.stage5_decoder)
if freeze_bn: self.freeze_bn()
if freeze_backbone:
set_trainable([self.stage1_encoder, self.stage2_encoder, self.stage3_encoder, self.stage4_encoder, self.stage5_encoder], False)
def _initialize_weights(self, *stages):
for modules in stages:
for module in modules.modules():
if isinstance(module, nn.Conv2d):
nn.init.kaiming_normal_(module.weight)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.BatchNorm2d):
module.weight.data.fill_(1)
module.bias.data.zero_()
def forward(self, x):
# Encoder
x = self.stage1_encoder(x)
x1_size = x.size()
x, indices1 = self.pool(x)
x = self.stage2_encoder(x)
x2_size = x.size()
x, indices2 = self.pool(x)
x = self.stage3_encoder(x)
x3_size = x.size()
x, indices3 = self.pool(x)
x = self.stage4_encoder(x)
x4_size = x.size()
x, indices4 = self.pool(x)
x = self.stage5_encoder(x)
x5_size = x.size()
x, indices5 = self.pool(x)
# Decoder
x = self.unpool(x, indices=indices5, output_size=x5_size)
x = self.stage1_decoder(x)
x = self.unpool(x, indices=indices4, output_size=x4_size)
x = self.stage2_decoder(x)
x = self.unpool(x, indices=indices3, output_size=x3_size)
x = self.stage3_decoder(x)
x = self.unpool(x, indices=indices2, output_size=x2_size)
x = self.stage4_decoder(x)
x = self.unpool(x, indices=indices1, output_size=x1_size)
x = self.stage5_decoder(x)
return x
def get_backbone_params(self):
return []
def get_decoder_params(self):
return self.parameters()
def freeze_bn(self):
for module in self.modules():
if isinstance(module, nn.BatchNorm2d): module.eval()
class DecoderBottleneck(nn.Module):
def __init__(self, inchannels):
super(DecoderBottleneck, self).__init__()
self.conv1 = nn.Conv2d(inchannels, inchannels//4, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(inchannels//4)
self.conv2 = nn.ConvTranspose2d(inchannels//4, inchannels//4, kernel_size=2, stride=2, bias=False)
self.bn2 = nn.BatchNorm2d(inchannels//4)
self.conv3 = nn.Conv2d(inchannels//4, inchannels//2, 1, bias=False)
self.bn3 = nn.BatchNorm2d(inchannels//2)
self.relu = nn.ReLU(inplace=True)
self.downsample = nn.Sequential(
nn.ConvTranspose2d(inchannels, inchannels//2, kernel_size=2, stride=2, bias=False),
nn.BatchNorm2d(inchannels//2))
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class LastBottleneck(nn.Module):
def __init__(self, inchannels):
super(LastBottleneck, self).__init__()
self.conv1 = nn.Conv2d(inchannels, inchannels//4, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(inchannels//4)
self.conv2 = nn.Conv2d(inchannels//4, inchannels//4, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(inchannels//4)
self.conv3 = nn.Conv2d(inchannels//4, inchannels//4, 1, bias=False)
self.bn3 = nn.BatchNorm2d(inchannels//4)
self.relu = nn.ReLU(inplace=True)
self.downsample = nn.Sequential(
nn.Conv2d(inchannels, inchannels//4, kernel_size=1, bias=False),
nn.BatchNorm2d(inchannels//4))
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class SegResNet(BaseModel):
    def __init__(self, num_classes, in_channels=3, pretrained=True, freeze_bn=False, freeze_backbone=False, **_):
super(SegResNet, self).__init__()
resnet50 = models.resnet50(pretrained=pretrained)
encoder = list(resnet50.children())
if in_channels != 3:
encoder[0] = nn.Conv2d(in_channels, 64, kernel_size=3, stride=1, padding=1)
encoder[3].return_indices = True
# Encoder
self.first_conv = nn.Sequential(*encoder[:4])
resnet50_blocks = list(resnet50.children())[4:-2]
self.encoder = nn.Sequential(*resnet50_blocks)
# Decoder
resnet50_untrained = models.resnet50(pretrained=False)
resnet50_blocks = list(resnet50_untrained.children())[4:-2][::-1]
decoder = []
channels = (2048, 1024, 512)
for i, block in enumerate(resnet50_blocks[:-1]):
new_block = list(block.children())[::-1][:-1]
decoder.append(nn.Sequential(*new_block, DecoderBottleneck(channels[i])))
new_block = list(resnet50_blocks[-1].children())[::-1][:-1]
decoder.append(nn.Sequential(*new_block, LastBottleneck(256)))
self.decoder = nn.Sequential(*decoder)
self.last_conv = nn.Sequential(
nn.ConvTranspose2d(64, 64, kernel_size=2, stride=2, bias=False),
nn.Conv2d(64, num_classes, kernel_size=3, stride=1, padding=1)
)
if freeze_bn: self.freeze_bn()
if freeze_backbone:
set_trainable([self.first_conv, self.encoder], False)
def forward(self, x):
inputsize = x.size()
# Encoder
x, indices = self.first_conv(x)
x = self.encoder(x)
# Decoder
x = self.decoder(x)
h_diff = ceil((x.size()[2] - indices.size()[2]) / 2)
w_diff = ceil((x.size()[3] - indices.size()[3]) / 2)
if indices.size()[2] % 2 == 1:
x = x[:, :, h_diff:x.size()[2]-(h_diff-1), w_diff: x.size()[3]-(w_diff-1)]
else:
x = x[:, :, h_diff:x.size()[2]-h_diff, w_diff: x.size()[3]-w_diff]
x = F.max_unpool2d(x, indices, kernel_size=2, stride=2)
x = self.last_conv(x)
if inputsize != x.size():
h_diff = (x.size()[2] - inputsize[2]) // 2
w_diff = (x.size()[3] - inputsize[3]) // 2
x = x[:, :, h_diff:x.size()[2]-h_diff, w_diff: x.size()[3]-w_diff]
if h_diff % 2 != 0: x = x[:, :, :-1, :]
if w_diff % 2 != 0: x = x[:, :, :, :-1]
return x
def get_backbone_params(self):
return chain(self.first_conv.parameters(), self.encoder.parameters())
def get_decoder_params(self):
return chain(self.decoder.parameters(), self.last_conv.parameters())
def freeze_bn(self):
for module in self.modules():
if isinstance(module, nn.BatchNorm2d): module.eval()
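# --- Usage sketch (illustrative assumption: torch is available and this file
# is imported as a module; the output shape follows the crop/unpool logic in
# forward() above) ---
# model = SegResNet(num_classes=21, in_channels=3, pretrained=False)
# x = torch.randn(1, 3, 224, 224)
# y = model(x)  # -> torch.Size([1, 21, 224, 224]), cropped back to the input size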
|
11519536
|
import datetime
import subprocess
import threading
import random
import queue
import atexit
import json
import time
import sys
import re
import os
class Remote:
def __init__(self, address=None, port=None, identity_file=None):
self.address = address
self.port = port or 22
self.ifile = identity_file
def __hash__(self):
return hash((self.address, self.port, self.ifile))
def __eq__(self, other):
return (isinstance(other, type(self))
and self.address == other.address
and self.port == other.port
and self.ifile == other.ifile
)
def from_json(obj):
return Remote(obj.get("address"), obj.get("port"), obj.get("identity_file"))
default_remotes = [Remote()] # local
terminals = {} # terminals (SSH/local)
def eprint(message):
sys.stderr.write(f'{message}\n')
# get time in milliseconds
def millis():
return int(1000 * time.time())
def root():
if os.geteuid() != 0:
eprint('Need to run as root.')
exit(1)
def load_json(path):
with open(path) as file:
return json.load(file)
def seed_random(value):
random.seed(value)
def sleep(seconds):
time.sleep(seconds)
def wait(beg_ms, until_sec):
now_ms = millis()
# wait until time is over
if (now_ms - beg_ms) < (until_sec * 1000):
time.sleep(((until_sec * 1000) - (now_ms - beg_ms)) / 1000.0)
else:
eprint('Wait timeout already passed by {:.2f}sec'.format(((now_ms - beg_ms) - (until_sec * 1000)) / 1000))
stop_all_terminals()
exit(1)
'''
Add links to network to make sure
it is fully connected.
'''
def make_connected(network):
neighbors = convert_to_neighbors(network)
clusters = _get_clusters_sets(neighbors)
def get_unique_id(neighbors, i = 0):
if f'ic-{i}' not in neighbors:
return f'ic-{i}'
else:
return get_unique_id(neighbors, i + 1)
def get_center_node(neighbors, cluster):
max_neighbors = 0
center_node = None
for sid, neighs in neighbors.items():
if sid in cluster and len(neighs) >= max_neighbors:
max_neighbors = len(neighs)
center_node = sid
return center_node
if len(clusters) > 1:
central = get_unique_id(neighbors)
# connect all clusters via central node
for cluster in clusters:
center = get_center_node(neighbors, cluster)
network['links'].append({'source': central, 'target': center, 'type': 'vpn'})
def json_count(path):
obj = path
if isinstance(path, str):
with open(path) as file:
obj = json.load(file)
links = obj.get('links', [])
nodes = {}
for link in links:
nodes[link['source']] = 0
nodes[link['target']] = 0
return (len(nodes), len(links))
# add titles and values to a CSV file
def csv_update(file, delimiter, *args):
titles = list()
values = list()
for arg in args:
titles += arg[0]
values += arg[1]
# convert elements to str
titles = [str(t) for t in titles]
values = [str(v) for v in values]
if file.tell() == 0:
file.write(delimiter.join(titles) + '\n')
file.write(delimiter.join(values) + '\n')
def sysload(remotes=default_remotes):
load1 = 0
load5 = 0
load15 = 0
for remote in remotes:
stdout = exec(remote, 'uptime', get_output=True)[0]
t = stdout.split('load average:')[1].split(',')
load1 += float(t[0])
load5 += float(t[1])
load15 += float(t[2])
titles = ['load1', 'load5', 'load15']
values = [load1 / len(remotes), load5 / len(remotes), load15 / len(remotes)]
return (titles, values)
def create_process(remote, command, add_quotes=True):
if remote.address:
if add_quotes:
command = command.replace('\'', '\\\'') # need to escape
command = f'\'{command}\''
if remote.ifile:
command = f'ssh -p {remote.port} -i {remote.ifile} root@{remote.address} {command}'
else:
command = f'ssh -p {remote.port} root@{remote.address} {command}'
else:
# local terminal
command = f'{command}'
return subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
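# For example (a sketch of the expansion above), exec() with
# Remote(address='10.0.0.2') and command "ip a" runs roughly:
#   ssh -p 22 root@10.0.0.2 'ip a'
# while a Remote() with no address runs "ip a" in a local shell.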
'''
SSH or local terminal thread
for a higher execution speed
'''
class TerminalThread(threading.Thread):
def __init__(self, remote):
super(TerminalThread, self).__init__()
self.remote = remote
self.finish = False
self.queue = queue.Queue()
self.output_lock = threading.Lock()
self.output = {}
self.start()
def run(self):
while True:
try:
# might raise Empty
(ignore_error, get_output, add_quotes, command) = self.queue.get(block=True, timeout=1)
p = create_process(self.remote, command, add_quotes)
(std, err) = p.communicate()
stdout = std.decode()
errout = err.decode()
if p.returncode != 0 and not ignore_error:
label = self.remote.address or 'local'
eprint(errout)
eprint(stdout)
eprint(f'Abort, command failed on {label}: {command}')
eprint('Network might be in an undefined state!')
exit(1)
if get_output:
# always store the result so a waiting exec() call can pick it up
self.output_lock.acquire()
self.output[command] = (stdout, errout, p.returncode)
self.output_lock.release()
except queue.Empty:
# try again or finish loop
if self.finish:
break
except Exception as e:
eprint(e)
exit(1)
def exec(remote, command, get_output=False, ignore_error=False, add_quotes=True):
if remote not in terminals:
terminals[remote] = TerminalThread(remote)
t = terminals[remote]
t.queue.put((ignore_error, get_output, add_quotes, command))
while get_output:
t.output_lock.acquire()
result = t.output.pop(command, None)
t.output_lock.release()
if result:
return result
time.sleep(0.05)
'''
The open terminal threads block our
program to exit if not finished
'''
def stop_all_terminals():
for term in terminals.values():
term.finish = True
for term in terminals.values():
term.join()
def wait_for_completion():
for term in terminals.values():
while term.queue.qsize() != 0:
time.sleep(0.1)
# id independent of source/target direction
def link_id(source, target):
if source > target:
return f'{source}-{target}'
else:
return f'{target}-{source}'
def get_current_state(remotes):
links = {}
nodes = []
rmap = {}
node_re = re.compile(r'\d+: br-([^:]+)')
link_re = re.compile(r'\d+: ve-([^@:]+).*(?<= master )br-([^ ]+)')
for remote in remotes:
stdout, stderr, rcode = exec(remote, 'ip netns exec "switch" ip a l || true', get_output=True)
for line in stdout.splitlines():
m = link_re.search(line)
if m:
ifname = m.group(1) # without ve-
master = m.group(2) # without br-
source = ifname[:len(master)]
target = ifname[len(master) + 1:]
lid = link_id(source, target)
if lid not in links:
links[lid] = {'source': source, 'target': target}
m = node_re.search(line)
if m:
ifname = m.group(1) # without br-
nodes.append({'id': ifname})
rmap[ifname] = remote
return ({'nodes': nodes, 'links': list(links.values())}, rmap)
def get_remote_mapping(remotes):
rmap = {}
for remote in remotes:
(stdout, _, _) = exec(remote, 'ip netns list', get_output=True)
for line in stdout.split():
if line.startswith('ns-'):
rmap[line.strip()[3:]] = remote
return rmap
# create a neighbor dict from a json network description
def convert_to_neighbors(*networks):
neighbors = {}
for network in networks:
# create a structure we can use efficiently
for node in network.get('nodes', []):
neighbors.setdefault(str(node['id']), set())
for link in network.get('links', []):
source = str(link['source'])
target = str(link['target'])
neighbors.setdefault(source, set()).add(target)
neighbors.setdefault(target, set()).add(source)
ret = {}
for key, value in neighbors.items():
ret[key] = list(value)
return ret
def check_access(remotes):
# single empty remote with no address => local
if len(remotes) == 1 and remotes[0].address is None:
if os.geteuid() == 0:
# we are root
return
else:
eprint('Local setup needs to run as root.')
stop_all_terminals()
exit(1)
for remote in remotes:
if remote.address is None:
eprint('Need external address for all remotes.')
stop_all_terminals()
exit(1)
# check if we can execute something
(stdout, stderr, rcode) = exec(remote, 'true', get_output=True, ignore_error=True)
if rcode != 0:
eprint(stdout)
eprint(stderr)
stop_all_terminals()
exit(1)
def format_duration(time_ms):
d, remainder = divmod(time_ms, 24 * 60 * 60 * 1000)
h, remainder = divmod(remainder, 60 * 60 * 1000)
m, remainder = divmod(remainder, 60 * 1000)
s, remainder = divmod(remainder, 1000)
ms = remainder
if d > 0:
if h > 0:
return '{}.{}d'.format(int(d), int(h))
return '{}d'.format(int(d))
elif h > 0:
if m > 0:
return '{}.{}h'.format(int(h), int(m))
return '{}h'.format(int(h))
elif m > 0:
if s > 0:
return '{}.{}m'.format(int(m), int(s))
return '{}m'.format(int(m))
elif s > 0:
if ms > 0:
return '{}.{}s'.format(int(s), int(ms))
return '{}s'.format(int(s))
else:
return '{}ms'.format(int(ms))
def format_size(bytes):
if bytes < 1000:
return f'{bytes:.2f} B'
elif bytes < 1000_000:
return f'{bytes / 1000:.2f} K'
elif bytes < 1000_000_000:
return f'{bytes / 1000_000:.2f} M'
else:
return f'{bytes / 1000_000_000:.2f} G'
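if __name__ == '__main__':
    # Minimal self-check sketch for the pure helpers above (assumes the file
    # is run standalone; the SSH/terminal machinery is not exercised here).
    assert link_id('a', 'b') == link_id('b', 'a')
    print(format_duration(90 * 1000))  # '1.30m'
    print(format_size(1500))           # '1.50 K'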
|
11519587
|
import numpy as np
from scipy.linalg import expm
def cost(seq):
N=len(seq)
dt=2*np.pi/N
sx=1/2 * np.mat([[0,1],\
[1,0]], dtype=complex)
sz=1/2 * np.mat([[1,0],\
[0,-1]], dtype=complex)
U = np.matrix(np.identity(2, dtype=complex)) #initial Evolution operator
J=4 # control field strength
for ii in seq:
H =ii * J * sz + 1*sx # Hamiltonian
U = expm(-1j * H * dt) * U # Evolution operator
p0=np.mat([[1],[0]], dtype=complex) #initial state
pt=U * p0 #final state
target = np.mat([[0], [1]], dtype=complex) # south pole
err = 1-(np.abs(pt.H * target)**2).item(0).real #infidelity (to make it as small as possible)
return err
delta=0.01
cost_hist = []
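# The update below is a random-direction finite-difference scheme (SPSA-like):
# each iteration picks a random direction v, estimates the directional
# derivative of cost() along v by central differences with step `delta`,
# and moves the whole pulse sequence against it.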
def gradient_descent(x, dim, learning_rate, num_iterations):
for i in range(num_iterations):
v=np.random.rand(dim)
xp=x+v*delta
xm=x-v*delta
error_derivative = (cost(xp) - cost(xm))/(2*delta)
x = x - (learning_rate) * error_derivative*v
cost_hist.append(cost(xp))
return cost(x)
N = 20
seq = np.random.rand(N)
ep_max = 500
fidelity = 1-gradient_descent(seq, N, 0.01, ep_max)
print('Final_fidelity=',fidelity)
|
11519588
|
import json
import sys, os.path
import os
storage = (os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..')) + '\\storageManager')
sys.path.append(storage)
error_path = (os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..')) + '\\ERROR')
sys.path.append(error_path)
response_path = (os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..')) + '\\Response')
sys.path.append(response_path)
storage = (os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..')) + '\\typeChecker\\')
sys.path.append(storage)
where_path = (os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) + '\\Where')
sys.path.append(where_path)
from jsonMode import *
from typeChecker.typeChecker import *
from Error import Error
from Response.Response import Response
from Where import Where
tc = TypeChecker()
class Column():
def __init__(self):
self.name = None
self.type = None
self.specificType = None
self.default = None
self.isNull = None
self.isUnique = None
self.uniqueName = None
self.size = None
self.isPrimary = None
self.referencesTable = None
self.isCheck = None
self.referenceColumn = None
class UpdateTable():
def __init__(self):
self.name = None
self.traeCols = 0
self.columnas = []
self.values = []
self.checkers = []
self.listaids = []
def obtenerColumnasDictionary(self,dbUse, tabla):
# Get the column dictionary for the table from Storage.
# Only used for SELECTs over tables; in some cases tables can be
# received as parameters, e.g. in subqueries.
tc = TypeChecker()
listTemp = tc.return_columnsJSON(dbUse, tabla.upper())
listaCols = []
if listTemp != None:
for col in listTemp:
listaCols.append([col['name'], col['type']])
return [listaCols]
return [[]]
def execute(self, parent, enviroment):
# GET TABLES, FIELDS, VALUES
for hijo in parent.hijos:
if "TABLE" == hijo.nombreNodo:
self.name = hijo.valor
elif "LISTA_UPDATE" == hijo.nombreNodo:
self.traeCols = 1
for h in hijo.hijos:
for i in h.hijos:
if "COL" == i.nombreNodo:
self.columnas.append(i)
else:
self.values.append(i)
# CURRENT DB INFO
with open('src/Config/Config.json') as file:
config = json.load(file)
if config['databaseIndex'] is None:
err_resp = Error(3,"No se ha seleccionado ninguna base de datos.",-1)
resp = Response("42P12",err_resp)
return resp
useDB = config['databaseIndex'].upper()
listTables = showTables(useDB)
if not(self.name.upper() in listTables) :
err_resp = Error(3,"No existe la tabla en la DB.",-1)
resp = Response("42P12",err_resp)
return resp
# INFO TO INSERT
tablename = self.name.upper()
print('DB ACTUAL', useDB)
print('Tabla Update',tablename)
columnasI = []
valoresI = []
val_update = {}
l_col = tc.return_columnsObject(useDB,tablename)
for h in self.columnas:
columnasI.append(h.valor.upper())
for h in self.values:
valoresI.append(h.valor)
contador = 0
contador2 = 0
for h in l_col:
for j in columnasI:
if h.name.upper() == j:
val_update[contador] = valoresI[contador2]
contador2 = contador2 + 1
contador2 = 0
contador = contador + 1
# VALIDATIONS
if len(parent.hijos) < 4:
# Update without WHERE
res = update(useDB, tablename, val_update,['0'])
print('update code: ', res )
if res == 0:
resp = Response("00000","Se actualizo el valor en la tabla")
else:
err_resp = Error(3,"No se logro actualizar el valor",-1)
resp = Response("42P12",err_resp)
return resp
else:
# Update with WHERE
parent_node = parent.hijos[3]
raw_matrix = [extractTable(useDB,tablename)]
columnas = self.obtenerColumnasDictionary(useDB,tablename)
tablas = [[tablename,tablename]]
nuevoWhere = Where()
listaResult = nuevoWhere.execute(parent_node, raw_matrix, tablas, columnas,None)
print('listaREsult ', listaResult)
res = ""
for elemento in listaResult:
if elemento[0] is True:
res = update(useDB, tablename, val_update, [str(elemento[1])])
print('update code: ',res)
if res == 0:
resp = Response("00000","Se actualizo el valor en la tabla")
else:
err_resp = Error(3,"No se logro actualizar el valor",-1)
resp = Response("42P12",err_resp)
return resp
def validar_tipo(self, tipo: int, variable: str):
print('Variable ', variable, ' tipo ', tipo)
variable = str(variable)
try:
if tipo == 1:
variable = variable.replace(" ","")
variable = variable.replace(",","")
int(variable)
elif tipo == 2:
print('caracter')
elif tipo == 4:
if not(variable.upper() == 'TRUE' or variable.upper() == 'FALSE'):
return 0
else:
print('No tiene validacion')
return 1
except:
print('La variable ', variable, ' no coincide con el tipo ',tipo)
return 0
|
11519617
|
from tkinter import *
root = Tk()
b = Button(root, text="Delete me", command=lambda: b.pack_forget())
b.pack()
root.mainloop()
|
11519622
|
r"""
Fermionic Ghosts Super Lie Conformal Algebra
The *Fermionic-ghosts* or b--c system super Lie conformal algebra
with `2n` generators is the H-graded super Lie conformal algebra
generated by odd vectors `b_i, c_i, i = 1,\ldots,n` and a central
element `K`, with non-vanishing `\lambda`-brackets:
.. MATH::
[{b_i}_\lambda c_j] = \delta_{ij} K.
The generators `b_i` have degree `1` while the generators `c_i`
have degree `0`.
AUTHORS:
- <NAME> (2020-06-03): Initial implementation.
"""
#******************************************************************************
# Copyright (C) 2020 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from .graded_lie_conformal_algebra import GradedLieConformalAlgebra
class FermionicGhostsLieConformalAlgebra(GradedLieConformalAlgebra):
r"""
The Fermionic ghosts or `bc`-system super Lie conformal algebra.
INPUT:
- ``R`` -- a commutative ring; the base ring of this Lie
conformal algebra
- ``ngens`` -- an even positive Integer (default: ``2``); The
number of non-central generators of this Lie conformal
algebra.
- ``names`` -- a tuple of ``str``; alternative names for the
generators
- ``index_set`` -- an enumerated set; alternative indexing
set for the generators.
OUTPUT:
The Fermionic Ghosts super Lie conformal algebra with generators
`b_i,c_i, i=1,\ldots,n` and `K` where `2n` is ``ngens``.
EXAMPLES::
sage: R = lie_conformal_algebras.FermionicGhosts(QQ); R
The Fermionic ghosts Lie conformal algebra with generators (b, c, K) over Rational Field
sage: R.inject_variables()
Defining b, c, K
sage: b.bracket(c) == c.bracket(b)
True
sage: b.degree()
1
sage: c.degree()
0
sage: R.category()
Category of H-graded super finitely generated Lie conformal algebras with basis over Rational Field
sage: R = lie_conformal_algebras.FermionicGhosts(QQbar, ngens=4, names = 'abcd');R
The Fermionic ghosts Lie conformal algebra with generators (a, b, c, d, K) over Algebraic Field
sage: R.structure_coefficients()
Finite family {('a', 'c'): ((0, K),), ('b', 'd'): ((0, K),), ('c', 'a'): ((0, K),), ('d', 'b'): ((0, K),)}
"""
def __init__(self,R,ngens=2,names=None,index_set=None):
"""
Initialize self.
TESTS::
sage: V = lie_conformal_algebras.FermionicGhosts(QQ)
sage: TestSuite(V).run()
"""
try:
assert (ngens > 0 and ngens % 2 == 0)
except AssertionError:
raise ValueError("ngens should be an even positive integer, " +
"got {}".format(ngens))
latex_names = None
if (names is None) and (index_set is None):
from sage.misc.defaults import variable_names as varnames
from sage.misc.defaults import latex_variable_names as laxnames
names = varnames(ngens//2,'b') + varnames(ngens//2,'c')
latex_names = tuple(laxnames(ngens//2,'b') +\
laxnames(ngens//2,'c')) + ('K',)
from sage.structure.indexed_generators import \
standardize_names_index_set
names,index_set = standardize_names_index_set(names=names,
index_set=index_set,
ngens=ngens)
from sage.matrix.special import identity_matrix
A = identity_matrix(R,ngens//2)
from sage.matrix.special import block_matrix
gram_matrix = block_matrix([[R.zero(),A],[A,R.zero()]])
ghostsdict = { (i,j): {0: {('K',0): gram_matrix[index_set.rank(i),
index_set.rank(j)]}} for i in index_set for j in index_set}
weights = (1,)*(ngens//2) + (0,)*(ngens//2)
parity = (1,)*ngens
super(FermionicGhostsLieConformalAlgebra,self).__init__(R,
ghostsdict,names=names,
latex_names=latex_names,
index_set=index_set,
weights=weights,
parity=parity,
central_elements=('K',))
def _repr_(self):
"""
String representation.
EXAMPLES::
sage: lie_conformal_algebras.FermionicGhosts(QQ)
The Fermionic ghosts Lie conformal algebra with generators (b, c, K) over Rational Field
"""
return "The Fermionic ghosts Lie conformal algebra with generators {} "\
"over {}".format(self.gens(),self.base_ring())
|
11519629
|
import numpy as np
from yggdrasil.components import ComponentBase
class FilterBase(ComponentBase):
r"""Base class for message filters.
Args:
initial_state (dict, optional): Dictionary of initial state variables
that should be set when the filter is created.
"""
_filtertype = None
_schema_type = 'filter'
_schema_subtype_key = 'filtertype'
def __init__(self, *args, **kwargs):
self._state = {}
super(FilterBase, self).__init__(*args, **kwargs)
def evaluate_filter(self, x):
r"""Call filter on the provided message.
Args:
x (object): Message object to filter.
Returns:
bool: True if the message will pass through the filter, False otherwise.
"""
raise NotImplementedError # pragma: debug
def __call__(self, x):
r"""Call filter on the provided message.
Args:
x (object): Message object to filter.
Returns:
bool: True if the message will pass through the filter, False otherwise.
"""
out = self.evaluate_filter(x)
if isinstance(out, np.ndarray):
assert(out.dtype == bool)
out = bool(out.all())
elif isinstance(out, np.bool_):
out = bool(out)
try:
assert(isinstance(out, bool))
except AssertionError: # pragma: debug
print(out, type(out))
raise
return out
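# Example subclass (a sketch; 'EqualsFilter' is hypothetical and not part of
# yggdrasil): passes only messages equal to a fixed target value.
class EqualsFilter(FilterBase):
    r"""Filter passing messages equal to ``target`` (illustrative sketch).

    Args:
        target (object): Value that messages must equal to pass.
    """
    _filtertype = 'equals_example'  # hypothetical subtype name

    def __init__(self, *args, target=None, **kwargs):
        self.target = target
        super(EqualsFilter, self).__init__(*args, **kwargs)

    def evaluate_filter(self, x):
        # Must return a bool (or boolean array); __call__ normalizes it.
        return bool(x == self.target)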
|
11519638
|
import unittest
from com.example.client.config.low_level_client import ESLowLevelClient
class TestLowLevelClientSearch(unittest.TestCase):
es = ESLowLevelClient.get_instance()
def test_match_phrase_query(self):
body = {
"query": {
"match_phrase": {
"fund_name": {
"query": "iShares MSCI ACWI ETF"
}
}
}
}
response = self.es.search(index='cf_etf', body=body)
self.assertEqual(response['hits']['total']['value'], 1)
print(response)
|
11519682
|
from typing import Any, Dict, List, Text
import regex
import re
import rasa.shared.utils.io
from rasa.shared.constants import DOCS_URL_COMPONENTS
from rasa.nlu.tokenizers.tokenizer import Token, Tokenizer
from rasa.shared.nlu.training_data.message import Message
class WhitespaceTokenizer(Tokenizer):
defaults = {
# Flag to check whether to split intents
"intent_tokenization_flag": False,
# Symbol on which intent should be split
"intent_split_symbol": "_",
# Regular expression to detect tokens
"token_pattern": None,
}
# the following language should not be tokenized using the WhitespaceTokenizer
not_supported_language_list = ["zh", "ja", "th"]
def __init__(self, component_config: Dict[Text, Any] = None) -> None:
"""Construct a new tokenizer using the WhitespaceTokenizer framework."""
super().__init__(component_config)
self.emoji_pattern = self.get_emoji_regex()
if "case_sensitive" in self.component_config:
rasa.shared.utils.io.raise_warning(
"The option 'case_sensitive' was moved from the tokenizers to the "
"featurizers.",
docs=DOCS_URL_COMPONENTS,
)
@staticmethod
def get_emoji_regex():
return re.compile(
"["
"\U0001F600-\U0001F64F" # emoticons
"\U0001F300-\U0001F5FF" # symbols & pictographs
"\U0001F680-\U0001F6FF" # transport & map symbols
"\U0001F1E0-\U0001F1FF" # flags (iOS)
"\U00002702-\U000027B0"
"\U000024C2-\U0001F251"
"\u200d" # zero width joiner
"\u200c" # zero width non-joiner
"]+",
flags=re.UNICODE,
)
def remove_emoji(self, text: Text) -> Text:
"""Remove emoji if the full text, aka token, matches the emoji regex."""
match = self.emoji_pattern.fullmatch(text)
if match is not None:
return ""
return text
def tokenize(self, message: Message, attribute: Text) -> List[Token]:
text = message.get(attribute)
# we need to use regex instead of re, because of
# https://stackoverflow.com/questions/12746458/python-unicode-regular-expression-matching-failing-with-some-unicode-characters
# remove 'not a word character' if
words = regex.sub(
# there is a space or an end of a string after it
r"[^\w#@&]+(?=\s|$)|"
# there is a space or beginning of a string before it
# not followed by a number
r"(\s|^)[^\w#@&]+(?=[^0-9\s])|"
# not in between numbers and not . or @ or & or - or #
# e.g. 10'000.00 or <EMAIL>
# and not url characters
r"(?<=[^0-9\s])[^\w._~:/?#\[\]()@!$&*+,;=-]+(?=[^0-9\s])",
" ",
text,
).split()
words = [self.remove_emoji(w) for w in words]
words = [w for w in words if w]
# if we removed everything like smiles `:)`, use the whole text as 1 token
if not words:
words = [text]
tokens = self._convert_words_to_tokens(words, text)
return self._apply_token_pattern(tokens)
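# Behavior sketch (illustrative, based on the logic above): punctuation-only
# trailing tokens are stripped ("hello :)" -> ["hello"]), while a message
# consisting solely of removed symbols/emoji falls back to the whole text
# as a single token.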
|
11519721
|
import importlib
import logging
import sys
import click
import numpy as np
import tensorflow as tf
from experiment.config import load_config
@click.command()
@click.argument('config_file')
def run(config_file):
"""This program is the starting point for every experiment. It pulls together the configuration and all necessary
experiment classes to load
"""
config = load_config(config_file)
config_global = config['global']
# setup a logger
logger = logging.getLogger('experiment')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler_stdout = logging.StreamHandler(sys.stdout)
handler_stdout.setLevel(config['logger']['level'])
handler_stdout.setFormatter(formatter)
logger.addHandler(handler_stdout)
if 'path' in config['logger']:
handler_file = logging.FileHandler(config['logger']['path'])
handler_file.setLevel(config['logger']['level'])
handler_file.setFormatter(formatter)
logger.addHandler(handler_file)
logger.setLevel(config['logger']['level'])
# Allow the gpu to be used in parallel
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
if 'max_threads' in config_global:
sess_config.intra_op_parallelism_threads = config_global['max_threads']
# we allow to set the random seed in the config file for reproducibility. However, when running on GPU, results
# will still be nondeterministic (due to nondeterministic behavior of tensorflow)
if 'random_seed' in config_global:
seed = config_global['random_seed']
logger.info('Using fixed random seed {}'.format(seed))
np.random.seed(seed)
tf.set_random_seed(seed)
with tf.Session(config=sess_config) as sess:
# We are now fetching all relevant modules. It is strictly required that these module contain a variable named
# 'component' that points to a class which inherits from experiment.Data, experiment.Experiment,
# experiment.Trainer or experiment.Evaluator
data_module = config['data-module']
model_module = config['model-module']
training_module = config['training-module']
evaluation_module = config.get('evaluation-module', None)
# The modules are now dynamically loaded
DataClass = importlib.import_module(data_module).component
ModelClass = importlib.import_module(model_module).component
TrainingClass = importlib.import_module(training_module).component
EvaluationClass = importlib.import_module(evaluation_module).component if evaluation_module else None
# We then wire together all the modules and start training
data = DataClass(config['data'], config_global, logger)
model = ModelClass(config['model'], config_global, logger)
training = TrainingClass(config['training'], config_global, logger)
# setup the data (validate, create generators, load data, or else)
logger.info('Setting up the data')
data.setup()
# build the model (e.g. compile it)
logger.info('Building the model')
model.build(data, sess)
# start the training process
logger.info('Starting the training process')
training.start(model, data, sess)
# perform evaluation, if required
if EvaluationClass:
logger.info('Evaluating')
evaluation = EvaluationClass(config['evaluation'], config_global, logger)
evaluation.start(model, data, sess)
else:
logger.info('No evaluation')
logger.info('DONE')
if __name__ == '__main__':
run()
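# Example config sketch (assumes load_config reads a YAML file; the keys are
# inferred from the lookups above and the module paths are placeholders):
#
# global:
#   random_seed: 1337
#   max_threads: 4
# logger:
#   level: INFO
#   path: run.log
# data-module: experiment.data.my_data
# model-module: experiment.model.my_model
# training-module: experiment.training.my_trainer
# data: {}
# model: {}
# training: {}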
|
11519755
|
from functools import reduce
from math import ceil
from concurrent.futures import ProcessPoolExecutor, as_completed
from .mergesort import sort as _sort, merge
def sort(v, workers=2):
if len(v) == 0:
return v
dim = ceil(len(v) / workers)
chunks = (v[k: k + dim] for k in range(0, len(v), dim))
with ProcessPoolExecutor(max_workers=workers) as executor:
futures = [
executor.submit(_sort, chunk) for chunk in chunks
]
return reduce(
merge,
(future.result() for future in as_completed(futures))
)
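# Usage sketch (the relative imports above require running inside the
# package, so this is shown as a comment):
# >>> sort([5, 3, 8, 1, 9, 2], workers=2)
# [1, 2, 3, 5, 8, 9]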
|
11519811
|
import logging
import urllib
import requests
from requests.exceptions import RequestException
import base64
import voluptuous as vol
from datetime import timedelta
from homeassistant.helpers.discovery import load_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.const import CONF_USERNAME, CONF_PASSWORD, CONF_HOST
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
DOMAIN = "aerogarden"
DATA_AEROGARDEN = "AEROGARDEN"
DEFAULT_HOST = "https://app3.aerogarden.com:8443"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
class AerogardenAPI:
def __init__(self, username, password, host=None):
self._username = urllib.parse.quote(username)
self._password = urllib.parse.quote(password)
self._host = host
self._userid = None
self._error_msg = None
self._data = None
self._login_url = "/api/Admin/Login"
self._status_url = "/api/CustomData/QueryUserDevice"
self._update_url = "/api/Custom/UpdateDeviceConfig"
self._headers = {
"User-Agent": "HA-Aerogarden/0.1",
"Content-Type": "application/x-www-form-urlencoded",
}
self.login()
@property
def error(self):
return self._error_msg
def login(self):
post_data = "mail=" + self._username + "&userPwd=" + self._password
url = self._host + self._login_url
try:
r = requests.post(url, data=post_data, headers=self._headers)
except RequestException:
_LOGGER.exception("Error communicating with aerogarden servers")
return False
response = r.json()
userid = response["code"]
if userid > 0:
self._userid = str(userid)
else:
self._error_msg = "Login api call returned %s" % (response["code"])
def is_valid_login(self):
if self._userid:
return True
return False
def garden_name(self, macaddr):
multi_garden = self.garden_property(macaddr, "chooseGarden")
if not multi_garden:
return self.garden_property(macaddr, "plantedName")
multi_garden_label = "left" if multi_garden == 0 else "right"
return self.garden_property(macaddr, "plantedName") + "_" + multi_garden_label
def garden_property(self, macaddr, field):
if macaddr not in self._data:
return None
if field not in self._data[macaddr]:
return None
return self._data[macaddr].get(field, None)
def light_toggle(self, macaddr):
"""
Toggles between Bright, Dimmed, and Off.
I couldn't find any way to set a specific state, it just cycles between the three.
"""
if macaddr not in self._data:
return None
post_data = {
"airGuid": macaddr,
"chooseGarden": self.garden_property(macaddr, "chooseGarden"),
"userID": self._userid,
"plantConfig": '{ "lightTemp" : 1 }', # this value seems to not matter
}
url = self._host + self._update_url
_LOGGER.debug(f"Sending POST data to toggle light: {post_data}")
try:
r = requests.post(url, data=post_data, headers=self._headers)
except RequestException:
_LOGGER.exception("Error communicating with aerogarden servers")
return False
results = r.json()
if "code" in results:
if results["code"] == 1:
return True
self._error_msg = f"Didn't get code 1 from update API call: {results}"
_LOGGER.exception(f"Failed to toggle light: {self._error_msg}")
self.update(no_throttle=True)
return False
@property
def gardens(self):
return self._data.keys()
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
data = {}
if not self.is_valid_login():
return
url = self._host + self._status_url
post_data = "userID=" + self._userid
try:
r = requests.post(url, data=post_data, headers=self._headers)
except RequestException:
_LOGGER.exception("Error communicating with aerogarden servers")
return False
garden_data = r.json()
if "Message" in garden_data:
self._error_msg = "Couldn't get data for garden (correct macaddr?): %s" % (
garden_data["Message"]
)
return False
for garden in garden_data:
if "plantedName" in garden:
garden["plantedName"] = base64.b64decode(garden["plantedName"]).decode(
"utf-8"
)
# id = garden.get("configID", None)
gardenmac = garden["airGuid"] # + "-" + ("" if id is None else str(id))
data[gardenmac] = garden
_LOGGER.debug("Fetched data {}".format(data))
self._data = data
return True
def setup(hass, config):
""" Setup the aerogarden platform """
username = config[DOMAIN].get(CONF_USERNAME)
password = config[DOMAIN].get(CONF_PASSWORD)
host = config[DOMAIN].get(CONF_HOST)
ag = AerogardenAPI(username, password, host)
if not ag.is_valid_login():
_LOGGER.error("Invalid login: %s" % (ag.error))
return False
ag.update()
# store the aerogarden API object into hass data system
hass.data[DATA_AEROGARDEN] = ag
load_platform(hass, "sensor", DOMAIN, {}, config)
load_platform(hass, "binary_sensor", DOMAIN, {}, config)
load_platform(hass, "light", DOMAIN, {}, config)
return True
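# Example configuration.yaml entry (a sketch derived from CONFIG_SCHEMA above;
# host is optional and defaults to the official endpoint):
#
# aerogarden:
#   username: you@example.com
#   password: !secret aerogarden_password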
|
11519835
|
import requests
from flask import abort, jsonify, request, Blueprint
from nookipedia.config import DB_KEYS
from nookipedia.middlewares import authorize
from nookipedia.cargo import call_cargo, get_other_item_list
from nookipedia.errors import error_response
from nookipedia.models import format_other_item
from nookipedia.utility import generate_fields
router = Blueprint("items", __name__)
@router.route("/nh/items/<string:item>", methods=["GET"])
def get_nh_item(item):
authorize(DB_KEYS, request)
item = requests.utils.unquote(item).replace("_", " ")
limit = "1"
tables = "nh_item"
fields = generate_fields(
"_pageName=url",
"en_name=name",
"image_url",
"stack",
"hha_base",
"buy1_price",
"buy1_currency",
"sell",
"is_fence",
"material_type",
"material_seasonality",
"material_sort",
"material_name_sort",
"material_seasonality_sort",
"edible",
"plant_type",
"availability1",
"availability1_note",
"availability2",
"availability2_note",
"availability3",
"availability3_note",
"version_added",
"unlocked",
"notes",
)
where = f'en_name="{item}"'
params = {
"action": "cargoquery",
"format": "json",
"tables": tables,
"fields": fields,
"where": where,
"limit": limit,
}
cargo_results = call_cargo(params, request.args)
if len(cargo_results) == 0:
abort(
404,
description=error_response(
"No data was found for the given query.",
f"MediaWiki Cargo request succeeded by nothing was returned for the parameters: {params}",
),
)
else:
return jsonify(format_other_item(cargo_results[0]))
@router.route("/nh/items", methods=["GET"])
def get_nh_item_all():
authorize(DB_KEYS, request)
limit = "400"
tables = "nh_item"
fields = generate_fields(
"_pageName=url",
"en_name=name",
"image_url",
"stack",
"hha_base",
"buy1_price",
"buy1_currency",
"sell",
"is_fence",
"material_type",
"material_seasonality",
"material_sort",
"material_name_sort",
"material_seasonality_sort",
"edible",
"plant_type",
"availability1",
"availability1_note",
"availability2",
"availability2_note",
"availability3",
"availability3_note",
"version_added",
"unlocked",
"notes",
)
return get_other_item_list(limit, tables, fields)
|
11519847
|
import tarfile
import argparse
import subprocess
from pathlib import Path
import torch
import torchvision
from torchvision import transforms
from torch.utils.data import DataLoader
from model import *
from utils import *
from dataset import *
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--device', type=int, required=True, help='device num, cuda:0')
parser.add_argument('--ckpt', type=str, required=True, help='path to *_best_model.pth')
parser.add_argument('--inputDir', type=str, required=True, help='path to save output')
args = parser.parse_args()
return args
args = parse_args()
if torch.cuda.is_available() and args.device >= 0:
device = torch.device(f'cuda:{args.device}')
torch.cuda.manual_seed(1234)
else:
device = torch.device('cpu')
torch.manual_seed(1234)
test_dataset = SICEPart1(args.inputDir, transform=transforms.ToTensor())
test_loader = DataLoader(test_dataset, batch_size=4, shuffle=False)
inPath = Path(args.inputDir).expanduser().resolve()
outDir = inPath.parent.joinpath(inPath.stem + '_output')
create_dir(outDir)
checkpoint = torch.load(args.ckpt, map_location=device)
model = DCENet(n=8, return_results=[4, 6, 8])
model.load_state_dict(checkpoint['model'])
model.to(device)
model.eval()
to_gray, neigh_diff = get_kernels(device) # conv kernels for calculating spatial consistency loss
with torch.no_grad():
for i, sample in enumerate(test_loader):
names = sample['name']
img_batch = sample['img'].to(device)
results, alpha_stacked = model(img_batch)
enhanced_batch = results[1]
for name, enhanced in zip(names, enhanced_batch):
torchvision.utils.save_image(enhanced, str(outDir.joinpath(name[:-4] + '_enh.jpg')))
with tarfile.open(str(inPath) + '.tar.gz', 'w:gz') as tar:
tar.add(outDir, arcname=outDir.stem)
tar.add(args.ckpt, arcname=Path(args.ckpt).name)
# CMD = ['tar', 'czfP', inPath + '.tar.gz', args.ckpt, args.inputDir, str(outDir)]
# subprocess.call(CMD)
|
11519855
|
import unittest
from Ejercicio3 import ITasks, SystemAuthProxy, User
class ProxyTest(unittest.TestCase):
def test_level_one_user(self):
usr = User(name='foo', access_level=1)
proxy = SystemAuthProxy(usr, "Payments")
self.assertEqual("User 'foo' is doing level-1 tasks on system ### Payments ###", proxy.level_1_tasks())
self.assertEqual("ERROR: User 'foo' is not authorized to do level-2 tasks", proxy.level_2_tasks())
self.assertEqual("ERROR: User 'foo' is not authorized to do level-3 tasks", proxy.level_3_tasks())
def test_level_two_user(self):
usr = User(name='bar', access_level=2)
proxy = SystemAuthProxy(usr, "IT")
self.assertEqual("User 'bar' is doing level-1 tasks on system ### IT ###", proxy.level_1_tasks())
self.assertEqual("User 'bar' is doing level-2 tasks on system ### IT ###", proxy.level_2_tasks())
self.assertEqual("ERROR: User 'bar' is not authorized to do level-3 tasks", proxy.level_3_tasks())
def test_level_three_user(self):
usr = User(name='baz', access_level=3)
proxy = SystemAuthProxy(usr, "Sales")
self.assertEqual("User 'baz' is doing level-1 tasks on system ### Sales ###", proxy.level_1_tasks())
self.assertEqual("User 'baz' is doing level-2 tasks on system ### Sales ###", proxy.level_2_tasks())
self.assertEqual("User 'baz' is doing level-3 tasks on system ### Sales ###", proxy.level_3_tasks())
if __name__ == "__main__":
unittest.main()
|
11519874
|
import json
import platform
import sys
import sysconfig
if platform.python_implementation() == "PyPy":
# Workaround for PyPy 3.6 on windows:
# - sysconfig.get_config_var("EXT_SUFFIX") differs to importlib until
# Python 3.8
# - PyPy does not load the plain ".pyd" suffix because it expects that's
# for a CPython extension module
#
# This workaround can probably be removed once PyPy for Python 3.8 is the
# main PyPy version.
import importlib.machinery
ext_suffix = importlib.machinery.EXTENSION_SUFFIXES[0]
else:
ext_suffix = sysconfig.get_config_var("EXT_SUFFIX")
metadata = {
"major": sys.version_info.major,
"minor": sys.version_info.minor,
"abiflags": sysconfig.get_config_var("ABIFLAGS"),
"interpreter": platform.python_implementation().lower(),
"ext_suffix": ext_suffix,
"abi_tag": (sysconfig.get_config_var("SOABI") or "-").split("-")[1] or None,
# This one isn't technically necessary, but still very useful for sanity checks
"platform": platform.system().lower(),
# We need this one for windows abi3 builds
"base_prefix": sys.base_prefix,
}
print(json.dumps(metadata))
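# Example output on CPython 3.10 / Linux (illustrative only; values vary by
# interpreter, version, and platform):
# {"major": 3, "minor": 10, "abiflags": "", "interpreter": "cpython",
#  "ext_suffix": ".cpython-310-x86_64-linux-gnu.so", "abi_tag": "310",
#  "platform": "linux", "base_prefix": "/usr"}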
|