| code (string, 22 to 1.05M chars) | apis (list, 1 to 3.31k items) | extract_api (string, 75 to 3.25M chars) |
|---|---|---|
# coding=utf-8
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import sys
import ast
import os
import argparse
import glob
import moxing as mox
import tensorflow as tf
from tensorflow.python.tools import freeze_graph
from npu_bridge.estimator import npu_ops
from utils import create_session as cs
from utils import logger as lg
from data_loader.resnet50 import data_loader as dl
from models.resnet50 import resnet, res50_helper
from models.resnet50 import res50_model as ml
from optimizers import optimizer as op
from losses import res50_loss as ls
from trainers import gpu_base_trainer as tr
# from configs import res50_config as cfg
from hyper_param import hyper_param as hp
from layers import layers as ly
OUTPUT_PATH = "/cache/model"
DATA_PATH = "/cache/data"
def set_env():
"""
set the DEVICE_INDEX environment variable from RANK_ID
"""
os.environ['DEVICE_INDEX'] = os.getenv('RANK_ID')
def args_parser():
"""
parse command-line hyperparameters
return:
parser_args
"""
parser = argparse.ArgumentParser(description="train resnet50")
parser.add_argument('--train_url', type=str, default='',
help='path where the trained model is saved')
parser.add_argument('--data_url', type=str, default='',
help='path to the training data')
parser.add_argument('--config_file', type=str, default='res50_32bs_1p_host',
help='the config file')
parser.add_argument('--max_train_steps', type=int, default=10000,
help='max_train_steps')
parser.add_argument('--iterations_per_loop', default=1000,
help='number of iterations per training loop')
parser.add_argument('--batch_size', type=int, default=32,
help='batch_size')
parser.add_argument('--num_classes', type=int, default=1001,
help='num_classes')
parser.add_argument('--num_epochs', type=int, default=None,
help='num_epochs')
parser.add_argument('--learning_rate_maximum', type=float, default=0.1,
help='learning_rate_maximum')
parser.add_argument('--debug', default=True, type=ast.literal_eval,
help='enable debug mode')
parser.add_argument('--eval', default=False, type=ast.literal_eval,
help='run evaluation after training')
parser.add_argument('--model_dir', default="/cache/model",
help='model directory path')
parser.add_argument('--restore_path', type=str, default='',
help='restore ckpt path')
parser_args, _ = parser.parse_known_args()
return parser_args
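# Example invocation (illustrative only; the entry script name and OBS paths
# are assumptions, the flags follow the parser above):
#   python train.py --data_url=obs://bucket/imagenet --train_url=obs://bucket/output \
#       --config_file=res50_32bs_1p_host --batch_size=32 --max_train_steps=10000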
def set_config(args):
"""
load the config from file and override it with command-line hyperparameters
"""
configs = 'configs'
cfg = getattr(__import__(configs, fromlist=[args.config_file]),
args.config_file)
config = cfg.res50_config()
config['data_url'] = DATA_PATH
config['log_dir'] = OUTPUT_PATH
config['model_dir'] = OUTPUT_PATH
config['ckpt_dir'] = OUTPUT_PATH
# set param from parse
config['iterations_per_loop'] = int(args.iterations_per_loop)
config['max_train_steps'] = int(args.max_train_steps)
config['debug'] = args.debug
config['eval'] = args.eval
config['model_dir'] = args.model_dir
config['batch_size'] = args.batch_size
config['global_batch_size'] = config['batch_size'] * config['rank_size']
config['num_classes'] = args.num_classes
config['num_epochs'] = args.num_epochs
config['learning_rate_maximum'] = args.learning_rate_maximum
config['restore_path'] = os.path.join(DATA_PATH, "ckpt",
input_args.restore_path)
print("iterations_per_loop :%d" % (config['iterations_per_loop']))
print("max_train_steps :%d" % (config['max_train_steps']))
print("debug :%s" % (config['debug']))
print("eval :%s" % (config['eval']))
print("model_dir :%s" % (config['model_dir']))
print("batch_size :%d" % (config['batch_size']))
if config['num_epochs']:
print("num_epochs :%d" % (config['num_epochs']))
print("learning_rate_maximum :%f" % (config['learning_rate_maximum']))
print("num_classes :%d" % (config['num_classes']))
print("restore_path :%s" % (config['restore_path']))
return config
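# For reference, set_config() imports configs/<config_file>.py and calls its
# res50_config(); a minimal sketch of that module (keys inferred from the
# usages above and in train(), values are assumptions):
#
#   def res50_config():
#       return {
#           'rank_size': 1,   # number of devices, used for global_batch_size
#           'mode': 'train',  # 'train' | 'evaluate' | 'train_and_evaluate'
#           'iterations_per_loop': 1000,
#           'max_train_steps': 10000,
#           'batch_size': 32,
#       }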
def train(args):
"""
train the model and generate checkpoint (ckpt) files
"""
config = set_config(args)
Session = cs.CreateSession(config)
data = dl.DataLoader(config)
hyper_param = hp.HyperParams(config)
layers = ly.Layers()
optimizer = op.Optimizer(config)
loss = ls.Loss(config)
# add tensorboard summary
logger = lg.LogSessionRunHook(config)
# get the model
model = ml.Model(config, data, hyper_param, layers, optimizer, loss, logger)
# use Estimator to build training process
trainer = tr.GPUBaseTrain(Session, config, data, model, logger)
if config['mode'] == 'train':
trainer.train()
if config['eval']:
trainer.evaluate()
elif config['mode'] == 'evaluate':
trainer.evaluate()
elif config['mode'] == 'train_and_evaluate':
trainer.train_and_evaluate()
else:
raise ValueError('Invalid type of mode')
def model_trans(args):
"""
freeze the latest checkpoint into a pb model
"""
ckpt_list = glob.glob("/cache/model/model.ckpt-*.meta")
if not ckpt_list:
print("ckpt file not generated.")
return
ckpt_list.sort(key=os.path.getmtime)
ckpt_model = ckpt_list[-1].rsplit(".", 1)[0]
print("====================%s" % ckpt_model)
tf.reset_default_graph()
# set inputs node
inputs = tf.placeholder(tf.float32, shape=[None, 224, 224, 3], name="input")
# create inference graph
with res50_helper.custom_getter_with_fp16_and_weight_decay(dtype=tf.float32,
weight_decay=0.0001):
builder = resnet.LayerBuilder(tf.nn.relu, 'channels_last', False,
use_batch_norm=True,
conv_initializer=None,
bn_init_mode='adv_bn_init',
bn_gamma_initial_value=1.0)
top_layer = resnet.inference_resnext_impl(builder, inputs, [3, 4, 6, 3],
"original", args.num_classes)
with tf.Session() as sess:
tf.train.write_graph(sess.graph_def, '/cache/model', 'model.pb')
freeze_graph.freeze_graph(
input_graph='/cache/model/model.pb',
input_saver='',
input_binary=False,
input_checkpoint=ckpt_model,
output_node_names='fp32_vars/final_dense', # graph outputs node
restore_op_name='save/restore_all',
filename_tensor_name='save/Const:0',
output_graph='/cache/model/resnext50_tf_910.pb', # graph outputs name
clear_devices=False,
initializer_nodes='')
print("done")
if __name__ == '__main__':
set_env()
input_args = args_parser()
# copy dataset from obs to container
if not os.path.exists(DATA_PATH):
os.makedirs(DATA_PATH, 0o755)
if not os.path.exists(OUTPUT_PATH):
os.makedirs(OUTPUT_PATH, 0o755)
mox.file.copy_parallel(input_args.data_url, DATA_PATH)
# set level of logging
tf.logging.set_verbosity(tf.logging.INFO)
train(input_args)
# convert the ckpt model to a frozen pb
model_trans(input_args)
# after train, copy log and model from container to obs
mox.file.copy_parallel(OUTPUT_PATH, input_args.train_url)
|
[
"argparse.ArgumentParser",
"tensorflow.reset_default_graph",
"models.resnet50.res50_model.Model",
"trainers.gpu_base_trainer.GPUBaseTrain",
"moxing.file.copy_parallel",
"utils.logger.LogSessionRunHook",
"tensorflow.logging.set_verbosity",
"tensorflow.train.write_graph",
"glob.glob",
"os.path.join",
"models.resnet50.resnet.inference_resnext_impl",
"hyper_param.hyper_param.HyperParams",
"os.path.exists",
"tensorflow.placeholder",
"losses.res50_loss.Loss",
"tensorflow.Session",
"layers.layers.Layers",
"data_loader.resnet50.data_loader.DataLoader",
"os.getenv",
"os.makedirs",
"models.resnet50.resnet.LayerBuilder",
"models.resnet50.res50_helper.custom_getter_with_fp16_and_weight_decay",
"tensorflow.python.tools.freeze_graph.freeze_graph",
"utils.create_session.CreateSession",
"optimizers.optimizer.Optimizer"
] |
[((2175, 2195), 'os.getenv', 'os.getenv', (['"""RANK_ID"""'], {}), "('RANK_ID')\n", (2184, 2195), False, 'import os\n'), ((2302, 2355), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""train resnet50"""'}), "(description='train resnet50')\n", (2325, 2355), False, 'import argparse\n'), ((4931, 4987), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""ckpt"""', 'input_args.restore_path'], {}), "(DATA_PATH, 'ckpt', input_args.restore_path)\n", (4943, 4987), False, 'import os\n'), ((5865, 5889), 'utils.create_session.CreateSession', 'cs.CreateSession', (['config'], {}), '(config)\n', (5881, 5889), True, 'from utils import create_session as cs\n'), ((5901, 5922), 'data_loader.resnet50.data_loader.DataLoader', 'dl.DataLoader', (['config'], {}), '(config)\n', (5914, 5922), True, 'from data_loader.resnet50 import data_loader as dl\n'), ((5941, 5963), 'hyper_param.hyper_param.HyperParams', 'hp.HyperParams', (['config'], {}), '(config)\n', (5955, 5963), True, 'from hyper_param import hyper_param as hp\n'), ((5977, 5988), 'layers.layers.Layers', 'ly.Layers', ([], {}), '()\n', (5986, 5988), True, 'from layers import layers as ly\n'), ((6005, 6025), 'optimizers.optimizer.Optimizer', 'op.Optimizer', (['config'], {}), '(config)\n', (6017, 6025), True, 'from optimizers import optimizer as op\n'), ((6037, 6052), 'losses.res50_loss.Loss', 'ls.Loss', (['config'], {}), '(config)\n', (6044, 6052), True, 'from losses import res50_loss as ls\n'), ((6096, 6124), 'utils.logger.LogSessionRunHook', 'lg.LogSessionRunHook', (['config'], {}), '(config)\n', (6116, 6124), True, 'from utils import logger as lg\n'), ((6158, 6226), 'models.resnet50.res50_model.Model', 'ml.Model', (['config', 'data', 'hyper_param', 'layers', 'optimizer', 'loss', 'logger'], {}), '(config, data, hyper_param, layers, optimizer, loss, logger)\n', (6166, 6226), True, 'from models.resnet50 import res50_model as ml\n'), ((6287, 6340), 'trainers.gpu_base_trainer.GPUBaseTrain', 'tr.GPUBaseTrain', (['Session', 'config', 'data', 'model', 'logger'], {}), '(Session, config, data, model, logger)\n', (6302, 6340), True, 'from trainers import gpu_base_trainer as tr\n'), ((6747, 6790), 'glob.glob', 'glob.glob', (['"""/cache/model/model.ckpt-*.meta"""'], {}), "('/cache/model/model.ckpt-*.meta')\n", (6756, 6790), False, 'import glob\n'), ((7014, 7038), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (7036, 7038), True, 'import tensorflow as tf\n'), ((7074, 7141), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 224, 224, 3]', 'name': '"""input"""'}), "(tf.float32, shape=[None, 224, 224, 3], name='input')\n", (7088, 7141), True, 'import tensorflow as tf\n'), ((8733, 8787), 'moxing.file.copy_parallel', 'mox.file.copy_parallel', (['input_args.data_url', 'DATA_PATH'], {}), '(input_args.data_url, DATA_PATH)\n', (8755, 8787), True, 'import moxing as mox\n'), ((8820, 8861), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (8844, 8861), True, 'import tensorflow as tf\n'), ((9008, 9065), 'moxing.file.copy_parallel', 'mox.file.copy_parallel', (['OUTPUT_PATH', 'input_args.train_url'], {}), '(OUTPUT_PATH, input_args.train_url)\n', (9030, 9065), True, 'import moxing as mox\n'), ((7180, 7276), 'models.resnet50.res50_helper.custom_getter_with_fp16_and_weight_decay', 'res50_helper.custom_getter_with_fp16_and_weight_decay', ([], {'dtype': 'tf.float32', 'weight_decay': '(0.0001)'}), '(dtype=tf.float32,\n 
weight_decay=0.0001)\n', (7233, 7276), False, 'from models.resnet50 import resnet, res50_helper\n'), ((7355, 7518), 'models.resnet50.resnet.LayerBuilder', 'resnet.LayerBuilder', (['tf.nn.relu', '"""channels_last"""', '(False)'], {'use_batch_norm': '(True)', 'conv_initializer': 'None', 'bn_init_mode': '"""adv_bn_init"""', 'bn_gamma_initial_value': '(1.0)'}), "(tf.nn.relu, 'channels_last', False, use_batch_norm=True,\n conv_initializer=None, bn_init_mode='adv_bn_init',\n bn_gamma_initial_value=1.0)\n", (7374, 7518), False, 'from models.resnet50 import resnet, res50_helper\n'), ((7683, 7777), 'models.resnet50.resnet.inference_resnext_impl', 'resnet.inference_resnext_impl', (['builder', 'inputs', '[3, 4, 6, 3]', '"""original"""', 'args.num_classes'], {}), "(builder, inputs, [3, 4, 6, 3], 'original',\n args.num_classes)\n", (7712, 7777), False, 'from models.resnet50 import resnet, res50_helper\n'), ((7834, 7846), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (7844, 7846), True, 'import tensorflow as tf\n'), ((7864, 7928), 'tensorflow.train.write_graph', 'tf.train.write_graph', (['sess.graph_def', '"""/cache/model"""', '"""model.pb"""'], {}), "(sess.graph_def, '/cache/model', 'model.pb')\n", (7884, 7928), True, 'import tensorflow as tf\n'), ((7937, 8295), 'tensorflow.python.tools.freeze_graph.freeze_graph', 'freeze_graph.freeze_graph', ([], {'input_graph': '"""/cache/model/model.pb"""', 'input_saver': '""""""', 'input_binary': '(False)', 'input_checkpoint': 'ckpt_model', 'output_node_names': '"""fp32_vars/final_dense"""', 'restore_op_name': '"""save/restore_all"""', 'filename_tensor_name': '"""save/Const:0"""', 'output_graph': '"""/cache/model/resnext50_tf_910.pb"""', 'clear_devices': '(False)', 'initializer_nodes': '""""""'}), "(input_graph='/cache/model/model.pb', input_saver=\n '', input_binary=False, input_checkpoint=ckpt_model, output_node_names=\n 'fp32_vars/final_dense', restore_op_name='save/restore_all',\n filename_tensor_name='save/Const:0', output_graph=\n '/cache/model/resnext50_tf_910.pb', clear_devices=False,\n initializer_nodes='')\n", (7962, 8295), False, 'from tensorflow.python.tools import freeze_graph\n'), ((8584, 8609), 'os.path.exists', 'os.path.exists', (['DATA_PATH'], {}), '(DATA_PATH)\n', (8598, 8609), False, 'import os\n'), ((8619, 8646), 'os.makedirs', 'os.makedirs', (['DATA_PATH', '(493)'], {}), '(DATA_PATH, 493)\n', (8630, 8646), False, 'import os\n'), ((8660, 8687), 'os.path.exists', 'os.path.exists', (['OUTPUT_PATH'], {}), '(OUTPUT_PATH)\n', (8674, 8687), False, 'import os\n'), ((8697, 8726), 'os.makedirs', 'os.makedirs', (['OUTPUT_PATH', '(493)'], {}), '(OUTPUT_PATH, 493)\n', (8708, 8726), False, 'import os\n')]
|
# Generated by Django 2.2 on 2019-06-20 07:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myapp', '0006_auto_20190620_1446'),
]
operations = [
migrations.AlterField(
model_name='eatstatistics',
name='eatHot',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='eatstatistics',
name='eatId',
field=models.IntegerField(auto_created=True, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='eatstatistics',
name='eatProtein',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='eatstatistics',
name='eatSugar',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='foods',
name='foodHot',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='foods',
name='foodId',
field=models.IntegerField(auto_created=True, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='foods',
name='foodProtein',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='foods',
name='foodSugar',
field=models.IntegerField(),
),
]
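# For context, the operations above leave integer-only fields on both models;
# a sketch of the corresponding myapp/models.py (field names come from the
# migration, everything else is an assumption):
#
#   class EatStatistics(models.Model):
#       eatId = models.IntegerField(primary_key=True)
#       eatHot = models.IntegerField()
#       eatProtein = models.IntegerField()
#       eatSugar = models.IntegerField()
#
#   class Foods(models.Model):
#       foodId = models.IntegerField(primary_key=True)
#       foodHot = models.IntegerField()
#       foodProtein = models.IntegerField()
#       foodSugar = models.IntegerField()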
|
[
"django.db.models.IntegerField"
] |
[((339, 360), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (358, 360), False, 'from django.db import migrations, models\n'), ((488, 561), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)'}), '(auto_created=True, primary_key=True, serialize=False)\n', (507, 561), False, 'from django.db import migrations, models\n'), ((694, 715), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (713, 715), False, 'from django.db import migrations, models\n'), ((846, 867), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (865, 867), False, 'from django.db import migrations, models\n'), ((989, 1010), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1008, 1010), False, 'from django.db import migrations, models\n'), ((1131, 1204), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)'}), '(auto_created=True, primary_key=True, serialize=False)\n', (1150, 1204), False, 'from django.db import migrations, models\n'), ((1330, 1351), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1349, 1351), False, 'from django.db import migrations, models\n'), ((1475, 1496), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1494, 1496), False, 'from django.db import migrations, models\n')]
|
"""\
Update files with AWS metadata
"""
import json
import logging
import transaction
from pyramid.paster import get_app
from pyramid.threadlocal import manager
from pyramid.testing import DummyRequest
EPILOG = __doc__
logger = logging.getLogger(__name__)
def run(app, files):
root = app.root_factory(app)
collection = root['file']
dummy_request = DummyRequest(root=root, registry=app.registry, _stats={})
manager.push({'request': dummy_request, 'registry': app.registry})
for i, uuid in enumerate(collection):
item = root.get_by_uuid(uuid)
dummy_request.context = item
properties = item.upgrade_properties()
sheets = None
value = files.get(str(uuid))
if value is not None:
properties['file_size'] = value['file_size']
sheets = {
'external': {
'service': 's3',
'bucket': 'encode-files',
'key': value['s3_file_name'],
},
}
item.update(properties, sheets=sheets)
if (i + 1) % 100 == 0:
logger.info('Updated %d', i + 1)
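# The `files` mapping is keyed by file uuid; each value is expected to carry at
# least 'file_size' and 's3_file_name' (used above), and records containing
# 'errors' or 'blacklisted' are filtered out in main(). An illustrative input
# record (field values are assumptions):
#
#   {"uuid": "7ec98ea9-0000-0000-0000-000000000000",
#    "file_size": 1048576,
#    "s3_file_name": "2019/01/ENCFF000ABC.fastq.gz"}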
def main():
import argparse
parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here.
description="Migrate files to AWS", epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('--app-name', help="Pyramid app name in configfile")
parser.add_argument('--abort', action='store_true', help="Rollback transaction")
parser.add_argument('files_processed', type=argparse.FileType('rb'), help="path to json file")
parser.add_argument('config_uri', help="path to configfile")
args = parser.parse_args()
logging.basicConfig()
app = get_app(args.config_uri, args.app_name)
# Loading app will have configured from config file. Reconfigure here:
logging.getLogger('encoded').setLevel(logging.DEBUG)
files_processed = json.load(args.files_processed)
good_files = {v['uuid']: v for v in files_processed
if 'errors' not in v and 'blacklisted' not in v}
raised = False
try:
run(app, good_files)
except Exception:
raised = True
raise
finally:
if raised or args.abort:
transaction.abort()
logger.info('Rolled back.')
else:
transaction.commit()
if __name__ == '__main__':
main()
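# Example invocation (illustrative; the script and ini file names are
# assumptions, the flags and positional arguments follow the parser above):
#   python update_file_aws_metadata.py --app-name app files_processed.json production.ini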
|
[
"transaction.commit",
"json.load",
"argparse.ArgumentParser",
"logging.basicConfig",
"pyramid.testing.DummyRequest",
"pyramid.paster.get_app",
"pyramid.threadlocal.manager.push",
"transaction.abort",
"argparse.FileType",
"logging.getLogger"
] |
[((231, 258), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (248, 258), False, 'import logging\n'), ((365, 422), 'pyramid.testing.DummyRequest', 'DummyRequest', ([], {'root': 'root', 'registry': 'app.registry', '_stats': '{}'}), '(root=root, registry=app.registry, _stats={})\n', (377, 422), False, 'from pyramid.testing import DummyRequest\n'), ((427, 493), 'pyramid.threadlocal.manager.push', 'manager.push', (["{'request': dummy_request, 'registry': app.registry}"], {}), "({'request': dummy_request, 'registry': app.registry})\n", (439, 493), False, 'from pyramid.threadlocal import manager\n'), ((1193, 1325), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Migrate files to AWS"""', 'epilog': 'EPILOG', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), "(description='Migrate files to AWS', epilog=EPILOG,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n", (1216, 1325), False, 'import argparse\n'), ((1785, 1806), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (1804, 1806), False, 'import logging\n'), ((1817, 1856), 'pyramid.paster.get_app', 'get_app', (['args.config_uri', 'args.app_name'], {}), '(args.config_uri, args.app_name)\n', (1824, 1856), False, 'from pyramid.paster import get_app\n'), ((2012, 2043), 'json.load', 'json.load', (['args.files_processed'], {}), '(args.files_processed)\n', (2021, 2043), False, 'import json\n'), ((1633, 1656), 'argparse.FileType', 'argparse.FileType', (['"""rb"""'], {}), "('rb')\n", (1650, 1656), False, 'import argparse\n'), ((1936, 1964), 'logging.getLogger', 'logging.getLogger', (['"""encoded"""'], {}), "('encoded')\n", (1953, 1964), False, 'import logging\n'), ((2331, 2350), 'transaction.abort', 'transaction.abort', ([], {}), '()\n', (2348, 2350), False, 'import transaction\n'), ((2417, 2437), 'transaction.commit', 'transaction.commit', ([], {}), '()\n', (2435, 2437), False, 'import transaction\n')]
|
# Modified work:
# -----------------------------------------------------------------------------
# Copyright (c) 2019 Preferred Infrastructure, Inc.
# Copyright (c) 2019 Preferred Networks, Inc.
# -----------------------------------------------------------------------------
# Original work:
# -----------------------------------------------------------------------------
# Copyright (c) 2015 by Contributors
# \file roi_pooling.cu
# \brief roi pooling operator
# \author <NAME>, <NAME>, <NAME>
# \changed to roi_align by <NAME>
# \file roi_align.cu
# \roi align operator described in Mask RCNN
# -----------------------------------------------------------------------------
from __future__ import division
import numbers
import numpy as np
import six
from chainer.backends import cuda
from chainer import function
from chainer.utils import type_check
from chainercv.functions.ps_roi_average_align_2d \
import _GET_BILINEAR_INTERP_KERNEL
from chainercv.functions.ps_roi_average_align_2d \
import _get_bilinear_interp_params
from chainercv.functions.ps_roi_average_align_2d import _get_bounds
from chainercv.functions.ps_roi_average_align_2d import _pair
from chainercv.functions.ps_roi_average_pooling_2d import _outsize
class PSROIMaxAlign2D(function.Function):
def __init__(
self, outsize, spatial_scale,
group_size, sampling_ratio=None
):
out_c, out_h, out_w = _outsize(outsize)
if out_c is not None and \
not (isinstance(out_c, numbers.Integral) and out_c > 0):
raise TypeError(
'outsize[0] must be positive integer: {}, {}'
.format(type(out_c), out_c))
if not (isinstance(out_h, numbers.Integral) and out_h > 0):
raise TypeError(
'outsize[1] must be positive integer: {}, {}'
.format(type(out_h), out_h))
if not (isinstance(out_w, numbers.Integral) and out_w > 0):
raise TypeError(
'outsize[2] must be positive integer: {}, {}'
.format(type(out_w), out_w))
if isinstance(spatial_scale, numbers.Integral):
spatial_scale = float(spatial_scale)
if not (isinstance(spatial_scale, numbers.Real)
and spatial_scale > 0):
raise TypeError(
'spatial_scale must be a positive float number: {}, {}'
.format(type(spatial_scale), spatial_scale))
if not (isinstance(group_size, numbers.Integral)
and group_size > 0):
raise TypeError(
'group_size must be positive integer: {}, {}'
.format(type(group_size), group_size))
sampling_ratio = _pair(sampling_ratio)
if not all((isinstance(s, numbers.Integral) and s >= 1) or s is None
for s in sampling_ratio):
raise TypeError(
'sampling_ratio must be integer >= 1 or a pair of it: {}'
.format(sampling_ratio))
self.out_c, self.out_h, self.out_w = out_c, out_h, out_w
self.spatial_scale = spatial_scale
self.group_size = group_size
self.sampling_ratio = sampling_ratio
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, roi_type, roi_index_type = in_types
type_check.expect(
x_type.dtype == np.float32,
x_type.ndim == 4,
roi_type.dtype == np.float32,
roi_type.ndim == 2,
roi_type.shape[1] == 4,
roi_index_type.dtype == np.int32,
roi_index_type.ndim == 1,
roi_type.shape[0] == roi_index_type.shape[0]
)
def forward_cpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channel, height, width = bottom_data.shape[1:]
if self.out_c is None:
if channel % (self.group_size * self.group_size) != 0:
raise ValueError(
'input channel must be divided by group_size * group_size:'
'{} % {} != 0'
.format(channel, self.group_size * self.group_size))
out_c = channel // (self.group_size * self.group_size)
else:
if channel != self.out_c * self.group_size * self.group_size:
raise ValueError(
'input channel must be equal to '
'outsize[0] * group_size * group_size: {} != {}'
.format(channel,
self.out_c * self.group_size * self.group_size))
out_c = self.out_c
n_roi = bottom_rois.shape[0]
top_data = np.empty(
(n_roi, out_c, self.out_h, self.out_w), dtype=np.float32)
self.argmax_data = np.empty(top_data.shape, dtype=np.int32)
group_size = self.group_size
pooled_width, pooled_height \
= self.out_w, self.out_h
spatial_scale = self.spatial_scale
for i in six.moves.range(top_data.size):
n, ctop, ph, pw = np.unravel_index(i, top_data.shape)
roi_batch_ind = bottom_roi_indices[n]
roi_start_h = bottom_rois[n, 0] * spatial_scale
roi_start_w = bottom_rois[n, 1] * spatial_scale
roi_end_h = bottom_rois[n, 2] * spatial_scale
roi_end_w = bottom_rois[n, 3] * spatial_scale
roi_height = max(roi_end_h - roi_start_h, 0.1)
roi_width = max(roi_end_w - roi_start_w, 0.1)
bin_size_h = roi_height / pooled_height
bin_size_w = roi_width / pooled_width
gh = int(np.floor(ph * group_size / pooled_height))
gw = int(np.floor(pw * group_size / pooled_width))
gh = min(max(gh, 0), group_size - 1)
gw = min(max(gw, 0), group_size - 1)
c = (ctop * group_size + gh) * group_size + gw
if self.sampling_ratio[0] is None:
roi_bin_grid_h = int(np.ceil(roi_height / pooled_height))
else:
roi_bin_grid_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
roi_bin_grid_w = int(np.ceil(roi_width / pooled_width))
else:
roi_bin_grid_w = self.sampling_ratio[1]
maxval = - np.inf
maxidx = -1
for iy in six.moves.range(roi_bin_grid_h):
y = roi_start_h + ph * bin_size_h + \
(iy + .5) * bin_size_h / roi_bin_grid_h
y, y_low, y_high = _get_bounds(y, height)
if y is None or y_low is None or y_high is None:
continue
for ix in six.moves.range(roi_bin_grid_w):
x = roi_start_w + pw * bin_size_w + \
(ix + .5) * bin_size_w / roi_bin_grid_w
x, x_low, x_high = _get_bounds(x, width)
if x is None or x_low is None or x_high is None:
continue
# bilinear interpolation {{
w1, w2, w3, w4 = _get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high)
tmpval = 0.0
isvalid = False
bottom_index = iy * roi_bin_grid_w + ix
if w1 > 0 and y_low >= 0 and x_low >= 0:
v1 = bottom_data[roi_batch_ind, c, y_low, x_low]
tmpval += w1 * v1
isvalid = True
if w2 > 0 and y_low >= 0 and x_high <= width - 1:
v2 = bottom_data[roi_batch_ind, c, y_low, x_high]
tmpval += w2 * v2
isvalid = True
if w3 > 0 and y_high <= height - 1 and x_low >= 0:
v3 = bottom_data[roi_batch_ind, c, y_high, x_low]
tmpval += w3 * v3
isvalid = True
if w4 > 0 and y_high <= height - 1 and x_high <= width - 1:
v4 = bottom_data[roi_batch_ind, c, y_high, x_high]
tmpval += w4 * v4
isvalid = True
if isvalid and tmpval > maxval:
maxval = tmpval
maxidx = bottom_index
# }}
top_data[n, ctop, ph, pw] = maxval
self.argmax_data[n, ctop, ph, pw] = maxidx
return top_data,
def forward_gpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channel, height, width = bottom_data.shape[1:]
if self.out_c is None:
if channel % (self.group_size * self.group_size) != 0:
raise ValueError(
'input channel must be divided by group_size * group_size:'
'{} % {} != 0'
.format(channel, self.group_size * self.group_size))
out_c = channel // (self.group_size * self.group_size)
else:
if channel != self.out_c * self.group_size * self.group_size:
raise ValueError(
'input channel must be equal to '
'outsize[0] * group_size * group_size: {} != {}'
.format(channel,
self.out_c * self.group_size * self.group_size))
out_c = self.out_c
n_roi = bottom_rois.shape[0]
top_data = cuda.cupy.empty(
(n_roi, out_c, self.out_h, self.out_w), dtype=np.float32)
self.argmax_data = cuda.cupy.empty(top_data.shape, np.int32)
if self.sampling_ratio[0] is None:
sampling_ratio_h = 0
else:
sampling_ratio_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
sampling_ratio_w = 0
else:
sampling_ratio_w = self.sampling_ratio[1]
cuda.elementwise(
'''
raw T bottom_data, raw T bottom_rois,
raw int32 bottom_roi_indices,
T spatial_scale, int32 channel,
int32 height, int32 width,
int32 pooled_dim, int32 pooled_height, int32 pooled_width,
int32 group_size, int32 sampling_ratio_h, int32 sampling_ratio_w
''',
'T top_data, int32 argmax_data',
'''
// pos in output filter
int ph = (i / pooled_width) % pooled_height;
int pw = i % pooled_width;
int ctop = (i / pooled_width / pooled_height) % pooled_dim;
int n = i / pooled_width / pooled_height / pooled_dim;
int roi_batch_ind = bottom_roi_indices[n];
T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;
T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;
T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;
T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;
// Force too small ROIs to be 1x1
T roi_height = max(roi_end_h - roi_start_h, 0.1);
T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
// Compute w and h at bottom
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
// Compute c at bottom
int gh = floor(
static_cast<T>(ph) * group_size / pooled_height);
int gw = floor(
static_cast<T>(pw) * group_size / pooled_width);
gh = min(max(gh, 0), group_size - 1);
gw = min(max(gw, 0), group_size - 1);
int c = (ctop * group_size + gh) * group_size + gw;
int bottom_data_offset =
(roi_batch_ind * channel + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio_h > 0)
? sampling_ratio_h
: ceil(roi_height / pooled_height); // e.g. = 2
int roi_bin_grid_w = (sampling_ratio_w > 0)
? sampling_ratio_w
: ceil(roi_width / pooled_width);
T maxval = - (T) (1.0 / 0.0);
int maxidx = -1;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g. iy = 0, 1
{
T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5
int y_low, y_high;
bool y_ret = get_bounds(y, height, y_low, y_high);
if (!y_ret) continue;
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
int x_low, x_high;
bool x_ret = get_bounds(x, width, x_low, x_high);
if (!x_ret) continue;
// bilinear_interpolation {{
T w1, w2, w3, w4;
get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);
T tmpval = 0.;
bool isvalid = false;
int bottom_index = iy * roi_bin_grid_w + ix;
if (w1 > 0 && y_low >= 0 && x_low >= 0) {
T v1 = bottom_data[
bottom_data_offset + y_low * width + x_low];
tmpval += w1 * v1;
isvalid = true;
}
if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {
T v2 = bottom_data[
bottom_data_offset + y_low * width + x_high];
tmpval += w2 * v2;
isvalid = true;
}
if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {
T v3 = bottom_data[
bottom_data_offset + y_high * width + x_low];
tmpval += w3 * v3;
isvalid = true;
}
if (w4 > 0 && y_high <= height - 1 &&
x_high <= width - 1) {
T v4 = bottom_data[
bottom_data_offset + y_high * width + x_high];
tmpval += w4 * v4;
isvalid = true;
}
// }}
if (isvalid && tmpval > maxval) {
maxval = tmpval;
maxidx = bottom_index;
}
}
}
top_data = maxval;
argmax_data = maxidx;
''',
'ps_roi_max_align_2d_fwd',
preamble=_GET_BILINEAR_INTERP_KERNEL,
)(bottom_data, bottom_rois, bottom_roi_indices,
self.spatial_scale, channel, height, width,
out_c, self.out_h, self.out_w,
self.group_size, sampling_ratio_h, sampling_ratio_w,
top_data, self.argmax_data)
return top_data,
def backward_cpu(self, inputs, gy):
_, bottom_rois, bottom_roi_indices = inputs
height, width = self._bottom_data_shape[2:]
bottom_diff = np.zeros(self._bottom_data_shape, np.float32)
spatial_scale = self.spatial_scale
pooled_height = self.out_h
pooled_width = self.out_w
group_size = self.group_size
top_diff = gy[0]
for i in six.moves.range(top_diff.size):
n, ctop, ph, pw = np.unravel_index(i, top_diff.shape)
roi_batch_ind = bottom_roi_indices[n]
roi_start_h = bottom_rois[n, 0] * spatial_scale
roi_start_w = bottom_rois[n, 1] * spatial_scale
roi_end_h = bottom_rois[n, 2] * spatial_scale
roi_end_w = bottom_rois[n, 3] * spatial_scale
roi_height = max(roi_end_h - roi_start_h, 0.1)
roi_width = max(roi_end_w - roi_start_w, 0.1)
bin_size_h = roi_height / pooled_height
bin_size_w = roi_width / pooled_width
gh = int(np.floor(float(ph) * group_size / pooled_height))
gw = int(np.floor(float(pw) * group_size / pooled_width))
gh = min(max(gh, 0), group_size - 1)
gw = min(max(gw, 0), group_size - 1)
c = (ctop * group_size + gh) * group_size + gw
top_diff_this_bin = top_diff[n, ctop, ph, pw]
maxidx = self.argmax_data[n, ctop, ph, pw]
if maxidx != -1:
if self.sampling_ratio[0] is None:
roi_bin_grid_h = int(np.ceil(roi_height / pooled_height))
else:
roi_bin_grid_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
roi_bin_grid_w = int(np.ceil(roi_width / pooled_width))
else:
roi_bin_grid_w = self.sampling_ratio[1]
iy = int(maxidx / roi_bin_grid_w)
ix = maxidx % roi_bin_grid_w
y = roi_start_h + ph * bin_size_h + \
(iy + .5) * bin_size_h / roi_bin_grid_h
x = roi_start_w + pw * bin_size_w + \
(ix + .5) * bin_size_w / roi_bin_grid_w
y, y_low, y_high = _get_bounds(y, height)
if y is None or y_low is None or y_high is None:
continue
x, x_low, x_high = _get_bounds(x, width)
if x is None or x_low is None or x_high is None:
continue
# bilinear_interpolation_gradient {{
w1, w2, w3, w4 = _get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high)
if w1 > 0 and y_low >= 0 and x_low >= 0:
g1 = top_diff_this_bin * w1
bottom_diff[roi_batch_ind, c, y_low, x_low] += g1
if w2 > 0 and y_low >= 0 and x_high <= width - 1:
g2 = top_diff_this_bin * w2
bottom_diff[roi_batch_ind, c, y_low, x_high] += g2
if w3 > 0 and y_high <= height - 1 and x_low >= 0:
g3 = top_diff_this_bin * w3
bottom_diff[roi_batch_ind, c, y_high, x_low] += g3
if w4 > 0 and y_high <= height - 1 and x_high <= width - 1:
g4 = top_diff_this_bin * w4
bottom_diff[roi_batch_ind, c, y_high, x_high] += g4
# }}
return bottom_diff, None, None
def backward_gpu(self, inputs, gy):
_, bottom_rois, bottom_roi_indices = inputs
channel, height, width = self._bottom_data_shape[1:]
out_c, out_h, out_w = gy[0].shape[1:]
bottom_diff = cuda.cupy.zeros(self._bottom_data_shape, np.float32)
if self.sampling_ratio[0] is None:
sampling_ratio_h = 0
else:
sampling_ratio_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
sampling_ratio_w = 0
else:
sampling_ratio_w = self.sampling_ratio[1]
cuda.elementwise(
'''
raw T top_diff, raw int32 argmax_data,
raw T bottom_rois, raw int32 bottom_roi_indices,
T spatial_scale, int32 channel, int32 height, int32 width,
int32 pooled_dim, int32 pooled_height, int32 pooled_width,
int32 group_size, int32 sampling_ratio_h, int32 sampling_ratio_w
''',
'raw T bottom_diff',
'''
// (n, c, h, w) coords in bottom data
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int ctop = (i / pooled_width / pooled_height) % pooled_dim;
int n = i / pooled_width / pooled_height / pooled_dim;
// Do not use rounding; this implementation detail is critical
int roi_batch_ind = bottom_roi_indices[n];
T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;
T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;
T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;
T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;
// Force too small ROIs to be 1x1
T roi_height = max(roi_end_h - roi_start_h, 0.1);
T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
// Compute w and h at bottom
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
// Compute c at bottom
int gh = floor(
static_cast<T>(ph) * group_size / pooled_height);
int gw = floor(
static_cast<T>(pw) * group_size / pooled_width);
gh = min(max(gh, 0), group_size - 1);
gw = min(max(gw, 0), group_size - 1);
int c = (ctop * group_size + gh) * group_size + gw;
int bottom_diff_offset =
(roi_batch_ind * channel + c) * height * width;
int top_offset =
(n * pooled_dim + ctop) * pooled_height * pooled_width;
T top_diff_this_bin =
top_diff[top_offset + ph * pooled_width + pw];
int maxidx = argmax_data[top_offset + ph * pooled_width + pw];
if (maxidx != -1) {
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio_h > 0)
? sampling_ratio_h
: ceil(roi_height / pooled_height); // e.g. = 2
int roi_bin_grid_w = (sampling_ratio_w > 0)
? sampling_ratio_w
: ceil(roi_width / pooled_width);
int iy = maxidx / roi_bin_grid_w;
int ix = maxidx % roi_bin_grid_w;
T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5
T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
int y_low, y_high;
bool y_ret = get_bounds(y, height, y_low, y_high);
if (!y_ret) continue;
int x_low, x_high;
bool x_ret = get_bounds(x, width, x_low, x_high);
if (!x_ret) continue;
// bilinear_interpolation_gradient {{
T w1, w2, w3, w4;
get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);
if (w1 > 0 && y_low >= 0 && x_low >= 0) {
T g1 = top_diff_this_bin * w1;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_low * width + x_low], g1);
}
if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {
T g2 = top_diff_this_bin * w2;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_low * width + x_high], g2);
}
if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {
T g3 = top_diff_this_bin * w3;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_high * width + x_low], g3);
}
if (w4 > 0 && y_high <= height - 1 && x_high <= width - 1) {
T g4 = top_diff_this_bin * w4;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_high * width + x_high], g4);
}
// }}
}
''',
'ps_roi_max_align_2d_bwd',
preamble=_GET_BILINEAR_INTERP_KERNEL,
)(gy[0], self.argmax_data, bottom_rois, bottom_roi_indices,
self.spatial_scale, channel, height, width,
out_c, out_h, out_w, self.group_size,
sampling_ratio_h, sampling_ratio_w, bottom_diff,
size=gy[0].size)
return bottom_diff, None, None
def ps_roi_max_align_2d(
x, rois, roi_indices, outsize,
spatial_scale, group_size, sampling_ratio=None
):
"""Position Sensitive Region of Interest (ROI) Max align function.
This function computes the position sensitive max value of an input
spatial patch for the given regions of interest. Each ROI is split into
:math:`(group\_size, group\_size)` regions, and a position sensitive
value is computed for each region.
Args:
x (~chainer.Variable): Input variable. The shape is expected to be
4 dimensional: (n: batch, c: channel, h: height, w: width).
rois (array): Input roi. The shape is expected to
be :math:`(R, 4)`, and each datum is set as below:
(y_min, x_min, y_max, x_max). The dtype is :obj:`numpy.float32`.
roi_indices (array): Input roi indices. The shape is expected to
be :math:`(R, )`. The dtype is :obj:`numpy.int32`.
outsize ((int, int, int) or (int, int) or int): Expected output size
after pooling: (channel, height, width) or (height, width)
or outsize. ``outsize=o`` and ``outsize=(o, o)`` are equivalent.
The channel parameter is used to assert the input shape.
spatial_scale (float): Scale by which the roi is resized.
group_size (int): Position sensitive group size.
sampling_ratio ((int, int) or int): Sampling step for the alignment.
It must be an integer of at least :math:`1` or :obj:`None`; the value
is decided automatically when :obj:`None` is passed. Different ratios
for the height and width axes are supported by passing a tuple of int
as ``(sampling_ratio_h, sampling_ratio_w)``.
``sampling_ratio=s`` and ``sampling_ratio=(s, s)`` are equivalent.
Returns:
~chainer.Variable: Output variable.
See the original paper proposing PSROIPooling:
`R-FCN <https://arxiv.org/abs/1605.06409>`_.
See the original paper proposing ROIAlign:
`Mask R-CNN <https://arxiv.org/abs/1703.06870>`_.
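A minimal usage sketch (array shapes and values below are illustrative
assumptions, not taken from the library's documentation)::

    import numpy as np
    # channel = outsize[0] * group_size * group_size = 4 * 7 * 7 = 196
    x = np.random.uniform(size=(1, 196, 28, 28)).astype(np.float32)
    rois = np.array([[0., 0., 14., 14.]], dtype=np.float32)
    roi_indices = np.array([0], dtype=np.int32)
    y = ps_roi_max_align_2d(
        x, rois, roi_indices, outsize=(4, 7, 7),
        spatial_scale=1.0, group_size=7)
    # y.shape == (1, 4, 7, 7)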
"""
return PSROIMaxAlign2D(
outsize, spatial_scale,
group_size, sampling_ratio)(x, rois, roi_indices)
|
[
"chainer.utils.type_check.expect",
"numpy.ceil",
"six.moves.range",
"chainercv.functions.ps_roi_average_align_2d._get_bounds",
"numpy.empty",
"chainercv.functions.ps_roi_average_align_2d._pair",
"numpy.floor",
"numpy.zeros",
"numpy.unravel_index",
"chainer.backends.cuda.cupy.zeros",
"chainer.backends.cuda.cupy.empty",
"chainercv.functions.ps_roi_average_align_2d._get_bilinear_interp_params",
"chainer.backends.cuda.elementwise",
"chainercv.functions.ps_roi_average_pooling_2d._outsize"
] |
[((1420, 1437), 'chainercv.functions.ps_roi_average_pooling_2d._outsize', '_outsize', (['outsize'], {}), '(outsize)\n', (1428, 1437), False, 'from chainercv.functions.ps_roi_average_pooling_2d import _outsize\n'), ((2718, 2739), 'chainercv.functions.ps_roi_average_align_2d._pair', '_pair', (['sampling_ratio'], {}), '(sampling_ratio)\n', (2723, 2739), False, 'from chainercv.functions.ps_roi_average_align_2d import _pair\n'), ((3351, 3609), 'chainer.utils.type_check.expect', 'type_check.expect', (['(x_type.dtype == np.float32)', '(x_type.ndim == 4)', '(roi_type.dtype == np.float32)', '(roi_type.ndim == 2)', '(roi_type.shape[1] == 4)', '(roi_index_type.dtype == np.int32)', '(roi_index_type.ndim == 1)', '(roi_type.shape[0] == roi_index_type.shape[0])'], {}), '(x_type.dtype == np.float32, x_type.ndim == 4, roi_type.\n dtype == np.float32, roi_type.ndim == 2, roi_type.shape[1] == 4, \n roi_index_type.dtype == np.int32, roi_index_type.ndim == 1, roi_type.\n shape[0] == roi_index_type.shape[0])\n', (3368, 3609), False, 'from chainer.utils import type_check\n'), ((4773, 4839), 'numpy.empty', 'np.empty', (['(n_roi, out_c, self.out_h, self.out_w)'], {'dtype': 'np.float32'}), '((n_roi, out_c, self.out_h, self.out_w), dtype=np.float32)\n', (4781, 4839), True, 'import numpy as np\n'), ((4880, 4920), 'numpy.empty', 'np.empty', (['top_data.shape'], {'dtype': 'np.int32'}), '(top_data.shape, dtype=np.int32)\n', (4888, 4920), True, 'import numpy as np\n'), ((5095, 5125), 'six.moves.range', 'six.moves.range', (['top_data.size'], {}), '(top_data.size)\n', (5110, 5125), False, 'import six\n'), ((9671, 9744), 'chainer.backends.cuda.cupy.empty', 'cuda.cupy.empty', (['(n_roi, out_c, self.out_h, self.out_w)'], {'dtype': 'np.float32'}), '((n_roi, out_c, self.out_h, self.out_w), dtype=np.float32)\n', (9686, 9744), False, 'from chainer.backends import cuda\n'), ((9785, 9826), 'chainer.backends.cuda.cupy.empty', 'cuda.cupy.empty', (['top_data.shape', 'np.int32'], {}), '(top_data.shape, np.int32)\n', (9800, 9826), False, 'from chainer.backends import cuda\n'), ((15665, 15710), 'numpy.zeros', 'np.zeros', (['self._bottom_data_shape', 'np.float32'], {}), '(self._bottom_data_shape, np.float32)\n', (15673, 15710), True, 'import numpy as np\n'), ((15904, 15934), 'six.moves.range', 'six.moves.range', (['top_diff.size'], {}), '(top_diff.size)\n', (15919, 15934), False, 'import six\n'), ((19203, 19255), 'chainer.backends.cuda.cupy.zeros', 'cuda.cupy.zeros', (['self._bottom_data_shape', 'np.float32'], {}), '(self._bottom_data_shape, np.float32)\n', (19218, 19255), False, 'from chainer.backends import cuda\n'), ((5157, 5192), 'numpy.unravel_index', 'np.unravel_index', (['i', 'top_data.shape'], {}), '(i, top_data.shape)\n', (5173, 5192), True, 'import numpy as np\n'), ((6451, 6482), 'six.moves.range', 'six.moves.range', (['roi_bin_grid_h'], {}), '(roi_bin_grid_h)\n', (6466, 6482), False, 'import six\n'), ((10124, 15177), 'chainer.backends.cuda.elementwise', 'cuda.elementwise', (['"""\n raw T bottom_data, raw T bottom_rois,\n raw int32 bottom_roi_indices,\n T spatial_scale, int32 channel,\n int32 height, int32 width,\n int32 pooled_dim, int32 pooled_height, int32 pooled_width,\n int32 group_size, int32 sampling_ratio_h, int32 sampling_ratio_w\n """', '"""T top_data, int32 argmax_data"""', '"""\n // pos in output filter\n int ph = (i / pooled_width) % pooled_height;\n int pw = i % pooled_width;\n int ctop = (i / pooled_width / pooled_height) % pooled_dim;\n int n = i / pooled_width / pooled_height / pooled_dim;\n\n int roi_batch_ind 
= bottom_roi_indices[n];\n T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;\n T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;\n T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;\n T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;\n\n // Force too small ROIs to be 1x1\n T roi_height = max(roi_end_h - roi_start_h, 0.1);\n T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0\n\n // Compute w and h at bottom\n T bin_size_h = roi_height / static_cast<T>(pooled_height);\n T bin_size_w = roi_width / static_cast<T>(pooled_width);\n\n // Compute c at bottom\n int gh = floor(\n static_cast<T>(ph) * group_size / pooled_height);\n int gw = floor(\n static_cast<T>(pw) * group_size / pooled_width);\n gh = min(max(gh, 0), group_size - 1);\n gw = min(max(gw, 0), group_size - 1);\n int c = (ctop * group_size + gh) * group_size + gw;\n\n int bottom_data_offset =\n (roi_batch_ind * channel + c) * height * width;\n\n // We use roi_bin_grid to sample the grid and mimic integral\n int roi_bin_grid_h = (sampling_ratio_h > 0)\n ? sampling_ratio_h\n : ceil(roi_height / pooled_height); // e.g. = 2\n int roi_bin_grid_w = (sampling_ratio_w > 0)\n ? sampling_ratio_w\n : ceil(roi_width / pooled_width);\n\n T maxval = - (T) (1.0 / 0.0);\n int maxidx = -1;\n for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g. iy = 0, 1\n {\n T y = roi_start_h + ph * bin_size_h +\n static_cast<T>(iy + .5f) * bin_size_h /\n static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5\n int y_low, y_high;\n bool y_ret = get_bounds(y, height, y_low, y_high);\n if (!y_ret) continue;\n for (int ix = 0; ix < roi_bin_grid_w; ix++) {\n T x = roi_start_w + pw * bin_size_w +\n static_cast<T>(ix + .5f) * bin_size_w /\n static_cast<T>(roi_bin_grid_w);\n\n int x_low, x_high;\n bool x_ret = get_bounds(x, width, x_low, x_high);\n if (!x_ret) continue;\n // bilinear_interpolation {{\n T w1, w2, w3, w4;\n get_bilinear_interp_params(\n y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);\n\n T tmpval = 0.;\n bool isvalid = false;\n int bottom_index = iy * roi_bin_grid_w + ix;\n if (w1 > 0 && y_low >= 0 && x_low >= 0) {\n T v1 = bottom_data[\n bottom_data_offset + y_low * width + x_low];\n tmpval += w1 * v1;\n isvalid = true;\n }\n if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {\n T v2 = bottom_data[\n bottom_data_offset + y_low * width + x_high];\n tmpval += w2 * v2;\n isvalid = true;\n }\n if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {\n T v3 = bottom_data[\n bottom_data_offset + y_high * width + x_low];\n tmpval += w3 * v3;\n isvalid = true;\n }\n if (w4 > 0 && y_high <= height - 1 &&\n x_high <= width - 1) {\n T v4 = bottom_data[\n bottom_data_offset + y_high * width + x_high];\n tmpval += w4 * v4;\n isvalid = true;\n }\n\n // }}\n\n if (isvalid && tmpval > maxval) {\n maxval = tmpval;\n maxidx = bottom_index;\n }\n }\n }\n top_data = maxval;\n argmax_data = maxidx;\n """', '"""ps_roi_max_align_2d_fwd"""'], {'preamble': '_GET_BILINEAR_INTERP_KERNEL'}), '(\n """\n raw T bottom_data, raw T bottom_rois,\n raw int32 bottom_roi_indices,\n T spatial_scale, int32 channel,\n int32 height, int32 width,\n int32 pooled_dim, int32 pooled_height, int32 pooled_width,\n int32 group_size, int32 sampling_ratio_h, int32 sampling_ratio_w\n """\n , \'T top_data, int32 argmax_data\',\n """\n // pos in output filter\n int ph = (i / pooled_width) % pooled_height;\n int pw = i % pooled_width;\n int ctop = (i / pooled_width / pooled_height) % pooled_dim;\n int n = i / pooled_width / pooled_height / pooled_dim;\n\n int roi_batch_ind = 
bottom_roi_indices[n];\n T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;\n T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;\n T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;\n T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;\n\n // Force too small ROIs to be 1x1\n T roi_height = max(roi_end_h - roi_start_h, 0.1);\n T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0\n\n // Compute w and h at bottom\n T bin_size_h = roi_height / static_cast<T>(pooled_height);\n T bin_size_w = roi_width / static_cast<T>(pooled_width);\n\n // Compute c at bottom\n int gh = floor(\n static_cast<T>(ph) * group_size / pooled_height);\n int gw = floor(\n static_cast<T>(pw) * group_size / pooled_width);\n gh = min(max(gh, 0), group_size - 1);\n gw = min(max(gw, 0), group_size - 1);\n int c = (ctop * group_size + gh) * group_size + gw;\n\n int bottom_data_offset =\n (roi_batch_ind * channel + c) * height * width;\n\n // We use roi_bin_grid to sample the grid and mimic integral\n int roi_bin_grid_h = (sampling_ratio_h > 0)\n ? sampling_ratio_h\n : ceil(roi_height / pooled_height); // e.g. = 2\n int roi_bin_grid_w = (sampling_ratio_w > 0)\n ? sampling_ratio_w\n : ceil(roi_width / pooled_width);\n\n T maxval = - (T) (1.0 / 0.0);\n int maxidx = -1;\n for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g. iy = 0, 1\n {\n T y = roi_start_h + ph * bin_size_h +\n static_cast<T>(iy + .5f) * bin_size_h /\n static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5\n int y_low, y_high;\n bool y_ret = get_bounds(y, height, y_low, y_high);\n if (!y_ret) continue;\n for (int ix = 0; ix < roi_bin_grid_w; ix++) {\n T x = roi_start_w + pw * bin_size_w +\n static_cast<T>(ix + .5f) * bin_size_w /\n static_cast<T>(roi_bin_grid_w);\n\n int x_low, x_high;\n bool x_ret = get_bounds(x, width, x_low, x_high);\n if (!x_ret) continue;\n // bilinear_interpolation {{\n T w1, w2, w3, w4;\n get_bilinear_interp_params(\n y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);\n\n T tmpval = 0.;\n bool isvalid = false;\n int bottom_index = iy * roi_bin_grid_w + ix;\n if (w1 > 0 && y_low >= 0 && x_low >= 0) {\n T v1 = bottom_data[\n bottom_data_offset + y_low * width + x_low];\n tmpval += w1 * v1;\n isvalid = true;\n }\n if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {\n T v2 = bottom_data[\n bottom_data_offset + y_low * width + x_high];\n tmpval += w2 * v2;\n isvalid = true;\n }\n if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {\n T v3 = bottom_data[\n bottom_data_offset + y_high * width + x_low];\n tmpval += w3 * v3;\n isvalid = true;\n }\n if (w4 > 0 && y_high <= height - 1 &&\n x_high <= width - 1) {\n T v4 = bottom_data[\n bottom_data_offset + y_high * width + x_high];\n tmpval += w4 * v4;\n isvalid = true;\n }\n\n // }}\n\n if (isvalid && tmpval > maxval) {\n maxval = tmpval;\n maxidx = bottom_index;\n }\n }\n }\n top_data = maxval;\n argmax_data = maxidx;\n """\n , \'ps_roi_max_align_2d_fwd\', preamble=_GET_BILINEAR_INTERP_KERNEL)\n', (10140, 15177), False, 'from chainer.backends import cuda\n'), ((15966, 16001), 'numpy.unravel_index', 'np.unravel_index', (['i', 'top_diff.shape'], {}), '(i, top_diff.shape)\n', (15982, 16001), True, 'import numpy as np\n'), ((19553, 24296), 'chainer.backends.cuda.elementwise', 'cuda.elementwise', (['"""\n raw T top_diff, raw int32 argmax_data,\n raw T bottom_rois, raw int32 bottom_roi_indices,\n T spatial_scale, int32 channel, int32 height, int32 width,\n int32 pooled_dim, int32 pooled_height, int32 pooled_width,\n int32 group_size, int32 sampling_ratio_h, int32 sampling_ratio_w\n """', 
'"""raw T bottom_diff"""', '"""\n // (n, c, h, w) coords in bottom data\n int pw = i % pooled_width;\n int ph = (i / pooled_width) % pooled_height;\n int ctop = (i / pooled_width / pooled_height) % pooled_dim;\n int n = i / pooled_width / pooled_height / pooled_dim;\n\n // Do not using rounding; this implementation detail is critical\n int roi_batch_ind = bottom_roi_indices[n];\n T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;\n T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;\n T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;\n T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;\n\n // Force too small ROIs to be 1x1\n T roi_height = max(roi_end_h - roi_start_h, 0.1);\n T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0\n\n // Compute w and h at bottom\n T bin_size_h = roi_height / static_cast<T>(pooled_height);\n T bin_size_w = roi_width / static_cast<T>(pooled_width);\n\n // Compute c at bottom\n int gh = floor(\n static_cast<T>(ph) * group_size / pooled_height);\n int gw = floor(\n static_cast<T>(pw) * group_size / pooled_width);\n gh = min(max(gh, 0), group_size - 1);\n gw = min(max(gw, 0), group_size - 1);\n int c = (ctop * group_size + gh) * group_size + gw;\n\n int bottom_diff_offset =\n (roi_batch_ind * channel + c) * height * width;\n\n int top_offset =\n (n * pooled_dim + ctop) * pooled_height * pooled_width;\n T top_diff_this_bin =\n top_diff[top_offset + ph * pooled_width + pw];\n int maxidx = argmax_data[top_offset + ph * pooled_width + pw];\n\n if (maxidx != -1) {\n // We use roi_bin_grid to sample the grid and mimic integral\n int roi_bin_grid_h = (sampling_ratio_h > 0)\n ? sampling_ratio_h\n : ceil(roi_height / pooled_height); // e.g. = 2\n int roi_bin_grid_w = (sampling_ratio_w > 0)\n ? sampling_ratio_w\n : ceil(roi_width / pooled_width);\n\n int iy = maxidx / roi_bin_grid_w;\n int ix = maxidx % roi_bin_grid_w;\n\n T y = roi_start_h + ph * bin_size_h +\n static_cast<T>(iy + .5f) * bin_size_h /\n static_cast<T>(roi_bin_grid_h); // e.g. 
0.5, 1.5\n T x = roi_start_w + pw * bin_size_w +\n static_cast<T>(ix + .5f) * bin_size_w /\n static_cast<T>(roi_bin_grid_w);\n\n int y_low, y_high;\n bool y_ret = get_bounds(y, height, y_low, y_high);\n if (!y_ret) continue;\n int x_low, x_high;\n bool x_ret = get_bounds(x, width, x_low, x_high);\n if (!x_ret) continue;\n\n // bilinear_interpolation_gradient {{\n T w1, w2, w3, w4;\n get_bilinear_interp_params(\n y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);\n\n if (w1 > 0 && y_low >= 0 && x_low >= 0) {\n T g1 = top_diff_this_bin * w1;\n atomicAdd(&bottom_diff[\n bottom_diff_offset + y_low * width + x_low], g1);\n }\n if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {\n T g2 = top_diff_this_bin * w2;\n atomicAdd(&bottom_diff[\n bottom_diff_offset + y_low * width + x_high], g2);\n }\n if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {\n T g3 = top_diff_this_bin * w3;\n atomicAdd(&bottom_diff[\n bottom_diff_offset + y_high * width + x_low], g3);\n }\n if (w4 > 0 && y_high <= height - 1 && x_high <= width - 1) {\n T g4 = top_diff_this_bin * w4;\n atomicAdd(&bottom_diff[\n bottom_diff_offset + y_high * width + x_high], g4);\n }\n\n // }}\n }\n """', '"""ps_roi_max_align_2d_bwd"""'], {'preamble': '_GET_BILINEAR_INTERP_KERNEL'}), '(\n """\n raw T top_diff, raw int32 argmax_data,\n raw T bottom_rois, raw int32 bottom_roi_indices,\n T spatial_scale, int32 channel, int32 height, int32 width,\n int32 pooled_dim, int32 pooled_height, int32 pooled_width,\n int32 group_size, int32 sampling_ratio_h, int32 sampling_ratio_w\n """\n , \'raw T bottom_diff\',\n """\n // (n, c, h, w) coords in bottom data\n int pw = i % pooled_width;\n int ph = (i / pooled_width) % pooled_height;\n int ctop = (i / pooled_width / pooled_height) % pooled_dim;\n int n = i / pooled_width / pooled_height / pooled_dim;\n\n // Do not using rounding; this implementation detail is critical\n int roi_batch_ind = bottom_roi_indices[n];\n T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;\n T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;\n T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;\n T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;\n\n // Force too small ROIs to be 1x1\n T roi_height = max(roi_end_h - roi_start_h, 0.1);\n T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0\n\n // Compute w and h at bottom\n T bin_size_h = roi_height / static_cast<T>(pooled_height);\n T bin_size_w = roi_width / static_cast<T>(pooled_width);\n\n // Compute c at bottom\n int gh = floor(\n static_cast<T>(ph) * group_size / pooled_height);\n int gw = floor(\n static_cast<T>(pw) * group_size / pooled_width);\n gh = min(max(gh, 0), group_size - 1);\n gw = min(max(gw, 0), group_size - 1);\n int c = (ctop * group_size + gh) * group_size + gw;\n\n int bottom_diff_offset =\n (roi_batch_ind * channel + c) * height * width;\n\n int top_offset =\n (n * pooled_dim + ctop) * pooled_height * pooled_width;\n T top_diff_this_bin =\n top_diff[top_offset + ph * pooled_width + pw];\n int maxidx = argmax_data[top_offset + ph * pooled_width + pw];\n\n if (maxidx != -1) {\n // We use roi_bin_grid to sample the grid and mimic integral\n int roi_bin_grid_h = (sampling_ratio_h > 0)\n ? sampling_ratio_h\n : ceil(roi_height / pooled_height); // e.g. = 2\n int roi_bin_grid_w = (sampling_ratio_w > 0)\n ? 
sampling_ratio_w\n : ceil(roi_width / pooled_width);\n\n int iy = maxidx / roi_bin_grid_w;\n int ix = maxidx % roi_bin_grid_w;\n\n T y = roi_start_h + ph * bin_size_h +\n static_cast<T>(iy + .5f) * bin_size_h /\n static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5\n T x = roi_start_w + pw * bin_size_w +\n static_cast<T>(ix + .5f) * bin_size_w /\n static_cast<T>(roi_bin_grid_w);\n\n int y_low, y_high;\n bool y_ret = get_bounds(y, height, y_low, y_high);\n if (!y_ret) continue;\n int x_low, x_high;\n bool x_ret = get_bounds(x, width, x_low, x_high);\n if (!x_ret) continue;\n\n // bilinear_interpolation_gradient {{\n T w1, w2, w3, w4;\n get_bilinear_interp_params(\n y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);\n\n if (w1 > 0 && y_low >= 0 && x_low >= 0) {\n T g1 = top_diff_this_bin * w1;\n atomicAdd(&bottom_diff[\n bottom_diff_offset + y_low * width + x_low], g1);\n }\n if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {\n T g2 = top_diff_this_bin * w2;\n atomicAdd(&bottom_diff[\n bottom_diff_offset + y_low * width + x_high], g2);\n }\n if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {\n T g3 = top_diff_this_bin * w3;\n atomicAdd(&bottom_diff[\n bottom_diff_offset + y_high * width + x_low], g3);\n }\n if (w4 > 0 && y_high <= height - 1 && x_high <= width - 1) {\n T g4 = top_diff_this_bin * w4;\n atomicAdd(&bottom_diff[\n bottom_diff_offset + y_high * width + x_high], g4);\n }\n\n // }}\n }\n """\n , \'ps_roi_max_align_2d_bwd\', preamble=_GET_BILINEAR_INTERP_KERNEL)\n', (19569, 24296), False, 'from chainer.backends import cuda\n'), ((5722, 5763), 'numpy.floor', 'np.floor', (['(ph * group_size / pooled_height)'], {}), '(ph * group_size / pooled_height)\n', (5730, 5763), True, 'import numpy as np\n'), ((5786, 5826), 'numpy.floor', 'np.floor', (['(pw * group_size / pooled_width)'], {}), '(pw * group_size / pooled_width)\n', (5794, 5826), True, 'import numpy as np\n'), ((6633, 6655), 'chainercv.functions.ps_roi_average_align_2d._get_bounds', '_get_bounds', (['y', 'height'], {}), '(y, height)\n', (6644, 6655), False, 'from chainercv.functions.ps_roi_average_align_2d import _get_bounds\n'), ((6776, 6807), 'six.moves.range', 'six.moves.range', (['roi_bin_grid_w'], {}), '(roi_bin_grid_w)\n', (6791, 6807), False, 'import six\n'), ((17733, 17755), 'chainercv.functions.ps_roi_average_align_2d._get_bounds', '_get_bounds', (['y', 'height'], {}), '(y, height)\n', (17744, 17755), False, 'from chainercv.functions.ps_roi_average_align_2d import _get_bounds\n'), ((17885, 17906), 'chainercv.functions.ps_roi_average_align_2d._get_bounds', '_get_bounds', (['x', 'width'], {}), '(x, width)\n', (17896, 17906), False, 'from chainercv.functions.ps_roi_average_align_2d import _get_bounds\n'), ((18088, 18151), 'chainercv.functions.ps_roi_average_align_2d._get_bilinear_interp_params', '_get_bilinear_interp_params', (['y', 'x', 'y_low', 'x_low', 'y_high', 'x_high'], {}), '(y, x, y_low, x_low, y_high, x_high)\n', (18115, 18151), False, 'from chainercv.functions.ps_roi_average_align_2d import _get_bilinear_interp_params\n'), ((6070, 6105), 'numpy.ceil', 'np.ceil', (['(roi_height / pooled_height)'], {}), '(roi_height / pooled_height)\n', (6077, 6105), True, 'import numpy as np\n'), ((6265, 6298), 'numpy.ceil', 'np.ceil', (['(roi_width / pooled_width)'], {}), '(roi_width / pooled_width)\n', (6272, 6298), True, 'import numpy as np\n'), ((6971, 6992), 'chainercv.functions.ps_roi_average_align_2d._get_bounds', '_get_bounds', (['x', 'width'], {}), '(x, width)\n', (6982, 6992), False, 'from 
chainercv.functions.ps_roi_average_align_2d import _get_bounds\n'), ((7181, 7244), 'chainercv.functions.ps_roi_average_align_2d._get_bilinear_interp_params', '_get_bilinear_interp_params', (['y', 'x', 'y_low', 'x_low', 'y_high', 'x_high'], {}), '(y, x, y_low, x_low, y_high, x_high)\n', (7208, 7244), False, 'from chainercv.functions.ps_roi_average_align_2d import _get_bilinear_interp_params\n'), ((17044, 17079), 'numpy.ceil', 'np.ceil', (['(roi_height / pooled_height)'], {}), '(roi_height / pooled_height)\n', (17051, 17079), True, 'import numpy as np\n'), ((17255, 17288), 'numpy.ceil', 'np.ceil', (['(roi_width / pooled_width)'], {}), '(roi_width / pooled_width)\n', (17262, 17288), True, 'import numpy as np\n')]
|
import abc
import binascii
from psion.jose.exceptions import InvalidKey, InvalidSignature
from psion.jose.jwk import JsonWebKey
from psion.webtools import base64url_decode, base64url_encode
class JWSAlgorithm(abc.ABC):
"""
    Implementation of Section 3 of RFC 7518.
This class provides the expected method signatures
that will be used throughout the package.
All JWS Algorithms **MUST** inherit from this class and
implement its methods.
:cvar ``__algorithm__``: Name of the algorithm.
:cvar ``__hash_name__``: Name of the hash function used by the algorithm.
:cvar ``__key_type__``: Type of the key that the algorithm accepts.
"""
__algorithm__: str = None
__hash_name__: str = None
__key_type__: str = None
@classmethod
def validate_key(cls, key: JsonWebKey):
"""
Validates the provided key against the algorithm's
specifications and restrictions.
:param key: JWK to be validated.
:type key: JsonWebKey
:raises InvalidKey: The provided key is invalid.
"""
if not isinstance(key, JsonWebKey):
raise InvalidKey
# pylint: disable=used-before-assignment
if (alg := key.data.get("alg")) and alg != cls.__algorithm__:
raise InvalidKey(
f'This key is intended to be used by the algorithm "{alg}".'
)
if key.data.get("kty") != cls.__key_type__:
raise InvalidKey(f'This algorithm only accepts "{cls.__key_type__}" keys.')
@classmethod
@abc.abstractmethod
def sign(cls, data: bytes, key: JsonWebKey) -> bytes:
"""
Signs the provided data using the provided key.
:param data: Data to be signed.
:type data: bytes
:param key: JWK used to sign the data.
:type key: JsonWebKey
:return: URL Safe Base64 encoded signature of the data.
:rtype: bytes
"""
@classmethod
@abc.abstractmethod
def verify(cls, signature: bytes, data: bytes, key: JsonWebKey) -> None:
"""
Verifies if the data and signature provided match
based on the provided Json Web Key.
:param signature: Signature used in the verification.
**MUST** be a URL Safe Base64 encoded bytes string.
:type signature: bytes
:param data: Data to be verified.
:type data: bytes
:param key: JWK used to verify the data.
:type key: JsonWebKey
:raises InvalidSignature: The signature and data do not match.
"""
class none(JWSAlgorithm):
__algorithm__: str = "none"
@classmethod
def sign(cls, data: bytes, key: JsonWebKey = None) -> bytes:
return b""
@classmethod
def verify(cls, signature: bytes, data: bytes, key: JsonWebKey = None) -> None:
pass
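# Security note: the "none" algorithm provides no integrity protection at all;
# per common JWT best-practice guidance it should be rejected unless the
# application explicitly opts in to unsecured tokens.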
class _HMAC(JWSAlgorithm):
__key_type__: str = "oct"
@classmethod
def sign(cls, data: bytes, key: JsonWebKey) -> bytes:
cls.validate_key(key)
signature = key.sign(data, cls.__hash_name__)
return base64url_encode(signature)
@classmethod
def verify(cls, signature: bytes, data: bytes, key: JsonWebKey):
cls.validate_key(key)
try:
            # base64url_decode raises binascii.Error on incorrectly padded input.
raw_signature = base64url_decode(signature)
except binascii.Error:
raise InvalidSignature
key.verify(raw_signature, data, cls.__hash_name__)
class HS256(_HMAC):
__algorithm__: str = "HS256"
__hash_name__: str = "SHA256"
class HS384(_HMAC):
__algorithm__: str = "HS384"
__hash_name__: str = "SHA384"
class HS512(_HMAC):
__algorithm__: str = "HS512"
__hash_name__: str = "SHA512"
class _RSA_PKCS1v15(JWSAlgorithm):
__key_type__: str = "RSA"
__padding__: str = "PKCS1v15"
@classmethod
def sign(cls, data: bytes, key: JsonWebKey) -> bytes:
cls.validate_key(key)
signature = key.sign(data, cls.__hash_name__, rsa_padding=cls.__padding__)
return base64url_encode(signature)
@classmethod
def verify(cls, signature: bytes, data: bytes, key: JsonWebKey):
cls.validate_key(key)
try:
            # base64url_decode raises binascii.Error on incorrectly padded input.
raw_signature = base64url_decode(signature)
except binascii.Error:
raise InvalidSignature
key.verify(raw_signature, data, cls.__hash_name__, rsa_padding=cls.__padding__)
class RS256(_RSA_PKCS1v15):
__algorithm__: str = "RS256"
__hash_name__: str = "SHA256"
class RS384(_RSA_PKCS1v15):
__algorithm__: str = "RS384"
__hash_name__: str = "SHA384"
class RS512(_RSA_PKCS1v15):
__algorithm__: str = "RS512"
__hash_name__: str = "SHA512"
class _EC(JWSAlgorithm):
__curve__: str = None
__key_type__: str = "EC"
@classmethod
def validate_key(cls, key: JsonWebKey):
super(_EC, cls).validate_key(key)
if key.data.get("crv") != cls.__curve__:
raise InvalidKey(
f'This algorithm only accepts the curve "{cls.__curve__}".'
)
@classmethod
def sign(cls, data: bytes, key: JsonWebKey) -> bytes:
cls.validate_key(key)
signature = key.sign(data, cls.__hash_name__)
return base64url_encode(signature)
@classmethod
def verify(cls, signature: bytes, data: bytes, key: JsonWebKey):
cls.validate_key(key)
try:
            # base64url_decode raises binascii.Error on incorrectly padded input.
raw_signature = base64url_decode(signature)
except binascii.Error:
raise InvalidSignature
key.verify(raw_signature, data, cls.__hash_name__)
class ES256(_EC):
__algorithm__: str = "ES256"
__curve__: str = "P-256"
__hash_name__: str = "SHA256"
class ES384(_EC):
__algorithm__: str = "ES384"
__curve__: str = "P-384"
__hash_name__: str = "SHA384"
class ES512(_EC):
__algorithm__: str = "ES512"
__curve__: str = "P-521"
__hash_name__: str = "SHA512"
class _RSA_PSS(JWSAlgorithm):
__key_type__: str = "RSA"
__padding__: str = "PSS"
@classmethod
def sign(cls, data: bytes, key: JsonWebKey) -> bytes:
cls.validate_key(key)
signature = key.sign(data, cls.__hash_name__, rsa_padding=cls.__padding__)
return base64url_encode(signature)
@classmethod
def verify(cls, signature: bytes, data: bytes, key: JsonWebKey):
cls.validate_key(key)
try:
            # base64url_decode raises binascii.Error on incorrectly padded input.
raw_signature = base64url_decode(signature)
except binascii.Error:
raise InvalidSignature
key.verify(raw_signature, data, cls.__hash_name__, rsa_padding=cls.__padding__)
class PS256(_RSA_PSS):
__algorithm__: str = "PS256"
__hash_name__: str = "SHA256"
class PS384(_RSA_PSS):
__algorithm__: str = "PS384"
__hash_name__: str = "SHA384"
class PS512(_RSA_PSS):
__algorithm__: str = "PS512"
__hash_name__: str = "SHA512"
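# --- Illustrative sketch (not the psion API): the HS* classes above reduce to
# URL-safe Base64 over an HMAC digest. The helpers below are a hypothetical,
# self-contained restatement of that contract using only the standard library;
# the real classes delegate to JsonWebKey.sign/verify instead.
import base64
import hashlib
import hmac
def _hs256_sign_sketch(data: bytes, secret: bytes) -> bytes:
    # HMAC-SHA256 digest, then URL-safe Base64 without padding (RFC 7515 style).
    digest = hmac.new(secret, data, hashlib.sha256).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=")
def _hs256_verify_sketch(signature: bytes, data: bytes, secret: bytes) -> bool:
    # Re-pad before decoding, then compare digests in constant time.
    padded = signature + b"=" * (-len(signature) % 4)
    raw = base64.urlsafe_b64decode(padded)
    expected = hmac.new(secret, data, hashlib.sha256).digest()
    return hmac.compare_digest(raw, expected)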
|
[
"psion.webtools.base64url_decode",
"psion.webtools.base64url_encode",
"psion.jose.exceptions.InvalidKey"
] |
[((3089, 3116), 'psion.webtools.base64url_encode', 'base64url_encode', (['signature'], {}), '(signature)\n', (3105, 3116), False, 'from psion.webtools import base64url_decode, base64url_encode\n'), ((4060, 4087), 'psion.webtools.base64url_encode', 'base64url_encode', (['signature'], {}), '(signature)\n', (4076, 4087), False, 'from psion.webtools import base64url_decode, base64url_encode\n'), ((5310, 5337), 'psion.webtools.base64url_encode', 'base64url_encode', (['signature'], {}), '(signature)\n', (5326, 5337), False, 'from psion.webtools import base64url_decode, base64url_encode\n'), ((6352, 6379), 'psion.webtools.base64url_encode', 'base64url_encode', (['signature'], {}), '(signature)\n', (6368, 6379), False, 'from psion.webtools import base64url_decode, base64url_encode\n'), ((1298, 1370), 'psion.jose.exceptions.InvalidKey', 'InvalidKey', (['f"""This key is intended to be used by the algorithm "{alg}"."""'], {}), '(f\'This key is intended to be used by the algorithm "{alg}".\')\n', (1308, 1370), False, 'from psion.jose.exceptions import InvalidKey, InvalidSignature\n'), ((1472, 1541), 'psion.jose.exceptions.InvalidKey', 'InvalidKey', (['f"""This algorithm only accepts "{cls.__key_type__}" keys."""'], {}), '(f\'This algorithm only accepts "{cls.__key_type__}" keys.\')\n', (1482, 1541), False, 'from psion.jose.exceptions import InvalidKey, InvalidSignature\n'), ((3334, 3361), 'psion.webtools.base64url_decode', 'base64url_decode', (['signature'], {}), '(signature)\n', (3350, 3361), False, 'from psion.webtools import base64url_decode, base64url_encode\n'), ((4305, 4332), 'psion.webtools.base64url_decode', 'base64url_decode', (['signature'], {}), '(signature)\n', (4321, 4332), False, 'from psion.webtools import base64url_decode, base64url_encode\n'), ((5033, 5104), 'psion.jose.exceptions.InvalidKey', 'InvalidKey', (['f"""This algorithm only accepts the curve "{cls.__curve__}"."""'], {}), '(f\'This algorithm only accepts the curve "{cls.__curve__}".\')\n', (5043, 5104), False, 'from psion.jose.exceptions import InvalidKey, InvalidSignature\n'), ((5555, 5582), 'psion.webtools.base64url_decode', 'base64url_decode', (['signature'], {}), '(signature)\n', (5571, 5582), False, 'from psion.webtools import base64url_decode, base64url_encode\n'), ((6597, 6624), 'psion.webtools.base64url_decode', 'base64url_decode', (['signature'], {}), '(signature)\n', (6613, 6624), False, 'from psion.webtools import base64url_decode, base64url_encode\n')]
|
import pytest
import torch
import mantrap.agents
import mantrap.constants
import mantrap.environment
import mantrap.utility.maths
import mantrap.utility.shaping
torch.manual_seed(0)
###########################################################################
# Tests - All Environment #################################################
###########################################################################
@pytest.mark.parametrize("environment_class", [mantrap.environment.KalmanEnvironment,
mantrap.environment.PotentialFieldEnvironment,
mantrap.environment.SocialForcesEnvironment,
mantrap.environment.Trajectron])
class TestEnvironment:
@staticmethod
def test_initialization(environment_class: mantrap.environment.base.GraphBasedEnvironment.__class__):
ego_position = torch.rand(2).float()
env = environment_class(ego_type=mantrap.agents.IntegratorDTAgent, ego_position=ego_position)
assert torch.all(torch.eq(env.ego.position, ego_position))
assert env.num_ados == 0
assert env.time == 0.0
env.add_ado(position=torch.tensor([6, 7]), velocity=torch.ones(2))
assert torch.all(torch.eq(env.ados[0].position, torch.tensor([6, 7]).float()))
assert torch.all(torch.eq(env.ados[0].velocity, torch.ones(2)))
@staticmethod
def test_step(environment_class: mantrap.environment.base.GraphBasedEnvironment.__class__):
ado_init_position = torch.zeros(2)
ado_init_velocity = torch.ones(2)
ego_init_position = torch.tensor([-4, 6])
env = environment_class(ego_type=mantrap.agents.IntegratorDTAgent, ego_position=ego_init_position)
# In order to be able to verify the generated trajectories easily, we assume uni-modality here.
env.add_ado(position=ado_init_position, velocity=ado_init_velocity)
assert env.num_ados == 1
t_horizon = 5
ego_controls = torch.stack([torch.tensor([1, 0])] * t_horizon)
ego_trajectory = env.ego.unroll_trajectory(controls=ego_controls, dt=env.dt)
for t in range(t_horizon):
ado_t, ego_t = env.step(ego_action=ego_controls[t])
# Check dimensions of outputted ado and ego states.
assert ado_t.numel() == 5
assert ado_t.shape == (1, 5)
assert ego_t.numel() == 5
            # While the exact values of the ado agents' states depend on the environment dynamics used,
            # the ego state itself is fully determined by the applied controls, which the environment
            # must enforce when it is stepped forward.
assert all(torch.isclose(ego_t, ego_trajectory[t+1, :]))
@staticmethod
def test_step_reset(environment_class: mantrap.environment.base.GraphBasedEnvironment.__class__):
ego_position = torch.rand(2)
env = environment_class(ego_type=mantrap.agents.IntegratorDTAgent, ego_position=ego_position)
# In order to be able to verify the generated trajectories easily, we assume uni-modality here.
env.add_ado(position=torch.zeros(2), velocity=torch.zeros(2))
env.add_ado(position=torch.ones(2), velocity=torch.zeros(2))
ego_next_state = torch.rand(5)
ado_next_states = torch.rand(env.num_ados, 5)
env.step_reset(ego_next=ego_next_state, ado_next=ado_next_states)
assert torch.all(torch.eq(env.ego.state_with_time, ego_next_state))
for m_ado, ado in enumerate(env.ados):
assert torch.allclose(ado.state_with_time, ado_next_states[m_ado, :])
@staticmethod
def test_prediction_trajectories_shape(environment_class: mantrap.environment.base.GraphBasedEnvironment.__class__):
env = environment_class()
t_horizon = 4
history = torch.stack(5 * [torch.tensor([1, 0, 0, 0, 0])])
env.add_ado(goal=torch.ones(2), position=torch.tensor([-1, 0]), history=history)
env.add_ado(goal=torch.zeros(2), position=torch.tensor([1, 0]), history=history)
ado_trajectories = env.sample_wo_ego(t_horizon=t_horizon)
assert mantrap.utility.shaping.check_ado_samples(ado_trajectories, t_horizon=t_horizon + 1, ados=2)
@staticmethod
def test_build_distributions(environment_class: mantrap.environment.base.GraphBasedEnvironment.__class__):
ego_position = torch.rand(2)
env = environment_class(ego_type=mantrap.agents.IntegratorDTAgent, ego_position=ego_position)
env.add_ado(position=torch.tensor([3, 0]), goal=torch.tensor([-4, 0]))
env.add_ado(position=torch.tensor([5, 0]), goal=torch.tensor([-2, 0]))
env.add_ado(position=torch.tensor([10, 0]), goal=torch.tensor([5, 3]))
prediction_horizon = 10
trajectory = torch.zeros((prediction_horizon + 1, 4)) # does not matter here anyway
dist_dict = env.compute_distributions(ego_trajectory=trajectory)
assert env.check_distribution(dist_dict, t_horizon=prediction_horizon)
@staticmethod
def test_detaching(environment_class: mantrap.environment.base.GraphBasedEnvironment.__class__):
ego_position = torch.rand(2)
env = environment_class(ego_type=mantrap.agents.IntegratorDTAgent, ego_position=ego_position)
env.add_ado(position=torch.tensor([3, 0]), goal=torch.tensor([-4, 0]))
env.add_ado(position=torch.tensor([-3, 2]), goal=torch.tensor([1, 5]))
        # Build computation graph to detach later on. Then check whether the graph has been built by checking
# for gradient availability.
ado_action = torch.rand(2)
ado_action.requires_grad = True
env.ados[0].update(ado_action, dt=env.dt)
if env.is_differentiable_wrt_ego:
assert env.ados[0].position.grad_fn is not None
# Detach computation graph.
env.detach()
assert env.ados[0].position.grad_fn is None
@staticmethod
def test_copy(environment_class: mantrap.environment.base.GraphBasedEnvironment.__class__):
ego_init_pos = torch.tensor([-5, 0])
ados_init_pos = torch.stack([torch.tensor([1.0, 0.0]), torch.tensor([-6, 2.5])])
ados_init_vel = torch.stack([torch.tensor([4.2, -1]), torch.tensor([-7, -2.0])])
ados_goal = torch.stack([torch.zeros(2), torch.ones(2)])
# Create example environment scene to copy later on. Then copy the example environment.
env = environment_class(ego_type=mantrap.agents.IntegratorDTAgent, ego_position=ego_init_pos)
env.add_ado(position=ados_init_pos[0], velocity=ados_init_vel[0], goal=ados_goal[0])
env.add_ado(position=ados_init_pos[1], velocity=ados_init_vel[1], goal=ados_goal[1])
env_copy = env.copy()
# Test equality of basic environment properties and states.
assert env.name == env_copy.name
assert env.time == env_copy.time
assert env.dt == env_copy.dt
assert env.same_initial_conditions(other=env_copy)
assert env.ego == env_copy.ego
for i in range(env.num_ados): # agents should be equal and in the same order
assert env.ados[i] == env_copy.ados[i]
assert env.ado_ids[i] == env_copy.ado_ids[i]
ego_state_original, ado_states_original = env.states()
ego_state_copy, ado_states_copy = env_copy.states()
assert torch.all(torch.eq(ego_state_original, ego_state_copy))
assert torch.all(torch.eq(ado_states_original, ado_states_copy))
# Test broken link between `env` and `env_copy`, i.e. when I change env_copy, then the original
# environment remains unchanged.
env_copy.step(ego_action=torch.ones(2)) # does not matter here anyways
ego_state_original, ado_states_original = env.states()
ego_state_copy, ado_states_copy = env_copy.states()
assert not torch.all(torch.eq(ego_state_original, ego_state_copy))
assert not torch.all(torch.eq(ado_states_original, ado_states_copy))
@staticmethod
def test_states(environment_class: mantrap.environment.base.GraphBasedEnvironment.__class__):
ego_position = torch.tensor([-5, 0])
env = environment_class(ego_type=mantrap.agents.IntegratorDTAgent, ego_position=ego_position)
env.add_ado(position=torch.tensor([3, 0]), velocity=torch.rand(2), goal=torch.rand(2))
env.add_ado(position=torch.tensor([-4, 2]), velocity=torch.ones(2), goal=torch.rand(2))
ego_state, ado_states = env.states()
assert mantrap.utility.shaping.check_ego_state(ego_state, enforce_temporal=True)
assert mantrap.utility.shaping.check_ado_states(ado_states, enforce_temporal=True)
# The first entry of every predicted trajectory should be the current state, check that.
ado_trajectories = env.predict_wo_ego(t_horizon=2)
assert torch.allclose(ado_trajectories[:, 0, 0, :], ado_states[:, 0:2], atol=0.01)
ado_samples = env.sample_wo_ego(t_horizon=2, num_samples=1)
assert torch.allclose(ado_samples[:, 0, 0, 0, :], ado_states[:, 0:2], atol=0.01)
# Test that the states are the same as the states of actual agents.
assert torch.all(torch.eq(ego_state, env.ego.state_with_time))
for m_ado, ado in enumerate(env.ados):
assert torch.all(torch.eq(ado_states[m_ado, :], ado.state_with_time))
###########################################################################
# Test - Social Forces Environment ########################################
###########################################################################
@pytest.mark.parametrize("goal_position", [torch.tensor([2.0, 2.0]), torch.tensor([0.0, -2.0])])
def test_social_forces_single_ado_prediction(goal_position: torch.Tensor):
env = mantrap.environment.SocialForcesEnvironment()
env.add_ado(goal=goal_position, position=torch.tensor([-1, -5]), velocity=torch.ones(2) * 0.8)
trajectory_samples = env.sample_wo_ego(t_horizon=100, num_samples=100)
trajectory = torch.mean(trajectory_samples, dim=1).squeeze()
assert torch.isclose(trajectory[-1][0], goal_position[0], atol=1.0)
assert torch.isclose(trajectory[-1][1], goal_position[1], atol=1.0)
def test_social_forces_static_ado_pair_prediction():
env = mantrap.environment.SocialForcesEnvironment()
env.add_ado(goal=torch.zeros(2), position=torch.tensor([-1, 0]), velocity=torch.tensor([0.1, 0]))
env.add_ado(goal=torch.zeros(2), position=torch.tensor([1, 0]), velocity=torch.tensor([-0.1, 0]))
trajectories = env.sample_wo_ego(t_horizon=10, num_samples=100)
trajectories = torch.mean(trajectories, dim=1).squeeze()
    # Due to the repulsion between the agents, they cannot both reach their goal position (which is
    # the same for both of them). Therefore the final distance must be clearly larger than zero;
    # otherwise the repulsive force would not have acted (or would have acted attractively instead).
assert torch.norm(trajectories[0, -1, 0:1] - trajectories[1, -1, 0:1]) > 1e-3
###########################################################################
# Test - Potential Field Environment ######################################
###########################################################################
@pytest.mark.parametrize(
"pos_1, pos_2",
[
(torch.tensor([0, 2]), torch.tensor([0, 6])),
],
)
def test_potential_field_forces(pos_1: torch.Tensor, pos_2: torch.Tensor):
env_1 = mantrap.environment.PotentialFieldEnvironment(pos_1, ego_type=mantrap.agents.IntegratorDTAgent)
env_2 = mantrap.environment.PotentialFieldEnvironment(pos_2, ego_type=mantrap.agents.IntegratorDTAgent)
t_horizon = 4
mus = torch.zeros((2, t_horizon, env_1.num_modes, 2))
sigmas = torch.zeros((2, t_horizon, env_1.num_modes, 2))
grads = torch.zeros((2, t_horizon, 2))
for i, env in enumerate([env_1, env_2]):
env.add_ado(position=torch.zeros(2), velocity=torch.tensor([0, 1]))
ego_controls = torch.zeros((4, 2))
ego_controls.requires_grad = True
ego_trajectory = env.ego.unroll_trajectory(ego_controls, dt=env.dt)
dist_dict = env.compute_distributions(ego_trajectory=ego_trajectory)
mus[i, :, :, :] = dist_dict[env.ado_ids[0]].mean
sigmas[i, :, :, :] = dist_dict[env.ado_ids[0]].variance
grads[i, :, :] = torch.autograd.grad(torch.norm(mus[i, -1, :, :]), ego_controls)[0]
    # The interaction "force" is distance-based, so a more distant ego agent induces a smaller "force".
    # A larger induced force spreads the particle parameters further apart, so the predicted
    # uncertainty grows larger as well. (0 ==> "close" ego; 1 ==> "far" ego)
assert torch.sum(sigmas[0, :, :]) >= torch.sum(sigmas[1, :, :])
    # When the relative position is uni-directional, e.g. purely along x, the force and hence the
    # gradient should point only in that direction.
for i, pos in enumerate([pos_1, pos_2]):
for k in [0, 1]:
if pos[k] == 0:
assert torch.allclose(grads[i, :, k], torch.zeros(t_horizon))
###########################################################################
# Test - Kalman Environment ###############################################
###########################################################################
def test_kalman_distributions():
env = mantrap.environment.KalmanEnvironment()
x0, y0 = 3.7, -5.1
vx, vy = -1.0, 0.9
env.add_ado(position=torch.tensor([x0, y0]), velocity=torch.tensor([vx, vy]))
t_horizon = 4
dist_dict = env.compute_distributions_wo_ego(t_horizon=t_horizon)
mean = dist_dict[env.ado_ids[0]].mean
variance = dist_dict[env.ado_ids[0]].variance
assert torch.allclose(mean[:, 0, 0], torch.ones(t_horizon) * vx)
assert torch.allclose(mean[:, 0, 1], torch.ones(t_horizon) * vy)
variance_diff = (variance[1:, :, :] - variance[:-1, :, :]).squeeze()
    assert torch.all(variance_diff >= 0)  # variance is non-decreasing over time
###########################################################################
# Test - Trajectron Environment ###########################################
###########################################################################
def test_trajectron_wo_prediction():
env = mantrap.environment.Trajectron(ego_type=mantrap.agents.DoubleIntegratorDTAgent,
ego_position=torch.zeros(2))
env.add_ado(position=torch.tensor([4, 4]), velocity=torch.tensor([0, -1]))
samples_wo = env.sample_wo_ego(t_horizon=10, num_samples=5)
assert mantrap.utility.shaping.check_ado_samples(samples_wo, ados=env.num_ados, num_samples=5)
ego_controls = torch.zeros((10, 2))
samples_with = env.sample_w_controls(ego_controls, num_samples=5)
assert mantrap.utility.shaping.check_ado_samples(samples_with, ados=env.num_ados, num_samples=5)
##########################################################################
# Test - SGAN Environment #################################################
##########################################################################
def test_sgan_sampling():
sgan = mantrap.environment.SGAN(ego_position=torch.zeros(2), ego_velocity=torch.rand(2))
sgan.add_ado(position=torch.tensor([4, 2]), velocity=torch.tensor([-1, -1]))
samples = sgan.sample_wo_ego(t_horizon=5, num_samples=3)
assert mantrap.utility.shaping.check_ado_samples(samples, num_samples=3, t_horizon=6)
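# --- Illustrative sketch (not mantrap's implementation): `test_step` above relies
# on `env.ego.unroll_trajectory(controls, dt)`. For a single-integrator agent this
# is the recurrence x_{t+1} = x_t + u_t * dt; the hypothetical helper below spells
# out the positions the per-step assertion is compared against.
def _unroll_single_integrator_sketch(position: torch.Tensor,
                                     controls: torch.Tensor,
                                     dt: float) -> torch.Tensor:
    """Return positions of shape [t_horizon + 1, 2], starting at `position`."""
    positions = [position]
    for u_t in controls:
        # Position integrates the control (velocity) over one time step.
        positions.append(positions[-1] + u_t * dt)
    return torch.stack(positions)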
|
[
"torch.eq",
"torch.ones",
"torch.mean",
"torch.sum",
"torch.manual_seed",
"torch.norm",
"torch.isclose",
"torch.rand",
"torch.zeros",
"pytest.mark.parametrize",
"torch.allclose",
"torch.tensor",
"torch.all"
] |
[((164, 184), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (181, 184), False, 'import torch\n'), ((415, 638), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""environment_class"""', '[mantrap.environment.KalmanEnvironment, mantrap.environment.\n PotentialFieldEnvironment, mantrap.environment.SocialForcesEnvironment,\n mantrap.environment.Trajectron]'], {}), "('environment_class', [mantrap.environment.\n KalmanEnvironment, mantrap.environment.PotentialFieldEnvironment,\n mantrap.environment.SocialForcesEnvironment, mantrap.environment.\n Trajectron])\n", (438, 638), False, 'import pytest\n'), ((10092, 10152), 'torch.isclose', 'torch.isclose', (['trajectory[-1][0]', 'goal_position[0]'], {'atol': '(1.0)'}), '(trajectory[-1][0], goal_position[0], atol=1.0)\n', (10105, 10152), False, 'import torch\n'), ((10164, 10224), 'torch.isclose', 'torch.isclose', (['trajectory[-1][1]', 'goal_position[1]'], {'atol': '(1.0)'}), '(trajectory[-1][1], goal_position[1], atol=1.0)\n', (10177, 10224), False, 'import torch\n'), ((11716, 11763), 'torch.zeros', 'torch.zeros', (['(2, t_horizon, env_1.num_modes, 2)'], {}), '((2, t_horizon, env_1.num_modes, 2))\n', (11727, 11763), False, 'import torch\n'), ((11777, 11824), 'torch.zeros', 'torch.zeros', (['(2, t_horizon, env_1.num_modes, 2)'], {}), '((2, t_horizon, env_1.num_modes, 2))\n', (11788, 11824), False, 'import torch\n'), ((11837, 11867), 'torch.zeros', 'torch.zeros', (['(2, t_horizon, 2)'], {}), '((2, t_horizon, 2))\n', (11848, 11867), False, 'import torch\n'), ((13975, 14004), 'torch.all', 'torch.all', (['(variance_diff >= 0)'], {}), '(variance_diff >= 0)\n', (13984, 14004), False, 'import torch\n'), ((14740, 14760), 'torch.zeros', 'torch.zeros', (['(10, 2)'], {}), '((10, 2))\n', (14751, 14760), False, 'import torch\n'), ((1570, 1584), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (1581, 1584), False, 'import torch\n'), ((1613, 1626), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (1623, 1626), False, 'import torch\n'), ((1655, 1676), 'torch.tensor', 'torch.tensor', (['[-4, 6]'], {}), '([-4, 6])\n', (1667, 1676), False, 'import torch\n'), ((2906, 2919), 'torch.rand', 'torch.rand', (['(2)'], {}), '(2)\n', (2916, 2919), False, 'import torch\n'), ((3292, 3305), 'torch.rand', 'torch.rand', (['(5)'], {}), '(5)\n', (3302, 3305), False, 'import torch\n'), ((3332, 3359), 'torch.rand', 'torch.rand', (['env.num_ados', '(5)'], {}), '(env.num_ados, 5)\n', (3342, 3359), False, 'import torch\n'), ((4410, 4423), 'torch.rand', 'torch.rand', (['(2)'], {}), '(2)\n', (4420, 4423), False, 'import torch\n'), ((4817, 4857), 'torch.zeros', 'torch.zeros', (['(prediction_horizon + 1, 4)'], {}), '((prediction_horizon + 1, 4))\n', (4828, 4857), False, 'import torch\n'), ((5185, 5198), 'torch.rand', 'torch.rand', (['(2)'], {}), '(2)\n', (5195, 5198), False, 'import torch\n'), ((5633, 5646), 'torch.rand', 'torch.rand', (['(2)'], {}), '(2)\n', (5643, 5646), False, 'import torch\n'), ((6088, 6109), 'torch.tensor', 'torch.tensor', (['[-5, 0]'], {}), '([-5, 0])\n', (6100, 6109), False, 'import torch\n'), ((8160, 8181), 'torch.tensor', 'torch.tensor', (['[-5, 0]'], {}), '([-5, 0])\n', (8172, 8181), False, 'import torch\n'), ((8873, 8948), 'torch.allclose', 'torch.allclose', (['ado_trajectories[:, 0, 0, :]', 'ado_states[:, 0:2]'], {'atol': '(0.01)'}), '(ado_trajectories[:, 0, 0, :], ado_states[:, 0:2], atol=0.01)\n', (8887, 8948), False, 'import torch\n'), ((9032, 9105), 'torch.allclose', 'torch.allclose', (['ado_samples[:, 0, 0, 0, :]', 
'ado_states[:, 0:2]'], {'atol': '(0.01)'}), '(ado_samples[:, 0, 0, 0, :], ado_states[:, 0:2], atol=0.01)\n', (9046, 9105), False, 'import torch\n'), ((9656, 9680), 'torch.tensor', 'torch.tensor', (['[2.0, 2.0]'], {}), '([2.0, 2.0])\n', (9668, 9680), False, 'import torch\n'), ((9682, 9707), 'torch.tensor', 'torch.tensor', (['[0.0, -2.0]'], {}), '([0.0, -2.0])\n', (9694, 9707), False, 'import torch\n'), ((10980, 11043), 'torch.norm', 'torch.norm', (['(trajectories[0, -1, 0:1] - trajectories[1, -1, 0:1])'], {}), '(trajectories[0, -1, 0:1] - trajectories[1, -1, 0:1])\n', (10990, 11043), False, 'import torch\n'), ((12013, 12032), 'torch.zeros', 'torch.zeros', (['(4, 2)'], {}), '((4, 2))\n', (12024, 12032), False, 'import torch\n'), ((12740, 12766), 'torch.sum', 'torch.sum', (['sigmas[0, :, :]'], {}), '(sigmas[0, :, :])\n', (12749, 12766), False, 'import torch\n'), ((12770, 12796), 'torch.sum', 'torch.sum', (['sigmas[1, :, :]'], {}), '(sigmas[1, :, :])\n', (12779, 12796), False, 'import torch\n'), ((1086, 1126), 'torch.eq', 'torch.eq', (['env.ego.position', 'ego_position'], {}), '(env.ego.position, ego_position)\n', (1094, 1126), False, 'import torch\n'), ((3460, 3509), 'torch.eq', 'torch.eq', (['env.ego.state_with_time', 'ego_next_state'], {}), '(env.ego.state_with_time, ego_next_state)\n', (3468, 3509), False, 'import torch\n'), ((3577, 3639), 'torch.allclose', 'torch.allclose', (['ado.state_with_time', 'ado_next_states[m_ado, :]'], {}), '(ado.state_with_time, ado_next_states[m_ado, :])\n', (3591, 3639), False, 'import torch\n'), ((7399, 7443), 'torch.eq', 'torch.eq', (['ego_state_original', 'ego_state_copy'], {}), '(ego_state_original, ego_state_copy)\n', (7407, 7443), False, 'import torch\n'), ((7470, 7516), 'torch.eq', 'torch.eq', (['ado_states_original', 'ado_states_copy'], {}), '(ado_states_original, ado_states_copy)\n', (7478, 7516), False, 'import torch\n'), ((9208, 9252), 'torch.eq', 'torch.eq', (['ego_state', 'env.ego.state_with_time'], {}), '(ego_state, env.ego.state_with_time)\n', (9216, 9252), False, 'import torch\n'), ((9886, 9908), 'torch.tensor', 'torch.tensor', (['[-1, -5]'], {}), '([-1, -5])\n', (9898, 9908), False, 'import torch\n'), ((10033, 10070), 'torch.mean', 'torch.mean', (['trajectory_samples'], {'dim': '(1)'}), '(trajectory_samples, dim=1)\n', (10043, 10070), False, 'import torch\n'), ((10357, 10371), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (10368, 10371), False, 'import torch\n'), ((10382, 10403), 'torch.tensor', 'torch.tensor', (['[-1, 0]'], {}), '([-1, 0])\n', (10394, 10403), False, 'import torch\n'), ((10414, 10436), 'torch.tensor', 'torch.tensor', (['[0.1, 0]'], {}), '([0.1, 0])\n', (10426, 10436), False, 'import torch\n'), ((10459, 10473), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (10470, 10473), False, 'import torch\n'), ((10484, 10504), 'torch.tensor', 'torch.tensor', (['[1, 0]'], {}), '([1, 0])\n', (10496, 10504), False, 'import torch\n'), ((10515, 10538), 'torch.tensor', 'torch.tensor', (['[-0.1, 0]'], {}), '([-0.1, 0])\n', (10527, 10538), False, 'import torch\n'), ((10628, 10659), 'torch.mean', 'torch.mean', (['trajectories'], {'dim': '(1)'}), '(trajectories, dim=1)\n', (10638, 10659), False, 'import torch\n'), ((11342, 11362), 'torch.tensor', 'torch.tensor', (['[0, 2]'], {}), '([0, 2])\n', (11354, 11362), False, 'import torch\n'), ((11364, 11384), 'torch.tensor', 'torch.tensor', (['[0, 6]'], {}), '([0, 6])\n', (11376, 11384), False, 'import torch\n'), ((13513, 13535), 'torch.tensor', 'torch.tensor', (['[x0, y0]'], {}), '([x0, 
y0])\n', (13525, 13535), False, 'import torch\n'), ((13546, 13568), 'torch.tensor', 'torch.tensor', (['[vx, vy]'], {}), '([vx, vy])\n', (13558, 13568), False, 'import torch\n'), ((13793, 13814), 'torch.ones', 'torch.ones', (['t_horizon'], {}), '(t_horizon)\n', (13803, 13814), False, 'import torch\n'), ((13862, 13883), 'torch.ones', 'torch.ones', (['t_horizon'], {}), '(t_horizon)\n', (13872, 13883), False, 'import torch\n'), ((14461, 14475), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (14472, 14475), False, 'import torch\n'), ((14502, 14522), 'torch.tensor', 'torch.tensor', (['[4, 4]'], {}), '([4, 4])\n', (14514, 14522), False, 'import torch\n'), ((14533, 14554), 'torch.tensor', 'torch.tensor', (['[0, -1]'], {}), '([0, -1])\n', (14545, 14554), False, 'import torch\n'), ((15235, 15249), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (15246, 15249), False, 'import torch\n'), ((15264, 15277), 'torch.rand', 'torch.rand', (['(2)'], {}), '(2)\n', (15274, 15277), False, 'import torch\n'), ((15305, 15325), 'torch.tensor', 'torch.tensor', (['[4, 2]'], {}), '([4, 2])\n', (15317, 15325), False, 'import torch\n'), ((15336, 15358), 'torch.tensor', 'torch.tensor', (['[-1, -1]'], {}), '([-1, -1])\n', (15348, 15358), False, 'import torch\n'), ((937, 950), 'torch.rand', 'torch.rand', (['(2)'], {}), '(2)\n', (947, 950), False, 'import torch\n'), ((1221, 1241), 'torch.tensor', 'torch.tensor', (['[6, 7]'], {}), '([6, 7])\n', (1233, 1241), False, 'import torch\n'), ((1252, 1265), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (1262, 1265), False, 'import torch\n'), ((1411, 1424), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (1421, 1424), False, 'import torch\n'), ((2716, 2762), 'torch.isclose', 'torch.isclose', (['ego_t', 'ego_trajectory[t + 1, :]'], {}), '(ego_t, ego_trajectory[t + 1, :])\n', (2729, 2762), False, 'import torch\n'), ((3156, 3170), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (3167, 3170), False, 'import torch\n'), ((3181, 3195), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (3192, 3195), False, 'import torch\n'), ((3226, 3239), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (3236, 3239), False, 'import torch\n'), ((3250, 3264), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (3261, 3264), False, 'import torch\n'), ((3929, 3942), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (3939, 3942), False, 'import torch\n'), ((3953, 3974), 'torch.tensor', 'torch.tensor', (['[-1, 0]'], {}), '([-1, 0])\n', (3965, 3974), False, 'import torch\n'), ((4018, 4032), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (4029, 4032), False, 'import torch\n'), ((4043, 4063), 'torch.tensor', 'torch.tensor', (['[1, 0]'], {}), '([1, 0])\n', (4055, 4063), False, 'import torch\n'), ((4555, 4575), 'torch.tensor', 'torch.tensor', (['[3, 0]'], {}), '([3, 0])\n', (4567, 4575), False, 'import torch\n'), ((4582, 4603), 'torch.tensor', 'torch.tensor', (['[-4, 0]'], {}), '([-4, 0])\n', (4594, 4603), False, 'import torch\n'), ((4634, 4654), 'torch.tensor', 'torch.tensor', (['[5, 0]'], {}), '([5, 0])\n', (4646, 4654), False, 'import torch\n'), ((4661, 4682), 'torch.tensor', 'torch.tensor', (['[-2, 0]'], {}), '([-2, 0])\n', (4673, 4682), False, 'import torch\n'), ((4713, 4734), 'torch.tensor', 'torch.tensor', (['[10, 0]'], {}), '([10, 0])\n', (4725, 4734), False, 'import torch\n'), ((4741, 4761), 'torch.tensor', 'torch.tensor', (['[5, 3]'], {}), '([5, 3])\n', (4753, 4761), False, 'import torch\n'), ((5330, 5350), 'torch.tensor', 'torch.tensor', (['[3, 0]'], {}), '([3, 
0])\n', (5342, 5350), False, 'import torch\n'), ((5357, 5378), 'torch.tensor', 'torch.tensor', (['[-4, 0]'], {}), '([-4, 0])\n', (5369, 5378), False, 'import torch\n'), ((5409, 5430), 'torch.tensor', 'torch.tensor', (['[-3, 2]'], {}), '([-3, 2])\n', (5421, 5430), False, 'import torch\n'), ((5437, 5457), 'torch.tensor', 'torch.tensor', (['[1, 5]'], {}), '([1, 5])\n', (5449, 5457), False, 'import torch\n'), ((6147, 6171), 'torch.tensor', 'torch.tensor', (['[1.0, 0.0]'], {}), '([1.0, 0.0])\n', (6159, 6171), False, 'import torch\n'), ((6173, 6196), 'torch.tensor', 'torch.tensor', (['[-6, 2.5]'], {}), '([-6, 2.5])\n', (6185, 6196), False, 'import torch\n'), ((6236, 6259), 'torch.tensor', 'torch.tensor', (['[4.2, -1]'], {}), '([4.2, -1])\n', (6248, 6259), False, 'import torch\n'), ((6261, 6285), 'torch.tensor', 'torch.tensor', (['[-7, -2.0]'], {}), '([-7, -2.0])\n', (6273, 6285), False, 'import torch\n'), ((6321, 6335), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (6332, 6335), False, 'import torch\n'), ((6337, 6350), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (6347, 6350), False, 'import torch\n'), ((7697, 7710), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (7707, 7710), False, 'import torch\n'), ((7897, 7941), 'torch.eq', 'torch.eq', (['ego_state_original', 'ego_state_copy'], {}), '(ego_state_original, ego_state_copy)\n', (7905, 7941), False, 'import torch\n'), ((7972, 8018), 'torch.eq', 'torch.eq', (['ado_states_original', 'ado_states_copy'], {}), '(ado_states_original, ado_states_copy)\n', (7980, 8018), False, 'import torch\n'), ((8313, 8333), 'torch.tensor', 'torch.tensor', (['[3, 0]'], {}), '([3, 0])\n', (8325, 8333), False, 'import torch\n'), ((8344, 8357), 'torch.rand', 'torch.rand', (['(2)'], {}), '(2)\n', (8354, 8357), False, 'import torch\n'), ((8364, 8377), 'torch.rand', 'torch.rand', (['(2)'], {}), '(2)\n', (8374, 8377), False, 'import torch\n'), ((8408, 8429), 'torch.tensor', 'torch.tensor', (['[-4, 2]'], {}), '([-4, 2])\n', (8420, 8429), False, 'import torch\n'), ((8440, 8453), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (8450, 8453), False, 'import torch\n'), ((8460, 8473), 'torch.rand', 'torch.rand', (['(2)'], {}), '(2)\n', (8470, 8473), False, 'import torch\n'), ((9330, 9381), 'torch.eq', 'torch.eq', (['ado_states[m_ado, :]', 'ado.state_with_time'], {}), '(ado_states[m_ado, :], ado.state_with_time)\n', (9338, 9381), False, 'import torch\n'), ((9919, 9932), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (9929, 9932), False, 'import torch\n'), ((11942, 11956), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (11953, 11956), False, 'import torch\n'), ((11967, 11987), 'torch.tensor', 'torch.tensor', (['[0, 1]'], {}), '([0, 1])\n', (11979, 11987), False, 'import torch\n'), ((12395, 12423), 'torch.norm', 'torch.norm', (['mus[i, -1, :, :]'], {}), '(mus[i, -1, :, :])\n', (12405, 12423), False, 'import torch\n'), ((2057, 2077), 'torch.tensor', 'torch.tensor', (['[1, 0]'], {}), '([1, 0])\n', (2069, 2077), False, 'import torch\n'), ((3872, 3901), 'torch.tensor', 'torch.tensor', (['[1, 0, 0, 0, 0]'], {}), '([1, 0, 0, 0, 0])\n', (3884, 3901), False, 'import torch\n'), ((13105, 13127), 'torch.zeros', 'torch.zeros', (['t_horizon'], {}), '(t_horizon)\n', (13116, 13127), False, 'import torch\n'), ((1324, 1344), 'torch.tensor', 'torch.tensor', (['[6, 7]'], {}), '([6, 7])\n', (1336, 1344), False, 'import torch\n')]
|
# Kabaddi package --- defender module
from Football import forward
def name_defender():
    '''Print the names of the Kabaddi defenders.'''
print("Defender Function")
print("Defender1: Mr. Y")
print("Defender2: Mr. Z")
print()
forward.name_forward()
|
[
"Football.forward.name_forward"
] |
[((237, 259), 'Football.forward.name_forward', 'forward.name_forward', ([], {}), '()\n', (257, 259), False, 'from Football import forward\n')]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['HierarchyArgs', 'Hierarchy']
@pulumi.input_type
class HierarchyArgs:
def __init__(__self__, *,
levels: pulumi.Input[Sequence[pulumi.Input['HierarchyLevelArgs']]],
filter: Optional[pulumi.Input['HierarchyFilterArgs']] = None,
name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Hierarchy resource.
:param pulumi.Input['HierarchyFilterArgs'] filter: An optional clause that a hierarchy requires to be matched.
:param pulumi.Input[str] name: Name of the hierarchy.
"""
pulumi.set(__self__, "levels", levels)
if filter is not None:
pulumi.set(__self__, "filter", filter)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def levels(self) -> pulumi.Input[Sequence[pulumi.Input['HierarchyLevelArgs']]]:
return pulumi.get(self, "levels")
@levels.setter
def levels(self, value: pulumi.Input[Sequence[pulumi.Input['HierarchyLevelArgs']]]):
pulumi.set(self, "levels", value)
@property
@pulumi.getter
def filter(self) -> Optional[pulumi.Input['HierarchyFilterArgs']]:
"""
An optional clause that a hierarchy requires to be matched.
"""
return pulumi.get(self, "filter")
@filter.setter
def filter(self, value: Optional[pulumi.Input['HierarchyFilterArgs']]):
pulumi.set(self, "filter", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the hierarchy.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class _HierarchyState:
def __init__(__self__, *,
filter: Optional[pulumi.Input['HierarchyFilterArgs']] = None,
levels: Optional[pulumi.Input[Sequence[pulumi.Input['HierarchyLevelArgs']]]] = None,
name: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Hierarchy resources.
:param pulumi.Input['HierarchyFilterArgs'] filter: An optional clause that a hierarchy requires to be matched.
:param pulumi.Input[str] name: Name of the hierarchy.
"""
if filter is not None:
pulumi.set(__self__, "filter", filter)
if levels is not None:
pulumi.set(__self__, "levels", levels)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def filter(self) -> Optional[pulumi.Input['HierarchyFilterArgs']]:
"""
An optional clause that a hierarchy requires to be matched.
"""
return pulumi.get(self, "filter")
@filter.setter
def filter(self, value: Optional[pulumi.Input['HierarchyFilterArgs']]):
pulumi.set(self, "filter", value)
@property
@pulumi.getter
def levels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HierarchyLevelArgs']]]]:
return pulumi.get(self, "levels")
@levels.setter
def levels(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['HierarchyLevelArgs']]]]):
pulumi.set(self, "levels", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the hierarchy.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
class Hierarchy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
filter: Optional[pulumi.Input[pulumi.InputType['HierarchyFilterArgs']]] = None,
levels: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['HierarchyLevelArgs']]]]] = None,
name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a [Sumologic Hierarchy](https://help.sumologic.com/Visualizations-and-Alerts/Explore).
## Example Usage
```python
import pulumi
import pulumi_sumologic as sumologic
example_hierarchy = sumologic.Hierarchy("exampleHierarchy",
filter=sumologic.HierarchyFilterArgs(
key="_origin",
value="kubernetes",
),
levels=[sumologic.HierarchyLevelArgs(
entity_type="cluster",
next_level=sumologic.HierarchyLevelNextLevelArgs(
entity_type="node",
),
next_levels_with_conditions=[sumologic.HierarchyLevelNextLevelsWithConditionArgs(
condition="testCondition",
level=sumologic.HierarchyLevelNextLevelsWithConditionLevelArgs(
entity_type="namespace",
),
)],
)])
```
## Import
        Hierarchies can be imported using the id, e.g.:
```sh
$ pulumi import sumologic:index/hierarchy:Hierarchy test id
```
        [1] https://help.sumologic.com/Visualizations-and-Alerts/Explore
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['HierarchyFilterArgs']] filter: An optional clause that a hierarchy requires to be matched.
:param pulumi.Input[str] name: Name of the hierarchy.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: HierarchyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a [Sumologic Hierarchy](https://help.sumologic.com/Visualizations-and-Alerts/Explore).
## Example Usage
```python
import pulumi
import pulumi_sumologic as sumologic
example_hierarchy = sumologic.Hierarchy("exampleHierarchy",
filter=sumologic.HierarchyFilterArgs(
key="_origin",
value="kubernetes",
),
levels=[sumologic.HierarchyLevelArgs(
entity_type="cluster",
next_level=sumologic.HierarchyLevelNextLevelArgs(
entity_type="node",
),
next_levels_with_conditions=[sumologic.HierarchyLevelNextLevelsWithConditionArgs(
condition="testCondition",
level=sumologic.HierarchyLevelNextLevelsWithConditionLevelArgs(
entity_type="namespace",
),
)],
)])
```
## Import
        Hierarchies can be imported using the id, e.g.:
```sh
$ pulumi import sumologic:index/hierarchy:Hierarchy test id
```
        [1] https://help.sumologic.com/Visualizations-and-Alerts/Explore
:param str resource_name: The name of the resource.
:param HierarchyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(HierarchyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
filter: Optional[pulumi.Input[pulumi.InputType['HierarchyFilterArgs']]] = None,
levels: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['HierarchyLevelArgs']]]]] = None,
name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = HierarchyArgs.__new__(HierarchyArgs)
__props__.__dict__["filter"] = filter
if levels is None and not opts.urn:
raise TypeError("Missing required property 'levels'")
__props__.__dict__["levels"] = levels
__props__.__dict__["name"] = name
super(Hierarchy, __self__).__init__(
'sumologic:index/hierarchy:Hierarchy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
filter: Optional[pulumi.Input[pulumi.InputType['HierarchyFilterArgs']]] = None,
levels: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['HierarchyLevelArgs']]]]] = None,
name: Optional[pulumi.Input[str]] = None) -> 'Hierarchy':
"""
Get an existing Hierarchy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['HierarchyFilterArgs']] filter: An optional clause that a hierarchy requires to be matched.
:param pulumi.Input[str] name: Name of the hierarchy.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _HierarchyState.__new__(_HierarchyState)
__props__.__dict__["filter"] = filter
__props__.__dict__["levels"] = levels
__props__.__dict__["name"] = name
return Hierarchy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def filter(self) -> pulumi.Output[Optional['outputs.HierarchyFilter']]:
"""
An optional clause that a hierarchy requires to be matched.
"""
return pulumi.get(self, "filter")
@property
@pulumi.getter
def levels(self) -> pulumi.Output[Sequence['outputs.HierarchyLevel']]:
return pulumi.get(self, "levels")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the hierarchy.
"""
return pulumi.get(self, "name")
|
[
"pulumi.get",
"pulumi.ResourceOptions",
"pulumi.set"
] |
[((994, 1032), 'pulumi.set', 'pulumi.set', (['__self__', '"""levels"""', 'levels'], {}), "(__self__, 'levels', levels)\n", (1004, 1032), False, 'import pulumi\n'), ((1324, 1350), 'pulumi.get', 'pulumi.get', (['self', '"""levels"""'], {}), "(self, 'levels')\n", (1334, 1350), False, 'import pulumi\n'), ((1468, 1501), 'pulumi.set', 'pulumi.set', (['self', '"""levels"""', 'value'], {}), "(self, 'levels', value)\n", (1478, 1501), False, 'import pulumi\n'), ((1714, 1740), 'pulumi.get', 'pulumi.get', (['self', '"""filter"""'], {}), "(self, 'filter')\n", (1724, 1740), False, 'import pulumi\n'), ((1845, 1878), 'pulumi.set', 'pulumi.set', (['self', '"""filter"""', 'value'], {}), "(self, 'filter', value)\n", (1855, 1878), False, 'import pulumi\n'), ((2034, 2058), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (2044, 2058), False, 'import pulumi\n'), ((2141, 2172), 'pulumi.set', 'pulumi.set', (['self', '"""name"""', 'value'], {}), "(self, 'name', value)\n", (2151, 2172), False, 'import pulumi\n'), ((3225, 3251), 'pulumi.get', 'pulumi.get', (['self', '"""filter"""'], {}), "(self, 'filter')\n", (3235, 3251), False, 'import pulumi\n'), ((3356, 3389), 'pulumi.set', 'pulumi.set', (['self', '"""filter"""', 'value'], {}), "(self, 'filter', value)\n", (3366, 3389), False, 'import pulumi\n'), ((3533, 3559), 'pulumi.get', 'pulumi.get', (['self', '"""levels"""'], {}), "(self, 'levels')\n", (3543, 3559), False, 'import pulumi\n'), ((3687, 3720), 'pulumi.set', 'pulumi.set', (['self', '"""levels"""', 'value'], {}), "(self, 'levels', value)\n", (3697, 3720), False, 'import pulumi\n'), ((3876, 3900), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (3886, 3900), False, 'import pulumi\n'), ((3983, 4014), 'pulumi.set', 'pulumi.set', (['self', '"""name"""', 'value'], {}), "(self, 'name', value)\n", (3993, 4014), False, 'import pulumi\n'), ((11189, 11215), 'pulumi.get', 'pulumi.get', (['self', '"""filter"""'], {}), "(self, 'filter')\n", (11199, 11215), False, 'import pulumi\n'), ((11340, 11366), 'pulumi.get', 'pulumi.get', (['self', '"""levels"""'], {}), "(self, 'levels')\n", (11350, 11366), False, 'import pulumi\n'), ((11513, 11537), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (11523, 11537), False, 'import pulumi\n'), ((1076, 1114), 'pulumi.set', 'pulumi.set', (['__self__', '"""filter"""', 'filter'], {}), "(__self__, 'filter', filter)\n", (1086, 1114), False, 'import pulumi\n'), ((1156, 1190), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (1166, 1190), False, 'import pulumi\n'), ((2816, 2854), 'pulumi.set', 'pulumi.set', (['__self__', '"""filter"""', 'filter'], {}), "(__self__, 'filter', filter)\n", (2826, 2854), False, 'import pulumi\n'), ((2898, 2936), 'pulumi.set', 'pulumi.set', (['__self__', '"""levels"""', 'levels'], {}), "(__self__, 'levels', levels)\n", (2908, 2936), False, 'import pulumi\n'), ((2978, 3012), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (2988, 3012), False, 'import pulumi\n'), ((8652, 8676), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {}), '()\n', (8674, 8676), False, 'import pulumi\n'), ((10672, 10701), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'id': 'id'}), '(id=id)\n', (10694, 10701), False, 'import pulumi\n')]
|
import pyqtgraph as pg
from pyqtgraph.dockarea import *
import numpy as np
import os
import numbers
try:
from PyQt4.QtGui import QFileDialog
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import QMainWindow
except ImportError:
from PyQt5.QtWidgets import QFileDialog
from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import QApplication, QMainWindow
from neutronbraggedge.experiment_handler import *
from ImagingReso import _utilities
from __code.ui_resonance_imaging_experiment_vs_theory import Ui_MainWindow as UiMainWindow
class ImageWindow(QMainWindow):
pen_color = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
pen_symbol = ['o', 's', 't', 'd', '+']
stack = []
integrated_stack = []
working_folder = ''
x_axis = {'file_index': [],
'tof': [],
'ev': [],
'lambda': []}
x_axis_label = {'file_index': 'file index',
'tof': u'TOF (\u00B5s)',
'ev': 'eV',
'lambda': u'\u03BB (\u212B)',
}
y_axis = {'label': 'Mean Counts', 'data': []}
    elements_to_plot = {}  # e.g. U, U235, ...: elements/isotopes to plot
spectra_file = ''
b_enable_only_file_index_button = True
def __init__(self, parent=None, stack=[], working_folder='', o_reso=None):
QMainWindow.__init__(self, parent=parent)
self.ui = UiMainWindow()
self.ui.setupUi(self)
self.setWindowTitle("Select Rotation Angle for All Images")
self.stack = np.array(stack)
self.integrated_stack = self.stack.sum(axis=0)
self.working_folder = working_folder
self.o_reso = o_reso
self.initialize_pyqtgraph()
self.init_label()
self.init_list_of_things_to_plot()
self.update_radio_button_status()
self.display_image()
self.update_x_axis()
self.roi_changed()
def update_plot(self):
# self.update_x_axis()
self.plot()
def init_label(self):
_tof_label = u"TOF (\u00B5s)"
self.ui.tof_radio_button.setText(_tof_label)
_lambda_label = u"lambda (\u212B)"
self.ui.lambda_radio_button.setText(_lambda_label)
_offset_label = u"\u00B5s"
self.ui.detector_offset_units.setText(_offset_label)
def display_image(self):
self.ui.image_view.setImage(self.integrated_stack)
def plot(self):
x_axis_selected = self.get_x_axis_selected()
x_axis_data = self.x_axis[x_axis_selected]
y_axis_data = self.y_axis['data']
# print("for {}".format(x_axis_selected))
# pprint.pprint(y_axis_data[0:10])
# pprint.pprint(x_axis_data[0:10])
# print()
y_axis_label = self.y_axis['label']
if x_axis_selected == 'ev':
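            # The energy axis runs opposite to time-of-flight (shorter flight
            # time means higher energy), so reverse both arrays before plotting.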
y_axis_data = y_axis_data[::-1]
x_axis_data = x_axis_data[::-1]
x_axis_data = x_axis_data[0: len(y_axis_data)]
self.counts_vs_index.clear()
        try:
            self.legend.scene().removeItem(self.legend)
        except Exception:
            # No legend has been added yet (first call to plot).
            pass
self.legend = self.counts_vs_index.addLegend()
self.counts_vs_index.plot(
x_axis_data, y_axis_data, name='Experimental')
self.counts_vs_index.setLabel('bottom', x_axis_selected)
self.counts_vs_index.setLabel('left', y_axis_label)
# plot all elements
elements_to_plot = self.elements_to_plot
_index_pen_color = 0
_index_pen_symbol = 0
for _label in elements_to_plot.keys():
_x_axis_data = elements_to_plot[_label]['x_axis']
_y_axis_data = elements_to_plot[_label]['y_axis']
self.counts_vs_index.plot(
_x_axis_data,
_y_axis_data,
name=_label,
pen=self.pen_color[_index_pen_color],
                symbol=self.pen_symbol[_index_pen_symbol])  # was `penSymbol`, which pyqtgraph silently ignores
_index_pen_color += 1
if _index_pen_color >= len(self.pen_color):
_index_pen_color = 0
_index_pen_symbol += 1
if _index_pen_symbol == len(self.pen_symbol):
_index_pen_color = 0
_index_pen_symbol = 0
def initialize_pyqtgraph(self):
area = DockArea()
area.setVisible(True)
d1 = Dock("Image Integrated Preview", size=(300, 800))
d2 = Dock("Counts vs Image Index of Selection", size=(300, 800))
area.addDock(d1, 'right')
area.addDock(d2, 'left')
preview_widget = pg.GraphicsLayoutWidget()
pg.setConfigOptions(antialias=True)
# image view
self.ui.image_view = pg.ImageView()
self.ui.image_view.ui.menuBtn.hide()
self.ui.image_view.ui.roiBtn.hide()
# default ROI
self.ui.roi = pg.ROI([0, 0], [20, 20],
pen=(62, 13, 244),
scaleSnap=True) #blue
self.ui.roi.addScaleHandle([1, 1], [0, 0])
self.ui.image_view.addItem(self.ui.roi)
self.ui.roi.sigRegionChanged.connect(self.roi_changed)
d1.addWidget(self.ui.image_view)
self.counts_vs_index = pg.PlotWidget(title='')
self.counts_vs_index.plot()
d2.addWidget(self.counts_vs_index)
vertical_layout = QtGui.QVBoxLayout()
vertical_layout.addWidget(area)
self.ui.widget.setLayout(vertical_layout)
def roi_changed(self):
region = self.ui.roi.getArraySlice(self.integrated_stack,
self.ui.image_view.imageItem)
x0 = region[0][0].start
x1 = region[0][0].stop - 1
y0 = region[0][1].start
y1 = region[0][1].stop - 1
mean_selection = [_data[x0:x1, y0:y1].mean() for _data in self.stack]
self.y_axis['data'] = mean_selection
self.plot()
# x_axis
def get_x_axis_selected(self):
if self.ui.file_index_ratio_button.isChecked():
return 'file_index'
elif self.ui.tof_radio_button.isChecked():
return 'tof'
elif self.ui.lambda_radio_button.isChecked():
return 'lambda'
else:
return 'ev'
def update_radio_button_status(self):
x_axis_selected = self.get_x_axis_selected()
# enable or not list of element to display
if x_axis_selected == 'file_index':
list_status = False
else:
list_status = True
self.ui.list_to_plot_widget.setEnabled(list_status)
b_enable_only_file_index_button = False
spectra_file = self.spectra_file
if not os.path.exists(spectra_file):
x_axis_selected = 'file_index'
b_enable_only_file_index_button = True
        def _is_number(text):
            # float() raises ValueError on non-numeric text, so the original
            # isinstance(float(text), numbers.Number) check could never be
            # False -- it crashed instead. Validate with try/except.
            try:
                float(text)
                return True
            except (TypeError, ValueError):
                return False
        distance_source_detector = self.ui.distance_source_detector_value.text()
        if not _is_number(distance_source_detector):
            x_axis_selected = 'file_index'
            b_enable_only_file_index_button = True
        detector_offset = str(self.ui.detector_offset_value.text())
        if not _is_number(detector_offset):
            x_axis_selected = 'file_index'
            b_enable_only_file_index_button = True
self.set_radio_buttons_status(
b_enable_only_file_index_button=b_enable_only_file_index_button)
self.b_enable_only_file_index_button = b_enable_only_file_index_button
self.update_x_axis()
def update_x_axis(self):
self.x_axis['file_index'] = np.arange(len(self.stack))
if not self.b_enable_only_file_index_button:
# tof
spectra_file = self.spectra_file
_tof_handler = TOF(filename=spectra_file)
self.x_axis['tof'] = _tof_handler.tof_array
# lambda
distance_source_detector = self.ui.distance_source_detector_value.text()
detector_offset = str(self.ui.detector_offset_value.text())
_exp = Experiment(
tof=_tof_handler.tof_array,
distance_source_detector_m=float(distance_source_detector),
detector_offset_micros=float(detector_offset))
self.x_axis['lambda'] = _exp.lambda_array * 1e10
# ev
_exp = Experiment(tof = _tof_handler.tof_array,
distance_source_detector_m = float(distance_source_detector),
detector_offset_micros= float(detector_offset))
_exp_ev = _utilities.convert_x_axis(array=_exp.lambda_array*1e10,
from_units='angstroms',
to_units='ev',
offset_us=float(detector_offset),
source_to_detector_m=float(distance_source_detector))
# _exp_ev = np.linspace(1, 3000, len(_tof_handler.tof_array))
# import scipy
# _exp_ev = scipy.random.ranf(len(_tof_handler.tof_array)) * 3000000
# _exp_ev.sort()
# _exp_ev = _exp_ev[::-1]
self.x_axis['ev'] = _exp_ev
# with open('/users/j35/Desktop/test_output.txt', 'w') as f:
# for _data in _exp_ev:
# f.write(str(_data) + '\n')
else:
self.x_axis['ev'] = []
self.x_axis['tof'] = []
self.x_axis['lambda'] = []
def set_radio_buttons_status(self, b_enable_only_file_index_button=False):
self.ui.tof_radio_button.setEnabled(
not b_enable_only_file_index_button)
self.ui.lambda_radio_button.setEnabled(
not b_enable_only_file_index_button)
self.ui.energy_radio_button.setEnabled(
not b_enable_only_file_index_button)
if b_enable_only_file_index_button:
self.ui.file_index_ratio_button.setChecked(True)
def radio_button_clicked(self):
self.update_radio_button_status()
self.plot()
def distance_source_detector_validated(self):
self.update_radio_button_status()
self.update_x_axis()
self.plot()
def detector_offset_validated(self):
self.update_radio_button_status()
self.update_x_axis()
self.plot()
def time_spectra_file_browse_button_clicked(self):
        spectra_file = QFileDialog.getOpenFileName(
            caption='Select Time Spectra',
            directory=self.working_folder,
            filter='txt (*_Spectra.txt);;All (*.*)')
        if isinstance(spectra_file, tuple):
            # PyQt5 returns a (file_name, selected_filter) tuple
            spectra_file = spectra_file[0]
        if spectra_file:
            self.ui.time_spectra_file.setText(os.path.basename(spectra_file))
            self.spectra_file = spectra_file
self.update_radio_button_status()
self.update_x_axis()
self.plot()
def init_list_of_things_to_plot(self):
list_things_to_plot = []
stack = self.o_reso.stack
list_layers = stack.keys()
for _layer in list_layers:
list_things_to_plot.append(_layer)
list_elements = stack[_layer]['elements']
for _element in list_elements:
list_things_to_plot.append(_layer + ' -> ' + _element)
list_isotopes = stack[_layer][_element]['isotopes']['list']
for _isotope in list_isotopes:
list_things_to_plot.append(_layer + ' -> ' + _element +
' -> ' + _isotope)
self.ui.list_to_plot_widget.addItems(list_things_to_plot)
def done_button_clicked(self):
self.close()
def plot_selection_changed(self, item):
_elements_to_plot = {}
# init
x_axis_ev = []
x_axis_selected = self.get_x_axis_selected()
if x_axis_selected == 'file_index':
self.elements_to_plot = _elements_to_plot
return
# retrieve data to display
for _item in self.ui.list_to_plot_widget.selectedIndexes():
_row_selected = _item.row()
_text = self.ui.list_to_plot_widget.item(_row_selected).text()
_layer_element_isotope = self.__parse_layer_element_isotope(_text)
_layer = _layer_element_isotope['layer']
_element = _layer_element_isotope['element']
_isotope = _layer_element_isotope['isotope']
if _element == '':
transmission = self.o_reso.stack_signal[_layer]['transmission']
x_axis_ev = self.o_reso.stack_signal[_layer]['energy_eV']
elif _isotope == '':
transmission = self.o_reso.stack_signal[_layer][_element][
'transmission']
x_axis_ev = self.o_reso.stack_signal[_layer][_element][
'energy_eV']
else:
transmission = self.o_reso.stack_signal[_layer][_element][
_isotope]['transmission']
x_axis_ev = self.o_reso.stack_signal[_layer][_element][
_isotope]['energy_eV']
_elements_to_plot[_text] = {}
_elements_to_plot[_text]['y_axis'] = transmission
x_axis = []
if x_axis_selected == 'lambda':
x_axis = _utilities.convert_x_axis(
array=x_axis_ev, from_units='ev', to_units='angstroms')
elif x_axis_selected == 'tof':
detector_offset = float(self.ui.detector_offset_value.text())
distance_source_detector = float(
self.ui.distance_source_detector_value.text())
x_axis = _utilities.convert_x_axis(
array=x_axis_ev,
from_units='ev',
to_units='s',
offset_us=detector_offset,
source_to_detector_m=distance_source_detector)
else: # ev
x_axis = x_axis_ev
_elements_to_plot[_text]['x_axis'] = x_axis
self.elements_to_plot = _elements_to_plot
self.plot()
def __parse_layer_element_isotope(self, text):
        ''' Parse the selection text into its layer, element and isotope parts.
        '''
_dict = {'layer': '', 'element': '', 'isotope': ''}
parse_text = text.split(' -> ')
_dict['layer'] = parse_text[0]
if len(parse_text) >= 2:
_dict['element'] = parse_text[1]
if len(parse_text) >= 3:
_dict['isotope'] = parse_text[2]
return _dict
def closeEvent(self, event=None):
pass
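# --- Editor's usage sketch (addition, not part of the original module). It
# assumes a list of 2-D numpy arrays (`image_stack`) and an ImagingReso
# resonance handle (`o_reso`) built elsewhere; both names are placeholders.
# Under PyQt5 this would look like:
#
#     app = QApplication([])
#     window = ImageWindow(stack=image_stack,
#                          working_folder='/path/to/run',
#                          o_reso=o_reso)
#     window.show()
#     app.exec_()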
|
[
"PyQt5.QtWidgets.QMainWindow.__init__",
"os.path.basename",
"pyqtgraph.PlotWidget",
"os.path.exists",
"pyqtgraph.ImageView",
"PyQt5.QtGui.QVBoxLayout",
"PyQt5.QtWidgets.QFileDialog.getOpenFileName",
"numpy.array",
"pyqtgraph.ROI",
"pyqtgraph.setConfigOptions",
"ImagingReso._utilities.convert_x_axis",
"__code.ui_resonance_imaging_experiment_vs_theory.Ui_MainWindow",
"pyqtgraph.GraphicsLayoutWidget"
] |
[((1319, 1360), 'PyQt5.QtWidgets.QMainWindow.__init__', 'QMainWindow.__init__', (['self'], {'parent': 'parent'}), '(self, parent=parent)\n', (1339, 1360), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow\n'), ((1379, 1393), '__code.ui_resonance_imaging_experiment_vs_theory.Ui_MainWindow', 'UiMainWindow', ([], {}), '()\n', (1391, 1393), True, 'from __code.ui_resonance_imaging_experiment_vs_theory import Ui_MainWindow as UiMainWindow\n'), ((1514, 1529), 'numpy.array', 'np.array', (['stack'], {}), '(stack)\n', (1522, 1529), True, 'import numpy as np\n'), ((4558, 4583), 'pyqtgraph.GraphicsLayoutWidget', 'pg.GraphicsLayoutWidget', ([], {}), '()\n', (4581, 4583), True, 'import pyqtgraph as pg\n'), ((4592, 4627), 'pyqtgraph.setConfigOptions', 'pg.setConfigOptions', ([], {'antialias': '(True)'}), '(antialias=True)\n', (4611, 4627), True, 'import pyqtgraph as pg\n'), ((4679, 4693), 'pyqtgraph.ImageView', 'pg.ImageView', ([], {}), '()\n', (4691, 4693), True, 'import pyqtgraph as pg\n'), ((4828, 4887), 'pyqtgraph.ROI', 'pg.ROI', (['[0, 0]', '[20, 20]'], {'pen': '(62, 13, 244)', 'scaleSnap': '(True)'}), '([0, 0], [20, 20], pen=(62, 13, 244), scaleSnap=True)\n', (4834, 4887), True, 'import pyqtgraph as pg\n'), ((5188, 5211), 'pyqtgraph.PlotWidget', 'pg.PlotWidget', ([], {'title': '""""""'}), "(title='')\n", (5201, 5211), True, 'import pyqtgraph as pg\n'), ((5318, 5337), 'PyQt5.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', ([], {}), '()\n', (5335, 5337), False, 'from PyQt5 import QtCore, QtGui\n'), ((10755, 10890), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', ([], {'caption': '"""Select Time Spectra"""', 'directory': 'self.working_folder', 'filter': '"""txt (*_Spectra.txt);;All (*.*)"""'}), "(caption='Select Time Spectra', directory=self.\n working_folder, filter='txt (*_Spectra.txt);;All (*.*)')\n", (10782, 10890), False, 'from PyQt5.QtWidgets import QFileDialog\n'), ((6642, 6670), 'os.path.exists', 'os.path.exists', (['spectra_file'], {}), '(spectra_file)\n', (6656, 6670), False, 'import os\n'), ((10994, 11024), 'os.path.basename', 'os.path.basename', (['spectra_file'], {}), '(spectra_file)\n', (11010, 11024), False, 'import os\n'), ((13599, 13685), 'ImagingReso._utilities.convert_x_axis', '_utilities.convert_x_axis', ([], {'array': 'x_axis_ev', 'from_units': '"""ev"""', 'to_units': '"""angstroms"""'}), "(array=x_axis_ev, from_units='ev', to_units=\n 'angstroms')\n", (13624, 13685), False, 'from ImagingReso import _utilities\n'), ((13965, 14116), 'ImagingReso._utilities.convert_x_axis', '_utilities.convert_x_axis', ([], {'array': 'x_axis_ev', 'from_units': '"""ev"""', 'to_units': '"""s"""', 'offset_us': 'detector_offset', 'source_to_detector_m': 'distance_source_detector'}), "(array=x_axis_ev, from_units='ev', to_units='s',\n offset_us=detector_offset, source_to_detector_m=distance_source_detector)\n", (13990, 14116), False, 'from ImagingReso import _utilities\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import math
import itertools_recipes as it
data=np.array([[1,1],[5,2],[3,3],[0,2],[9,4],[4,8]])
x=data[:,0]
y=data[:,1]
def choose():
q=[]
u=list(it.permutations([0,1,2,3,4,5],6))
m=np.zeros((6,2))
n=np.zeros((6,2))
for i in range(len(u)):
m[0]=data[u[i][0]]
m[1]=data[u[i][1]]
m[2]=data[u[i][2]]
m[3]=data[u[i][3]]
m[4]=data[u[i][4]]
m[5]=data[u[i][5]]
        q.append(distance(m))  # compute each tour length once
k=min(q)
    print('最短路程为',k)  # "the shortest route length is"
g=q.index(k)
n[0] = data[u[g][0]]
n[1] = data[u[g][1]]
n[2] = data[u[g][2]]
n[3] = data[u[g][3]]
n[4] = data[u[g][4]]
n[5] = data[u[g][5]]
print(n)
draw_a_line(n)
def draw_a_line(w):
i=0
for i in range(5):
a=np.linspace(w[i,0],w[i+1,0],100)
b=np.linspace(w[i,1],w[i+1,1],100)
plt.plot(a,b,'.')
c=np.linspace(w[0,0],w[5,0],100)
d=np.linspace(w[0,1],w[5,1],100)
plt.plot(c,d,'.')
def distance(w):
i=0
sum=0
e=[]
for i in range(5):
e.append(math.sqrt((w[i+1,0]-w[i,0])**2+(w[i+1,1]-w[i,1])**2))
sum=sum+e[i]
    # close the tour back to the starting point: w[5] -> w[0]; the original
    # used w[1] here, which disagrees with the closing line drawn in draw_a_line
    sum=sum+math.sqrt((w[5,0]-w[0,0])**2+(w[5,1]-w[0,1])**2)
return(sum)
choose()
plt.show()
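# Editor's addition: an equivalent, vectorised tour length that can be used to
# sanity-check distance(); `w` is the (6, 2) array of points in visiting order,
# and np.roll pairs each point with its successor, including the closing edge
# back to the start.
def tour_length(w):
    d = w - np.roll(w, -1, axis=0)  # edge vectors, with wrap-around
    return np.sqrt((d ** 2).sum(axis=1)).sum()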
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"math.sqrt",
"numpy.zeros",
"numpy.array",
"numpy.linspace",
"itertools_recipes.permutations"
] |
[((105, 163), 'numpy.array', 'np.array', (['[[1, 1], [5, 2], [3, 3], [0, 2], [9, 4], [4, 8]]'], {}), '([[1, 1], [5, 2], [3, 3], [0, 2], [9, 4], [4, 8]])\n', (113, 163), True, 'import numpy as np\n'), ((1310, 1320), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1318, 1320), True, 'import matplotlib.pyplot as plt\n'), ((259, 275), 'numpy.zeros', 'np.zeros', (['(6, 2)'], {}), '((6, 2))\n', (267, 275), True, 'import numpy as np\n'), ((282, 298), 'numpy.zeros', 'np.zeros', (['(6, 2)'], {}), '((6, 2))\n', (290, 298), True, 'import numpy as np\n'), ((958, 992), 'numpy.linspace', 'np.linspace', (['w[0, 0]', 'w[5, 0]', '(100)'], {}), '(w[0, 0], w[5, 0], 100)\n', (969, 992), True, 'import numpy as np\n'), ((996, 1030), 'numpy.linspace', 'np.linspace', (['w[0, 1]', 'w[5, 1]', '(100)'], {}), '(w[0, 1], w[5, 1], 100)\n', (1007, 1030), True, 'import numpy as np\n'), ((1032, 1051), 'matplotlib.pyplot.plot', 'plt.plot', (['c', 'd', '"""."""'], {}), "(c, d, '.')\n", (1040, 1051), True, 'import matplotlib.pyplot as plt\n'), ((218, 256), 'itertools_recipes.permutations', 'it.permutations', (['[0, 1, 2, 3, 4, 5]', '(6)'], {}), '([0, 1, 2, 3, 4, 5], 6)\n', (233, 256), True, 'import itertools_recipes as it\n'), ((849, 887), 'numpy.linspace', 'np.linspace', (['w[i, 0]', 'w[i + 1, 0]', '(100)'], {}), '(w[i, 0], w[i + 1, 0], 100)\n', (860, 887), True, 'import numpy as np\n'), ((892, 930), 'numpy.linspace', 'np.linspace', (['w[i, 1]', 'w[i + 1, 1]', '(100)'], {}), '(w[i, 1], w[i + 1, 1], 100)\n', (903, 930), True, 'import numpy as np\n'), ((933, 952), 'matplotlib.pyplot.plot', 'plt.plot', (['a', 'b', '"""."""'], {}), "(a, b, '.')\n", (941, 952), True, 'import matplotlib.pyplot as plt\n'), ((1231, 1293), 'math.sqrt', 'math.sqrt', (['((w[5, 0] - w[1, 0]) ** 2 + (w[5, 1] - w[1, 1]) ** 2)'], {}), '((w[5, 0] - w[1, 0]) ** 2 + (w[5, 1] - w[1, 1]) ** 2)\n', (1240, 1293), False, 'import math\n'), ((1142, 1212), 'math.sqrt', 'math.sqrt', (['((w[i + 1, 0] - w[i, 0]) ** 2 + (w[i + 1, 1] - w[i, 1]) ** 2)'], {}), '((w[i + 1, 0] - w[i, 0]) ** 2 + (w[i + 1, 1] - w[i, 1]) ** 2)\n', (1151, 1212), False, 'import math\n')]
|
import os
import numpy as np
from matplotlib import pyplot as plt
from torch.utils.data import DataLoader
def minibatch_loader(minibatch, minibatch_size, drop_last=True):
return DataLoader(minibatch, batch_size=minibatch_size, drop_last=drop_last)
def get_next_available_dir(root, dir_name, absolute_path=True, create=True):
checkpoint_dir_base = os.path.join(root, dir_name)
dir_id = 1
checkpoint_dir = f"{checkpoint_dir_base}_{dir_id}"
while os.path.exists(checkpoint_dir):
dir_id += 1
checkpoint_dir = f"{checkpoint_dir_base}_{dir_id}"
if create:
os.mkdir(checkpoint_dir)
if absolute_path:
return checkpoint_dir
else:
return f"{dir_name}_{dir_id}"
def _plot(data_dict, x_label, y_label, title):
fig, ax = plt.subplots() # (figsize=(10,10))
if 'x_ticks' in data_dict:
x_values = data_dict.pop('x_ticks')
if len(x_values) > 20:
x_values = None # too crowded to read on the figure
else:
x_values = None
for name, data in data_dict.items():
if x_values is not None:
ax.plot(list(range(len(x_values))), data, label=name)
else:
ax.plot(data, label=name)
if x_values is not None:
plt.xticks(list(range(len(x_values))), x_values)
ax.legend()
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
plt.show()
return fig
def load_and_plot_privacy_param_variation():
eps1 = {'name': 'eps_1', 'fp': './eps1.npy', }
noise_multiplier = {'name': 'noise_multiplier', 'fp': './noise_multiplier.npy'}
eps3 = {'name': 'eps_3', 'fp': './eps3.npy'}
files = [eps1, noise_multiplier, eps3]
curve_names = ['Test accuracy', 'MEA fidelity', 'MIA accuracy']
for data_file in files:
data = dict()
with open(data_file['fp'], 'rb') as f:
data['x_ticks'] = np.load(f)
for curve in curve_names:
data[curve] = np.load(f)
# data['x_ticks'] = np.array(data_file['rng'])
_plot(data, data_file['name'], 'Privacy and Utility', 'Small CNN on Cifar10')
def load_and_plot_learning_curves():
def fetch(fs, metric_name):
metric_data = dict()
for f in fs:
metric_data[f['name']] = f[metric_name]
return metric_data
metrics = ['val_acc']
msdp = {'name': 'MSDPFL', 'fp': "outFL/MNIST/low_eps/msdpfl/stats.npy"}
opacus = {'name': 'Opacus FL', 'fp': "outFL/MNIST/low_eps/opacusfl/stats.npy"}
non_p = {'name': 'Non-Private FL', 'fp': "outFL/MNIST/npfl/stats.npy"}
title = 'Highly private FL training on MNIST'
files = [msdp, opacus, non_p]
for data_file in files:
data = dict()
with open(data_file['fp'], 'rb') as f:
for metric in metrics:
data[metric] = np.load(f)
data_file.update(**data)
for metric in metrics:
metric_data = fetch(files, metric)
f = _plot(metric_data, 'Epochs', metric, title)
if metric == 'val_acc':
f.savefig(f"./val_acc.png", bbox_inches='tight')
def load_and_plot_dr():
def fetch(fs, metric_name):
metric_data = dict()
for f in fs:
metric_data[f['name']] = f[metric_name]
return metric_data
def dr_plot(data_dict, x_label, y_label, title):
fig, ax = plt.subplots() # (figsize=(10,10))
for name, data in data_dict.items():
ax.plot(data, label=name)
ax.legend()
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
plt.show()
metrics = {'centralised': ['train_loss', 'train_acc', 'val_acc'],
'fl': ['val_acc']
}
msdp = {'name': 'MSDP', 'fp': "out_centralMSDP/DR/msdp/MSDPTrainer_0_plot_stats.npy"}
msdpfl = {'name': 'MSDPFL', 'fp': "outFL/DR/msdpfl/stats.npy"}
opacus = {'name': 'Opacus', 'fp': "out_centralMSDP/DR/opacus/MSDPTrainer_0_plot_stats.npy"}
opacusfl = {'name': 'OpacusFL', 'fp': "outFL/DR/opacus_fl/stats.npy"}
non_p = {'name': 'Non-Private', 'fp': "out_centralMSDP/DR/np/MSDPTrainer_0_plot_stats.npy"}
non_pfl = {'name': 'Non-Private FL', 'fp': "outFL/DR/np_fl/stats.npy"}
title = 'FL training on DR'
central = [msdp, opacus, non_p]
fl = [msdpfl, opacusfl, non_pfl]
files = central + fl
for data_file in files:
data = dict()
if data_file in central:
metric_type = 'centralised'
else:
metric_type = 'fl'
with open(data_file['fp'], 'rb') as f:
for metric in metrics[metric_type]:
data[metric] = np.load(f)
data_file.update(**data)
for metric in ['val_acc']:
metric_data = fetch(files, metric)
dr_plot(metric_data, 'Epochs/ Rounds', metric, title)
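if __name__ == '__main__':
  # Editor's demo (addition): exercises _plot with synthetic curves; the
  # curve names and values below are illustrative only.
  demo = {'x_ticks': list(range(5)),
          'train_acc': [0.50, 0.61, 0.70, 0.76, 0.80],
          'val_acc': [0.48, 0.57, 0.66, 0.70, 0.72]}
  _plot(demo, 'Epochs', 'Accuracy', 'Synthetic learning curves')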
|
[
"matplotlib.pyplot.title",
"os.mkdir",
"numpy.load",
"matplotlib.pyplot.show",
"torch.utils.data.DataLoader",
"os.path.exists",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.join"
] |
[((183, 252), 'torch.utils.data.DataLoader', 'DataLoader', (['minibatch'], {'batch_size': 'minibatch_size', 'drop_last': 'drop_last'}), '(minibatch, batch_size=minibatch_size, drop_last=drop_last)\n', (193, 252), False, 'from torch.utils.data import DataLoader\n'), ((356, 384), 'os.path.join', 'os.path.join', (['root', 'dir_name'], {}), '(root, dir_name)\n', (368, 384), False, 'import os\n'), ((459, 489), 'os.path.exists', 'os.path.exists', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (473, 489), False, 'import os\n'), ((753, 767), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (765, 767), True, 'from matplotlib import pyplot as plt\n'), ((1264, 1283), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_label'], {}), '(x_label)\n', (1274, 1283), True, 'from matplotlib import pyplot as plt\n'), ((1286, 1305), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (1296, 1305), True, 'from matplotlib import pyplot as plt\n'), ((1308, 1324), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1317, 1324), True, 'from matplotlib import pyplot as plt\n'), ((1327, 1337), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1335, 1337), True, 'from matplotlib import pyplot as plt\n'), ((579, 603), 'os.mkdir', 'os.mkdir', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (587, 603), False, 'import os\n'), ((3138, 3152), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3150, 3152), True, 'from matplotlib import pyplot as plt\n'), ((3267, 3286), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_label'], {}), '(x_label)\n', (3277, 3286), True, 'from matplotlib import pyplot as plt\n'), ((3291, 3310), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (3301, 3310), True, 'from matplotlib import pyplot as plt\n'), ((3315, 3331), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (3324, 3331), True, 'from matplotlib import pyplot as plt\n'), ((3336, 3346), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3344, 3346), True, 'from matplotlib import pyplot as plt\n'), ((1797, 1807), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (1804, 1807), True, 'import numpy as np\n'), ((1862, 1872), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (1869, 1872), True, 'import numpy as np\n'), ((2664, 2674), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (2671, 2674), True, 'import numpy as np\n'), ((4323, 4333), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (4330, 4333), True, 'import numpy as np\n')]
|
'''
PISA module to prep incoming data into formats that are
compatible with the mc_uncertainty likelihood formulation
This module takes in events containers from the pipeline, and
introduces an additional array giving the indices where each
event falls into.
module structure imported from bootcamp example
'''
from __future__ import absolute_import, print_function, division
__author__ = "<NAME> (<EMAIL>)"
import numpy as np
from pisa import FTYPE
from pisa.core.pi_stage import PiStage
#from pisa.utils.log import logging
# Load the modified index lookup function
from pisa.core.bin_indexing import lookup_indices
class add_indices(PiStage):
"""
PISA Pi stage to map out the index of the analysis
binning where each event falls into.
Parameters
----------
data
params
foo : Quantity
bar : Quanitiy with time dimension
input_names
output_names
debug_mode
input_specs:
calc_specs : must be events
output_specs: must be a MultiDimBinnig
Notes:
------
- input and calc specs are predetermined in the module
(inputs from the config files will be disregarded)
- stage appends an array quantity called bin_indices
- stage also appends an array mask to access events by
bin index later in the pipeline
"""
# this is the constructor with default arguments
def __init__(self,
data=None,
params=None,
input_names=None,
output_names=None,
debug_mode=None,
input_specs=None,
calc_specs=None,
output_specs=None,
):
#
# No parameters are expected in this stage
# same goes for a bunch of other stage options
#
expected_params = ()
input_names = ()
output_names = ()
input_apply_keys = ()
# We add the bin_indices key
# (but not in the apply function so maybe useless...)
#
output_calc_keys = ('bin_indices',)
output_apply_keys = ()
# init base class
super(add_indices, self).__init__(data=data,
params=params,
expected_params=expected_params,
input_names=input_names,
output_names=output_names,
debug_mode=debug_mode,
input_specs=input_specs,
calc_specs=calc_specs,
output_specs=output_specs,
input_apply_keys=input_apply_keys,
output_apply_keys=output_apply_keys,
output_calc_keys=output_calc_keys,
)
# make sure the user specified some modes
assert self.input_mode is not None
assert self.calc_mode is not None
assert self.output_mode is not None
def setup_function(self):
'''
Calculate the bin index where each event falls into
Create one mask for each analysis bin.
'''
        assert self.calc_specs == 'events', 'ERROR: calc_specs must be set to "events" for this module'
self.data.data_specs = 'events'
for container in self.data:
# Generate a new container called bin_indices
container['bin_indices'] = np.empty((container.size), dtype=np.int64)
variables_to_bin = []
for bin_name in self.output_specs.names:
variables_to_bin.append(container[bin_name])
new_array = lookup_indices(sample=variables_to_bin,
binning=self.output_specs)
new_array = new_array.get('host')
np.copyto(src=new_array, dst=container["bin_indices"].get('host'))
for bin_i in range(self.output_specs.tot_num_bins):
container.add_array_data(key='bin_{}_mask'.format(bin_i),
data=(new_array == bin_i))
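# Editor's sketch (addition; hypothetical wiring, not from the source): the
# stage is instantiated like any PiStage, with events-mode specs and the
# analysis MultiDimBinning as its output specs, e.g.
#
#     stage = add_indices(data=container_set,
#                         input_specs='events',
#                         calc_specs='events',
#                         output_specs=analysis_binning)
#     stage.setup_function()  # appends 'bin_indices' plus one mask per bin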
|
[
"numpy.empty",
"pisa.core.bin_indexing.lookup_indices"
] |
[((3599, 3639), 'numpy.empty', 'np.empty', (['container.size'], {'dtype': 'np.int64'}), '(container.size, dtype=np.int64)\n', (3607, 3639), True, 'import numpy as np\n'), ((3818, 3884), 'pisa.core.bin_indexing.lookup_indices', 'lookup_indices', ([], {'sample': 'variables_to_bin', 'binning': 'self.output_specs'}), '(sample=variables_to_bin, binning=self.output_specs)\n', (3832, 3884), False, 'from pisa.core.bin_indexing import lookup_indices\n')]
|
from flask import Blueprint, flash, redirect, render_template, request, url_for,sessions
from octs.user.models import Course, Message, User
from octs.database import db
from .forms import MessageForm
from flask_login import current_user
blueprint = Blueprint('message', __name__, url_prefix='/message',static_folder='../static')
@blueprint.route('/all/<id>')
def show_all(id):
messages = Message.query.filter_by(to_id=id).all()
usernames = []
for message in messages:
from_id = message.from_id
user = User.query.filter_by(id=from_id).first()
usernames.append(user.name)
length = len(messages)
messages = list(reversed(messages))
    return render_template('message/list.html', length=length, messages=messages, names=usernames, listtype='全部消息')  # listtype: "all messages"
@blueprint.route('/unread/<id>')
def show_unread(id):
messages = Message.query.filter_by(to_id=id).all()
messages = [message for message in messages if message.has_read==False]
usernames = []
for message in messages:
from_id = message.from_id
user = User.query.filter_by(id=from_id).first()
usernames.append(user.name)
length = len(messages)
messages = list(reversed(messages))
    return render_template('message/list.html', length=length, messages=messages, names=usernames, listtype='未读消息')  # listtype: "unread messages"
@blueprint.route('/detail/<id>')
def show_detail(id):
message = Message.query.filter_by(id=id).first()
message.has_read = True
db.session.add(message)
db.session.commit()
user = User.query.filter_by(id=message.from_id).first()
return render_template('message/detail.html', message=message, name=user.name)
@blueprint.route('/send/', methods=['GET', 'POST'])
def send():
form = MessageForm()
if form.validate_on_submit():
to_user_id = form.send_to.data
user = User.query.filter_by(user_id=to_user_id).first()
if user is None:
            flash('该用户不存在')  # "this user does not exist"
return redirect(url_for('message.send'))
title = form.title.data
message = form.message.data
Message.sendMessage(current_user.id, user.id, message=message, title=title)
        flash('消息发送成功')  # "message sent successfully"
return redirect(url_for('message.send'))
return render_template('message/send_message.html', form=form)
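# Editor's sketch (addition): the blueprint is registered on the application
# like any other Flask blueprint; `app` below is an assumed app-factory
# product, not something defined in this module.
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.register_blueprint(blueprint)  # routes are served under /message/...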
|
[
"flask.flash",
"flask.Blueprint",
"octs.user.models.User.query.filter_by",
"octs.user.models.Message.sendMessage",
"flask.url_for",
"octs.user.models.Message.query.filter_by",
"flask.render_template",
"octs.database.db.session.add",
"octs.database.db.session.commit"
] |
[((250, 335), 'flask.Blueprint', 'Blueprint', (['"""message"""', '__name__'], {'url_prefix': '"""/message"""', 'static_folder': '"""../static"""'}), "('message', __name__, url_prefix='/message', static_folder='../static'\n )\n", (259, 335), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for, sessions\n'), ((686, 794), 'flask.render_template', 'render_template', (['"""message/list.html"""'], {'length': 'length', 'messages': 'messages', 'names': 'usernames', 'listtype': '"""全部消息"""'}), "('message/list.html', length=length, messages=messages,\n names=usernames, listtype='全部消息')\n", (701, 794), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for, sessions\n'), ((1229, 1337), 'flask.render_template', 'render_template', (['"""message/list.html"""'], {'length': 'length', 'messages': 'messages', 'names': 'usernames', 'listtype': '"""未读消息"""'}), "('message/list.html', length=length, messages=messages,\n names=usernames, listtype='未读消息')\n", (1244, 1337), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for, sessions\n'), ((1474, 1497), 'octs.database.db.session.add', 'db.session.add', (['message'], {}), '(message)\n', (1488, 1497), False, 'from octs.database import db\n'), ((1502, 1521), 'octs.database.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1519, 1521), False, 'from octs.database import db\n'), ((1593, 1664), 'flask.render_template', 'render_template', (['"""message/detail.html"""'], {'message': 'message', 'name': 'user.name'}), "('message/detail.html', message=message, name=user.name)\n", (1608, 1664), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for, sessions\n'), ((2234, 2289), 'flask.render_template', 'render_template', (['"""message/send_message.html"""'], {'form': 'form'}), "('message/send_message.html', form=form)\n", (2249, 2289), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for, sessions\n'), ((2074, 2149), 'octs.user.models.Message.sendMessage', 'Message.sendMessage', (['current_user.id', 'user.id'], {'message': 'message', 'title': 'title'}), '(current_user.id, user.id, message=message, title=title)\n', (2093, 2149), False, 'from octs.user.models import Course, Message, User\n'), ((2158, 2173), 'flask.flash', 'flash', (['"""消息发送成功"""'], {}), "('消息发送成功')\n", (2163, 2173), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for, sessions\n'), ((394, 427), 'octs.user.models.Message.query.filter_by', 'Message.query.filter_by', ([], {'to_id': 'id'}), '(to_id=id)\n', (417, 427), False, 'from octs.user.models import Course, Message, User\n'), ((861, 894), 'octs.user.models.Message.query.filter_by', 'Message.query.filter_by', ([], {'to_id': 'id'}), '(to_id=id)\n', (884, 894), False, 'from octs.user.models import Course, Message, User\n'), ((1403, 1433), 'octs.user.models.Message.query.filter_by', 'Message.query.filter_by', ([], {'id': 'id'}), '(id=id)\n', (1426, 1433), False, 'from octs.user.models import Course, Message, User\n'), ((1533, 1573), 'octs.user.models.User.query.filter_by', 'User.query.filter_by', ([], {'id': 'message.from_id'}), '(id=message.from_id)\n', (1553, 1573), False, 'from octs.user.models import Course, Message, User\n'), ((1929, 1944), 'flask.flash', 'flash', (['"""该用户不存在"""'], {}), "('该用户不存在')\n", (1934, 1944), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for, sessions\n'), ((2198, 2221), 'flask.url_for', 'url_for', (['"""message.send"""'], {}), "('message.send')\n", (2205, 2221), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for, sessions\n'), ((531, 563), 'octs.user.models.User.query.filter_by', 'User.query.filter_by', ([], {'id': 'from_id'}), '(id=from_id)\n', (551, 563), False, 'from octs.user.models import Course, Message, User\n'), ((1074, 1106), 'octs.user.models.User.query.filter_by', 'User.query.filter_by', ([], {'id': 'from_id'}), '(id=from_id)\n', (1094, 1106), False, 'from octs.user.models import Course, Message, User\n'), ((1843, 1883), 'octs.user.models.User.query.filter_by', 'User.query.filter_by', ([], {'user_id': 'to_user_id'}), '(user_id=to_user_id)\n', (1863, 1883), False, 'from octs.user.models import Course, Message, User\n'), ((1973, 1996), 'flask.url_for', 'url_for', (['"""message.send"""'], {}), "('message.send')\n", (1980, 1996), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for, sessions\n')]
|
import Tkinter as tk
import ttk
import tkSimpleDialog
def subtree_ids(treeview, x, level=0):
"""
Return a list of tuples containing the ids and levels for *x* and every element below it in the Treeview *treeview*.
The level of *x* is 0, children of *x* are 1, and so forth.
"""
id_list = list()
id_list.append((x, level))
for y in treeview.get_children(x):
id_list.extend(subtree_ids(treeview, y, level + 1))
return id_list
class DeleteDialog(tkSimpleDialog.Dialog):
"""
Confirmation dialog box for deleting the selected items from the Treeview.
"""
def __init__(self, parent_window, tree, title="Confirm Deletion"):
self.tree = tree
self.id_tuples = list()
for x in self.tree.treeview.selection():
if x not in [y[0] for y in self.id_tuples]:
self.id_tuples.extend(subtree_ids(self.tree.treeview, x))
tkSimpleDialog.Dialog.__init__(self, parent_window, title)
def body(self, master):
"""
Generates the required text listing all elements that will be deleted.
Displays the "OK" and "Cancel" buttons.
"""
if len(self.id_tuples) == 0:
message_string = "No elements selected."
elif len(self.id_tuples) == 1:
message_string = 'Delete "{}"?'.format(
self.tree.get_element(self.id_tuples[0][0]).name
)
else:
message_string = "Delete the following items?\n"
for x, level in self.id_tuples:
if level == 0:
bullet = " " + u"\u25C6"
else:
bullet = " " * (level + 1) + u"\u25C7"
message_string += u"{bullet} {name}\n".format(
bullet=bullet, name=self.tree.get_element(x).name
)
message = ttk.Label(master, text=message_string, justify="left")
message.grid(row=0, sticky="w")
def buttonbox(self):
"""
Display only one button if there's no selection. Otherwise, use the default method to display two buttons.
"""
if len(self.id_tuples) == 0:
box = tk.Frame(self)
w = tk.Button(
box, text="OK", width=10, command=self.cancel, default="active"
)
w.pack(side="left", padx=5, pady=5)
self.bind("<Return>", self.cancel)
box.pack()
else:
tkSimpleDialog.Dialog.buttonbox(self)
def apply(self):
"""
Called when the user chooses "OK". Performs the deletion.
"""
for tree_id, _ in self.id_tuples:
self.tree.delete_element(tree_id)
self.tree.refresh_treeview()
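# Editor's sketch (addition): the dialog is modal, so constructing it runs the
# whole confirm/apply cycle. `tree` is a hypothetical wrapper exposing
# .treeview, .get_element(), .delete_element() and .refresh_treeview(), as
# used above.
#
#     root = tk.Tk()
#     tree = ...  # build a wrapper around a ttk.Treeview
#     DeleteDialog(root, tree)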
|
[
"tkSimpleDialog.Dialog.__init__",
"ttk.Label",
"Tkinter.Frame",
"tkSimpleDialog.Dialog.buttonbox",
"Tkinter.Button"
] |
[((923, 981), 'tkSimpleDialog.Dialog.__init__', 'tkSimpleDialog.Dialog.__init__', (['self', 'parent_window', 'title'], {}), '(self, parent_window, title)\n', (953, 981), False, 'import tkSimpleDialog\n'), ((1874, 1928), 'ttk.Label', 'ttk.Label', (['master'], {'text': 'message_string', 'justify': '"""left"""'}), "(master, text=message_string, justify='left')\n", (1883, 1928), False, 'import ttk\n'), ((2189, 2203), 'Tkinter.Frame', 'tk.Frame', (['self'], {}), '(self)\n', (2197, 2203), True, 'import Tkinter as tk\n'), ((2221, 2295), 'Tkinter.Button', 'tk.Button', (['box'], {'text': '"""OK"""', 'width': '(10)', 'command': 'self.cancel', 'default': '"""active"""'}), "(box, text='OK', width=10, command=self.cancel, default='active')\n", (2230, 2295), True, 'import Tkinter as tk\n'), ((2472, 2509), 'tkSimpleDialog.Dialog.buttonbox', 'tkSimpleDialog.Dialog.buttonbox', (['self'], {}), '(self)\n', (2503, 2509), False, 'import tkSimpleDialog\n')]
|
from pprint import pprint
from jnpr.junos import Device
from jnpr.junos.op.phyport import PhyPortTable
import code
with Device(host='192.168.127.12', user='pyez', password='<PASSWORD>!', gather_facts=False) as dev:
intf_status = PhyPortTable(dev)
intf_status.get()
    # pause in an interactive shell to explore the table by hand before printing
    code.interact(local=locals())
for intf in intf_status:
intf_items = intf.items()
print(list(intf_items))
print(intf.oper)
|
[
"jnpr.junos.op.phyport.PhyPortTable",
"jnpr.junos.Device"
] |
[((122, 212), 'jnpr.junos.Device', 'Device', ([], {'host': '"""192.168.127.12"""', 'user': '"""pyez"""', 'password': '"""<PASSWORD>!"""', 'gather_facts': '(False)'}), "(host='192.168.127.12', user='pyez', password='<PASSWORD>!',\n gather_facts=False)\n", (128, 212), False, 'from jnpr.junos import Device\n'), ((235, 252), 'jnpr.junos.op.phyport.PhyPortTable', 'PhyPortTable', (['dev'], {}), '(dev)\n', (247, 252), False, 'from jnpr.junos.op.phyport import PhyPortTable\n')]
|
#!/usr/bin/env python
"""
synopsis:
Task worker
Connects PULL socket to tcp://localhost:5557
Collects workloads from ventilator via that socket
Connects PUSH socket to tcp://localhost:5558
Sends results to sink via that socket
Author: <NAME> <lev(at)columbia(dot)edu>
Modified for async/ioloop: <NAME> <dkuhlman(at)davekuhlman(dot)org>
usage:
python taskwork.py
"""
import sys
from functools import partial
import zmq
from zmq.eventloop.future import Context
from zmq.eventloop.ioloop import IOLoop
from tornado import gen
@gen.coroutine
def run_worker(context):
# Socket to receive messages on
receiver = context.socket(zmq.PULL)
receiver.connect("tcp://localhost:5557")
# Socket to send messages to
sender = context.socket(zmq.PUSH)
sender.connect("tcp://localhost:5558")
# Process tasks forever
while True:
s = yield receiver.recv()
# Simple progress indicator for the viewer
sys.stdout.write('.')
sys.stdout.flush()
# Do the work
yield gen.sleep(int(s) * 0.001)
# Send results to sink
yield sender.send(b'')
@gen.coroutine
def run(loop):
context = Context()
yield run_worker(context)
def main():
args = sys.argv[1:]
if len(args) != 0:
sys.exit(__doc__)
try:
loop = IOLoop.current()
        loop.run_sync(partial(run, loop))
except KeyboardInterrupt:
print('\nFinished (interrupted)')
if __name__ == '__main__':
main()
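# Editor's note (addition): this worker is the middle stage of ZeroMQ's classic
# ventilator -> workers -> sink pipeline; it expects a ventilator PUSHing
# workloads on port 5557 and a sink PULLing results on port 5558 (the
# taskvent/tasksink companions in the zguide). Several workers can run in
# parallel, e.g.:
#
#     $ python taskwork.py & python taskwork.py &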
|
[
"sys.stdout.write",
"zmq.eventloop.ioloop.IOLoop.current",
"functools.partial",
"zmq.eventloop.future.Context",
"sys.stdout.flush",
"sys.exit"
] |
[((1190, 1199), 'zmq.eventloop.future.Context', 'Context', ([], {}), '()\n', (1197, 1199), False, 'from zmq.eventloop.future import Context\n'), ((971, 992), 'sys.stdout.write', 'sys.stdout.write', (['"""."""'], {}), "('.')\n", (987, 992), False, 'import sys\n'), ((1001, 1019), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1017, 1019), False, 'import sys\n'), ((1299, 1316), 'sys.exit', 'sys.exit', (['__doc__'], {}), '(__doc__)\n', (1307, 1316), False, 'import sys\n'), ((1341, 1357), 'zmq.eventloop.ioloop.IOLoop.current', 'IOLoop.current', ([], {}), '()\n', (1355, 1357), False, 'from zmq.eventloop.ioloop import IOLoop\n'), ((1380, 1398), 'functools.partial', 'partial', (['run', 'loop'], {}), '(run, loop)\n', (1387, 1398), False, 'from functools import partial\n')]
|
import subprocess as sp
import os
import numpy as np
import argparse
from tqdm import tqdm
def cal_metrics(pred_dir, gt_dir, out_path):
    '''Pair files from pred_dir and gt_dir by basename, then run the precompiled
    Metrics executable on each pair. The results are written to out_path.'''
preds = os.listdir(pred_dir)
gts = os.listdir(gt_dir)
pairs = []
for p in preds:
p_tmp = p.split(".")[0]
for gt in gts:
if p_tmp == gt.split(".")[0]:
pairs.append((p, gt))
break
print("Calculate metrics:")
with open(out_path, "bw+") as out_file:
for pred, gt in tqdm(pairs):
gt_path = os.path.join(gt_dir, gt)
pred_path = os.path.join(pred_dir, pred)
exec_metrics(out_file, gt_path, pred_path)
def exec_metrics(fd, gt, pred):
    '''Run ./Metrics on one (pred, gt) pair and append its stdout to fd.'''
with sp.Popen(["./Metrics", pred, gt], stdout=sp.PIPE) as proc:
fd.write(proc.stdout.read())
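if __name__ == '__main__':
    # Editor's sketch (addition): CLI wiring using the already-imported argparse.
    # The flag names are assumptions, and the precompiled ./Metrics binary must
    # exist in the working directory.
    parser = argparse.ArgumentParser(description='Score predictions against ground truth')
    parser.add_argument('--pred_dir', required=True)
    parser.add_argument('--gt_dir', required=True)
    parser.add_argument('--out', default='metrics.out')
    args = parser.parse_args()
    cal_metrics(args.pred_dir, args.gt_dir, args.out)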
|
[
"subprocess.Popen",
"tqdm.tqdm",
"os.path.join",
"os.listdir"
] |
[((303, 323), 'os.listdir', 'os.listdir', (['pred_dir'], {}), '(pred_dir)\n', (313, 323), False, 'import os\n'), ((334, 352), 'os.listdir', 'os.listdir', (['gt_dir'], {}), '(gt_dir)\n', (344, 352), False, 'import os\n'), ((648, 659), 'tqdm.tqdm', 'tqdm', (['pairs'], {}), '(pairs)\n', (652, 659), False, 'from tqdm import tqdm\n'), ((889, 938), 'subprocess.Popen', 'sp.Popen', (["['./Metrics', pred, gt]"], {'stdout': 'sp.PIPE'}), "(['./Metrics', pred, gt], stdout=sp.PIPE)\n", (897, 938), True, 'import subprocess as sp\n'), ((683, 707), 'os.path.join', 'os.path.join', (['gt_dir', 'gt'], {}), '(gt_dir, gt)\n', (695, 707), False, 'import os\n'), ((732, 760), 'os.path.join', 'os.path.join', (['pred_dir', 'pred'], {}), '(pred_dir, pred)\n', (744, 760), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-09-02 14:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('members', '0036_remove_urlparameter_pass_on_name'),
]
operations = [
migrations.RenameField(
model_name='urlparameter',
old_name='consultation',
new_name='campaign',
),
migrations.AddField(
model_name='urlparameter',
name='default_value_if_no_nation_builder_value',
field=models.CharField(blank=True, default=None, help_text='The value for this parameter if NationBuilder doesnt supply one', max_length=100, null=True),
),
]
|
[
"django.db.models.CharField",
"django.db.migrations.RenameField"
] |
[((315, 414), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""urlparameter"""', 'old_name': '"""consultation"""', 'new_name': '"""campaign"""'}), "(model_name='urlparameter', old_name='consultation',\n new_name='campaign')\n", (337, 414), False, 'from django.db import migrations, models\n'), ((606, 761), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': 'None', 'help_text': '"""The value for this parameter if NationBuilder doesnt supply one"""', 'max_length': '(100)', 'null': '(True)'}), "(blank=True, default=None, help_text=\n 'The value for this parameter if NationBuilder doesnt supply one',\n max_length=100, null=True)\n", (622, 761), False, 'from django.db import migrations, models\n')]
|
import torch
import numpy as np
def accuracy(output, target):
"""Computes the precision@k for the specified values of k"""
batch_size = target.size(0)
pred = torch.argmax(output, dim=1)
pred = pred.squeeze()
correct = pred.eq(target.expand_as(pred))
acc = correct.view(-1).float().sum(0) * 100 / (batch_size)
return acc
def sliding_accuracy(logits, target, slider_length):
'''
compute the accuracy while averaging over slider_length frames
implemented to accumulate at the begining of the sequence and give the average for the last frame in the slider
'''
n_examples = target.size(0)
pred = torch.zeros_like(logits)
for i in range(logits.size(2)):
pred[:, :, i] = torch.mean(logits[:, :, np.max([0, i - slider_length]):i + 1], dim=2)
pred = torch.argmax(pred, dim=1)
pred = pred.squeeze().view(-1)
correct = pred.eq(target)
acc = correct.view(-1).float().sum(0) * 100 / n_examples
return acc, pred
def accuracy_v2(output, target):
"""Computes the precision@k for the specified values of k"""
batch_size = target.size(0)
n_frames = target.size(1)
correct = output.eq(target.expand_as(output))
acc = correct.view(-1).float().sum(0) * 100 / (batch_size*n_frames)
return acc
def accuracy_topk(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def post_process_logits(per_frame_logits, average=False, num_frames_to_avg=12, threshold = 0.7):
if average:
last_frame_logits = torch.mean(per_frame_logits[:, :, -num_frames_to_avg - 1:-1], dim=2)
label_ind = torch.argmax(last_frame_logits, dim=1).item()
last_frame_logits = torch.nn.functional.softmax(last_frame_logits, dim=1).squeeze()
else:
per_frame_logits = torch.nn.functional.softmax(per_frame_logits, dim=1)
_, pred = per_frame_logits.topk(1, 1, True, True)
label_ind = pred.squeeze()[-1].item()
last_frame_logits = per_frame_logits[0, :, -1].squeeze()
if last_frame_logits[label_ind] < threshold:
label_ind = 0
return label_ind, last_frame_logits
def make_weights_for_balanced_classes(clip_set, label_count):
""" compute the weight per clip for the weighted random sampler"""
n_clips = len(clip_set)
nclasses = len(label_count)
N = label_count.sum()
weight_per_class = [0.] * nclasses
for i in range(nclasses):
weight_per_class[i] = N/float(label_count[i])
weight = [0] * n_clips
for idx, clip in enumerate(clip_set):
clip_label_sum = clip[1].sum(axis=1)
if clip_label_sum.sum() == 0:
print("zero!!!")
ratios = clip_label_sum / clip_label_sum.sum()
weight[idx] = np.dot(weight_per_class, ratios)
return weight
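if __name__ == '__main__':
    # Editor's smoke test (addition): synthetic logits/targets; the shapes
    # (batch of 8, 5 classes) are assumptions.
    logits = torch.randn(8, 5)
    target = torch.randint(0, 5, (8,))
    print('top-1 accuracy: %.1f%%' % accuracy(logits, target).item())
    top1, top3 = accuracy_topk(logits, target, topk=(1, 3))
    print('top-1 / top-3: %.1f%% / %.1f%%' % (top1.item(), top3.item()))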
|
[
"torch.mean",
"torch.zeros_like",
"torch.argmax",
"torch.nn.functional.softmax",
"numpy.max",
"numpy.dot"
] |
[((171, 198), 'torch.argmax', 'torch.argmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (183, 198), False, 'import torch\n'), ((655, 679), 'torch.zeros_like', 'torch.zeros_like', (['logits'], {}), '(logits)\n', (671, 679), False, 'import torch\n'), ((822, 847), 'torch.argmax', 'torch.argmax', (['pred'], {'dim': '(1)'}), '(pred, dim=1)\n', (834, 847), False, 'import torch\n'), ((1888, 1956), 'torch.mean', 'torch.mean', (['per_frame_logits[:, :, -num_frames_to_avg - 1:-1]'], {'dim': '(2)'}), '(per_frame_logits[:, :, -num_frames_to_avg - 1:-1], dim=2)\n', (1898, 1956), False, 'import torch\n'), ((2152, 2204), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['per_frame_logits'], {'dim': '(1)'}), '(per_frame_logits, dim=1)\n', (2179, 2204), False, 'import torch\n'), ((3090, 3122), 'numpy.dot', 'np.dot', (['weight_per_class', 'ratios'], {}), '(weight_per_class, ratios)\n', (3096, 3122), True, 'import numpy as np\n'), ((1977, 2015), 'torch.argmax', 'torch.argmax', (['last_frame_logits'], {'dim': '(1)'}), '(last_frame_logits, dim=1)\n', (1989, 2015), False, 'import torch\n'), ((2051, 2104), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['last_frame_logits'], {'dim': '(1)'}), '(last_frame_logits, dim=1)\n', (2078, 2104), False, 'import torch\n'), ((764, 794), 'numpy.max', 'np.max', (['[0, i - slider_length]'], {}), '([0, i - slider_length])\n', (770, 794), True, 'import numpy as np\n')]
|
import logging
import requests
from urllib.parse import urlencode
from requests.exceptions import RequestException
class AnemometerClient(object):
"""
Fetch and parse JSON from Anemometer instance
"""
# default set of fields to be returned
FIELDS = [
'checksum',
'snippet',
'index_ratio',
'query_time_avg',
'rows_sent_avg',
'ts_cnt',
'Query_time_sum',
'Lock_time_sum',
'Rows_sent_sum',
'Rows_examined_sum',
'Rows_examined_median',
        'Query_time_median',
'dimension.sample',
'hostname_max',
'db_max',
'Fingerprint',
]
def __init__(self, root_url):
self._http = None
self._logger = logging.getLogger(self.__class__.__name__)
self._root_url = root_url
@property
def http(self):
if self._http is None:
self._http = requests.session()
return self._http
def _get_full_url(self, params):
encoded_params = urlencode(params, doseq=True)
return '{}/index.php?{}'.format(self._root_url, encoded_params)
def get_queries(self, fields=None, order=None, limit=None, group=None):
# apply default values
fields = fields or self.FIELDS
order = order or 'Query_time_sum DESC'
limit = limit or 150
group = group or 'checksum'
# format the URL
url = self._get_full_url(params={
'action': 'api',
'output': 'json',
'datasource': 'localhost',
'fact-group': group,
'fact-order': order,
'fact-limit': limit,
'table_fields[]': fields
})
self._logger.info('Fetching <{}>'.format(url))
try:
resp = self.http.get(url).json()
queries = resp.get('result', [])
self._logger.info('Got {} queries'.format(len(queries)))
return queries
except RequestException as e:
self._logger.error('HTTP request failed', exc_info=True)
raise e
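if __name__ == '__main__':
    # Editor's sketch (addition): requires a reachable Anemometer instance;
    # the URL below is a placeholder.
    logging.basicConfig(level=logging.INFO)
    client = AnemometerClient('http://anemometer.example.com')
    for row in client.get_queries(limit=10):
        print(row.get('checksum'), row.get('hostname_max'))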
|
[
"requests.session",
"urllib.parse.urlencode",
"logging.getLogger"
] |
[((785, 827), 'logging.getLogger', 'logging.getLogger', (['self.__class__.__name__'], {}), '(self.__class__.__name__)\n', (802, 827), False, 'import logging\n'), ((1062, 1091), 'urllib.parse.urlencode', 'urlencode', (['params'], {'doseq': '(True)'}), '(params, doseq=True)\n', (1071, 1091), False, 'from urllib.parse import urlencode\n'), ((953, 971), 'requests.session', 'requests.session', ([], {}), '()\n', (969, 971), False, 'import requests\n')]
|
from brands import brands_az
from brands import dbpedia
from brands import roadbikereview
from brands import bikeindex
if __name__ == '__main__':
# b1 = brands_az.get_blog_brands()
# b2 = dbpedia.get_dbpedia_brands()
# roadbikereview.get_review_brands()
bikeindex.get_index_brands()
# print("%s %s" % (len(b1), len(b2)))
|
[
"brands.bikeindex.get_index_brands"
] |
[((271, 299), 'brands.bikeindex.get_index_brands', 'bikeindex.get_index_brands', ([], {}), '()\n', (297, 299), False, 'from brands import bikeindex\n')]
|
import glob
csv:list = []
for folder in glob.glob("../data/VIDEO/*"):
for file in glob.glob(folder + "/*.csv"):
csv.append(file)
columns:bool = True
with open("framing_action.csv", "w") as f:
for fcsv in csv:
with open(fcsv, "r") as fc:
            if columns:
                # first file: copy everything, including the header row
                f.writelines(fc.readlines())
                columns = False
            else:
                # later files: skip the header row, then append the rest
                fc.readline()
                f.writelines(fc.readlines())
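# Editor's note (addition): an equivalent merge with pandas, assuming every
# CSV shares the same header row:
#
#     import pandas as pd
#     pd.concat(pd.read_csv(f) for f in csv).to_csv("framing_action.csv", index=False)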
|
[
"glob.glob"
] |
[((42, 70), 'glob.glob', 'glob.glob', (['"""../data/VIDEO/*"""'], {}), "('../data/VIDEO/*')\n", (51, 70), False, 'import glob\n'), ((88, 116), 'glob.glob', 'glob.glob', (["(folder + '/*.csv')"], {}), "(folder + '/*.csv')\n", (97, 116), False, 'import glob\n')]
|
import tensorflow as tf
import tflearn
import numpy as np
import re
from model import SelfAttentive
from sklearn.utils import shuffle
from reader import load_csv, VocabDict
'''
parse
'''
tf.app.flags.DEFINE_integer('num_epochs', 5, 'number of epochs to train')
tf.app.flags.DEFINE_integer('batch_size', 20, 'batch size to train in one step')
tf.app.flags.DEFINE_integer('labels', 5, 'number of label classes')
tf.app.flags.DEFINE_integer('word_pad_length', 60, 'word pad length for training')
tf.app.flags.DEFINE_integer('decay_step', 500, 'decay steps')
tf.app.flags.DEFINE_float('learn_rate', 1e-2, 'learn rate for training optimization')
tf.app.flags.DEFINE_boolean('shuffle', True, 'shuffle data FLAG')
tf.app.flags.DEFINE_boolean('train', True, 'train mode FLAG')
tf.app.flags.DEFINE_boolean('visualize', False, 'visualize FLAG')
tf.app.flags.DEFINE_boolean('penalization', True, 'penalization FLAG')
FLAGS = tf.app.flags.FLAGS
num_epochs = FLAGS.num_epochs
batch_size = FLAGS.batch_size
tag_size = FLAGS.labels
word_pad_length = FLAGS.word_pad_length
lr = FLAGS.learn_rate
TOKENIZER_RE = re.compile(r"[A-Z]{2,}(?![a-z])|[A-Z][a-z]+(?=[A-Z])|[\'\w\-]+", re.UNICODE)
def token_parse(iterator):
for value in iterator:
return TOKENIZER_RE.findall(value)
tokenizer = tflearn.data_utils.VocabularyProcessor(word_pad_length, tokenizer_fn=lambda tokens: [token_parse(x) for x in tokens])
label_dict = VocabDict()
def string_parser(arr, fit):
if fit == False:
return list(tokenizer.transform(arr))
else:
return list(tokenizer.fit_transform(arr))
model = SelfAttentive()
with tf.Session() as sess:
# build graph
model.build_graph(n=word_pad_length)
# Downstream Application
with tf.variable_scope('DownstreamApplication'):
global_step = tf.Variable(0, trainable=False, name='global_step')
learn_rate = tf.train.exponential_decay(lr, global_step, FLAGS.decay_step, 0.95, staircase=True)
labels = tf.placeholder('float32', shape=[None, tag_size])
net = tflearn.fully_connected(model.M, 2000, activation='relu')
logits = tflearn.fully_connected(net, tag_size, activation=None)
loss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits), axis=1)
if FLAGS.penalization == True:
p_coef = 0.004
p_loss = p_coef * model.P
loss = loss + p_loss
p_loss = tf.reduce_mean(p_loss)
loss = tf.reduce_mean(loss)
params = tf.trainable_variables()
#clipped_gradients = [tf.clip_by_value(x, -0.5, 0.5) for x in gradients]
optimizer = tf.train.AdamOptimizer(learn_rate)
grad_and_vars = tf.gradients(loss, params)
clipped_gradients, _ = tf.clip_by_global_norm(grad_and_vars, 0.5)
opt = optimizer.apply_gradients(zip(clipped_gradients, params), global_step=global_step)
# Start Training
sess.run(tf.global_variables_initializer())
words, tags = load_csv('./data/ag_news_csv/train.csv', target_columns=[0], columns_to_ignore=[1], target_dict=label_dict)
words = string_parser(words, fit=True)
if FLAGS.shuffle == True:
words, tags = shuffle(words, tags)
word_input = tflearn.data_utils.pad_sequences(words, maxlen=word_pad_length)
total = len(word_input)
step_print = int((total/batch_size) / 13)
if FLAGS.train == True:
print('start training')
for epoch_num in range(num_epochs):
epoch_loss = 0
step_loss = 0
for i in range(int(total/batch_size)):
batch_input, batch_tags = (word_input[i*batch_size:(i+1)*batch_size], tags[i*batch_size:(i+1)*batch_size])
train_ops = [opt, loss, learn_rate, global_step]
if FLAGS.penalization == True:
train_ops += [p_loss]
result = sess.run(train_ops, feed_dict={model.input_pl: batch_input, labels: batch_tags})
step_loss += result[1]
epoch_loss += result[1]
        if i % step_print == 0:  # the original `(step_print-step_print)` is just 0
          if FLAGS.penalization == True:
            print(f'step_log: (epoch: {epoch_num}, step: {i}, global_step: {result[3]}, learn_rate: {result[2]}), Loss: {step_loss/step_print}, Penalization: {result[4]}')
          else:
            print(f'step_log: (epoch: {epoch_num}, step: {i}, global_step: {result[3]}, learn_rate: {result[2]}), Loss: {step_loss/step_print}')
          #print(f'{result[4]}')
          step_loss = 0
print('***')
      print(f'epoch {epoch_num}: (global_step: {result[3]}), Average Loss: {epoch_loss/(total/batch_size)}')
print('***\n')
saver = tf.train.Saver()
saver.save(sess, './model.ckpt')
else:
saver = tf.train.Saver()
saver.restore(sess, './model.ckpt')
words, tags = load_csv('./data/ag_news_csv/test.csv', target_columns=[0], columns_to_ignore=[1], target_dict=label_dict)
words_with_index = string_parser(words, fit=True)
word_input = tflearn.data_utils.pad_sequences(words_with_index, maxlen=word_pad_length)
total = len(word_input)
rs = 0.
if FLAGS.visualize == True:
f = open('visualize.html', 'w')
f.write('<html style="margin:0;padding:0;"><body style="margin:0;padding:0;">\n')
for i in range(int(total/batch_size)):
batch_input, batch_tags = (word_input[i*batch_size:(i+1)*batch_size], tags[i*batch_size:(i+1)*batch_size])
result = sess.run([logits, model.A], feed_dict={model.input_pl: batch_input, labels: batch_tags})
arr = result[0]
for j in range(len(batch_tags)):
rs+=np.sum(np.argmax(arr[j]) == np.argmax(batch_tags[j]))
if FLAGS.visualize == True:
f.write('<div style="margin:25px;">\n')
for k in range(len(result[1][0])):
f.write('<p style="margin:10px;">\n')
ww = TOKENIZER_RE.findall(words[i*batch_size][0])
for j in range(word_pad_length):
alpha = "{:.2f}".format(result[1][0][k][j])
if len(ww) <= j:
w = "___"
else:
w = ww[j]
f.write(f'\t<span style="margin-left:3px;background-color:rgba(255,0,0,{alpha})">{w}</span>\n')
f.write('</p>\n')
f.write('</div>\n')
if FLAGS.visualize == True:
f.write('</body></html>')
f.close()
print(f'Test accuracy: {rs/total}')
sess.close()
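# Editor's note (addition): typical invocations, with flag names taken from the
# tf.app.flags definitions above (the script filename is an assumption):
#
#     $ python self_attentive.py --num_epochs 5 --penalization True
#     $ python self_attentive.py --train False --visualize True  # writes visualize.html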
|
[
"tensorflow.app.flags.DEFINE_float",
"tensorflow.trainable_variables",
"numpy.argmax",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.app.flags.DEFINE_boolean",
"tensorflow.Variable",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.clip_by_global_norm",
"tensorflow.variable_scope",
"tensorflow.placeholder",
"reader.VocabDict",
"tensorflow.gradients",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"reader.load_csv",
"tensorflow.Session",
"tensorflow.reduce_mean",
"tensorflow.train.exponential_decay",
"re.compile",
"tflearn.fully_connected",
"tflearn.data_utils.pad_sequences",
"model.SelfAttentive",
"sklearn.utils.shuffle",
"tensorflow.train.AdamOptimizer"
] |
[((189, 262), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_epochs"""', '(5)', '"""number of epochs to train"""'], {}), "('num_epochs', 5, 'number of epochs to train')\n", (216, 262), True, 'import tensorflow as tf\n'), ((263, 348), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batch_size"""', '(20)', '"""batch size to train in one step"""'], {}), "('batch_size', 20, 'batch size to train in one step'\n )\n", (290, 348), True, 'import tensorflow as tf\n'), ((344, 411), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""labels"""', '(5)', '"""number of label classes"""'], {}), "('labels', 5, 'number of label classes')\n", (371, 411), True, 'import tensorflow as tf\n'), ((412, 498), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""word_pad_length"""', '(60)', '"""word pad length for training"""'], {}), "('word_pad_length', 60,\n 'word pad length for training')\n", (439, 498), True, 'import tensorflow as tf\n'), ((495, 556), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""decay_step"""', '(500)', '"""decay steps"""'], {}), "('decay_step', 500, 'decay steps')\n", (522, 556), True, 'import tensorflow as tf\n'), ((557, 646), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""learn_rate"""', '(0.01)', '"""learn rate for training optimization"""'], {}), "('learn_rate', 0.01,\n 'learn rate for training optimization')\n", (582, 646), True, 'import tensorflow as tf\n'), ((643, 708), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""shuffle"""', '(True)', '"""shuffle data FLAG"""'], {}), "('shuffle', True, 'shuffle data FLAG')\n", (670, 708), True, 'import tensorflow as tf\n'), ((709, 770), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""train"""', '(True)', '"""train mode FLAG"""'], {}), "('train', True, 'train mode FLAG')\n", (736, 770), True, 'import tensorflow as tf\n'), ((771, 836), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""visualize"""', '(False)', '"""visualize FLAG"""'], {}), "('visualize', False, 'visualize FLAG')\n", (798, 836), True, 'import tensorflow as tf\n'), ((837, 907), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""penalization"""', '(True)', '"""penalization FLAG"""'], {}), "('penalization', True, 'penalization FLAG')\n", (864, 907), True, 'import tensorflow as tf\n'), ((1099, 1177), 're.compile', 're.compile', (['"""[A-Z]{2,}(?![a-z])|[A-Z][a-z]+(?=[A-Z])|[\\\\\'\\\\w\\\\-]+"""', 're.UNICODE'], {}), '("[A-Z]{2,}(?![a-z])|[A-Z][a-z]+(?=[A-Z])|[\\\\\'\\\\w\\\\-]+", re.UNICODE)\n', (1109, 1177), False, 'import re\n'), ((1411, 1422), 'reader.VocabDict', 'VocabDict', ([], {}), '()\n', (1420, 1422), False, 'from reader import load_csv, VocabDict\n'), ((1577, 1592), 'model.SelfAttentive', 'SelfAttentive', ([], {}), '()\n', (1590, 1592), False, 'from model import SelfAttentive\n'), ((1598, 1610), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1608, 1610), True, 'import tensorflow as tf\n'), ((2872, 2983), 'reader.load_csv', 'load_csv', (['"""./data/ag_news_csv/train.csv"""'], {'target_columns': '[0]', 'columns_to_ignore': '[1]', 'target_dict': 'label_dict'}), "('./data/ag_news_csv/train.csv', target_columns=[0],\n columns_to_ignore=[1], target_dict=label_dict)\n", (2880, 2983), False, 'from reader import load_csv, VocabDict\n'), ((3103, 3166), 'tflearn.data_utils.pad_sequences', 'tflearn.data_utils.pad_sequences', (['words'], {'maxlen': 'word_pad_length'}), '(words, maxlen=word_pad_length)\n', (3135, 3166), False, 'import tflearn\n'), ((4621, 4731), 'reader.load_csv', 'load_csv', (['"""./data/ag_news_csv/test.csv"""'], {'target_columns': '[0]', 'columns_to_ignore': '[1]', 'target_dict': 'label_dict'}), "('./data/ag_news_csv/test.csv', target_columns=[0],\n columns_to_ignore=[1], target_dict=label_dict)\n", (4629, 4731), False, 'from reader import load_csv, VocabDict\n'), ((4795, 4869), 'tflearn.data_utils.pad_sequences', 'tflearn.data_utils.pad_sequences', (['words_with_index'], {'maxlen': 'word_pad_length'}), '(words_with_index, maxlen=word_pad_length)\n', (4827, 4869), False, 'import tflearn\n'), ((1709, 1751), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""DownstreamApplication"""'], {}), "('DownstreamApplication')\n", (1726, 1751), True, 'import tensorflow as tf\n'), ((1771, 1822), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)', 'name': '"""global_step"""'}), "(0, trainable=False, name='global_step')\n", (1782, 1822), True, 'import tensorflow as tf\n'), ((1840, 1927), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['lr', 'global_step', 'FLAGS.decay_step', '(0.95)'], {'staircase': '(True)'}), '(lr, global_step, FLAGS.decay_step, 0.95,\n staircase=True)\n', (1866, 1927), True, 'import tensorflow as tf\n'), ((1937, 1986), 'tensorflow.placeholder', 'tf.placeholder', (['"""float32"""'], {'shape': '[None, tag_size]'}), "('float32', shape=[None, tag_size])\n", (1951, 1986), True, 'import tensorflow as tf\n'), ((1997, 2054), 'tflearn.fully_connected', 'tflearn.fully_connected', (['model.M', '(2000)'], {'activation': '"""relu"""'}), "(model.M, 2000, activation='relu')\n", (2020, 2054), False, 'import tflearn\n'), ((2068, 2123), 'tflearn.fully_connected', 'tflearn.fully_connected', (['net', 'tag_size'], {'activation': 'None'}), '(net, tag_size, activation=None)\n', (2091, 2123), False, 'import tflearn\n'), ((2392, 2412), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (2406, 2412), True, 'import tensorflow as tf\n'), ((2426, 2450), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (2448, 2450), True, 'import tensorflow as tf\n'), ((2544, 2578), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learn_rate'], {}), '(learn_rate)\n', (2566, 2578), True, 'import tensorflow as tf\n'), ((2599, 2625), 'tensorflow.gradients', 'tf.gradients', (['loss', 'params'], {}), '(loss, params)\n', (2611, 2625), True, 'import tensorflow as tf\n'), ((2653, 2695), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['grad_and_vars', '(0.5)'], {}), '(grad_and_vars, 0.5)\n', (2675, 2695), True, 'import tensorflow as tf\n'), ((2820, 2853), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2851, 2853), True, 'import tensorflow as tf\n'), ((3067, 3087), 'sklearn.utils.shuffle', 'shuffle', (['words', 'tags'], {}), '(words, tags)\n', (3074, 3087), False, 'from sklearn.utils import shuffle\n'), ((4471, 4487), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (4485, 4487), True, 'import tensorflow as tf\n'), ((4545, 4561), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (4559, 4561), True, 'import tensorflow as tf\n'), ((2149, 2218), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'labels': 'labels', 'logits': 'logits'}), '(labels=labels, logits=logits)\n', (2188, 2218), True, 'import tensorflow as tf\n'), ((2358, 2380), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['p_loss'], {}), '(p_loss)\n', (2372, 2380), True, 'import tensorflow as tf\n'), ((5388, 5405), 'numpy.argmax', 'np.argmax', (['arr[j]'], {}), '(arr[j])\n', (5397, 5405), True, 'import numpy as np\n'), ((5409, 5433), 'numpy.argmax', 'np.argmax', (['batch_tags[j]'], {}), '(batch_tags[j])\n', (5418, 5433), True, 'import numpy as np\n')]
|
from itertools import product
import numpy as np
import argparse
from joblib import Parallel, delayed
from pathlib import Path
import openslide
from openslide.deepzoom import DeepZoomGenerator
class Patcher:
def __init__(self):
self._get_args()
self._make_output_dir()
self._read_img()
def _get_args(self):
parser = argparse.ArgumentParser(description="Make patches from WSI.")
parser.add_argument("img_path",
help="Path to the whole slide image.")
parser.add_argument("-s", "--output_size",
help="Output patch size of both x, y without the overlap area.",
default=254,
type=int)
parser.add_argument("-ov", "--overlap",
help="Overlap size.",
default=1,
type=int)
parser.add_argument("-ou", "--output_dir",
help="Where to save the patches.")
parser.add_argument("-t", "--thresh",
default=0,
type=int,
help="If set a int 1-255, saves only onshore patch.")
self.args = parser.parse_args()
def _make_output_dir(self):
if self.args.output_dir is None:
wsipath = Path(self.args.img_path)
self.args.output_dir = wsipath.parent/wsipath.stem
if not Path(self.args.output_dir).exists():
Path(self.args.output_dir).mkdir(parents=True)
self.output_dir = Path(self.args.output_dir)
def _read_img(self):
img = openslide.OpenSlide(self.args.img_path)
self.dzimg = DeepZoomGenerator(img,
int(self.args.output_size),
int(self.args.overlap))
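# level_tiles[-1] is the (columns, rows) tile grid at the deepest, full-resolution level of the pyramid.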
self.tiles = self.dzimg.level_tiles[-1]
self.deepest_level = self.dzimg.level_count - 1
self.iterator = product(range(self.tiles[0]), range(self.tiles[1]))
def make_patch(self, x, y):
patch = self.dzimg.get_tile(self.deepest_level, (x, y))
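# Optional background filter: the patch is kept only when its mean pixel value falls below the threshold.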
if self.args.thresh:
checker = np.array(patch)
if np.mean(checker) < int(self.args.thresh):
patch.save(f"{self.output_dir}/{x:04}_{y:04}.png")
else:
patch.save(f"{self.output_dir}/{x:04}_{y:04}.png")
def make_patch_parallel(self):
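# Threads rather than processes, so all workers share the single OpenSlide handle held by this object.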
parallel = Parallel(n_jobs=-1, verbose=1, backend="threading")
parallel([delayed(self.make_patch)(x, y) for x, y in self.iterator])
def make_patch_for(self):
for x, y in self.iterator:
self.make_patch(x, y)
if __name__ == '__main__':
patcher = Patcher()
patcher.make_patch_parallel()
# patcher.make_patch_for() # use if make_patch_parallel doesn't work.
|
[
"openslide.OpenSlide",
"argparse.ArgumentParser",
"pathlib.Path",
"numpy.mean",
"numpy.array",
"joblib.Parallel",
"joblib.delayed"
] |
[((360, 421), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Make patches from WSI."""'}), "(description='Make patches from WSI.')\n", (383, 421), False, 'import argparse\n'), ((1607, 1633), 'pathlib.Path', 'Path', (['self.args.output_dir'], {}), '(self.args.output_dir)\n', (1611, 1633), False, 'from pathlib import Path\n'), ((1674, 1713), 'openslide.OpenSlide', 'openslide.OpenSlide', (['self.args.img_path'], {}), '(self.args.img_path)\n', (1693, 1713), False, 'import openslide\n'), ((2488, 2539), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)', 'verbose': '(1)', 'backend': '"""threading"""'}), "(n_jobs=-1, verbose=1, backend='threading')\n", (2496, 2539), False, 'from joblib import Parallel, delayed\n'), ((1382, 1406), 'pathlib.Path', 'Path', (['self.args.img_path'], {}), '(self.args.img_path)\n', (1386, 1406), False, 'from pathlib import Path\n'), ((2216, 2231), 'numpy.array', 'np.array', (['patch'], {}), '(patch)\n', (2224, 2231), True, 'import numpy as np\n'), ((2247, 2263), 'numpy.mean', 'np.mean', (['checker'], {}), '(checker)\n', (2254, 2263), True, 'import numpy as np\n'), ((1485, 1511), 'pathlib.Path', 'Path', (['self.args.output_dir'], {}), '(self.args.output_dir)\n', (1489, 1511), False, 'from pathlib import Path\n'), ((1534, 1560), 'pathlib.Path', 'Path', (['self.args.output_dir'], {}), '(self.args.output_dir)\n', (1538, 1560), False, 'from pathlib import Path\n'), ((2558, 2582), 'joblib.delayed', 'delayed', (['self.make_patch'], {}), '(self.make_patch)\n', (2565, 2582), False, 'from joblib import Parallel, delayed\n')]
|
from django.test import TestCase
from django.test import Client
from django.contrib.auth.models import User
from contenido.models import Audio
# Create your tests here.
class ContenidoTests(TestCase):
def setUp(self):
# Every test needs access to the request factory.
user = User.objects.create_user(
username='userTest', email='<EMAIL>', password='<PASSWORD>')
audio = Audio()
audio.nom_audio = "song1"
audio.val_recurso = "http://la...."
audio.fec_entrada_audio = "2016-10-08"
audio.save()
audio.likes.add(User.objects.get(id=user.id))
audio.save()
def test_unlike_view(self):
audio = Audio.objects.get(pk=1)
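# Capture the like count before the request so the decrement can be asserted afterwards.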
total_likes_before = audio.likes.count()
c = Client()
response = c.post('/unlike/', {'song_id': 1})
audio = Audio.objects.get(pk=1)
audio.likes.remove(User.objects.get(id=1))
total_likes_after = audio.likes.count()
self.assertEqual(response.status_code, 200)
self.assertEqual(total_likes_after, total_likes_before - 1)
|
[
"django.contrib.auth.models.User.objects.get",
"django.test.Client",
"django.contrib.auth.models.User.objects.create_user",
"contenido.models.Audio",
"contenido.models.Audio.objects.get"
] |
[((299, 389), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""userTest"""', 'email': '"""<EMAIL>"""', 'password': '"""<PASSWORD>"""'}), "(username='userTest', email='<EMAIL>', password=\n '<PASSWORD>')\n", (323, 389), False, 'from django.contrib.auth.models import User\n'), ((414, 421), 'contenido.models.Audio', 'Audio', ([], {}), '()\n', (419, 421), False, 'from contenido.models import Audio\n'), ((692, 715), 'contenido.models.Audio.objects.get', 'Audio.objects.get', ([], {'pk': '(1)'}), '(pk=1)\n', (709, 715), False, 'from contenido.models import Audio\n'), ((777, 785), 'django.test.Client', 'Client', ([], {}), '()\n', (783, 785), False, 'from django.test import Client\n'), ((856, 879), 'contenido.models.Audio.objects.get', 'Audio.objects.get', ([], {'pk': '(1)'}), '(pk=1)\n', (873, 879), False, 'from contenido.models import Audio\n'), ((592, 620), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'id': 'user.id'}), '(id=user.id)\n', (608, 620), False, 'from django.contrib.auth.models import User\n'), ((907, 929), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'id': '(1)'}), '(id=1)\n', (923, 929), False, 'from django.contrib.auth.models import User\n')]
|
import os
import torch
import numpy as np
from pytorch_retinanet.loss import FocalLoss
from pytorch_retinanet.retinanet import RetinaNet
from pytorch_retinanet.encoder import DataEncoder
import local_config
from braille_utils import label_tools
def create_model_retinanet(params, device):
'''
Creates model and auxiliary functions
:param params: OvoTools.AttrDict with parameters
:param device: 'cuda'/'cpu'
:return: model, detection_collate function, loss function
'''
use_multiple_class_groups = params.data.get('class_as_6pt', False)
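# num_classes: one generic point class, six per-dot binary class groups, or all 64 (2**6) Braille characters.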
num_classes = 1 if params.data.get_points else ([1]*6 if use_multiple_class_groups else 64)
encoder = DataEncoder(**params.model_params.encoder_params)
model = RetinaNet(num_layers=encoder.num_layers(), num_anchors=encoder.num_anchors(),
num_classes=num_classes,
num_fpn_layers=params.model_params.get('num_fpn_layers', 0)).to(device)
retina_loss = FocalLoss(num_classes=num_classes, **params.model_params.get('loss_params', dict()))
def detection_collate(batch):
'''
:param batch: list of (tb image(CHW float), [(left, top, right, bottom, class),...]) coords in [0,1], extra_params
:return: batch: ( images (BCNHW), ( encoded_rects, encoded_labels ) )
copied from RetinaNet, but a) accepts rects as input, b) returns (x,y) where y = (encoded_rects, encoded_labels)
'''
# t = [b for b in batch if b[1].shape[0]==0]
# if len(t):
# pass
#device = torch.device('cpu') # commented to use settings.device
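# Boxes arrive normalized to [0,1]; scale by (w, h, w, h) of the network input to get pixel coordinates.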
boxes = [torch.tensor(b[1][:, :4], dtype = torch.float32, device=device)
*torch.tensor(params.data.net_hw[::-1]*2, dtype = torch.float32, device=device) for b in batch]
labels = [torch.tensor(b[1][:, 4], dtype = torch.long, device=device) for b in batch]
if params.data.get_points:
labels = [torch.tensor([0]*len(lb), dtype = torch.long, device=device) for lb in labels]
elif use_multiple_class_groups:
# classes are numbered from 0, a missing class = -1; later, encode() uses cls_targets = 1 + labels
labels = [torch.tensor([[int(ch)-1 for ch in label_tools.int_to_label010(int_lbl.item())] for int_lbl in lb],
dtype=torch.long, device=device) for lb in labels]
original_images = [b[3] for b in batch if len(b)>3] # batch contains augmented image if not in train mode
imgs = [x[0] for x in batch]
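# Per-sample flag: items with calc_cls=False are meant to be excluded from the classification part of the loss.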
calc_cls_mask = torch.tensor([b[2].get('calc_cls', True) for b in batch],
dtype=torch.bool,
device=device)
h, w = tuple(params.data.net_hw)
num_imgs = len(batch)
inputs = torch.zeros(num_imgs, 3, h, w).to(imgs[0])
loc_targets = []
cls_targets = []
for i in range(num_imgs):
inputs[i] = imgs[i]
labels_i = labels[i]
if use_multiple_class_groups and len(labels_i.shape) != 2: # it can happen if no labels are on image
labels_i = labels_i.reshape((0, len(num_classes)))
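# encode() matches ground-truth boxes to anchors, producing per-anchor regression and class targets for this image.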
loc_target, cls_target, max_ious = encoder.encode(boxes[i], labels_i, input_size=(w,h))
loc_targets.append(loc_target)
cls_targets.append(cls_target)
if original_images: # inference mode
return inputs, ( torch.stack(loc_targets), torch.stack(cls_targets), calc_cls_mask), original_images
else:
return inputs, (torch.stack(loc_targets), torch.stack(cls_targets), calc_cls_mask)
class Loss:
def __init__(self):
self.encoder = encoder
pass
def __call__(self, pred, targets):
loc_preds, cls_preds = pred
loc_targets, cls_targets, calc_cls_mask = targets
if calc_cls_mask.min(): # nothing is skipped
calc_cls_mask = None
loss = retina_loss(loc_preds, loc_targets, cls_preds, cls_targets, cls_calc_mask=calc_cls_mask)
return loss
def get_dict(self, *kargs, **kwargs):
return retina_loss.loss_dict
def metric(self, key):
def call(*kargs, **kwargs):
return retina_loss.loss_dict[key]
return call
return model, detection_collate, Loss()
if __name__ == '__main__':
pass
|
[
"torch.zeros",
"pytorch_retinanet.encoder.DataEncoder",
"torch.stack",
"torch.tensor"
] |
[((678, 727), 'pytorch_retinanet.encoder.DataEncoder', 'DataEncoder', ([], {}), '(**params.model_params.encoder_params)\n', (689, 727), False, 'from pytorch_retinanet.encoder import DataEncoder\n'), ((1827, 1884), 'torch.tensor', 'torch.tensor', (['b[1][:, 4]'], {'dtype': 'torch.long', 'device': 'device'}), '(b[1][:, 4], dtype=torch.long, device=device)\n', (1839, 1884), False, 'import torch\n'), ((1632, 1693), 'torch.tensor', 'torch.tensor', (['b[1][:, :4]'], {'dtype': 'torch.float32', 'device': 'device'}), '(b[1][:, :4], dtype=torch.float32, device=device)\n', (1644, 1693), False, 'import torch\n'), ((1714, 1792), 'torch.tensor', 'torch.tensor', (['(params.data.net_hw[::-1] * 2)'], {'dtype': 'torch.float32', 'device': 'device'}), '(params.data.net_hw[::-1] * 2, dtype=torch.float32, device=device)\n', (1726, 1792), False, 'import torch\n'), ((2805, 2835), 'torch.zeros', 'torch.zeros', (['num_imgs', '(3)', 'h', 'w'], {}), '(num_imgs, 3, h, w)\n', (2816, 2835), False, 'import torch\n'), ((3439, 3463), 'torch.stack', 'torch.stack', (['loc_targets'], {}), '(loc_targets)\n', (3450, 3463), False, 'import torch\n'), ((3465, 3489), 'torch.stack', 'torch.stack', (['cls_targets'], {}), '(cls_targets)\n', (3476, 3489), False, 'import torch\n'), ((3565, 3589), 'torch.stack', 'torch.stack', (['loc_targets'], {}), '(loc_targets)\n', (3576, 3589), False, 'import torch\n'), ((3591, 3615), 'torch.stack', 'torch.stack', (['cls_targets'], {}), '(cls_targets)\n', (3602, 3615), False, 'import torch\n')]
|
# Copyright 2015 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import mock
from nova import exception
from nova import objects
from nova import test
from pypowervm.tests import test_fixtures as pvm_fx
from nova_powervm.tests.virt import powervm
from nova_powervm.virt.powervm.tasks import network as tf_net
def cna(mac):
"""Builds a mock Client Network Adapter for unit tests."""
nic = mock.MagicMock()
nic.mac = mac
nic.vswitch_uri = 'fake_href'
return nic
class TestNetwork(test.TestCase):
def setUp(self):
super(TestNetwork, self).setUp()
self.apt = self.useFixture(pvm_fx.AdapterFx()).adpt
self.mock_lpar_wrap = mock.MagicMock()
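# can_modify_io() is stubbed with the (capable, reason) tuple the network tasks expect.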
self.mock_lpar_wrap.can_modify_io.return_value = True, None
@mock.patch('nova_powervm.virt.powervm.vm.get_cnas')
def test_unplug_vifs(self, mock_vm_get):
"""Tests that a delete of the vif can be done."""
inst = objects.Instance(**powervm.TEST_INSTANCE)
# Mock up the CNA response. One should already exist, the other
# should not.
cnas = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11'), cna('AABBCCDDEE22')]
mock_vm_get.return_value = cnas
# Mock up the network info. This also validates that they will be
# sanitized to upper case.
net_info = [
{'address': 'aa:bb:cc:dd:ee:ff'}, {'address': 'aa:bb:cc:dd:ee:22'},
{'address': 'aa:bb:cc:dd:ee:33'}
]
# Run method
p_vifs = tf_net.UnplugVifs(self.apt, inst, net_info, 'host_uuid')
p_vifs.execute(self.mock_lpar_wrap)
# The delete should have only been called once. The second CNA didn't
# have a matching mac...so it should be skipped.
self.assertEqual(1, cnas[0].delete.call_count)
self.assertEqual(0, cnas[1].delete.call_count)
self.assertEqual(1, cnas[2].delete.call_count)
def test_unplug_vifs_invalid_state(self):
"""Tests that the delete raises an exception if bad VM state."""
inst = objects.Instance(**powervm.TEST_INSTANCE)
# Mock that the state is incorrect
self.mock_lpar_wrap.can_modify_io.return_value = False, 'bad'
# Run method
p_vifs = tf_net.UnplugVifs(self.apt, inst, mock.Mock(), 'host_uuid')
self.assertRaises(tf_net.VirtualInterfaceUnplugException,
p_vifs.execute, self.mock_lpar_wrap)
@mock.patch('nova_powervm.virt.powervm.vm.crt_vif')
@mock.patch('nova_powervm.virt.powervm.vm.get_cnas')
def test_plug_vifs_rmc(self, mock_vm_get, mock_vm_crt):
"""Tests that a crt vif can be done with secure RMC."""
inst = objects.Instance(**powervm.TEST_INSTANCE)
# Mock up the CNA response. One should already exist, the other
# should not.
mock_vm_get.return_value = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11')]
# Mock up the network info. This also validates that they will be
# sanitized to upper case.
net_info = [
{'address': 'aa:bb:cc:dd:ee:ff'}, {'address': 'aa:bb:cc:dd:ee:22'},
{'address': 'aa:bb:cc:dd:ee:33'}
]
# Run method
p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info,
'host_uuid')
p_vifs.execute(self.mock_lpar_wrap)
# The create should have only been called once.
self.assertEqual(2, mock_vm_crt.call_count)
@mock.patch('nova_powervm.virt.powervm.vm.crt_vif')
@mock.patch('nova_powervm.virt.powervm.vm.get_cnas')
def test_plug_vifs_rmc_no_create(self, mock_vm_get, mock_vm_crt):
"""Verifies if no creates are needed, none are done."""
inst = objects.Instance(**powervm.TEST_INSTANCE)
# Mock up the CNA response. Both should already exist.
mock_vm_get.return_value = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11')]
# Mock up the network info. This also validates that they will be
# sanitized to upper case.
net_info = [
{'address': 'aa:bb:cc:dd:ee:ff'}, {'address': 'aa:bb:cc:dd:ee:11'}
]
# Run method
p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info,
'host_uuid')
resp = p_vifs.execute(self.mock_lpar_wrap)
# The create should not have been called. The response should have
# been empty.
self.assertEqual(0, mock_vm_crt.call_count)
self.assertEqual([], resp)
# State check shouldn't have even been invoked as no creates were
# required
self.assertEqual(0, self.mock_lpar_wrap.can_modify_io.call_count)
@mock.patch('nova_powervm.virt.powervm.vm.crt_vif')
@mock.patch('nova_powervm.virt.powervm.vm.get_cnas')
def test_plug_vifs_invalid_state(self, mock_vm_get, mock_vm_crt):
"""Tests that a crt_vif fails when the LPAR state is bad."""
inst = objects.Instance(**powervm.TEST_INSTANCE)
# Mock up the CNA response. Only doing one for simplicity
mock_vm_get.return_value = []
net_info = [{'address': 'aa:bb:cc:dd:ee:ff'}]
# Mock that the state is incorrect
self.mock_lpar_wrap.can_modify_io.return_value = False, 'bad'
# Run method
p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info,
'host_uuid')
self.assertRaises(exception.VirtualInterfaceCreateException,
p_vifs.execute, self.mock_lpar_wrap)
# The create should not have been invoked
self.assertEqual(0, mock_vm_crt.call_count)
@mock.patch('nova_powervm.virt.powervm.vm.crt_vif')
@mock.patch('nova_powervm.virt.powervm.vm.get_cnas')
def test_plug_vifs_timeout(self, mock_vm_get, mock_vm_crt):
"""Tests that crt vif failure via loss of neutron callback."""
inst = objects.Instance(**powervm.TEST_INSTANCE)
# Mock up the CNA response. Only doing one for simplicity
mock_vm_get.return_value = [cna('AABBCCDDEE11')]
# Mock up the network info.
net_info = [{'address': 'aa:bb:cc:dd:ee:ff'}]
# Ensure that an exception is raised by a timeout.
mock_vm_crt.side_effect = eventlet.timeout.Timeout()
# Run method
p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info,
'host_uuid')
self.assertRaises(exception.VirtualInterfaceCreateException,
p_vifs.execute, self.mock_lpar_wrap)
# The create should have only been called once.
self.assertEqual(1, mock_vm_crt.call_count)
@mock.patch('nova_powervm.virt.powervm.vm.crt_secure_rmc_vif')
@mock.patch('nova_powervm.virt.powervm.vm.get_secure_rmc_vswitch')
@mock.patch('nova_powervm.virt.powervm.vm.crt_vif')
@mock.patch('nova_powervm.virt.powervm.vm.get_cnas')
def test_plug_mgmt_vif(self, mock_vm_get, mock_vm_crt,
mock_get_rmc_vswitch, mock_crt_rmc_vif):
"""Tests that a mgmt vif can be created."""
inst = objects.Instance(**powervm.TEST_INSTANCE)
# Mock up the rmc vswitch
vswitch_w = mock.MagicMock()
vswitch_w.href = 'fake_mgmt_uri'
mock_get_rmc_vswitch.return_value = vswitch_w
# Run method
p_vifs = tf_net.PlugMgmtVif(self.apt, inst, 'host_uuid')
p_vifs.execute([])
# The create should have only been called once.
self.assertEqual(1, mock_crt_rmc_vif.call_count)
@mock.patch('nova.utils.is_neutron')
def test_get_vif_events(self, mock_is_neutron):
# Set up common mocks.
inst = objects.Instance(**powervm.TEST_INSTANCE)
net_info = [mock.MagicMock(), mock.MagicMock()]
net_info[0]['id'] = 'a'
net_info[0].get.return_value = False
net_info[1]['id'] = 'b'
net_info[1].get.return_value = True
# Set up the runner.
p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info,
'host_uuid')
# Mock that neutron is off.
mock_is_neutron.return_value = False
self.assertEqual([], p_vifs._get_vif_events())
# Turn neutron on.
mock_is_neutron.return_value = True
resp = p_vifs._get_vif_events()
# Only one should be returned since only one was active.
self.assertEqual(1, len(resp))
|
[
"nova_powervm.virt.powervm.tasks.network.PlugMgmtVif",
"nova.objects.Instance",
"mock.patch",
"nova_powervm.virt.powervm.tasks.network.UnplugVifs",
"pypowervm.tests.test_fixtures.AdapterFx",
"mock.Mock",
"eventlet.timeout.Timeout",
"mock.MagicMock"
] |
[((978, 994), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (992, 994), False, 'import mock\n'), ((1342, 1393), 'mock.patch', 'mock.patch', (['"""nova_powervm.virt.powervm.vm.get_cnas"""'], {}), "('nova_powervm.virt.powervm.vm.get_cnas')\n", (1352, 1393), False, 'import mock\n'), ((3003, 3053), 'mock.patch', 'mock.patch', (['"""nova_powervm.virt.powervm.vm.crt_vif"""'], {}), "('nova_powervm.virt.powervm.vm.crt_vif')\n", (3013, 3053), False, 'import mock\n'), ((3059, 3110), 'mock.patch', 'mock.patch', (['"""nova_powervm.virt.powervm.vm.get_cnas"""'], {}), "('nova_powervm.virt.powervm.vm.get_cnas')\n", (3069, 3110), False, 'import mock\n'), ((4037, 4087), 'mock.patch', 'mock.patch', (['"""nova_powervm.virt.powervm.vm.crt_vif"""'], {}), "('nova_powervm.virt.powervm.vm.crt_vif')\n", (4047, 4087), False, 'import mock\n'), ((4093, 4144), 'mock.patch', 'mock.patch', (['"""nova_powervm.virt.powervm.vm.get_cnas"""'], {}), "('nova_powervm.virt.powervm.vm.get_cnas')\n", (4103, 4144), False, 'import mock\n'), ((5256, 5306), 'mock.patch', 'mock.patch', (['"""nova_powervm.virt.powervm.vm.crt_vif"""'], {}), "('nova_powervm.virt.powervm.vm.crt_vif')\n", (5266, 5306), False, 'import mock\n'), ((5312, 5363), 'mock.patch', 'mock.patch', (['"""nova_powervm.virt.powervm.vm.get_cnas"""'], {}), "('nova_powervm.virt.powervm.vm.get_cnas')\n", (5322, 5363), False, 'import mock\n'), ((6220, 6270), 'mock.patch', 'mock.patch', (['"""nova_powervm.virt.powervm.vm.crt_vif"""'], {}), "('nova_powervm.virt.powervm.vm.crt_vif')\n", (6230, 6270), False, 'import mock\n'), ((6276, 6327), 'mock.patch', 'mock.patch', (['"""nova_powervm.virt.powervm.vm.get_cnas"""'], {}), "('nova_powervm.virt.powervm.vm.get_cnas')\n", (6286, 6327), False, 'import mock\n'), ((7249, 7310), 'mock.patch', 'mock.patch', (['"""nova_powervm.virt.powervm.vm.crt_secure_rmc_vif"""'], {}), "('nova_powervm.virt.powervm.vm.crt_secure_rmc_vif')\n", (7259, 7310), False, 'import mock\n'), ((7316, 7381), 'mock.patch', 'mock.patch', (['"""nova_powervm.virt.powervm.vm.get_secure_rmc_vswitch"""'], {}), "('nova_powervm.virt.powervm.vm.get_secure_rmc_vswitch')\n", (7326, 7381), False, 'import mock\n'), ((7387, 7437), 'mock.patch', 'mock.patch', (['"""nova_powervm.virt.powervm.vm.crt_vif"""'], {}), "('nova_powervm.virt.powervm.vm.crt_vif')\n", (7397, 7437), False, 'import mock\n'), ((7443, 7494), 'mock.patch', 'mock.patch', (['"""nova_powervm.virt.powervm.vm.get_cnas"""'], {}), "('nova_powervm.virt.powervm.vm.get_cnas')\n", (7453, 7494), False, 'import mock\n'), ((8132, 8167), 'mock.patch', 'mock.patch', (['"""nova.utils.is_neutron"""'], {}), "('nova.utils.is_neutron')\n", (8142, 8167), False, 'import mock\n'), ((1251, 1267), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (1265, 1267), False, 'import mock\n'), ((1512, 1553), 'nova.objects.Instance', 'objects.Instance', ([], {}), '(**powervm.TEST_INSTANCE)\n', (1528, 1553), False, 'from nova import objects\n'), ((2075, 2131), 'nova_powervm.virt.powervm.tasks.network.UnplugVifs', 'tf_net.UnplugVifs', (['self.apt', 'inst', 'net_info', '"""host_uuid"""'], {}), "(self.apt, inst, net_info, 'host_uuid')\n", (2092, 2131), True, 'from nova_powervm.virt.powervm.tasks import network as tf_net\n'), ((2613, 2654), 'nova.objects.Instance', 'objects.Instance', ([], {}), '(**powervm.TEST_INSTANCE)\n', (2629, 2654), False, 'from nova import objects\n'), ((3250, 3291), 'nova.objects.Instance', 'objects.Instance', ([], {}), '(**powervm.TEST_INSTANCE)\n', (3266, 3291), False, 'from nova import objects\n'), ((4294, 4335), 
'nova.objects.Instance', 'objects.Instance', ([], {}), '(**powervm.TEST_INSTANCE)\n', (4310, 4335), False, 'from nova import objects\n'), ((5518, 5559), 'nova.objects.Instance', 'objects.Instance', ([], {}), '(**powervm.TEST_INSTANCE)\n', (5534, 5559), False, 'from nova import objects\n'), ((6478, 6519), 'nova.objects.Instance', 'objects.Instance', ([], {}), '(**powervm.TEST_INSTANCE)\n', (6494, 6519), False, 'from nova import objects\n'), ((6830, 6856), 'eventlet.timeout.Timeout', 'eventlet.timeout.Timeout', ([], {}), '()\n', (6854, 6856), False, 'import eventlet\n'), ((7689, 7730), 'nova.objects.Instance', 'objects.Instance', ([], {}), '(**powervm.TEST_INSTANCE)\n', (7705, 7730), False, 'from nova import objects\n'), ((7786, 7802), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (7800, 7802), False, 'import mock\n'), ((7937, 7984), 'nova_powervm.virt.powervm.tasks.network.PlugMgmtVif', 'tf_net.PlugMgmtVif', (['self.apt', 'inst', '"""host_uuid"""'], {}), "(self.apt, inst, 'host_uuid')\n", (7955, 7984), True, 'from nova_powervm.virt.powervm.tasks import network as tf_net\n'), ((8266, 8307), 'nova.objects.Instance', 'objects.Instance', ([], {}), '(**powervm.TEST_INSTANCE)\n', (8282, 8307), False, 'from nova import objects\n'), ((2842, 2853), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (2851, 2853), False, 'import mock\n'), ((3788, 3804), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (3802, 3804), False, 'import mock\n'), ((4755, 4771), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (4769, 4771), False, 'import mock\n'), ((5889, 5905), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (5903, 5905), False, 'import mock\n'), ((6912, 6928), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (6926, 6928), False, 'import mock\n'), ((8328, 8344), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (8342, 8344), False, 'import mock\n'), ((8346, 8362), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (8360, 8362), False, 'import mock\n'), ((8580, 8596), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (8594, 8596), False, 'import mock\n'), ((1195, 1213), 'pypowervm.tests.test_fixtures.AdapterFx', 'pvm_fx.AdapterFx', ([], {}), '()\n', (1211, 1213), True, 'from pypowervm.tests import test_fixtures as pvm_fx\n')]
|
# Copyright (c) 2019 <NAME>
# MIT license - see LICENSE
"""disk_image scans the disk image candidate directories and returns availabe disk images for loading.
"""
import os, datetime, json, traceback
from ..lib.util import get_triage_logger, init_triage_logger
tlog = get_triage_logger()
global WCE_IMAGES
WCE_IMAGES = "/usr/local/share/wce/wce-disk-images"
IMAGE_META_JSON_FILE = ".disk_image_type.json"
def set_wce_disk_image_dir(dir):
global WCE_IMAGES
WCE_IMAGES = dir
pass
# gets the potential directories to look for disk images
def get_maybe_disk_image_directories():
global WCE_IMAGES
dirs = []
# No longer look for other directories.
# It would make things rather complicated.
if os.path.exists(WCE_IMAGES) and os.path.isdir(WCE_IMAGES) and WCE_IMAGES not in dirs:
dirs.append(WCE_IMAGES)
pass
return dirs
# gets the potential directories to look for disk images
def get_disk_image_list_order():
global WCE_IMAGES
list_order = {}
if os.path.exists(WCE_IMAGES) and os.path.isdir(WCE_IMAGES):
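# Optional '.list-order' file: one catalog directory name per line; earlier lines sort earlier in the listing.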
list_order_path = os.path.join(WCE_IMAGES, ".list-order")
if os.path.exists(list_order_path):
try:
with open(list_order_path) as list_order_fd:
dirs = list_order_fd.readlines()
for i in range(len(dirs)):
list_order[dirs[i].strip()] = i
pass
pass
pass
except:
pass
pass
pass
return list_order
#
#
#
def list_image_files(dirs):
"""lists the images files under the dirs.
:return:
a list of tuples
the shape of tuple: (image_filename, subdir, fullpath)
"""
images = []
for a_dir in dirs:
for direntry in os.listdir(a_dir):
# Anything starting with "." is ignored
if direntry[0:1] == '.':
continue
catalog_dir = os.path.join(a_dir, direntry)
image_meta_file = os.path.join(catalog_dir, IMAGE_META_JSON_FILE)
if not os.path.exists(image_meta_file) or not os.path.isfile(image_meta_file):
continue
if direntry.endswith(".partclone.gz"):
images.append( (direntry, "", catalog_dir) )
pass
if os.path.isdir(catalog_dir):
for direntryinsubdir in os.listdir(catalog_dir):
# Anything starting with "." is ignored
if direntryinsubdir[0:1] == '.':
continue
if direntryinsubdir.endswith(".partclone.gz"):
images.append((direntryinsubdir, direntry, os.path.join(catalog_dir, direntryinsubdir)) )
pass
pass
pass
pass
pass
return images
#
#
#
def get_disk_images(wce_share_url=None):
'''scans the known directories for disk images and returns the list of disk images
:arg:
wce_share_url: prefix for the disk image file.
:returns: list of dict instances.
mtime: file modify time
restoreType: keyword for restore type. [wce|wce-16|triage|clone]
The restore type is nothing more than the name of directory, and
should match exactly to the restore type.
name: filename - this is shown to the user.
size: file size
fullpath: the full path.
..note the entries are deduped by the filename so if two directories
contain the same file name, only one is picked.
'''
# gather disk image files
_images= list_image_files(get_maybe_disk_image_directories())
# Dedup the same file name
images = {}
for image in _images:
fname, subdir, fullpath = image
images[fname] = image
pass
# Sort image listing order
result = []
for filename, image in images.items():
fname, subdir, fullpath = image
filestat = os.stat(fullpath)
mtime = datetime.datetime.fromtimestamp(filestat.st_mtime)
# If wce_share_url is provided, reconstruct the fullpath. HTTP server needs to respond to the route.
if wce_share_url:
fullpath = '{wce_share_url}/wce-disk-images/{restoretype}/{filename}'.format(wce_share_url=wce_share_url, restoretype=subdir, filename=filename)
pass
fattr = { "mtime": mtime.strftime('%Y-%m-%d %H:%M'),
"restoreType" : subdir,
"name": filename,
"fullpath": fullpath,
"size": filestat.st_size,
"subdir": subdir,
"index": len(result) }
result.append(fattr)
pass
list_order = get_disk_image_list_order()
n = len(result)
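# Combined sort key: list-order rank of the subdir first (unknown subdirs last), original index as tie-breaker.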
result.sort(key=lambda x: list_order.get(x["subdir"], len(list_order)) * n + x["index"])
return result
def read_disk_image_types(verbose=False):
'''scans the known directories for disk images and returns the list of disk image types
:arg none
:returns: list of dict instances which is .disk_image_type.json file in the directory.
[ { "id": "wce-18",
"filestem": "wce-mate18",
"name": "WCE Ubuntu 18.04LTS",
"timestamp": true,
"efi_image": ".efi-512M.fat32.partclone.gz",
"partition_map": "gpt",
"hostname": "wce",
"randomize_hostname": true,
"cmdline": {
"acpi_enforce_resources": "lax" ,
"nvme_core.default_ps_max_latency_us": "5500"
}
},
{ ... }
]
'''
image_metas = []
for subdir in get_maybe_disk_image_directories():
if verbose:
print("Checking subdir " + subdir)
pass
index = 0
for direntry in os.listdir(subdir):
catalog_dir = os.path.join(subdir, direntry)
image_meta = read_disk_image_type(catalog_dir)
if verbose:
print("Catalog dir " + catalog_dir)
print(image_meta)
pass
if image_meta:
image_meta['index'] = index
image_metas.append(image_meta)
index += 1
pass
pass
pass
list_order = get_disk_image_list_order()
n = len(image_metas)
if list_order:
image_metas.sort(key=lambda x: list_order.get(x["id"], len(list_order)) * n + x['index'])
pass
return image_metas
def read_disk_image_type(catalog_dir):
'''reads the disk image type file from the directory
:arg dir
:returns: a dict instance from the image-meta
'''
result = None
try:
image_meta_file = os.path.join(catalog_dir, IMAGE_META_JSON_FILE)
if not os.path.exists(image_meta_file) or not os.path.isfile(image_meta_file):
return None
with open(image_meta_file) as meta_file:
result = json.load(meta_file)
pass
pass
except json.decoder.JSONDecodeError:
tlog.debug('catalog_dir %s: JSON parse error. Check the contents.' % catalog_dir)
pass
except:
# If anything goes wrong, just ignore the directory.
tlog.debug('catalog_dir %s: %s' % (catalog_dir, traceback.format_exc()))
pass
#
if result:
result["catalogDirectory"] = catalog_dir
pass
return result
def make_disk_image_name(destdir, inname, filesystem='ext4'):
image_meta = read_disk_image_type(destdir)
if image_meta is None:
if inname is None:
exc_msg = "Directory {dir} does not have '{json_file}' file.".format(dir=destdir, json_file=IMAGE_META_JSON_FILE)
raise Exception(exc_msg)
return inname
imagename = image_meta["filestem"]
if not imagename:
imagename = inname
pass
if image_meta.get("timestamp", False):
timestamp = datetime.date.today().isoformat()
imagename = imagename + "-" + timestamp
pass
# Right now, this is making ext4
imagename = imagename + ".%s.partclone.gz" % filesystem
return os.path.join(destdir, imagename)
def get_file_system_from_source(source):
filesystem_ext = None
tail = ".partclone.gz"
if source.endswith(tail):
source = source[:-len(tail)]
else:
return None
try:
filesystem_ext = os.path.splitext(source)[1][1:]
except:
pass
if filesystem_ext in ['ext4', 'ext3', 'fat32', 'vfat', 'fat16']:
return filesystem_ext
return None
def translate_disk_image_name_to_url(wce_share_url, disk_image_name):
for source in get_disk_images(wce_share_url):
if source["name"] == disk_image_name:
return source
pass
return None
#
if __name__ == "__main__":
print("HELLO")
tlog = init_triage_logger(filename='/tmp/disk_images.log')
print(read_disk_image_types(verbose=True))
print(get_disk_images())
print(get_file_system_from_source("a.ext4.partclone.gz"))
print(get_file_system_from_source("a.ext4.partclone"))
print(get_file_system_from_source("a.partclone.gz"))
print(read_disk_image_type("/usr/local/share/wce/wce-disk-images/triage"))
print("HELLO HELLO")
for disk_image in get_disk_images():
print(translate_disk_image_name_to_url("http://10.3.2.1:8080/wce", disk_image["name"]))
pass
pass
|
[
"json.load",
"os.stat",
"os.path.isdir",
"os.path.exists",
"datetime.date.today",
"os.path.isfile",
"os.path.splitext",
"traceback.format_exc",
"datetime.datetime.fromtimestamp",
"os.path.join",
"os.listdir"
] |
[((7434, 7466), 'os.path.join', 'os.path.join', (['destdir', 'imagename'], {}), '(destdir, imagename)\n', (7446, 7466), False, 'import os, datetime, json, traceback\n'), ((716, 742), 'os.path.exists', 'os.path.exists', (['WCE_IMAGES'], {}), '(WCE_IMAGES)\n', (730, 742), False, 'import os, datetime, json, traceback\n'), ((747, 772), 'os.path.isdir', 'os.path.isdir', (['WCE_IMAGES'], {}), '(WCE_IMAGES)\n', (760, 772), False, 'import os, datetime, json, traceback\n'), ((988, 1014), 'os.path.exists', 'os.path.exists', (['WCE_IMAGES'], {}), '(WCE_IMAGES)\n', (1002, 1014), False, 'import os, datetime, json, traceback\n'), ((1019, 1044), 'os.path.isdir', 'os.path.isdir', (['WCE_IMAGES'], {}), '(WCE_IMAGES)\n', (1032, 1044), False, 'import os, datetime, json, traceback\n'), ((1068, 1107), 'os.path.join', 'os.path.join', (['WCE_IMAGES', '""".list-order"""'], {}), "(WCE_IMAGES, '.list-order')\n", (1080, 1107), False, 'import os, datetime, json, traceback\n'), ((1115, 1146), 'os.path.exists', 'os.path.exists', (['list_order_path'], {}), '(list_order_path)\n', (1129, 1146), False, 'import os, datetime, json, traceback\n'), ((1675, 1692), 'os.listdir', 'os.listdir', (['a_dir'], {}), '(a_dir)\n', (1685, 1692), False, 'import os, datetime, json, traceback\n'), ((3671, 3688), 'os.stat', 'os.stat', (['fullpath'], {}), '(fullpath)\n', (3678, 3688), False, 'import os, datetime, json, traceback\n'), ((3701, 3751), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['filestat.st_mtime'], {}), '(filestat.st_mtime)\n', (3732, 3751), False, 'import os, datetime, json, traceback\n'), ((5341, 5359), 'os.listdir', 'os.listdir', (['subdir'], {}), '(subdir)\n', (5351, 5359), False, 'import os, datetime, json, traceback\n'), ((6135, 6182), 'os.path.join', 'os.path.join', (['catalog_dir', 'IMAGE_META_JSON_FILE'], {}), '(catalog_dir, IMAGE_META_JSON_FILE)\n', (6147, 6182), False, 'import os, datetime, json, traceback\n'), ((1808, 1837), 'os.path.join', 'os.path.join', (['a_dir', 'direntry'], {}), '(a_dir, direntry)\n', (1820, 1837), False, 'import os, datetime, json, traceback\n'), ((1862, 1909), 'os.path.join', 'os.path.join', (['catalog_dir', 'IMAGE_META_JSON_FILE'], {}), '(catalog_dir, IMAGE_META_JSON_FILE)\n', (1874, 1909), False, 'import os, datetime, json, traceback\n'), ((2132, 2158), 'os.path.isdir', 'os.path.isdir', (['catalog_dir'], {}), '(catalog_dir)\n', (2145, 2158), False, 'import os, datetime, json, traceback\n'), ((5381, 5411), 'os.path.join', 'os.path.join', (['subdir', 'direntry'], {}), '(subdir, direntry)\n', (5393, 5411), False, 'import os, datetime, json, traceback\n'), ((6347, 6367), 'json.load', 'json.load', (['meta_file'], {}), '(meta_file)\n', (6356, 6367), False, 'import os, datetime, json, traceback\n'), ((2192, 2215), 'os.listdir', 'os.listdir', (['catalog_dir'], {}), '(catalog_dir)\n', (2202, 2215), False, 'import os, datetime, json, traceback\n'), ((6194, 6225), 'os.path.exists', 'os.path.exists', (['image_meta_file'], {}), '(image_meta_file)\n', (6208, 6225), False, 'import os, datetime, json, traceback\n'), ((6233, 6264), 'os.path.isfile', 'os.path.isfile', (['image_meta_file'], {}), '(image_meta_file)\n', (6247, 6264), False, 'import os, datetime, json, traceback\n'), ((7245, 7266), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (7264, 7266), False, 'import os, datetime, json, traceback\n'), ((7672, 7696), 'os.path.splitext', 'os.path.splitext', (['source'], {}), '(source)\n', (7688, 7696), False, 'import os, datetime, json, traceback\n'), ((1923, 
1954), 'os.path.exists', 'os.path.exists', (['image_meta_file'], {}), '(image_meta_file)\n', (1937, 1954), False, 'import os, datetime, json, traceback\n'), ((1962, 1993), 'os.path.isfile', 'os.path.isfile', (['image_meta_file'], {}), '(image_meta_file)\n', (1976, 1993), False, 'import os, datetime, json, traceback\n'), ((6642, 6664), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (6662, 6664), False, 'import os, datetime, json, traceback\n'), ((2443, 2486), 'os.path.join', 'os.path.join', (['catalog_dir', 'direntryinsubdir'], {}), '(catalog_dir, direntryinsubdir)\n', (2455, 2486), False, 'import os, datetime, json, traceback\n')]
|
import pywer
references = [
"this is a simple python package",
"it calculates word error rate",
"it can also calculate cer",
]
hypotheses = [
"this is the simple python package",
"it calculates word error",
"it can also calculate see er",
]
wer = pywer.wer(references, hypotheses)
cer = pywer.cer(references, hypotheses)
print(f"WER: {wer:.2f}, CER: {cer:.2f}")
|
[
"pywer.cer",
"pywer.wer"
] |
[((273, 306), 'pywer.wer', 'pywer.wer', (['references', 'hypotheses'], {}), '(references, hypotheses)\n', (282, 306), False, 'import pywer\n'), ((313, 346), 'pywer.cer', 'pywer.cer', (['references', 'hypotheses'], {}), '(references, hypotheses)\n', (322, 346), False, 'import pywer\n')]
|
# coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from logicmonitor_sdk.models.device_datasource_graph import DeviceDatasourceGraph # noqa: F401,E501
from logicmonitor_sdk.models.tree_node import TreeNode # noqa: F401,E501
class DeviceDataSource(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'alert_status': 'str',
'auto_discovery': 'bool',
'data_source_display_name': 'str',
'device_id': 'int',
'device_name': 'str',
'created_on': 'int',
'collect_method': 'str',
'data_source_id': 'int',
'graphs': 'list[DeviceDatasourceGraph]',
'sdt_at': 'str',
'next_auto_discovery_on': 'int',
'id': 'int',
'alert_status_priority': 'int',
'alert_disable_status': 'str',
'data_source_description': 'str',
'overview_graphs': 'list[DeviceDatasourceGraph]',
'stop_monitoring': 'bool',
'assigned_on': 'int',
'is_multiple': 'bool',
'instance_number': 'int',
'updated_on': 'int',
'sdt_status': 'str',
'data_source_name': 'str',
'device_display_name': 'str',
'monitoring_instance_number': 'int',
'groups_disabled_this_source': 'list[TreeNode]',
'group_name': 'str',
'instance_auto_group_enabled': 'bool',
'alerting_disabled_on': 'TreeNode',
'data_source_type': 'str',
'status': 'int'
}
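# attribute_map below maps these snake_case attribute names to the camelCase keys used in the REST payload.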
attribute_map = {
'alert_status': 'alertStatus',
'auto_discovery': 'autoDiscovery',
'data_source_display_name': 'dataSourceDisplayName',
'device_id': 'deviceId',
'device_name': 'deviceName',
'created_on': 'createdOn',
'collect_method': 'collectMethod',
'data_source_id': 'dataSourceId',
'graphs': 'graphs',
'sdt_at': 'sdtAt',
'next_auto_discovery_on': 'nextAutoDiscoveryOn',
'id': 'id',
'alert_status_priority': 'alertStatusPriority',
'alert_disable_status': 'alertDisableStatus',
'data_source_description': 'dataSourceDescription',
'overview_graphs': 'overviewGraphs',
'stop_monitoring': 'stopMonitoring',
'assigned_on': 'assignedOn',
'is_multiple': 'isMultiple',
'instance_number': 'instanceNumber',
'updated_on': 'updatedOn',
'sdt_status': 'sdtStatus',
'data_source_name': 'dataSourceName',
'device_display_name': 'deviceDisplayName',
'monitoring_instance_number': 'monitoringInstanceNumber',
'groups_disabled_this_source': 'groupsDisabledThisSource',
'group_name': 'groupName',
'instance_auto_group_enabled': 'instanceAutoGroupEnabled',
'alerting_disabled_on': 'alertingDisabledOn',
'data_source_type': 'dataSourceType',
'status': 'status'
}
def __init__(self, alert_status=None, auto_discovery=None, data_source_display_name=None, device_id=None, device_name=None, created_on=None, collect_method=None, data_source_id=None, graphs=None, sdt_at=None, next_auto_discovery_on=None, id=None, alert_status_priority=None, alert_disable_status=None, data_source_description=None, overview_graphs=None, stop_monitoring=None, assigned_on=None, is_multiple=None, instance_number=None, updated_on=None, sdt_status=None, data_source_name=None, device_display_name=None, monitoring_instance_number=None, groups_disabled_this_source=None, group_name=None, instance_auto_group_enabled=None, alerting_disabled_on=None, data_source_type=None, status=None): # noqa: E501
"""DeviceDataSource - a model defined in Swagger""" # noqa: E501
self._alert_status = None
self._auto_discovery = None
self._data_source_display_name = None
self._device_id = None
self._device_name = None
self._created_on = None
self._collect_method = None
self._data_source_id = None
self._graphs = None
self._sdt_at = None
self._next_auto_discovery_on = None
self._id = None
self._alert_status_priority = None
self._alert_disable_status = None
self._data_source_description = None
self._overview_graphs = None
self._stop_monitoring = None
self._assigned_on = None
self._is_multiple = None
self._instance_number = None
self._updated_on = None
self._sdt_status = None
self._data_source_name = None
self._device_display_name = None
self._monitoring_instance_number = None
self._groups_disabled_this_source = None
self._group_name = None
self._instance_auto_group_enabled = None
self._alerting_disabled_on = None
self._data_source_type = None
self._status = None
self.discriminator = None
if alert_status is not None:
self.alert_status = alert_status
if auto_discovery is not None:
self.auto_discovery = auto_discovery
if data_source_display_name is not None:
self.data_source_display_name = data_source_display_name
if device_id is not None:
self.device_id = device_id
if device_name is not None:
self.device_name = device_name
if created_on is not None:
self.created_on = created_on
if collect_method is not None:
self.collect_method = collect_method
if data_source_id is not None:
self.data_source_id = data_source_id
if graphs is not None:
self.graphs = graphs
if sdt_at is not None:
self.sdt_at = sdt_at
if next_auto_discovery_on is not None:
self.next_auto_discovery_on = next_auto_discovery_on
if id is not None:
self.id = id
if alert_status_priority is not None:
self.alert_status_priority = alert_status_priority
if alert_disable_status is not None:
self.alert_disable_status = alert_disable_status
if data_source_description is not None:
self.data_source_description = data_source_description
if overview_graphs is not None:
self.overview_graphs = overview_graphs
if stop_monitoring is not None:
self.stop_monitoring = stop_monitoring
if assigned_on is not None:
self.assigned_on = assigned_on
if is_multiple is not None:
self.is_multiple = is_multiple
if instance_number is not None:
self.instance_number = instance_number
if updated_on is not None:
self.updated_on = updated_on
if sdt_status is not None:
self.sdt_status = sdt_status
if data_source_name is not None:
self.data_source_name = data_source_name
if device_display_name is not None:
self.device_display_name = device_display_name
if monitoring_instance_number is not None:
self.monitoring_instance_number = monitoring_instance_number
if groups_disabled_this_source is not None:
self.groups_disabled_this_source = groups_disabled_this_source
if group_name is not None:
self.group_name = group_name
if instance_auto_group_enabled is not None:
self.instance_auto_group_enabled = instance_auto_group_enabled
if alerting_disabled_on is not None:
self.alerting_disabled_on = alerting_disabled_on
if data_source_type is not None:
self.data_source_type = data_source_type
if status is not None:
self.status = status
@property
def alert_status(self):
"""Gets the alert_status of this DeviceDataSource. # noqa: E501
:return: The alert_status of this DeviceDataSource. # noqa: E501
:rtype: str
"""
return self._alert_status
@alert_status.setter
def alert_status(self, alert_status):
"""Sets the alert_status of this DeviceDataSource.
:param alert_status: The alert_status of this DeviceDataSource. # noqa: E501
:type: str
"""
self._alert_status = alert_status
@property
def auto_discovery(self):
"""Gets the auto_discovery of this DeviceDataSource. # noqa: E501
:return: The auto_discovery of this DeviceDataSource. # noqa: E501
:rtype: bool
"""
return self._auto_discovery
@auto_discovery.setter
def auto_discovery(self, auto_discovery):
"""Sets the auto_discovery of this DeviceDataSource.
:param auto_discovery: The auto_discovery of this DeviceDataSource. # noqa: E501
:type: bool
"""
self._auto_discovery = auto_discovery
@property
def data_source_display_name(self):
"""Gets the data_source_display_name of this DeviceDataSource. # noqa: E501
:return: The data_source_display_name of this DeviceDataSource. # noqa: E501
:rtype: str
"""
return self._data_source_display_name
@data_source_display_name.setter
def data_source_display_name(self, data_source_display_name):
"""Sets the data_source_display_name of this DeviceDataSource.
:param data_source_display_name: The data_source_display_name of this DeviceDataSource. # noqa: E501
:type: str
"""
self._data_source_display_name = data_source_display_name
@property
def device_id(self):
"""Gets the device_id of this DeviceDataSource. # noqa: E501
:return: The device_id of this DeviceDataSource. # noqa: E501
:rtype: int
"""
return self._device_id
@device_id.setter
def device_id(self, device_id):
"""Sets the device_id of this DeviceDataSource.
:param device_id: The device_id of this DeviceDataSource. # noqa: E501
:type: int
"""
self._device_id = device_id
@property
def device_name(self):
"""Gets the device_name of this DeviceDataSource. # noqa: E501
:return: The device_name of this DeviceDataSource. # noqa: E501
:rtype: str
"""
return self._device_name
@device_name.setter
def device_name(self, device_name):
"""Sets the device_name of this DeviceDataSource.
:param device_name: The device_name of this DeviceDataSource. # noqa: E501
:type: str
"""
self._device_name = device_name
@property
def created_on(self):
"""Gets the created_on of this DeviceDataSource. # noqa: E501
:return: The created_on of this DeviceDataSource. # noqa: E501
:rtype: int
"""
return self._created_on
@created_on.setter
def created_on(self, created_on):
"""Sets the created_on of this DeviceDataSource.
:param created_on: The created_on of this DeviceDataSource. # noqa: E501
:type: int
"""
self._created_on = created_on
@property
def collect_method(self):
"""Gets the collect_method of this DeviceDataSource. # noqa: E501
:return: The collect_method of this DeviceDataSource. # noqa: E501
:rtype: str
"""
return self._collect_method
@collect_method.setter
def collect_method(self, collect_method):
"""Sets the collect_method of this DeviceDataSource.
:param collect_method: The collect_method of this DeviceDataSource. # noqa: E501
:type: str
"""
self._collect_method = collect_method
@property
def data_source_id(self):
"""Gets the data_source_id of this DeviceDataSource. # noqa: E501
:return: The data_source_id of this DeviceDataSource. # noqa: E501
:rtype: int
"""
return self._data_source_id
@data_source_id.setter
def data_source_id(self, data_source_id):
"""Sets the data_source_id of this DeviceDataSource.
:param data_source_id: The data_source_id of this DeviceDataSource. # noqa: E501
:type: int
"""
self._data_source_id = data_source_id
@property
def graphs(self):
"""Gets the graphs of this DeviceDataSource. # noqa: E501
:return: The graphs of this DeviceDataSource. # noqa: E501
:rtype: list[DeviceDatasourceGraph]
"""
return self._graphs
@graphs.setter
def graphs(self, graphs):
"""Sets the graphs of this DeviceDataSource.
:param graphs: The graphs of this DeviceDataSource. # noqa: E501
:type: list[DeviceDatasourceGraph]
"""
self._graphs = graphs
@property
def sdt_at(self):
"""Gets the sdt_at of this DeviceDataSource. # noqa: E501
:return: The sdt_at of this DeviceDataSource. # noqa: E501
:rtype: str
"""
return self._sdt_at
@sdt_at.setter
def sdt_at(self, sdt_at):
"""Sets the sdt_at of this DeviceDataSource.
:param sdt_at: The sdt_at of this DeviceDataSource. # noqa: E501
:type: str
"""
self._sdt_at = sdt_at
@property
def next_auto_discovery_on(self):
"""Gets the next_auto_discovery_on of this DeviceDataSource. # noqa: E501
:return: The next_auto_discovery_on of this DeviceDataSource. # noqa: E501
:rtype: int
"""
return self._next_auto_discovery_on
@next_auto_discovery_on.setter
def next_auto_discovery_on(self, next_auto_discovery_on):
"""Sets the next_auto_discovery_on of this DeviceDataSource.
:param next_auto_discovery_on: The next_auto_discovery_on of this DeviceDataSource. # noqa: E501
:type: int
"""
self._next_auto_discovery_on = next_auto_discovery_on
@property
def id(self):
"""Gets the id of this DeviceDataSource. # noqa: E501
:return: The id of this DeviceDataSource. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this DeviceDataSource.
:param id: The id of this DeviceDataSource. # noqa: E501
:type: int
"""
self._id = id
@property
def alert_status_priority(self):
"""Gets the alert_status_priority of this DeviceDataSource. # noqa: E501
:return: The alert_status_priority of this DeviceDataSource. # noqa: E501
:rtype: int
"""
return self._alert_status_priority
@alert_status_priority.setter
def alert_status_priority(self, alert_status_priority):
"""Sets the alert_status_priority of this DeviceDataSource.
:param alert_status_priority: The alert_status_priority of this DeviceDataSource. # noqa: E501
:type: int
"""
self._alert_status_priority = alert_status_priority
@property
def alert_disable_status(self):
"""Gets the alert_disable_status of this DeviceDataSource. # noqa: E501
:return: The alert_disable_status of this DeviceDataSource. # noqa: E501
:rtype: str
"""
return self._alert_disable_status
@alert_disable_status.setter
def alert_disable_status(self, alert_disable_status):
"""Sets the alert_disable_status of this DeviceDataSource.
:param alert_disable_status: The alert_disable_status of this DeviceDataSource. # noqa: E501
:type: str
"""
self._alert_disable_status = alert_disable_status
@property
def data_source_description(self):
"""Gets the data_source_description of this DeviceDataSource. # noqa: E501
:return: The data_source_description of this DeviceDataSource. # noqa: E501
:rtype: str
"""
return self._data_source_description
@data_source_description.setter
def data_source_description(self, data_source_description):
"""Sets the data_source_description of this DeviceDataSource.
:param data_source_description: The data_source_description of this DeviceDataSource. # noqa: E501
:type: str
"""
self._data_source_description = data_source_description
@property
def overview_graphs(self):
"""Gets the overview_graphs of this DeviceDataSource. # noqa: E501
:return: The overview_graphs of this DeviceDataSource. # noqa: E501
:rtype: list[DeviceDatasourceGraph]
"""
return self._overview_graphs
@overview_graphs.setter
def overview_graphs(self, overview_graphs):
"""Sets the overview_graphs of this DeviceDataSource.
:param overview_graphs: The overview_graphs of this DeviceDataSource. # noqa: E501
:type: list[DeviceDatasourceGraph]
"""
self._overview_graphs = overview_graphs
@property
def stop_monitoring(self):
"""Gets the stop_monitoring of this DeviceDataSource. # noqa: E501
:return: The stop_monitoring of this DeviceDataSource. # noqa: E501
:rtype: bool
"""
return self._stop_monitoring
@stop_monitoring.setter
def stop_monitoring(self, stop_monitoring):
"""Sets the stop_monitoring of this DeviceDataSource.
:param stop_monitoring: The stop_monitoring of this DeviceDataSource. # noqa: E501
:type: bool
"""
self._stop_monitoring = stop_monitoring
@property
def assigned_on(self):
"""Gets the assigned_on of this DeviceDataSource. # noqa: E501
:return: The assigned_on of this DeviceDataSource. # noqa: E501
:rtype: int
"""
return self._assigned_on
@assigned_on.setter
def assigned_on(self, assigned_on):
"""Sets the assigned_on of this DeviceDataSource.
:param assigned_on: The assigned_on of this DeviceDataSource. # noqa: E501
:type: int
"""
self._assigned_on = assigned_on
@property
def is_multiple(self):
"""Gets the is_multiple of this DeviceDataSource. # noqa: E501
:return: The is_multiple of this DeviceDataSource. # noqa: E501
:rtype: bool
"""
return self._is_multiple
@is_multiple.setter
def is_multiple(self, is_multiple):
"""Sets the is_multiple of this DeviceDataSource.
:param is_multiple: The is_multiple of this DeviceDataSource. # noqa: E501
:type: bool
"""
self._is_multiple = is_multiple
@property
def instance_number(self):
"""Gets the instance_number of this DeviceDataSource. # noqa: E501
:return: The instance_number of this DeviceDataSource. # noqa: E501
:rtype: int
"""
return self._instance_number
@instance_number.setter
def instance_number(self, instance_number):
"""Sets the instance_number of this DeviceDataSource.
:param instance_number: The instance_number of this DeviceDataSource. # noqa: E501
:type: int
"""
self._instance_number = instance_number
@property
def updated_on(self):
"""Gets the updated_on of this DeviceDataSource. # noqa: E501
:return: The updated_on of this DeviceDataSource. # noqa: E501
:rtype: int
"""
return self._updated_on
@updated_on.setter
def updated_on(self, updated_on):
"""Sets the updated_on of this DeviceDataSource.
:param updated_on: The updated_on of this DeviceDataSource. # noqa: E501
:type: int
"""
self._updated_on = updated_on
@property
def sdt_status(self):
"""Gets the sdt_status of this DeviceDataSource. # noqa: E501
:return: The sdt_status of this DeviceDataSource. # noqa: E501
:rtype: str
"""
return self._sdt_status
@sdt_status.setter
def sdt_status(self, sdt_status):
"""Sets the sdt_status of this DeviceDataSource.
:param sdt_status: The sdt_status of this DeviceDataSource. # noqa: E501
:type: str
"""
self._sdt_status = sdt_status
@property
def data_source_name(self):
"""Gets the data_source_name of this DeviceDataSource. # noqa: E501
:return: The data_source_name of this DeviceDataSource. # noqa: E501
:rtype: str
"""
return self._data_source_name
@data_source_name.setter
def data_source_name(self, data_source_name):
"""Sets the data_source_name of this DeviceDataSource.
:param data_source_name: The data_source_name of this DeviceDataSource. # noqa: E501
:type: str
"""
self._data_source_name = data_source_name
@property
def device_display_name(self):
"""Gets the device_display_name of this DeviceDataSource. # noqa: E501
:return: The device_display_name of this DeviceDataSource. # noqa: E501
:rtype: str
"""
return self._device_display_name
@device_display_name.setter
def device_display_name(self, device_display_name):
"""Sets the device_display_name of this DeviceDataSource.
:param device_display_name: The device_display_name of this DeviceDataSource. # noqa: E501
:type: str
"""
self._device_display_name = device_display_name
@property
def monitoring_instance_number(self):
"""Gets the monitoring_instance_number of this DeviceDataSource. # noqa: E501
:return: The monitoring_instance_number of this DeviceDataSource. # noqa: E501
:rtype: int
"""
return self._monitoring_instance_number
@monitoring_instance_number.setter
def monitoring_instance_number(self, monitoring_instance_number):
"""Sets the monitoring_instance_number of this DeviceDataSource.
:param monitoring_instance_number: The monitoring_instance_number of this DeviceDataSource. # noqa: E501
:type: int
"""
self._monitoring_instance_number = monitoring_instance_number
@property
def groups_disabled_this_source(self):
"""Gets the groups_disabled_this_source of this DeviceDataSource. # noqa: E501
:return: The groups_disabled_this_source of this DeviceDataSource. # noqa: E501
:rtype: list[TreeNode]
"""
return self._groups_disabled_this_source
@groups_disabled_this_source.setter
def groups_disabled_this_source(self, groups_disabled_this_source):
"""Sets the groups_disabled_this_source of this DeviceDataSource.
:param groups_disabled_this_source: The groups_disabled_this_source of this DeviceDataSource. # noqa: E501
:type: list[TreeNode]
"""
self._groups_disabled_this_source = groups_disabled_this_source
@property
def group_name(self):
"""Gets the group_name of this DeviceDataSource. # noqa: E501
:return: The group_name of this DeviceDataSource. # noqa: E501
:rtype: str
"""
return self._group_name
@group_name.setter
def group_name(self, group_name):
"""Sets the group_name of this DeviceDataSource.
:param group_name: The group_name of this DeviceDataSource. # noqa: E501
:type: str
"""
self._group_name = group_name
@property
def instance_auto_group_enabled(self):
"""Gets the instance_auto_group_enabled of this DeviceDataSource. # noqa: E501
:return: The instance_auto_group_enabled of this DeviceDataSource. # noqa: E501
:rtype: bool
"""
return self._instance_auto_group_enabled
@instance_auto_group_enabled.setter
def instance_auto_group_enabled(self, instance_auto_group_enabled):
"""Sets the instance_auto_group_enabled of this DeviceDataSource.
:param instance_auto_group_enabled: The instance_auto_group_enabled of this DeviceDataSource. # noqa: E501
:type: bool
"""
self._instance_auto_group_enabled = instance_auto_group_enabled
@property
def alerting_disabled_on(self):
"""Gets the alerting_disabled_on of this DeviceDataSource. # noqa: E501
:return: The alerting_disabled_on of this DeviceDataSource. # noqa: E501
:rtype: TreeNode
"""
return self._alerting_disabled_on
@alerting_disabled_on.setter
def alerting_disabled_on(self, alerting_disabled_on):
"""Sets the alerting_disabled_on of this DeviceDataSource.
:param alerting_disabled_on: The alerting_disabled_on of this DeviceDataSource. # noqa: E501
:type: TreeNode
"""
self._alerting_disabled_on = alerting_disabled_on
@property
def data_source_type(self):
"""Gets the data_source_type of this DeviceDataSource. # noqa: E501
:return: The data_source_type of this DeviceDataSource. # noqa: E501
:rtype: str
"""
return self._data_source_type
@data_source_type.setter
def data_source_type(self, data_source_type):
"""Sets the data_source_type of this DeviceDataSource.
:param data_source_type: The data_source_type of this DeviceDataSource. # noqa: E501
:type: str
"""
self._data_source_type = data_source_type
@property
def status(self):
"""Gets the status of this DeviceDataSource. # noqa: E501
:return: The status of this DeviceDataSource. # noqa: E501
:rtype: int
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this DeviceDataSource.
:param status: The status of this DeviceDataSource. # noqa: E501
:type: int
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DeviceDataSource, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeviceDataSource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
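# Hypothetical round-trip sketch (commented out; assumes the generated __init__
# accepts these snake_case properties as keyword arguments, as swagger-codegen
# models typically do):
# ds = DeviceDataSource(id=42, data_source_name='Ping', stop_monitoring=False)
# assert ds.to_dict()['data_source_name'] == 'Ping'
# assert ds == DeviceDataSource(id=42, data_source_name='Ping', stop_monitoring=False)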
|
[
"six.iteritems"
] |
[((26750, 26783), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (26763, 26783), False, 'import six\n')]
|
import cv2, sys, os
import numpy as np
haar_file = 'haarcascade_frontalface_default.xml'
datasets = 'datasets'
print('Recognizing face, please make sure the lighting is sufficient...')
(images, labels, names, id) = ([], [], {}, 0)
for (subdirs, dirs, files) in os.walk(datasets):
    for subdir in dirs:
        names[id] = subdir
        subjectpath = os.path.join(datasets, subdir)
        for filename in os.listdir(subjectpath):
            path = subjectpath + '/' + filename
            label = id
            images.append(cv2.imread(path, 0))
            labels.append(int(label))
        id += 1
(width, height) = (130, 100)
(images, labels) = [np.array(lis) for lis in [images, labels]]
model = cv2.face.LBPHFaceRecognizer_create()
model.train(images, labels)
face_cascade = cv2.CascadeClassifier(haar_file)
webcam = cv2.VideoCapture(0)
while True:
(_, im) = webcam.read()
(_, im2) = webcam.read()
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
lower_blue = np.array([110,50,50])
upper_blue = np.array([130,255,255])
for (x, y, w, h) in faces:
cv2.rectangle(gray, (x, y), (x + w, y + h), (255, 0, 0), 2)
cv2.rectangle(im, (x, y), (x + w, y + h), (255, 0, 0), 2)
face = gray[y:y + h, x:x + w]
face_resize = cv2.resize(face, (width, height))
prediction = model.predict(face_resize)
cv2.rectangle(gray, (x, y), (x + w, y + h), (0, 255, 0), 3)
cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 3)
if prediction[1]<100:
cv2.putText(gray, 'The person of % s - %.0f' %(names[prediction[0]], prediction[1]), (x-10, y-10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2)
cv2.putText(im, 'The person of % s - %.0f' %(names[prediction[0]], prediction[1]), (x-10, y-10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2)
else:
cv2.putText(gray, 'Not Recognized', (x-10, y-10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2)
cv2.putText(im, 'Not Recognized', (x-10, y-10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2)
mask = cv2.inRange(hsv, lower_blue, upper_blue)
cv2.imshow('Window 1', im)
cv2.imshow('Window 2', im2)
cv2.imshow('Window 3', gray)
cv2.imshow('Window 4', mask)
key = cv2.waitKey(10)
if key == 27:
cv2.destroyAllWindows()
break
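# Optional persistence sketch (assumption: the opencv-contrib FaceRecognizer
# read/write API is available): after training, model.write('lbph_model.yml')
# would save the LBPH histograms, and a later run could call
# model.read('lbph_model.yml') instead of retraining on the dataset folder.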
|
[
"cv2.face.LBPHFaceRecognizer_create",
"os.path.join",
"cv2.putText",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.destroyAllWindows",
"os.walk",
"cv2.VideoCapture",
"cv2.rectangle",
"cv2.imread",
"numpy.array",
"cv2.CascadeClassifier",
"cv2.imshow",
"cv2.inRange",
"os.listdir",
"cv2.resize"
] |
[((252, 269), 'os.walk', 'os.walk', (['datasets'], {}), '(datasets)\n', (259, 269), False, 'import cv2, sys, os\n'), ((645, 681), 'cv2.face.LBPHFaceRecognizer_create', 'cv2.face.LBPHFaceRecognizer_create', ([], {}), '()\n', (679, 681), False, 'import cv2, sys, os\n'), ((727, 759), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['haar_file'], {}), '(haar_file)\n', (748, 759), False, 'import cv2, sys, os\n'), ((770, 789), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (786, 789), False, 'import cv2, sys, os\n'), ((592, 605), 'numpy.array', 'np.array', (['lis'], {}), '(lis)\n', (600, 605), True, 'import numpy as np\n'), ((868, 904), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2GRAY'], {}), '(im, cv2.COLOR_BGR2GRAY)\n', (880, 904), False, 'import cv2, sys, os\n'), ((967, 1002), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2HSV'], {}), '(im, cv2.COLOR_BGR2HSV)\n', (979, 1002), False, 'import cv2, sys, os\n'), ((1017, 1040), 'numpy.array', 'np.array', (['[110, 50, 50]'], {}), '([110, 50, 50])\n', (1025, 1040), True, 'import numpy as np\n'), ((1053, 1078), 'numpy.array', 'np.array', (['[130, 255, 255]'], {}), '([130, 255, 255])\n', (1061, 1078), True, 'import numpy as np\n'), ((2020, 2060), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower_blue', 'upper_blue'], {}), '(hsv, lower_blue, upper_blue)\n', (2031, 2060), False, 'import cv2, sys, os\n'), ((2063, 2089), 'cv2.imshow', 'cv2.imshow', (['"""Window 1"""', 'im'], {}), "('Window 1', im)\n", (2073, 2089), False, 'import cv2, sys, os\n'), ((2091, 2118), 'cv2.imshow', 'cv2.imshow', (['"""Window 2"""', 'im2'], {}), "('Window 2', im2)\n", (2101, 2118), False, 'import cv2, sys, os\n'), ((2120, 2148), 'cv2.imshow', 'cv2.imshow', (['"""Window 3"""', 'gray'], {}), "('Window 3', gray)\n", (2130, 2148), False, 'import cv2, sys, os\n'), ((2150, 2178), 'cv2.imshow', 'cv2.imshow', (['"""Window 4"""', 'mask'], {}), "('Window 4', mask)\n", (2160, 2178), False, 'import cv2, sys, os\n'), ((2188, 2203), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (2199, 2203), False, 'import cv2, sys, os\n'), ((332, 362), 'os.path.join', 'os.path.join', (['datasets', 'subdir'], {}), '(datasets, subdir)\n', (344, 362), False, 'import cv2, sys, os\n'), ((382, 405), 'os.listdir', 'os.listdir', (['subjectpath'], {}), '(subjectpath)\n', (392, 405), False, 'import cv2, sys, os\n'), ((1109, 1168), 'cv2.rectangle', 'cv2.rectangle', (['gray', '(x, y)', '(x + w, y + h)', '(255, 0, 0)', '(2)'], {}), '(gray, (x, y), (x + w, y + h), (255, 0, 0), 2)\n', (1122, 1168), False, 'import cv2, sys, os\n'), ((1172, 1229), 'cv2.rectangle', 'cv2.rectangle', (['im', '(x, y)', '(x + w, y + h)', '(255, 0, 0)', '(2)'], {}), '(im, (x, y), (x + w, y + h), (255, 0, 0), 2)\n', (1185, 1229), False, 'import cv2, sys, os\n'), ((1280, 1313), 'cv2.resize', 'cv2.resize', (['face', '(width, height)'], {}), '(face, (width, height))\n', (1290, 1313), False, 'import cv2, sys, os\n'), ((1363, 1422), 'cv2.rectangle', 'cv2.rectangle', (['gray', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(3)'], {}), '(gray, (x, y), (x + w, y + h), (0, 255, 0), 3)\n', (1376, 1422), False, 'import cv2, sys, os\n'), ((1426, 1483), 'cv2.rectangle', 'cv2.rectangle', (['im', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(3)'], {}), '(im, (x, y), (x + w, y + h), (0, 255, 0), 3)\n', (1439, 1483), False, 'import cv2, sys, os\n'), ((2223, 2246), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2244, 2246), False, 'import cv2, sys, os\n'), ((1516, 1671), 'cv2.putText', 'cv2.putText', 
(['gray', "('The person of % s - %.0f' % (names[prediction[0]], prediction[1]))", '(x - 10, y - 10)', 'cv2.FONT_HERSHEY_PLAIN', '(1)', '(0, 0, 255)', '(2)'], {}), "(gray, 'The person of % s - %.0f' % (names[prediction[0]],\n prediction[1]), (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2\n )\n", (1527, 1671), False, 'import cv2, sys, os\n'), ((1665, 1818), 'cv2.putText', 'cv2.putText', (['im', "('The person of % s - %.0f' % (names[prediction[0]], prediction[1]))", '(x - 10, y - 10)', 'cv2.FONT_HERSHEY_PLAIN', '(1)', '(0, 0, 255)', '(2)'], {}), "(im, 'The person of % s - %.0f' % (names[prediction[0]],\n prediction[1]), (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2\n )\n", (1676, 1818), False, 'import cv2, sys, os\n'), ((1821, 1922), 'cv2.putText', 'cv2.putText', (['gray', '"""Not Recognized"""', '(x - 10, y - 10)', 'cv2.FONT_HERSHEY_PLAIN', '(1)', '(0, 0, 255)', '(2)'], {}), "(gray, 'Not Recognized', (x - 10, y - 10), cv2.\n FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2)\n", (1832, 1922), False, 'import cv2, sys, os\n'), ((1920, 2018), 'cv2.putText', 'cv2.putText', (['im', '"""Not Recognized"""', '(x - 10, y - 10)', 'cv2.FONT_HERSHEY_PLAIN', '(1)', '(0, 0, 255)', '(2)'], {}), "(im, 'Not Recognized', (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN,\n 1, (0, 0, 255), 2)\n", (1931, 2018), False, 'import cv2, sys, os\n'), ((479, 498), 'cv2.imread', 'cv2.imread', (['path', '(0)'], {}), '(path, 0)\n', (489, 498), False, 'import cv2, sys, os\n')]
|
# import libraries
import matplotlib
matplotlib.use('Agg')
import pandas as pd
import matplotlib.pyplot as plt
import argparse
from collections import defaultdict
#%matplotlib inline
# set font
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.sans-serif'] = 'Helvetica'
# set the style of the axes and the text color
plt.rcParams['axes.edgecolor']='#333F4B'
plt.rcParams['axes.linewidth']=0.8
plt.rcParams['xtick.color']='#333F4B'
plt.rcParams['ytick.color']='#333F4B'
plt.rcParams['text.color']='#333F4B'
parser = argparse.ArgumentParser(description='Draw Bar')
parser.add_argument('--tsv', default='input.tsv', help='input file separated by \'\\t\' ')
parser.add_argument('--fig', default='out.png', help='the output figure')
parser.add_argument('--title', default='Concept Count in All Papers', help='the title of the graph')
parser.add_argument('--colored_concepts', default=None, nargs='+',
help='An interleaved list of filenames containing concept tags (e.g. first.txt red second.txt purple)')
args = parser.parse_args()
concept_colors = defaultdict(lambda: '#007ACC')
if args.colored_concepts:
for i in range(0, len(args.colored_concepts), 2):
print(f"opening {args.colored_concepts[i]} as {args.colored_concepts[i+1]}")
with open(args.colored_concepts[i], 'r') as f:
for line in f:
line = line.strip()
concept_colors[line] = args.colored_concepts[i+1]
print(f'concept_colors[{line}] = {args.colored_concepts[i+1]}')
tsv_file = args.tsv
fig_file = args.fig
fin = open(tsv_file,"r")
cpt_list = []
val_list = []
for line in fin:
line = line.strip()
cpt, val = line.split("\t")
val_list.append(int(val))
cpt_list.append(cpt)
fin.close()
percentages = pd.Series(val_list,
index=cpt_list)
df = pd.DataFrame({'percentage' : percentages})
df = df.sort_values(by='percentage')
color_list = [concept_colors[x] for x in df.index]
# we first need a numeric placeholder for the y axis
my_range=list(range(1,len(df.index)+1))
fig, ax = plt.subplots(figsize=(10,25))
# create lines and dots for each bar
plt.hlines(y=my_range, xmin=0, xmax=df['percentage'], colors=color_list, alpha=0.5, linewidth=5)
# plt.plot(df['percentage'], my_range, "o", markersize=5, colors=color_list, alpha=0.6)
# set labels
ax.set_xlabel(args.title, fontsize=15, fontweight='black', color = '#333F4B')
ax.xaxis.set_label_position('top')
ax.xaxis.tick_top()
#ax.set_ylabel('')
# set axis
ax.tick_params(axis='both', which='major', labelsize=12)
plt.yticks(my_range, df.index)
# add an horizonal label for the y axis
#fig.text(-0.23, 0.86, 'Concept Coverage (Fulltext)', fontsize=15, fontweight='black', color = '#333F4B')
# change the style of the axis spines
ax.spines['bottom'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['top'].set_smart_bounds(True)
'''
# set the spines position
ax.spines['bottom'].set_position(('axes', -0.04))
ax.spines['left'].set_position(('axes', 0.015))
'''
plt.savefig(fig_file, dpi=300, bbox_inches='tight')
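# Usage sketch (hypothetical file names and values): each input line is a
# concept and its count separated by a tab, e.g. "transformer<TAB>17", and a run
# highlighting one concept list in red would look like:
#   python draw_bar.py --tsv input.tsv --fig out.png --colored_concepts first.txt red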
|
[
"pandas.DataFrame",
"argparse.ArgumentParser",
"matplotlib.pyplot.yticks",
"collections.defaultdict",
"matplotlib.use",
"pandas.Series",
"matplotlib.pyplot.hlines",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] |
[((37, 58), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (51, 58), False, 'import matplotlib\n'), ((534, 581), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Draw Bar"""'}), "(description='Draw Bar')\n", (557, 581), False, 'import argparse\n'), ((1074, 1105), 'collections.defaultdict', 'defaultdict', (["(lambda : '#007ACC')"], {}), "(lambda : '#007ACC')\n", (1085, 1105), False, 'from collections import defaultdict\n'), ((1715, 1750), 'pandas.Series', 'pd.Series', (['val_list'], {'index': 'cpt_list'}), '(val_list, index=cpt_list)\n', (1724, 1750), True, 'import pandas as pd\n'), ((1782, 1823), 'pandas.DataFrame', 'pd.DataFrame', (["{'percentage': percentages}"], {}), "({'percentage': percentages})\n", (1794, 1823), True, 'import pandas as pd\n'), ((2019, 2049), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 25)'}), '(figsize=(10, 25))\n', (2031, 2049), True, 'import matplotlib.pyplot as plt\n'), ((2087, 2187), 'matplotlib.pyplot.hlines', 'plt.hlines', ([], {'y': 'my_range', 'xmin': '(0)', 'xmax': "df['percentage']", 'colors': 'color_list', 'alpha': '(0.5)', 'linewidth': '(5)'}), "(y=my_range, xmin=0, xmax=df['percentage'], colors=color_list,\n alpha=0.5, linewidth=5)\n", (2097, 2187), True, 'import matplotlib.pyplot as plt\n'), ((2508, 2538), 'matplotlib.pyplot.yticks', 'plt.yticks', (['my_range', 'df.index'], {}), '(my_range, df.index)\n', (2518, 2538), True, 'import matplotlib.pyplot as plt\n'), ((3015, 3066), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_file'], {'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "(fig_file, dpi=300, bbox_inches='tight')\n", (3026, 3066), True, 'import matplotlib.pyplot as plt\n')]
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,md,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# Add to the path the generator folder.
# %%
import sys
from pathlib import Path
path = Path("../generator")
sys.path.insert(0, path.as_posix())
pwd = path.parent
# %%
import numpy as np  # used by the randomized configuration below
import pandas as pd
from generator import TimeSeriesGenerator
# %%
NUMBER_SERIES = 10
NUMBER_DAYS = 260
# %%
configuration = {
"meta": {
"number_of_observations": NUMBER_DAYS,
"path": "./timeseries/",
"time_series_name": "01-base",
},
"base_line": {"base": 10, "variance": 2},
"timestamps": {"start": 0, "step": 1},
"trend": {"slope": 0.1},
"season": {"height": 5, "period": 21},
"breaks": [{"from": 10, "to": 100, "value": 10}],
}
# %%
# Generate time series
generator = TimeSeriesGenerator(configuration["meta"])
series = []
for number in range(1, NUMBER_SERIES + 1):
# Add randomness to differentiate the time series
configuration["base_line"]["base"] = np.random.randint(low=10, high=50)
configuration["trend"]["slope"] = 0.35 * np.random.rand()
configuration["season"]["height"] = np.abs(5 * np.random.rand())
configuration["season"]["period"] = np.random.randint(low=21, high=120)
configuration["breaks"][0]["from"] = np.random.randint(low=21, high=120)
configuration["breaks"][0]["to"] = np.random.randint(
low=configuration["breaks"][0]["from"], high=NUMBER_DAYS
)
configuration["breaks"][0]["value"] = np.random.randint(low=5, high=10)
generator.generate(configuration)
ts = generator.get_business_like()
ts.name = number
series.append(ts)
# Collect all time series
prices_df = pd.DataFrame(series).T
prices_df.index.name = "date"
# %%
prices_df.plot()
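# %%
# Sanity-check sketch (assumption: get_business_like() yields one value per
# configured observation, so no rows are dropped):
# assert prices_df.shape == (NUMBER_DAYS, NUMBER_SERIES)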
|
[
"pandas.DataFrame",
"generator.TimeSeriesGenerator",
"pathlib.Path"
] |
[((408, 428), 'pathlib.Path', 'Path', (['"""../generator"""'], {}), "('../generator')\n", (412, 428), False, 'from pathlib import Path\n'), ((1018, 1060), 'generator.TimeSeriesGenerator', 'TimeSeriesGenerator', (["configuration['meta']"], {}), "(configuration['meta'])\n", (1037, 1060), False, 'from generator import TimeSeriesGenerator\n'), ((1900, 1920), 'pandas.DataFrame', 'pd.DataFrame', (['series'], {}), '(series)\n', (1912, 1920), True, 'import pandas as pd\n')]
|
"""
Project: SSITH CyberPhysical Demonstrator
Name: test_canout.py
Author: <NAME>
Date: 08 April 2021
Tests for the cyberphys can location poller
"""
import cyberphyslib.demonstrator.can_out as ccout
import cyberphyslib.demonstrator.component as ccomp
from cyberphyslib.demonstrator.handler import ComponentHandler
import time
def test_canout():
"""test the canout service
operational tests:
1. start / stop
failure mode tests:
<None>
"""
# simple start / stop
# TODO: conduct more tests
handler = ComponentHandler()
msg = handler.start_component(ccout.CanOutPoller(None))
assert msg == ccomp.ComponentStatus.READY
handler.exit()
|
[
"cyberphyslib.demonstrator.handler.ComponentHandler",
"cyberphyslib.demonstrator.can_out.CanOutPoller"
] |
[((546, 564), 'cyberphyslib.demonstrator.handler.ComponentHandler', 'ComponentHandler', ([], {}), '()\n', (562, 564), False, 'from cyberphyslib.demonstrator.handler import ComponentHandler\n'), ((599, 623), 'cyberphyslib.demonstrator.can_out.CanOutPoller', 'ccout.CanOutPoller', (['None'], {}), '(None)\n', (617, 623), True, 'import cyberphyslib.demonstrator.can_out as ccout\n')]
|
# -*- coding: utf-8 -*-
from functools import partial
import numpy as np
import pandas as pd
def summarize_results(results):
values = []
    for result in results:
        df = result.pd_dataframe()  # convert each forecast series to a DataFrame once
        values.append(df.values)
    columns = df.columns  # all results share the same columns/index
return (
pd.DataFrame(np.mean(values, axis=0), columns=columns, index=df.index),
pd.DataFrame(np.std(values, axis=0), columns=columns, index=df.index),
)
def _run_backtest(
rep, model, x_test, y_test, start=0.3, stride=1, horizon=4, enable_mc_dropout=True
):
backtest = model.historical_forecasts(
y_test,
past_covariates=x_test,
start=start,
forecast_horizon=horizon,
stride=stride,
retrain=False,
verbose=False,
enable_mc_dropout=enable_mc_dropout,
)
return backtest
def parallelized_inference(
model, x, y, repeats=100, start=0.3, stride=1, horizon=6, enable_mc_dropout=True
):
results = []
backtest_partial = partial(
_run_backtest,
model=model,
x_test=x,
y_test=y,
start=start,
stride=stride,
horizon=horizon,
enable_mc_dropout=enable_mc_dropout,
)
for res in map(backtest_partial, range(repeats)):
results.append(res)
return results
def _run_forcast(_, model, x_full, y_past, future_len, enable_mc_dropout=True):
return model.predict(
future_len, series=y_past, past_covariates=x_full, enable_mc_dropout=enable_mc_dropout
)
def forecast(model, x_full, y_past, future_len, repeats=100, enable_mc_dropout=True):
results = []
backtest_partial = partial(
_run_forcast,
model=model,
x_full=x_full,
y_past=y_past,
future_len=future_len,
enable_mc_dropout=enable_mc_dropout,
)
for res in map(backtest_partial, range(repeats)):
results.append(res)
return results
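# Hypothetical usage sketch (assumes a darts-style model whose historical_forecasts
# and predict accept the enable_mc_dropout flag used above):
# runs = parallelized_inference(model, x_series, y_series, repeats=50)
# mean_df, std_df = summarize_results(runs)  # per-step MC-dropout mean and spread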
|
[
"numpy.std",
"functools.partial",
"numpy.mean"
] |
[((1004, 1146), 'functools.partial', 'partial', (['_run_backtest'], {'model': 'model', 'x_test': 'x', 'y_test': 'y', 'start': 'start', 'stride': 'stride', 'horizon': 'horizon', 'enable_mc_dropout': 'enable_mc_dropout'}), '(_run_backtest, model=model, x_test=x, y_test=y, start=start, stride\n =stride, horizon=horizon, enable_mc_dropout=enable_mc_dropout)\n', (1011, 1146), False, 'from functools import partial\n'), ((1655, 1784), 'functools.partial', 'partial', (['_run_forcast'], {'model': 'model', 'x_full': 'x_full', 'y_past': 'y_past', 'future_len': 'future_len', 'enable_mc_dropout': 'enable_mc_dropout'}), '(_run_forcast, model=model, x_full=x_full, y_past=y_past, future_len\n =future_len, enable_mc_dropout=enable_mc_dropout)\n', (1662, 1784), False, 'from functools import partial\n'), ((304, 327), 'numpy.mean', 'np.mean', (['values'], {'axis': '(0)'}), '(values, axis=0)\n', (311, 327), True, 'import numpy as np\n'), ((384, 406), 'numpy.std', 'np.std', (['values'], {'axis': '(0)'}), '(values, axis=0)\n', (390, 406), True, 'import numpy as np\n')]
|
from datetime import date
__version__ = "1.3.1"
__author__ = u"<NAME>"
__author_email__ = "<EMAIL>"
__copyright__ = u"Copyright (c) 2017-{}, {} <{}>".format(
date.today().year, __author__, __author_email__
)
__website__ = "https://benvial.github.io/pytheas"
__license__ = "License :: OSI Approved :: MIT License"
__status__ = "Development Status :: 5 - Production/Stable"
__description__ = (
"Python Electromagnetic Analysis and Simulation with the Finite Element Method"
)
|
[
"datetime.date.today"
] |
[((163, 175), 'datetime.date.today', 'date.today', ([], {}), '()\n', (173, 175), False, 'from datetime import date\n')]
|
'''
Created by auto_sdk on 2020.09.01
'''
from top.api.base import RestApi
class WdtStatSalesBySpecShopWarehouseQueryRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.consign_date = None
self.sid = None
def getapiname(self):
return 'hu3cgwt0tc.wdt.stat.sales.by.spec.shop.warehouse.query'
|
[
"top.api.base.RestApi.__init__"
] |
[((199, 235), 'top.api.base.RestApi.__init__', 'RestApi.__init__', (['self', 'domain', 'port'], {}), '(self, domain, port)\n', (215, 235), False, 'from top.api.base import RestApi\n')]
|
# pseudocode
# Breadth-First Search
"""
procedure BFS(G, root) is
let Q be a queue
label root as discovered
Q.enqueue(root)
while Q is not empty do
v := Q.dequeue()
if v is the goal then
return v
for all edges from v to w in G.adjacentEdges(v) do
if w is not labeled as discovered then
label w as discovered
w.parent := v
Q.enqueue(w)
"""
from collections import deque
adjList = {
1: {2, 3},
2: {4},
3: {4},
4: {1}
}
def bfs(graph, startingNode, destinationNode):
queue = deque()
visited = set()
visited.add(startingNode)
queue.append(startingNode)
while len(queue) > 0:
currNode = queue.popleft()
print(f"visiting node {currNode}")
if currNode == destinationNode:
print(f"found dthe destination node {currNode}")
return currNode
for neighbor in graph[currNode]:
if neighbor not in visited:
visited.add(neighbor)
queue.append(neighbor)
bfs(adjList, 1, 4)
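# The implementation above drops the pseudocode's "w.parent := v" bookkeeping, so it
# returns only the goal node. A sketch that also reconstructs the discovered path:
def bfs_path(graph, startingNode, destinationNode):
    parents = {startingNode: None}
    queue = deque([startingNode])
    while len(queue) > 0:
        currNode = queue.popleft()
        if currNode == destinationNode:
            path = []
            while currNode is not None:  # walk parent links back to the start
                path.append(currNode)
                currNode = parents[currNode]
            return path[::-1]
        for neighbor in graph[currNode]:
            if neighbor not in parents:  # parents doubles as the visited set
                parents[neighbor] = currNode
                queue.append(neighbor)
    return None
print(bfs_path(adjList, 1, 4))  # [1, 2, 4] or [1, 3, 4], depending on set order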
|
[
"collections.deque"
] |
[((608, 615), 'collections.deque', 'deque', ([], {}), '()\n', (613, 615), False, 'from collections import deque\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Tridots Tech Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
# from _future_ import unicode_literals
import frappe
import frappe.utils
import json
from frappe import _
def get_context(context):
location = frappe.request.cookies.get('city_location')
path = frappe.local.request.path
path = path.replace('csd-', '')
path = path.replace('-price', '')
context.path = path
path = path.strip('/')
word = path.split('/')
category_route = word[0]
brand_route = word[1]
item_route = word[2]
variant_route = word[3]
addrightadd = frappe.db.get_value('Widget Placeholder', fieldname=['google_ad_script'], filters={"view": 'Variant Detail Page', 'position': 'Right Panel'})
context.addrightadd = addrightadd
context.addtopadd = frappe.db.get_value('Widget Placeholder', fieldname=['google_ad_script'], filters={'view': 'Variant Detail Page', 'position': 'Top Panel'})
context.addbottomadd = frappe.db.get_value('Widget Placeholder', fieldname=['google_ad_script'], filters={'view': 'Variant Detail Page', 'position': 'Bottom Panel'})
context.addmidads = frappe.db.get_value('Widget Placeholder', fieldname=['google_ad_script'], filters={'view': 'Variant Detail Page', 'position': 'Middle Panel'})
item_name = frappe.db.get_value("Item",
filters={'route': item_route}, fieldname=['name'])
context.item_brand = frappe.db.get_value("ItemBrand",
filters={'route': brand_route}, fieldname=['brand_name'])
context.item_title = frappe.db.get_value("Item",
filters={'route': item_route}, fieldname=['item_name'])
context.category_title = frappe.db.get_value("Category",
filters={'route': category_route}, fieldname=['category_name'])
context.item_brand_route = brand_route
context.item_category_route = category_route
context.item_route = item_route
context.variant_route = variant_route
context.variant_title = frappe.db.get_value("Item Variant",
filters={'route': variant_route, 'item': item_name}, fieldname=['variant_name'])
context.meta_title = frappe.db.get_value("Item Variant",
filters={'route': variant_route, 'item': item_name}, fieldname=['meta_title'])
context.meta_description = frappe.db.get_value("Item Variant",
filters={'route': variant_route, 'item': item_name}, fieldname=['meta_description'])
context.meta_keywords = frappe.db.get_value("Item Variant",
filters={'route': variant_route, 'item': item_name}, fieldname=['meta_keywords'])
context.item_featured_image = frappe.db.get_value("Item",
filters={'route': item_route}, fieldname=['featured_image'])
item_variant_doc_name = frappe.db.get_value("Item Variant",
filters={'route': variant_route}, fieldname=['name'])
	context.item_variant_doc_name = item_variant_doc_name
item_variants = frappe.db.get_all("Item Variant",
fields=['route','variant_name', 'name'],
filters={'item': item_name},
limit_page_length= 100)
for x in item_variants:
if frappe.request.cookies.get('city_location'):
price = frappe.db.get_list('Item Variant Price',
fields = ['market_price', 'csd_price'],
filters = {'variant': x.name, 'city': frappe.request.cookies.get('city_location'), 'item': item_name})
if len(price) > 0:
x.csd_price = price[0].csd_price
x.market_price = price[0].market_price
else:
x.csd_price = "Na"
x.market_price = "Na"
else:
price = frappe.db.get_list('Item Variant Price',
fields = ['market_price', 'csd_price'],
filters = {'variant': x.name, 'city': 'Delhi', 'item': item_name})
if len(price) > 0:
x.csd_price = price[0].csd_price
x.market_price = price[0].market_price
else:
x.csd_price = "Na"
x.market_price = "Na"
context.item_variants = item_variants
variant_specifications = frappe.db.get_list('Item Specification',
fields=['specification', 'value'],
filters={'parent': item_variant_doc_name})
for x in variant_specifications:
x.specification_group = frappe.db.get_value("Specification",
filters={'name': x.specification}, fieldname=['specification_category'])
context.variant_specifications = variant_specifications
if frappe.request.cookies.get('city_location'):
price = frappe.db.get_list('Item Variant Price',
fields = ['market_price', 'csd_price'],
filters = {'variant': item_variant_doc_name, 'city': frappe.request.cookies.get('city_location'), 'item': item_name})
if len(price) > 0:
context.csd_price = price[0].csd_price
context.market_price = price[0].market_price
else:
context.csd_price = "Na"
context.market_price = "Na"
else:
price = frappe.db.get_list('Item Variant Price',
fields = ['market_price', 'csd_price'], filters = {'variant': item_variant_doc_name, 'city': 'Delhi', 'item': item_name})
if len(price) > 0:
context.csd_price = price[0].csd_price
context.market_price = price[0].market_price
context.difference=price[0].difference
else:
context.csd_price = "Na"
context.market_price = "Na"
|
[
"frappe.db.get_value",
"frappe.db.get_list",
"frappe.db.get_all",
"frappe.request.cookies.get"
] |
[((354, 397), 'frappe.request.cookies.get', 'frappe.request.cookies.get', (['"""city_location"""'], {}), "('city_location')\n", (380, 397), False, 'import frappe\n'), ((717, 862), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Widget Placeholder"""'], {'fieldname': "['google_ad_script']", 'filters': "{'view': 'Variant Detail Page', 'position': 'Right Panel'}"}), "('Widget Placeholder', fieldname=['google_ad_script'],\n filters={'view': 'Variant Detail Page', 'position': 'Right Panel'})\n", (736, 862), False, 'import frappe\n'), ((917, 1060), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Widget Placeholder"""'], {'fieldname': "['google_ad_script']", 'filters': "{'view': 'Variant Detail Page', 'position': 'Top Panel'}"}), "('Widget Placeholder', fieldname=['google_ad_script'],\n filters={'view': 'Variant Detail Page', 'position': 'Top Panel'})\n", (936, 1060), False, 'import frappe\n'), ((1082, 1228), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Widget Placeholder"""'], {'fieldname': "['google_ad_script']", 'filters': "{'view': 'Variant Detail Page', 'position': 'Bottom Panel'}"}), "('Widget Placeholder', fieldname=['google_ad_script'],\n filters={'view': 'Variant Detail Page', 'position': 'Bottom Panel'})\n", (1101, 1228), False, 'import frappe\n'), ((1247, 1393), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Widget Placeholder"""'], {'fieldname': "['google_ad_script']", 'filters': "{'view': 'Variant Detail Page', 'position': 'Middle Panel'}"}), "('Widget Placeholder', fieldname=['google_ad_script'],\n filters={'view': 'Variant Detail Page', 'position': 'Middle Panel'})\n", (1266, 1393), False, 'import frappe\n'), ((1406, 1484), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Item"""'], {'filters': "{'route': item_route}", 'fieldname': "['name']"}), "('Item', filters={'route': item_route}, fieldname=['name'])\n", (1425, 1484), False, 'import frappe\n'), ((1516, 1611), 'frappe.db.get_value', 'frappe.db.get_value', (['"""ItemBrand"""'], {'filters': "{'route': brand_route}", 'fieldname': "['brand_name']"}), "('ItemBrand', filters={'route': brand_route}, fieldname=\n ['brand_name'])\n", (1535, 1611), False, 'import frappe\n'), ((1641, 1729), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Item"""'], {'filters': "{'route': item_route}", 'fieldname': "['item_name']"}), "('Item', filters={'route': item_route}, fieldname=[\n 'item_name'])\n", (1660, 1729), False, 'import frappe\n'), ((1763, 1862), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Category"""'], {'filters': "{'route': category_route}", 'fieldname': "['category_name']"}), "('Category', filters={'route': category_route},\n fieldname=['category_name'])\n", (1782, 1862), False, 'import frappe\n'), ((2073, 2193), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Item Variant"""'], {'filters': "{'route': variant_route, 'item': item_name}", 'fieldname': "['variant_name']"}), "('Item Variant', filters={'route': variant_route, 'item':\n item_name}, fieldname=['variant_name'])\n", (2092, 2193), False, 'import frappe\n'), ((2224, 2342), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Item Variant"""'], {'filters': "{'route': variant_route, 'item': item_name}", 'fieldname': "['meta_title']"}), "('Item Variant', filters={'route': variant_route, 'item':\n item_name}, fieldname=['meta_title'])\n", (2243, 2342), False, 'import frappe\n'), ((2379, 2503), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Item Variant"""'], {'filters': "{'route': variant_route, 'item': item_name}", 'fieldname': 
"['meta_description']"}), "('Item Variant', filters={'route': variant_route, 'item':\n item_name}, fieldname=['meta_description'])\n", (2398, 2503), False, 'import frappe\n'), ((2537, 2658), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Item Variant"""'], {'filters': "{'route': variant_route, 'item': item_name}", 'fieldname': "['meta_keywords']"}), "('Item Variant', filters={'route': variant_route, 'item':\n item_name}, fieldname=['meta_keywords'])\n", (2556, 2658), False, 'import frappe\n'), ((2700, 2793), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Item"""'], {'filters': "{'route': item_route}", 'fieldname': "['featured_image']"}), "('Item', filters={'route': item_route}, fieldname=[\n 'featured_image'])\n", (2719, 2793), False, 'import frappe\n'), ((2826, 2919), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Item Variant"""'], {'filters': "{'route': variant_route}", 'fieldname': "['name']"}), "('Item Variant', filters={'route': variant_route},\n fieldname=['name'])\n", (2845, 2919), False, 'import frappe\n'), ((3001, 3132), 'frappe.db.get_all', 'frappe.db.get_all', (['"""Item Variant"""'], {'fields': "['route', 'variant_name', 'name']", 'filters': "{'item': item_name}", 'limit_page_length': '(100)'}), "('Item Variant', fields=['route', 'variant_name', 'name'],\n filters={'item': item_name}, limit_page_length=100)\n", (3018, 3132), False, 'import frappe\n'), ((4032, 4154), 'frappe.db.get_list', 'frappe.db.get_list', (['"""Item Specification"""'], {'fields': "['specification', 'value']", 'filters': "{'parent': item_variant_doc_name}"}), "('Item Specification', fields=['specification', 'value'],\n filters={'parent': item_variant_doc_name})\n", (4050, 4154), False, 'import frappe\n'), ((4428, 4471), 'frappe.request.cookies.get', 'frappe.request.cookies.get', (['"""city_location"""'], {}), "('city_location')\n", (4454, 4471), False, 'import frappe\n'), ((3187, 3230), 'frappe.request.cookies.get', 'frappe.request.cookies.get', (['"""city_location"""'], {}), "('city_location')\n", (3213, 3230), False, 'import frappe\n'), ((4238, 4351), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Specification"""'], {'filters': "{'name': x.specification}", 'fieldname': "['specification_category']"}), "('Specification', filters={'name': x.specification},\n fieldname=['specification_category'])\n", (4257, 4351), False, 'import frappe\n'), ((4896, 5062), 'frappe.db.get_list', 'frappe.db.get_list', (['"""Item Variant Price"""'], {'fields': "['market_price', 'csd_price']", 'filters': "{'variant': item_variant_doc_name, 'city': 'Delhi', 'item': item_name}"}), "('Item Variant Price', fields=['market_price',\n 'csd_price'], filters={'variant': item_variant_doc_name, 'city':\n 'Delhi', 'item': item_name})\n", (4914, 5062), False, 'import frappe\n'), ((3635, 3786), 'frappe.db.get_list', 'frappe.db.get_list', (['"""Item Variant Price"""'], {'fields': "['market_price', 'csd_price']", 'filters': "{'variant': x.name, 'city': 'Delhi', 'item': item_name}"}), "('Item Variant Price', fields=['market_price',\n 'csd_price'], filters={'variant': x.name, 'city': 'Delhi', 'item':\n item_name})\n", (3653, 3786), False, 'import frappe\n'), ((4628, 4671), 'frappe.request.cookies.get', 'frappe.request.cookies.get', (['"""city_location"""'], {}), "('city_location')\n", (4654, 4671), False, 'import frappe\n'), ((3379, 3422), 'frappe.request.cookies.get', 'frappe.request.cookies.get', (['"""city_location"""'], {}), "('city_location')\n", (3405, 3422), False, 'import frappe\n')]
|
import torch
import torch.nn as nn
from torch.nn import Parameter
from torch.autograd import Variable
class NormedLinearLayer(nn.Module):
def __init__(self, input_dim, out_dim, momentum=0.1):
super(NormedLinearLayer, self).__init__()
self.input_dim = input_dim
self.out_dim = out_dim
self.momentum = momentum
self._build_model()
def _build_model(self):
self.linear = nn.utils.weight_norm(nn.Linear(self.input_dim, self.out_dim))
        self.bias = Parameter(torch.Tensor(self.out_dim))  # learnable bias vector, zeroed in reset_parameter()
self.register_buffer('running_mean', torch.zeros(self.out_dim))
self.reset_parameter()
def reset_parameter(self):
self.running_mean.zero_()
self.bias.data.zero_()
def forward(self, inputs):
inputs = self.linear(inputs)
if self.training:
avg = torch.mean(inputs, dim=0)
self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * avg.data
else:
avg = Variable(self.running_mean, requires_grad=False)
out = inputs - avg + self.bias
return out
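# Minimal smoke-test sketch (hypothetical sizes): in training mode the running mean
# of the pre-activations is updated; in eval mode the stored buffer is used instead.
# layer = NormedLinearLayer(8, 4)
# out = layer(torch.randn(16, 8))               # training path, updates running_mean
# layer.eval(); out = layer(torch.randn(16, 8)) # uses the stored running_mean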
|
[
"torch.nn.Parameter",
"torch.mean",
"torch.autograd.Variable",
"torch.nn.Linear",
"torch.zeros"
] |
[((524, 547), 'torch.nn.Parameter', 'Parameter', (['torch.Tensor'], {}), '(torch.Tensor)\n', (533, 547), False, 'from torch.nn import Parameter\n'), ((462, 501), 'torch.nn.Linear', 'nn.Linear', (['self.input_dim', 'self.out_dim'], {}), '(self.input_dim, self.out_dim)\n', (471, 501), True, 'import torch.nn as nn\n'), ((594, 619), 'torch.zeros', 'torch.zeros', (['self.out_dim'], {}), '(self.out_dim)\n', (605, 619), False, 'import torch\n'), ((874, 899), 'torch.mean', 'torch.mean', (['inputs'], {'dim': '(0)'}), '(inputs, dim=0)\n', (884, 899), False, 'import torch\n'), ((1034, 1082), 'torch.autograd.Variable', 'Variable', (['self.running_mean'], {'requires_grad': '(False)'}), '(self.running_mean, requires_grad=False)\n', (1042, 1082), False, 'from torch.autograd import Variable\n')]
|
from dataclasses import dataclass
import h5pickle as h5py
import json
import numpy as np
from numpy import ndarray
from pathlib import Path
from typing import List
import random
from robolfd.types import Transition
import robosuite
from robosuite.utils.mjcf_utils import postprocess_model_xml
import itertools
from tqdm import tqdm
from multiprocessing import Pool
@dataclass
class DemoConfig:
    obs_keys: List[str]  # observation keys to concatenate into a flat vector
max_episodes: int
num_workers: int
def __str__(self) -> str:
return f"demo config - observation keys: {self.obs_keys} max_episodes: {self.max_episodes}, num_workers: {self.num_workers}"
def generate_episode_transitions(demo_info):
f, episode_num, config = demo_info
episodes = list(f["data"].keys())
episode = episodes[episode_num]
env_info = json.loads(f["data"].attrs["env_info"])
env = robosuite.make(
**env_info,
has_renderer=False,
has_offscreen_renderer=False,
ignore_done=True,
use_camera_obs=False,
reward_shaping=True,
control_freq=20,
)
model_xml = f[f"data/{episode}"].attrs["model_file"]
env.reset()
xml = postprocess_model_xml(model_xml)
env.reset_from_xml_string(xml)
env.sim.reset()
all_observations = []
all_actions = []
# TODO: start from state
states = f[f"data/{episode}/states"][()]
actions = np.array(f[f"data/{episode}/actions"][()])
# load the initial state
env.sim.set_state_from_flattened(states[0])
env.sim.forward()
observations = []
action = [0, 0, 0, -1]
observation, _, _, _ = env.step(action)
# observe the current state
observations.append(observation)
used_actions = []
# Fix the order of action, observation sampling problem here
for j, action in enumerate(actions):
action = np.clip(action, -1, 1)
observation, reward, done, misc = env.step(action)
# use when you want to evaluate the environment
# env.render()
used_actions.append(action)
observations.append(observation)
# repeat last action for last observation
used_actions.append(actions[-1])
flat_observations = []
for observation in observations:
flat_observations.append(np.concatenate([observation[key] for key in config.obs_keys]))
all_observations.extend(flat_observations)
all_actions.extend(used_actions)
return list(zip(all_observations, all_actions))
def make_demonstrations(demo_path: Path, config: DemoConfig) -> ndarray:
f = h5py.File(demo_path, "r", skip_cache=False)
episodes = list(f["data"].keys())[-config.max_episodes:]
# TODO: Decide how to batch transitions across episodes
# Dataset is collected in the form of transitions.
pbar = tqdm(total=len(episodes))
with Pool(config.num_workers) as pool:
# simple pool usage
# transitions = pool.map(generate_episode_transitions, [(demo_path, i, config) for i in range(len(episodes))])
# for measuring progress:
res = [pool.apply_async(generate_episode_transitions, args=((f, i, config),),
callback=lambda _: pbar.update(1)) for i in range(len(episodes))]
transitions = [p.get() for p in res]
pool.close()
pool.join()
return transitions
def make_eval_env(demo_path: Path, robot_name="Panda", has_offscreen_renderer = True):
f = h5py.File(demo_path, "r")
env_name = f["data"].attrs["env"]
env_info = json.loads(f["data"].attrs["env_info"])
env_info['robots'] = robot_name
env = robosuite.make(
**env_info,
has_renderer=not has_offscreen_renderer,
has_offscreen_renderer=has_offscreen_renderer,
ignore_done=True,
use_camera_obs=has_offscreen_renderer,
reward_shaping=True,
control_freq=20,
camera_names="frontview",
camera_heights=512,
camera_widths=512,
)
return env
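# Usage sketch (hypothetical demo path and observation keys):
# config = DemoConfig(obs_keys=["robot0_proprio-state", "object-state"], max_episodes=4, num_workers=2)
# transitions = make_demonstrations(Path("demo.hdf5"), config)
# env = make_eval_env(Path("demo.hdf5"), has_offscreen_renderer=False)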
|
[
"robosuite.make",
"json.loads",
"numpy.clip",
"h5pickle.File",
"robosuite.utils.mjcf_utils.postprocess_model_xml",
"numpy.array",
"multiprocessing.Pool",
"numpy.concatenate"
] |
[((802, 841), 'json.loads', 'json.loads', (["f['data'].attrs['env_info']"], {}), "(f['data'].attrs['env_info'])\n", (812, 841), False, 'import json\n'), ((853, 1015), 'robosuite.make', 'robosuite.make', ([], {'has_renderer': '(False)', 'has_offscreen_renderer': '(False)', 'ignore_done': '(True)', 'use_camera_obs': '(False)', 'reward_shaping': '(True)', 'control_freq': '(20)'}), '(**env_info, has_renderer=False, has_offscreen_renderer=False,\n ignore_done=True, use_camera_obs=False, reward_shaping=True,\n control_freq=20)\n', (867, 1015), False, 'import robosuite\n'), ((1155, 1187), 'robosuite.utils.mjcf_utils.postprocess_model_xml', 'postprocess_model_xml', (['model_xml'], {}), '(model_xml)\n', (1176, 1187), False, 'from robosuite.utils.mjcf_utils import postprocess_model_xml\n'), ((1385, 1427), 'numpy.array', 'np.array', (["f[f'data/{episode}/actions'][()]"], {}), "(f[f'data/{episode}/actions'][()])\n", (1393, 1427), True, 'import numpy as np\n'), ((2547, 2590), 'h5pickle.File', 'h5py.File', (['demo_path', '"""r"""'], {'skip_cache': '(False)'}), "(demo_path, 'r', skip_cache=False)\n", (2556, 2590), True, 'import h5pickle as h5py\n'), ((3419, 3444), 'h5pickle.File', 'h5py.File', (['demo_path', '"""r"""'], {}), "(demo_path, 'r')\n", (3428, 3444), True, 'import h5pickle as h5py\n'), ((3498, 3537), 'json.loads', 'json.loads', (["f['data'].attrs['env_info']"], {}), "(f['data'].attrs['env_info'])\n", (3508, 3537), False, 'import json\n'), ((3589, 3879), 'robosuite.make', 'robosuite.make', ([], {'has_renderer': '(not has_offscreen_renderer)', 'has_offscreen_renderer': 'has_offscreen_renderer', 'ignore_done': '(True)', 'use_camera_obs': 'has_offscreen_renderer', 'reward_shaping': '(True)', 'control_freq': '(20)', 'camera_names': '"""frontview"""', 'camera_heights': '(512)', 'camera_widths': '(512)'}), "(**env_info, has_renderer=not has_offscreen_renderer,\n has_offscreen_renderer=has_offscreen_renderer, ignore_done=True,\n use_camera_obs=has_offscreen_renderer, reward_shaping=True,\n control_freq=20, camera_names='frontview', camera_heights=512,\n camera_widths=512)\n", (3603, 3879), False, 'import robosuite\n'), ((1837, 1859), 'numpy.clip', 'np.clip', (['action', '(-1)', '(1)'], {}), '(action, -1, 1)\n', (1844, 1859), True, 'import numpy as np\n'), ((2820, 2844), 'multiprocessing.Pool', 'Pool', (['config.num_workers'], {}), '(config.num_workers)\n', (2824, 2844), False, 'from multiprocessing import Pool\n'), ((2256, 2317), 'numpy.concatenate', 'np.concatenate', (['[observation[key] for key in config.obs_keys]'], {}), '([observation[key] for key in config.obs_keys])\n', (2270, 2317), True, 'import numpy as np\n')]
|
from pprint import pprint
import json
import gzip
import pickle
import copy
from androguard.decompiler.dad.decompile import DvMethod
from androguard.misc import AnalyzeAPK
from core.parser import ASTParser
from core.parser import ConstData
from core.parser import stmtList
from core.parser import actionList
from core.parser import dataList
from core.statements import Statement
from core.graph import ASTGraph
from core.graph import GraphConfig
from core.utils import save_pickle, load_pickle
from core.utils import get_filteredFileList_from_directory as get_targets
import networkx as nx
targetPath = 'data/'
target = 'data/okhttp-3.1.0_dex.jar'
resultPath = '/root/result/'
targetExts = ['.apk', '.jar']
# config = GraphConfig(10000,20000)
def create_ast(method):
if method.is_external():
return
try:
dv = DvMethod(method)
dv.process(doAST=True)
return dv.get_ast()
except AttributeError as ae:
        print('ERROR : in create_ast()')
if __name__ == '__main__':
targetList = get_targets(targetPath, targetExts)
for target in targetList:
a, d, dx = AnalyzeAPK(target)
t_count = 0
graphList = list()
for method in dx.get_methods():
m_ast = create_ast(method)
ap = ASTParser()
if m_ast is not None:
ap.load_ast(m_ast)
ap.parse_ast()
# for node in ap.parsedNodes:
# if 'APIName' == node.nodeInfo.type:
# pprint(node.nodeInfo)
# for edge in ap.parsedEdges:
# pprint(edge)
# ag = ASTGraph(ap.parsedNodes, ap.parsedEdges, config)
ag = ASTGraph(ap.parsedNodes, ap.parsedEdges)
ag.graph_initialize()
# encode_flag makes the index of edges meaningful
# ag.graph_initialize(encode_flag = True)
if ag.graph == None:
pass
else:
graphList.append(ag.graph)
save_pickle(resultPath + target.split('/')[1] + '.pickle', graphList)
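        # One pickle of networkx AST graphs is written per target, named after the
        # APK/JAR file (e.g. resultPath + 'okhttp-3.1.0_dex.jar.pickle').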
|
[
"androguard.misc.AnalyzeAPK",
"core.parser.ASTParser",
"core.graph.ASTGraph",
"core.utils.get_filteredFileList_from_directory",
"androguard.decompiler.dad.decompile.DvMethod"
] |
[((1088, 1123), 'core.utils.get_filteredFileList_from_directory', 'get_targets', (['targetPath', 'targetExts'], {}), '(targetPath, targetExts)\n', (1099, 1123), True, 'from core.utils import get_filteredFileList_from_directory as get_targets\n'), ((882, 898), 'androguard.decompiler.dad.decompile.DvMethod', 'DvMethod', (['method'], {}), '(method)\n', (890, 898), False, 'from androguard.decompiler.dad.decompile import DvMethod\n'), ((1177, 1195), 'androguard.misc.AnalyzeAPK', 'AnalyzeAPK', (['target'], {}), '(target)\n', (1187, 1195), False, 'from androguard.misc import AnalyzeAPK\n'), ((1352, 1363), 'core.parser.ASTParser', 'ASTParser', ([], {}), '()\n', (1361, 1363), False, 'from core.parser import ASTParser\n'), ((1812, 1852), 'core.graph.ASTGraph', 'ASTGraph', (['ap.parsedNodes', 'ap.parsedEdges'], {}), '(ap.parsedNodes, ap.parsedEdges)\n', (1820, 1852), False, 'from core.graph import ASTGraph\n')]
|
from django.forms import ModelForm, Form, CharField
from ..models import Project, Task
class TaskSearchForm(Form):
'''Form for the task search bar'''
query = CharField(max_length=100)
query.widget.attrs.update({'placeholder': 'Search all tasks...',
'class': 'form-control',})
class TaskForm(ModelForm):
class Meta:
model = Task
fields = ('title','body','project',)
def __init__(self, *args, **kwargs):
'''Uses the passed request to populate fields'''
        self.request = kwargs.pop('request', None)  # pop before ModelForm sees the kwargs
        if self.request:
            super(TaskForm,self).__init__(*args, **kwargs)
            self.fields['project'].queryset = Project.objects.filter(user=self.request.user)
        else:
            super(TaskForm,self).__init__(*args, **kwargs)
self.fields['title'].widget.attrs.update({'class': 'form-control border border-dark',
'placeholder': 'Title',
'id': 'taskTitle',})
self.fields['body'].widget.attrs.update({'class': 'form-control border border-dark',
'placeholder': 'Body',
'id': 'taskBody',
'style': 'height: 8rem;',})
self.fields['project'].widget.attrs.update({'class': 'form-select border border-dark',
'placeholder': 'Project',
'id': 'taskProject',})
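# View-side sketch (hypothetical view code): pass the request explicitly so the
# project choices are scoped to the signed-in user, e.g.
#   form = TaskForm(request.POST or None, request=request)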
|
[
"django.forms.CharField"
] |
[((168, 193), 'django.forms.CharField', 'CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (177, 193), False, 'from django.forms import ModelForm, Form, CharField\n')]
|
from unittest import TestCase
from chibi.object import Chibi_object
from chibi.object.descriptor import (
String, Dict, Tree_simple, Dict_defaults, Set
)
class Chibi_object_empty( Chibi_object ):
pass
class Chibi_object_with_descriptors( Chibi_object ):
name = String()
test_dict = Dict()
test_dict_default = Dict_defaults()
test_tree = Tree_simple()
test_set = Set()
class Chibi_object_with_defaults( Chibi_object ):
name = String( default='hello' )
test_dict_default = Dict_defaults( default='word' )
class Test_chibi_object( TestCase ):
def test_chibi_object_simple( self ):
obj = Chibi_object_empty()
self.assertIsInstance( obj, Chibi_object )
def test_with_descriptors( self ):
obj = Chibi_object_with_descriptors()
self.assertIsInstance( obj, Chibi_object )
self.assertEqual( obj.name, '' )
obj.name = 'hellooo'
self.assertEqual( obj.name, 'hellooo' )
self.assertIsNone( obj.test_dict )
obj.test_dict = { 'key': 'test' }
self.assertEqual( obj.test_dict, { 'key': 'test' } )
self.assertFalse( obj.test_dict_default )
data = obj.test_dict_default[ 'sadf' ]
self.assertIsNone( data )
self.assertFalse( obj.test_tree )
self.assertEqual( list( obj.test_tree.keys() ), [] )
self.assertEqual( len( obj.test_set ), 0 )
    def test_with_descriptor_assign( self ):
obj = Chibi_object_with_descriptors( name='stuff', test_dict={} )
self.assertEqual( obj.test_dict, {} )
self.assertEqual( obj.name, 'stuff' )
obj.name = 'asdf'
self.assertEqual( obj.name, 'asdf' )
obj.test_dict['asdf'] = 123
self.assertEqual( obj.test_dict, { 'asdf': 123 } )
self.assertFalse( obj.test_tree )
obj.test_tree.a.b.c
self.assertEqual( obj.test_tree, { 'a': { 'b': { 'c': {} } } } )
obj.test_tree.a.rusky = 'RUSH B'
self.assertEqual( obj.test_tree, { 'a': { 'rusky': 'RUSH B',
'b': { 'c': {} } } } )
obj.test_dict_default[ 'qwer' ] = 'word'
self.assertIsNotNone( obj.test_dict_default[ 'qwer' ] )
self.assertEqual( obj.test_dict_default[ 'qwer' ], 'word' )
obj.test_set |= set( 'abc' )
self.assertEqual( len( obj.test_set ), 3 )
def test_with_defaults( self ):
obj = Chibi_object_with_defaults()
self.assertEqual( obj.name, 'hello' )
obj.name = 'zxcv'
self.assertEqual( obj.name, 'zxcv' )
word = obj.test_dict_default[ 'hello' ]
self.assertEqual( word, 'word' )
|
[
"chibi.object.descriptor.Dict",
"chibi.object.descriptor.Dict_defaults",
"chibi.object.descriptor.Tree_simple",
"chibi.object.descriptor.Set",
"chibi.object.descriptor.String"
] |
[((278, 286), 'chibi.object.descriptor.String', 'String', ([], {}), '()\n', (284, 286), False, 'from chibi.object.descriptor import String, Dict, Tree_simple, Dict_defaults, Set\n'), ((303, 309), 'chibi.object.descriptor.Dict', 'Dict', ([], {}), '()\n', (307, 309), False, 'from chibi.object.descriptor import String, Dict, Tree_simple, Dict_defaults, Set\n'), ((334, 349), 'chibi.object.descriptor.Dict_defaults', 'Dict_defaults', ([], {}), '()\n', (347, 349), False, 'from chibi.object.descriptor import String, Dict, Tree_simple, Dict_defaults, Set\n'), ((366, 379), 'chibi.object.descriptor.Tree_simple', 'Tree_simple', ([], {}), '()\n', (377, 379), False, 'from chibi.object.descriptor import String, Dict, Tree_simple, Dict_defaults, Set\n'), ((395, 400), 'chibi.object.descriptor.Set', 'Set', ([], {}), '()\n', (398, 400), False, 'from chibi.object.descriptor import String, Dict, Tree_simple, Dict_defaults, Set\n'), ((464, 487), 'chibi.object.descriptor.String', 'String', ([], {'default': '"""hello"""'}), "(default='hello')\n", (470, 487), False, 'from chibi.object.descriptor import String, Dict, Tree_simple, Dict_defaults, Set\n'), ((514, 543), 'chibi.object.descriptor.Dict_defaults', 'Dict_defaults', ([], {'default': '"""word"""'}), "(default='word')\n", (527, 543), False, 'from chibi.object.descriptor import String, Dict, Tree_simple, Dict_defaults, Set\n')]
|
# -*- test-case-name: <INSERT_TEST_MODULE> -*-
# Copyright (c) 2014 <NAME> <<EMAIL>>
# See LICENSE for more details
"""
.. module:: controller
:platform: Linux
:synopsis: Just the __init__.py file
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from txrest.managers.routing import RouteManager
route = RouteManager().route
|
[
"txrest.managers.routing.RouteManager"
] |
[((305, 319), 'txrest.managers.routing.RouteManager', 'RouteManager', ([], {}), '()\n', (317, 319), False, 'from txrest.managers.routing import RouteManager\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 11 18:55:01 2019
@author: kenneth
"""
from __future__ import absolute_import
import numpy as np
from Utils.utils import EvalR
from Utils.Loss import loss
from Utils.kernels import Kernels
class kernelridge(EvalR, loss, Kernels):
    def __init__(self, kernel = None, lamda = None):
        super().__init__()
        if not kernel:
            self.kernel = 'linear'
        else:
            self.kernel = kernel
        if not lamda:
            self.lamda = 100000
        else:
            self.lamda = lamda
        return

    def kernelize(self, x1, x2):
        '''
        :params: x1: NxD
        :params: x2: NxD
        '''
        if self.kernel == 'linear':
            return Kernels.linear(x1, x2)
        elif self.kernel == 'rbf':
            return Kernels.rbf(x1, x2)
        elif self.kernel == 'sigmoid':
            return Kernels.sigmoid(x1, x2)
        elif self.kernel == 'polynomial':
            return Kernels.polynomial(x1, x2)
        elif self.kernel == 'cosine':
            return Kernels.cosine(x1, x2)
        elif self.kernel == 'correlation':
            return Kernels.correlation(x1, x2)

    def fit(self, X, y):
        '''
        :param: X: NxD
        :param: y: Dx1
        '''
        self.X = X
        self.y = y
        self.alpha = np.linalg.solve(self.kernelize(self.X, self.X) + self.lamda*np.eye(self.X.shape[0]), self.y)
        return self

    def predict(self, X):
        '''
        :param: X: NxD
        :return type: Dx1 vector
        '''
        return np.dot((self.kernelize(self.X, X).T * self.y), self.alpha.T)
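
# Sanity note on `fit` above: kernel ridge regression has the closed-form dual
# solution alpha = (K + lamda * I)^{-1} y, which is exactly the linear system
# handed to np.linalg.solve, e.g. (with N = X.shape[0]):
#   K = self.kernelize(X, X)                              # N x N Gram matrix
#   alpha = np.linalg.solve(K + self.lamda * np.eye(N), y)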
#%% Testing
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Normalizer, StandardScaler

X, y = load_boston().data, load_boston().target
X = StandardScaler().fit_transform(X)
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size = .3)
kridge = kernelridge().fit(X_train, Y_train)
kridge.predict(X_test)
kridge.summary(X_test, Y_test, kridge.predict(X_test))

#%%
from sklearn.kernel_ridge import KernelRidge
clf = KernelRidge(alpha=1.0, kernel='linear')
clf.fit(X_train, Y_train)
kridge.summary(X_test, Y_test, clf.predict(X_test))
|
[
"sklearn.preprocessing.StandardScaler",
"sklearn.kernel_ridge.KernelRidge",
"sklearn.model_selection.train_test_split",
"Utils.kernels.Kernels.cosine",
"Utils.kernels.Kernels.polynomial",
"sklearn.datasets.load_boston",
"Utils.kernels.Kernels.sigmoid",
"Utils.kernels.Kernels.rbf",
"Utils.kernels.Kernels.correlation",
"numpy.eye",
"Utils.kernels.Kernels.linear"
] |
[((2019, 2056), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)'}), '(X, y, test_size=0.3)\n', (2035, 2056), False, 'from sklearn.model_selection import train_test_split\n'), ((2237, 2276), 'sklearn.kernel_ridge.KernelRidge', 'KernelRidge', ([], {'alpha': '(1.0)', 'kernel': '"""linear"""'}), "(alpha=1.0, kernel='linear')\n", (2248, 2276), False, 'from sklearn.kernel_ridge import KernelRidge\n'), ((1905, 1918), 'sklearn.datasets.load_boston', 'load_boston', ([], {}), '()\n', (1916, 1918), False, 'from sklearn.datasets import load_boston\n'), ((1925, 1938), 'sklearn.datasets.load_boston', 'load_boston', ([], {}), '()\n', (1936, 1938), False, 'from sklearn.datasets import load_boston\n'), ((1950, 1966), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1964, 1966), False, 'from sklearn.preprocessing import Normalizer, StandardScaler\n'), ((822, 844), 'Utils.kernels.Kernels.linear', 'Kernels.linear', (['x1', 'x2'], {}), '(x1, x2)\n', (836, 844), False, 'from Utils.kernels import Kernels\n'), ((899, 918), 'Utils.kernels.Kernels.rbf', 'Kernels.rbf', (['x1', 'x2'], {}), '(x1, x2)\n', (910, 918), False, 'from Utils.kernels import Kernels\n'), ((977, 1000), 'Utils.kernels.Kernels.sigmoid', 'Kernels.sigmoid', (['x1', 'x2'], {}), '(x1, x2)\n', (992, 1000), False, 'from Utils.kernels import Kernels\n'), ((1479, 1502), 'numpy.eye', 'np.eye', (['self.X.shape[0]'], {}), '(self.X.shape[0])\n', (1485, 1502), True, 'import numpy as np\n'), ((1062, 1088), 'Utils.kernels.Kernels.polynomial', 'Kernels.polynomial', (['x1', 'x2'], {}), '(x1, x2)\n', (1080, 1088), False, 'from Utils.kernels import Kernels\n'), ((1146, 1168), 'Utils.kernels.Kernels.cosine', 'Kernels.cosine', (['x1', 'x2'], {}), '(x1, x2)\n', (1160, 1168), False, 'from Utils.kernels import Kernels\n'), ((1231, 1258), 'Utils.kernels.Kernels.correlation', 'Kernels.correlation', (['x1', 'x2'], {}), '(x1, x2)\n', (1250, 1258), False, 'from Utils.kernels import Kernels\n')]
|
import sys
sys.path.append('../../')
# Global variables
from test1.test_map import PACMAN_MAP
from test1.test_map import WIDTH
from test1.test_map import HEIGHT
# Class
from Challenge import Case
from Challenge import Node
from Challenge import Edge
from Challenge import BoardNodesAndEdges
# Global
# Method
from Challenge import t_update_width_and_height
import unittest
t_update_width_and_height(WIDTH, HEIGHT)
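
# The module-level call above registers the map dimensions before any test
# runs, so that BoardNodesAndEdges can walk PACMAN_MAP with the right bounds.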
class _noding(unittest.TestCase):
    def test_node(self):
        kanban_node = BoardNodesAndEdges(None)
        kanban_node.set_up(PACMAN_MAP)
        # OK
        for k_coord, n1 in kanban_node.nodes.items():
            y1, x1 = k_coord
            print(f'(x {x1} y {y1}) n {n1}')
        print()
        for e1 in kanban_node.edges:
            k1_coord, k2_coord = e1.allays[0], e1.allays[-1]
            y1, x1 = k1_coord.coord
            y2, x2 = k2_coord.coord
            print(f'(x {x1} y {y1}) (x {x2} y {y2})')
            print(f'e {e1}')
            print()
        return


if __name__ == '__main__':
    unittest.main()
|
[
"sys.path.append",
"unittest.main",
"Challenge.BoardNodesAndEdges",
"Challenge.t_update_width_and_height"
] |
[((11, 36), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (26, 36), False, 'import sys\n'), ((379, 419), 'Challenge.t_update_width_and_height', 't_update_width_and_height', (['WIDTH', 'HEIGHT'], {}), '(WIDTH, HEIGHT)\n', (404, 419), False, 'from Challenge import t_update_width_and_height\n'), ((1049, 1064), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1062, 1064), False, 'import unittest\n'), ((503, 527), 'Challenge.BoardNodesAndEdges', 'BoardNodesAndEdges', (['None'], {}), '(None)\n', (521, 527), False, 'from Challenge import BoardNodesAndEdges\n')]
|
from initial_data_prep_code import movielens, amazon, goodreads, beeradvocate
from data_path_constants import get_data_path
from svp_handler import SVPHandler
percent_sample = [ 20, 40, 60, 80, 90, 99 ]
# Which datasets to prep?
for dataset in [
'magazine',
'ml-100k',
## Did not download & preprocess the following in
## the included code, but feel free to download and uncomment
# 'luxury',
# 'video_games',
# 'beeradvocate',
# 'goodreads_comics',
]:
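    # For each dataset: build the full train/test indices once, then materialize
    # every sampling strategy at each percentage in percent_sample.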
print("\n\n\n!!!!!!!! STARTED PROCESSING {} !!!!!!!!\n\n\n".format(dataset))
if dataset in [ 'ml-100k' ]: total_data = movielens.prep(dataset)
elif dataset in [ 'luxury', 'magazine', 'video_games' ]: total_data = amazon.prep(dataset)
elif dataset in [ 'goodreads_comics' ]: total_data = goodreads.prep(dataset)
elif dataset in [ 'beeradvocate' ]: total_data = beeradvocate.prep(dataset)
# Store original data
total_data.save_data(get_data_path(dataset))
# Sampling
for train_test_split in [ '20_percent_hist', 'leave_2' ]:
total_data.complete_data_stats = None # Since task changed
path_uptil_now = get_data_path(dataset) + "/" + train_test_split + "/"
# Make full-data (No sampling)
total_data.train_test_split(train_test_split)
print("\n{} split, Overall:".format(train_test_split))
total_data.save_index(path_uptil_now + "/complete_data/")
# Frequency sample from user hist (Stratified)
print("\n{} split, user history random sampling".format(train_test_split))
for percent in percent_sample:
total_data.load_index(path_uptil_now + "/complete_data/") # Re-load index map
total_data.frequency_sample(percent, 0)
total_data.save_index(path_uptil_now + str(percent) + "_perc_freq_user_rns")
# Sample users randomly
print("\n{} split, user random sampling".format(train_test_split))
for percent in percent_sample:
total_data.load_index(path_uptil_now + "/complete_data/") # Re-load index map
total_data.user_random_sample(percent)
total_data.save_index(path_uptil_now + str(percent) + "_perc_user_rns")
# Sample interactions randomly
print("\n{} split, interaction random sampling".format(train_test_split))
for percent in percent_sample:
total_data.load_index(path_uptil_now + "/complete_data/") # Re-load index map
total_data.interaction_random_sample(percent)
total_data.save_index(path_uptil_now + str(percent) + "_perc_interaction_rns")
# Temporal sampling
print("\n{} split, user history temporal sampling".format(train_test_split))
for percent in percent_sample:
total_data.load_index(path_uptil_now + "/complete_data/") # Re-load index map
total_data.temporal_sample(percent)
total_data.save_index(path_uptil_now + str(percent) + "_perc_temporal")
# Remove tail users sampling
print("\n{} split, tail user sampling".format(train_test_split))
for percent in percent_sample:
total_data.load_index(path_uptil_now + "/complete_data/") # Re-load index map
total_data.tail_user_remove(percent)
total_data.save_index(path_uptil_now + str(percent) + "_perc_tail_user_remove")
# Pagerank based sampling
print("\n{} split, pagerank sampling".format(train_test_split))
for percent in percent_sample:
total_data.load_index(path_uptil_now + "/complete_data/") # Re-load index map
total_data.pagerank_sample(percent)
total_data.save_index(path_uptil_now + str(percent) + "_perc_pagerank")
# RW based sampling
print("\n{} split, random walk sampling".format(train_test_split))
for percent in percent_sample:
total_data.load_index(path_uptil_now + "/complete_data/") # Re-load index map
total_data.random_walk_sample(percent)
total_data.save_index(path_uptil_now + str(percent) + "_perc_random_walk")
# Forest-fire based sampling
print("\n{} split, forest fire sampling".format(train_test_split))
for percent in percent_sample:
total_data.load_index(path_uptil_now + "/complete_data/") # Re-load index map
total_data.forest_fire_sample(percent)
total_data.save_index(path_uptil_now + str(percent) + "_perc_forest_fire")
# Sample interactions according to SVP
hyper_params = {}
hyper_params['dataset'] = dataset
hyper_params['sampling'] = 'complete_data' # While training the proxy model
for proxy_model in [ 'bias_only', 'MF_dot' ]:
scenarios = [ 'sequential' ] if train_test_split == 'leave_2' else [ 'implicit', 'explicit' ]
for loss_type in scenarios:
print() ; svp_handler = SVPHandler(proxy_model, loss_type, hyper_params)
for sampling in [
'forgetting_events',
'forgetting_events_propensity',
'forgetting_events_user',
'forgetting_events_user_propensity',
]:
print("\n{} split, SVP: {}_{}, {} loss".format(train_test_split, proxy_model, sampling, loss_type))
for percent in percent_sample:
total_data.load_index(path_uptil_now + "/complete_data/") # Re-load index map
total_data.svp_sample(percent, svp_handler, sampling)
total_data.save_index(path_uptil_now + "svp_{}_{}/{}_perc_{}".format(proxy_model, loss_type, percent, sampling))
|
[
"initial_data_prep_code.amazon.prep",
"data_path_constants.get_data_path",
"initial_data_prep_code.beeradvocate.prep",
"initial_data_prep_code.goodreads.prep",
"svp_handler.SVPHandler",
"initial_data_prep_code.movielens.prep"
] |
[((588, 611), 'initial_data_prep_code.movielens.prep', 'movielens.prep', (['dataset'], {}), '(dataset)\n', (602, 611), False, 'from initial_data_prep_code import movielens, amazon, goodreads, beeradvocate\n'), ((905, 927), 'data_path_constants.get_data_path', 'get_data_path', (['dataset'], {}), '(dataset)\n', (918, 927), False, 'from data_path_constants import get_data_path\n'), ((683, 703), 'initial_data_prep_code.amazon.prep', 'amazon.prep', (['dataset'], {}), '(dataset)\n', (694, 703), False, 'from initial_data_prep_code import movielens, amazon, goodreads, beeradvocate\n'), ((758, 781), 'initial_data_prep_code.goodreads.prep', 'goodreads.prep', (['dataset'], {}), '(dataset)\n', (772, 781), False, 'from initial_data_prep_code import movielens, amazon, goodreads, beeradvocate\n'), ((4413, 4461), 'svp_handler.SVPHandler', 'SVPHandler', (['proxy_model', 'loss_type', 'hyper_params'], {}), '(proxy_model, loss_type, hyper_params)\n', (4423, 4461), False, 'from svp_handler import SVPHandler\n'), ((832, 858), 'initial_data_prep_code.beeradvocate.prep', 'beeradvocate.prep', (['dataset'], {}), '(dataset)\n', (849, 858), False, 'from initial_data_prep_code import movielens, amazon, goodreads, beeradvocate\n'), ((1082, 1104), 'data_path_constants.get_data_path', 'get_data_path', (['dataset'], {}), '(dataset)\n', (1095, 1104), False, 'from data_path_constants import get_data_path\n')]
|
# -*- coding: utf-8 -*-
import wx


class Interactor(object):
    """Connects the UI events with the Presenter class."""

    def Connect(self, presenter, view):
        """Listens to UI events and assigns an event handler on the Presenter."""
        self.presenter = presenter
        self.view = view

        # Menu Archivo
        view.Bind(wx.EVT_MENU, self.OnOpenDatasetClicked, view.mItemDataset)
        view.Bind(wx.EVT_MENU, self.OnExportImageClicked, view.mItemExportImage)
        view.Bind(wx.EVT_MENU, self.OnExportCsvClicked, view.mItemExportCsv)
        view.Bind(wx.EVT_MENU, self.OnExitClicked, view.mItemExit)
        # Menu Proceso
        view.Bind(wx.EVT_MENU, self.OnProcessDataset, view.mItemProcess)
        view.Bind(wx.EVT_MENU, self.OnPlotResults, view.mItemPlot)
        view.Bind(wx.EVT_CLOSE, self.OnExitClicked)
        # Menu Ayuda
        view.Bind(wx.EVT_MENU, self.OnHelpGetHelp, view.mItemHelp)
        view.Bind(wx.EVT_MENU, self.OnHelpAbout, view.mItemAbout)

        view.Bind(view.EVT_FILE_SELECTED, self.OnFileSelected)
        view.Bind(view.EVT_EXPORT_CSV_FILE_SELECTED, self.OnExportCsvFileSelected)
        view.Bind(view.EVT_EXPORT_PNG_FILE_SELECTED, self.OnExportPngFileSelected)

    def OnOpenDatasetClicked(self, evt):
        self.presenter.ShowFileDialog()

    def OnExportImageClicked(self, evt):
        self.presenter.ShowExportImageDialog()

    def OnExportPngFileSelected(self, evt):
        self.presenter.ExportPngFile(evt.path)

    def OnExportCsvClicked(self, evt):
        self.presenter.ShowExportCsvDialog()

    def OnExportCsvFileSelected(self, evt):
        self.presenter.ExportCsvFile(evt.path)

    def OnFileSelected(self, evt):
        self.presenter.SetSelectedFile(evt.path)

    def OnProcessDataset(self, evt):
        self.presenter.ShowDatasetConfigDialog()
        # self.presenter.Process()

    def OnHelpGetHelp(self, evt):
        wx.BeginBusyCursor()
        import webbrowser
        webbrowser.open("https://github.com/iturricf/clusteris/wiki/How-to-use-Clusteris")
        wx.EndBusyCursor()

    def OnHelpAbout(self, evt):
        box = wx.MessageDialog(None, 'ClusteRIS v1.0 \nAplicación desarrollada para lograr el agrupamiento de datos mediante la técnica de algoritmos genéticos. \n\n Autores: <NAME>, <NAME> y <NAME>.', 'Acerca de ClusteRIS', wx.OK)
        box.ShowModal()

    def OnPlotResults(self, evt):
        self.presenter.ShowPlotConfigDialog()
        # self.presenter.Plot()

    def OnExitClicked(self, evt):
        self.presenter.Close()
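
# Typical wiring (hypothetical surrounding code): the application builds the
# MVP triad once at startup and lets the interactor own the event routing:
#   interactor = Interactor()
#   interactor.Connect(presenter, view)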
|
[
"webbrowser.open",
"wx.BeginBusyCursor",
"wx.MessageDialog",
"wx.EndBusyCursor"
] |
[((1920, 1940), 'wx.BeginBusyCursor', 'wx.BeginBusyCursor', ([], {}), '()\n', (1938, 1940), False, 'import wx\n'), ((1975, 2062), 'webbrowser.open', 'webbrowser.open', (['"""https://github.com/iturricf/clusteris/wiki/How-to-use-Clusteris"""'], {}), "(\n 'https://github.com/iturricf/clusteris/wiki/How-to-use-Clusteris')\n", (1990, 2062), False, 'import webbrowser\n'), ((2066, 2084), 'wx.EndBusyCursor', 'wx.EndBusyCursor', ([], {}), '()\n', (2082, 2084), False, 'import wx\n'), ((2132, 2358), 'wx.MessageDialog', 'wx.MessageDialog', (['None', '"""ClusteRIS v1.0 \nAplicación desarrollada para lograr el agrupamiento de datos mediante la técnica de algoritmos genéticos. \n\n Autores: <NAME>, <NAME> y <NAME>."""', '"""Acerca de CluteRIS"""', 'wx.OK'], {}), '(None,\n """ClusteRIS v1.0 \nAplicación desarrollada para lograr el agrupamiento de datos mediante la técnica de algoritmos genéticos. \n\n Autores: <NAME>, <NAME> y <NAME>."""\n , \'Acerca de CluteRIS\', wx.OK)\n', (2148, 2358), False, 'import wx\n')]
|
from django.db import models
from cart import models as cart_models
from django.db.models.signals import post_save
from django.dispatch import receiver
class Order(models.Model):
    cart = models.OneToOneField(
        cart_models.Cart,
        related_name='order',
        on_delete=models.PROTECT
    )
    delivery_status_choices = (('1', 'В процессе оформления'), ('2', 'На рассмотрении модерации'), ('3', 'Отменен'),
                               ('4', 'Заказан'), ('5', 'Доставка'), ('6', 'Доставлен'))
    delivery_status = models.CharField(
        verbose_name='Статус заказа',
        default='1',
        max_length=20,
        choices=delivery_status_choices
    )
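    # delivery_status codes (labels above are Russian): '1' = being placed,
    # '2' = under moderation review, '3' = cancelled, '4' = ordered,
    # '5' = in delivery, '6' = delivered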
    comment = models.TextField(
        verbose_name='Комментарий',
        blank=True,
        null=True
    )
    type_of_payment = models.CharField(
        verbose_name='Тип оплаты',
        default='1',
        max_length=20
    )
    date_add = models.DateTimeField(
        auto_now=False,
        auto_now_add=True,
        verbose_name='Дата создания заказа'
    )
    date_last_change = models.DateTimeField(
        auto_now=True,
        auto_now_add=False,
        verbose_name='Дата последнего изменения заказа'
    )

    def __str__(self):
        return f'Заказ №{self.pk}, статус заказа: {self.get_delivery_status_display()}.'

    class Meta:
        verbose_name = 'Заказ'
        verbose_name_plural = 'Заказы'

class AddressInOrder(models.Model):
    order = models.ForeignKey(
        Order,
        on_delete=models.PROTECT,
        related_name='address_in_order'
    )
    country = models.CharField(
        'Страна',
        max_length=20,
        blank=True,
        null=True
    )
    city = models.CharField(
        'Город',
        max_length=20,
        blank=True,
        null=True
    )
    index = models.CharField(
        'Индекс',
        max_length=15,
        blank=True,
        null=True
    )
    address1 = models.CharField(
        'Адрес1',
        max_length=50,
        blank=True,
        null=True
    )
    address2 = models.CharField(
        'Адрес2',
        max_length=50,
        blank=True,
        null=True
    )

    def __str__(self):
        return f'Профиль адрес в заказе №{self.pk}.'

    class Meta:
        verbose_name = 'Профиль адрес в заказе'
        verbose_name_plural = 'Профиль адреса в заказах'
|
[
"django.db.models.OneToOneField",
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.DateTimeField"
] |
[((192, 283), 'django.db.models.OneToOneField', 'models.OneToOneField', (['cart_models.Cart'], {'related_name': '"""order"""', 'on_delete': 'models.PROTECT'}), "(cart_models.Cart, related_name='order', on_delete=\n models.PROTECT)\n", (212, 283), False, 'from django.db import models\n'), ((536, 645), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Статус заказа"""', 'default': '(1)', 'max_length': '(20)', 'choices': 'delivery_status_choices'}), "(verbose_name='Статус заказа', default=1, max_length=20,\n choices=delivery_status_choices)\n", (552, 645), False, 'from django.db import models\n'), ((694, 761), 'django.db.models.TextField', 'models.TextField', ([], {'verbose_name': '"""Комментарий"""', 'blank': '(True)', 'null': '(True)'}), "(verbose_name='Комментарий', blank=True, null=True)\n", (710, 761), False, 'from django.db import models\n'), ((814, 883), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Тип оплаты"""', 'default': '(1)', 'max_length': '(20)'}), "(verbose_name='Тип оплаты', default=1, max_length=20)\n", (830, 883), False, 'from django.db import models\n'), ((929, 1026), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(False)', 'auto_now_add': '(True)', 'verbose_name': '"""Дата создания заказа"""'}), "(auto_now=False, auto_now_add=True, verbose_name=\n 'Дата создания заказа')\n", (949, 1026), False, 'from django.db import models\n'), ((1075, 1184), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'auto_now_add': '(False)', 'verbose_name': '"""Дата последнего изменения заказа"""'}), "(auto_now=True, auto_now_add=False, verbose_name=\n 'Дата последнего изменения заказа')\n", (1095, 1184), False, 'from django.db import models\n'), ((1460, 1548), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Order'], {'on_delete': 'models.PROTECT', 'related_name': '"""address_in_order"""'}), "(Order, on_delete=models.PROTECT, related_name=\n 'address_in_order')\n", (1477, 1548), False, 'from django.db import models\n'), ((1588, 1652), 'django.db.models.CharField', 'models.CharField', (['"""Страна"""'], {'max_length': '(20)', 'blank': '(True)', 'null': '(True)'}), "('Страна', max_length=20, blank=True, null=True)\n", (1604, 1652), False, 'from django.db import models\n'), ((1702, 1765), 'django.db.models.CharField', 'models.CharField', (['"""Город"""'], {'max_length': '(20)', 'blank': '(True)', 'null': '(True)'}), "('Город', max_length=20, blank=True, null=True)\n", (1718, 1765), False, 'from django.db import models\n'), ((1816, 1880), 'django.db.models.CharField', 'models.CharField', (['"""Индекс"""'], {'max_length': '(15)', 'blank': '(True)', 'null': '(True)'}), "('Индекс', max_length=15, blank=True, null=True)\n", (1832, 1880), False, 'from django.db import models\n'), ((1934, 1998), 'django.db.models.CharField', 'models.CharField', (['"""Адрес1"""'], {'max_length': '(50)', 'blank': '(True)', 'null': '(True)'}), "('Адрес1', max_length=50, blank=True, null=True)\n", (1950, 1998), False, 'from django.db import models\n'), ((2052, 2116), 'django.db.models.CharField', 'models.CharField', (['"""Адрес2"""'], {'max_length': '(50)', 'blank': '(True)', 'null': '(True)'}), "('Адрес2', max_length=50, blank=True, null=True)\n", (2068, 2116), False, 'from django.db import models\n')]
|
from collections import deque

q1 = deque()
q2 = deque()

player_2 = False
with open('in', 'r') as f:
    f.readline()
    for line in f.readlines():
        try:
            i = int(line.strip())
            if player_2:
                q2.append(i)
            else:
                q1.append(i)
        except Exception:
            player_2 = True

while len(q1) > 0 and len(q2) > 0:
    card1 = q1.popleft()
    card2 = q2.popleft()
    if card1 > card2:
        q1.append(max(card1, card2))
        q1.append(min(card1, card2))
    else:
        q2.append(max(card1, card2))
        q2.append(min(card1, card2))
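
# Score the winner's deck: popping from the right walks the deck bottom-up,
# so the bottom card is worth its value x 1, the next x 2, up to the top card.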
q = q1 if len(q2) == 0 else q2
result = 0
for i in range(1, len(q) + 1):
    result += i * q.pop()
print(result)
|
[
"collections.deque"
] |
[((36, 43), 'collections.deque', 'deque', ([], {}), '()\n', (41, 43), False, 'from collections import deque\n'), ((49, 56), 'collections.deque', 'deque', ([], {}), '()\n', (54, 56), False, 'from collections import deque\n')]
|
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.build_graph.address import Address, BuildFileAddress
from pants.engine.addressable import BuildFileAddresses
from pants.engine.fs import Digest, FileContent, InputFilesContent, Workspace
from pants.engine.interactive_runner import InteractiveRunner
from pants.rules.core import run
from pants.rules.core.binary import CreatedBinary
from pants.testutil.console_rule_test_base import ConsoleRuleTestBase
from pants.testutil.engine.util import MockConsole, run_rule
class RunTest(ConsoleRuleTestBase):
    goal_cls = run.Run

    def create_mock_binary(self, program_text: bytes) -> CreatedBinary:
        input_files_content = InputFilesContent((
            FileContent(path='program.py', content=program_text, is_executable=True),
        ))
        digest, = self.scheduler.product_request(Digest, [input_files_content])
        return CreatedBinary(
            binary_name='program.py',
            digest=digest,
        )

    def single_target_run(self, *, console: MockConsole, program_text: bytes, spec: str):
        workspace = Workspace(self.scheduler)
        interactive_runner = InteractiveRunner(self.scheduler)
        address = Address.parse(spec)
        bfa = BuildFileAddress(
            build_file=None,
            target_name=address.target_name,
            rel_path=f'{address.spec_path}/BUILD'
        )
        build_file_addresses = BuildFileAddresses((bfa,))
        res = run_rule(run.run, console, workspace, interactive_runner, build_file_addresses, {
            (CreatedBinary, Address): lambda _: self.create_mock_binary(program_text)
        })
        return res

    def test_normal_run(self) -> None:
        console = MockConsole(use_colors=False)
        program_text = b'#!/usr/bin/python\nprint("hello")'
        res = self.single_target_run(
            console=console,
            program_text=program_text,
            spec='some/addr'
        )
        self.assertEqual(res.exit_code, 0)
        self.assertEqual(console.stdout.getvalue(), "Running target: some/addr:addr\nsome/addr:addr ran successfully.\n")
        self.assertEqual(console.stderr.getvalue(), "")

    def test_failed_run(self) -> None:
        console = MockConsole(use_colors=False)
        program_text = b'#!/usr/bin/python\nraise RuntimeError("foo")'
        res = self.single_target_run(
            console=console,
            program_text=program_text,
            spec='some/addr'
        )
        self.assertEqual(res.exit_code, 1)
        self.assertEqual(console.stdout.getvalue(), "Running target: some/addr:addr\n")
        self.assertEqual(console.stderr.getvalue(), "some/addr:addr failed with code 1!\n")
|
[
"pants.testutil.engine.util.MockConsole",
"pants.engine.fs.FileContent",
"pants.build_graph.address.Address.parse",
"pants.build_graph.address.BuildFileAddress",
"pants.engine.interactive_runner.InteractiveRunner",
"pants.rules.core.binary.CreatedBinary",
"pants.engine.fs.Workspace",
"pants.engine.addressable.BuildFileAddresses"
] |
[((956, 1010), 'pants.rules.core.binary.CreatedBinary', 'CreatedBinary', ([], {'binary_name': '"""program.py"""', 'digest': 'digest'}), "(binary_name='program.py', digest=digest)\n", (969, 1010), False, 'from pants.rules.core.binary import CreatedBinary\n'), ((1135, 1160), 'pants.engine.fs.Workspace', 'Workspace', (['self.scheduler'], {}), '(self.scheduler)\n', (1144, 1160), False, 'from pants.engine.fs import Digest, FileContent, InputFilesContent, Workspace\n'), ((1186, 1219), 'pants.engine.interactive_runner.InteractiveRunner', 'InteractiveRunner', (['self.scheduler'], {}), '(self.scheduler)\n', (1203, 1219), False, 'from pants.engine.interactive_runner import InteractiveRunner\n'), ((1234, 1253), 'pants.build_graph.address.Address.parse', 'Address.parse', (['spec'], {}), '(spec)\n', (1247, 1253), False, 'from pants.build_graph.address import Address, BuildFileAddress\n'), ((1265, 1375), 'pants.build_graph.address.BuildFileAddress', 'BuildFileAddress', ([], {'build_file': 'None', 'target_name': 'address.target_name', 'rel_path': 'f"""{address.spec_path}/BUILD"""'}), "(build_file=None, target_name=address.target_name, rel_path\n =f'{address.spec_path}/BUILD')\n", (1281, 1375), False, 'from pants.build_graph.address import Address, BuildFileAddress\n'), ((1422, 1448), 'pants.engine.addressable.BuildFileAddresses', 'BuildFileAddresses', (['(bfa,)'], {}), '((bfa,))\n', (1440, 1448), False, 'from pants.engine.addressable import BuildFileAddresses\n'), ((1695, 1724), 'pants.testutil.engine.util.MockConsole', 'MockConsole', ([], {'use_colors': '(False)'}), '(use_colors=False)\n', (1706, 1724), False, 'from pants.testutil.engine.util import MockConsole, run_rule\n'), ((2163, 2192), 'pants.testutil.engine.util.MockConsole', 'MockConsole', ([], {'use_colors': '(False)'}), '(use_colors=False)\n', (2174, 2192), False, 'from pants.testutil.engine.util import MockConsole, run_rule\n'), ((788, 860), 'pants.engine.fs.FileContent', 'FileContent', ([], {'path': '"""program.py"""', 'content': 'program_text', 'is_executable': '(True)'}), "(path='program.py', content=program_text, is_executable=True)\n", (799, 860), False, 'from pants.engine.fs import Digest, FileContent, InputFilesContent, Workspace\n')]
|
"""
Main file
"""
import argparse
import logging
import random
import gym
from tqdm import trange
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from common_definitions import CHECKPOINTS_PATH, TOTAL_EPISODES, TF_LOG_DIR, UNBALANCE_P
from model import Brain
from utils import Tensorboard
if __name__ == "__main__":
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser(
prog="Deep Deterministic Policy Gradient (DDPG)",
description="Deep Deterministic Policy Gradient (DDPG) in Tensorflow 2"
)
parser.add_argument('--env', type=str, nargs='?',
default="BipedalWalker-v3",
help='The OpenAI Gym environment to train on, '
'e.g. BipedalWalker-v3, LunarLanderContinuous-v2,'
' Pendulum-v0')
parser.add_argument('--render_env', type=bool, nargs='?', default=True,
help='Render the environment to be visually visible')
parser.add_argument('--train', type=bool, nargs='?', default=True,
help='Train the network on the modified DDPG algorithm')
parser.add_argument('--use_noise', type=bool, nargs='?', default=True,
help='OU Noise will be applied to the policy action')
parser.add_argument('--eps_greedy', type=float, nargs='?', default=0.95,
help="The epsilon for Epsilon-greedy in the policy's action")
parser.add_argument('--warm_up', type=bool, nargs='?', default=1,
help='Following recommendation from OpenAI Spinning Up, the actions in the '
'early epochs can be set random to increase exploration. This warm up '
'defines how many epochs are initially set to do this.')
parser.add_argument('--save_weights', type=bool, nargs='?', default=True,
help='Save the weight of the network in the defined checkpoint file '
'directory.')
args = parser.parse_args()
RL_TASK = args.env
RENDER_ENV = args.render_env
LEARN = args.train
USE_NOISE = args.use_noise
WARM_UP = args.warm_up
SAVE_WEIGHTS = args.save_weights
EPS_GREEDY = args.eps_greedy
# Step 1. create the gym environment
env = gym.make(RL_TASK)
action_space_high = env.action_space.high[0]
action_space_low = env.action_space.low[0]
brain = Brain(env.observation_space.shape[0], env.action_space.shape[0], action_space_high,
action_space_low)
tensorboard = Tensorboard(log_dir=TF_LOG_DIR)
# load weights if available
logging.info("Loading weights from %s*, make sure the folder exists", CHECKPOINTS_PATH)
brain.load_weights(CHECKPOINTS_PATH)
# all the metrics
acc_reward = tf.keras.metrics.Sum('reward', dtype=tf.float32)
actions_squared = tf.keras.metrics.Mean('actions', dtype=tf.float32)
Q_loss = tf.keras.metrics.Mean('Q_loss', dtype=tf.float32)
A_loss = tf.keras.metrics.Mean('A_loss', dtype=tf.float32)
# To store reward history of each episode
ep_reward_list = []
# To store average reward history of last few episodes
avg_reward_list = []
# run iteration
with trange(TOTAL_EPISODES) as t:
for ep in t:
prev_state = env.reset()
acc_reward.reset_states()
actions_squared.reset_states()
Q_loss.reset_states()
A_loss.reset_states()
brain.noise.reset()
for _ in range(2000):
if RENDER_ENV: # render the environment into GUI
env.render()
# Recieve state and reward from environment.
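                # The chance of acting greedily (rather than randomly) anneals
                # from EPS_GREEDY up to 1 over training; before WARM_UP epochs
                # the action is always random, for exploration.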
                cur_act = brain.act(tf.expand_dims(prev_state, 0),
                                    _notrandom=(ep >= WARM_UP) and
                                    (random.random() < EPS_GREEDY + (1 - EPS_GREEDY) * ep / TOTAL_EPISODES),
                                    noise=USE_NOISE)
                state, reward, done, _ = env.step(cur_act)
                brain.remember(prev_state, reward, state, int(done))

                # update weights
                if LEARN:
                    c, a = brain.learn(brain.buffer.get_batch(unbalance_p=UNBALANCE_P))
                    Q_loss(c)
                    A_loss(a)

                # post update for next step
                acc_reward(reward)
                actions_squared(np.square(cur_act / action_space_high))
                prev_state = state

                if done:
                    break

            ep_reward_list.append(acc_reward.result().numpy())
            # Mean of last 40 episodes
            avg_reward = np.mean(ep_reward_list[-40:])
            avg_reward_list.append(avg_reward)

            # print the average reward
            t.set_postfix(r=avg_reward)
            tensorboard(ep, acc_reward, actions_squared, Q_loss, A_loss)

            # save weights
            if ep % 5 == 0 and SAVE_WEIGHTS:
                brain.save_weights(CHECKPOINTS_PATH)

    env.close()
    brain.save_weights(CHECKPOINTS_PATH)

    logging.info("Training done...")

    # Plotting graph
    # Episodes versus Avg. Rewards
    plt.plot(avg_reward_list)
    plt.xlabel("Episode")
    plt.ylabel("Avg. Episodic Reward")
    plt.show()
|
[
"tensorflow.expand_dims",
"matplotlib.pyplot.show",
"gym.make",
"argparse.ArgumentParser",
"logging.basicConfig",
"tensorflow.keras.metrics.Mean",
"matplotlib.pyplot.plot",
"tqdm.trange",
"numpy.square",
"logging.getLogger",
"logging.info",
"random.random",
"numpy.mean",
"tensorflow.keras.metrics.Sum",
"matplotlib.pyplot.ylabel",
"model.Brain",
"matplotlib.pyplot.xlabel",
"utils.Tensorboard"
] |
[((351, 372), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (370, 372), False, 'import logging\n'), ((434, 584), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""Deep Deterministic Policy Gradient (DDPG)"""', 'description': '"""Deep Deterministic Policy Gradient (DDPG) in Tensorflow 2"""'}), "(prog='Deep Deterministic Policy Gradient (DDPG)',\n description='Deep Deterministic Policy Gradient (DDPG) in Tensorflow 2')\n", (457, 584), False, 'import argparse\n'), ((2392, 2409), 'gym.make', 'gym.make', (['RL_TASK'], {}), '(RL_TASK)\n', (2400, 2409), False, 'import gym\n'), ((2519, 2624), 'model.Brain', 'Brain', (['env.observation_space.shape[0]', 'env.action_space.shape[0]', 'action_space_high', 'action_space_low'], {}), '(env.observation_space.shape[0], env.action_space.shape[0],\n action_space_high, action_space_low)\n', (2524, 2624), False, 'from model import Brain\n'), ((2657, 2688), 'utils.Tensorboard', 'Tensorboard', ([], {'log_dir': 'TF_LOG_DIR'}), '(log_dir=TF_LOG_DIR)\n', (2668, 2688), False, 'from utils import Tensorboard\n'), ((2726, 2817), 'logging.info', 'logging.info', (['"""Loading weights from %s*, make sure the folder exists"""', 'CHECKPOINTS_PATH'], {}), "('Loading weights from %s*, make sure the folder exists',\n CHECKPOINTS_PATH)\n", (2738, 2817), False, 'import logging\n'), ((2895, 2943), 'tensorflow.keras.metrics.Sum', 'tf.keras.metrics.Sum', (['"""reward"""'], {'dtype': 'tf.float32'}), "('reward', dtype=tf.float32)\n", (2915, 2943), True, 'import tensorflow as tf\n'), ((2966, 3016), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', (['"""actions"""'], {'dtype': 'tf.float32'}), "('actions', dtype=tf.float32)\n", (2987, 3016), True, 'import tensorflow as tf\n'), ((3030, 3079), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', (['"""Q_loss"""'], {'dtype': 'tf.float32'}), "('Q_loss', dtype=tf.float32)\n", (3051, 3079), True, 'import tensorflow as tf\n'), ((3093, 3142), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', (['"""A_loss"""'], {'dtype': 'tf.float32'}), "('A_loss', dtype=tf.float32)\n", (3114, 3142), True, 'import tensorflow as tf\n'), ((5164, 5196), 'logging.info', 'logging.info', (['"""Training done..."""'], {}), "('Training done...')\n", (5176, 5196), False, 'import logging\n'), ((5258, 5283), 'matplotlib.pyplot.plot', 'plt.plot', (['avg_reward_list'], {}), '(avg_reward_list)\n', (5266, 5283), True, 'import matplotlib.pyplot as plt\n'), ((5288, 5309), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode"""'], {}), "('Episode')\n", (5298, 5309), True, 'import matplotlib.pyplot as plt\n'), ((5314, 5348), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Avg. Epsiodic Reward"""'], {}), "('Avg. Epsiodic Reward')\n", (5324, 5348), True, 'import matplotlib.pyplot as plt\n'), ((5353, 5363), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5361, 5363), True, 'import matplotlib.pyplot as plt\n'), ((3328, 3350), 'tqdm.trange', 'trange', (['TOTAL_EPISODES'], {}), '(TOTAL_EPISODES)\n', (3334, 3350), False, 'from tqdm import trange\n'), ((377, 396), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (394, 396), False, 'import logging\n'), ((4745, 4774), 'numpy.mean', 'np.mean', (['ep_reward_list[-40:]'], {}), '(ep_reward_list[-40:])\n', (4752, 4774), True, 'import numpy as np\n'), ((3828, 3857), 'tensorflow.expand_dims', 'tf.expand_dims', (['prev_state', '(0)'], {}), '(prev_state, 0)\n', (3842, 3857), True, 'import tensorflow as tf\n'), ((4492, 4530), 'numpy.square', 'np.square', (['(cur_act / action_space_high)'], {}), '(cur_act / action_space_high)\n', (4501, 4530), True, 'import numpy as np\n'), ((3927, 3942), 'random.random', 'random.random', ([], {}), '()\n', (3940, 3942), False, 'import random\n')]
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Desc :Log Injection
"""
from flask import Flask
from flask import request
import logging
logging.basicConfig(level=logging.DEBUG)
app = Flask(__name__)
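
# Stripping CR/LF from user-supplied input before logging prevents an attacker
# from injecting forged lines into the log, which is why /good1 is marked "Good".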
@app.route('/good1')
def good1():
    name = request.args.get('name')
    name = name.replace('\r\n','').replace('\n','')
    logging.info('User name: ' + name)  # Good
    return 'good1'


if __name__ == '__main__':
    app.debug = True
    handler = logging.FileHandler('log')
    app.logger.addHandler(handler)
    app.run()
|
[
"logging.FileHandler",
"flask.request.args.get",
"logging.basicConfig",
"flask.Flask",
"logging.info"
] |
[((143, 183), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (162, 183), False, 'import logging\n'), ((191, 206), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (196, 206), False, 'from flask import Flask\n'), ((253, 277), 'flask.request.args.get', 'request.args.get', (['"""name"""'], {}), "('name')\n", (269, 277), False, 'from flask import request\n'), ((334, 368), 'logging.info', 'logging.info', (["('User name: ' + name)"], {}), "('User name: ' + name)\n", (346, 368), False, 'import logging\n'), ((458, 484), 'logging.FileHandler', 'logging.FileHandler', (['"""log"""'], {}), "('log')\n", (477, 484), False, 'import logging\n')]
|
import flatlib
from flatlib.chart import Chart
from flatlib.datetime import Datetime
from flatlib.geopos import GeoPos
def generate_data():
    date = Datetime('2015/01/13', '17:00', '+10:00')
    print(date)
    pos = GeoPos('38n32', '8w54')
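    # flatlib position strings encode degrees and minutes with a hemisphere
    # letter: '38n32' is 38°32' North, '8w54' is 8°54' West.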
    print(pos)
    chart = Chart(date, pos)
    print(chart)


if __name__ == '__main__':
    generate_data()
|
[
"flatlib.datetime.Datetime",
"flatlib.chart.Chart",
"flatlib.geopos.GeoPos"
] |
[((153, 194), 'flatlib.datetime.Datetime', 'Datetime', (['"""2015/01/13"""', '"""17:00"""', '"""+10:00"""'], {}), "('2015/01/13', '17:00', '+10:00')\n", (161, 194), False, 'from flatlib.datetime import Datetime\n'), ((222, 245), 'flatlib.geopos.GeoPos', 'GeoPos', (['"""38n32"""', '"""8w54"""'], {}), "('38n32', '8w54')\n", (228, 245), False, 'from flatlib.geopos import GeoPos\n'), ((274, 290), 'flatlib.chart.Chart', 'Chart', (['date', 'pos'], {}), '(date, pos)\n', (279, 290), False, 'from flatlib.chart import Chart\n')]
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='Tilibot',
version='1.0', # Fix
description='Tiliqua Biomechanics Emulation',
author='<NAME>',
author_email='<EMAIL>',
packages=['distutils', 'distutils.command'], # Fix
)
|
[
"distutils.core.setup"
] |
[((61, 239), 'distutils.core.setup', 'setup', ([], {'name': '"""Tilibot"""', 'version': '"""1.0"""', 'description': '"""Tiliqua Biomechanics Emulation"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'packages': "['distutils', 'distutils.command']"}), "(name='Tilibot', version='1.0', description=\n 'Tiliqua Biomechanics Emulation', author='<NAME>', author_email=\n '<EMAIL>', packages=['distutils', 'distutils.command'])\n", (66, 239), False, 'from distutils.core import setup\n')]
|
from fidget.backend.QtGui import QIcon
# noinspection PyUnresolvedReferences
import fidget.backend._resources
class LazyIcon:
    def __init__(self, path):
        self.path = path
        self._instance = None

    def __call__(self, *args, **kwargs):
        if not self._instance:
            self._instance = QIcon(self.path)
        return self._instance
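
# LazyIcon defers QIcon construction until an icon is first requested and then
# caches the instance, so this module can be imported before a Qt application
# (and its resource system) has been initialized.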
add_col_left_icon = LazyIcon(':fidget/feather/corner-left-down.svg')
add_col_right_icon = LazyIcon(':fidget/feather/corner-right-down.svg')
add_row_above_icon = LazyIcon(':fidget/feather/corner-up-right.svg')
add_row_below_icon = LazyIcon(':fidget/feather/corner-down-right.svg')
del_col_icon = LazyIcon(':fidget/feather/delete.svg')
del_row_icon = del_col_icon
error_icon = LazyIcon(':fidget/feather/alert-triangle.svg')
ok_icon = LazyIcon(':fidget/feather/check.svg')
|
[
"fidget.backend.QtGui.QIcon"
] |
[((315, 331), 'fidget.backend.QtGui.QIcon', 'QIcon', (['self.path'], {}), '(self.path)\n', (320, 331), False, 'from fidget.backend.QtGui import QIcon\n')]
|
from django import template
from django.conf import settings

register = template.Library()


@register.assignment_tag
def get_language_byindex(index):
    lang = ('', '')
    try:
        lang = settings.LANGUAGES[index]
    except (KeyError, IndexError):
        pass
    return lang
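
# Template usage (after loading this tag library; the load name depends on the
# module's filename): {% get_language_byindex 0 as lang %} stores the
# (code, name) pair from settings.LANGUAGES, or ('', '') when out of range.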
|
[
"django.template.Library"
] |
[((73, 91), 'django.template.Library', 'template.Library', ([], {}), '()\n', (89, 91), False, 'from django import template\n')]
|
from collections import namedtuple
from src import bootstrap
import settings
import const
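
# The namedtuples below mimic parsed CLI arguments (a build/predict namespace)
# so that bootstrap.main can be driven programmatically with a fixed
# configuration instead of command-line flags.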
if __name__ == "__main__":
    argsClass = namedtuple('argsClass', 'build predict')
    buildClass = namedtuple('argsClass', 'input directed sample method dimension windowsize walklen nbofwalks embedtype classificationfunc optimizeclassifier '
                                         'temp_dir temp_id logfile train_ratio verbose keep_dropout use_cuda epoch_num batch_size task force')
    print(const.SLASHDOT_GRAPH)
    build = buildClass(input=settings.config[const.SLASHDOT_GRAPH],
                       directed=True, sample=["degree", 120], method="3type",
                       dimension=10, windowsize=3, walklen=50, nbofwalks=20, embedtype="py", classificationfunc="MLP", optimizeclassifier=True,
                       temp_dir=settings.config[const.TEMP_DIR],
                       temp_id="slash-full", train_ratio=0.8, verbose=True, logfile="log.txt", keep_dropout=0.8, use_cuda=False, epoch_num=10,
                       batch_size=512, task='link',
                       # force=['model'])
                       force=['sample', 'preprocess', 'postprocess', 'model'])
    # args = argsClass(build=build, predict=None)
    # bootstrap.main(args)
    print("----------------------------")
    # build = build._replace(method="attention")
    args = argsClass(build=build, predict=None)
    bootstrap.main(args)
    print("----------------------------")
|
[
"collections.namedtuple",
"src.bootstrap.main"
] |
[((136, 176), 'collections.namedtuple', 'namedtuple', (['"""argsClass"""', '"""build predict"""'], {}), "('argsClass', 'build predict')\n", (146, 176), False, 'from collections import namedtuple\n'), ((194, 444), 'collections.namedtuple', 'namedtuple', (['"""argsClass"""', '"""input directed sample method dimension windowsize walklen nbofwalks embedtype classificationfunc optimizeclassifier temp_dir temp_id logfile train_ratio verbose keep_dropout use_cuda epoch_num batch_size task force"""'], {}), "('argsClass',\n 'input directed sample method dimension windowsize walklen nbofwalks embedtype classificationfunc optimizeclassifier temp_dir temp_id logfile train_ratio verbose keep_dropout use_cuda epoch_num batch_size task force'\n )\n", (204, 444), False, 'from collections import namedtuple\n'), ((1414, 1434), 'src.bootstrap.main', 'bootstrap.main', (['args'], {}), '(args)\n', (1428, 1434), False, 'from src import bootstrap\n')]
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: contrib/coms/client/protos/account_state.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from protos import protocol_pb2 as contrib_dot_coms_dot_protos_dot_protocol__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='contrib/coms/client/protos/account_state.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n.contrib/coms/client/protos/account_state.proto\x1a\"contrib/coms/protos/protocol.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xe8\x01\n\x0c\x41\x63\x63ountState\x12\x1d\n\tportfolio\x18\x01 \x01(\x0b\x32\n.Portfolio\x12\x19\n\x07\x61\x63\x63ount\x18\x02 \x01(\x0b\x32\x08.Account\x12\x33\n\x0flast_checkpoint\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x16\n\x06orders\x18\x04 \x03(\x0b\x32\x06.Order\x12\x31\n\rfirst_session\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1e\n\rdaily_returns\x18\x06 \x03(\x0b\x32\x07.Return\"F\n\x06Return\x12-\n\ttimestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\r\n\x05value\x18\x02 \x01(\x02\x62\x06proto3')
,
dependencies=[contrib_dot_coms_dot_protos_dot_protocol__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_ACCOUNTSTATE = _descriptor.Descriptor(
name='AccountState',
full_name='AccountState',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='portfolio', full_name='AccountState.portfolio', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='account', full_name='AccountState.account', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='last_checkpoint', full_name='AccountState.last_checkpoint', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='orders', full_name='AccountState.orders', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='first_session', full_name='AccountState.first_session', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='daily_returns', full_name='AccountState.daily_returns', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=120,
serialized_end=352,
)
_RETURN = _descriptor.Descriptor(
name='Return',
full_name='Return',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='timestamp', full_name='Return.timestamp', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='Return.value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=354,
serialized_end=424,
)
_ACCOUNTSTATE.fields_by_name['portfolio'].message_type = contrib_dot_coms_dot_protos_dot_protocol__pb2._PORTFOLIO
_ACCOUNTSTATE.fields_by_name['account'].message_type = contrib_dot_coms_dot_protos_dot_protocol__pb2._ACCOUNT
_ACCOUNTSTATE.fields_by_name['last_checkpoint'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_ACCOUNTSTATE.fields_by_name['orders'].message_type = contrib_dot_coms_dot_protos_dot_protocol__pb2._ORDER
_ACCOUNTSTATE.fields_by_name['first_session'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_ACCOUNTSTATE.fields_by_name['daily_returns'].message_type = _RETURN
_RETURN.fields_by_name['timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
DESCRIPTOR.message_types_by_name['AccountState'] = _ACCOUNTSTATE
DESCRIPTOR.message_types_by_name['Return'] = _RETURN
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AccountState = _reflection.GeneratedProtocolMessageType('AccountState', (_message.Message,), dict(
DESCRIPTOR = _ACCOUNTSTATE,
__module__ = 'contrib.coms.client.protos.account_state_pb2'
# @@protoc_insertion_point(class_scope:AccountState)
))
_sym_db.RegisterMessage(AccountState)
Return = _reflection.GeneratedProtocolMessageType('Return', (_message.Message,), dict(
DESCRIPTOR = _RETURN,
__module__ = 'contrib.coms.client.protos.account_state_pb2'
# @@protoc_insertion_point(class_scope:Return)
))
_sym_db.RegisterMessage(Return)
# @@protoc_insertion_point(module_scope)
|
[
"google.protobuf.symbol_database.Default",
"google.protobuf.descriptor.FieldDescriptor"
] |
[((470, 496), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (494, 496), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((1833, 2167), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""portfolio"""', 'full_name': '"""AccountState.portfolio"""', 'index': '(0)', 'number': '(1)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='portfolio', full_name=\n 'AccountState.portfolio', index=0, number=1, type=11, cpp_type=10,\n label=1, has_default_value=False, default_value=None, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (1860, 2167), True, 'from google.protobuf import descriptor as _descriptor\n'), ((2193, 2524), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""account"""', 'full_name': '"""AccountState.account"""', 'index': '(1)', 'number': '(2)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='account', full_name=\n 'AccountState.account', index=1, number=2, type=11, cpp_type=10, label=\n 1, has_default_value=False, default_value=None, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (2220, 2524), True, 'from google.protobuf import descriptor as _descriptor\n'), ((2549, 2895), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""last_checkpoint"""', 'full_name': '"""AccountState.last_checkpoint"""', 'index': '(2)', 'number': '(3)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='last_checkpoint', full_name=\n 'AccountState.last_checkpoint', index=2, number=3, type=11, cpp_type=10,\n label=1, has_default_value=False, default_value=None, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (2576, 2895), True, 'from google.protobuf import descriptor as _descriptor\n'), ((2921, 3247), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""orders"""', 'full_name': '"""AccountState.orders"""', 'index': '(3)', 'number': '(4)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='orders', full_name='AccountState.orders',\n index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=\n False, default_value=[], message_type=None, enum_type=None,\n containing_type=None, is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR)\n", (2948, 3247), True, 'from google.protobuf import descriptor as _descriptor\n'), ((3273, 3615), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""first_session"""', 'full_name': '"""AccountState.first_session"""', 'index': '(4)', 'number': '(5)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='first_session', full_name=\n 'AccountState.first_session', index=4, number=5, type=11, cpp_type=10,\n label=1, has_default_value=False, default_value=None, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (3300, 3615), True, 'from google.protobuf import descriptor as _descriptor\n'), ((3641, 3981), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""daily_returns"""', 'full_name': '"""AccountState.daily_returns"""', 'index': '(5)', 'number': '(6)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='daily_returns', full_name=\n 'AccountState.daily_returns', index=5, number=6, type=11, cpp_type=10,\n label=3, has_default_value=False, default_value=[], message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (3668, 3981), True, 'from google.protobuf import descriptor as _descriptor\n'), ((4373, 4701), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""timestamp"""', 'full_name': '"""Return.timestamp"""', 'index': '(0)', 'number': '(1)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='timestamp', full_name='Return.timestamp',\n index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=\n False, default_value=None, message_type=None, enum_type=None,\n containing_type=None, is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR)\n", (4400, 4701), True, 'from google.protobuf import descriptor as _descriptor\n')]
|
import argparse
import sys
import tensorflow as tf
from gan_model_data import model
from common.experiment import Experiment, load_checkpoint
from common.training_loop import TrainingLoopParams, training_loop
def print_graph(session, model, step, nn_generator):
    """
    A helper function for printing key training characteristics.
    """
    if nn_generator:
        real, fake = session.run([model.average_probability_real, model.average_probability_fake])
        print("Saved model with step %d; real = %f, fake = %f" % (step, real, fake))
    else:
        real, fake, mean, stddev = session.run([model.average_probability_real, model.average_probability_fake, model.mean, model.stddev])
        print("Saved model with step %d; real = %f, fake = %f, mean = %f, stddev = %f" % (step, real, fake, mean, stddev))
def train(session, global_step, model_ops, args, hparams):
    print_graph(session, model_ops, global_step, hparams.nn_generator)

    # First, we run one step of discriminator training.
    for _ in range(max(int(args.discriminator_steps/2), 1)):
        session.run(model_ops.discriminator_train)
    # Then we run one step of generator training.
    for _ in range(args.generator_steps):
        session.run(model_ops.generator_train)
    for _ in range(int(args.discriminator_steps/2)):
        session.run(model_ops.discriminator_train)
def main(args):
"""
The main function to train the model.
"""
parser = argparse.ArgumentParser(description="Train the gan-normal model.")
parser.add_argument("--batch_size", type=int, default=32, help="The size of the minibatch")
parser.add_argument("--d_learning_rate", type=float, default=0.01, help="The discriminator learning rate")
parser.add_argument("--g_learning_rate", type=float, default=0.02, help="The generator learning rate")
parser.add_argument("--d_l2_reg", type=float, default=0.0005, help="The discriminator L2 regularization parameter")
parser.add_argument("--g_l2_reg", type=float, default=0., help="The generator L2 regularization parameter")
parser.add_argument("--input_mean", type=float, default=[], help="The mean of the input dataset", action="append")
parser.add_argument("--input_stddev", type=float, default=[], help="The standard deviation of the input dataset", action="append")
parser.add_argument("--dropout", type=float, default=0.5, help="The dropout rate to use in the descriminator")
parser.add_argument("--discriminator_steps", type=int, default=1, help="The number of steps to train the descriminator on each iteration")
parser.add_argument("--generator_steps", type=int, default=1, help="The number of steps to train the generator on each iteration")
parser.add_argument("--nn_generator", default=False, action="store_true", help="Whether to use a neural network as a generator")
parser.add_argument("--generator_features", default=[], action="append", type=int, help="The number of features in generators hidden layers")
parser.add_argument("--discriminator_features", default=[], action="append", type=int, help="The number of features in discriminators hidden layers")
Experiment.add_arguments(parser)
TrainingLoopParams.add_arguments(parser)
args = parser.parse_args(args)
# Default input mean and stddev.
if not args.input_mean:
args.input_mean.append(15.)
if not args.input_stddev:
args.input_stddev.append(7.)
if len(args.input_mean) != len(args.input_stddev):
print("There must be the same number of input means and standard deviations.")
sys.exit(1)
experiment = Experiment.from_args(args)
hparams = experiment.load_hparams(model.ModelParams, args)
# Create the model.
model_ops = model.GanNormalModel(hparams, model.DatasetParams(args), model.TrainingParams(args, training=True))
training_loop(TrainingLoopParams(args), experiment, model_ops.summaries,
lambda session, global_step: train(session, global_step, model_ops, args, hparams),
checkpoint=load_checkpoint(args))
if __name__ == "__main__":
main(sys.argv[1:])
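# Illustrative invocation (script name and values are placeholders; extra flags added by
# Experiment.add_arguments/TrainingLoopParams.add_arguments are not shown):
#   python train_gan_normal.py --batch_size 64 --nn_generator --input_mean 10 --input_stddev 3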
|
[
"common.training_loop.TrainingLoopParams",
"common.experiment.Experiment.add_arguments",
"argparse.ArgumentParser",
"gan_model_data.model.TrainingParams",
"common.training_loop.TrainingLoopParams.add_arguments",
"common.experiment.load_checkpoint",
"common.experiment.Experiment.from_args",
"gan_model_data.model.DatasetParams",
"sys.exit"
] |
[((1456, 1522), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train the gan-normal model."""'}), "(description='Train the gan-normal model.')\n", (1479, 1522), False, 'import argparse\n'), ((3153, 3185), 'common.experiment.Experiment.add_arguments', 'Experiment.add_arguments', (['parser'], {}), '(parser)\n', (3177, 3185), False, 'from common.experiment import Experiment, load_checkpoint\n'), ((3190, 3230), 'common.training_loop.TrainingLoopParams.add_arguments', 'TrainingLoopParams.add_arguments', (['parser'], {}), '(parser)\n', (3222, 3230), False, 'from common.training_loop import TrainingLoopParams, training_loop\n'), ((3614, 3640), 'common.experiment.Experiment.from_args', 'Experiment.from_args', (['args'], {}), '(args)\n', (3634, 3640), False, 'from common.experiment import Experiment, load_checkpoint\n'), ((3584, 3595), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3592, 3595), False, 'import sys\n'), ((3775, 3800), 'gan_model_data.model.DatasetParams', 'model.DatasetParams', (['args'], {}), '(args)\n', (3794, 3800), False, 'from gan_model_data import model\n'), ((3802, 3843), 'gan_model_data.model.TrainingParams', 'model.TrainingParams', (['args'], {'training': '(True)'}), '(args, training=True)\n', (3822, 3843), False, 'from gan_model_data import model\n'), ((3864, 3888), 'common.training_loop.TrainingLoopParams', 'TrainingLoopParams', (['args'], {}), '(args)\n', (3882, 3888), False, 'from common.training_loop import TrainingLoopParams, training_loop\n'), ((4034, 4055), 'common.experiment.load_checkpoint', 'load_checkpoint', (['args'], {}), '(args)\n', (4049, 4055), False, 'from common.experiment import Experiment, load_checkpoint\n')]
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/assets/'
# Extra places to collect and find static files
# STATICFILES_DIRS = (os.path.join(BASE_DIR, '/assets/'))
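# With this configuration, `python manage.py collectstatic` gathers assets into
# STATIC_ROOT, and templates reference them under the '/assets/' STATIC_URL prefix.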
|
[
"os.path.abspath",
"os.path.join"
] |
[((198, 235), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""staticfiles"""'], {}), "(BASE_DIR, 'staticfiles')\n", (210, 235), False, 'import os\n'), ((54, 79), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (69, 79), False, 'import os\n')]
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from selenium.webdriver.chrome.options import Options
options = Options()
extset = ['enable-automation', 'ignore-certificate-errors']
options.add_argument("--window-size=600,600")
options.add_argument("--headless")
options.add_experimental_option("excludeSwitches", extset)
driver = webdriver.Chrome(options=options)
# driver = webdriver.Chrome()
driver.implicitly_wait(5)
driver.get('http://homestead.test')
driver.find_element_by_id('email').send_keys('<EMAIL>')
driver.find_element_by_id('password').send_keys('<PASSWORD>' + Keys.ENTER)
time.sleep(0.5)
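# Not in the original snippet: release the browser session once the login flow completes.
driver.quit()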
|
[
"selenium.webdriver.chrome.options.Options",
"selenium.webdriver.Chrome",
"time.sleep"
] |
[((156, 165), 'selenium.webdriver.chrome.options.Options', 'Options', ([], {}), '()\n', (163, 165), False, 'from selenium.webdriver.chrome.options import Options\n'), ((376, 409), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'options': 'options'}), '(options=options)\n', (392, 409), False, 'from selenium import webdriver\n'), ((634, 649), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (644, 649), False, 'import time\n')]
|
from utilities.constants import TREAT, CONC
from utilities.counts import count_cells_per_well, normalise_count_cells
# labels for concentration of treatments in the experiment
number2conc = {2: '0 ug/mL',
3: '0.137 ug/mL',
4: '0.412 ug/mL',
5: '1.235 ug/mL',
6: '3.704 ug/mL',
7: '11.11 ug/mL',
8: '33.33 ug/mL',
9: '100 ug/mL',
10: '300ug/mL'}
# labels for the nanoparticle treatments in the experiment
row2np = {'A': 'Si-F8BT',
'B': 'Si-CNPPV',
'C': 'Si-P3',
'D': 'Si-P4',
'E': 'PP-F8BT',
'F': 'PP-CNPPV',
'G': 'PP-P3',
'H': 'PP-P4'}
# labels for the control treatments in the experiment
controls = {'A': 'FCCP Control',
'B': 'FCCP Control',
'C': 'Triton-X',
'D': 'Triton-X',
'E': 'H2O',
'F': 'H2O',
'G': 'DMSO',
'H': 'DMSO'}
def clean_data(data):
"""Clean a csv file"""
# removing Weighted_Relative_Moment_Inertia
# high frequency of nan
    data = data.drop(columns=['Weighted_Relative_Moment_Inertia'])
data.columns = [format_column_name(x) for x in data.columns]
data = label_data(data)
data = normalise_data(data)
count = count_cells_per_well(data)
normalised_counts = normalise_count_cells(data, count)
return data, count, normalised_counts
def label_data(data):
""" Takes one dataframe and applies the correct labels to each row"""
# some rows miss these two features, which are fundamental. **EXTERMINATE**
drop = data[['Area Nuc', 'Area Cell']].isnull().sum(axis=1) != 0
drop = data.index.values[drop]
data = data.drop(index=drop)
data[CONC] = data.apply(lambda x: number2conc.get(x['Number'], 'control'), axis=1)
data.head()
data[TREAT] = data.apply(lambda x: row2np.get(x['Row'], 'control'), axis=1)
data.head()
for key in controls:
data.loc[(data[CONC] == 'control') & (data['Row'] == key), TREAT] = controls[key]
data = data.drop(columns=['Number', 'Count Nuc'])
return data
def format_column_name(string):
"""Automatically reformats feature names into something more machine-readable."""
string = ' '.join(string.strip().split())
string = (string
.replace('_', ' ')
.replace('[', '')
.title()
.replace('- Um', '')
)
# if ('Feret' in string or 'Perimeter' in string) and '(μm)' not in string:
# string += ' (μm)'
if 'Mempernuc' in string:
string = string.replace('Mempernuc', 'Mem Per Nuc')
if 'Mitoint' in string:
string = string.replace('Mitoint', 'Mito Int ')
string = string.title()
if 'dxa' in string or 'Dxa' in string:
string = string.replace('dxa', ' DxA')
string = string.replace('Dxa', ' DxA')
if 'Wmoi' in string:
string = string.replace('Wmoi', 'WMOI')
if 'Conc' in string:
string = string.replace('Conc', 'Concentration')
return string
def format_dataframe_columns(df):
df.columns = [format_column_name(colname) for colname in df.columns]
return df
def normalise_data(data):
"""Z-scores all numeric data."""
# select only numeric data
numeric = data._get_numeric_data()
# apply transformation
numeric = numeric - numeric.mean()
numeric = numeric / numeric.std()
# mind that we don't have the classes column in this dataframe!
# put class information back in
numeric[CONC] = data[CONC].tolist()
numeric[TREAT] = data[TREAT].tolist()
return numeric
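# Illustrative usage (the CSV path and loader are assumptions, not part of this module):
#   import pandas as pd
#   raw = pd.read_csv('plate_measurements.csv')
#   data, counts, normalised_counts = clean_data(raw)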
|
[
"utilities.counts.normalise_count_cells",
"utilities.counts.count_cells_per_well"
] |
[((1324, 1350), 'utilities.counts.count_cells_per_well', 'count_cells_per_well', (['data'], {}), '(data)\n', (1344, 1350), False, 'from utilities.counts import count_cells_per_well, normalise_count_cells\n'), ((1375, 1409), 'utilities.counts.normalise_count_cells', 'normalise_count_cells', (['data', 'count'], {}), '(data, count)\n', (1396, 1409), False, 'from utilities.counts import count_cells_per_well, normalise_count_cells\n')]
|
import paho.mqtt.client as mqtt
import ssl
from redis_support_py3.graph_query_support_py3 import Query_Support
from redis_support_py3.construct_data_handlers_py3 import Generate_Handlers
import time
import msgpack
class MQTT_Current_Monitor_Publish(object):
def __init__(self,redis_site,topic_prefix,qs ) :
self.topic_prefix = topic_prefix
query_list = []
query_list = qs.add_match_relationship( query_list,relationship="SITE",label=redis_site["site"] )
query_list = qs.add_match_terminal( query_list,
relationship = "PACKAGE", property_mask={"name":"MQTT_DEVICES_DATA"} )
package_sets, package_sources = qs.match_list(query_list)
package = package_sources[0]
generate_handlers = Generate_Handlers(package,qs)
data_structures = package["data_structures"]
self.job_queue_client = generate_handlers.construct_job_queue_client(data_structures["MQTT_PUBLISH_QUEUE"])
def read_current_limit(self):
request = {}
request["topic"] = "INPUT/MQTT_CURRENT/GET_LIMIT_CURRENTS"
self.send_request(request)
def read_max_currents(self):
request = {}
request["topic"] = "INPUT/MQTT_CURRENT/GET_MAX_CURRENTS"
self.send_request(request)
def clear_max_currents(self):
request = {}
request["topic"] = "OUTPUT/MQTT_CURRENT/CLEAR_MAX_CURRENTS"
self.send_request(request)
def read_current(self):
request = {}
request["topic"] = "INPUT/MQTT_CURRENT/READ_CURRENT"
self.send_request(request)
def enable_equipment_relay(self):
request = {}
request["topic"] = "OUTPUT/MQTT_CURRENT/ENABLE_EQUIPMENT_RELAY"
self.send_request(request)
def enable_irrigation_relay(self):
request = {}
request["topic"] = "OUTPUT/MQTT_CURRENT/ENABLE_IRRIGATION_RELAY"
self.send_request(request)
def disable_equipment_relay(self):
request = {}
request["topic"] = "OUTPUT/MQTT_CURRENT/DISABLE_EQUIPMENT_RELAY"
self.send_request(request)
    def disable_irrigation_relay(self):
request = {}
request["topic"] = "OUTPUT/MQTT_CURRENT/DISABLE_IRRIGATION_RELAY"
self.send_request(request)
def read_relay_states(self):
request = {}
request["topic"] = "OUTPUT/MQTT_CURRENT/READ_RELAY_STATES"
self.send_request(request)
def send_request(self,msg_dict):
msg_dict["tx_topic"] =self.topic_prefix +msg_dict["topic"]
#print("msg_dict",msg_dict)
self.job_queue_client.push(msg_dict)
if __name__ == "__main__":
import datetime
import time
import string
import urllib.request
import math
import redis
import base64
import json
import os
import copy
#import load_files_py3
from redis_support_py3.graph_query_support_py3 import Query_Support
import datetime
from py_cf_new_py3.chain_flow_py3 import CF_Base_Interpreter
#
#
# Read Boot File
# expand json file
#
file_handle = open("system_data_files/redis_server.json",'r')
data = file_handle.read()
file_handle.close()
redis_site = json.loads(data)
x = MQTT_Current_Monitor_Publish(redis_site,"/REMOTES/CURRENT_MONITOR_1/")
while(1):
time.sleep(5)
x.read_max_currents()
time.sleep(5)
x.clear_max_currents()
time.sleep(5)
x.read_relay_states()
|
[
"redis_support_py3.construct_data_handlers_py3.Generate_Handlers",
"json.loads",
"time.sleep"
] |
[((3403, 3419), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (3413, 3419), False, 'import json\n'), ((846, 876), 'redis_support_py3.construct_data_handlers_py3.Generate_Handlers', 'Generate_Handlers', (['package', 'qs'], {}), '(package, qs)\n', (863, 876), False, 'from redis_support_py3.construct_data_handlers_py3 import Generate_Handlers\n'), ((3522, 3535), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (3532, 3535), False, 'import time\n'), ((3572, 3585), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (3582, 3585), False, 'import time\n'), ((3623, 3636), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (3633, 3636), False, 'import time\n')]
|
from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
class Course(models.Model):
title = models.CharField(max_length=200)
code = models.SlugField(max_length=200, unique=True)
summary = models.TextField(blank=True)
class Meta:
ordering = ['title']
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("courses_admin_list")
class Unit(models.Model):
course = models.ForeignKey(Course,
related_name='courses',
on_delete=models.CASCADE)
title = models.CharField(max_length=200)
code = models.SlugField(max_length=200, unique=True)
overview = models.TextField()
created = models.DateTimeField(auto_now_add=True)
class Meta:
ordering =['created']
def __str__(self):
return self.title
class Module(models.Model):
unit = models.ForeignKey(Unit,
related_name='modules',
on_delete=models.CASCADE)
title = models.CharField(max_length=200)
description = models.TextField(blank=True)
def __str__(self):
return self.title
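# Note: Course.get_absolute_url() resolves the URL pattern named 'courses_admin_list',
# which must be registered in the project's URLconf for reverse() to succeed.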
|
[
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.SlugField",
"django.urls.reverse",
"django.db.models.DateTimeField"
] |
[((147, 179), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (163, 179), False, 'from django.db import models\n'), ((192, 237), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(200)', 'unique': '(True)'}), '(max_length=200, unique=True)\n', (208, 237), False, 'from django.db import models\n'), ((252, 280), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (268, 280), False, 'from django.db import models\n'), ((495, 570), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Course'], {'related_name': '"""courses"""', 'on_delete': 'models.CASCADE'}), "(Course, related_name='courses', on_delete=models.CASCADE)\n", (512, 570), False, 'from django.db import models\n'), ((645, 677), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (661, 677), False, 'from django.db import models\n'), ((689, 734), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(200)', 'unique': '(True)'}), '(max_length=200, unique=True)\n', (705, 734), False, 'from django.db import models\n'), ((750, 768), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (766, 768), False, 'from django.db import models\n'), ((783, 822), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (803, 822), False, 'from django.db import models\n'), ((963, 1036), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Unit'], {'related_name': '"""modules"""', 'on_delete': 'models.CASCADE'}), "(Unit, related_name='modules', on_delete=models.CASCADE)\n", (980, 1036), False, 'from django.db import models\n'), ((1107, 1139), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1123, 1139), False, 'from django.db import models\n'), ((1158, 1186), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (1174, 1186), False, 'from django.db import models\n'), ((424, 453), 'django.urls.reverse', 'reverse', (['"""courses_admin_list"""'], {}), "('courses_admin_list')\n", (431, 453), False, 'from django.urls import reverse\n')]
|
"""Recognize and extract forms."""
import os
from statistics import fmean
from azure.ai.formrecognizer.aio import FormRecognizerClient, FormTrainingClient
from azure.core.credentials import AzureKeyCredential
class RecognizeCustomFormsSampleAsync:
"""Class to recognize forms in async mode."""
async def recognize_custom_forms(self, custom_model_id, filename):
"""Extract text from custom form.
Args:
custom_model_id: The trained custom model id.
filename: The filename of the document that will be scanned.
Returns:
The header for the table and the extracted text.
"""
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
model_id = os.getenv("CUSTOM_TRAINED_MODEL_ID", custom_model_id)
async with FormRecognizerClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
) as form_recognizer_client:
# Make sure your form's type is included in the
# list of form types the custom model can recognize
form_url = (
f"https://storage.googleapis.com/"
f"{os.getenv('GS_MEDIA_BUCKET_NAME')}/"
f"{filename}"
)
poller = await form_recognizer_client.begin_recognize_custom_forms_from_url(
model_id=model_id, form_url=form_url, include_field_elements=True
)
forms = await poller.result()
table = []
header = {}
for _, form in enumerate(forms):
row = {}
for idx, (name, field) in enumerate(form.fields.items()):
if idx >= 3:
for value in field.value:
for i, val in value.to_dict()["value"].items():
data = val["value_data"]
# Condition for "No Data"
if data:
words = data["field_elements"]
# Condition for multiple word result
if len(words) > 1:
word_list = [word["text"] for word in words]
confidence_list = [word["confidence"] for word in words]
slug_name = (
val["name"]
.lower()
.replace(" ", "_")
.replace("(", "")
.replace(")", "")
)
row[slug_name] = {
"text": " ".join(word_list),
"confidence": round(fmean(confidence_list), 3),
}
else:
slug_name = (
val["name"]
.lower()
.replace(" ", "_")
.replace("(", "")
.replace(")", "")
)
row[slug_name] = {
"text": words[0]["text"],
"confidence": words[0]["confidence"],
}
else:
slug_name = (
val["name"]
.lower()
.replace(" ", "_")
.replace("(", "")
.replace(")", "")
)
row[slug_name] = {
"text": data,
"confidence": data,
}
if i == "REMARKS":
table.append(row)
row = {}
else:
slug_name = (
name.lower().replace(" ", "_").replace("(", "").replace(")", "")
)
header[slug_name] = {
"text": field.value,
"confidence": field.confidence,
}
return header, table
async def form_recognizer_runner(filename):
"""Runner for the form recognizer.
Args:
filename: The filename of the document to be scanned
Returns:
The form header and the table scanned.
"""
sample = RecognizeCustomFormsSampleAsync()
model_id = None
if os.getenv("CONTAINER_SAS_URL"):
endpoint = os.getenv("AZURE_FORM_RECOGNIZER_ENDPOINT")
key = os.getenv("AZURE_FORM_RECOGNIZER_KEY")
if not endpoint or not key:
raise ValueError("Please provide endpoint and API key to run the samples.")
form_training_client = FormTrainingClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
)
async with form_training_client:
model = await (
await form_training_client.begin_training(
os.getenv("CONTAINER_SAS_URL"), use_training_labels=True
)
).result()
model_id = model.model_id
return await sample.recognize_custom_forms(model_id, filename)
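# Illustrative driver code (the filename is a placeholder); the coroutine must be awaited,
# e.g.: header, table = asyncio.run(form_recognizer_runner("form.pdf"))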
|
[
"statistics.fmean",
"azure.core.credentials.AzureKeyCredential",
"os.getenv"
] |
[((5192, 5222), 'os.getenv', 'os.getenv', (['"""CONTAINER_SAS_URL"""'], {}), "('CONTAINER_SAS_URL')\n", (5201, 5222), False, 'import os\n'), ((783, 836), 'os.getenv', 'os.getenv', (['"""CUSTOM_TRAINED_MODEL_ID"""', 'custom_model_id'], {}), "('CUSTOM_TRAINED_MODEL_ID', custom_model_id)\n", (792, 836), False, 'import os\n'), ((5243, 5286), 'os.getenv', 'os.getenv', (['"""AZURE_FORM_RECOGNIZER_ENDPOINT"""'], {}), "('AZURE_FORM_RECOGNIZER_ENDPOINT')\n", (5252, 5286), False, 'import os\n'), ((5301, 5339), 'os.getenv', 'os.getenv', (['"""AZURE_FORM_RECOGNIZER_KEY"""'], {}), "('AZURE_FORM_RECOGNIZER_KEY')\n", (5310, 5339), False, 'import os\n'), ((5559, 5582), 'azure.core.credentials.AzureKeyCredential', 'AzureKeyCredential', (['key'], {}), '(key)\n', (5577, 5582), False, 'from azure.core.credentials import AzureKeyCredential\n'), ((921, 944), 'azure.core.credentials.AzureKeyCredential', 'AzureKeyCredential', (['key'], {}), '(key)\n', (939, 944), False, 'from azure.core.credentials import AzureKeyCredential\n'), ((1202, 1235), 'os.getenv', 'os.getenv', (['"""GS_MEDIA_BUCKET_NAME"""'], {}), "('GS_MEDIA_BUCKET_NAME')\n", (1211, 1235), False, 'import os\n'), ((5741, 5771), 'os.getenv', 'os.getenv', (['"""CONTAINER_SAS_URL"""'], {}), "('CONTAINER_SAS_URL')\n", (5750, 5771), False, 'import os\n'), ((2982, 3004), 'statistics.fmean', 'fmean', (['confidence_list'], {}), '(confidence_list)\n', (2987, 3004), False, 'from statistics import fmean\n')]
|
# Module: launch
# Description: Launches a custom shortcut in the shortcuts directory
# Usage: !launch [shortcut]
# Dependencies: os, time, glob
import os, configs,time
from lib.helpers import checkfolder
from lib.reco_embeds import recoEmbeds as rm
from glob import glob
async def launch(ctx,client, shortcut=None):
p=configs.BOT_PREFIX
fileOpened=False
checkfolder()
if configs.operating_sys == "Windows":
        if shortcut:
if shortcut.isnumeric():
msg=await rm.msg(ctx,f"**Opening File No: {shortcut}**",color=rm.color('colorforWaitingMsg'))
elif shortcut=="list":
msg=await ctx.send("> Gathering files from **Shortcut Folder**.")
else:
msg=await rm.msg(ctx,f"Searching **{shortcut.capitalize()}**",color=rm.color('colorforWaitingMsg'))
        else:
await rm.msg(ctx,f'''**Help - {p}launch**
Using launch command you can easily open any application or file which are available in your Reco's **Shortcut folder**.
**Commands:**
```{p}launch list
{p}launch open
{p}launch File_Number
{p}launch File_Name```
**🎬 YouTube**
**[How to use {p}launch in {client.user.name}?](https://youtu.be/-b-7-8oK1tI)**''')
return
shortcutFolderPath=configs.RECO_PATH+"/shortcuts/*"
files = glob(shortcutFolderPath)
print(len(files))
print(files)
time.sleep(1)
if len(files)!=0:
folderExtensions=set([f".{e.split('.')[-1]}" for e in files])
folderFileNames=[f"{f.split(chr(92))[-1]}" for f in files]
print(folderExtensions)
else:
await msg.delete()
await rm.msg(ctx,f"**Shortcut Folder is Empty!**\n\n**Path**: {shortcutFolderPath}",rm.color('colorforError'))
return
if shortcut=="list":
await msg.delete()
filenames=f"Files Count: **{len(files)}** \n\n"+"\n".join([f"**{n}** - **{f.split(chr(92))[-1].replace('_',f'{chr(92)}_')}**" for n,f in enumerate(files)])
await rm.extendableMsg(ctx,filenames)
elif shortcut.isnumeric():
if int(shortcut)<len(files):
await rm.editMsg(ctx,msg,f"**Opening {files[int(shortcut)].split(chr(92))[-1]}**...")
os.startfile(files[int(shortcut)])
else:
await rm.editMsg(ctx,msg,f"**❌ Invalid File Number!**\n\nTry:\n**{p}launch list**",color=rm.color('colorforError'))
elif shortcut!="":
if shortcut!=None:
for e in folderExtensions:
if (os.path.isfile("shortcuts/" + shortcut + e)):
await rm.editMsg(ctx,msg,f'**Opening {shortcut.capitalize() }{e}**...')
os.startfile("shortcuts\\" + shortcut + e)
fileOpened=True
break
elif shortcut.__contains__("."):
if (os.path.isfile("shortcuts/" + shortcut)):
await rm.editMsg(ctx,msg,f'**Opening {shortcut.capitalize()}**...')
os.startfile("shortcuts\\" + shortcut)
fileOpened=True
break
if not fileOpened:
for f in folderFileNames:
file=f.lower()
print("File Finder: ",shortcut,"->",file)
if file.__contains__(shortcut.lower()):
index= folderFileNames.index(f)
await rm.editMsg(ctx,msg,f'**Opening {files[index].split(chr(92))[-1]}**...')
os.startfile(files[index])
fileOpened=True
break
if not fileOpened:
await rm.editMsg(ctx,msg,"**No such file in your shortcuts folder.**",color=rm.color('colorforError'))
else:
await ctx.send("Module not yet supported on Linux and macOS")
|
[
"lib.reco_embeds.recoEmbeds.color",
"time.sleep",
"os.path.isfile",
"lib.reco_embeds.recoEmbeds.msg",
"lib.helpers.checkfolder",
"glob.glob",
"lib.reco_embeds.recoEmbeds.extendableMsg",
"os.startfile"
] |
[((370, 383), 'lib.helpers.checkfolder', 'checkfolder', ([], {}), '()\n', (381, 383), False, 'from lib.helpers import checkfolder\n'), ((1354, 1378), 'glob.glob', 'glob', (['shortcutFolderPath'], {}), '(shortcutFolderPath)\n', (1358, 1378), False, 'from glob import glob\n'), ((1435, 1448), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1445, 1448), False, 'import os, configs, time\n'), ((2087, 2119), 'lib.reco_embeds.recoEmbeds.extendableMsg', 'rm.extendableMsg', (['ctx', 'filenames'], {}), '(ctx, filenames)\n', (2103, 2119), True, 'from lib.reco_embeds import recoEmbeds as rm\n'), ((897, 1268), 'lib.reco_embeds.recoEmbeds.msg', 'rm.msg', (['ctx', 'f"""**Help - {p}launch**\n\nUsing launch command you can easily open any application or file which are available in your Reco\'s **Shortcut folder**.\n\n**Commands:**\n```{p}launch list\n{p}launch open\n{p}launch File_Number\n{p}launch File_Name```\n \n**🎬 YouTube**\n**[How to use {p}launch in {client.user.name}?](https://youtu.be/-b-7-8oK1tI)**"""'], {}), '(ctx,\n f"""**Help - {p}launch**\n\nUsing launch command you can easily open any application or file which are available in your Reco\'s **Shortcut folder**.\n\n**Commands:**\n```{p}launch list\n{p}launch open\n{p}launch File_Number\n{p}launch File_Name```\n \n**🎬 YouTube**\n**[How to use {p}launch in {client.user.name}?](https://youtu.be/-b-7-8oK1tI)**"""\n )\n', (903, 1268), True, 'from lib.reco_embeds import recoEmbeds as rm\n'), ((1794, 1819), 'lib.reco_embeds.recoEmbeds.color', 'rm.color', (['"""colorforError"""'], {}), "('colorforError')\n", (1802, 1819), True, 'from lib.reco_embeds import recoEmbeds as rm\n'), ((569, 599), 'lib.reco_embeds.recoEmbeds.color', 'rm.color', (['"""colorforWaitingMsg"""'], {}), "('colorforWaitingMsg')\n", (577, 599), True, 'from lib.reco_embeds import recoEmbeds as rm\n'), ((2638, 2681), 'os.path.isfile', 'os.path.isfile', (["('shortcuts/' + shortcut + e)"], {}), "('shortcuts/' + shortcut + e)\n", (2652, 2681), False, 'import os, configs, time\n'), ((820, 850), 'lib.reco_embeds.recoEmbeds.color', 'rm.color', (['"""colorforWaitingMsg"""'], {}), "('colorforWaitingMsg')\n", (828, 850), True, 'from lib.reco_embeds import recoEmbeds as rm\n'), ((2475, 2500), 'lib.reco_embeds.recoEmbeds.color', 'rm.color', (['"""colorforError"""'], {}), "('colorforError')\n", (2483, 2500), True, 'from lib.reco_embeds import recoEmbeds as rm\n'), ((2810, 2852), 'os.startfile', 'os.startfile', (["('shortcuts\\\\' + shortcut + e)"], {}), "('shortcuts\\\\' + shortcut + e)\n", (2822, 2852), False, 'import os, configs, time\n'), ((3018, 3057), 'os.path.isfile', 'os.path.isfile', (["('shortcuts/' + shortcut)"], {}), "('shortcuts/' + shortcut)\n", (3032, 3057), False, 'import os, configs, time\n'), ((3789, 3815), 'os.startfile', 'os.startfile', (['files[index]'], {}), '(files[index])\n', (3801, 3815), False, 'import os, configs, time\n'), ((4045, 4070), 'lib.reco_embeds.recoEmbeds.color', 'rm.color', (['"""colorforError"""'], {}), "('colorforError')\n", (4053, 4070), True, 'from lib.reco_embeds import recoEmbeds as rm\n'), ((3190, 3228), 'os.startfile', 'os.startfile', (["('shortcuts\\\\' + shortcut)"], {}), "('shortcuts\\\\' + shortcut)\n", (3202, 3228), False, 'import os, configs, time\n')]
|
#!/usr/bin/env python3
from __future__ import print_function
import json
import sys
import urllib.error
import urllib.parse
import urllib.request
from strsimpy.cosine import Cosine
import yaml
import re
import pandas as pds
import requests
import click
import logging
import click_log
import random
logger = logging.getLogger(__name__)
click_log.basic_config(logger)
pds.set_option('display.expand_frame_repr', False)
global inferred_model, ecg, opg, rrg, qfg, mdg, omg
ecg = None
failures = []
cols2display = ['enum_class', 'orig_enum', 'query', 'obo_id', 'pref_lab',
'name', 'cosine_dist', 'dist_ok', 'type', 'scope', 'rank']
success_frame = pds.DataFrame(columns=cols2display)
# MIN CHARACTERS FOR SEARCH NOT BEING ENFORCED
# TODO write mapped terms back in as meanings
# give option for overwriting?
# TODO all user to specify enum classes to process
# when verbose, stderr gets status and debugging info
# stdout gets the modified model as yaml and should be redirected to a file
# OLS dataframe structure not identical to previous BP dataframes:
# different columns
# BP shows one best row
# OLS lists up to N best
# not filtering out small queries in OLS approach yet
# (OLS approach?) neither handling nor optimizing for repeat values
# not merging results back into model yet
# examples of previously challenging mappings
# # bicarbonate
# # term_iri = 'https://www.ebi.ac.uk/ols/api/ontologies/chebi/terms/http%253A%252F%252Fpurl.obolibrary.org%252Fobo%252FCHEBI_32139'
# # fungus
# # term_iri = 'https://www.ebi.ac.uk/ols/api/ontologies/ncbitaxon/terms/http%253A%252F%252Fpurl.obolibrary.org%252Fobo%252FNCBITaxon_33169'
# # sars-cov-2
# # term_iri = 'https://www.ebi.ac.uk/ols/api/ontologies/ncbitaxon/terms/http%253A%252F%252Fpurl.obolibrary.org%252Fobo%252FNCBITaxon_2697049'
# # <NAME> T7
# # # http://purl.obolibrary.org/obo/NCBITaxon_10760
# # term_iri = 'https://www.ebi.ac.uk/ols/api/ontologies/ncbitaxon/terms/http%253A%252F%252Fpurl.obolibrary.org%252Fobo%252FNCBITaxon_10760'
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
# TODO add filter based on min_search_chars_param?
# no longer requiring a minimum search length
def one_enum_to_ols_frame_list(permitteds, one_enum_param):
global failures
global success_frame
per_enum_frame = pds.DataFrame(columns=cols2display)
for orig_enum in permitteds:
temp = one_enum_param + ": " + orig_enum
logger.info(temp)
# tidied_enum = re.sub(r'[_,.\-;@#?!&$ ]+', ' ', orig_enum)
if ecg is not None:
tidied_enum = re.sub(r'[' + ecg + ']+', ' ', orig_enum)
ontologies_phrase = ''
if len(opg) > 1:
ontologies_phrase = 'ontology=' + opg.lower()
qf_phrase = ''
if len(qfg) > 1:
qf_phrase = 'queryFields=' + qfg.lower()
# requiring local loses EROs annotations of SV40
# 'local=true' + '&' + \
request_string = 'http://www.ebi.ac.uk/ols/api/search?q=' + \
urllib.parse.quote(tidied_enum) + '&' + \
'type=class' + '&' + \
'exact=false' + '&' + \
ontologies_phrase + "&" + \
'rows=' + str(rrg) + '&' + \
qf_phrase
logger.debug(request_string)
response_param = requests.get(request_string)
ols_string_search_res_j = response_param.json()
ols_string_search_res_frame = pds.DataFrame(ols_string_search_res_j['response']['docs'])
ols_string_search_res_frame.insert(0, "query", tidied_enum)
# did the string search get any result rows?
r, c = ols_string_search_res_frame.shape
if r == 0:
no_search_res_dict = {'description': '', 'id': orig_enum, 'iri': '', 'is_defining_ontology': '',
'label': '', 'obo_id': '', 'ontology_name': '', 'ontology_prefix': '',
'short_form': '', 'type': ''}
no_search_res_frame = pds.DataFrame([no_search_res_dict])
ols_string_search_res_frame = ols_string_search_res_frame.append(no_search_res_frame)
failures.append(orig_enum)
ols_string_search_res_frame['query'] = orig_enum
inner_cosine_obj = Cosine(1)
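        # Cosine(1) compares 1-character shingle profiles; distance is 1 - cosine similarity,
        # so 0.0 means identical strings and only rows with distance <= mdg are accepted below.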
annotations_frame = pds.DataFrame(columns=['name', 'obo_id', 'scope', 'type', 'xrefs'])
for ols_string_search_res_row in ols_string_search_res_frame.itertuples(index=False):
once = urllib.parse.quote(ols_string_search_res_row.iri, safe='')
twice = urllib.parse.quote(once, safe='')
# build url from base
term_retr_base = 'http://www.ebi.ac.uk/ols/api/ontologies/'
term_retr_assembled = term_retr_base + ols_string_search_res_row.ontology_name + '/terms/' + twice
term_details = requests.get(term_retr_assembled)
term_json = term_details.json()
has_label = 'label' in set(term_json.keys())
if has_label:
logger.debug(term_retr_assembled)
temp = term_json['label']
logger.debug(temp)
label_frame = pds.DataFrame([[term_json['label'], 'label', 'label', '']],
columns=['name', 'scope', 'type', 'xrefs'])
label_frame['obo_id'] = term_json['obo_id']
label_frame['pref_lab'] = term_json['label']
annotations_frame = annotations_frame.append(label_frame, ignore_index=True)
# also get other properties?
has_synonyms = 'obo_synonym' in set(term_json.keys())
if has_synonyms:
obo_syn_json = term_json['obo_synonym']
obo_syn_frame = pds.DataFrame(obo_syn_json)
obo_syn_frame['obo_id'] = term_json['obo_id']
obo_syn_frame['pref_lab'] = term_json['label']
annotations_frame = annotations_frame.append(obo_syn_frame, ignore_index=True)
# # don't process every kind of annotation, like genetic code
# has_annotations = 'annotation' in set(term_json.keys())
# if has_annotations:
# obo_ano_json = term_json['annotation']
# for anokey in obo_ano_json.keys():
# for keyval in obo_ano_json[anokey]:
# new_row = {'name': keyval,
# 'obo_id': term_json['obo_id'],
# 'scope': anokey,
# 'type': 'annotation',
# 'xrefs': '',
# 'pref_lab': term_json['label']}
# annotations_frame = annotations_frame.append(new_row, ignore_index=True)
annotations_row_count = len(annotations_frame.index)
if annotations_row_count == 0:
logger.warning('NO ANNOTATIONS')
manual_row = pds.Series(['', '', '', '', '', ''])
row_df = pds.DataFrame([manual_row], columns=['name', 'obo_id', 'scope', 'type', 'xrefs', 'pref_lab'])
annotations_frame = pds.concat([row_df, annotations_frame], ignore_index=True)
failures.append(orig_enum)
annotations_frame['enum_class'] = one_enum_param
annotations_frame['query'] = tidied_enum
annotations_frame['orig_enum'] = orig_enum
# check whether anny of the annotation on any of the hits have an
# acceptable cosine string distance
annotations_frame['name'] = annotations_frame['name'].fillna('')
annotations_frame['cosine_dist'] = \
annotations_frame.apply(lambda row: inner_cosine_obj.distance(tidied_enum.strip().lower(),
row['name'].strip().lower()),
axis=1)
annotations_frame = annotations_frame.sort_values('cosine_dist')
annotations_frame['dist_ok'] = annotations_frame['cosine_dist'] <= mdg
annotations_frame['rank'] = list(range(1, len(annotations_frame.index)+1))
# annotations_frame = annotations_frame[
# ['enum_class', 'orig_enum', 'query', 'name', 'cosine_dist', 'dist_ok',
# 'obo_id', 'pref_lab', 'type', 'scope']]
annotations_frame = annotations_frame[cols2display]
# do something with xrefs?
logger.debug(annotations_frame)
# get best acceptable row
acceptable_cosine = annotations_frame[annotations_frame['cosine_dist'] <= mdg]
acceptable_row_count = len(acceptable_cosine.index)
if acceptable_row_count > 0:
best_acceptable = acceptable_cosine.iloc[0]
success_frame = success_frame.append(best_acceptable)
# check if permitted value already has a meaning
meaning_search = list(inferred_model['enums'][one_enum_param]['permissible_values'][orig_enum].keys())
if 'meaning' in meaning_search:
has_meaning = True
else:
has_meaning = False
meaningless = not has_meaning
if meaningless or omg:
# insert meaning
inferred_model['enums'][one_enum_param]['permissible_values'][orig_enum]['meaning'] = best_acceptable[
'obo_id']
inferred_model['enums'][one_enum_param]['permissible_values'][orig_enum]['description'] = \
best_acceptable['pref_lab']
else:
temp = 'NO ACCEPTABLE MAPPINGS FOR ' + one_enum_param + " " + orig_enum
logger.warning(temp)
# sort and make unique
failures.append(orig_enum)
per_enum_frame = per_enum_frame.append(annotations_frame)
# I think there will be one success frame for each enum
success_frame = success_frame[cols2display]
success_frame = success_frame[list(annotations_frame.columns)]
logger.info(success_frame)
return per_enum_frame
def all_enums_to_ols(inferred_model_param, the_enums_param):
multi_enum_frame = pds.DataFrame(columns=cols2display)
for one_enum in the_enums_param:
permitteds = get_one_enum_class(inferred_model_param, one_enum)
one_enum_class_list = one_enum_to_ols_frame_list(permitteds, one_enum)
multi_enum_frame = multi_enum_frame.append(one_enum_class_list)
return multi_enum_frame
def get_one_enum_class(inferred_model_param, enum_class_param):
inferred_enums = inferred_model_param['enums'][enum_class_param]['permissible_values']
inferred_keys = list(inferred_enums.keys())
inferred_keys.sort(key=str.casefold)
return inferred_keys
def get_enum_list(inferred_model_param):
inner_enums = list(inferred_model_param['enums'].keys())
return inner_enums
def case_fold_list_sort(input_list):
output_list = input_list
output_list.sort(key=str.casefold)
return output_list
def read_yaml_model(modelfile_param):
with open(modelfile_param) as file:
inner_inferred_model = yaml.load(file, Loader=yaml.FullLoader)
return inner_inferred_model
# don't forget type field on options ???
# synbio example (without redirection of yaml stdout):
# ./linkml_model_enrichment/mixs_qd_bp_or_ols.py \
# --modelfile target/Ontology_example_20210317_P2B1_allmods_categorytype_different_scores_per_mod-1.yaml \
# --ontoprefix NCBItaxon,SO \
# --enum_list species_enum,host_organism_enum,category_enum,type_enum,type_long_enum \
# --verbose
@click.command()
@click_log.simple_verbosity_option(logger)
@click.option('--modelfile', '-f',
help='Path to a YAML linkml file containing enumerated values.',
required=True,
type=click.Path(exists=True),
)
@click.option('--tabular_outputfile', '-t',
default='mappings_log.tsv',
help='A tsv dump of all search results will be written to this file.',
show_default=True,
type=click.Path()
)
@click.option('--ontoprefix', '-p',
default='NCBITaxon,SO,ENVO,PATO,GO,OBI',
help='comma-separated list of (abbreviated) ontologies to search over.',
show_default=True
)
@click.option('--enum_list', '-e',
default='',
help='Comma-separated list of enums to search with. Defaults to all enums.',
show_default=False
)
# the choice and order of the query_fields has a big impact on what terms are returned
# overwrite the model's description with preferred term?
# OLS defaults are {label, synonym, description, short_form, obo_id, annotations, logical_description, iri}
@click.option('--query_fields', '-q',
default='',
help="Comma-separated list of term properties to include in string similarity calculation. " +
"Defaults to label,synonym,description,short_form,obo_id,annotations,logical_description,iri.",
show_default=False
)
# replaced_chars impacts returned fields too
# 'SARS-CoV-2' fails if the hyphens are escaped or ???
@click.option('--replaced_chars', '-c',
default='\.\_\- ',
help='Characters to replace with whitespace.',
show_default=True
)
@click.option('--min_search_chars', '-n',
default=2,
help='TEMPORARILY DISABLED. Queries with fewer characters will not be submitted in the search.',
show_default=True
)
@click.option('--row_req', '-r',
default=5,
help='Requested number of search results.',
show_default=True
)
@click.option('--maxdist', '-x',
default=0.05,
help="Maximum string distance between query and best matching term's best matching property.",
show_default=True
)
@click.option('--overwite_meaning', '-m',
help="Should existing enum meanings and descriptions be overwritten?",
is_flag=True
)
@click.option('--search_engine', '-s',
default='OLS',
help="BioPortal option has been temporarily disabled.",
show_default=True
)
def clickmain(modelfile, tabular_outputfile, ontoprefix, enum_list, query_fields, replaced_chars, min_search_chars,
row_req, maxdist, overwite_meaning, search_engine):
"""Uses web-based ontology lookup tools to map the permitted values of enums from linkml files to CURIES.
Optionally overwrites the meaning with a CURIE and the description with a preferred label.
Writes the resulting YAML to STDOUT."""
global failures, inferred_model, ecg, opg, rrg, qfg, mdg, omg
inferred_model = read_yaml_model(modelfile)
ecg = replaced_chars
opg = ontoprefix
rrg = row_req
qfg = query_fields
mdg = maxdist
omg = overwite_meaning
requested_enums = enum_list.split(",")
sorted_requested = case_fold_list_sort(requested_enums)
    available_enums = get_enum_list(inferred_model)
    sorted_available = case_fold_list_sort(available_enums)
    logger.info(sorted_available)
    if len(enum_list) == 0 or len(enum_list[0]) == 0:
        settled_enums = sorted_available
else:
settled_enums = sorted_requested
if search_engine == 'OLS':
all_ols_results = all_enums_to_ols(inferred_model, settled_enums)
logger.info("MAPPING FAILURES")
logger.info(list(set(failures)))
all_ols_results.to_csv(tabular_outputfile, sep='\t')
yaml.safe_dump(inferred_model, sys.stdout, default_flow_style=False)
elif search_engine == 'BioPortal':
logger.warning('BioPortal search temporarily disabled')
return
else:
logger.warning('No valid search engine specified')
if __name__ == '__main__':
clickmain(auto_envvar_prefix='ENUMENRICH')
|
[
"pandas.DataFrame",
"click_log.simple_verbosity_option",
"yaml.load",
"yaml.safe_dump",
"click.option",
"click.command",
"re.sub",
"strsimpy.cosine.Cosine",
"requests.get",
"click.Path",
"pandas.Series",
"click_log.basic_config",
"pandas.set_option",
"pandas.concat",
"logging.getLogger"
] |
[((310, 337), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (327, 337), False, 'import logging\n'), ((338, 368), 'click_log.basic_config', 'click_log.basic_config', (['logger'], {}), '(logger)\n', (360, 368), False, 'import click_log\n'), ((369, 419), 'pandas.set_option', 'pds.set_option', (['"""display.expand_frame_repr"""', '(False)'], {}), "('display.expand_frame_repr', False)\n", (383, 419), True, 'import pandas as pds\n'), ((665, 700), 'pandas.DataFrame', 'pds.DataFrame', ([], {'columns': 'cols2display'}), '(columns=cols2display)\n', (678, 700), True, 'import pandas as pds\n'), ((11708, 11723), 'click.command', 'click.command', ([], {}), '()\n', (11721, 11723), False, 'import click\n'), ((11725, 11766), 'click_log.simple_verbosity_option', 'click_log.simple_verbosity_option', (['logger'], {}), '(logger)\n', (11758, 11766), False, 'import click_log\n'), ((12223, 12398), 'click.option', 'click.option', (['"""--ontoprefix"""', '"""-p"""'], {'default': '"""NCBITaxon,SO,ENVO,PATO,GO,OBI"""', 'help': '"""comma-separated list of (abbreviated) ontologies to search over."""', 'show_default': '(True)'}), "('--ontoprefix', '-p', default='NCBITaxon,SO,ENVO,PATO,GO,OBI',\n help='comma-separated list of (abbreviated) ontologies to search over.',\n show_default=True)\n", (12235, 12398), False, 'import click\n'), ((12449, 12600), 'click.option', 'click.option', (['"""--enum_list"""', '"""-e"""'], {'default': '""""""', 'help': '"""Comma-separated list of enums to search with. Defaults to all enums."""', 'show_default': '(False)'}), "('--enum_list', '-e', default='', help=\n 'Comma-separated list of enums to search with. Defaults to all enums.',\n show_default=False)\n", (12461, 12600), False, 'import click\n'), ((12902, 13180), 'click.option', 'click.option', (['"""--query_fields"""', '"""-q"""'], {'default': '""""""', 'help': "('Comma-separated list of term properties to include in string similarity calculation. '\n +\n 'Defaults to label,synonym,description,short_form,obo_id,annotations,logical_description,iri.'\n )", 'show_default': '(False)'}), "('--query_fields', '-q', default='', help=\n 'Comma-separated list of term properties to include in string similarity calculation. '\n +\n 'Defaults to label,synonym,description,short_form,obo_id,annotations,logical_description,iri.'\n , show_default=False)\n", (12914, 13180), False, 'import click\n'), ((13339, 13470), 'click.option', 'click.option', (['"""--replaced_chars"""', '"""-c"""'], {'default': '"""\\\\.\\\\_\\\\- """', 'help': '"""Characters to replace with whitespace."""', 'show_default': '(True)'}), "('--replaced_chars', '-c', default='\\\\.\\\\_\\\\- ', help=\n 'Characters to replace with whitespace.', show_default=True)\n", (13351, 13470), False, 'import click\n'), ((13521, 13698), 'click.option', 'click.option', (['"""--min_search_chars"""', '"""-n"""'], {'default': '(2)', 'help': '"""TEMPORARILY DISABLED. Queries with fewer characters will not be submitted in the search."""', 'show_default': '(True)'}), "('--min_search_chars', '-n', default=2, help=\n 'TEMPORARILY DISABLED. 
Queries with fewer characters will not be submitted in the search.'\n , show_default=True)\n", (13533, 13698), False, 'import click\n'), ((13747, 13857), 'click.option', 'click.option', (['"""--row_req"""', '"""-r"""'], {'default': '(5)', 'help': '"""Requested number of search results."""', 'show_default': '(True)'}), "('--row_req', '-r', default=5, help=\n 'Requested number of search results.', show_default=True)\n", (13759, 13857), False, 'import click\n'), ((13911, 14080), 'click.option', 'click.option', (['"""--maxdist"""', '"""-x"""'], {'default': '(0.05)', 'help': '"""Maximum string distance between query and best matching term\'s best matching property."""', 'show_default': '(True)'}), '(\'--maxdist\', \'-x\', default=0.05, help=\n "Maximum string distance between query and best matching term\'s best matching property."\n , show_default=True)\n', (13923, 14080), False, 'import click\n'), ((14129, 14263), 'click.option', 'click.option', (['"""--overwite_meaning"""', '"""-m"""'], {'help': '"""Should existing enum meanings and descriptions be overwritten?"""', 'is_flag': '(True)'}), "('--overwite_meaning', '-m', help=\n 'Should existing enum meanings and descriptions be overwritten?',\n is_flag=True)\n", (14141, 14263), False, 'import click\n'), ((14299, 14431), 'click.option', 'click.option', (['"""--search_engine"""', '"""-s"""'], {'default': '"""OLS"""', 'help': '"""BioPortal option has been temporarily disabled."""', 'show_default': '(True)'}), "('--search_engine', '-s', default='OLS', help=\n 'BioPortal option has been temporarily disabled.', show_default=True)\n", (14311, 14431), False, 'import click\n'), ((2340, 2375), 'pandas.DataFrame', 'pds.DataFrame', ([], {'columns': 'cols2display'}), '(columns=cols2display)\n', (2353, 2375), True, 'import pandas as pds\n'), ((10287, 10322), 'pandas.DataFrame', 'pds.DataFrame', ([], {'columns': 'cols2display'}), '(columns=cols2display)\n', (10300, 10322), True, 'import pandas as pds\n'), ((3399, 3427), 'requests.get', 'requests.get', (['request_string'], {}), '(request_string)\n', (3411, 3427), False, 'import requests\n'), ((3522, 3580), 'pandas.DataFrame', 'pds.DataFrame', (["ols_string_search_res_j['response']['docs']"], {}), "(ols_string_search_res_j['response']['docs'])\n", (3535, 3580), True, 'import pandas as pds\n'), ((4341, 4350), 'strsimpy.cosine.Cosine', 'Cosine', (['(1)'], {}), '(1)\n', (4347, 4350), False, 'from strsimpy.cosine import Cosine\n'), ((4380, 4447), 'pandas.DataFrame', 'pds.DataFrame', ([], {'columns': "['name', 'obo_id', 'scope', 'type', 'xrefs']"}), "(columns=['name', 'obo_id', 'scope', 'type', 'xrefs'])\n", (4393, 4447), True, 'import pandas as pds\n'), ((11250, 11289), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'yaml.FullLoader'}), '(file, Loader=yaml.FullLoader)\n', (11259, 11289), False, 'import yaml\n'), ((15815, 15883), 'yaml.safe_dump', 'yaml.safe_dump', (['inferred_model', 'sys.stdout'], {'default_flow_style': '(False)'}), '(inferred_model, sys.stdout, default_flow_style=False)\n', (15829, 15883), False, 'import yaml\n'), ((11929, 11952), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (11939, 11952), False, 'import click\n'), ((12193, 12205), 'click.Path', 'click.Path', ([], {}), '()\n', (12203, 12205), False, 'import click\n'), ((2609, 2649), 're.sub', 're.sub', (["('[' + ecg + ']+')", '""" """', 'orig_enum'], {}), "('[' + ecg + ']+', ' ', orig_enum)\n", (2615, 2649), False, 'import re\n'), ((4083, 4118), 'pandas.DataFrame', 'pds.DataFrame', (['[no_search_res_dict]'], {}), 
'([no_search_res_dict])\n', (4096, 4118), True, 'import pandas as pds\n'), ((4919, 4952), 'requests.get', 'requests.get', (['term_retr_assembled'], {}), '(term_retr_assembled)\n', (4931, 4952), False, 'import requests\n'), ((5237, 5344), 'pandas.DataFrame', 'pds.DataFrame', (["[[term_json['label'], 'label', 'label', '']]"], {'columns': "['name', 'scope', 'type', 'xrefs']"}), "([[term_json['label'], 'label', 'label', '']], columns=['name',\n 'scope', 'type', 'xrefs'])\n", (5250, 5344), True, 'import pandas as pds\n'), ((5824, 5851), 'pandas.DataFrame', 'pds.DataFrame', (['obo_syn_json'], {}), '(obo_syn_json)\n', (5837, 5851), True, 'import pandas as pds\n'), ((7059, 7095), 'pandas.Series', 'pds.Series', (["['', '', '', '', '', '']"], {}), "(['', '', '', '', '', ''])\n", (7069, 7095), True, 'import pandas as pds\n'), ((7121, 7218), 'pandas.DataFrame', 'pds.DataFrame', (['[manual_row]'], {'columns': "['name', 'obo_id', 'scope', 'type', 'xrefs', 'pref_lab']"}), "([manual_row], columns=['name', 'obo_id', 'scope', 'type',\n 'xrefs', 'pref_lab'])\n", (7134, 7218), True, 'import pandas as pds\n'), ((7251, 7309), 'pandas.concat', 'pds.concat', (['[row_df, annotations_frame]'], {'ignore_index': '(True)'}), '([row_df, annotations_frame], ignore_index=True)\n', (7261, 7309), True, 'import pandas as pds\n')]
|
import asyncio
import logging
import struct
from surrortg.inputs import Switch
from . import UdpInput
class UdpSwitch(Switch, UdpInput):
"""Class for udp-controlled switch.
:param cmd: udp byte that identifies the control id
:type cmd: int
:param repeat_commands: defines if commands should be repeated,
defaults to False
:type repeat_commands: bool, optional
"""
def __init__(self, cmd, repeat_commands=False):
super().__init__()
self.cmd = cmd
self.value_off = 0
self.value_on = 1
self.should_repeat = repeat_commands
self.current_val = self.value_off
self.repeat_task = None
async def on(self, seat):
self._handle_command(self.value_on, seat)
async def off(self, seat):
self._handle_command(self.value_off, seat)
def _handle_command(self, val, seat):
self._send_command(val, seat)
if self.should_repeat:
self.current_val = val
if self.repeat_task is not None:
self.repeat_task.cancel()
self.repeat_task = asyncio.create_task(
self._repeat_command(10, 0.2, seat)
)
def _send_command(self, val, seat):
"""Sends a udp command to the endpoint of the seat
:param val: switch position value, 0 or 1
:type val: int
:param seat: Robot seat
:type seat: int
"""
assert val == 0 or val == 1
if seat not in self.endpoints:
logging.warning(
f"Endpoint not found for seat {seat}, not sending command."
)
return
endpoint = self.endpoints[seat]
logging.debug(
f"Running udp switch {self.cmd} of seat {seat} with value {val}"
)
if not endpoint.closed:
try:
endpoint.send(struct.pack("BB", self.cmd, val))
except OSError as e:
logging.warning(
f"Failed to send value {val} to seat {seat} "
f"command {self.cmd}: {e}"
)
else:
logging.debug(
f"Did not send value {val} to seat {seat} "
f"command {self.cmd}, was closed"
)
async def _repeat_command(self, num_sends, interval, seat):
"""Calls _send_command on repeat a specific number of times
:param num_sends: number of times _send_command is called
:type num_sends: int
:param interval: number of seconds between command sends
:type interval: float
:param seat: Robot seat
:type seat: int
"""
for _ in range(num_sends):
await asyncio.sleep(interval)
self._send_command(self.current_val, seat)
|
[
"logging.warning",
"struct.pack",
"logging.debug",
"asyncio.sleep"
] |
[((1800, 1879), 'logging.debug', 'logging.debug', (['f"""Running udp switch {self.cmd} of seat {seat} with value {val}"""'], {}), "(f'Running udp switch {self.cmd} of seat {seat} with value {val}')\n", (1813, 1879), False, 'import logging\n'), ((1624, 1700), 'logging.warning', 'logging.warning', (['f"""Endpoint not found for seat {seat}, not sending command."""'], {}), "(f'Endpoint not found for seat {seat}, not sending command.')\n", (1639, 1700), False, 'import logging\n'), ((2239, 2332), 'logging.debug', 'logging.debug', (['f"""Did not send value {val} to seat {seat} command {self.cmd}, was closed"""'], {}), "(\n f'Did not send value {val} to seat {seat} command {self.cmd}, was closed')\n", (2252, 2332), False, 'import logging\n'), ((2823, 2846), 'asyncio.sleep', 'asyncio.sleep', (['interval'], {}), '(interval)\n', (2836, 2846), False, 'import asyncio\n'), ((1981, 2013), 'struct.pack', 'struct.pack', (['"""BB"""', 'self.cmd', 'val'], {}), "('BB', self.cmd, val)\n", (1992, 2013), False, 'import struct\n'), ((2065, 2155), 'logging.warning', 'logging.warning', (['f"""Failed to send value {val} to seat {seat} command {self.cmd}: {e}"""'], {}), "(\n f'Failed to send value {val} to seat {seat} command {self.cmd}: {e}')\n", (2080, 2155), False, 'import logging\n')]
|
import argparse
import errno
import json
import logging
import os
import textwrap
from os import walk
def load_json_file(path: str):
f = open(path, "r")
data = f.read()
f.close()
return json.loads(data)
def save_markdown_file(path: str, data):
f = open(path, "w")
f.writelines(data)
f.close()
def convert_list_content(list_content):
data = ""
for item in list_content:
checked = "x" if item["isChecked"] else " "
text = item["text"]
data = data + f"- [{checked}] {text}\n"
return data
def convert_to_markdown(json_data, note_name):
archived = json_data["isArchived"]
data = f"# {note_name}\n\n"
if "listContent" in json_data:
data = data + convert_list_content(json_data["listContent"])
if "textContent" in json_data:
data = data + json_data["textContent"]
return archived, data
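# Example: {"title": "Groceries", "isArchived": false,
#           "listContent": [{"text": "Milk", "isChecked": true}]}
# converts to "# Groceries\n\n- [x] Milk\n" with archived == False.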
def set_path_to_file_names(dir, filenames):
new_files = []
for file in filenames:
new_files.append(os.path.join(dir, file))
return new_files
def get_folder_files(path, recursive):
dirpath, dirnames, filenames = next(walk(path), (None, None, []))
filenames = set_path_to_file_names(path, filenames)
if recursive and dirnames:
for dir in dirnames:
filenames = filenames + get_folder_files(os.path.join(path, dir), recursive)
return filenames
def convert_file(
input,
output=None,
archived=False,
archivedoutput=None,
from_folder=False,
force_file=False,
):
print(f"\n\nConverting file {input}")
file_name, extension = os.path.splitext(os.path.basename(input))
if extension != ".json" and not force_file:
print(
"Skipping file, not json format. Use flag --force to force the file to be used. WARNING: This script may throw an error."
)
return
json_data = load_json_file(input)
note_name = json_data["title"] if json_data["title"] else file_name
note_archived, markdown = convert_to_markdown(json_data, note_name)
print(f"Archived: {note_archived}")
print(f"Note name: {note_name}")
print(f"File name: {file_name}")
if from_folder:
archive = "archived" if archived and note_archived else ""
archive = archivedoutput if archivedoutput and archived else archive
output_file = os.path.join(output, archive, f"{note_name}.md")
else:
output_file = output if output else f"{note_name}.md"
print(f"Outputing file to {output_file}")
save_markdown_file(output_file, markdown)
def convert_folder(
path,
recursive=False,
output=None,
archived=False,
archivedoutput=None,
force_file=False,
):
filenames = get_folder_files(path, recursive)
for file in filenames:
try:
convert_file(file, output, archived, archivedoutput, True, force_file)
except Exception as e:
print(f"Error converting file: {file}")
logging.error(e)
description_lines = [
"Convert Google Takeout Keep files to Markdown",
"",
"\tconvert.py --input some_exported_file.json --output converted.md",
"",
"\tconvert.py --input /path/to/input --output /path/to/output -r",
]
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(
"""\
Convert Google Takeout Keep files to Markdown.
----------------------------------------------
convert.py -i some_exported_file.json --o converted.md
convert.py -i /path/to/input -o /path/to/output -r -a
"""
),
)
parser.add_argument(
"-i",
"--input",
dest="input",
type=str,
required=True,
help="Path to input file or directory.",
)
parser.add_argument(
"-o", "--output", dest="output", type=str, help="Path to output file or directory."
)
parser.add_argument(
"-r",
"--recursive",
dest="recursive",
action="store_true",
help="Directory only. Enable recursive convertion for directories. Not used for individual files. The subdirectories structures will be lost in the output folder.",
)
parser.add_argument(
"-a",
"--archived",
dest="archived",
action="store_true",
help='Directory only. Separate archived notes to a separate directory. Default directory "archived"',
)
parser.add_argument(
"-f",
"--force",
dest="force_file",
action="store_true",
help="Force the file to be read if the extension is not .json. This may break the conversion.",
)
parser.add_argument(
"--archivedoutput",
dest="archivedoutput",
type=str,
help="Path to archived output directory.",
)
if __name__ == "__main__":
args = parser.parse_args()
if os.path.isdir(args.input):
if args.output:
try:
os.mkdir(args.output)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
if args.archived:
archive_path = (
os.path.join(args.output, args.archivedoutput)
if args.archivedoutput
else os.path.join(args.output, "archived")
)
try:
print(f"Making dir {archive_path}")
os.mkdir(archive_path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
convert_folder(
args.input,
args.recursive,
args.output,
args.archived,
args.archivedoutput,
args.force_file,
)
elif os.path.isfile(args.input):
if args.recursive:
print("Recursive flag will be ignored. Input not a folder.")
convert_file(args.input, args.output)
else:
parser.error("The input parameter is not a folder or usable file")
|
[
"textwrap.dedent",
"os.mkdir",
"logging.error",
"json.loads",
"os.path.basename",
"os.path.isdir",
"os.walk",
"os.path.isfile",
"os.path.join"
] |
[((205, 221), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (215, 221), False, 'import json\n'), ((4764, 4789), 'os.path.isdir', 'os.path.isdir', (['args.input'], {}), '(args.input)\n', (4777, 4789), False, 'import os\n'), ((1137, 1147), 'os.walk', 'walk', (['path'], {}), '(path)\n', (1141, 1147), False, 'from os import walk\n'), ((1621, 1644), 'os.path.basename', 'os.path.basename', (['input'], {}), '(input)\n', (1637, 1644), False, 'import os\n'), ((2352, 2400), 'os.path.join', 'os.path.join', (['output', 'archive', 'f"""{note_name}.md"""'], {}), "(output, archive, f'{note_name}.md')\n", (2364, 2400), False, 'import os\n'), ((3341, 3630), 'textwrap.dedent', 'textwrap.dedent', (['""" Convert Google Takeout Keep files to Markdown.\n ----------------------------------------------\n convert.py -i some_exported_file.json --o converted.md\n convert.py -i /path/to/input -o /path/to/output -r -a\n """'], {}), '(\n """ Convert Google Takeout Keep files to Markdown.\n ----------------------------------------------\n convert.py -i some_exported_file.json --o converted.md\n convert.py -i /path/to/input -o /path/to/output -r -a\n """\n )\n', (3356, 3630), False, 'import textwrap\n'), ((5677, 5703), 'os.path.isfile', 'os.path.isfile', (['args.input'], {}), '(args.input)\n', (5691, 5703), False, 'import os\n'), ((1009, 1032), 'os.path.join', 'os.path.join', (['dir', 'file'], {}), '(dir, file)\n', (1021, 1032), False, 'import os\n'), ((2975, 2991), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (2988, 2991), False, 'import logging\n'), ((4849, 4870), 'os.mkdir', 'os.mkdir', (['args.output'], {}), '(args.output)\n', (4857, 4870), False, 'import os\n'), ((5071, 5117), 'os.path.join', 'os.path.join', (['args.output', 'args.archivedoutput'], {}), '(args.output, args.archivedoutput)\n', (5083, 5117), False, 'import os\n'), ((5178, 5215), 'os.path.join', 'os.path.join', (['args.output', '"""archived"""'], {}), "(args.output, 'archived')\n", (5190, 5215), False, 'import os\n'), ((5315, 5337), 'os.mkdir', 'os.mkdir', (['archive_path'], {}), '(archive_path)\n', (5323, 5337), False, 'import os\n'), ((1336, 1359), 'os.path.join', 'os.path.join', (['path', 'dir'], {}), '(path, dir)\n', (1348, 1359), False, 'import os\n')]
|
"""
Custom ORM behavior.
"""
import pandas
import sqlalchemy.orm
from redpanda import dialects
class Query(sqlalchemy.orm.Query):
"""
RedPanda SQLAlchemy Query.
Adds the frame() method to queries.
"""
def __init__(self, entities, session=None, read_sql=None):
super(Query, self).__init__(entities, session)
if read_sql is None:
try:
entity_zero, *_ = entities
read_sql = entity_zero.__read_sql__
except (AttributeError, TypeError, ValueError):
read_sql = {}
self._read_sql = read_sql
def frame(self, **read_sql):
"""
Return RedPanda pandas.DataFrame instance.
"""
        # Get connection
conn = self.session.connection()
# Get SQL+params from engine
sql, params = dialects.statement_and_params(conn.engine, self)
# Get read_sql arguments
read_sql = {**self._read_sql, **{'params': params}, **read_sql}
# Read SQL into DataFrame
dataframe = pandas.read_sql(str(sql), conn.engine, **read_sql)
if read_sql.get('columns') is not None:
dataframe = dataframe[read_sql['columns']]
return dataframe
class Session(sqlalchemy.orm.Session):
"""
RedPanda SQLAlchemy Session.
Adds add_dataframe() method to session.
"""
def add_dataframe(self, cls, dataframe, parse_index=False):
"""
Return a generator for SQLAlchemy models from a pandas.DataFrame.
:param class cls: Target model for DataFrame
:param pandas.DataFrame dataframe: pandas.DataFrame to parse
:param boolean parse_index: parse the index as a model attr
:returns iter: Generator of SQLAlchemy objects.
"""
for idx, row in dataframe.iterrows():
attrs = row.dropna().to_dict()
if parse_index is True:
if dataframe.index.name is None:
raise ValueError('Cannot parse unnamed index')
attrs[dataframe.index.name] = idx
self.add(cls(**attrs))
def sessionmaker(class_=Session, query_cls=Query, **kwargs):
"""
Override of sqlalchemy.orm.sessionmaker to use RedPanda Session/Query.
"""
return sqlalchemy.orm.sessionmaker(
class_=class_, query_cls=query_cls, **kwargs)
def within(self, index):
"""
Like between() but takes a pandas index object.
:param pandas.Index index: pandas index
:returns self: result of between() with start/end as the ends of the index.
"""
try:
start = index.min().start_time
end = index.max().end_time
except AttributeError:
start = index.min()
end = index.max()
return self.between(start, end)
sqlalchemy.orm.attributes.InstrumentedAttribute.within = within
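# ---------------------------------------------------------------------------
# Illustrative usage sketch (editorial addition, not part of the original
# module). It assumes only an in-memory SQLite engine and a throwaway Event
# model, and shows how sessionmaker(), Query.frame() and the within() helper
# fit together.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import sqlalchemy as sa
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Event(Base):
        __tablename__ = 'events'
        id = sa.Column(sa.Integer, primary_key=True)
        stamp = sa.Column(sa.DateTime)

    engine = sa.create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    # within() accepts a pandas index; frame() returns a DataFrame of the rows.
    index = pandas.period_range('2020-01-01', periods=31, freq='D')
    frame = session.query(Event).filter(Event.stamp.within(index)).frame()
    print(frame.head())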
|
[
"redpanda.dialects.statement_and_params"
] |
[((837, 885), 'redpanda.dialects.statement_and_params', 'dialects.statement_and_params', (['conn.engine', 'self'], {}), '(conn.engine, self)\n', (866, 885), False, 'from redpanda import dialects\n')]
|
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation
import numpy as np
import argparse
import random
import gym
import sys
from collections import deque
from keras import backend as K
from keras.layers import Input, Dense
from keras.models import Model
from keras.utils import plot_model
env = gym.make('MountainCar-v0')
state_space=env.observation_space.shape[0]
action_s=env.action_space.n
#Hyperparameters
learning_rate=0.001
episodes=1000000
epsilon_start=0.5
epsilon_end=0.05
#decay=(epsilon_start-epsilon_end)/100000
decay = 0.9
batch_size=32
max_steps=200
gamma=1.0
hidden_layer=50
class QNetwork():
def __init__(self,learning_rate,action_space,input_dim):
# self.model= Sequential()
# self.model.add(Dense(units=30,activation='relu',input_dim=state_space,kernel_initializer='he_uniform'))
# self.model.add(Dense(units=30,activation='relu',kernel_initializer='he_uniform'))
# self.model.add(Dense(units=30,activation='relu',kernel_initializer='he_uniform'))
# self.model.add(Dense(units=action_s,activation='linear',kernel_initializer='he_uniform'))
self.input = Input(shape=(input_dim,))
self.x=Dense(hidden_layer,activation='relu')(self.input)
# self.x=keras.layers.BatchNormalization(axis=-1)(self.x)
self.x=Dense(hidden_layer,activation='relu')(self.x)
# self.x=keras.layers.BatchNormalization(axis=-1)(self.x)
self.x=Dense(hidden_layer,activation='relu')(self.x)
self.value= Dense(1,activation='linear',name='value')(self.x)
self.value1=self.value
self.advantage = Dense(action_s,activation='linear',name='advantage')(self.x)
self.advantage_mean = keras.layers.Lambda(lambda x:K.mean(x,axis=-1,keepdims=True))(self.advantage)
self.advantage_mean1 = self.advantage_mean
# self.value=keras.layers.RepeatVector(2)
# print('Value',self.value.shape)
# self.value = keras.layers.Lambda(lambda x:K.equal(x,axis=-1,keepdims=True))(self.value)
i=1
while(i<action_s):
self.value=keras.layers.Lambda(lambda x:K.concatenate(x, axis=-1))([self.value,self.value1])
self.advantage_mean=keras.layers.Lambda(lambda x:K.concatenate(x,axis=-1))([self.advantage_mean1,self.advantage_mean])
i+=1
# print('Adv',self.keras.backend.identity.shape)
# self.advantage_mean=keras.layers.Lambda(lambda x:K.identity(x))(self.advantage_mean)
# print('Val1',self.value1.shape)
self.advantage_subtract_mean = keras.layers.Subtract()([self.advantage,self.advantage_mean])
# print('Adv su',self.advantage_mean.shape)
self.added = keras.layers.Add()([self.advantage_subtract_mean,self.value])
# print("Added",self.added.shape)
# equivalent to added = keras.layers.add([x1, x2])
# self.out = Dense(action_s,activation='linear')(self.added)
# print("out",self.out.shape)
self.optimizer=keras.optimizers.Adam(lr=learning_rate)
self.model = Model(inputs=self.input, outputs=self.added)
self.model.compile(loss='mse',optimizer=self.optimizer)
plot_model(self.model, to_file='Duelling2.png')
def save_model_weights(self, fname):
self.model.save_weights(fname)
def load_model(self, model_file):
		self.model = keras.models.load_model(model_file)
def load_model_weights(self,fname):
self.model.load_weights(fname)
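# Editorial note (added): QNetwork implements the dueling aggregation
# Q(s, a) = V(s) + A(s, a) - mean_a A(s, a); the concatenate Lambdas tile the
# scalar value and the advantage mean across all action_s outputs so that
# keras.layers.Subtract and keras.layers.Add receive tensors of matching shape.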
class DQN_Agent():
# In this class, we will implement functions to do the following.
# (1) Create an instance of the Q Network class.
# (2) Create a function that constructs a policy from the Q values predicted by the Q Network.
# (a) Epsilon Greedy Policy.
# (b) Greedy Policy.
# (3) Create a function to train the Q Network, by interacting with the environment.
# (4) Create a function to test the Q Network's performance on the environment.
# (5) Create a function for Experience Replay.
def __init__(self, environment_name, render=False):
self.env = environment_name
self.net=QNetwork(learning_rate,action_s,state_space)
self.prev_net=QNetwork(learning_rate,action_s,state_space)
self.prev_net.model.set_weights(self.net.model.get_weights())
self.q_values=np.zeros([batch_size,action_s])
self.memory=Replay_Memory()
self.burn_in_memory()
def epsilon_greedy_policy(self, q_values,epsilon):
if (epsilon>np.random.random()):
action=random.randrange(action_s)
else:
action=np.argmax(q_values[0])
return action
def greedy_policy(self, q_values):
action=np.argmax(q_values)
return action
def train(self):
# In this function, we will train our network.
# If training without experience replay_memory, then you will interact with the environment
# in this function, while also updating your network parameters.
# If you are using a replay memory, you should interact with environment here, and store these
# transitions to memory, while also updating your model.
epsilon = epsilon_start
for i in range(1000000):
state = env.reset()
state=np.reshape(state,[1,state_space])
total_reward=0
step=0
while step<max_steps:
env.render()
step+=1
q_values = self.net.model.predict(state)
action=self.epsilon_greedy_policy(q_values,epsilon)
new_state,reward,done, _ = env.step(action)
new_state=np.reshape(new_state,[1,state_space])
self.memory.append([state,action,reward,done,new_state])
minibatch=self.memory.sample_batch()
batch_states=np.zeros((batch_size,state_space))
batch_next_states=np.zeros((batch_size,state_space))
t_int=0
for batch_state, batch_action, batch_reward, batch_done, batch_new_state in minibatch:
batch_states[t_int]=batch_state
batch_next_states[t_int]=batch_new_state
t_int+=1
batch_q_values=self.net.model.predict(batch_states)
batch_prev_q_values=self.prev_net.model.predict(batch_next_states)
t_int=0
for batch_state, batch_action, batch_reward, batch_done, batch_new_state in minibatch:
if batch_done:
temp=0
else:
temp=gamma*(np.amax(batch_prev_q_values[t_int]))
batch_q_values[t_int][batch_action] = batch_reward+temp
t_int+=1
self.net.model.fit(batch_states,batch_q_values,batch_size=batch_size,epochs=1,verbose=0)
epsilon*=decay
if epsilon<epsilon_end:
epsilon = epsilon_end
total_reward+=reward
state=new_state
if done:
break
self.prev_net.model.set_weights(self.net.model.get_weights())
print(i,total_reward)
def test(self, model_file=None):
# Evaluate the performance of your agent over 100 episodes, by calculating cummulative rewards for the 100 episodes.
# Here you need to interact with the environment, irrespective of whether you are using a memory.
pass
def burn_in_memory(self):
state = env.reset()
state=np.reshape(state,[1,state_space])
for i in range(self.memory.burn_in):
action=random.randrange(action_s)
new_state, reward, done, _ = env.step(action)
new_state=np.reshape(new_state,[1,state_space])
self.memory.append([state,action,reward,done,new_state])
state=new_state
if done:
state=env.reset()
state=np.reshape(state,[1,state_space])
class Replay_Memory():
def __init__(self, memory_size=10000, burn_in=5000):
self.transitions =[]
self.memory_size=memory_size
self.burn_in = burn_in
def sample_batch(self, batch_size=32):
return random.sample(self.transitions,batch_size)
def append(self, transition):
if(len(self.transitions)<self.memory_size):
self.transitions.append(transition)
else:
idx=random.randint(1,self.memory_size-1)
# print(idx)
del self.transitions[idx]
self.transitions.append(transition)
def parse_arguments():
parser = argparse.ArgumentParser(description='Linear Q network parser')
parser.add_argument('--env',dest='env',type=str)
parser.add_argument('--render',dest='render',type=int,default=0)
parser.add_argument('--train',dest='train',type=int,default=1)
parser.add_argument('--model',dest='model_file',type=str)
return parser.parse_args()
def main(args):
args = parse_arguments()
environment_name = args.env
# Setting the session to allow growth, so it doesn't allocate all GPU memory.
gpu_ops = tf.GPUOptions(allow_growth=True)
config = tf.ConfigProto(gpu_options=gpu_ops)
sess = tf.Session(config=config)
# Setting this as the default tensorflow session.
keras.backend.tensorflow_backend.set_session(sess)
agent=DQN_Agent(environment_name)
# print(agent)
DQN_Agent.train(agent)
# You want to create an instance of the DQN_Agent class here, and then train / test it.
if __name__ == '__main__':
main(sys.argv)
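# Editorial note (added): a typical invocation would be
#   python <this_script>.py --env MountainCar-v0 --train 1
# (the script name is a placeholder); note that the environment is created
# globally via gym.make('MountainCar-v0'), so the parsed --env value is stored
# on the agent but never used to build the environment.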
|
[
"argparse.ArgumentParser",
"numpy.argmax",
"random.sample",
"keras.models.Model",
"tensorflow.ConfigProto",
"keras.layers.Input",
"keras.backend.tensorflow_backend.set_session",
"tensorflow.GPUOptions",
"keras.backend.concatenate",
"random.randint",
"keras.utils.plot_model",
"numpy.reshape",
"tensorflow.Session",
"keras.optimizers.Adam",
"gym.make",
"numpy.zeros",
"keras.layers.Add",
"numpy.amax",
"keras.layers.Dense",
"numpy.random.random",
"random.randrange",
"keras.layers.Subtract",
"keras.backend.mean"
] |
[((360, 386), 'gym.make', 'gym.make', (['"""MountainCar-v0"""'], {}), "('MountainCar-v0')\n", (368, 386), False, 'import gym\n'), ((7552, 7614), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Linear Q network parser"""'}), "(description='Linear Q network parser')\n", (7575, 7614), False, 'import argparse\n'), ((8047, 8079), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': '(True)'}), '(allow_growth=True)\n', (8060, 8079), True, 'import tensorflow as tf\n'), ((8090, 8125), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_ops'}), '(gpu_options=gpu_ops)\n', (8104, 8125), True, 'import tensorflow as tf\n'), ((8134, 8159), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (8144, 8159), True, 'import tensorflow as tf\n'), ((8214, 8264), 'keras.backend.tensorflow_backend.set_session', 'keras.backend.tensorflow_backend.set_session', (['sess'], {}), '(sess)\n', (8258, 8264), False, 'import keras\n'), ((1155, 1180), 'keras.layers.Input', 'Input', ([], {'shape': '(input_dim,)'}), '(shape=(input_dim,))\n', (1160, 1180), False, 'from keras.layers import Input, Dense\n'), ((2818, 2857), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (2839, 2857), False, 'import keras\n'), ((2873, 2917), 'keras.models.Model', 'Model', ([], {'inputs': 'self.input', 'outputs': 'self.added'}), '(inputs=self.input, outputs=self.added)\n', (2878, 2917), False, 'from keras.models import Model\n'), ((2978, 3025), 'keras.utils.plot_model', 'plot_model', (['self.model'], {'to_file': '"""Duelling2.png"""'}), "(self.model, to_file='Duelling2.png')\n", (2988, 3025), False, 'from keras.utils import plot_model\n'), ((4024, 4056), 'numpy.zeros', 'np.zeros', (['[batch_size, action_s]'], {}), '([batch_size, action_s])\n', (4032, 4056), True, 'import numpy as np\n'), ((4341, 4360), 'numpy.argmax', 'np.argmax', (['q_values'], {}), '(q_values)\n', (4350, 4360), True, 'import numpy as np\n'), ((6646, 6681), 'numpy.reshape', 'np.reshape', (['state', '[1, state_space]'], {}), '(state, [1, state_space])\n', (6656, 6681), True, 'import numpy as np\n'), ((7221, 7264), 'random.sample', 'random.sample', (['self.transitions', 'batch_size'], {}), '(self.transitions, batch_size)\n', (7234, 7264), False, 'import random\n'), ((1190, 1228), 'keras.layers.Dense', 'Dense', (['hidden_layer'], {'activation': '"""relu"""'}), "(hidden_layer, activation='relu')\n", (1195, 1228), False, 'from keras.layers import Input, Dense\n'), ((1309, 1347), 'keras.layers.Dense', 'Dense', (['hidden_layer'], {'activation': '"""relu"""'}), "(hidden_layer, activation='relu')\n", (1314, 1347), False, 'from keras.layers import Input, Dense\n'), ((1424, 1462), 'keras.layers.Dense', 'Dense', (['hidden_layer'], {'activation': '"""relu"""'}), "(hidden_layer, activation='relu')\n", (1429, 1462), False, 'from keras.layers import Input, Dense\n'), ((1485, 1528), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""', 'name': '"""value"""'}), "(1, activation='linear', name='value')\n", (1490, 1528), False, 'from keras.layers import Input, Dense\n'), ((1580, 1634), 'keras.layers.Dense', 'Dense', (['action_s'], {'activation': '"""linear"""', 'name': '"""advantage"""'}), "(action_s, activation='linear', name='advantage')\n", (1585, 1634), False, 'from keras.layers import Input, Dense\n'), ((2429, 2452), 'keras.layers.Subtract', 'keras.layers.Subtract', ([], {}), '()\n', (2450, 2452), False, 'import keras\n'), ((2553, 
2571), 'keras.layers.Add', 'keras.layers.Add', ([], {}), '()\n', (2569, 2571), False, 'import keras\n'), ((4178, 4196), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4194, 4196), True, 'import numpy as np\n'), ((4209, 4235), 'random.randrange', 'random.randrange', (['action_s'], {}), '(action_s)\n', (4225, 4235), False, 'import random\n'), ((4254, 4276), 'numpy.argmax', 'np.argmax', (['q_values[0]'], {}), '(q_values[0])\n', (4263, 4276), True, 'import numpy as np\n'), ((4853, 4888), 'numpy.reshape', 'np.reshape', (['state', '[1, state_space]'], {}), '(state, [1, state_space])\n', (4863, 4888), True, 'import numpy as np\n'), ((6729, 6755), 'random.randrange', 'random.randrange', (['action_s'], {}), '(action_s)\n', (6745, 6755), False, 'import random\n'), ((6818, 6857), 'numpy.reshape', 'np.reshape', (['new_state', '[1, state_space]'], {}), '(new_state, [1, state_space])\n', (6828, 6857), True, 'import numpy as np\n'), ((7396, 7435), 'random.randint', 'random.randint', (['(1)', '(self.memory_size - 1)'], {}), '(1, self.memory_size - 1)\n', (7410, 7435), False, 'import random\n'), ((5139, 5178), 'numpy.reshape', 'np.reshape', (['new_state', '[1, state_space]'], {}), '(new_state, [1, state_space])\n', (5149, 5178), True, 'import numpy as np\n'), ((5297, 5332), 'numpy.zeros', 'np.zeros', (['(batch_size, state_space)'], {}), '((batch_size, state_space))\n', (5305, 5332), True, 'import numpy as np\n'), ((5354, 5389), 'numpy.zeros', 'np.zeros', (['(batch_size, state_space)'], {}), '((batch_size, state_space))\n', (5362, 5389), True, 'import numpy as np\n'), ((6979, 7014), 'numpy.reshape', 'np.reshape', (['state', '[1, state_space]'], {}), '(state, [1, state_space])\n', (6989, 7014), True, 'import numpy as np\n'), ((1697, 1730), 'keras.backend.mean', 'K.mean', (['x'], {'axis': '(-1)', 'keepdims': '(True)'}), '(x, axis=-1, keepdims=True)\n', (1703, 1730), True, 'from keras import backend as K\n'), ((2037, 2062), 'keras.backend.concatenate', 'K.concatenate', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (2050, 2062), True, 'from keras import backend as K\n'), ((2142, 2167), 'keras.backend.concatenate', 'K.concatenate', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (2155, 2167), True, 'from keras import backend as K\n'), ((5885, 5920), 'numpy.amax', 'np.amax', (['batch_prev_q_values[t_int]'], {}), '(batch_prev_q_values[t_int])\n', (5892, 5920), True, 'import numpy as np\n')]
|
from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime
import ctypes
import _ctypes
import pygame
import sys
import numpy as np
import cv2
#if sys.hexversion >= 0x03000000:
# import _thread as thread
#else:
# import thread
class DepthRuntime(object):
def __init__(self):
pygame.init()
# Used to manage how fast the screen updates
self._clock = pygame.time.Clock()
# Loop until the user clicks the close button.
self._done = False
# Used to manage how fast the screen updates
self._clock = pygame.time.Clock()
# Kinect runtime object, we want only color and body frames
self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth)
# back buffer surface for getting Kinect depth frames, 8bit grey, width and height equal to the Kinect depth frame size
self._frame_surface = pygame.Surface((self._kinect.depth_frame_desc.Width, self._kinect.depth_frame_desc.Height), 0, 24)
# here we will store skeleton data
self._bodies = None
# Set the width and height of the screen [width, height]
self._infoObject = pygame.display.Info()
self._screen = pygame.display.set_mode((self._kinect.depth_frame_desc.Width, self._kinect.depth_frame_desc.Height), pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE, 32)
pygame.display.set_caption("Kinect for Windows v2 Depth")
#def background_subtraction(self, current_frame, previous_frame):
# previousFrame = [0] * 217088
# return frame
def draw_depth_frame(self, frame, target_surface):
        if frame is None:  # some USB hubs do not provide the infrared image; it works with Kinect Studio though
return
target_surface.lock()
f8=np.uint8(frame.clip(1,4000)/16.)
frame8bit=np.dstack((f8,f8,f8))
address = self._kinect.surface_as_array(target_surface.get_buffer())
ctypes.memmove(address, frame8bit.ctypes.data, frame8bit.size)
del address
target_surface.unlock()
def run(self):
# -------- Main Program Loop -----------
frame = [0] * 217088
frames = [frame] * 5
fgbg = cv2.createBackgroundSubtractorKNN()
# fgbg = cv2.createBackgroundSubtractorMOG2()
# print (len(previousFrames))
# print(previousFrames)
while not self._done:
# --- Main event loop
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
self._done = True # Flag that we are done so we exit this loop
elif event.type == pygame.VIDEORESIZE: # window resized
self._screen = pygame.display.set_mode(event.dict['size'], pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE, 32)
# --- Getting frames and drawing
if self._kinect.has_new_depth_frame():
frame = self._kinect.get_last_depth_frame()
fgmask = fgbg.apply(frame)
# flattenMask = []
# for item in fgmask:
# flattenMask.append(item)
flattenMask = [value for element in fgmask for value in element]
# print (type(flattenMask[0]))
flattenMask = np.array(flattenMask)
# flattenMask = np.array(fgmask)
# flattenMask = flattenMask / 255
# print ("flattenMask\n",flattenMask)
frameMask = []
# frameMask = np.array(frameMask)
for val in np.nditer(flattenMask):
# i = 0
if val == 255:
frameMask.append(1)
# val = 1
else:
frameMask.append(0)
# val = 0
# i += 1
frameMask = np.array(frameMask)
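                # Editorial note (added): the per-element loop above is equivalent to the
                # vectorised form frameMask = (flattenMask == 255).astype(int), which is
                # much faster for a full 512x424 (217088-pixel) depth frame.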
# np.set_printoptions(threshold=sys.maxsize)
# print("frame\n",frame)
# print ("flattenMask\n",flattenMask)
# print ("frameMask\n",frameMask)
outputFrame = np.multiply(frame, frameMask)
# frames.append(outputFrame)
# frames.pop(0)
# outputFrame2 = []
# cv2.fastNlMeansDenoisingMulti(frames, 4, 4, outputFrame2)
# outputFrame2 = cv2.fastNlMeansDenoising(outputFrame)
# outputFrame = np.multiply(frame, fgmask)
# cv2.imshow('frame',fgmask)
self.draw_depth_frame(outputFrame, self._frame_surface)
# k = cv2.waitKey(30) & 0xff
# if k == 27:
# break
# frames.append(frame)
# frames.pop(0)
# outputFrame = np.subtract(frames[0], frames[1])
# self.draw_depth_frame(outputFrame, self._frame_surface)
#self.draw_depth_frame(frame, self._frame_surface)
#frame = np.average(np.array([frame, previousFrame]), axis=0)
#np.set_printoptions(threshold=sys.maxsize)
#print(outputFrame)
#print(frame.size)
# outputFrame = (np.array(previousFrames[0]) + np.array(previousFrames[1]) + np.array(previousFrames[2]) + np.array(previousFrames[3]) + np.array(previousFrames[4])) / 5
# self.draw_depth_frame(outputFrame.astype(int), self._frame_surface)
# frame2 = cv.fastNlMeansDenoisingMulti(previousFrames, 2 , 3)
frame = None
outputFrame = None
self._screen.blit(self._frame_surface, (0,0))
pygame.display.update()
# --- Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# --- Limit to 60 frames per second
self._clock.tick(60)
# Close our Kinect sensor, close the window and quit.
self._kinect.close()
pygame.quit()
__main__ = "Kinect v2 Depth"
game = DepthRuntime()
game.run()
|
[
"numpy.dstack",
"pygame.quit",
"numpy.multiply",
"pygame.Surface",
"pygame.event.get",
"pygame.display.set_mode",
"cv2.createBackgroundSubtractorKNN",
"numpy.nditer",
"ctypes.memmove",
"pygame.init",
"pygame.display.flip",
"pykinect2.PyKinectRuntime.PyKinectRuntime",
"pygame.display.update",
"pygame.display.Info",
"numpy.array",
"pygame.display.set_caption",
"pygame.time.Clock"
] |
[((341, 354), 'pygame.init', 'pygame.init', ([], {}), '()\n', (352, 354), False, 'import pygame\n'), ((430, 449), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (447, 449), False, 'import pygame\n'), ((607, 626), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (624, 626), False, 'import pygame\n'), ((719, 785), 'pykinect2.PyKinectRuntime.PyKinectRuntime', 'PyKinectRuntime.PyKinectRuntime', (['PyKinectV2.FrameSourceTypes_Depth'], {}), '(PyKinectV2.FrameSourceTypes_Depth)\n', (750, 785), False, 'from pykinect2 import PyKinectRuntime\n'), ((944, 1047), 'pygame.Surface', 'pygame.Surface', (['(self._kinect.depth_frame_desc.Width, self._kinect.depth_frame_desc.Height)', '(0)', '(24)'], {}), '((self._kinect.depth_frame_desc.Width, self._kinect.\n depth_frame_desc.Height), 0, 24)\n', (958, 1047), False, 'import pygame\n'), ((1207, 1228), 'pygame.display.Info', 'pygame.display.Info', ([], {}), '()\n', (1226, 1228), False, 'import pygame\n'), ((1252, 1422), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(self._kinect.depth_frame_desc.Width, self._kinect.depth_frame_desc.Height)', '(pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE)', '(32)'], {}), '((self._kinect.depth_frame_desc.Width, self._kinect.\n depth_frame_desc.Height), pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.\n RESIZABLE, 32)\n', (1275, 1422), False, 'import pygame\n'), ((1417, 1474), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Kinect for Windows v2 Depth"""'], {}), "('Kinect for Windows v2 Depth')\n", (1443, 1474), False, 'import pygame\n'), ((1883, 1906), 'numpy.dstack', 'np.dstack', (['(f8, f8, f8)'], {}), '((f8, f8, f8))\n', (1892, 1906), True, 'import numpy as np\n'), ((1990, 2052), 'ctypes.memmove', 'ctypes.memmove', (['address', 'frame8bit.ctypes.data', 'frame8bit.size'], {}), '(address, frame8bit.ctypes.data, frame8bit.size)\n', (2004, 2052), False, 'import ctypes\n'), ((2255, 2290), 'cv2.createBackgroundSubtractorKNN', 'cv2.createBackgroundSubtractorKNN', ([], {}), '()\n', (2288, 2290), False, 'import cv2\n'), ((6101, 6114), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (6112, 6114), False, 'import pygame\n'), ((2504, 2522), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2520, 2522), False, 'import pygame\n'), ((5791, 5814), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (5812, 5814), False, 'import pygame\n'), ((5899, 5920), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (5918, 5920), False, 'import pygame\n'), ((3383, 3404), 'numpy.array', 'np.array', (['flattenMask'], {}), '(flattenMask)\n', (3391, 3404), True, 'import numpy as np\n'), ((3666, 3688), 'numpy.nditer', 'np.nditer', (['flattenMask'], {}), '(flattenMask)\n', (3675, 3688), True, 'import numpy as np\n'), ((3992, 4011), 'numpy.array', 'np.array', (['frameMask'], {}), '(frameMask)\n', (4000, 4011), True, 'import numpy as np\n'), ((4248, 4277), 'numpy.multiply', 'np.multiply', (['frame', 'frameMask'], {}), '(frame, frameMask)\n', (4259, 4277), True, 'import numpy as np\n'), ((2805, 2913), 'pygame.display.set_mode', 'pygame.display.set_mode', (["event.dict['size']", '(pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE)', '(32)'], {}), "(event.dict['size'], pygame.HWSURFACE | pygame.\n DOUBLEBUF | pygame.RESIZABLE, 32)\n", (2828, 2913), False, 'import pygame\n')]
|
# Libs
import flask
# Modules
from project.visionGrabber.device import Device
def get_vision_feed():
return flask.Response(generate_frame_from_view(Device()), mimetype='multipart/x-mixed-replace; boundary=frame')
def generate_frame_from_view(camera):
while True:
#get camera frame
frame = camera.get_frame()
yield(b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' +
bytearray(frame) + b'\r\n')
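# Illustrative wiring sketch (editorial addition): get_vision_feed() returns a
# multipart MJPEG response, so it is typically exposed through a Flask route.
# The app and route names below are assumptions, not part of this project:
#
#   app = flask.Flask(__name__)
#
#   @app.route('/vision_feed')
#   def vision_feed():
#       return get_vision_feed()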
|
[
"project.visionGrabber.device.Device"
] |
[((161, 169), 'project.visionGrabber.device.Device', 'Device', ([], {}), '()\n', (167, 169), False, 'from project.visionGrabber.device import Device\n')]
|
#!/usr/bin/env python
"""Tests for `ghoclient` package."""
import unittest
from click.testing import CliRunner
import ghoclient
from ghoclient import cli
from ghoclient import Index
import pandas as pd
from whoosh.searching import Hit
class TestGhoclient(unittest.TestCase):
"""Tests for `ghoclient` package."""
def setUp(self):
"""Set up test fixtures, if any."""
def tearDown(self):
"""Tear down test fixtures, if any."""
def test_000_something(self):
"""Test something."""
def test_command_line_interface(self):
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(cli.main)
assert result.exit_code == 0
# assert 'ghoclient.cli.main' in result.output
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
class TestGHO(unittest.TestCase):
def test_get_countries_as_df(self):
GC = ghoclient.ghoclient.GHOSession()
df = GC.get_countries()
self.assertIsInstance(df, pd.DataFrame)
def test_get_dimensions_as_df(self):
GC = ghoclient.ghoclient.GHOSession()
df = GC.get_dimensions()
self.assertIsInstance(df, pd.DataFrame)
        self.assertEqual(len(df.columns), 3)
def test_get_data(self):
GC = ghoclient.ghoclient.GHOSession()
df = GC.fetch_data_from_codes(code='WHS3_522')
class Test_Index(unittest.TestCase):
def test_build_index(self):
ghoclient.index.build_index(None)
assert ghoclient.index.ix is not None
def test_search(self):
res = ghoclient.index.search('tuberculosis')
self.assertGreaterEqual(len(res), 0)
self.assertIsInstance(res[0], dict)
self.assertIn('code', res[0])
|
[
"click.testing.CliRunner",
"ghoclient.index.build_index",
"ghoclient.index.search",
"ghoclient.ghoclient.GHOSession"
] |
[((613, 624), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (622, 624), False, 'from click.testing import CliRunner\n'), ((1022, 1054), 'ghoclient.ghoclient.GHOSession', 'ghoclient.ghoclient.GHOSession', ([], {}), '()\n', (1052, 1054), False, 'import ghoclient\n'), ((1190, 1222), 'ghoclient.ghoclient.GHOSession', 'ghoclient.ghoclient.GHOSession', ([], {}), '()\n', (1220, 1222), False, 'import ghoclient\n'), ((1401, 1433), 'ghoclient.ghoclient.GHOSession', 'ghoclient.ghoclient.GHOSession', ([], {}), '()\n', (1431, 1433), False, 'import ghoclient\n'), ((1568, 1601), 'ghoclient.index.build_index', 'ghoclient.index.build_index', (['None'], {}), '(None)\n', (1595, 1601), False, 'import ghoclient\n'), ((1699, 1737), 'ghoclient.index.search', 'ghoclient.index.search', (['"""tuberculosis"""'], {}), "('tuberculosis')\n", (1721, 1737), False, 'import ghoclient\n')]
|
import numpy as np
from .strategy import Strategy
from sklearn.neighbors import NearestNeighbors
import pickle
from datetime import datetime
class CoreSet(Strategy):
def __init__(self, X, Y, idxs_lb, net, handler, args, tor=1e-4):
super(CoreSet, self).__init__(X, Y, idxs_lb, net, handler, args)
self.tor = tor
def query(self, n):
lb_flag = self.idxs_lb.copy()
embedding = self.get_embedding(self.X, self.Y)
embedding = embedding.numpy()
print('calculate distance matrix')
t_start = datetime.now()
dist_mat = np.matmul(embedding, embedding.transpose())
sq = np.array(dist_mat.diagonal()).reshape(len(self.X), 1)
dist_mat *= -2
dist_mat += sq
dist_mat += sq.transpose()
dist_mat = np.sqrt(dist_mat)
print(datetime.now() - t_start)
print('calculate greedy solution')
t_start = datetime.now()
mat = dist_mat[~lb_flag, :][:, lb_flag]
for i in range(n):
if i%10 == 0:
print('greedy solution {}/{}'.format(i, n))
mat_min = mat.min(axis=1)
q_idx_ = mat_min.argmax()
q_idx = np.arange(self.n_pool)[~lb_flag][q_idx_]
lb_flag[q_idx] = True
mat = np.delete(mat, q_idx_, 0)
mat = np.append(mat, dist_mat[~lb_flag, q_idx][:, None], axis=1)
print(datetime.now() - t_start)
opt = mat.min(axis=1).max()
bound_u = opt
bound_l = opt/2.0
delta = opt
xx, yy = np.where(dist_mat <= opt)
dd = dist_mat[xx, yy]
lb_flag_ = self.idxs_lb.copy()
subset = np.where(lb_flag_==True)[0].tolist()
SEED = 5
pickle.dump((xx.tolist(), yy.tolist(), dd.tolist(), subset, float(opt), n, self.n_pool), open('mip{}.pkl'.format(SEED), 'wb'), 2)
import ipdb
ipdb.set_trace()
# solving MIP
# download Gurobi software from http://www.gurobi.com/
# sh {GUROBI_HOME}/linux64/bin/gurobi.sh < core_set_sovle_solve.py
sols = pickle.load(open('sols{}.pkl'.format(SEED), 'rb'))
if sols is None:
q_idxs = lb_flag
else:
lb_flag_[sols] = True
q_idxs = lb_flag_
print('sum q_idxs = {}'.format(q_idxs.sum()))
return np.arange(self.n_pool)[(self.idxs_lb ^ q_idxs)]
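# Editorial note (added): query() first computes a greedy k-center solution
# whose radius `opt` brackets the optimum within [opt/2, opt], pickles the
# candidate distance pairs for an external Gurobi MIP (see the comments in the
# method), and finally uses the MIP solution, when one is available, in place
# of the greedy labelling.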
|
[
"ipdb.set_trace",
"numpy.append",
"numpy.where",
"numpy.arange",
"datetime.datetime.now",
"numpy.delete",
"numpy.sqrt"
] |
[((502, 516), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (514, 516), False, 'from datetime import datetime\n'), ((711, 728), 'numpy.sqrt', 'np.sqrt', (['dist_mat'], {}), '(dist_mat)\n', (718, 728), True, 'import numpy as np\n'), ((813, 827), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (825, 827), False, 'from datetime import datetime\n'), ((1323, 1348), 'numpy.where', 'np.where', (['(dist_mat <= opt)'], {}), '(dist_mat <= opt)\n', (1331, 1348), True, 'import numpy as np\n'), ((1617, 1633), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (1631, 1633), False, 'import ipdb\n'), ((1101, 1126), 'numpy.delete', 'np.delete', (['mat', 'q_idx_', '(0)'], {}), '(mat, q_idx_, 0)\n', (1110, 1126), True, 'import numpy as np\n'), ((1136, 1194), 'numpy.append', 'np.append', (['mat', 'dist_mat[~lb_flag, q_idx][:, None]'], {'axis': '(1)'}), '(mat, dist_mat[~lb_flag, q_idx][:, None], axis=1)\n', (1145, 1194), True, 'import numpy as np\n'), ((1989, 2011), 'numpy.arange', 'np.arange', (['self.n_pool'], {}), '(self.n_pool)\n', (1998, 2011), True, 'import numpy as np\n'), ((737, 751), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (749, 751), False, 'from datetime import datetime\n'), ((1204, 1218), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1216, 1218), False, 'from datetime import datetime\n'), ((1026, 1048), 'numpy.arange', 'np.arange', (['self.n_pool'], {}), '(self.n_pool)\n', (1035, 1048), True, 'import numpy as np\n'), ((1418, 1444), 'numpy.where', 'np.where', (['(lb_flag_ == True)'], {}), '(lb_flag_ == True)\n', (1426, 1444), True, 'import numpy as np\n')]
|
import os.path
import pytest
from unittest import mock
from it_automation.supplier_image_upload import post_images
@pytest.mark.parametrize(
"_input, expected",
[(201, "Success"), (400, "POST error status=400")]
)
@mock.patch("it_automation.run.requests.post")
def test_post_images(mock_requests_post, _input, expected):
mock_requests_post.return_value = mock.Mock(**{"status_code": _input})
test_url = 'test_url'
test_image_directory = os.path.expanduser('~') + '/Documents' \
'/google_class' \
'/project_8' \
'/tests' \
'/images'
if _input != 201:
with pytest.raises(Exception, match=expected):
post_images(test_url, test_image_directory)
else:
post_images(test_url, test_image_directory)
mock_requests_post.assert_called()
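# Editorial note (added): @mock.patch is the innermost decorator here, so the
# mocked requests.post object is injected as the first test argument, ahead of
# the parametrized _input/expected values.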
|
[
"unittest.mock.Mock",
"unittest.mock.patch",
"pytest.raises",
"it_automation.supplier_image_upload.post_images",
"pytest.mark.parametrize"
] |
[((117, 216), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""_input, expected"""', "[(201, 'Success'), (400, 'POST error status=400')]"], {}), "('_input, expected', [(201, 'Success'), (400,\n 'POST error status=400')])\n", (140, 216), False, 'import pytest\n'), ((224, 269), 'unittest.mock.patch', 'mock.patch', (['"""it_automation.run.requests.post"""'], {}), "('it_automation.run.requests.post')\n", (234, 269), False, 'from unittest import mock\n'), ((368, 404), 'unittest.mock.Mock', 'mock.Mock', ([], {}), "(**{'status_code': _input})\n", (377, 404), False, 'from unittest import mock\n'), ((897, 940), 'it_automation.supplier_image_upload.post_images', 'post_images', (['test_url', 'test_image_directory'], {}), '(test_url, test_image_directory)\n', (908, 940), False, 'from it_automation.supplier_image_upload import post_images\n'), ((781, 821), 'pytest.raises', 'pytest.raises', (['Exception'], {'match': 'expected'}), '(Exception, match=expected)\n', (794, 821), False, 'import pytest\n'), ((835, 878), 'it_automation.supplier_image_upload.post_images', 'post_images', (['test_url', 'test_image_directory'], {}), '(test_url, test_image_directory)\n', (846, 878), False, 'from it_automation.supplier_image_upload import post_images\n')]
|
#!/usr/bin/python
import os, math
import pandas as pd
import numpy as np
np.random.seed(42)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
torch.manual_seed(42)
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import ParameterSampler
def doc_mean_thres(df):
doc_mean = df.mean()
df_bin = 1.0 * (df.values > doc_mean.values)
df_bin = pd.DataFrame(df_bin, columns=df.columns, index=df.index)
return df_bin
def load_doc_term_matrix(version=190325, binarize=True):
dtm = pd.read_csv("../../../data/text/dtm_{}.csv.gz".format(version), compression="gzip", index_col=0)
if binarize:
dtm = doc_mean_thres(dtm)
return dtm
def load_coordinates():
atlas_labels = pd.read_csv("../../../data/brain/labels.csv")
activations = pd.read_csv("../../../data/brain/coordinates.csv", index_col=0)
activations = activations[atlas_labels["PREPROCESSED"]]
return activations
def load_raw_domains(k):
list_file = "../../lists/lists_k{:02d}.csv".format(k)
lists = pd.read_csv(list_file, index_col=None)
circuit_file = "../../circuits/circuits_k{:02d}.csv".format(k)
circuits = pd.read_csv(circuit_file, index_col=None)
return lists, circuits
def numpy2torch(data):
inputs, labels = data
inputs = Variable(torch.from_numpy(inputs.T).float())
labels = Variable(torch.from_numpy(labels.T).float())
return inputs, labels
def reset_weights(m):
if isinstance(m, nn.Linear):
m.reset_parameters()
class Net(nn.Module):
def __init__(self, n_input=0, n_output=0, n_hid=100, p_dropout=0.5):
super(Net, self).__init__()
self.fc1 = nn.Linear(n_input, n_hid)
self.bn1 = nn.BatchNorm1d(n_hid)
self.dropout1 = nn.Dropout(p=p_dropout)
self.fc2 = nn.Linear(n_hid, n_hid)
self.bn2 = nn.BatchNorm1d(n_hid)
self.dropout2 = nn.Dropout(p=p_dropout)
self.fc3 = nn.Linear(n_hid, n_hid)
self.bn3 = nn.BatchNorm1d(n_hid)
self.dropout3 = nn.Dropout(p=p_dropout)
self.fc4 = nn.Linear(n_hid, n_hid)
self.bn4 = nn.BatchNorm1d(n_hid)
self.dropout4 = nn.Dropout(p=p_dropout)
self.fc5 = nn.Linear(n_hid, n_hid)
self.bn5 = nn.BatchNorm1d(n_hid)
self.dropout5 = nn.Dropout(p=p_dropout)
self.fc6 = nn.Linear(n_hid, n_hid)
self.bn6 = nn.BatchNorm1d(n_hid)
self.dropout6 = nn.Dropout(p=p_dropout)
self.fc7 = nn.Linear(n_hid, n_hid)
self.bn7 = nn.BatchNorm1d(n_hid)
self.dropout7 = nn.Dropout(p=p_dropout)
self.fc8 = nn.Linear(n_hid, n_output)
# Xavier initialization for weights
for fc in [self.fc1, self.fc2, self.fc3, self.fc4,
self.fc5, self.fc6, self.fc7, self.fc8]:
nn.init.xavier_uniform_(fc.weight)
def forward(self, x):
x = self.dropout1(F.relu(self.bn1(self.fc1(x))))
x = self.dropout2(F.relu(self.bn2(self.fc2(x))))
x = self.dropout3(F.relu(self.bn3(self.fc3(x))))
x = self.dropout4(F.relu(self.bn4(self.fc4(x))))
x = self.dropout5(F.relu(self.bn5(self.fc5(x))))
x = self.dropout6(F.relu(self.bn6(self.fc6(x))))
x = self.dropout7(F.relu(self.bn7(self.fc7(x))))
x = torch.sigmoid(self.fc8(x))
return x
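# Editorial note (added): Net is an eight-layer fully connected network with
# batch normalisation and dropout on every hidden layer and a sigmoid output,
# which pairs with the binary cross-entropy loss used below for multi-label
# prediction of structures from terms (and vice versa).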
def optimize_hyperparameters(param_list, train_set, val_set, n_epochs=100):
criterion = F.binary_cross_entropy
inputs_val, labels_val = numpy2torch(val_set[0])
op_idx, op_params, op_score_val, op_state_dict, op_loss = 0, 0, 0, 0, 0
for params in param_list:
print("-" * 75)
print(" ".join(["{} {:6.5f}".format(k.upper(), v) for k, v in params.items()]))
print("-" * 75 + "\n")
# Initialize variables for this set of parameters
n_input = train_set[0][0].shape[0]
n_output = train_set[0][1].shape[0]
net = Net(n_input=n_input, n_output=n_output,
n_hid=params["n_hid"], p_dropout=params["p_dropout"])
optimizer = optim.Adam(net.parameters(),
lr=params["lr"], weight_decay=params["weight_decay"])
net.apply(reset_weights)
running_loss = []
# Loop over the dataset multiple times
for epoch in range(n_epochs):
for data in train_set:
# Get the inputs
inputs, labels = numpy2torch(data)
# Zero the parameter gradients
optimizer.zero_grad()
# Forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# Update the running loss
running_loss += [loss.item()]
if epoch % (n_epochs/5) == (n_epochs/5) - 1:
print(" Epoch {:3d}\tLoss {:6.6f}".format(epoch + 1, running_loss[-1] / 100))
# Evaluate on the validation set
with torch.no_grad():
preds_val = net.eval()(inputs_val).float()
score_val = roc_auc_score(labels_val, preds_val, average="macro")
print("\n Validation Set ROC-AUC {:6.4f}\n".format(score_val))
# Update outputs if this model is the best so far
if score_val > op_score_val:
print(" Best so far!\n")
op_score_val = score_val
op_state_dict = net.state_dict()
op_params = params
op_loss = running_loss
return op_score_val
def load_mini_batches(X, Y, split, mini_batch_size=64, seed=0, reshape_labels=False):
np.random.seed(seed)
m = len(split) # Number of training examples
mini_batches = []
# Split the data
X = X.loc[split].T.values
Y = Y.loc[split].T.values
# Shuffle (X, Y)
permutation = list(np.random.permutation(m))
shuffled_X = X[:, permutation]
shuffled_Y = Y[:, permutation]
if reshape_labels:
shuffled_Y = shuffled_Y.reshape((1,m))
# Partition (shuffled_X, shuffled_Y), except the end case
	num_complete_minibatches = math.floor(m / mini_batch_size) # Number of mini-batches of size mini_batch_size in your partitioning
for k in range(0, num_complete_minibatches):
mini_batch_X = shuffled_X[:, k * mini_batch_size : (k+1) * mini_batch_size]
mini_batch_Y = shuffled_Y[:, k * mini_batch_size : (k+1) * mini_batch_size]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handle the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
mini_batch_X = shuffled_X[:, -(m % mini_batch_size):]
mini_batch_Y = shuffled_Y[:, -(m % mini_batch_size):]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
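# Editorial note (added): with m examples and mini_batch_size b this produces
# ceil(m / b) mini-batches; e.g. m = 1000, b = 64 yields 15 full batches of 64
# plus a final batch of 40.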
def optimize_list_len(k):
# Load the data splits
splits = {}
for split in ["train", "validation"]:
splits[split] = [int(pmid.strip()) for pmid in open("../../../data/splits/{}.txt".format(split), "r").readlines()]
act_bin = load_coordinates()
dtm_bin = load_doc_term_matrix(version=190325, binarize=True)
lists, circuits = load_raw_domains(k)
# Specify the hyperparameters for the randomized grid search
param_grid = {"lr": [0.001],
"weight_decay": [0.001],
"n_hid": [100],
"p_dropout": [0.1]}
param_list = list(ParameterSampler(param_grid, n_iter=1, random_state=42))
batch_size = 1024
n_epochs = 100
list_lens = range(5, 26)
op_lists = pd.DataFrame()
for circuit in range(1, k+1):
print("-" * 100)
print("Fitting models for domain {:02d}".format(circuit))
forward_scores, reverse_scores = [], []
structures = circuits.loc[circuits["CLUSTER"] == circuit, "STRUCTURE"]
for list_len in list_lens:
print("-" * 85)
print("Fitting models for lists of length {:02d}".format(list_len))
words = lists.loc[lists["CLUSTER"] == circuit, "TOKEN"][:list_len]
# Optimize forward inference classifier
train_set_f = load_mini_batches(dtm_bin[words], act_bin[structures], splits["train"], mini_batch_size=batch_size, seed=42)
val_set_f = load_mini_batches(dtm_bin[words], act_bin[structures], splits["validation"], mini_batch_size=len(splits["validation"]), seed=42)
try:
op_val_f = optimize_hyperparameters(param_list, train_set_f, val_set_f, n_epochs=n_epochs)
except:
op_val_f = 0.0
forward_scores.append(op_val_f)
# Optimize reverse inference classifier
train_set_r = load_mini_batches(act_bin[structures], dtm_bin[words], splits["train"], mini_batch_size=batch_size, seed=42)
val_set_r = load_mini_batches(act_bin[structures], dtm_bin[words], splits["validation"], mini_batch_size=len(splits["validation"]), seed=42)
try:
op_val_r = optimize_hyperparameters(param_list, train_set_r, val_set_r, n_epochs=n_epochs)
except:
op_val_r = 0.0
reverse_scores.append(op_val_r)
scores = [(forward_scores[i] + reverse_scores[i])/2.0 for i in range(len(forward_scores))]
print("-" * 85)
print("Mean ROC-AUC scores: {}".format(scores))
op_len = list_lens[scores.index(max(scores))]
print("-" * 100)
print("\tCircuit {:02d} has {:02d} words".format(circuit, op_len))
op_df = lists.loc[lists["CLUSTER"] == circuit][:op_len]
op_df["ROC_AUC"] = max(scores)
op_lists = op_lists.append(op_df)
op_lists.to_csv("../../lists/lists_k{:02d}_oplen_nn.csv".format(k), index=None)
|
[
"pandas.DataFrame",
"torch.nn.Dropout",
"numpy.random.seed",
"pandas.read_csv",
"torch.manual_seed",
"torch.nn.init.xavier_uniform_",
"torch.nn.BatchNorm1d",
"math.floor",
"sklearn.metrics.roc_auc_score",
"sklearn.model_selection.ParameterSampler",
"torch.nn.Linear",
"numpy.random.permutation",
"torch.no_grad",
"torch.from_numpy"
] |
[((74, 92), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (88, 92), True, 'import numpy as np\n'), ((225, 246), 'torch.manual_seed', 'torch.manual_seed', (['(42)'], {}), '(42)\n', (242, 246), False, 'import torch\n'), ((450, 506), 'pandas.DataFrame', 'pd.DataFrame', (['df_bin'], {'columns': 'df.columns', 'index': 'df.index'}), '(df_bin, columns=df.columns, index=df.index)\n', (462, 506), True, 'import pandas as pd\n'), ((788, 833), 'pandas.read_csv', 'pd.read_csv', (['"""../../../data/brain/labels.csv"""'], {}), "('../../../data/brain/labels.csv')\n", (799, 833), True, 'import pandas as pd\n'), ((850, 913), 'pandas.read_csv', 'pd.read_csv', (['"""../../../data/brain/coordinates.csv"""'], {'index_col': '(0)'}), "('../../../data/brain/coordinates.csv', index_col=0)\n", (861, 913), True, 'import pandas as pd\n'), ((1086, 1124), 'pandas.read_csv', 'pd.read_csv', (['list_file'], {'index_col': 'None'}), '(list_file, index_col=None)\n', (1097, 1124), True, 'import pandas as pd\n'), ((1203, 1244), 'pandas.read_csv', 'pd.read_csv', (['circuit_file'], {'index_col': 'None'}), '(circuit_file, index_col=None)\n', (1214, 1244), True, 'import pandas as pd\n'), ((5266, 5286), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (5280, 5286), True, 'import numpy as np\n'), ((5727, 5758), 'math.floor', 'math.floor', (['(m / mini_batch_size)'], {}), '(m / mini_batch_size)\n', (5737, 5758), False, 'import os, math\n'), ((7197, 7211), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7209, 7211), True, 'import pandas as pd\n'), ((1683, 1708), 'torch.nn.Linear', 'nn.Linear', (['n_input', 'n_hid'], {}), '(n_input, n_hid)\n', (1692, 1708), True, 'import torch.nn as nn\n'), ((1724, 1745), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_hid'], {}), '(n_hid)\n', (1738, 1745), True, 'import torch.nn as nn\n'), ((1766, 1789), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'p_dropout'}), '(p=p_dropout)\n', (1776, 1789), True, 'import torch.nn as nn\n'), ((1805, 1828), 'torch.nn.Linear', 'nn.Linear', (['n_hid', 'n_hid'], {}), '(n_hid, n_hid)\n', (1814, 1828), True, 'import torch.nn as nn\n'), ((1844, 1865), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_hid'], {}), '(n_hid)\n', (1858, 1865), True, 'import torch.nn as nn\n'), ((1886, 1909), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'p_dropout'}), '(p=p_dropout)\n', (1896, 1909), True, 'import torch.nn as nn\n'), ((1925, 1948), 'torch.nn.Linear', 'nn.Linear', (['n_hid', 'n_hid'], {}), '(n_hid, n_hid)\n', (1934, 1948), True, 'import torch.nn as nn\n'), ((1964, 1985), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_hid'], {}), '(n_hid)\n', (1978, 1985), True, 'import torch.nn as nn\n'), ((2006, 2029), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'p_dropout'}), '(p=p_dropout)\n', (2016, 2029), True, 'import torch.nn as nn\n'), ((2045, 2068), 'torch.nn.Linear', 'nn.Linear', (['n_hid', 'n_hid'], {}), '(n_hid, n_hid)\n', (2054, 2068), True, 'import torch.nn as nn\n'), ((2084, 2105), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_hid'], {}), '(n_hid)\n', (2098, 2105), True, 'import torch.nn as nn\n'), ((2126, 2149), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'p_dropout'}), '(p=p_dropout)\n', (2136, 2149), True, 'import torch.nn as nn\n'), ((2165, 2188), 'torch.nn.Linear', 'nn.Linear', (['n_hid', 'n_hid'], {}), '(n_hid, n_hid)\n', (2174, 2188), True, 'import torch.nn as nn\n'), ((2204, 2225), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_hid'], {}), '(n_hid)\n', (2218, 2225), True, 'import torch.nn as nn\n'), ((2246, 2269), 
'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'p_dropout'}), '(p=p_dropout)\n', (2256, 2269), True, 'import torch.nn as nn\n'), ((2285, 2308), 'torch.nn.Linear', 'nn.Linear', (['n_hid', 'n_hid'], {}), '(n_hid, n_hid)\n', (2294, 2308), True, 'import torch.nn as nn\n'), ((2324, 2345), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_hid'], {}), '(n_hid)\n', (2338, 2345), True, 'import torch.nn as nn\n'), ((2366, 2389), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'p_dropout'}), '(p=p_dropout)\n', (2376, 2389), True, 'import torch.nn as nn\n'), ((2405, 2428), 'torch.nn.Linear', 'nn.Linear', (['n_hid', 'n_hid'], {}), '(n_hid, n_hid)\n', (2414, 2428), True, 'import torch.nn as nn\n'), ((2444, 2465), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_hid'], {}), '(n_hid)\n', (2458, 2465), True, 'import torch.nn as nn\n'), ((2486, 2509), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'p_dropout'}), '(p=p_dropout)\n', (2496, 2509), True, 'import torch.nn as nn\n'), ((2525, 2551), 'torch.nn.Linear', 'nn.Linear', (['n_hid', 'n_output'], {}), '(n_hid, n_output)\n', (2534, 2551), True, 'import torch.nn as nn\n'), ((4778, 4831), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['labels_val', 'preds_val'], {'average': '"""macro"""'}), "(labels_val, preds_val, average='macro')\n", (4791, 4831), False, 'from sklearn.metrics import roc_auc_score\n'), ((5481, 5505), 'numpy.random.permutation', 'np.random.permutation', (['m'], {}), '(m)\n', (5502, 5505), True, 'import numpy as np\n'), ((7054, 7109), 'sklearn.model_selection.ParameterSampler', 'ParameterSampler', (['param_grid'], {'n_iter': '(1)', 'random_state': '(42)'}), '(param_grid, n_iter=1, random_state=42)\n', (7070, 7109), False, 'from sklearn.model_selection import ParameterSampler\n'), ((2710, 2744), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['fc.weight'], {}), '(fc.weight)\n', (2733, 2744), True, 'import torch.nn as nn\n'), ((4696, 4711), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4709, 4711), False, 'import torch\n'), ((1339, 1365), 'torch.from_numpy', 'torch.from_numpy', (['inputs.T'], {}), '(inputs.T)\n', (1355, 1365), False, 'import torch\n'), ((1395, 1421), 'torch.from_numpy', 'torch.from_numpy', (['labels.T'], {}), '(labels.T)\n', (1411, 1421), False, 'import torch\n')]
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import functools
import json
import os
from urllib import parse
import time
import bs4
import requests
from flask import request
from selenium import webdriver
class GlobeParser(object):
def __init__(self):
print('Initializing...')
self.driver_options = webdriver.ChromeOptions()
self.driver_options.add_argument('headless')
driver = webdriver.Chrome(options=self.driver_options)
self.login(driver)
self.cookies = driver.get_cookies()
driver.close()
self.session = requests.session()
for cookie in self.cookies:
c = requests.cookies.create_cookie(
domain=cookie['domain'], name=cookie['name'], value=cookie['value']
)
self.session.cookies.set_cookie(c)
print('Logged in! Ready.')
def get_driver(self) -> webdriver.Chrome:
driver = webdriver.Chrome(options=self.driver_options)
driver.get('https://www.bostonglobe.com')
for cookie in self.cookies:
if 'expiry' in cookie:
del(cookie['expiry'])
driver.add_cookie(cookie)
return driver
@staticmethod
def login(driver):
driver.get('https://pages.bostonglobe.com/login/')
email_field = driver.find_element_by_name('email')
email_field.send_keys(os.environ['BOSTONGLOBE_USER'])
pass_field = driver.find_element_by_name('password')
        pass_field.send_keys(os.environ['BOSTONGLOBE_PASSWORD'])
submit = driver.find_element_by_xpath('/html/body/div/div/section/form/input')
submit.click()
time.sleep(10)
@staticmethod
def replace_url(url):
if 'bostonglobe.com' not in url:
# These links are served by www3 and start with /
url = 'https://www3.bostonglobe.com%s' % url
original_encoded = parse.quote(url)
return '%s/proxy/%s' % (request.url_root, original_encoded)
@staticmethod
def restore_url(url):
url = url.replace('%s/proxy/' % request.url_root, '')
return parse.unquote(url)
@staticmethod
def parse_title(soup) -> str:
return soup.title.text.replace(' - The Boston Globe', '')
@staticmethod
def fix_image_url(url: str):
# Images hosted in this domain are (so far) prepended
# by a resizer script. Go straight to the source.
index = url.find('arc-anglerfish')
if index > -1:
url = url[index:]
if url.startswith('//'):
url = 'https:%s' % url
if not url.startswith('https://'):
url = 'https://%s' % url
return url
@staticmethod
def parse_metadata(soup) -> dict:
# TODO(knikolla): There are still cases where author doesn't show up.
try:
metadata = json.loads(soup.find('script', type='application/ld+json').text)
except AttributeError:
return {'author': '<EMAIL>'}
try:
authors = metadata['author']['name']
if isinstance(authors, list):
authors = ', '.join(authors)
metadata['author'] = authors
except KeyError:
metadata['author'] = '<EMAIL>'
return metadata
@classmethod
def parse_images(cls, soup) -> list:
images = []
query = soup.find_all('img', 'width_full')
for image in query:
images.append({'src': cls.fix_image_url(image['data-src']),
'alt': image['alt']})
query = soup.find_all('img', 'lead-media__media')
for image in query:
images.append({'src': cls.fix_image_url(image['src']),
'alt': image['alt']})
return images
@staticmethod
def parse_article_from_script(soup) -> list:
scripts = soup.find_all('script')
messy_json = None
for script in scripts:
if 'Fusion.globalContent' in script.text:
messy_json = script.text
if not messy_json:
print('Error finding article data!')
return ['Error loading article.']
start = messy_json.find('{"_id":')
messy_json = messy_json[start:]
end = messy_json.find(';Fusion.globalContentConfig')
script = messy_json[:end]
inside = False
clean_json = ''
for i, char in enumerate(script):
if char == '<':
inside = True
if char == '>':
inside = False
if inside and char == '"':
char = '\"' # Unescaped characters prevent json loading
clean_json = clean_json + char
article = json.loads(clean_json)
return [
x['content'] for x in article['content_elements'] if x['type'] == 'text'
]
@property
def today_url(self):
now = datetime.datetime.now()
today = now.strftime('%Y/%m/%d')
return 'https://www3.bostonglobe.com/todayspaper/%s' % today
def find_top_stories(self):
html = self.session.get(self.today_url).text
soup = bs4.BeautifulSoup(html, 'html5lib')
# Top Stories
top = soup.find('div', 'stories-top')
top = top.find_all('div', 'story')
top_stories = []
for story in top:
processed = {
'title': story.find('h2').text,
'url': self.replace_url(story.find('a')['href']),
'summary': ''.join([p.text for p in story.find_all('p')])
}
image = story.find('img')
if image:
processed['image'] = self.fix_image_url(image['src'])
top_stories.append(processed)
return top_stories
def find_section(self, key):
html = self.session.get(self.today_url).text
soup = bs4.BeautifulSoup(html, 'html5lib')
sections = soup.find_all('div', 'tod-paper-section')
found = None
for section in sections:
title = section.find('h2').find('a').text
if key in title.lower():
found = section
break
if not found:
return
stories = []
parsed = section.find_all('a')[1:]
for story in parsed:
try:
stories.append({'title': story.find('h3').text,
'url': self.replace_url(story['href'])})
except AttributeError:
# Because of course, in some the A is inside the H3
continue
parsed = section.find_all('h3')[1:]
for story in parsed:
try:
stories.append({'title': story.text,
'url': self.replace_url(story.find('a')['href'])})
except (AttributeError, TypeError):
# Because of course, in some the A is inside the H3
continue
return stories
def get_section(self, section):
html = self.session.get('https://www3.bostonglobe.com/news/%s' % section).text
soup = bs4.BeautifulSoup(html, 'html5lib')
section = soup.find_all('div', 'stories-top')[0]
stories = []
parsed = section.find_all('div', 'story')
for story in parsed:
a = story.find('a')
stories.append({'title': a.text,
'url': self.replace_url(a['href'])})
return stories
@functools.lru_cache(maxsize=128)
def get_article_selenium(self, url):
driver = self.get_driver()
driver.get(url)
soup = bs4.BeautifulSoup(driver.page_source, 'html5lib')
article = soup.find('div', 'article-content')
driver.close()
return {
'title': self.parse_title(soup),
'paragraphs': [p.text for p in article.find_all('p')],
'images': self.parse_images(soup),
'metadata': self.parse_metadata(soup),
}
@functools.lru_cache(maxsize=128)
def get_article(self, url):
url = self.restore_url(url)
r = self.session.get(url)
if r.status_code == 404:
# Some Javascript shit is happening here, use Selenium.
return self.get_article_selenium(url)
soup = bs4.BeautifulSoup(r.text, 'html5lib')
return {
'title': self.parse_title(soup),
'paragraphs': self.parse_article_from_script(soup),
'metadata': self.parse_metadata(soup),
'images': self.parse_images(soup),
}
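# --- Editor's sketch (hedged, not part of the original source): the escaping
# --- loop in parse_article_from_script() rewrites raw '"' characters that
# --- occur inside HTML tags embedded in the Fusion JSON blob so that
# --- json.loads() accepts it. A minimal standalone reproduction of that
# --- technique (the payload below is illustrative):
def _escape_quotes_in_tags(blob: str) -> str:
    inside, out = False, []
    for ch in blob:
        if ch == '<':
            inside = True
        if ch == '>':
            inside = False
        if inside and ch == '"':
            ch = '\\"'  # keep the surrounding JSON string valid
        out.append(ch)
    return ''.join(out)

# A quote inside a tag would otherwise break parsing:
assert json.loads(_escape_quotes_in_tags('{"html": "<a title="x">go</a>"}'))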
|
[
"requests.session",
"urllib.parse.unquote",
"json.loads",
"requests.cookies.create_cookie",
"time.sleep",
"urllib.parse.quote",
"selenium.webdriver.ChromeOptions",
"selenium.webdriver.Chrome",
"bs4.BeautifulSoup",
"functools.lru_cache",
"datetime.datetime.now"
] |
[((8011, 8043), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': '(128)'}), '(maxsize=128)\n', (8030, 8043), False, 'import functools\n'), ((8530, 8562), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': '(128)'}), '(maxsize=128)\n', (8549, 8562), False, 'import functools\n'), ((834, 859), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (857, 859), False, 'from selenium import webdriver\n'), ((930, 975), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'options': 'self.driver_options'}), '(options=self.driver_options)\n', (946, 975), False, 'from selenium import webdriver\n'), ((1095, 1113), 'requests.session', 'requests.session', ([], {}), '()\n', (1111, 1113), False, 'import requests\n'), ((1443, 1488), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'options': 'self.driver_options'}), '(options=self.driver_options)\n', (1459, 1488), False, 'from selenium import webdriver\n'), ((2181, 2195), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (2191, 2195), False, 'import time\n'), ((2428, 2444), 'urllib.parse.quote', 'parse.quote', (['url'], {}), '(url)\n', (2439, 2444), False, 'from urllib import parse\n'), ((2635, 2653), 'urllib.parse.unquote', 'parse.unquote', (['url'], {}), '(url)\n', (2648, 2653), False, 'from urllib import parse\n'), ((5247, 5269), 'json.loads', 'json.loads', (['clean_json'], {}), '(clean_json)\n', (5257, 5269), False, 'import json\n'), ((5436, 5459), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5457, 5459), False, 'import datetime\n'), ((5671, 5706), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['html', '"""html5lib"""'], {}), "(html, 'html5lib')\n", (5688, 5706), False, 'import bs4\n'), ((6401, 6436), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['html', '"""html5lib"""'], {}), "(html, 'html5lib')\n", (6418, 6436), False, 'import bs4\n'), ((7646, 7681), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['html', '"""html5lib"""'], {}), "(html, 'html5lib')\n", (7663, 7681), False, 'import bs4\n'), ((8159, 8208), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['driver.page_source', '"""html5lib"""'], {}), "(driver.page_source, 'html5lib')\n", (8176, 8208), False, 'import bs4\n'), ((8833, 8870), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['r.text', '"""html5lib"""'], {}), "(r.text, 'html5lib')\n", (8850, 8870), False, 'import bs4\n'), ((1166, 1269), 'requests.cookies.create_cookie', 'requests.cookies.create_cookie', ([], {'domain': "cookie['domain']", 'name': "cookie['name']", 'value': "cookie['value']"}), "(domain=cookie['domain'], name=cookie['name'],\n value=cookie['value'])\n", (1196, 1269), False, 'import requests\n')]
|
from datetime import datetime
from config.period import Period
from config.schedule import Schedule
from config.scheduler_config import SchedulerConfig
from schedulers.state_service import StateService, State
config = SchedulerConfig(
periods={
"period1": Period(
name="period1",
begin_time="9:00",
end_time="13:00",
weekdays=[0, 1, 2, 3, 4],
),
"period2": Period(
name="period2",
begin_time="15:00",
end_time="16:00",
weekdays=[0, 1, 2, 3, 4],
),
"period3": Period(
name="period3", end_time="21:00", weekdays=[0, 1, 2, 3, 4, 5, 6]
),
},
schedules={
"schedule1": Schedule(
name="schedule1", periods_names=["period1", "period2", "period3"]
)
},
schedule_tag_name="schedule",
timezone="Europe/Warsaw",
)
service = StateService(config=config)
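# --- Editor's sketch (hedged): the call pattern exercised by the tests below.
# --- get_desired_state() is expected to combine the configured periods with
# --- the most recent manual start/stop timestamps; the expected result is
# --- taken from the assertions in this file, not from the implementation.
def example_desired_state_lookup():
    return service.get_desired_state(
        schedule_name="schedule1",
        current_datetime=datetime.fromisoformat("2021-03-02 10:00"),
        last_start=datetime.fromisoformat("2021-03-02 09:00"),
        last_stop=datetime.fromisoformat("2021-03-01 21:00"),
    )  # expected: State.RUNNING (inside period1 on a Tuesday)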
def test_automatic_schedules_businessday():
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 08:00"),
last_start=datetime.fromisoformat("2021-03-02 09:00"),
last_stop=datetime.fromisoformat("2021-03-01 21:00"),
)
== State.STOPPED
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 09:00"),
last_start=datetime.fromisoformat("2021-03-02 09:00"),
last_stop=datetime.fromisoformat("2021-03-01 21:00"),
)
== State.RUNNING
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 10:00"),
last_start=datetime.fromisoformat("2021-03-02 09:00"),
last_stop=datetime.fromisoformat("2021-03-01 21:00"),
)
== State.RUNNING
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 13:00"),
last_start=datetime.fromisoformat("2021-03-02 09:00"),
last_stop=datetime.fromisoformat("2021-03-01 21:00"),
)
== State.STOPPED
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 13:30"),
last_start=datetime.fromisoformat("2021-03-02 09:00"),
last_stop=datetime.fromisoformat("2021-03-02 18:00"),
)
== State.STOPPED
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 15:00"),
last_start=datetime.fromisoformat("2021-03-02 09:00"),
last_stop=datetime.fromisoformat("2021-03-02 13:00"),
)
== State.RUNNING
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 15:30"),
last_start=datetime.fromisoformat("2021-03-02 15:00"),
last_stop=datetime.fromisoformat("2021-03-02 13:00"),
)
== State.RUNNING
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 16:00"),
last_start=datetime.fromisoformat("2021-03-02 15:00"),
last_stop=datetime.fromisoformat("2021-03-02 13:00"),
)
== State.STOPPED
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 16:30"),
last_start=datetime.fromisoformat("2021-03-02 15:00"),
last_stop=datetime.fromisoformat("2021-03-02 16:00"),
)
== State.STOPPED
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 21:00"),
last_start=datetime.fromisoformat("2021-03-02 09:00"),
last_stop=datetime.fromisoformat("2021-03-02 18:00"),
)
== State.STOPPED
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 22:00"),
last_start=datetime.fromisoformat("2021-03-02 09:00"),
last_stop=datetime.fromisoformat("2021-03-02 21:00"),
)
== State.STOPPED
)
def test_automatic_schedules_manual_start():
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 08:00"),
last_start=datetime.fromisoformat("2021-03-02 07:00"),
last_stop=datetime.fromisoformat("2021-03-01 21:00"),
)
== State.UNKNOWN
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 09:00"),
last_start=datetime.fromisoformat("2021-03-02 07:00"),
last_stop=datetime.fromisoformat("2021-03-01 21:00"),
)
== State.UNKNOWN
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 09:00"),
last_start=datetime.fromisoformat("2021-03-02 07:00"),
last_stop=datetime.fromisoformat("2021-03-02 08:00"),
)
== State.RUNNING
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 19:00"),
last_start=datetime.fromisoformat("2021-03-02 20:00"),
last_stop=datetime.fromisoformat("2021-03-02 18:00"),
)
== State.UNKNOWN
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 23:00"),
last_start=datetime.fromisoformat("2021-03-02 22:00"),
last_stop=datetime.fromisoformat("2021-03-02 21:00"),
)
== State.UNKNOWN
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-07 22:00"),
last_start=datetime.fromisoformat("2021-03-07 17:00"),
last_stop=datetime.fromisoformat("2021-03-07 23:10"),
)
== State.STOPPED
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-07 22:00"),
last_start=datetime.fromisoformat("2021-03-07 17:00"),
last_stop=datetime.fromisoformat("2021-03-07 01:10"),
)
== State.STOPPED
)
def test_automatic_schedules_manual_stop():
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 11:00"),
last_start=datetime.fromisoformat("2021-03-02 09:00"),
last_stop=datetime.fromisoformat("2021-03-02 10:00"),
)
== State.UNKNOWN
)
def test_automatic_schedules_weekend():
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-06 08:00"),
last_start=datetime.fromisoformat("2021-03-06 09:00"),
last_stop=datetime.fromisoformat("2021-03-05 21:00"),
)
== State.UNKNOWN
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-06 09:00"),
last_start=datetime.fromisoformat("2021-03-06 09:00"),
last_stop=datetime.fromisoformat("2021-03-05 21:00"),
)
== State.UNKNOWN
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-06 10:00"),
last_start=datetime.fromisoformat("2021-03-06 09:00"),
last_stop=datetime.fromisoformat("2021-03-05 21:00"),
)
== State.UNKNOWN
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-06 13:00"),
last_start=datetime.fromisoformat("2021-03-06 09:00"),
last_stop=datetime.fromisoformat("2021-03-05 21:00"),
)
== State.UNKNOWN
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-06 13:30"),
last_start=datetime.fromisoformat("2021-03-06 09:00"),
last_stop=datetime.fromisoformat("2021-03-05 21:00"),
)
== State.UNKNOWN
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-06 21:00"),
last_start=datetime.fromisoformat("2021-03-06 09:00"),
last_stop=datetime.fromisoformat("2021-03-05 21:00"),
)
== State.STOPPED
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-06 22:00"),
last_start=datetime.fromisoformat("2021-03-06 09:00"),
last_stop=datetime.fromisoformat("2021-03-06 21:00"),
)
== State.STOPPED
)
|
[
"schedulers.state_service.StateService",
"config.schedule.Schedule",
"datetime.datetime.fromisoformat",
"config.period.Period"
] |
[((922, 949), 'schedulers.state_service.StateService', 'StateService', ([], {'config': 'config'}), '(config=config)\n', (934, 949), False, 'from schedulers.state_service import StateService, State\n'), ((269, 358), 'config.period.Period', 'Period', ([], {'name': '"""period1"""', 'begin_time': '"""9:00"""', 'end_time': '"""13:00"""', 'weekdays': '[0, 1, 2, 3, 4]'}), "(name='period1', begin_time='9:00', end_time='13:00', weekdays=[0, 1,\n 2, 3, 4])\n", (275, 358), False, 'from config.period import Period\n'), ((434, 524), 'config.period.Period', 'Period', ([], {'name': '"""period2"""', 'begin_time': '"""15:00"""', 'end_time': '"""16:00"""', 'weekdays': '[0, 1, 2, 3, 4]'}), "(name='period2', begin_time='15:00', end_time='16:00', weekdays=[0, 1,\n 2, 3, 4])\n", (440, 524), False, 'from config.period import Period\n'), ((600, 672), 'config.period.Period', 'Period', ([], {'name': '"""period3"""', 'end_time': '"""21:00"""', 'weekdays': '[0, 1, 2, 3, 4, 5, 6]'}), "(name='period3', end_time='21:00', weekdays=[0, 1, 2, 3, 4, 5, 6])\n", (606, 672), False, 'from config.period import Period\n'), ((740, 815), 'config.schedule.Schedule', 'Schedule', ([], {'name': '"""schedule1"""', 'periods_names': "['period1', 'period2', 'period3']"}), "(name='schedule1', periods_names=['period1', 'period2', 'period3'])\n", (748, 815), False, 'from config.schedule import Schedule\n'), ((1112, 1154), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 08:00"""'], {}), "('2021-03-02 08:00')\n", (1134, 1154), False, 'from datetime import datetime\n'), ((1179, 1221), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 09:00"""'], {}), "('2021-03-02 09:00')\n", (1201, 1221), False, 'from datetime import datetime\n'), ((1245, 1287), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-01 21:00"""'], {}), "('2021-03-01 21:00')\n", (1267, 1287), False, 'from datetime import datetime\n'), ((1447, 1489), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 09:00"""'], {}), "('2021-03-02 09:00')\n", (1469, 1489), False, 'from datetime import datetime\n'), ((1514, 1556), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 09:00"""'], {}), "('2021-03-02 09:00')\n", (1536, 1556), False, 'from datetime import datetime\n'), ((1580, 1622), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-01 21:00"""'], {}), "('2021-03-01 21:00')\n", (1602, 1622), False, 'from datetime import datetime\n'), ((1782, 1824), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 10:00"""'], {}), "('2021-03-02 10:00')\n", (1804, 1824), False, 'from datetime import datetime\n'), ((1849, 1891), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 09:00"""'], {}), "('2021-03-02 09:00')\n", (1871, 1891), False, 'from datetime import datetime\n'), ((1915, 1957), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-01 21:00"""'], {}), "('2021-03-01 21:00')\n", (1937, 1957), False, 'from datetime import datetime\n'), ((2117, 2159), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 13:00"""'], {}), "('2021-03-02 13:00')\n", (2139, 2159), False, 'from datetime import datetime\n'), ((2184, 2226), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 09:00"""'], {}), "('2021-03-02 09:00')\n", (2206, 2226), False, 'from datetime import datetime\n'), ((2250, 2292), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-01 21:00"""'], {}), "('2021-03-01 21:00')\n", (2272, 2292), False, 'from datetime import datetime\n'), ((2452, 2494), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 13:30"""'], {}), "('2021-03-02 13:30')\n", (2474, 2494), False, 'from datetime import datetime\n'), ((2519, 2561), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 09:00"""'], {}), "('2021-03-02 09:00')\n", (2541, 2561), False, 'from datetime import datetime\n'), ((2585, 2627), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 18:00"""'], {}), "('2021-03-02 18:00')\n", (2607, 2627), False, 'from datetime import datetime\n'), ((2787, 2829), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 15:00"""'], {}), "('2021-03-02 15:00')\n", (2809, 2829), False, 'from datetime import datetime\n'), ((2854, 2896), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 09:00"""'], {}), "('2021-03-02 09:00')\n", (2876, 2896), False, 'from datetime import datetime\n'), ((2920, 2962), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 13:00"""'], {}), "('2021-03-02 13:00')\n", (2942, 2962), False, 'from datetime import datetime\n'), ((3122, 3164), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 15:30"""'], {}), "('2021-03-02 15:30')\n", (3144, 3164), False, 'from datetime import datetime\n'), ((3189, 3231), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 15:00"""'], {}), "('2021-03-02 15:00')\n", (3211, 3231), False, 'from datetime import datetime\n'), ((3255, 3297), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 13:00"""'], {}), "('2021-03-02 13:00')\n", (3277, 3297), False, 'from datetime import datetime\n'), ((3457, 3499), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 16:00"""'], {}), "('2021-03-02 16:00')\n", (3479, 3499), False, 'from datetime import datetime\n'), ((3524, 3566), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 15:00"""'], {}), "('2021-03-02 15:00')\n", (3546, 3566), False, 'from datetime import datetime\n'), ((3590, 3632), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 13:00"""'], {}), "('2021-03-02 13:00')\n", (3612, 3632), False, 'from datetime import datetime\n'), ((3792, 3834), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 16:30"""'], {}), "('2021-03-02 16:30')\n", (3814, 3834), False, 'from datetime import datetime\n'), ((3859, 3901), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 15:00"""'], {}), "('2021-03-02 15:00')\n", (3881, 3901), False, 'from datetime import datetime\n'), ((3925, 3967), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 16:00"""'], {}), "('2021-03-02 16:00')\n", (3947, 3967), False, 'from datetime import datetime\n'), ((4127, 4169), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 21:00"""'], {}), "('2021-03-02 21:00')\n", (4149, 4169), False, 'from datetime import datetime\n'), ((4194, 4236), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 09:00"""'], {}), "('2021-03-02 09:00')\n", (4216, 4236), False, 'from datetime import datetime\n'), ((4260, 4302), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 18:00"""'], {}), "('2021-03-02 18:00')\n", (4282, 4302), False, 'from datetime import datetime\n'), ((4462, 4504), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 22:00"""'], {}), "('2021-03-02 22:00')\n", (4484, 4504), False, 'from datetime import datetime\n'), ((4529, 4571), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 09:00"""'], {}), "('2021-03-02 09:00')\n", (4551, 4571), False, 'from datetime import datetime\n'), ((4595, 4637), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 21:00"""'], {}), "('2021-03-02 21:00')\n", (4617, 4637), False, 'from datetime import datetime\n'), ((4843, 4885), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 08:00"""'], {}), "('2021-03-02 08:00')\n", (4865, 4885), False, 'from datetime import datetime\n'), ((4910, 4952), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 07:00"""'], {}), "('2021-03-02 07:00')\n", (4932, 4952), False, 'from datetime import datetime\n'), ((4976, 5018), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-01 21:00"""'], {}), "('2021-03-01 21:00')\n", (4998, 5018), False, 'from datetime import datetime\n'), ((5178, 5220), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 09:00"""'], {}), "('2021-03-02 09:00')\n", (5200, 5220), False, 'from datetime import datetime\n'), ((5245, 5287), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 07:00"""'], {}), "('2021-03-02 07:00')\n", (5267, 5287), False, 'from datetime import datetime\n'), ((5311, 5353), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-01 21:00"""'], {}), "('2021-03-01 21:00')\n", (5333, 5353), False, 'from datetime import datetime\n'), ((5513, 5555), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 09:00"""'], {}), "('2021-03-02 09:00')\n", (5535, 5555), False, 'from datetime import datetime\n'), ((5580, 5622), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 07:00"""'], {}), "('2021-03-02 07:00')\n", (5602, 5622), False, 'from datetime import datetime\n'), ((5646, 5688), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 08:00"""'], {}), "('2021-03-02 08:00')\n", (5668, 5688), False, 'from datetime import datetime\n'), ((5848, 5890), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 19:00"""'], {}), "('2021-03-02 19:00')\n", (5870, 5890), False, 'from datetime import datetime\n'), ((5915, 5957), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 20:00"""'], {}), "('2021-03-02 20:00')\n", (5937, 5957), False, 'from datetime import datetime\n'), ((5981, 6023), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 18:00"""'], {}), "('2021-03-02 18:00')\n", (6003, 6023), False, 'from datetime import datetime\n'), ((6183, 6225), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 23:00"""'], {}), "('2021-03-02 23:00')\n", (6205, 6225), False, 'from datetime import datetime\n'), ((6250, 6292), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 22:00"""'], {}), "('2021-03-02 22:00')\n", (6272, 6292), False, 'from datetime import datetime\n'), ((6316, 6358), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 21:00"""'], {}), "('2021-03-02 21:00')\n", (6338, 6358), False, 'from datetime import datetime\n'), ((6518, 6560), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-07 22:00"""'], {}), "('2021-03-07 22:00')\n", (6540, 6560), False, 'from datetime import datetime\n'), ((6585, 6627), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-07 17:00"""'], {}), "('2021-03-07 17:00')\n", (6607, 6627), False, 'from datetime import datetime\n'), ((6651, 6693), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-07 23:10"""'], {}), "('2021-03-07 23:10')\n", (6673, 6693), False, 'from datetime import datetime\n'), ((6853, 6895), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-07 22:00"""'], {}), "('2021-03-07 22:00')\n", (6875, 6895), False, 'from datetime import datetime\n'), ((6920, 6962), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-07 17:00"""'], {}), "('2021-03-07 17:00')\n", (6942, 6962), False, 'from datetime import datetime\n'), ((6986, 7028), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-07 01:10"""'], {}), "('2021-03-07 01:10')\n", (7008, 7028), False, 'from datetime import datetime\n'), ((7233, 7275), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 11:00"""'], {}), "('2021-03-02 11:00')\n", (7255, 7275), False, 'from datetime import datetime\n'), ((7300, 7342), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 09:00"""'], {}), "('2021-03-02 09:00')\n", (7322, 7342), False, 'from datetime import datetime\n'), ((7366, 7408), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-02 10:00"""'], {}), "('2021-03-02 10:00')\n", (7388, 7408), False, 'from datetime import datetime\n'), ((7609, 7651), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-06 08:00"""'], {}), "('2021-03-06 08:00')\n", (7631, 7651), False, 'from datetime import datetime\n'), ((7676, 7718), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-06 09:00"""'], {}), "('2021-03-06 09:00')\n", (7698, 7718), False, 'from datetime import datetime\n'), ((7742, 7784), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-05 21:00"""'], {}), "('2021-03-05 21:00')\n", (7764, 7784), False, 'from datetime import datetime\n'), ((7944, 7986), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-06 09:00"""'], {}), "('2021-03-06 09:00')\n", (7966, 7986), False, 'from datetime import datetime\n'), ((8011, 8053), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-06 09:00"""'], {}), "('2021-03-06 09:00')\n", (8033, 8053), False, 'from datetime import datetime\n'), ((8077, 8119), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-05 21:00"""'], {}), "('2021-03-05 21:00')\n", (8099, 8119), False, 'from datetime import datetime\n'), ((8279, 8321), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-06 10:00"""'], {}), "('2021-03-06 10:00')\n", (8301, 8321), False, 'from datetime import datetime\n'), ((8346, 8388), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-06 09:00"""'], {}), "('2021-03-06 09:00')\n", (8368, 8388), False, 'from datetime import datetime\n'), ((8412, 8454), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-05 21:00"""'], {}), "('2021-03-05 21:00')\n", (8434, 8454), False, 'from datetime import datetime\n'), ((8614, 8656), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-06 13:00"""'], {}), "('2021-03-06 13:00')\n", (8636, 8656), False, 'from datetime import datetime\n'), ((8681, 8723), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-06 09:00"""'], {}), "('2021-03-06 09:00')\n", (8703, 8723), False, 'from datetime import datetime\n'), ((8747, 8789), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-05 21:00"""'], {}), "('2021-03-05 21:00')\n", (8769, 8789), False, 'from datetime import datetime\n'), ((8949, 8991), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-06 13:30"""'], {}), "('2021-03-06 13:30')\n", (8971, 8991), False, 'from datetime import datetime\n'), ((9016, 9058), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-06 09:00"""'], {}), "('2021-03-06 09:00')\n", (9038, 9058), False, 'from datetime import datetime\n'), ((9082, 9124), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-05 21:00"""'], {}), "('2021-03-05 21:00')\n", (9104, 9124), False, 'from datetime import datetime\n'), ((9284, 9326), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-06 21:00"""'], {}), "('2021-03-06 21:00')\n", (9306, 9326), False, 'from datetime import datetime\n'), ((9351, 9393), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-06 09:00"""'], {}), "('2021-03-06 09:00')\n", (9373, 9393), False, 'from datetime import datetime\n'), ((9417, 9459), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-05 21:00"""'], {}), "('2021-03-05 21:00')\n", (9439, 9459), False, 'from datetime import datetime\n'), ((9619, 9661), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-06 22:00"""'], {}), "('2021-03-06 22:00')\n", (9641, 9661), False, 'from datetime import datetime\n'), ((9686, 9728), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-06 09:00"""'], {}), "('2021-03-06 09:00')\n", (9708, 9728), False, 'from datetime import datetime\n'), ((9752, 9794), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-03-06 21:00"""'], {}), "('2021-03-06 21:00')\n", (9774, 9794), False, 'from datetime import datetime\n')]
|
import explanes as el
import numpy as np
import pandas as pd
np.random.seed(0)
experiment = el.experiment.Experiment()
experiment.project.name = 'example'
experiment.path.output = '/tmp/'+experiment.project.name+'/'
experiment.factor.f1 = [1, 2]
experiment.factor.f2 = [1, 2, 3]
experiment.metric.m1 = ['mean', 'std']
experiment.metric.m2 = ['min', 'argmin']
def process(setting, experiment):
metric1 = setting.f1+setting.f2+np.random.randn(100)
metric2 = setting.f1*setting.f2*np.random.randn(100)
np.save(experiment.path.output+setting.id()+'_m1.npy', metric1)
np.save(experiment.path.output+setting.id()+'_m2.npy', metric2)
experiment.setPath()
experiment.do([], process, progress=False)
(settingDescription, columnHeader, constantSettingDescription, nbColumnFactor) = experiment.metric.reduce(experiment.factor.mask([1]), experiment.path.output, verbose=True)
df = pd.DataFrame(settingDescription, columns=columnHeader)
df[columnHeader[nbColumnFactor:]] = df[columnHeader[nbColumnFactor:]].round(decimals=2)
print(constantSettingDescription)
print(df)
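# --- Editor's note (hedged): process() writes one '<setting id>_m1.npy' /
# --- '<setting id>_m2.npy' pair per factor combination under
# --- experiment.path.output; reduce() then loads those arrays for the masked
# --- settings and applies the declared reductions (mean/std for m1,
# --- min/argmin for m2) to build the table printed above. The exact mask
# --- semantics are those of explanes' Factor.mask(), not restated here.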
|
[
"explanes.experiment.Experiment",
"numpy.random.seed",
"pandas.DataFrame",
"numpy.random.randn"
] |
[((62, 79), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (76, 79), True, 'import numpy as np\n'), ((94, 120), 'explanes.experiment.Experiment', 'el.experiment.Experiment', ([], {}), '()\n', (118, 120), True, 'import explanes as el\n'), ((883, 937), 'pandas.DataFrame', 'pd.DataFrame', (['settingDescription'], {'columns': 'columnHeader'}), '(settingDescription, columns=columnHeader)\n', (895, 937), True, 'import pandas as pd\n'), ((430, 450), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (445, 450), True, 'import numpy as np\n'), ((485, 505), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (500, 505), True, 'import numpy as np\n')]
|
"""Green's function computation and related methods
Deprecated: use the chebyshev module instead
"""
import warnings
from . import chebyshev
from .support.deprecated import LoudDeprecationWarning
__all__ = ['Greens', 'kpm', 'kpm_cuda']
Greens = chebyshev.KPM
def kpm(*args, **kwargs):
warnings.warn("Use pb.kpm() instead", LoudDeprecationWarning, stacklevel=2)
return chebyshev.kpm(*args, **kwargs)
def kpm_cuda(*args, **kwargs):
warnings.warn("Use pb.kpm_cuda() instead", LoudDeprecationWarning, stacklevel=2)
return chebyshev.kpm_cuda(*args, **kwargs)
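# --- Editor's usage note (hedged): each shim emits a LoudDeprecationWarning
# --- naming the pb.* replacement, then forwards all arguments unchanged:
# ---     kpm(model)        # warns "Use pb.kpm() instead"; same as chebyshev.kpm(model)
# ---     kpm_cuda(model)   # warns "Use pb.kpm_cuda() instead"
# --- ('model' above is an illustrative argument, not defined in this module.)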
|
[
"warnings.warn"
] |
[((294, 369), 'warnings.warn', 'warnings.warn', (['"""Use pb.kpm() instead"""', 'LoudDeprecationWarning'], {'stacklevel': '(2)'}), "('Use pb.kpm() instead', LoudDeprecationWarning, stacklevel=2)\n", (307, 369), False, 'import warnings\n'), ((449, 534), 'warnings.warn', 'warnings.warn', (['"""Use pb.kpm_cuda() instead"""', 'LoudDeprecationWarning'], {'stacklevel': '(2)'}), "('Use pb.kpm_cuda() instead', LoudDeprecationWarning, stacklevel=2\n )\n", (462, 534), False, 'import warnings\n')]
|
def full_function():
# Note that this function is not called, it's there just to make the mapping explicit.
a = 1 # map to cEll1, line 2
b = 2 # map to cEll1, line 3
c = 3 # map to cEll2, line 2
d = 4 # map to cEll2, line 3
def create_code():
cell1_code = compile(''' # line 1
a = 1 # line 2
b = 2 # line 3
''', '<cEll1>', 'exec')
cell2_code = compile('''# line 1
c = 3 # line 2
d = 4 # line 3
''', '<cEll2>', 'exec')
# Set up the source in linecache. Python doesn't have a public API for
# this, so we have to hack around it, similar to what IPython does.
import linecache
import time
code = ''' # line 1
a = 1 # line 2
b = 2 # line 3
'''
linecache.cache['<cEll1>'] = (
len(code),
time.time(),
[line + '\n' for line in code.splitlines()],
'<cEll1>',
)
code = '''# line 1
c = 3 # line 2
d = 4 # line 3
'''
linecache.cache['<cEll2>'] = (
len(code),
time.time(),
[line + '\n' for line in code.splitlines()],
'<cEll2>',
)
return {'cEll1': cell1_code, 'cEll2': cell2_code}
if __name__ == '__main__':
code = create_code()
exec(code['cEll1'])
exec(code['cEll1'])
exec(code['cEll2'])
exec(code['cEll2'])
    print('TEST SUCCEEDED')
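# --- Editor's illustration (hedged): once the cache entries are installed,
# --- linecache resolves the fake filenames exactly like real files, which is
# --- what lets tracebacks and debuggers display cell source, e.g.:
# ---     linecache.getline('<cEll1>', 2)  ->  'a = 1 # line 2\n'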
|
[
"time.time"
] |
[((765, 776), 'time.time', 'time.time', ([], {}), '()\n', (774, 776), False, 'import time\n'), ((978, 989), 'time.time', 'time.time', ([], {}), '()\n', (987, 989), False, 'import time\n')]
|
import httpx
import pandas
from .util import format_dates
BLOCKARRIVE_BASISCODE = {
-6: "no_source",
-5: "no_link",
-4: "auto_suspend",
-3: "no_download_link",
-2: "manual_suspend",
-1: "block_open",
0: "routed",
1: "queue_full",
2: "rerouting",
}
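# Editor's note (hedged): negative codes explain why no ETA is available,
# e.g. BLOCKARRIVE_BASISCODE.get(-1) -> 'block_open'; unknown codes fall back
# to 'No code specified' via .get() in blockarrive() below.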
class DataSvc:
"""PhEDEx datasvc REST API
Full documentation at https://cmsweb.cern.ch/phedex/datasvc/doc
"""
defaults = {
# PhEDEx datasvc base URL with trailing slash
"datasvc_base": "https://cmsweb.cern.ch/phedex/datasvc/",
# Options: prod, dev, debug
"phedex_instance": "prod",
}
def __init__(self, client, datasvc_base=None, phedex_instance=None):
if datasvc_base is None:
datasvc_base = DataSvc.defaults["datasvc_base"]
if phedex_instance is None:
phedex_instance = DataSvc.defaults["phedex_instance"]
self.client = client
self.baseurl = httpx.URL(datasvc_base)
self.jsonurl = self.baseurl.join("json/%s/" % phedex_instance)
self.xmlurl = self.baseurl.join("xml/%s/" % phedex_instance)
async def jsonmethod(self, method, **params):
return await self.client.getjson(url=self.jsonurl.join(method), params=params)
async def blockreplicas(self, **params):
"""Get block replicas as a pandas dataframe
Parameters
----------
block block name, can be multiple (*)
dataset dataset name, can be multiple (*)
node node name, can be multiple (*)
se storage element name, can be multiple (*)
update_since unix timestamp, only return replicas whose record was
updated since this time
create_since unix timestamp, only return replicas whose record was
created since this time. When no "dataset", "block"
or "node" are given, create_since is default to 24 hours ago
complete y or n, whether or not to require complete or incomplete
blocks. Open blocks cannot be complete. Default is to
return either.
dist_complete y or n, "distributed complete". If y, then returns
only block replicas for which at least one node has
all files in the block. If n, then returns block
replicas for which no node has all the files in the
block. Open blocks cannot be dist_complete. Default is
to return either kind of block replica.
subscribed y or n, filter for subscription. default is to return either.
custodial y or n. filter for custodial responsibility. default is
to return either.
group group name. default is to return replicas for any group.
show_dataset y or n, default n. If y, show dataset information with
the blocks; if n, only show blocks
"""
resjson = await self.jsonmethod("blockreplicas", **params)
df = pandas.json_normalize(
resjson["phedex"]["block"],
record_path="replica",
record_prefix="replica.",
meta=["bytes", "files", "name", "id", "is_open"],
)
format_dates(df, ["replica.time_create", "replica.time_update"])
return df
async def nodes(self, **params):
"""Returns a simple dump of phedex nodes.
Parameters
----------
node PhEDex node names to filter on, can be multiple (*)
noempty filter out nodes which do not host any data
"""
resjson = await self.jsonmethod("nodes", **params)
df = pandas.json_normalize(
resjson["phedex"],
record_path="node",
record_prefix="node.",
)
return df
async def data(self, human_readable=None, **params):
"""Shows data which is registered (injected) to phedex
Parameters
----------
dataset dataset name to output data for (wildcard support)
block block name to output data for (wildcard support)
file file name to output data for (wildcard support)
        level                    display level, 'file' or 'block'. When level='block',
                                 no file details are shown. Default is 'file'.
        create_since             when level='block', return blocks created since this
                                 time; when level='file', return files created since
                                 this time. When no parameters are given, create_since
                                 defaults to one day ago.
"""
if type(human_readable) is not bool and human_readable is not None:
raise Exception("Wrong human_readable parameter type")
resjson = await self.jsonmethod("data", **params)
out = []
for _instance in resjson["phedex"]["dbs"]:
for _dataset in _instance["dataset"]:
for _block in _dataset["block"]:
for _file in _block["file"]:
out.append(
{
"Dataset": _dataset["name"],
"Is_dataset_open": _dataset["is_open"],
"block_Name": _block["name"],
"Block_size_(GB)": _block["bytes"] / 1000000000.0,
"Time_block_was_created": _block["time_create"],
"File_name": _file["lfn"],
"File_checksum": _file["checksum"],
"File_size": _file["size"] / 1000000000.0,
"Time_file_was_created": _file["time_create"],
}
)
df = pandas.json_normalize(out)
format_dates(df, ["Time_file_was_created", "Time_block_was_created"])
if human_readable:
mapping = {
"Is_dataset_open": "Is dataset open",
"block_Name": "Block Name",
"Block_size_(GB)": "Block size (GB)",
"Time_block_was_created": "Time Block Was Created",
"File_name": "File Name",
"File_checksum": "File Checksum",
"File_size": "File Size (GB)",
"Time_file_was_created": "Time File Was Created",
}
df2 = df.rename(columns=mapping)
return df2
else:
return df
async def errorlog(self, human_readable=None, **params):
"""Return detailed transfer error information, including logs of the transfer and validation commands.
Note that phedex only stores the last 100 errors per link, so more errors may have occurred then indicated by this API
call.
Parameters
----------
Required inputs: at least one of the followings: from, to, block, lfn
optional inputs: (as filters) from, to, dataset, block, lfn
from name of the source node, could be multiple
to name of the destination node, could be multiple
block block name
dataset dataset name
lfn logical file name
"""
if type(human_readable) is not bool and human_readable is not None:
raise Exception("Wrong human_readable parameter type")
resjson = await self.jsonmethod("errorlog", **params)
out = []
for _instance in resjson["phedex"]["link"]:
for _block in _instance["block"]:
for _file in _block["file"]:
for _transfer_error in _file["transfer_error"]:
out.append(
{
"Link": _instance["from"] + " to " + _instance["to"],
"LFN": _file["name"],
"file_Checksum": _file["checksum"],
"file_size_(GB)": _file["size"] / 1000000000.0,
"Block_name": _block["name"],
"Error_log": str(_transfer_error["detail_log"]["$t"]),
"From_PFN": _transfer_error["from_pfn"],
"To_PFN": _transfer_error["to_pfn"],
"Time": _transfer_error["time_done"],
}
)
df = pandas.json_normalize(out)
format_dates(df, ["Time"])
if human_readable:
mapping = {
"From_PFN": "From PFN",
"To_PFN": "To PFN",
"Error_log": "Error Log",
"Block_Name": "Block Name",
"Block_size_(GB)": "Block size (GB)",
"file_checksum": "File Checksum",
"file_size_(GB)": "File Size (GB)",
}
df2 = df.rename(columns=mapping)
return df2
else:
return df
async def blockarrive(self, human_readable=None, **params):
"""Return estimated time of arrival for blocks currently subscribed for transfer. If the estimated time of arrival (ETA)
cannot be calculated, or the block will never arrive, a reason for the missing estimate is provided.
Parameters
----------
id block id
block block name, could be multiple, could have wildcard
dataset dataset name, could be multiple, could have wildcard
to_node destination node, could be multiple, could have wildcard
priority priority, could be multiple
update_since updated since this time
basis technique used for the ETA calculation, or reason it's missing.
arrive_before only show blocks that are expected to arrive before this time.
arrive_after only show blocks that are expected to arrive after this time.
"""
if type(human_readable) is not bool and human_readable is not None:
raise Exception("Wrong human_readable parameter type")
resjson = await self.jsonmethod("blockarrive", **params)
out = []
for _block in resjson["phedex"]["block"]:
for _destination in _block["destination"]:
out.append(
{
"Block_Name": _block["name"],
"Destination": _destination["name"],
"Time_Arrive": _destination["time_arrive"],
"Time_update": _destination["time_update"],
"Number_of_files": _destination["files"],
"Block_size_(GB)": _destination["bytes"] / 1000000000.0,
"Basis_code": BLOCKARRIVE_BASISCODE.get(
_destination["basis"], "No code specified"
),
}
)
df = pandas.json_normalize(out)
format_dates(df, ["Time_Arrive", "Time_update"])
if human_readable:
mapping = {
"Block_Name": "Block Name",
"Block_size_(GB)": "Block size (GB)",
"Time_Arrive": "Time Arrive",
"Time_update": "Time Update",
"Number_of_files": "Number Of Files",
"Basis_code": "Basis Code",
}
df2 = df.rename(columns=mapping)
return df2
else:
return df
async def filereplicas(self, human_readable=None, **params):
"""Serves the file replicas known to phedex.
Parameters
----------
block block name, with '*' wildcards, can be multiple (*). required when no lfn is specified. Block names must
follow the syntax /X/Y/Z#, i.e. have three /'s and a '#'. Anything else is rejected.
dataset dataset name. Syntax: /X/Y/Z, all three /'s obligatory. Wildcads are allowed.
node node name, can be multiple (*)
se storage element name, can be multiple (*)
update_since unix timestamp, only return replicas updated since this
time
create_since unix timestamp, only return replicas created since this
time
complete y or n. if y, return only file replicas from complete block
replicas. if n only return file replicas from incomplete block
replicas. default is to return either.
dist_complete y or n. if y, return only file replicas from blocks
where all file replicas are available at some node. if
n, return only file replicas from blocks which have
file replicas not available at any node. default is
to return either.
subscribed y or n, filter for subscription. default is to return either.
custodial y or n. filter for custodial responsibility. default is
to return either.
group group name. default is to return replicas for any group.
lfn logical file name
"""
if type(human_readable) is not bool and human_readable is not None:
raise Exception("Wrong human_readable parameter type")
resjson = await self.jsonmethod("filereplicas", **params)
out = []
for _block in resjson["phedex"]["block"]:
for _file in _block["file"]:
for _replica in _file["replica"]:
out.append(
{
"Block_name": _block["name"],
"Files": _block["files"],
"Block_size_(GB)": _block["bytes"] / 1000000000.0,
"lfn": _file["name"],
"Checksum": _file["checksum"],
"File_created_on": _file["time_create"],
"File_replica_at": _replica["node"],
"File_subcribed": _replica["subscribed"],
"Custodial": _replica["custodial"],
"Group": _replica["group"],
"File_in_node_since": _replica["time_create"],
}
)
df = pandas.json_normalize(out)
format_dates(df, ["File_created_on", "File_in_node_since"])
if human_readable is True:
mapping = {
"Block_name": "Block Name",
"Block_size_(GB)": "Block size (GB)",
"File_created_on": "File Created On",
"File_replica_at": "File Replica At",
"File_subcribed": "File Subcribed",
"File_in_node_since": "File In Node Since",
}
df2 = df.rename(columns=mapping)
return df2
else:
return df
async def agentlogs(self, human_readable=None, **params):
"""Show messages from the agents.
Parameters
----------
required inputs: at least one of the optional inputs
optional inputs: (as filters) user, host, pid, agent, update_since
node name of the node
user user name who owns agent processes
host hostname where agent runs
agent name of the agent
pid process id of agent
        update_since    lower bound of time to show log messages. Default last 24 h.
"""
if type(human_readable) is not bool and human_readable is not None:
raise Exception("Wrong human_readable parameter type")
resjson = await self.jsonmethod("agentlogs", **params)
out = []
for _agent in resjson["phedex"]["agent"]:
for _node in _agent["node"]:
node = _node["name"]
for _log in _agent["log"]:
out.append(
{
"Agent": _agent["name"],
"Host": _agent["host"],
"PID": _agent["pid"],
"Node": node,
"User": _agent["user"],
"Reason": _log["reason"],
"Time": _log["time"],
"state_dir": _log["state_dir"],
"working_dir": _log["working_dir"],
"Message": str(_log["message"]["$t"]),
}
)
df = pandas.json_normalize(out)
format_dates(df, ["Time"])
if human_readable is True:
mapping = {
"state_dir": "State Directory",
"working_dir": "Working Directory",
}
df2 = df.rename(columns=mapping)
return df2
else:
return df
async def missingfiles(self, human_readable=None, **params):
"""Show files which are missing from blocks at a node.
Parameters
----------
block block name (wildcards) (*)
lfn logical file name (*)
node node name (wildcards)
se storage element.
        subscribed      y or n, whether the block is subscribed to the node or not;
                        default is null (either)
custodial y or n. filter for custodial responsibility,
default is to return either
group group name
default is to return missing blocks for any group.
(*) either block or lfn is required
"""
resjson = await self.jsonmethod("missingfiles", **params)
out = []
if human_readable is not None and type(human_readable) is not bool:
print("Wrong human_readable parameter type")
df = pandas.json_normalize(out)
return df
elif human_readable is None or human_readable is False:
for _block in resjson["phedex"]["block"]:
for _file in _block["file"]:
for _missing in _file["missing"]:
out.append(
{
"block_name": _block["name"],
"file_name": _file["name"],
"checksum": _file["checksum"],
"size": _file["bytes"],
"created": _file["time_create"],
"origin_node": _file["origin_node"],
"missing_from": _missing["node_name"],
"disk": _missing["se"],
"custodial": _missing["custodial"],
"subscribed": _missing["subscribed"],
}
)
df = pandas.json_normalize(out)
return format_dates(df, ["created"])
elif human_readable is True:
for _block in resjson["phedex"]["block"]:
for _file in _block["file"]:
for _missing in _file["missing"]:
out.append(
{
"Block Name": _block["name"],
"File Name": _file["name"],
"checksum": _file["checksum"],
"Size of file": _file["bytes"],
"Time created": _file["time_create"],
"Origin Node": _file["origin_node"],
"Missing from": _missing["node_name"],
"Disk": _missing["se"],
"Custodial?": _missing["custodial"],
"Subscribed?": _missing["subscribed"],
}
)
df = pandas.json_normalize(out)
return format_dates(df, ["Time created"])
async def agents(self, human_readable=None, **params):
"""Serves information about running (or at least recently running) phedex agents.
Parameters
----------
required inputs: none
optional inputs: (as filters) node, se, agent
node node name, could be multiple
se storage element name, could be multiple
agent agent name, could be multiple
version phedex version
update_since updated since this time
detail 'y' or 'n', default 'n'. show "code" information at file level *
"""
resjson = await self.jsonmethod("agents", **params)
out = []
if human_readable is not None and type(human_readable) is not bool:
print("Wrong human_readable parameter type")
df = pandas.json_normalize(out)
return df
elif human_readable is None or human_readable is False:
for _node in resjson["phedex"]["node"]:
for _agent in _node["agent"]:
out.append(
{
"Node": _node["node"],
"Host": _node["host"],
"Agent_name": _node["name"],
"Agent_label": _agent["label"],
"Time_update": _agent["time_update"],
"state_dir": _agent["state_dir"],
"version": _agent["version"],
}
)
df = pandas.json_normalize(out)
return format_dates(df, ["Time_update"])
elif human_readable is True:
for _node in resjson["phedex"]["node"]:
for _agent in _node["agent"]:
out.append(
{
"Node": _node["node"],
"Host": _node["host"],
"Agent name": _node["name"],
"Agent label": _agent["label"],
"Time update": _agent["time_update"],
"Directory": _agent["state_dir"],
"Version": _agent["version"],
}
)
df = pandas.json_normalize(out)
return format_dates(df, ["Time update"])
async def blocklatency(self, human_readable=None, **params):
"""Show authentication state and abilities
Parameters
----------
ability authorization ability. If passed then the nodes (from TMDB)
that the user is allowed to use "ability" for are returned.
require_cert if passed then the call will die if the user is not
authenticated by certificate
require_passwd if passed then the call will die if the user is not
authenticated by password
"""
resjson = await self.jsonmethod("blocklatency", **params)
out = []
if human_readable is not None and type(human_readable) is not bool:
print("Wrong human_readable parameter type")
df = pandas.json_normalize(out)
return df
elif human_readable is None or human_readable is False:
for _block in resjson["phedex"]["block"]:
for _destination in _block["destination"]:
for _latency in _destination["latency"]:
out.append(
{
"Block": _block["name"],
"Block_ID": _block["id"],
"Dataset": _block["dataset"],
"Size": _block["bytes"],
"Time_create": _block["time_create"],
"Number_of_files": _block["files"],
"Time_update": _block["time_update"],
"Destination": _destination["name"],
"custodial": _latency["is_custodial"],
"last_suspend": _latency["last_suspend"],
"last_replica": _latency["last_replica"],
"time_subscription": _latency["time_subscription"],
"block_closed": _latency["block_close"],
"latency": _latency["latency"],
}
)
df = pandas.json_normalize(out)
return format_dates(
df,
[
"Time_update",
"last_suspend",
"last_replica",
"time_subscription",
"block_closed",
"Time_create",
],
)
elif human_readable is True:
for _block in resjson["phedex"]["block"]:
for _destination in _block["destination"]:
for _latency in _destination["latency"]:
out.append(
{
"Block": _block["name"],
"Block ID": _block["id"],
"Dataset": _block["dataset"],
"Size": _block["bytes"],
"Time Create": _block["time_create"],
"Number of files": _block["files"],
"Time Update": _block["time_update"],
"Destination": _destination["name"],
"custodial": _latency["is_custodial"],
"Last Suspend": _latency["last_suspend"],
"Last Replica": _latency["last_replica"],
"Time Subscription": _latency["time_subscription"],
"Block Closed": _latency["block_close"],
"Latency": _latency["latency"],
}
)
df = pandas.json_normalize(out)
return format_dates(
df,
[
"Time Update",
"Last Suspend",
"Last Replica",
"Time Subscription",
"Block Closed",
"Time Create",
],
)
async def requestlist(self, human_readable=None, **params):
"""Serve as a simple request search and cache-able catalog of requests to save within a client,
which may then use the request ID to obtain further details using TransferRequests or DeletionRequests.
Parameters
----------
request * request id
type request type, 'xfer' (default) or 'delete'
approval approval state, 'approved', 'disapproved', 'mixed', or 'pending'
requested_by * requestor's name
node * name of the destination node
(show requests in which this node is involved)
decision decision at the node, 'approved', 'disapproved' or 'pending'
group * user group
create_since created since this time
create_until created until this time
decide_since decided since this time
decide_until decided until this time
dataset * dataset is part of request, or a block from this dataset
block * block is part of request, or part of a dataset in request
decided_by * name of person who approved the request
* could be multiple and/or with wildcard
** when both 'block' and 'dataset' are present, they form a logical disjunction (ie. or)
"""
resjson = await self.jsonmethod("requestlist", **params)
out = []
        if human_readable is not None and type(human_readable) is not bool:
            raise Exception("Wrong human_readable parameter type")
elif human_readable is None or human_readable is False:
for _request in resjson["phedex"]["request"]:
for _node in _request["node"]:
out.append(
{
"request_id": _request["id"],
"time_created": _request["time_create"],
"requested_by": _request["requested_by"],
"approval": _request["approval"],
"node": _node["name"],
"time_decided": _node["time_decided"],
"decided_by": _node["decided_by"],
}
)
df = pandas.json_normalize(out)
return format_dates(df, ["time_created", "time_decided"])
else:
for _request in resjson["phedex"]["request"]:
for _node in _request["node"]:
out.append(
{
"Request ID": _request["id"],
"Time Created": _request["time_create"],
"Requested by": _request["requested_by"],
"Approval": _request["approval"],
"Node": _node["name"],
"Time decided": _node["time_decided"],
"Decided by": _node["decided_by"],
}
)
df = pandas.json_normalize(out)
return format_dates(df, ["Time Created", "Time decided"])
async def blockreplicasummary(self, human_readable=None, **params):
"""Show authentication state and abilities
Parameters
----------
ability authorization ability. If passed then the nodes (from TMDB)
that the user is allowed to use "ability" for are returned.
require_cert if passed then the call will die if the user is not
authenticated by certificate
require_passwd if passed then the call will die if the user is not
authenticated by password
"""
resjson = await self.jsonmethod("blockreplicasummary", **params)
out = []
if human_readable is not None and type(human_readable) is not bool:
print("Wrong human_readable parameter type")
df = pandas.json_normalize(out)
return df
else:
for _block in resjson["phedex"]["block"]:
for _replica in _block["replica"]:
out.append(
{
"Block": _block["name"],
"Node": _replica["node"],
"Complete": _replica["complete"],
}
)
df = pandas.json_normalize(out)
return df
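# --- Editor's usage sketch (hedged): the 'client' passed to DataSvc is not
# --- defined in this module; it is assumed to expose an awaitable
# --- getjson(url=..., params=...). A hypothetical driver might look like:
# import asyncio
# async def main(client):
#     svc = DataSvc(client)
#     df = await svc.blockreplicas(dataset='/SomeDataset/*/*')  # dataset name is illustrative
#     print(df.head())
# asyncio.run(main(make_json_client()))  # make_json_client() is hypothetical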
|
[
"pandas.json_normalize",
"httpx.URL"
] |
[((947, 970), 'httpx.URL', 'httpx.URL', (['datasvc_base'], {}), '(datasvc_base)\n', (956, 970), False, 'import httpx\n'), ((3141, 3293), 'pandas.json_normalize', 'pandas.json_normalize', (["resjson['phedex']['block']"], {'record_path': '"""replica"""', 'record_prefix': '"""replica."""', 'meta': "['bytes', 'files', 'name', 'id', 'is_open']"}), "(resjson['phedex']['block'], record_path='replica',\n record_prefix='replica.', meta=['bytes', 'files', 'name', 'id', 'is_open'])\n", (3162, 3293), False, 'import pandas\n'), ((3781, 3869), 'pandas.json_normalize', 'pandas.json_normalize', (["resjson['phedex']"], {'record_path': '"""node"""', 'record_prefix': '"""node."""'}), "(resjson['phedex'], record_path='node', record_prefix=\n 'node.')\n", (3802, 3869), False, 'import pandas\n'), ((6039, 6065), 'pandas.json_normalize', 'pandas.json_normalize', (['out'], {}), '(out)\n', (6060, 6065), False, 'import pandas\n'), ((8725, 8751), 'pandas.json_normalize', 'pandas.json_normalize', (['out'], {}), '(out)\n', (8746, 8751), False, 'import pandas\n'), ((11304, 11330), 'pandas.json_normalize', 'pandas.json_normalize', (['out'], {}), '(out)\n', (11325, 11330), False, 'import pandas\n'), ((14771, 14797), 'pandas.json_normalize', 'pandas.json_normalize', (['out'], {}), '(out)\n', (14792, 14797), False, 'import pandas\n'), ((17041, 17067), 'pandas.json_normalize', 'pandas.json_normalize', (['out'], {}), '(out)\n', (17062, 17067), False, 'import pandas\n'), ((18395, 18421), 'pandas.json_normalize', 'pandas.json_normalize', (['out'], {}), '(out)\n', (18416, 18421), False, 'import pandas\n'), ((21446, 21472), 'pandas.json_normalize', 'pandas.json_normalize', (['out'], {}), '(out)\n', (21467, 21472), False, 'import pandas\n'), ((23826, 23852), 'pandas.json_normalize', 'pandas.json_normalize', (['out'], {}), '(out)\n', (23847, 23852), False, 'import pandas\n'), ((28767, 28793), 'pandas.json_normalize', 'pandas.json_normalize', (['out'], {}), '(out)\n', (28788, 28793), False, 'import pandas\n'), ((31324, 31350), 'pandas.json_normalize', 'pandas.json_normalize', (['out'], {}), '(out)\n', (31345, 31350), False, 'import pandas\n'), ((31784, 31810), 'pandas.json_normalize', 'pandas.json_normalize', (['out'], {}), '(out)\n', (31805, 31810), False, 'import pandas\n'), ((19440, 19466), 'pandas.json_normalize', 'pandas.json_normalize', (['out'], {}), '(out)\n', (19461, 19466), False, 'import pandas\n'), ((22185, 22211), 'pandas.json_normalize', 'pandas.json_normalize', (['out'], {}), '(out)\n', (22206, 22211), False, 'import pandas\n'), ((25203, 25229), 'pandas.json_normalize', 'pandas.json_normalize', (['out'], {}), '(out)\n', (25224, 25229), False, 'import pandas\n'), ((29615, 29641), 'pandas.json_normalize', 'pandas.json_normalize', (['out'], {}), '(out)\n', (29636, 29641), False, 'import pandas\n'), ((30395, 30421), 'pandas.json_normalize', 'pandas.json_normalize', (['out'], {}), '(out)\n', (30416, 30421), False, 'import pandas\n'), ((20500, 20526), 'pandas.json_normalize', 'pandas.json_normalize', (['out'], {}), '(out)\n', (20521, 20526), False, 'import pandas\n'), ((22928, 22954), 'pandas.json_normalize', 'pandas.json_normalize', (['out'], {}), '(out)\n', (22949, 22954), False, 'import pandas\n'), ((26854, 26880), 'pandas.json_normalize', 'pandas.json_normalize', (['out'], {}), '(out)\n', (26875, 26880), False, 'import pandas\n')]
|
"""
Generate a golden NPZ file from a DICOM zip archive.
"""
import argparse
import numpy as np
from dicom_numpy.zip_archive import combined_series_from_zip
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output', help='Output golden NPZ file', required=False)
parser.add_argument('input', help="Input DICOM zip archive")
return parser.parse_args()
def generate_golden_values(input_zip, output_path='golden_values'):
"""
Generate a golden NPZ file for a given DICOM zip archive.
"""
voxels, ijk_to_xyz = combined_series_from_zip(input_zip)
np.savez_compressed(output_path, voxels=voxels, ijk_to_xyz=ijk_to_xyz)
if __name__ == '__main__':
args = parse_args()
if args.output:
generate_golden_values(args.input, args.output)
else:
generate_golden_values(args.input)
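# Hedged usage note: np.savez_compressed appends ".npz", and np.load returns a
# dict-like NpzFile keyed by the keyword names used above, e.g.
#   golden = np.load('golden_values.npz')
#   voxels, ijk_to_xyz = golden['voxels'], golden['ijk_to_xyz']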
|
[
"dicom_numpy.zip_archive.combined_series_from_zip",
"numpy.savez_compressed",
"argparse.ArgumentParser"
] |
[((192, 217), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (215, 217), False, 'import argparse\n'), ((576, 611), 'dicom_numpy.zip_archive.combined_series_from_zip', 'combined_series_from_zip', (['input_zip'], {}), '(input_zip)\n', (600, 611), False, 'from dicom_numpy.zip_archive import combined_series_from_zip\n'), ((616, 686), 'numpy.savez_compressed', 'np.savez_compressed', (['output_path'], {'voxels': 'voxels', 'ijk_to_xyz': 'ijk_to_xyz'}), '(output_path, voxels=voxels, ijk_to_xyz=ijk_to_xyz)\n', (635, 686), True, 'import numpy as np\n')]
|
"""
clicker - rapid command-line user interface development
- Provides convenient syntax and semantics for constructing command-line
interfaces definitions, and tools to speed up development of command-line
applications.
- Define all commands, options, and arguments accepted by an application using
a straight-forward syntax in yaml or json.
- For simple applications, an argument parser is easily instantiated for a
CLI definition, and callbacks for commands/options/arguments are
automatically mapped to Python functions implemented by the user.
(See the main function of this script for an example of this idiom.)
- For complex applications, skeleton Python source code can be generated for
command/option/argument handlers from a CLI definition in yaml/json, which
can then be implemented incrementally by the user.
- The command-line interface definition semantics allow 'inheritance', that
is, deriving a new CLI definition from an existing one, which could be useful
for complex applications with many commands that are more similar than
different.
- Last but far from least, clicker is built using the (outstanding, fantastic,
amazing, where-would-I-be-without-it) Click toolkit:
http://click.pocoo.org/
<NAME> <<EMAIL>>
"""
from __future__ import print_function
import click
import collections
import copy
import json
import sys
import traceback
import yaml
try:
import IPython
pp = IPython.lib.pretty.pprint
def debug():
traceback.print_exc()
IPython.embed()
except ImportError:
pp = print
import pdb
def debug():
traceback.print_exc()
pdb.pm()
def popkey(d, key, default=None):
if key in d:
r = d[key]
del d[key]
return r
return default
def merge(old, new):
def shift(k):
if k in new:
old[k] = new[k]
shift("name")
shift("help")
shift("options")
shift("arguments")
if "commands" in new:
if "commands" not in old:
old["commands"] = new["commands"]
else:
for new_command in new["commands"]:
try:
old_command = [
x for x in old["commands"]
if x["name"] == new_command["name"]
][0]
old["commands"].remove(old_command)
except IndexError:
pass
old["commands"].append(new_command)
if "groups" in new:
if "groups" not in old:
old["groups"] = new["groups"]
else:
for new_group in new["groups"]:
try:
old_group = [
x for x in old["groups"]
if x["name"] == new_group["name"]
][0]
merge(old_group, new_group)
except IndexError:
old["groups"].append(new_group)
return old
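# Minimal sketch of the 'inheritance' semantics above, using made-up definitions:
# a child definition replaces same-named commands wholesale, while groups are
# merged recursively.
#   base = {"name": "app", "commands": [{"name": "run", "help": "old"}]}
#   child = {"commands": [{"name": "run", "help": "new"}]}
#   merge(base, child)["commands"][0]["help"]  # -> "new"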
def stub(
data, fd=sys.stdout, groups=False, get_cb=None, tab=" ", indent=0,
imports=True
):
    # track indentation in a mutable cell so the nested helpers can change it
    level = [indent]
    def tabs(): return level[0] * tab
    def push(): level[0] += 1
    def pop(): level[0] -= 1
    def p(s): fd.write(s)
path = []
if get_cb is None:
get_cb = lambda p: "_".join(p)
    def build_options(o):
        # stub generator: emit an empty parameter list for now
        return ""
    def print_command(c):
        path.append(c["name"])
        p(tabs() + "def %s(%s):\n" % (get_cb(path), build_options(c)))
        push()
        p(tabs() + "pass\n\n")
        pop()
        path.pop()
def print_commands(g):
for c in g.get("commands", ()):
print_command(c)
    def print_group(g):
        path.append(g["name"])
        if groups:
            p(tabs() + "def %s(%s):\n" % (get_cb(path), build_options(g)))
            push()
            p(tabs() + "pass\n\n")
            pop()
        print_commands(g)
        path.pop()
def print_groups(g):
for gg in g.get("groups", ()):
print_group(gg)
if imports:
p(tabs() + "import click\n\n")
p(tabs() + "get_context = click.get_current_context\n")
p(tabs() + "get_obj = lambda: get_context().obj\n\n")
print_group(data)
def build(
data, env=None, get_cb=None, require_commands=True, require_groups=False
):
path = []
if get_cb is None:
def get_cb(p, r):
n = "_".join(p)
f = (env or globals()).get(n)
if not f and r:
raise KeyError("Required callback not found in globals(): %s" % n)
return f
def build_argument(a):
a = copy.copy(a)
name = popkey(a, "name")
a["type"] = eval(a.get("type", "None"), {"click": click})
a["default"] = eval(a.get("default", "None"))
a["nargs"] = eval(a.get("nargs", "None"))
return click.Argument([name], **a)
def build_arguments(c):
return [build_argument(x) for x in c.get("arguments", ())]
def build_option(o):
o = copy.copy(o)
name = popkey(o, "name").split(" ")
o["type"] = eval(o.get("type", "None"), {"click": click})
o["default"] = eval(o.get("default", "None"))
for n in name:
if n.startswith("--"):
break
else:
n = None
if n:
o["envvar"] = "%s_%s" % (
"_".join(path).upper(),
n[2:].replace("-", "_").upper()
)
return click.Option(name, **o)
def build_options(o):
return [build_option(x) for x in o.get("options", ())]
def build_command(c, require_cb=require_commands, cls=click.Command):
c = copy.copy(c)
path.append(c["name"])
try:
c["callback"] = get_cb(path, require_cb)
c["params"] = build_options(c)
c["params"].extend(build_arguments(c))
popkey(c, "options")
popkey(c, "arguments")
popkey(c, "commands")
name = popkey(c, "name")
return cls(name, **c)
finally:
path.pop()
def build_commands(g):
return [build_command(x) for x in g.get("commands", ())]
def build_group(g):
group = build_command(g, require_cb=require_groups, cls=click.Group)
try:
path.append(g["name"])
for subgroup in build_groups(g):
group.add_command(subgroup, name=subgroup.name)
for command in build_commands(g):
group.add_command(command)
return group
finally:
path.pop()
def build_groups(g):
return [build_group(x) for x in g.get("groups", ())]
if len(data.get("groups", ())) == 0 and len(data.get("commands", ())) == 0:
rv = build_command(data)
else:
rv = build_group(data)
return rv
#return build_group(data)
def _setup_yaml():
def representer(dumper, data):
return dumper.represent_dict(data.items())
def constructor(loader, node):
return collections.OrderedDict(loader.construct_pairs(node))
tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
yaml.add_representer(collections.OrderedDict, representer)
yaml.add_constructor(tag, constructor)
JSON = "json"
YAML = "yaml"
def loads(s, type=YAML, data={}):
"Load from string."
if type == JSON:
new_data = json.loads(s, object_pairs_hook=collections.OrderedDict)
elif type == YAML:
_setup_yaml()
new_data = yaml.load(s, Loader=yaml.loader.BaseLoader)
else:
raise ValueError("Invalid type: %s" % type)
return merge(data, new_data)
def loadf(f, type=None, data={}):
"Load from file."
if type is None:
        if f.lower().endswith("json"):
            type = JSON
        elif f.lower()[-4:] in (".yml", "yaml"):
type = YAML
else:
raise ValueError("Can't determine file type: %s" % f)
with open(f) as fd:
return loads(fd.read(), type=type, data=data)
def loadmf(files, type=None, data={}):
"Load from many files."
for f in files:
        loadf(f, type=type, data=data)
return data
def loadfd(fd, type=YAML, data={}):
"Load from file descriptor."
raise NotImplementedError()
def loadmfd(fds, type=YAML, data={}):
"Load from many file descriptors."
raise NotImplementedError()
class Cli:
def __init__(self):
self.data = {}
self.cli = None
def loads(self, s, type=YAML):
loads(s, type=type, data=self.data)
def loadf(self, file, type=None):
loadf(file, type=type, data=self.data)
def loadmf(self, files, type=None):
loadmf(files, data=self.data)
def loadfd(self, fd, type=YAML):
loadfd(fd, type=type, data=self.data)
def loadmfd(self, fds, type=YAML):
loadmfd(fds, type=type, data=self.data)
def build(self, *args, **kwargs):
self.cli = build(self.data, *args, **kwargs)
def run(self, *args, **kwargs):
self.build(*args, **kwargs)
self.cli()
def clear(self):
self.__init__()
_yaml = """
name: clicker
help: Do things with clicker CLI definitions
commands:
- name: merge
help: Merge multiple definition files into one
options:
- name: -o --output
help: Output file, default -
type: click.File('wb')
default: '"-"'
- name: -f --format
help: Output format, default yaml
type: click.Choice(["json", "yaml"])
default: '"yaml"'
arguments:
- name: files
nargs: -1
required: yes
- name: stub
    help: Generate Python stubs from definition files
options:
- name: -o --output
help: Output file, default -
type: click.File("wb")
default: '"-"'
- name: -g --groups
help: Generate group callbacks
is_flag: yes
- name: --no-imports
help: Don't generate imports
is_flag: yes
- name: -t --tab
help: Tab string, default '" "'
default: '" "'
- name: -c --click-stubs
help: Generate Click stubs
is_flag: yes
arguments:
- name: files
nargs: -1
required: yes
"""
def clicker_merge(output, format, files):
d = loadmf(files)
if format == YAML:
output.write(yaml.dump(d))
elif format == JSON:
output.write(json.dumps(d))
def clicker_stub(output, groups, no_imports, tab, click_stubs, files):
d = loadmf(files)
    stub(d, fd=output, groups=groups, imports=(not no_imports), tab=tab)
def main():
cli = Cli()
cli.loads(_yaml)
cli.run(require_groups=True)
if __name__ == "__main__":
main()
|
[
"pdb.pm",
"yaml.load",
"traceback.print_exc",
"yaml.add_constructor",
"json.loads",
"click.Argument",
"yaml.dump",
"copy.copy",
"IPython.embed",
"json.dumps",
"click.Option",
"yaml.add_representer"
] |
[((6377, 6435), 'yaml.add_representer', 'yaml.add_representer', (['collections.OrderedDict', 'representer'], {}), '(collections.OrderedDict, representer)\n', (6397, 6435), False, 'import yaml\n'), ((6438, 6476), 'yaml.add_constructor', 'yaml.add_constructor', (['tag', 'constructor'], {}), '(tag, constructor)\n', (6458, 6476), False, 'import yaml\n'), ((1481, 1502), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1500, 1502), False, 'import traceback\n'), ((1507, 1522), 'IPython.embed', 'IPython.embed', ([], {}), '()\n', (1520, 1522), False, 'import IPython\n'), ((4132, 4144), 'copy.copy', 'copy.copy', (['a'], {}), '(a)\n', (4141, 4144), False, 'import copy\n'), ((4343, 4370), 'click.Argument', 'click.Argument', (['[name]'], {}), '([name], **a)\n', (4357, 4370), False, 'import click\n'), ((4493, 4505), 'copy.copy', 'copy.copy', (['o'], {}), '(o)\n', (4502, 4505), False, 'import copy\n'), ((4878, 4901), 'click.Option', 'click.Option', (['name'], {}), '(name, **o)\n', (4890, 4901), False, 'import click\n'), ((5067, 5079), 'copy.copy', 'copy.copy', (['c'], {}), '(c)\n', (5076, 5079), False, 'import copy\n'), ((6599, 6655), 'json.loads', 'json.loads', (['s'], {'object_pairs_hook': 'collections.OrderedDict'}), '(s, object_pairs_hook=collections.OrderedDict)\n', (6609, 6655), False, 'import json\n'), ((1588, 1609), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1607, 1609), False, 'import traceback\n'), ((1614, 1622), 'pdb.pm', 'pdb.pm', ([], {}), '()\n', (1620, 1622), False, 'import pdb\n'), ((6710, 6753), 'yaml.load', 'yaml.load', (['s'], {'Loader': 'yaml.loader.BaseLoader'}), '(s, Loader=yaml.loader.BaseLoader)\n', (6719, 6753), False, 'import yaml\n'), ((9405, 9417), 'yaml.dump', 'yaml.dump', (['d'], {}), '(d)\n', (9414, 9417), False, 'import yaml\n'), ((9459, 9472), 'json.dumps', 'json.dumps', (['d'], {}), '(d)\n', (9469, 9472), False, 'import json\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2020/12/30 14:40
# @Author : way
# @Site :
# @Describe: data processing
import os
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
############################################# Merge the data files ##########################################################
# Keep only the fields needed for the analysis; there are far too many columns, and
# dropping the unused ones saves a great deal of memory and speeds processing up.
dir = r"C:\Users\Administrator\Desktop\AgeOfBarbarians"
data_list = []
for path in os.listdir(dir):
path = os.path.join(dir, path)
data = pd.read_csv(path)
data = data[
['user_id', 'register_time', 'pvp_battle_count', 'pvp_lanch_count', 'pvp_win_count', 'pve_battle_count',
'pve_lanch_count', 'pve_win_count', 'avg_online_minutes', 'pay_price', 'pay_count']
]
data_list.append(data)
data = pd.concat(data_list)
############################################# Output checks ##########################################################
# no duplicated rows
# print(data[data.duplicated()])
# no missing values
# print(data.isnull().sum())
############################################# Save the data ##########################################################
# save the cleaned data to mysql
engine = create_engine('mysql://root:root@172.16.122.25:3306/test?charset=utf8')
data.to_sql('age_of_barbarians', con=engine, index=False, if_exists='append')
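# Hedged sketch: the cleaned table can be read back for analysis with pandas,
# reusing the engine created above.
#   df = pd.read_sql('age_of_barbarians', con=engine)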
|
[
"os.listdir",
"pandas.read_csv",
"sqlalchemy.create_engine",
"os.path.join",
"pandas.concat"
] |
[((445, 460), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (455, 460), False, 'import os\n'), ((789, 809), 'pandas.concat', 'pd.concat', (['data_list'], {}), '(data_list)\n', (798, 809), True, 'import pandas as pd\n'), ((1137, 1208), 'sqlalchemy.create_engine', 'create_engine', (['"""mysql://root:root@172.16.122.25:3306/test?charset=utf8"""'], {}), "('mysql://root:root@172.16.122.25:3306/test?charset=utf8')\n", (1150, 1208), False, 'from sqlalchemy import create_engine\n'), ((473, 496), 'os.path.join', 'os.path.join', (['dir', 'path'], {}), '(dir, path)\n', (485, 496), False, 'import os\n'), ((508, 525), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (519, 525), True, 'import pandas as pd\n')]
|
import logging
from typing import Optional
from jinja2 import Environment
from jinja2 import FileSystemLoader
from nefelibata import __version__
from nefelibata.builders import Builder
from nefelibata.builders import Scope
from nefelibata.builders.utils import hash_n
from nefelibata.builders.utils import random_color
from nefelibata.post import get_posts
_logger = logging.getLogger(__name__)
class IndexBuilder(Builder):
scopes = [Scope.SITE]
def process_site(self, force: bool = True) -> None:
"""Generate index and archives."""
_logger.info("Creating index")
env = Environment(
loader=FileSystemLoader(
str(self.root / "templates" / self.config["theme"]),
),
)
template = env.get_template("index.html")
posts = get_posts(self.root)
posts.sort(key=lambda x: x.date, reverse=True)
show = self.config.get("posts-to-show", 10)
# first page; these will be updated
page = 1
name: Optional[str] = "index.html"
previous: Optional[str] = None
while name:
page_posts, posts = posts[:show], posts[show:]
# link to next page
next = f"archive{page}.html" if posts else None
html = template.render(
__version__=__version__,
config=self.config,
language=self.config["language"],
posts=page_posts,
breadcrumbs=[("Recent Posts", None)],
previous=previous,
next=next,
hash_n=hash_n,
random_color=random_color,
)
file_path = self.root / "build" / name
with open(file_path, "w") as fp:
fp.write(html)
page += 1
previous, name = name, next
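# The while loop above pages through posts `show` at a time; the slicing idiom in
# isolation (toy data):
#   items, show = list(range(25)), 10
#   while items:
#       page_items, items = items[:show], items[show:]
#       # render page_items; `next` is None once items is exhausted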
|
[
"nefelibata.post.get_posts",
"logging.getLogger"
] |
[((370, 397), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (387, 397), False, 'import logging\n'), ((821, 841), 'nefelibata.post.get_posts', 'get_posts', (['self.root'], {}), '(self.root)\n', (830, 841), False, 'from nefelibata.post import get_posts\n')]
|
#!/usr/bin/env python
# THIS SHEBANG IS REALLY REALLY IMPORTANT
import rospy
import time
from std_msgs.msg import Int16MultiArray
if __name__ == '__main__':
try:
rospy.init_node('simple_publisher')
# Tell ros we are publishing to the robot topic
pub = rospy.Publisher('/robot', Int16MultiArray, queue_size=0)
# Setup our message
out = Int16MultiArray()
val = 20
# generate the message data
for j in range(0,4):
# set the joint angles
out.data = [0,50,50,50,int(val)]
# send the message
pub.publish(out)
# do some book keeping
val += 10
rospy.logwarn("Sent a message: {0}".format(val))
time.sleep(1)
except rospy.ROSInterruptException:
rospy.logwarn('ERROR!!!')
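# Hedged companion sketch (not part of the original script): a minimal
# subscriber for checking the messages on the same topic.
#   def cb(msg): rospy.loginfo(msg.data)
#   rospy.Subscriber('/robot', Int16MultiArray, cb)
#   rospy.spin()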
|
[
"rospy.logwarn",
"rospy.Publisher",
"time.sleep",
"rospy.init_node",
"std_msgs.msg.Int16MultiArray"
] |
[((176, 211), 'rospy.init_node', 'rospy.init_node', (['"""simple_publisher"""'], {}), "('simple_publisher')\n", (191, 211), False, 'import rospy\n'), ((282, 338), 'rospy.Publisher', 'rospy.Publisher', (['"""/robot"""', 'Int16MultiArray'], {'queue_size': '(0)'}), "('/robot', Int16MultiArray, queue_size=0)\n", (297, 338), False, 'import rospy\n'), ((381, 398), 'std_msgs.msg.Int16MultiArray', 'Int16MultiArray', ([], {}), '()\n', (396, 398), False, 'from std_msgs.msg import Int16MultiArray\n'), ((751, 764), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (761, 764), False, 'import time\n'), ((814, 839), 'rospy.logwarn', 'rospy.logwarn', (['"""ERROR!!!"""'], {}), "('ERROR!!!')\n", (827, 839), False, 'import rospy\n')]
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from .models import Partner, MediaPatron, MediaPatronage, NormalMediaPatronage, Colaborator
def activate_event(modeladmin, request, queryset):
for event in queryset.iterator():
event.active = True
event.save()
activate_event.short_description = u'Oznacz wybrane wydarzenia jako aktywne'
class MediaPatronageAdmin(admin.ModelAdmin):
list_display = ['name', 'city', 'spot',
'start', 'end', 'active', 'activated',
'contact_email',
'created', 'modified']
actions = [activate_event, ]
class NormalMediaPatronageAdmin(admin.ModelAdmin):
list_display = ['name', 'start', 'end', 'active']
admin.site.register(MediaPatronage, MediaPatronageAdmin)
admin.site.register(NormalMediaPatronage, NormalMediaPatronageAdmin)
admin.site.register(Partner)
admin.site.register(MediaPatron)
admin.site.register(Colaborator)
|
[
"django.contrib.admin.site.register"
] |
[((746, 802), 'django.contrib.admin.site.register', 'admin.site.register', (['MediaPatronage', 'MediaPatronageAdmin'], {}), '(MediaPatronage, MediaPatronageAdmin)\n', (765, 802), False, 'from django.contrib import admin\n'), ((803, 871), 'django.contrib.admin.site.register', 'admin.site.register', (['NormalMediaPatronage', 'NormalMediaPatronageAdmin'], {}), '(NormalMediaPatronage, NormalMediaPatronageAdmin)\n', (822, 871), False, 'from django.contrib import admin\n'), ((873, 901), 'django.contrib.admin.site.register', 'admin.site.register', (['Partner'], {}), '(Partner)\n', (892, 901), False, 'from django.contrib import admin\n'), ((902, 934), 'django.contrib.admin.site.register', 'admin.site.register', (['MediaPatron'], {}), '(MediaPatron)\n', (921, 934), False, 'from django.contrib import admin\n'), ((935, 967), 'django.contrib.admin.site.register', 'admin.site.register', (['Colaborator'], {}), '(Colaborator)\n', (954, 967), False, 'from django.contrib import admin\n')]
|
from app import create_app, db
from flask_script import Manager, Server
# Connect to models
from app.models import User, Category
# Set up migrations
from flask_migrate import Migrate,MigrateCommand
import os
# SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://francis:1234@localhost/blog'
# Creating app instance
# app = create_app('test')
# app = create_app('development')
app = create_app('production')
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://francis:1234@localhost/blogs'
# Create manager instance
manager = Manager(app)
# Create migrate instance
migrate = Migrate(app,db)
manager.add_command('server', Server)
manager.add_command('db',MigrateCommand)
@manager.command
def test():
'''
Run the unit tests
'''
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.shell
def make_shell_context():
return dict(app=app, db=db, Category=Category)
if __name__ == '__main__':
manager.run()
|
[
"unittest.TextTestRunner",
"flask_script.Manager",
"app.create_app",
"flask_migrate.Migrate",
"unittest.TestLoader"
] |
[((383, 407), 'app.create_app', 'create_app', (['"""production"""'], {}), "('production')\n", (393, 407), False, 'from app import create_app, db\n'), ((526, 538), 'flask_script.Manager', 'Manager', (['app'], {}), '(app)\n', (533, 538), False, 'from flask_script import Manager, Server\n'), ((576, 592), 'flask_migrate.Migrate', 'Migrate', (['app', 'db'], {}), '(app, db)\n', (583, 592), False, 'from flask_migrate import Migrate, MigrateCommand\n'), ((774, 795), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (793, 795), False, 'import unittest\n'), ((818, 854), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (841, 854), False, 'import unittest\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 28 00:02:08 2017
@author: kht
"""
import tensorflow as tf
import translate as tl
import numpy as np
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape = shape)
return tf.Variable(initial)
einputs,dinputs,res_logits,all_attens=tl.self_decode()
einputs_t=[]
dinputs_t=[]
res_logits_t=[]
num_exp=len(res_logits)
for i in range(100):
einputs_t.append(einputs[num_exp-i-1])
dinputs_t.append(dinputs[num_exp-i-1])
res_logits_t.append(res_logits[num_exp-i-1])
batch_size=32
maxlen=13
sess = tf.InteractiveSession()
w_fc2 = weight_variable([128, 20])
b_fc2 = bias_variable([20])
x=tf.placeholder(tf.float32,[None,128])
y_=tf.placeholder(tf.float32,[None,20])
y_conv = tf.nn.softmax(tf.matmul(x, w_fc2) + b_fc2)
# train and evaluate the model
cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
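# Hedged note: the hand-rolled log(softmax) above can underflow for near-zero
# probabilities; TF1 also offers a numerically stabler equivalent applied to the
# pre-softmax logits, e.g.
#   tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=tf.matmul(x, w_fc2) + b_fc2)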
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
res=tf.argmax(y_conv, 1)
resreal=tf.argmax(y_, 1)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
init=tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
saver = tf.train.Saver()
saver.restore(sess, "train/NumAdd.ckpt")
for i in range(len(res_logits_t)):
din=dinputs_t[i]
dlogit=res_logits_t[i]
'''
for j in range(batch_size):
batch_x=[]
batch_y=np.zeros([13,20],dtype=np.float32)
for k in range(maxlen):
batch_y[k][din[k][j]]=1
dx=dlogit[k][j]
batch_x.append(dx)
print(sess.run(correct_prediction,feed_dict={x: batch_x, y_: batch_y}))
print('-----------------------------------------------------------------------')
print("**************************************************************************************")
'''
for j in range(batch_size):
batch_x=[]
batch_y=np.zeros([13,20],dtype=np.float32)
for k in range(maxlen):
batch_y[k][din[k][j]]=1
dx=dlogit[k][j]
batch_x.append(dx)
print(sess.run(res,feed_dict={x: batch_x, y_: batch_y}))
print(sess.run(resreal,feed_dict={x: batch_x, y_: batch_y}))
print('-----------------------------------------------------------------------')
|
[
"tensorflow.train.Saver",
"tensorflow.argmax",
"tensorflow.Session",
"numpy.zeros",
"tensorflow.constant",
"tensorflow.placeholder",
"tensorflow.cast",
"tensorflow.Variable",
"tensorflow.matmul",
"translate.self_decode",
"tensorflow.initialize_all_variables",
"tensorflow.log",
"tensorflow.InteractiveSession",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.truncated_normal"
] |
[((410, 426), 'translate.self_decode', 'tl.self_decode', ([], {}), '()\n', (424, 426), True, 'import translate as tl\n'), ((685, 708), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (706, 708), True, 'import tensorflow as tf\n'), ((775, 814), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 128]'], {}), '(tf.float32, [None, 128])\n', (789, 814), True, 'import tensorflow as tf\n'), ((816, 854), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 20]'], {}), '(tf.float32, [None, 20])\n', (830, 854), True, 'import tensorflow as tf\n'), ((1140, 1160), 'tensorflow.argmax', 'tf.argmax', (['y_conv', '(1)'], {}), '(y_conv, 1)\n', (1149, 1160), True, 'import tensorflow as tf\n'), ((1169, 1185), 'tensorflow.argmax', 'tf.argmax', (['y_', '(1)'], {}), '(y_, 1)\n', (1178, 1185), True, 'import tensorflow as tf\n'), ((1256, 1285), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (1283, 1285), True, 'import tensorflow as tf\n'), ((191, 229), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (210, 229), True, 'import tensorflow as tf\n'), ((241, 261), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (252, 261), True, 'import tensorflow as tf\n'), ((303, 332), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': 'shape'}), '(0.1, shape=shape)\n', (314, 332), True, 'import tensorflow as tf\n'), ((346, 366), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (357, 366), True, 'import tensorflow as tf\n'), ((1096, 1116), 'tensorflow.argmax', 'tf.argmax', (['y_conv', '(1)'], {}), '(y_conv, 1)\n', (1105, 1116), True, 'import tensorflow as tf\n'), ((1118, 1134), 'tensorflow.argmax', 'tf.argmax', (['y_', '(1)'], {}), '(y_, 1)\n', (1127, 1134), True, 'import tensorflow as tf\n'), ((1212, 1248), 'tensorflow.cast', 'tf.cast', (['correct_prediction', '"""float"""'], {}), "(correct_prediction, 'float')\n", (1219, 1248), True, 'import tensorflow as tf\n'), ((1292, 1304), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1302, 1304), True, 'import tensorflow as tf\n'), ((1345, 1361), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1359, 1361), True, 'import tensorflow as tf\n'), ((877, 896), 'tensorflow.matmul', 'tf.matmul', (['x', 'w_fc2'], {}), '(x, w_fc2)\n', (886, 896), True, 'import tensorflow as tf\n'), ((1001, 1040), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['(0.01)'], {}), '(0.01)\n', (1034, 1040), True, 'import tensorflow as tf\n'), ((972, 986), 'tensorflow.log', 'tf.log', (['y_conv'], {}), '(y_conv)\n', (978, 986), True, 'import tensorflow as tf\n'), ((2152, 2188), 'numpy.zeros', 'np.zeros', (['[13, 20]'], {'dtype': 'np.float32'}), '([13, 20], dtype=np.float32)\n', (2160, 2188), True, 'import numpy as np\n')]
|
# Generated by Django 3.0.7 on 2020-09-25 03:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('publiapp_api', '0009_auto_20200922_0303'),
]
operations = [
migrations.CreateModel(
name='Ubigeo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('codigo_ubigeo', models.CharField(max_length=6)),
('departamento', models.CharField(max_length=50)),
('provincia', models.CharField(max_length=50)),
('distrito', models.CharField(max_length=50)),
],
),
migrations.AlterField(
model_name='precio',
name='anuncio',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='precios', to='publiapp_api.Anuncio'),
),
]
|
[
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.AutoField"
] |
[((857, 975), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""precios"""', 'to': '"""publiapp_api.Anuncio"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='precios', to='publiapp_api.Anuncio')\n", (874, 975), False, 'from django.db import migrations, models\n'), ((368, 461), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (384, 461), False, 'from django.db import migrations, models\n'), ((494, 524), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(6)'}), '(max_length=6)\n', (510, 524), False, 'from django.db import migrations, models\n'), ((560, 591), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (576, 591), False, 'from django.db import migrations, models\n'), ((624, 655), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (640, 655), False, 'from django.db import migrations, models\n'), ((687, 718), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (703, 718), False, 'from django.db import migrations, models\n')]
|
#Various functions and utilities that we use to work with text
import re
import string
from string import punctuation
from string import digits
import pandas as pd
import numpy as np
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim import corpora, models
from nltk.stem import *
import nltk  # needed below for nltk.download and nltk.sent_tokenize
nltk.download("stopwords", quiet=True)
nltk.download("punkt", quiet=True)
stop_words = set(
stopwords.words("english") + list(string.punctuation) + ["\\n"] + ["quot"]
)
regex_str = [
r"http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|" r"[!*\(\),](?:%[0-9a-f][0-9a-f]))+",
r"(?:\w+-\w+){2}",
r"(?:\w+-\w+)",
r"(?:\\\+n+)",
r"(?:@[\w_]+)",
r"<[^>]+>",
r"(?:\w+'\w)",
r"(?:[\w_]+)",
r"(?:\S)",
]
# Create the tokenizer which will be case insensitive and will ignore space.
tokens_re = re.compile(r"(" + "|".join(regex_str) + ")", re.VERBOSE | re.IGNORECASE)
stemmer = PorterStemmer()
def tokenize_document(text, remove_stops=False):
"""Preprocess a whole raw document.
Args:
text (str): Raw string of text.
remove_stops (bool): Flag to remove english stopwords
Return:
List of preprocessed and tokenized documents
"""
return [
clean_and_tokenize(sentence, remove_stops)
for sentence in nltk.sent_tokenize(text)
]
def clean_and_tokenize(text, remove_stops):
"""Preprocess a raw string/sentence of text.
Args:
text (str): Raw string of text.
remove_stops (bool): Flag to remove english stopwords
Return:
tokens (list, str): Preprocessed tokens.
"""
tokens = tokens_re.findall(text)
_tokens = [t.lower() for t in tokens]
filtered_tokens = [
token.replace("-", "_")
for token in _tokens
if not (remove_stops and len(token) <= 2)
and (not remove_stops or token not in stop_words)
and not any(x in token for x in string.digits)
and any(x in token for x in string.ascii_lowercase)
]
return filtered_tokens
def tfidf_vectors(data, max_features):
"""Transforms text to tfidf vectors.
Args:
data (pandas.Series)
Returns:
(`scipy.sparse`): Sparse TFIDF matrix.
"""
vectorizer = TfidfVectorizer(
stop_words="english", analyzer="word", max_features=max_features
)
return vectorizer.fit_transform(data)
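# Usage sketch (toy data): returns an (n_docs x max_features) sparse matrix.
#   X = tfidf_vectors(pd.Series(["cat sat", "dog sat"]), max_features=10)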
#Characters to drop
drop_characters = re.sub('-','',punctuation)+digits
#Stopwords
from nltk.corpus import stopwords
stop = stopwords.words('english')  # NLTK corpus ids are lowercase
#Stem functions
from nltk.stem import *
stemmer = PorterStemmer()
def clean_tokenise(string,drop_characters=drop_characters,stopwords=stop_words):
'''
Takes a string and cleans (makes lowercase and removes stopwords)
'''
#Lowercase
str_low = string.lower()
#Remove symbols and numbers
str_letters = re.sub('[{drop}]'.format(drop=drop_characters),'',str_low)
#Remove stopwords
    clean = [x for x in str_letters.split(' ') if (x not in stopwords) & (x!='')]
return(clean)
class CleanTokenize():
'''
This class takes a list of strings and returns a tokenised, clean list of token lists ready
to be processed with the LdaPipeline
It has a clean method to remove symbols and stopwords
It has a bigram method to detect collocated words
It has a stem method to stem words
'''
def __init__(self,corpus):
'''
Takes a corpus (list where each element is a string)
'''
#Store
self.corpus = corpus
def clean(self,drop=drop_characters,stopwords=stop):
'''
Removes strings and stopwords,
'''
        cleaned = [clean_tokenise(doc,drop_characters=drop,stopwords=stopwords) for doc in self.corpus]
self.tokenised = cleaned
return(self)
def stem(self):
'''
Optional: stems words
'''
#Stems each word in each tokenised sentence
stemmed = [[stemmer.stem(word) for word in sentence] for sentence in self.tokenised]
self.tokenised = stemmed
return(self)
def bigram(self,threshold=10):
'''
Optional Create bigrams.
'''
#Colocation detector trained on the data
phrases = models.Phrases(self.tokenised,threshold=threshold)
bigram = models.phrases.Phraser(phrases)
self.tokenised = bigram[self.tokenised]
return(self)
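# Usage sketch (toy corpus): each method returns self, so the steps chain.
#   ct = CleanTokenize(["The cat sat on the mat", "A dog ran"]).clean().bigram()
#   ct.tokenised  # token lists, with detected collocations joined by '_'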
def salient_words_per_category(token_df,corpus_freqs,thres=100,top_words=50):
'''
Create a list of salient terms in a df (salient terms normalised by corpus frequency).
Args:
tokens (list or series) a list where every element is a tokenised abstract
corpus_freqs (df) are the frequencies of terms in the whole corpus
thres (int) is the number of occurrences of a term in the subcorpus
top_words (int) is the number of salient words to output
'''
subcorpus_freqs = flatten_freq(token_df,freq=True)
merged= pd.concat([pd.DataFrame(subcorpus_freqs),corpus_freqs],axis=1,sort=True)
merged['salience'] = (merged.iloc[:,0]/merged.iloc[:,1])
results = merged.loc[merged.iloc[:,0]>thres].sort_values('salience',ascending=False).iloc[:top_words]
results.columns = ['sub_corpus','corpus','salience']
return results
def get_term_salience(df,sel_var,sel_term,corpus_freqs,thres=100,top_words=50):
'''
Returns a list of salient terms per SDG
Args:
df (df) is a df of interest
sel_var (str) is the variable we use to select
sel_term (str) is the term we use to select
corpus_freqs (df) is a df with corpus frequencies
thres (int) is the min number of word occurrences
top_words (int) is the number of words to report
'''
rel_corp = df.loc[df[sel_var]==sel_term].drop_duplicates('project_id')['tokenised_abstract']
salient_rel = salient_words_per_category(list(rel_corp),corpus_freqs,thres,top_words)
salient_rel.rename(columns={'sub_corpus':f'{str(sel_term)}_freq','corpus':'all_freq',
'salience':f'{str(sel_term)}_salience'},inplace=True)
return(salient_rel)
class LdaPipeline():
'''
This class processes lists of keywords.
How does it work?
-It is initialised with a list where every element is a collection of keywords
-It has a method to filter keywords removing those that appear less than a set number of times
-It has a method to process the filtered df into an object that gensim can work with
-It has a method to train the LDA model with the right parameters
-It has a method to predict the topics in a corpus
'''
def __init__(self,corpus):
'''
Takes the list of terms
'''
#Store the corpus
self.tokenised = corpus
def filter(self,minimum=5):
'''
Removes keywords that appear less than 5 times.
'''
#Load
tokenised = self.tokenised
#Count tokens
token_counts = pd.Series([x for el in tokenised for x in el]).value_counts()
#Tokens to keep
keep = token_counts.index[token_counts>minimum]
#Filter
tokenised_filtered = [[x for x in el if x in keep] for el in tokenised]
#Store
self.tokenised = tokenised_filtered
self.empty_groups = np.sum([len(x)==0 for x in tokenised_filtered])
return(self)
def clean(self):
'''
Remove symbols and numbers
'''
def process(self):
'''
This creates the bag of words we use in the gensim analysis
'''
#Load the list of keywords
tokenised = self.tokenised
#Create the dictionary
dictionary = corpora.Dictionary(tokenised)
#Create the Bag of words. This converts keywords into ids
corpus = [dictionary.doc2bow(x) for x in tokenised]
self.corpus = corpus
self.dictionary = dictionary
return(self)
def tfidf(self):
'''
This is optional: We extract the term-frequency inverse document frequency of the words in
the corpus. The idea is to identify those keywords that are more salient in a document by normalising over
their frequency in the whole corpus
'''
#Load the corpus
corpus = self.corpus
#Fit a TFIDF model on the data
tfidf = models.TfidfModel(corpus)
#Transform the corpus and save it
self.corpus = tfidf[corpus]
return(self)
def fit_lda(self,num_topics=20,passes=5,iterations=75,random_state=1803):
'''
This fits the LDA model taking a set of keyword arguments.
#Number of passes, iterations and random state for reproducibility. We will have to consider
reproducibility eventually.
'''
#Load the corpus
corpus = self.corpus
#Train the LDA model with the parameters we supplied
lda = models.LdaModel(corpus,id2word=self.dictionary,
num_topics=num_topics,passes=passes,iterations=iterations,random_state=random_state)
#Save the outputs
self.lda_model = lda
self.lda_topics = lda.show_topics(num_topics=num_topics)
return(self)
def predict_topics(self):
'''
This predicts the topic mix for every observation in the corpus
'''
#Load the attributes we will be working with
lda = self.lda_model
corpus = self.corpus
#Now we create a df
predicted = lda[corpus]
#Convert this into a dataframe
predicted_df = pd.concat([pd.DataFrame({x[0]:x[1] for x in topics},
index=[num]) for num,topics in enumerate(predicted)]).fillna(0)
self.predicted_df = predicted_df
return(self)
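# End-to-end sketch (assuming `tokenised_docs` is a list of token lists): every
# step returns self, so the pipeline chains.
#   lda = LdaPipeline(tokenised_docs).filter(minimum=5).process().tfidf().fit_lda(num_topics=20)
#   topic_mix = lda.predict_topics().predicted_df  # rows = documents, columns = topics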
|
[
"pandas.DataFrame",
"gensim.models.phrases.Phraser",
"sklearn.feature_extraction.text.TfidfVectorizer",
"string.lower",
"gensim.models.TfidfModel",
"gensim.models.Phrases",
"gensim.corpora.Dictionary",
"gensim.models.LdaModel",
"pandas.Series",
"nltk.corpus.stopwords.words",
"re.sub"
] |
[((2520, 2546), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""English"""'], {}), "('English')\n", (2535, 2546), False, 'from nltk.corpus import stopwords\n'), ((2255, 2341), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'stop_words': '"""english"""', 'analyzer': '"""word"""', 'max_features': 'max_features'}), "(stop_words='english', analyzer='word', max_features=\n max_features)\n", (2270, 2341), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((2432, 2460), 're.sub', 're.sub', (['"""-"""', '""""""', 'punctuation'], {}), "('-', '', punctuation)\n", (2438, 2460), False, 'import re\n'), ((2816, 2830), 'string.lower', 'string.lower', ([], {}), '()\n', (2828, 2830), False, 'import string\n'), ((4394, 4445), 'gensim.models.Phrases', 'models.Phrases', (['self.tokenised'], {'threshold': 'threshold'}), '(self.tokenised, threshold=threshold)\n', (4408, 4445), False, 'from gensim import corpora, models\n'), ((4471, 4502), 'gensim.models.phrases.Phraser', 'models.phrases.Phraser', (['phrases'], {}), '(phrases)\n', (4493, 4502), False, 'from gensim import corpora, models\n'), ((8105, 8134), 'gensim.corpora.Dictionary', 'corpora.Dictionary', (['tokenised'], {}), '(tokenised)\n', (8123, 8134), False, 'from gensim import corpora, models\n'), ((8805, 8830), 'gensim.models.TfidfModel', 'models.TfidfModel', (['corpus'], {}), '(corpus)\n', (8822, 8830), False, 'from gensim import corpora, models\n'), ((9424, 9564), 'gensim.models.LdaModel', 'models.LdaModel', (['corpus'], {'id2word': 'self.dictionary', 'num_topics': 'num_topics', 'passes': 'passes', 'iterations': 'iterations', 'random_state': 'random_state'}), '(corpus, id2word=self.dictionary, num_topics=num_topics,\n passes=passes, iterations=iterations, random_state=random_state)\n', (9439, 9564), False, 'from gensim import corpora, models\n'), ((5191, 5220), 'pandas.DataFrame', 'pd.DataFrame', (['subcorpus_freqs'], {}), '(subcorpus_freqs)\n', (5203, 5220), True, 'import pandas as pd\n'), ((434, 460), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (449, 460), False, 'from nltk.corpus import stopwords\n'), ((7316, 7362), 'pandas.Series', 'pd.Series', (['[x for el in tokenised for x in el]'], {}), '([x for el in tokenised for x in el])\n', (7325, 7362), True, 'import pandas as pd\n'), ((10149, 10204), 'pandas.DataFrame', 'pd.DataFrame', (['{x[0]: x[1] for x in topics}'], {'index': '[num]'}), '({x[0]: x[1] for x in topics}, index=[num])\n', (10161, 10204), True, 'import pandas as pd\n')]
|
import torch
from torch import nn
import os.path
import torchvision.transforms as transforms
from EnlightenGAN.data.base_dataset import BaseDataset, get_transform
from EnlightenGAN.data.image_folder import make_dataset
import random
from PIL import Image
import PIL
from pdb import set_trace as st
import numpy as np
from skimage import color, feature
from skimage.filters import gaussian
def pad_tensor(input):
height_org, width_org = input.shape[2], input.shape[3]
divide = 16
if width_org % divide != 0 or height_org % divide != 0:
width_res = width_org % divide
height_res = height_org % divide
if width_res != 0:
width_div = divide - width_res
pad_left = int(width_div / 2)
pad_right = int(width_div - pad_left)
else:
pad_left = 0
pad_right = 0
if height_res != 0:
height_div = divide - height_res
pad_top = int(height_div / 2)
pad_bottom = int(height_div - pad_top)
else:
pad_top = 0
pad_bottom = 0
padding = nn.ReflectionPad2d((pad_left, pad_right, pad_top, pad_bottom))
input = padding(input).data
else:
pad_left = 0
pad_right = 0
pad_top = 0
pad_bottom = 0
height, width = input.shape[2], input.shape[3]
    assert width % divide == 0, 'width cannot be divided evenly by the stride'
    assert height % divide == 0, 'height cannot be divided evenly by the stride'
return input, pad_left, pad_right, pad_top, pad_bottom
def pad_tensor_back(input, pad_left, pad_right, pad_top, pad_bottom):
height, width = input.shape[2], input.shape[3]
return input[:,:, pad_top: height - pad_bottom, pad_left: width - pad_right]
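# Usage sketch: pad an arbitrary-sized batch up to a multiple of 16 for the
# network, then crop the output back to the original size.
#   x, l, r, t, b = pad_tensor(torch.randn(1, 3, 250, 333))
#   y = pad_tensor_back(x, l, r, t, b)  # y.shape == (1, 3, 250, 333)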
class UnalignedDataset(BaseDataset):
def _reinit_A_paths(self):
self.A_paths = self.pos_names# + np.random.choice(self.neg_names_all, int(948/(10/1)), replace=False).tolist()
random.shuffle(self.A_paths)
self.B_paths = list(self.A_paths)
self.A_size = len(self.A_paths)
self.B_size = len(self.B_paths)
def initialize(self, opt):
self.opt = opt
self.root = opt.dataroot
##############################
# self.dir_A = os.path.join(opt.dataroot)#, opt.phase + 'A')
# self.dir_B = os.path.join(opt.dataroot)#, opt.phase + 'B')
if not 'images' in self.opt.name:
self.dir_A = os.path.join("/ssd1/chenwy/bdd100k/seg_luminance/0_100/", opt.phase)
self.dir_B = os.path.join("/ssd1/chenwy/bdd100k/seg_luminance/100_255/", opt.phase)
# self.dir_A = os.path.join("/ssd1/chenwy/bdd100k/seg_luminance/0_75/", opt.phase)
# self.dir_B = os.path.join("/ssd1/chenwy/bdd100k/seg_luminance/100_105/", opt.phase)
else:
self.dir_A = os.path.join("/ssd1/chenwy/bdd100k/images_luminance/100k/0_100/", opt.phase)
self.dir_B = os.path.join("/ssd1/chenwy/bdd100k/images_luminance/100k/100_255/", opt.phase)
# self.dir_A = os.path.join("/ssd1/chenwy/bdd100k/images_luminance/100k/0_75/", opt.phase)
# self.dir_B = os.path.join("/ssd1/chenwy/bdd100k/images_luminance/100k/100_105/", opt.phase)
##############################
self.A_paths = make_dataset(self.dir_A)
self.B_paths = make_dataset(self.dir_B)
self.A_paths = sorted(self.A_paths)
self.B_paths = sorted(self.B_paths)
self.A_size = len(self.A_paths)
self.B_size = len(self.B_paths)
self.transform = get_transform(opt)
##### load image2reward to resample dataset ############################
# image2reward = np.load("/home/chenwy/DynamicLightEnlighten/image2reward.npy").item()
# self.pos = []; self.pos_names = []; self.neg_names_all = []
# for k, v in image2reward.items():
# if v > 0:
# self.pos.append(v)
# self.pos_names.append(k)
# elif v < 0:
# self.neg_names_all.append(k)
# self.pos_names = [k for v,k in sorted(zip(self.pos, self.pos_names), reverse=True)]
# self._reinit_A_paths()
#################################
self.low_range = range(55, 70)
self.high_range = range(110, 125)
self.N_TRY = 20
def __getitem__(self, index_A):
A_path = self.A_paths[index_A % self.A_size]
index_B = random.randint(0, self.B_size - 1)
B_path = self.B_paths[index_B % self.B_size]
A_image = Image.open(A_path).convert('RGB')
B_image = Image.open(B_path).convert('RGB')
# A_size = A_img.size
# B_size = B_img.size
# A_size = A_size = (A_size[0]//16*16, A_size[1]//16*16)
# B_size = B_size = (B_size[0]//16*16, B_size[1]//16*16)
# A_img = A_img.resize(A_size, Image.BICUBIC)
# B_img = B_img.resize(B_size, Image.BICUBIC)
# A_gray = A_img.convert('LA')
# A_gray = 255.0-A_gray
w, h = A_image.size
# without luminance selection #####################
# x1 = random.randint(0, w - self.opt.fineSize)
# y1 = random.randint(0, h - self.opt.fineSize)
# A_img = A_image.crop((x1, y1, x1+self.opt.fineSize, y1+self.opt.fineSize))
# B_img = B_image.crop((x1, y1, x1+self.opt.fineSize, y1+self.opt.fineSize))
# A_npy = np.array(A_img)
# B_npy = np.array(B_img)
# r,g,b = A_npy[:, :, 0], A_npy[:, :, 1], A_npy[:, :, 2]
# value_A = (0.299*r+0.587*g+0.114*b) / 255.
# value_A = np.sort(value_A.flatten())
# length = value_A.shape[0]
# value_A = value_A[int(np.round(length * 0.1)) : int(np.round(length * 0.9))].mean()
# if not 'images' in self.opt.name:
# # mask = Image.open(os.path.join("/ssd1/chenwy/bdd100k/seg/labels/", "train", os.path.splitext(A_path.split("/")[-1])[0] + '_train_id.png'))
# mask = Image.open(os.path.join("/ssd1/chenwy/bdd100k/seg/labels/", self.opt.phase, os.path.splitext(A_path.split("/")[-1])[0] + '_train_id.png'))
# mask = np.array(mask.crop((x1, y1, x1+self.opt.fineSize, y1+self.opt.fineSize))).astype('int32') # cropped mask for light_enhance_AB/seg
# mask = self._mask_transform(mask)
# else:
# mask = torch.zeros(1)
###################################################
# patch luminance & mask class diversity selection ###########################
n_try = 0
while n_try < self.N_TRY:
x1 = random.randint(0, w - self.opt.fineSize)
y1 = random.randint(0, h - self.opt.fineSize)
A_img = A_image.crop((x1, y1, x1+self.opt.fineSize, y1+self.opt.fineSize))
B_img = B_image.crop((x1, y1, x1+self.opt.fineSize, y1+self.opt.fineSize))
A_npy = np.array(A_img)
B_npy = np.array(B_img)
r,g,b = A_npy[:, :, 0], A_npy[:, :, 1], A_npy[:, :, 2]
value_A = (0.299*r+0.587*g+0.114*b) / 255.
value_A = np.sort(value_A.flatten())
length = value_A.shape[0]
value_A = value_A[int(np.round(length * 0.1)) : int(np.round(length * 0.9))].mean()
if int(np.round(value_A*255)) not in self.low_range: n_try += 1; continue
r,g,b = B_npy[:, :, 0], B_npy[:, :, 1], B_npy[:, :, 2]
value_B = (0.299*r+0.587*g+0.114*b) / 255.
value_B = np.sort(value_B.flatten())
length = value_B.shape[0]
value_B = value_B[int(np.round(length * 0.1)) : int(np.round(length * 0.9))].mean()
if int(np.round(value_B*255)) not in self.high_range: n_try += 1; continue
if not 'images' in self.opt.name:
# mask = Image.open(os.path.join("/ssd1/chenwy/bdd100k/seg/labels/", "train", os.path.splitext(A_path.split("/")[-1])[0] + '_train_id.png'))
mask = Image.open(os.path.join("/ssd1/chenwy/bdd100k/seg/labels/", self.opt.phase, os.path.splitext(A_path.split("/")[-1])[0] + '_train_id.png'))
mask = np.array(mask.crop((x1, y1, x1+self.opt.fineSize, y1+self.opt.fineSize))).astype('int32') # cropped mask for light_enhance_AB/seg
unique, counts = np.unique(mask, return_counts=True)
if len(unique) < 2 or (counts / counts.sum()).max() > 0.7: n_try += 1; continue
mask = self._mask_transform(mask)
else:
mask = torch.zeros(1)
break
if n_try == self.N_TRY:
# if int(np.round(value_A)) not in self.low_range:
# self.A_paths.pop(index_A % self.A_size)
# self.A_size -= 1
# if int(np.round(value_B)) not in self.high_range:
# self.B_paths.pop(index_B % self.B_size)
# self.B_size -= 1
            index_A = random.randint(0, self.__len__() - 1)
return self.__getitem__(index_A)
##########################################################################
gray_mask = torch.ones(1, self.opt.fineSize, self.opt.fineSize) * value_A
A_img_border = A_image.crop((x1-self.opt.fineSize//2, y1-self.opt.fineSize//2, x1+2*self.opt.fineSize, y1+2*self.opt.fineSize))
A_Lab = torch.Tensor(color.rgb2lab(A_npy) / 100).permute([2, 0, 1])
A_npy = gaussian(A_npy, sigma=2, multichannel=True)
r,g,b = A_npy[:, :, 0], A_npy[:, :, 1], A_npy[:, :, 2]
A_npy = 0.299*r+0.587*g+0.114*b
edges_A = torch.unsqueeze(torch.from_numpy(feature.canny(A_npy, sigma=2).astype("float32")), 0)
A_img = self.transform(A_img)
A_img_border = self.transform(A_img_border)
B_img = self.transform(B_img)
if self.opt.resize_or_crop == 'no':
r,g,b = A_img[0]+1, A_img[1]+1, A_img[2]+1
A_gray = 1. - (0.299*r+0.587*g+0.114*b)/2.
A_gray = torch.unsqueeze(A_gray, 0)
input_img = A_img
# A_gray = (1./A_gray)/255.
r,g,b = A_img_border[0]+1, A_img_border[1]+1, A_img_border[2]+1
A_gray_border = 1. - (0.299*r+0.587*g+0.114*b)/2.
A_gray_border = torch.unsqueeze(A_gray_border, 0)
else:
w = A_img.size(2)
h = A_img.size(1)
# A_gray = (1./A_gray)/255.
if (not self.opt.no_flip) and random.random() < 0.5:
idx = [i for i in range(A_img.size(2) - 1, -1, -1)]
idx = torch.LongTensor(idx)
A_img = A_img.index_select(2, idx)
B_img = B_img.index_select(2, idx)
if (not self.opt.no_flip) and random.random() < 0.5:
idx = [i for i in range(A_img.size(1) - 1, -1, -1)]
idx = torch.LongTensor(idx)
A_img = A_img.index_select(1, idx)
B_img = B_img.index_select(1, idx)
if self.opt.vary == 1 and (not self.opt.no_flip) and random.random() < 0.5:
times = random.randint(self.opt.low_times,self.opt.high_times)/100.
input_img = (A_img+1)/2./times
input_img = input_img*2-1
else:
input_img = A_img
if self.opt.lighten:
B_img = (B_img + 1)/2.
B_img = (B_img - torch.min(B_img))/(torch.max(B_img) - torch.min(B_img))
B_img = B_img*2. -1
r,g,b = input_img[0]+1, input_img[1]+1, input_img[2]+1
A_gray = 1. - (0.299*r+0.587*g+0.114*b)/2.
A_gray = torch.unsqueeze(A_gray, 0)
return {'A': A_img, 'B': B_img, 'A_gray': A_gray, 'input_img': input_img,
'A_paths': A_path, 'B_paths': B_path, 'mask': mask,
'A_border': A_img_border, 'A_gray_border': A_gray_border,
'A_Lab': A_Lab, 'gray_mask': gray_mask, 'edges_A': edges_A
}
def __len__(self):
return max(self.A_size, self.B_size)
def name(self):
return 'UnalignedDataset'
def _mask_transform(self, mask):
target = np.array(mask).astype('int32')
target[target == 255] = -1
return torch.from_numpy(target).long()
|
[
"random.shuffle",
"EnlightenGAN.data.image_folder.make_dataset",
"numpy.round",
"numpy.unique",
"skimage.color.rgb2lab",
"torch.ones",
"random.randint",
"torch.nn.ReflectionPad2d",
"torch.zeros",
"random.random",
"torch.max",
"torch.unsqueeze",
"skimage.feature.canny",
"torch.min",
"EnlightenGAN.data.base_dataset.get_transform",
"torch.from_numpy",
"torch.LongTensor",
"PIL.Image.open",
"numpy.array",
"skimage.filters.gaussian"
] |
[((1959, 1987), 'random.shuffle', 'random.shuffle', (['self.A_paths'], {}), '(self.A_paths)\n', (1973, 1987), False, 'import random\n'), ((3292, 3316), 'EnlightenGAN.data.image_folder.make_dataset', 'make_dataset', (['self.dir_A'], {}), '(self.dir_A)\n', (3304, 3316), False, 'from EnlightenGAN.data.image_folder import make_dataset\n'), ((3340, 3364), 'EnlightenGAN.data.image_folder.make_dataset', 'make_dataset', (['self.dir_B'], {}), '(self.dir_B)\n', (3352, 3364), False, 'from EnlightenGAN.data.image_folder import make_dataset\n'), ((3568, 3586), 'EnlightenGAN.data.base_dataset.get_transform', 'get_transform', (['opt'], {}), '(opt)\n', (3581, 3586), False, 'from EnlightenGAN.data.base_dataset import BaseDataset, get_transform\n'), ((4438, 4472), 'random.randint', 'random.randint', (['(0)', '(self.B_size - 1)'], {}), '(0, self.B_size - 1)\n', (4452, 4472), False, 'import random\n'), ((9362, 9405), 'skimage.filters.gaussian', 'gaussian', (['A_npy'], {'sigma': '(2)', 'multichannel': '(True)'}), '(A_npy, sigma=2, multichannel=True)\n', (9370, 9405), False, 'from skimage.filters import gaussian\n'), ((1120, 1182), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(pad_left, pad_right, pad_top, pad_bottom)'], {}), '((pad_left, pad_right, pad_top, pad_bottom))\n', (1138, 1182), False, 'from torch import nn\n'), ((6562, 6602), 'random.randint', 'random.randint', (['(0)', '(w - self.opt.fineSize)'], {}), '(0, w - self.opt.fineSize)\n', (6576, 6602), False, 'import random\n'), ((6620, 6660), 'random.randint', 'random.randint', (['(0)', '(h - self.opt.fineSize)'], {}), '(0, h - self.opt.fineSize)\n', (6634, 6660), False, 'import random\n'), ((6855, 6870), 'numpy.array', 'np.array', (['A_img'], {}), '(A_img)\n', (6863, 6870), True, 'import numpy as np\n'), ((6891, 6906), 'numpy.array', 'np.array', (['B_img'], {}), '(B_img)\n', (6899, 6906), True, 'import numpy as np\n'), ((9072, 9123), 'torch.ones', 'torch.ones', (['(1)', 'self.opt.fineSize', 'self.opt.fineSize'], {}), '(1, self.opt.fineSize, self.opt.fineSize)\n', (9082, 9123), False, 'import torch\n'), ((9918, 9944), 'torch.unsqueeze', 'torch.unsqueeze', (['A_gray', '(0)'], {}), '(A_gray, 0)\n', (9933, 9944), False, 'import torch\n'), ((10182, 10215), 'torch.unsqueeze', 'torch.unsqueeze', (['A_gray_border', '(0)'], {}), '(A_gray_border, 0)\n', (10197, 10215), False, 'import torch\n'), ((11554, 11580), 'torch.unsqueeze', 'torch.unsqueeze', (['A_gray', '(0)'], {}), '(A_gray, 0)\n', (11569, 11580), False, 'import torch\n'), ((4545, 4563), 'PIL.Image.open', 'Image.open', (['A_path'], {}), '(A_path)\n', (4555, 4563), False, 'from PIL import Image\n'), ((4597, 4615), 'PIL.Image.open', 'Image.open', (['B_path'], {}), '(B_path)\n', (4607, 4615), False, 'from PIL import Image\n'), ((8244, 8279), 'numpy.unique', 'np.unique', (['mask'], {'return_counts': '(True)'}), '(mask, return_counts=True)\n', (8253, 8279), True, 'import numpy as np\n'), ((8467, 8481), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (8478, 8481), False, 'import torch\n'), ((10498, 10519), 'torch.LongTensor', 'torch.LongTensor', (['idx'], {}), '(idx)\n', (10514, 10519), False, 'import torch\n'), ((10777, 10798), 'torch.LongTensor', 'torch.LongTensor', (['idx'], {}), '(idx)\n', (10793, 10798), False, 'import torch\n'), ((12077, 12091), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (12085, 12091), True, 'import numpy as np\n'), ((12158, 12182), 'torch.from_numpy', 'torch.from_numpy', (['target'], {}), '(target)\n', (12174, 12182), False, 'import torch\n'), ((7232, 7255), 
'numpy.round', 'np.round', (['(value_A * 255)'], {}), '(value_A * 255)\n', (7240, 7255), True, 'import numpy as np\n'), ((7624, 7647), 'numpy.round', 'np.round', (['(value_B * 255)'], {}), '(value_B * 255)\n', (7632, 7647), True, 'import numpy as np\n'), ((10385, 10400), 'random.random', 'random.random', ([], {}), '()\n', (10398, 10400), False, 'import random\n'), ((10664, 10679), 'random.random', 'random.random', ([], {}), '()\n', (10677, 10679), False, 'import random\n'), ((10966, 10981), 'random.random', 'random.random', ([], {}), '()\n', (10979, 10981), False, 'import random\n'), ((11013, 11068), 'random.randint', 'random.randint', (['self.opt.low_times', 'self.opt.high_times'], {}), '(self.opt.low_times, self.opt.high_times)\n', (11027, 11068), False, 'import random\n'), ((9299, 9319), 'skimage.color.rgb2lab', 'color.rgb2lab', (['A_npy'], {}), '(A_npy)\n', (9312, 9319), False, 'from skimage import color, feature\n'), ((9560, 9589), 'skimage.feature.canny', 'feature.canny', (['A_npy'], {'sigma': '(2)'}), '(A_npy, sigma=2)\n', (9573, 9589), False, 'from skimage import color, feature\n'), ((11319, 11335), 'torch.min', 'torch.min', (['B_img'], {}), '(B_img)\n', (11328, 11335), False, 'import torch\n'), ((11338, 11354), 'torch.max', 'torch.max', (['B_img'], {}), '(B_img)\n', (11347, 11354), False, 'import torch\n'), ((11357, 11373), 'torch.min', 'torch.min', (['B_img'], {}), '(B_img)\n', (11366, 11373), False, 'import torch\n'), ((7151, 7173), 'numpy.round', 'np.round', (['(length * 0.1)'], {}), '(length * 0.1)\n', (7159, 7173), True, 'import numpy as np\n'), ((7181, 7203), 'numpy.round', 'np.round', (['(length * 0.9)'], {}), '(length * 0.9)\n', (7189, 7203), True, 'import numpy as np\n'), ((7543, 7565), 'numpy.round', 'np.round', (['(length * 0.1)'], {}), '(length * 0.1)\n', (7551, 7565), True, 'import numpy as np\n'), ((7573, 7595), 'numpy.round', 'np.round', (['(length * 0.9)'], {}), '(length * 0.9)\n', (7581, 7595), True, 'import numpy as np\n')]
|
from datetime import datetime, date
import math
import numpy as np
import time
import sys
import requests
import re
from ortools.linear_solver import pywraplp
# if len(sys.argv) == 1:
# symbols = ['UPRO', 'TMF']
# else:
# symbols = sys.argv[1].split(',')
# for i in range(len(symbols)):
# symbols[i] = symbols[i].strip().upper()
symbols = ['TMF', 'UPRO']
num_trading_days_per_year = 252
window_size = 20
date_format = "%Y-%m-%d"
end_timestamp = int(time.time())
start_timestamp = int(end_timestamp - (1.4 * (window_size + 1) + 4) * 86400)
def get_volatility_and_performance(symbol,cookie,crumb):
download_url = "https://query1.finance.yahoo.com/v7/finance/download/{}?period1={}&period2={}&interval=1d&events=history".format(symbol, start_timestamp, end_timestamp)
lines = requests.get(
download_url,
headers={
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2866.71 Safari/537.36'
}).text.strip().split('\n')
assert lines[0].split(',')[0] == 'Date'
assert lines[0].split(',')[4] == 'Close'
prices = []
for line in lines[1:]:
prices.append(float(line.split(',')[4]))
prices.reverse()
volatilities_in_window = []
for i in range(window_size):
volatilities_in_window.append(math.log(prices[i] / prices[i+1]))
most_recent_date = datetime.strptime(lines[-1].split(',')[0], date_format).date()
assert (date.today() - most_recent_date).days <= 4, "today is {}, most recent trading day is {}".format(date.today(), most_recent_date)
return np.std(volatilities_in_window, ddof = 1) * np.sqrt(num_trading_days_per_year), prices[0] / prices[window_size] - 1.0, prices[0]
def get_cookie():
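    """Fetch a Yahoo Finance session cookie and the matching API crumb."""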
url = 'https://finance.yahoo.com/quote/VOO/history?p=VOO'
r = requests.get(url)
txt = r.text
cookie = r.cookies['B']
    crumb = None  # stays None if no CrumbStore entry is found in the page
    pattern = re.compile(r'.*"CrumbStore":\{"crumb":"(?P<crumb>[^"]+)"\}')
for line in txt.splitlines():
m = pattern.match(line)
if m is not None:
crumb = m.groupdict()['crumb']
return cookie,crumb
def get_data():
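    """Return (inverse-volatility allocation ratios, current prices) for `symbols`."""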
#cookie,crumb=get_cookie()
cookie='9mev4idf68vgk&b=3&s=g9'
crumb='Xpr8Z7BQn4W'
volatilities = []
performances = []
current_prices = []
sum_inverse_volatility = 0.0
for symbol in symbols:
volatility, performance, current_price = get_volatility_and_performance(symbol,cookie,crumb)
sum_inverse_volatility += 1 / volatility
volatilities.append(volatility)
performances.append(performance)
current_prices.append(current_price)
alpha=1/(np.array(volatilities) * sum_inverse_volatility)
print ("Portfolio: {}, as of {} (window size is {} days)".format(str(symbols), date.today().strftime('%Y-%m-%d'), window_size))
for i in range(len(symbols)):
        print ('{} allocation ratio: {:.2f}% (annualized volatility: {:.2f}%, performance: {:.2f}%)'.format(symbols[i], 100*(alpha[i]), float(volatilities[i] * 100), float(performances[i] * 100)))
return alpha,current_prices
def create_model(epsilon=0.01):
    """Assemble the integer-program data (coefficients and bounds) consumed by findsol.

    Decision variables: x0/x1 = TMF/UPRO shares in taxable, x2/x3 = TMF/UPRO shares in IRA.
    `epsilon` is the allowed deviation from the target TMF/UPRO value ratio alpha[0]/alpha[1].
    """
data={}
    data['constraint_coeffs']=[
        # row 0: TMF value <= (alpha[0]/alpha[1] + epsilon) * UPRO value  (ratio upper bound)
        [current_prices[0],-(epsilon+alpha[0]/alpha[1])*current_prices[1],current_prices[0],-(epsilon+alpha[0]/alpha[1])*current_prices[1]],
        # row 1: TMF value >= (alpha[0]/alpha[1] - epsilon) * UPRO value  (ratio lower bound)
        [current_prices[0],-(alpha[0]/alpha[1]-epsilon)*current_prices[1],current_prices[0],-(alpha[0]/alpha[1]-epsilon)*current_prices[1]],
        # row 2: total invested value <= S
        [current_prices[0],current_prices[1],current_prices[0],current_prices[1]],
        # row 3: taxable holdings <= S_Tax
        [current_prices[0],current_prices[1],0,0],
        # row 4: IRA holdings <= S_IRA
        [0,0,current_prices[0],current_prices[1]],
        # rows 5-6: keep at least the current taxable TMF/UPRO shares (no taxable sales)
        [1,0,0,0],
        [0,1,0,0],
        # row 7: hold at least one share overall
        [1,1,1,1]
    ]
data['lb']=[-np.inf, 0,0,0,0,N_Tax_T,N_Tax_U,1]
data['ub']=[0, np.inf,S,S_Tax,S_IRA,np.inf,np.inf,np.inf]
data['obj_coeffs']=[current_prices[0],current_prices[1],current_prices[0],current_prices[1]]
data['xub']=[np.floor(S_Tax/current_prices[0]),np.floor(S_Tax/current_prices[1]),np.floor(S_IRA/current_prices[0]),np.floor(S_IRA/current_prices[1])]
data['num_vars']=len(data['obj_coeffs'])
data['num_constraints']=len(data['constraint_coeffs'])
return data
def findsol(epsilon=0.01):
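    """Solve the share-allocation integer program with CBC.

    Returns ([x0, x1, x2, x3], solver status); a zero vector is returned when
    no optimal solution is found for the given epsilon.
    """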
data = create_model(epsilon)
solver = pywraplp.Solver.CreateSolver('CBC')
x={}
for j in range(data['num_vars']):
x[j] = solver.IntVar(0, data['xub'][j], 'x[%i]' % j)
for i in range(data['num_constraints']):
constraint = solver.RowConstraint(data['lb'][i], data['ub'][i], '')
for j in range(data['num_vars']):
constraint.SetCoefficient(x[j], data['constraint_coeffs'][i][j])
objective = solver.Objective()
for j in range(data['num_vars']):
objective.SetCoefficient(x[j], data['obj_coeffs'][j])
objective.SetMaximization()
status = solver.Solve()
if status==pywraplp.Solver.OPTIMAL:
sol=[x[i].solution_value() for i in range(4)]
else:
sol=[0,0,0,0]
return sol,status
alpha,current_prices=get_data()
N_Tax_T=float(input("Current shares of "+symbols[0]+" in taxable: "))
N_Tax_U=float(input("Current shares of "+symbols[1]+" in taxable: "))
Tax_C=float(input("Current cash in taxable: "))
N_IRA_T=float(input("Current shares of "+symbols[0]+" in IRA: "))
N_IRA_U=float(input("Current shares of "+symbols[1]+" in IRA: "))
IRA_C=float(input("Current cash in IRA: "))
Tax_T=N_Tax_T*current_prices[0]
Tax_U=N_Tax_U*current_prices[1]
IRA_T=N_IRA_T*current_prices[0]
IRA_U=N_IRA_U*current_prices[1]
S_Tax=Tax_T+Tax_U+Tax_C
S_IRA=IRA_T+IRA_U+IRA_C
S=S_Tax+S_IRA
epsilon=0.01
sol,status=findsol(epsilon)
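# Relax the ratio tolerance until the integer program becomes feasible.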
while status != pywraplp.Solver.OPTIMAL:
epsilon=epsilon+0.01
sol,status=findsol(epsilon)
N_Tax_T2,N_Tax_U2,N_IRA_T2,N_IRA_U2=sol
print('-'*10+'result'+'-'*10)
Tax_C2=S_Tax-N_Tax_T2*current_prices[0]-N_Tax_U2*current_prices[1]
IRA_C2=S_IRA-N_IRA_T2*current_prices[0]-N_IRA_U2*current_prices[1]
S_T2=(N_Tax_T2+N_IRA_T2)*current_prices[0]
S_U2=(N_Tax_U2+N_IRA_U2)*current_prices[1]
print('Cash in Taxable %f' % Tax_C2)
print('Cash in IRA %f' % IRA_C2)
print('Achievable balance of TMF/UPRO: ({:.2f}%/{:.2f}%), target ({:.2f}%/{:.2f}%)'.format(100*S_T2/(S_T2+S_U2),100*S_U2/(S_T2+S_U2),100*alpha[0],100*alpha[1]))
print('-'*10+'action'+'-'*10)
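# Each action line selects 'buy' or 'sell' via bool-to-int string repetition on the sign of the share change.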
print(('buy'*(N_Tax_T2-N_Tax_T>=0)+'sell'*(N_Tax_T2-N_Tax_T<0))+' TMF in Taxable: '+str(int(abs(N_Tax_T2-N_Tax_T)))+' at price '+str(current_prices[0]))
print(('buy'*(N_Tax_U2-N_Tax_U>=0)+'sell'*(N_Tax_U2-N_Tax_U<0))+' UPRO in Taxable: '+str(int(abs(N_Tax_U2-N_Tax_U)))+' at price '+str(current_prices[1]))
print(('buy'*(N_IRA_T2-N_IRA_T>=0)+'sell'*(N_IRA_T2-N_IRA_T<0))+' TMF in IRA: '+str(int(abs(N_IRA_T2-N_IRA_T)))+' at price '+str(current_prices[0]))
print(('buy'*(N_IRA_U2-N_IRA_U>=0)+'sell'*(N_IRA_U2-N_IRA_U<0))+' UPRO in IRA: '+str(int(abs(N_IRA_U2-N_IRA_U)))+' at price '+str(current_prices[1]))
|
[
"ortools.linear_solver.pywraplp.Solver.CreateSolver",
"numpy.std",
"numpy.floor",
"datetime.date.today",
"time.time",
"numpy.array",
"requests.get",
"math.log",
"numpy.sqrt",
"re.compile"
] |
[((491, 502), 'time.time', 'time.time', ([], {}), '()\n', (500, 502), False, 'import time\n'), ((1902, 1919), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1914, 1919), False, 'import requests\n'), ((1982, 2043), 're.compile', 're.compile', (['""".*"CrumbStore":\\\\{"crumb":"(?P<crumb>[^"]+)"\\\\}"""'], {}), '(\'.*"CrumbStore":\\\\{"crumb":"(?P<crumb>[^"]+)"\\\\}\')\n', (1992, 2043), False, 'import re\n'), ((4408, 4443), 'ortools.linear_solver.pywraplp.Solver.CreateSolver', 'pywraplp.Solver.CreateSolver', (['"""CBC"""'], {}), "('CBC')\n", (4436, 4443), False, 'from ortools.linear_solver import pywraplp\n'), ((1633, 1645), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1643, 1645), False, 'from datetime import datetime, date\n'), ((4070, 4105), 'numpy.floor', 'np.floor', (['(S_Tax / current_prices[0])'], {}), '(S_Tax / current_prices[0])\n', (4078, 4105), True, 'import numpy as np\n'), ((4104, 4139), 'numpy.floor', 'np.floor', (['(S_Tax / current_prices[1])'], {}), '(S_Tax / current_prices[1])\n', (4112, 4139), True, 'import numpy as np\n'), ((4138, 4173), 'numpy.floor', 'np.floor', (['(S_IRA / current_prices[0])'], {}), '(S_IRA / current_prices[0])\n', (4146, 4173), True, 'import numpy as np\n'), ((4172, 4207), 'numpy.floor', 'np.floor', (['(S_IRA / current_prices[1])'], {}), '(S_IRA / current_prices[1])\n', (4180, 4207), True, 'import numpy as np\n'), ((1400, 1435), 'math.log', 'math.log', (['(prices[i] / prices[i + 1])'], {}), '(prices[i] / prices[i + 1])\n', (1408, 1435), False, 'import math\n'), ((1679, 1717), 'numpy.std', 'np.std', (['volatilities_in_window'], {'ddof': '(1)'}), '(volatilities_in_window, ddof=1)\n', (1685, 1717), True, 'import numpy as np\n'), ((1722, 1756), 'numpy.sqrt', 'np.sqrt', (['num_trading_days_per_year'], {}), '(num_trading_days_per_year)\n', (1729, 1756), True, 'import numpy as np\n'), ((2749, 2771), 'numpy.array', 'np.array', (['volatilities'], {}), '(volatilities)\n', (2757, 2771), True, 'import numpy as np\n'), ((1537, 1549), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1547, 1549), False, 'from datetime import datetime, date\n'), ((2882, 2894), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2892, 2894), False, 'from datetime import datetime, date\n'), ((829, 1011), 'requests.get', 'requests.get', (['download_url'], {'headers': "{'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2866.71 Safari/537.36'\n }"}), "(download_url, headers={'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2866.71 Safari/537.36'\n })\n", (841, 1011), False, 'import requests\n')]
|
'''
hardware_efficient.py
This code is distributed under the terms of the GNU GPL.
(c) PearCandy
Log of hardware_efficient
2021/01/06 Released by PearCandy
'''
#coding:utf-8
#-------------------------------------------------------------
from pennylane import numpy as np
from pennylane.templates import template #import the decorator
from pennylane.ops import CNOT, RX, RY, RZ, Hadamard, CZ
@template
def HardwareEfficient(weights, wires, depth=1):
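    """Hardware-efficient ansatz: `depth` blocks of per-wire RY/RZ rotations
    interleaved with brick-wall CZ entanglers, followed by a final rotation layer.
    Expects len(weights) == 2 * len(wires) * (depth + 1).
    """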
for d in range(depth):
for i in range(len(wires)):
RY(weights[2 * i + 2 * len(wires) * d], wires=i)
RZ(weights[2 * i + 1 + 2 * len(wires) * d], wires=i)
for i in range(len(wires) // 2):
CZ(wires=[2 * i, 2 * i + 1])
for i in range(len(wires) // 2 - 1):
CZ(wires=[2 * i + 1, 2 * i + 2])
for i in range(len(wires)):
RY(weights[2 * i + 2 * len(wires) * depth], wires=i)
RZ(weights[2 * i + 1 + 2 * len(wires) * depth], wires=i)
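

# A minimal usage sketch (not part of the original module); the simulator,
# wire count, depth, and measured observable below are illustrative assumptions.
if __name__ == '__main__':
    import pennylane as qml

    dev = qml.device('default.qubit', wires=4)

    @qml.qnode(dev)
    def circuit(weights):
        HardwareEfficient(weights, wires=list(range(4)), depth=2)
        return qml.expval(qml.PauliZ(0))

    # The ansatz expects 2 * n_wires * (depth + 1) parameters.
    weights = np.random.uniform(0, 2 * np.pi, 2 * 4 * (2 + 1))
    print(circuit(weights))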
|
[
"pennylane.ops.CZ"
] |
[((1182, 1210), 'pennylane.ops.CZ', 'CZ', ([], {'wires': '[2 * i, 2 * i + 1]'}), '(wires=[2 * i, 2 * i + 1])\n', (1184, 1210), False, 'from pennylane.ops import CNOT, RX, RY, RZ, Hadamard, CZ\n'), ((1268, 1300), 'pennylane.ops.CZ', 'CZ', ([], {'wires': '[2 * i + 1, 2 * i + 2]'}), '(wires=[2 * i + 1, 2 * i + 2])\n', (1270, 1300), False, 'from pennylane.ops import CNOT, RX, RY, RZ, Hadamard, CZ\n')]
|
#!/usr/bin/env python
# =============================================================================
# MODULE DOCSTRING
# =============================================================================
"""
Test objects and function in the module reweighting.
"""
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
import os
import tempfile
import numpy as np
from numpy.random import RandomState
import pint
from ..reweighting import DatasetReweighting
# =============================================================================
# GLOBAL VARIABLES
# =============================================================================
# Makes random test cases deterministic.
_random_state = RandomState(0)
_ureg = pint.UnitRegistry()
# =============================================================================
# TEST UTILITIES
# =============================================================================
class DummyStdReweighting(DatasetReweighting):
"""Dummy implementation of standard reweighting for testing."""
U0 = 0.0
def compute_potentials(self, batch_positions):
kJ_mol = _ureg.kJ / _ureg.mol
return (self.U0 + _random_state.rand(len(batch_positions))) * kJ_mol
def get_traj_info(self):
kJ_mol = _ureg.kJ / _ureg.mol
cvs = np.array(range(len(self.dataset)))
reference_potentials = _random_state.rand(len(cvs)) * kJ_mol
metad_rbias = np.zeros(len(cvs)) * kJ_mol
return cvs, reference_potentials, metad_rbias
# =============================================================================
# TESTS
# =============================================================================
def test_standard_reweighting_potentials_cache():
"""Test that DatasetReweighting caches and reuses the potentials correctly."""
import MDAnalysis.coordinates
from ..data import TrajectoryDataset, TrajectorySubset
def _get_potentials(dataset, file_path, u0, indices, batch_size, write_interval):
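        """Helper: reweight the given subset indices and return their (possibly cached) potentials."""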
subset = TrajectorySubset(dataset, indices=indices)
DummyStdReweighting.U0 = u0
reweighting = DummyStdReweighting(
subset, n_bins=len(subset), temperature=300*_ureg.kelvin,
potentials_file_path=file_path)
return reweighting.compute_dataset_potentials(
batch_size=batch_size, write_interval=write_interval)
# Load the test PDB file.
pdb_file_path = os.path.join(os.path.dirname(__file__), 'data', 'chloro-fluoromethane.pdb')
with MDAnalysis.coordinates.PDB.PDBReader(pdb_file_path) as trajectory:
dataset = TrajectoryDataset(trajectory, return_batch_index=True)
# Cache the potentials in a temporary file.
with tempfile.TemporaryDirectory() as tmp_dir:
file_path = os.path.join(tmp_dir, 'potentials.npz')
# Cache a first value for the potentials of some of the frames.
u1 = 10
potentials1 = _get_potentials(dataset, file_path, u1, indices=[0, 2, 4],
batch_size=1, write_interval=2)
assert np.all((0 <= potentials1.magnitude - u1) & (potentials1.magnitude - u1 < 1))
# Check that what we have just computed does not get re-computed.
u2 = 20
potentials2 = _get_potentials(dataset, file_path, u2, indices=[1, 3, 4],
batch_size=5, write_interval=2)
assert potentials1[-1] == potentials2[-1]
assert np.all((0 <= potentials2.magnitude[:-1] - u2) & (potentials2.magnitude[:-1] - u2 < 1))
# The cache should be up-to-date.
times, potentials = DummyStdReweighting.load_cached_potentials_from_file(file_path)
assert not np.isnan(potentials).any()
|
[
"tempfile.TemporaryDirectory",
"pint.UnitRegistry",
"os.path.dirname",
"numpy.random.RandomState",
"numpy.isnan",
"os.path.join",
"numpy.all"
] |
[((825, 839), 'numpy.random.RandomState', 'RandomState', (['(0)'], {}), '(0)\n', (836, 839), False, 'from numpy.random import RandomState\n'), ((849, 868), 'pint.UnitRegistry', 'pint.UnitRegistry', ([], {}), '()\n', (866, 868), False, 'import pint\n'), ((2557, 2582), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2572, 2582), False, 'import os\n'), ((2835, 2864), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2862, 2864), False, 'import tempfile\n'), ((2901, 2940), 'os.path.join', 'os.path.join', (['tmp_dir', '"""potentials.npz"""'], {}), "(tmp_dir, 'potentials.npz')\n", (2913, 2940), False, 'import os\n'), ((3216, 3292), 'numpy.all', 'np.all', (['((0 <= potentials1.magnitude - u1) & (potentials1.magnitude - u1 < 1))'], {}), '((0 <= potentials1.magnitude - u1) & (potentials1.magnitude - u1 < 1))\n', (3222, 3292), True, 'import numpy as np\n'), ((3625, 3715), 'numpy.all', 'np.all', (['((0 <= potentials2.magnitude[:-1] - u2) & (potentials2.magnitude[:-1] - u2 < 1)\n )'], {}), '((0 <= potentials2.magnitude[:-1] - u2) & (potentials2.magnitude[:-1] -\n u2 < 1))\n', (3631, 3715), True, 'import numpy as np\n'), ((3879, 3899), 'numpy.isnan', 'np.isnan', (['potentials'], {}), '(potentials)\n', (3887, 3899), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import django_rest_admin
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = django_rest_admin.__version__
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
sys.exit()
if sys.argv[-1] == 'tag':
print("Tagging the version on github:")
os.system("git tag -a %s -m 'version %s'" % (version, version))
os.system("git push --tags")
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
def get_install_requires():
"""
parse requirements.txt, ignore links, exclude comments
"""
requirements = []
    for line in open('requirements.txt').readlines():
        line = line.strip()
        # skip to next iteration if comment, empty line, or direct link
        if line.startswith('#') or line == '' or line.startswith('http') or line.startswith('git'):
            continue
        # add line to requirements
        requirements.append(line)
return requirements
setup(
name='django-rest-admin',
version=version,
description="""REST endpoints for administering django models.""",
long_description=readme + '\n\n' + history,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/inmagik/django-rest-admin',
packages=[
'django_rest_admin',
],
include_package_data=True,
install_requires=get_install_requires(),
license="BSD",
zip_safe=False,
keywords='django-rest-admin',
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
|
[
"os.system",
"sys.exit"
] |
[((266, 307), 'os.system', 'os.system', (['"""python setup.py sdist upload"""'], {}), "('python setup.py sdist upload')\n", (275, 307), False, 'import os\n'), ((312, 359), 'os.system', 'os.system', (['"""python setup.py bdist_wheel upload"""'], {}), "('python setup.py bdist_wheel upload')\n", (321, 359), False, 'import os\n'), ((364, 374), 'sys.exit', 'sys.exit', ([], {}), '()\n', (372, 374), False, 'import sys\n'), ((450, 513), 'os.system', 'os.system', (['("git tag -a %s -m \'version %s\'" % (version, version))'], {}), '("git tag -a %s -m \'version %s\'" % (version, version))\n', (459, 513), False, 'import os\n'), ((518, 546), 'os.system', 'os.system', (['"""git push --tags"""'], {}), "('git push --tags')\n", (527, 546), False, 'import os\n'), ((551, 561), 'sys.exit', 'sys.exit', ([], {}), '()\n', (559, 561), False, 'import sys\n')]
|