| id (string, 3-8 chars) | content (string, 100-981k chars) |
|---|---|
170770
|
import os
import sys
import argparse
import tensorflow as tf
import numpy as np
from PIL import Image
from reader import Reader
from source.anchor_filter import AnchorFilter
import logging
import random
import time
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
class VS3D(object):
def __init__(self, kitti,
train_set, val_set,
is_training=False,
mini_batch_size=[1024, 128]):
self.reader = Reader(kitti)
self.anchor_filter = AnchorFilter()
self.is_training = is_training
self.mini_batch_size = mini_batch_size
with open(train_set) as f:
indices = f.readlines()
self.train_indices = \
[index.strip() for index in indices]
self.train_indices.sort()
with open(val_set) as f:
indices = f.readlines()
self.val_indices = \
[index.strip() for index in indices]
self.val_indices.sort()
self.endpoint = self.build()
return
def random_keep(self, mask, num_keep):
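        # Keep roughly `num_keep` True entries in `mask`: each element survives
        # with probability num_keep / (current_count + 1), i.e. Bernoulli
        # thinning rather than an exact top-k selection.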
curr_num = tf.reduce_sum(
tf.cast(mask, tf.float32))
keep_ratio = tf.divide(num_keep,
curr_num + 1.0)
rand_select = tf.random.uniform(
shape=tf.shape(mask),
minval=0,
maxval=1)
keep = tf.less(rand_select, keep_ratio)
mask = tf.logical_and(mask, keep)
return mask
def balance_pos_neg(self, scores, num_keep,
pos_thres=0.7,
neg_thres=0.4):
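        # Build a roughly balanced mini-batch: sample about half of the budget
        # from anchors scored above pos_thres and half from those below neg_thres.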
num_keep_pos = num_keep // 2
num_keep_neg = num_keep // 2
all_pos = tf.greater(scores, pos_thres)
select_pos = self.random_keep(all_pos,
num_keep_pos)
all_neg = tf.less(scores, neg_thres)
select_neg = self.random_keep(all_neg,
num_keep_neg)
select = tf.logical_or(select_pos, select_neg)
return select
def mask_out(self, mask, tensors):
masked_tensors = [tf.boolean_mask(tensor, mask) \
for tensor in tensors]
return masked_tensors
def build(self):
endpoint = {}
placeholder = {}
placeholder['sphere_map'] = tf.placeholder(
shape=[64, 512, 5],
dtype=tf.float32)
placeholder['input_image'] = tf.placeholder(
shape=[384, 1248, 3],
dtype=tf.float32)
placeholder['image_size'] = tf.placeholder(
shape=[2],
dtype=tf.float32)
placeholder['plane'] = tf.placeholder(
shape=[4],
dtype=tf.float32)
placeholder['velo_to_cam'] = tf.placeholder(
shape=[4, 4],
dtype=tf.float32)
placeholder['cam_to_img'] = tf.placeholder(
shape=[3, 4],
dtype=tf.float32)
placeholder['cam_to_velo'] = tf.placeholder(
shape=[4, 4],
dtype=tf.float32)
xyz, ranges, density = tf.split(
placeholder['sphere_map'],
[3, 1, 1], axis=-1)
anchor_centers, scores_init = \
self.anchor_filter.filt(xyz, placeholder['plane'],
placeholder['cam_to_velo'])
endpoint['anchor_centers'] = anchor_centers
endpoint['scores_init'] = scores_init
mask = tf.greater(scores_init, 0.9 if self.is_training else 0.8)
if self.is_training:
mask = self.random_keep(mask,
self.mini_batch_size[0])
bottom_centers, rotation, class_prob, full_prob = \
self.anchor_filter.filt_image(
placeholder['input_image'],
placeholder['plane'],
placeholder['cam_to_img'],
placeholder['image_size'],
mask)
if self.is_training:
mask_balance = self.balance_pos_neg(
class_prob,
self.mini_batch_size[1])
bottom_centers, rotation, class_prob, full_prob = \
self.mask_out(mask_balance,
[bottom_centers, rotation, class_prob, full_prob])
endpoint['bottom_centers'] = bottom_centers
endpoint['rotation'] = rotation
endpoint['class_prob'] = class_prob
endpoint['full_prob'] = full_prob
[_, rotation_lidar, rot_vect_lidar,
class_prob_lidar, mask_prob_lidar] = \
self.anchor_filter.filt_lidar(
placeholder['sphere_map'],
placeholder['plane'],
placeholder['cam_to_velo'],
placeholder['velo_to_cam'],
mask)
if self.is_training:
[_, rotation_lidar, rot_vect_lidar,
class_prob_lidar, mask_prob_lidar] = \
self.mask_out(mask_balance,
[_, rotation_lidar, rot_vect_lidar,
class_prob_lidar, mask_prob_lidar])
endpoint['rotation_lidar'] = rotation_lidar
endpoint['class_prob_lidar'] = class_prob_lidar
endpoint['rot_vect_lidar'] = rot_vect_lidar
endpoint['mask_prob_lidar'] = mask_prob_lidar
bottom_centers_aligned, point_cloud_density = \
self.anchor_filter.points_alignment(
xyz,
bottom_centers,
rotation,
placeholder['velo_to_cam'],
placeholder['cam_to_velo'])
endpoint['bottom_centers_aligned'] = bottom_centers_aligned
endpoint['point_cloud_density'] = point_cloud_density
bottom_centers_aligned_lidar, point_cloud_density_lidar = \
self.anchor_filter.points_alignment(
xyz,
bottom_centers,
rotation_lidar,
placeholder['velo_to_cam'],
placeholder['cam_to_velo'])
endpoint['bottom_centers_aligned_lidar'] = bottom_centers_aligned_lidar
endpoint['point_cloud_density_lidar'] = point_cloud_density_lidar
rotation_aligned = self.anchor_filter.rotation_align(
xyz,
bottom_centers,
rotation,
placeholder['velo_to_cam'],
placeholder['cam_to_velo'])
endpoint['rotation_aligned'] = rotation_aligned
instance_points, instance_mask = \
self.anchor_filter.instance_mask(
xyz,
bottom_centers_aligned,
rotation,
placeholder['velo_to_cam'],
placeholder['cam_to_velo'])
endpoint['instance_points'] = instance_points
endpoint['instance_mask'] = instance_mask
instance_points_lidar, instance_mask_lidar = \
self.anchor_filter.instance_mask(
xyz,
bottom_centers_aligned_lidar,
rotation_lidar,
placeholder['velo_to_cam'],
placeholder['cam_to_velo'])
endpoint['instance_points_lidar'] = instance_points_lidar
endpoint['instance_mask_lidar'] = instance_mask_lidar
endpoint['mask_prob_lidar'] = mask_prob_lidar
nms_indices = self.anchor_filter.nms_image(
bottom_centers,
rotation,
tf.minimum(class_prob, full_prob),
placeholder['cam_to_img'],
placeholder['image_size'])
endpoint['nms_indices'] = nms_indices
nms_indices_lidar = self.anchor_filter.nms_image(
bottom_centers,
rotation_lidar,
class_prob_lidar,
placeholder['cam_to_img'],
placeholder['image_size'])
endpoint['nms_indices_lidar'] = nms_indices_lidar
class_loss, rot_loss, mask_loss, rot_error = \
self.anchor_filter.build_loss(
rotation, rot_vect_lidar, rotation_lidar,
tf.minimum(class_prob, full_prob),
class_prob_lidar, instance_mask, mask_prob_lidar)
endpoint['class_loss'] = class_loss
endpoint['rot_loss'] = rot_loss
endpoint['mask_loss'] = mask_loss
endpoint['rot_error'] = rot_error
self.placeholder = placeholder
return endpoint
def to_kitti_line(self, bbox, center,
size, rotation, score):
kitti_line = 'Car -1 -1 -10 ' + \
'{:.2f} {:.2f} {:.2f} {:.2f} '.format(
bbox[0], bbox[1], bbox[2], bbox[3]) + \
'{:.2f} {:.2f} {:.2f} '.format(
size[0], size[1], size[2]) + \
'{:.2f} {:.2f} {:.2f} '.format(
center[0], center[1], center[2]) + \
'{:.2f} {:.2f} \n'.format(
rotation, score)
return kitti_line
def to_bbox(self, center,
size, rotation,
cam_to_img, image_size):
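        # Build the eight corners of an (h, w, l) box sitting on the bottom
        # center, rotate them by the yaw angle around the camera Y axis,
        # project with the 3x4 camera matrix, and clip the 2D box to the image.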
R = np.array([[+np.cos(rotation), 0, +np.sin(rotation)],
[ 0, 1, 0],
[-np.sin(rotation), 0, +np.cos(rotation)]],
dtype=np.float32)
h, w, l = size
x_corners = [l/2, l/2, -l/2, -l/2, l/2, l/2, -l/2, -l/2]
y_corners = [0,0,0,0,-h,-h,-h,-h]
z_corners = [w/2, -w/2, -w/2, w/2, w/2, -w/2, -w/2, w/2]
corners = np.dot(R, [x_corners, y_corners, z_corners])
corners = corners + center.reshape((3, 1))
projection = np.dot(cam_to_img, np.vstack([corners,
np.ones(8, dtype=np.float32)]))
projection = (projection / projection[2])[:2]
left = max(np.amin(projection[0]), 0)
right = min(np.amax(projection[0]), image_size[1])
top = max(np.amin(projection[1]), 0)
bottom = min(np.amax(projection[1]), image_size[0])
return [left, top, right, bottom]
def train(self,
model_image,
model_lidar=None,
save_dir='./runs/weights',
steps=160000,
learning_rate_init=1e-4,
l2_weight=1e-5,
clip_grads=False,
clip_grads_norm=2.0,
display_step=200,
save_step=2000):
class_loss = self.endpoint['class_loss']
rot_loss = self.endpoint['rot_loss'] * 5.0
mask_loss = self.endpoint['mask_loss'] * 2.0
rot_error = self.endpoint['rot_error']
global_step = tf.placeholder(shape=(), dtype=tf.int32)
learning_rate = tf.train.exponential_decay(
learning_rate_init, global_step,
120000, 0.2, staircase=False)
weight_loss = [tf.nn.l2_loss(var) for var \
in tf.trainable_variables()]
weight_loss = tf.reduce_sum(weight_loss) * l2_weight
total_loss = weight_loss + class_loss + rot_loss + mask_loss
opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
all_vars = tf.get_collection_ref(
tf.GraphKeys.GLOBAL_VARIABLES)
var_list_image = \
[var for var in all_vars if "lidar" not in var.name]
var_list_lidar = \
[var for var in all_vars if "lidar" in var.name]
if clip_grads:
grads_and_vars = opt.compute_gradients(total_loss,
var_list_lidar)
grads, tvars = zip(*grads_and_vars)
clipped_grads, norm = tf.clip_by_global_norm(
grads, clip_grads_norm)
grads_and_vars = zip(clipped_grads, tvars)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = opt.apply_gradients(grads_and_vars)
else:
train_op = tf.train.AdamOptimizer(
learning_rate=learning_rate
).minimize(total_loss, var_list=var_list_lidar)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver_image = tf.train.Saver(var_list=var_list_image)
saver_lidar = tf.train.Saver(var_list=var_list_lidar)
saver_image.restore(sess, model_image)
if model_lidar:
saver_lidar.restore(sess, model_lidar)
rot_loss_np_list = []
class_loss_np_list = []
mask_loss_np_list = []
rot_error_np_list = []
for step in range(steps):
index = random.choice(self.train_indices)
data = self.reader.data[index]
sphere_map = np.load(open(
data['sphere_path'], 'rb'))
image_pil = Image.open(
data['image_path'])
width, height = image_pil.size
image_size = np.array([height, width],
dtype=np.float32)
image_np = np.array(image_pil.resize((1248, 384)),
dtype=np.float32)
cam_to_img = data['P2']
plane = data['plane']
velo_to_cam = np.dot(data['R0'], data['Tr'])
cam_to_velo = np.linalg.inv(velo_to_cam)
placeholder = self.placeholder
_, weight_loss_np, class_loss_np, \
rot_loss_np, mask_loss_np, rot_error_np, debug_np = \
sess.run([train_op, weight_loss,
class_loss, rot_loss,
mask_loss, rot_error,
tf.get_collection('debug')],
feed_dict={
placeholder['sphere_map']: sphere_map,
placeholder['plane']: plane,
placeholder['velo_to_cam']: velo_to_cam,
placeholder['cam_to_velo']: cam_to_velo,
placeholder['input_image']: image_np,
placeholder['image_size']: image_size,
placeholder['cam_to_img']: cam_to_img,
global_step: step})
rot_loss_np_list.append(rot_loss_np)
class_loss_np_list.append(class_loss_np)
mask_loss_np_list.append(mask_loss_np)
rot_error_np_list.append(rot_error_np)
if step % display_step == 0:
logging.info(
'Step: {} / {}, '.format(step, steps) + \
'Loss Weight: {:.3f}, '.format(weight_loss_np) + \
'Class: {:.3f}, '.format(np.mean(class_loss_np_list)) + \
'Rotation: {:.3f}, '.format(np.mean(rot_loss_np_list)) + \
'Mask: {:.3f}, '.format(np.mean(mask_loss_np_list)) + \
'Rot Error: {:.3f}'.format(np.mean(rot_error_np_list)))
rot_loss_np_list = []
class_loss_np_list = []
mask_loss_np_list = []
rot_error_np_list= []
if not os.path.exists(save_dir):
os.mkdir(save_dir)
if step % save_step == 0:
saver_lidar.save(sess, os.path.join(save_dir,
'model_lidar_{}'.format(str(step).zfill(6))))
def run(self, score_thres=0.05,
density_thres=0.1, save_dir=None,
image_model=None, lidar_model=None,
return_pred=False, max_pred_frames=np.inf):
with tf.Session() as sess:
all_vars = tf.get_collection_ref(
tf.GraphKeys.GLOBAL_VARIABLES)
var_list_image = \
[var for var in all_vars if "lidar" not in var.name]
var_list_lidar = \
[var for var in all_vars if "lidar" in var.name]
assert len(all_vars) == len(var_list_image + var_list_lidar)
if image_model and not lidar_model:
saver = tf.train.Saver(var_list=var_list_image)
saver.restore(sess, image_model)
rotation_tf = self.endpoint['rotation']
centers_tf = self.endpoint['bottom_centers']
centers_aligned_tf = self.endpoint['bottom_centers_aligned']
scores_tf = tf.minimum(self.endpoint['class_prob'],
self.endpoint['full_prob'])
nms_indices_tf = self.endpoint['nms_indices']
point_cloud_density_tf = self.endpoint['point_cloud_density']
instance_points_tf = self.endpoint['instance_points']
instance_mask_tf = self.endpoint['instance_mask']
elif not image_model and lidar_model:
saver = tf.train.Saver(var_list=var_list_lidar)
saver.restore(sess, lidar_model)
rotation_tf = self.endpoint['rotation_lidar']
centers_tf = self.endpoint['bottom_centers']
centers_aligned_tf = self.endpoint['bottom_centers_aligned_lidar']
scores_tf = self.endpoint['class_prob_lidar']
nms_indices_tf = self.endpoint['nms_indices_lidar']
point_cloud_density_tf = self.endpoint['point_cloud_density_lidar']
instance_points_tf = self.endpoint['instance_points_lidar']
instance_mask_tf = self.endpoint['instance_mask_lidar']
            elif image_model and lidar_model:
                saver = tf.train.Saver(var_list=var_list_image)
                saver.restore(sess, image_model)
                saver = tf.train.Saver(var_list=var_list_lidar)
                saver.restore(sess, lidar_model)
                # Evaluate the LiDAR branch when both checkpoints are provided,
                # otherwise the tensors fetched below would be undefined.
                rotation_tf = self.endpoint['rotation_lidar']
                centers_tf = self.endpoint['bottom_centers']
                centers_aligned_tf = self.endpoint['bottom_centers_aligned_lidar']
                scores_tf = self.endpoint['class_prob_lidar']
                nms_indices_tf = self.endpoint['nms_indices_lidar']
                point_cloud_density_tf = self.endpoint['point_cloud_density_lidar']
                instance_points_tf = self.endpoint['instance_points_lidar']
                instance_mask_tf = self.endpoint['instance_mask_lidar']
else:
raise Exception('Image or LiDAR model must be provided!')
out_tf = [rotation_tf, centers_tf, centers_aligned_tf,
scores_tf, nms_indices_tf, point_cloud_density_tf,
instance_points_tf, instance_mask_tf]
bbox_list = []
mask_list = []
index_list = []
total_time = []
for iindex, index in enumerate(self.val_indices):
if iindex == max_pred_frames:
break
logging.info('Inference {}'.format(index))
data = self.reader.data[index]
sphere_map = np.load(open(
data['sphere_path'], 'rb'))
image_pil = Image.open(
data['image_path'])
width, height = image_pil.size
image_size = np.array([height, width],
dtype=np.float32)
image_np = np.array(image_pil.resize((1248, 384)),
dtype=np.float32)
cam_to_img = data['P2']
plane = data['plane']
velo_to_cam = np.dot(data['R0'], data['Tr'])
cam_to_velo = np.linalg.inv(velo_to_cam)
placeholder = self.placeholder
start_time = time.time()
out_np = sess.run(out_tf,
feed_dict={
placeholder['sphere_map']: sphere_map,
placeholder['plane']: plane,
placeholder['velo_to_cam']: velo_to_cam,
placeholder['cam_to_velo']: cam_to_velo,
placeholder['input_image']: image_np,
placeholder['image_size']: image_size,
placeholder['cam_to_img']: cam_to_img})
total_time.append(time.time() - start_time)
[rotation_np, centers_np, centers_aligned_np,
scores_np, nms_indices_np, point_cloud_density_np,
instance_points_np, instance_mask_np] = out_np
if iindex % 300 == 0:
logging.info('Forward time: {:.3f}s, STD: {:.3f}s'.format(np.mean(total_time), np.std(total_time)))
total_time = []
kitti_lines = []
instance_points_masked = []
for aind in nms_indices_np:
score = scores_np[aind]
density = point_cloud_density_np[aind]
if score < score_thres or density < density_thres:
continue
bbox = self.to_bbox(center=centers_aligned_np[aind],
size=[1.45, 1.55, 4.00],
rotation=rotation_np[aind],
cam_to_img=cam_to_img,
image_size=image_size)
kitti_line = self.to_kitti_line(
bbox=bbox,
center=centers_aligned_np[aind],
size=[1.45, 1.55, 4.00],
rotation=rotation_np[aind],
score=score)
kitti_lines.append(kitti_line)
instance_points_masked.append(
instance_points_np[aind] * instance_mask_np[aind])
                if save_dir is not None:
                    if not os.path.exists(save_dir):
                        os.makedirs(os.path.join(save_dir, 'bbox'))
                        os.makedirs(os.path.join(save_dir, 'mask'))
                    with open(os.path.join(save_dir,
                                           'bbox',
                                           index + '.txt'), 'w') as f:
                        f.writelines(kitti_lines)
                    with open(os.path.join(save_dir,
                                           'mask',
                                           index + '.npy'), 'wb') as f:
                        np.save(f, instance_points_masked)
if return_pred:
bbox_list.append(kitti_lines)
mask_list.append(instance_points_masked)
index_list.append(index)
if return_pred:
return bbox_list, mask_list, index_list, self.reader
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--mode',
type=str,
required=True,
help='train or evaluate')
parser.add_argument('--teacher_model',
type=str,
default='../data/pretrained/teacher/iter_158000',
help='required in training.')
parser.add_argument('--student_model',
type=str,
default=None,
help='required in testing and optional in training.')
parser.add_argument('--gpu',
type=str,
default='0',
help='GPU to use.')
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
if args.mode == 'train':
vs3d = VS3D(kitti='../data/kitti/training',
train_set='../data/kitti/train.txt',
val_set='../data/kitti/val.txt',
is_training=True)
vs3d.train(model_image=args.teacher_model)
elif args.mode == 'evaluate':
vs3d = VS3D(kitti='../data/kitti/training',
train_set='../data/kitti/train.txt',
val_set='../data/kitti/val.txt',
is_training=False)
vs3d.run(save_dir='../output',
lidar_model=args.student_model)
else:
raise NotImplementedError
|
170790
|
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def initialize(self):
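        # Tornado checks SUPPORTED_METHODS before dispatching, so extending it
        # here (a class attribute works as well) is what lets the non-standard
        # PURGE verb reach the purge() handler below.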
self.SUPPORTED_METHODS = ("GET", "POST", "PURGE")
def get(self):
self.write("Hello, world")
def post(self):
print ("processing body: %s" % self.request.body)
self.write("Hello, universe")
def purge(self):
print ("processing purge hdr: %s" % self.request.headers)
print ("processing purge body: %s" % self.request.body)
self.set_status(200)
self.write("Hello, milky way")
self.finish()
application = tornado.web.Application([
(r"/", MainHandler),
(r"/tcspurge/ccur/(.*)$", MainHandler),
(r"/v1/components/commands/transparentcache/(.*)$", MainHandler)
])
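# A quick manual check once the server is running (hypothetical paths; any of
# the routes above will match). Listening on port 80 usually needs privileges:
#   curl -X PURGE -d 'payload' http://localhost/tcspurge/ccur/some-object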
if __name__ == "__main__":
application.listen(80)
tornado.ioloop.IOLoop.instance().start()
|
170799
|
import numpy as np
import unittest
import pytest
from mvc.misc.batch import make_batch
class MakeBatchTest(unittest.TestCase):
def test_success(self):
batch_size = np.random.randint(32) + 1
data_size = batch_size * np.random.randint(10) + 1
data = {
'test1': np.random.random((data_size)),
'test2': np.random.random((data_size))
}
count = 0
for batch in make_batch(data, batch_size, data_size):
assert batch['test1'].shape[0] == batch_size
assert batch['test2'].shape[0] == batch_size
count += 1
assert count == data_size // batch_size
def test_assertion_error(self):
with pytest.raises(AssertionError):
data = [1, 2, 3]
batch_size = np.random.randint(32) + 1
data_size = np.random.randint(1024) + 1
batch = make_batch(data, batch_size, data_size)
next(batch)
|
170808
|
import json
import pytest
from lxml import etree
import numpy as np
import xarray as xr
import pandas as pd
import finch
import finch.processes
from finch.processes.wps_xclim_indices import XclimIndicatorBase
from finch.processes.wps_base import make_xclim_indicator_process
from .utils import execute_process, wps_input_file, wps_literal_input
from pathlib import Path
from pywps.app.exceptions import ProcessError
from pywps import configuration
from unittest import mock
from numpy.testing import assert_equal
from xclim.testing import open_dataset
K2C = 273.16
configuration.CONFIG['finch:metadata']['testing_session'] = "True"
def _get_output_standard_name(process_identifier):
for p in finch.processes.get_processes():
if p.identifier == process_identifier:
return p.xci.standard_name
@pytest.mark.parametrize("indicator", finch.processes.indicators)
def test_indicators_processes_discovery(indicator):
process = make_xclim_indicator_process(indicator, "Process", XclimIndicatorBase)
assert indicator.identifier == process.identifier
# Remove args not supported by finch: we remove special kinds,
# 50 is "kwargs". 70 is Dataset ('ds') and 99 is "unknown". All normal types are 0-9.
parameters = set([k for k, v in indicator.parameters.items() if v['kind'] < 50 or k == 'indexer'])
parameters.add("check_missing")
parameters.add("missing_options")
parameters.add("cf_compliance")
parameters.add("data_validation")
parameters.add("variable")
if "indexer" in parameters:
parameters.remove("indexer")
parameters.add("month")
parameters.add("season")
assert_equal(parameters, set(i.identifier for i in process.inputs), indicator.identifier)
# TODO : Extend test coverage
def test_processes(client, netcdf_datasets):
"""Run a dummy calculation for every process, keeping some default parameters."""
# indicators = finch.processes.indicators
processes = filter(lambda x: isinstance(x, XclimIndicatorBase), finch.processes.xclim.__dict__.values())
literal_inputs = {
"freq": "MS",
"window": "3",
"mid_date": "07-01",
"before_date": "07-01",
}
keep_defaults = ["thresh", "thresh_tasmin", "thresh_tasmax"]
attrs = xr.open_dataset(list(netcdf_datasets.values())[0], decode_times=False).attrs
for process in processes:
inputs = []
for process_input in process.inputs:
name = process_input.identifier
if name in netcdf_datasets.keys():
inputs.append(wps_input_file(name, netcdf_datasets[name]))
elif name in literal_inputs.keys():
inputs.append(wps_literal_input(name, literal_inputs[name]))
elif name in keep_defaults:
pass
else:
raise NotImplementedError
outputs = execute_process(client, process.identifier, inputs)
ds = xr.open_dataset(outputs[0])
output_variable = list(ds.data_vars)[0]
assert getattr(ds, output_variable).standard_name == process.xci.standard_name
assert ds.attrs['testing_session']
model = attrs["driving_model_id"]
experiment = attrs["driving_experiment_id"].replace(",", "+")
ensemble = (
f"r{attrs['driving_realization']}"
f"i{attrs['driving_initialization_method']}"
f"p{attrs['driving_physics_version']}"
)
date_start = pd.to_datetime(str(ds.time[0].values))
date_end = pd.to_datetime(str(ds.time[-1].values))
expected = (
f"{output_variable.replace('_', '-')}_"
f"{model}_{experiment}_{ensemble}_"
f"{date_start:%Y%m%d}-{date_end:%Y%m%d}.nc"
)
assert Path(outputs[0]).name == expected
def test_wps_daily_temperature_range_multiple(client, netcdf_datasets):
identifier = "dtr"
inputs = [wps_literal_input("freq", "YS")]
for _ in range(5):
inputs.append(wps_input_file("tasmax", netcdf_datasets["tasmax"]))
inputs.append(wps_input_file("tasmin", netcdf_datasets["tasmin"]))
with mock.patch(
"finch.processes.wps_xclim_indices.FinchProgressBar"
) as mock_progress:
outputs = execute_process(
client, identifier, inputs, output_names=["output_netcdf", "ref"]
)
assert mock_progress.call_args_list[0][1]["start_percentage"] == 0
assert mock_progress.call_args_list[0][1]["end_percentage"] == 20
assert mock_progress.call_args_list[4][1]["start_percentage"] == 80
assert mock_progress.call_args_list[4][1]["end_percentage"] == 100
et = etree.fromstring(outputs[1].data[0].encode())
urls = [e[2].text for e in et if e.tag.endswith("file")]
    assert len(urls) == 5, "Containing 5 files"
assert len(set(urls)) == 5, "With different links"
assert urls[1].endswith("-1.nc")
def test_wps_daily_temperature_range_multiple_not_same_length(client, netcdf_datasets):
identifier = "dtr"
inputs = [wps_literal_input("freq", "YS")]
for _ in range(5):
inputs.append(wps_input_file("tasmax", netcdf_datasets["tasmax"]))
inputs.append(wps_input_file("tasmin", netcdf_datasets["tasmin"]))
inputs.pop()
with pytest.raises(ProcessError, match="must be equal"):
execute_process(
client, identifier, inputs, output_names=["output_netcdf", "ref"]
)
def test_heat_wave_frequency_window_thresh_parameters(client, netcdf_datasets):
identifier = "heat_wave_frequency"
inputs = [
wps_input_file("tasmax", netcdf_datasets["tasmax"]),
wps_input_file("tasmin", netcdf_datasets["tasmin"]),
wps_literal_input("window", "3"),
wps_literal_input("freq", "YS"),
wps_literal_input("thresh_tasmin", "20 degC"),
wps_literal_input("thresh_tasmax", "25 degC"),
]
outputs = execute_process(client, identifier, inputs)
ds = xr.open_dataset(outputs[0])
assert ds.attrs["frequency"] == "yr"
assert ds.heat_wave_frequency.standard_name == _get_output_standard_name(identifier)
def test_heat_wave_index_thresh_parameter(client, netcdf_datasets):
identifier = "heat_wave_index"
inputs = [
wps_input_file("tasmax", netcdf_datasets["tasmax"]),
wps_literal_input("thresh", "30 degC"),
]
outputs = execute_process(client, identifier, inputs)
ds = xr.open_dataset(outputs[0])
assert ds["heat_wave_index"].standard_name == _get_output_standard_name(identifier)
def test_missing_options(client, netcdf_datasets):
identifier = "tg_mean"
inputs = [
wps_input_file("tas", netcdf_datasets["tas_missing"]),
wps_literal_input("freq", "YS"),
]
outputs = execute_process(client, identifier, inputs)
ds = xr.open_dataset(outputs[0])
np.testing.assert_array_equal(ds.tg_mean.isnull(), True)
inputs = [
wps_input_file("tas", netcdf_datasets["tas_missing"]),
wps_literal_input("freq", "YS"),
wps_literal_input("check_missing", "pct"),
wps_literal_input("missing_options", json.dumps({"pct": {"tolerance": 0.1}}))
]
outputs = execute_process(client, identifier, inputs)
ds = xr.open_dataset(outputs[0])
np.testing.assert_array_equal(ds.tg_mean.isnull(), False)
def test_stats_process(client, netcdf_datasets):
"""Test stats and the capacity to choose the variable."""
identifier = "stats"
inputs = [
wps_input_file("da", netcdf_datasets["pr_discharge"]),
wps_literal_input("freq", "YS"),
wps_literal_input("op", "max"),
wps_literal_input("season", "JJA"),
wps_literal_input("variable", "discharge")
]
outputs = execute_process(client, identifier, inputs)
ds = xr.open_dataset(outputs[0])
np.testing.assert_array_equal(ds.qsummermax.isnull(), False)
def test_freqanalysis_process(client, netcdf_datasets):
identifier = "freq_analysis"
inputs = [
wps_input_file("da", netcdf_datasets["discharge"]),
wps_literal_input("t", "2"),
wps_literal_input("t", "50"),
wps_literal_input("freq", "YS"),
wps_literal_input("mode", "max"),
wps_literal_input("season", "JJA"),
wps_literal_input("dist", "gumbel_r"),
wps_literal_input("variable", "discharge")
]
outputs = execute_process(client, identifier, inputs)
ds = xr.open_dataset(outputs[0])
np.testing.assert_array_equal(ds.q1maxsummer.shape, (2, 5, 6))
class TestFitProcess:
identifier = "fit"
def test_simple(self, client, netcdf_datasets):
inputs = [
wps_input_file("da", netcdf_datasets["discharge"]),
wps_literal_input("dist", "norm"),
]
outputs = execute_process(client, self.identifier, inputs)
ds = xr.open_dataset(outputs[0])
np.testing.assert_array_equal(ds.params.shape, (2, 5, 6))
def test_nan(self, client, q_series, tmp_path):
q_series([333, 145, 203, 109, 430, 230, np.nan]).to_netcdf(tmp_path / "q.nc")
inputs = [
wps_input_file("da", tmp_path / "q.nc"),
wps_literal_input("dist", "norm"),
]
outputs = execute_process(client, self.identifier, inputs)
ds = xr.open_dataset(outputs[0])
np.testing.assert_array_equal(ds.params.isnull(), False)
def test_rain_approximation(client, pr_series, tas_series, tmp_path):
identifier = "prlp"
pr_series(np.ones(10)).to_netcdf(tmp_path / 'pr.nc')
tas_series(np.arange(10) + K2C).to_netcdf(tmp_path / 'tas.nc')
inputs = [wps_input_file("pr", tmp_path / "pr.nc"),
wps_input_file("tas", tmp_path / "tas.nc"),
wps_literal_input("thresh", "5 degC"),
wps_literal_input("method", "binary")]
outputs = execute_process(client, identifier, inputs)
with xr.open_dataset(outputs[0]) as ds:
np.testing.assert_allclose(
ds.prlp, [0, 0, 0, 0, 0, 1, 1, 1, 1, 1], atol=1e-5, rtol=1e-3
)
@pytest.mark.xfail
def test_two_nondefault_variable_name(client, pr_series, tas_series, tmp_path):
identifier = "prlp"
pr_series(np.ones(10)).to_dataset(name="my_pr").to_netcdf(tmp_path / 'pr.nc')
tas_series(np.arange(10) + K2C).to_dataset(name="my_tas").to_netcdf(tmp_path / 'tas.nc')
inputs = [wps_input_file("pr", tmp_path / "pr.nc"),
wps_input_file("tas", tmp_path / "tas.nc"),
wps_literal_input("thresh", "5 degC"),
wps_literal_input("method", "binary"),
wps_literal_input("variable", "my_pr")
]
outputs = execute_process(client, identifier, inputs)
with xr.open_dataset(outputs[0]) as ds:
np.testing.assert_allclose(
ds.prlp, [0, 0, 0, 0, 0, 1, 1, 1, 1, 1], atol=1e-5, rtol=1e-3
)
def test_degree_days_exceedance_date(client, tmp_path):
identifier = "degree_days_exceedance_date"
tas = open_dataset("FWI/GFWED_sample_2017.nc").tas
tas.attrs.update(
cell_methods="time: mean within days", standard_name="air_temperature"
)
tas.to_netcdf(tmp_path / "tas.nc")
inputs = [wps_input_file("tas", tmp_path / "tas.nc"),
wps_literal_input("thresh", "4 degC"),
wps_literal_input("op", ">"),
wps_literal_input("sum_thresh", "200 K days")
]
outputs = execute_process(client, identifier, inputs)
with xr.open_dataset(outputs[0]) as ds:
np.testing.assert_array_equal(ds.degree_days_exceedance_date, np.array([[153, 136, 9, 6]]).T)
|
170814
|
import numpy as np
import matplotlib.pyplot as plt
from tools import get_array, dags
data = dags[0]
find_or_add_index = get_array(data, "find_or_add index")
find_or_add_add = get_array(data, "find_or_add add")
find_or_add_level = get_array(data, "find_or_add level")
plt.xlabel("index")
plt.ylabel("num")
indices_add = find_or_add_add == 1
indices_noadd = find_or_add_add == 0
print("Num add: ", np.sum(indices_add))
print("Num no add: ", np.sum(indices_noadd))
assert np.sum(indices_add) + np.sum(indices_noadd) == len(find_or_add_index)
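# One histogram per DAG level: for each level that received lookups, plot the
# distribution of probed indices for the "no add" case (existing entries).
# Note that the 2x3 subplot grid assumes at most six non-empty levels.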
plot_index = 0
for level in range(32):
level_indices = level == find_or_add_level
if np.sum(level_indices) == 0:
continue
plot_index += 1
plt.subplot(2, 3, plot_index)
plt.title("level = " + str(level))
indices = find_or_add_index[np.logical_and(indices_noadd, level_indices)]
plt.hist(indices, color="blue", bins=range(0, int(np.max(indices) + 1)))
plt.show()
|
170825
|
import pycristoforo.utils.utils as utils_py
import pycristoforo.geo.key_value_pair as keyvaluepair_py
class CountryList:
def __init__(self, full_path):
"""
Constructor method that builds the country dictionary with 'key','value' pair
:param full_path: path where the geojson is stored
"""
self.__country_dict = {}
uid: int = 0
countries = utils_py.read_json(full_path)
for elem in countries['features']:
uid += 1
country = keyvaluepair_py.KeyValuePair(elem['properties']['ADMIN'].lower(), str(uid))
self.__country_dict[country.key] = country.value
country = keyvaluepair_py.KeyValuePair(elem['properties']['ISO_A3'].lower(), str(uid))
self.__country_dict[country.key] = country.value
country = keyvaluepair_py.KeyValuePair(str(uid), elem['geometry'])
self.__country_dict[country.key] = country.value
    def get_by_key(self, key: str):
"""
It returns the element with a given key of the private dictionary
:param key: dict key
:return: dict element
"""
try:
return self.__country_dict[key.lower()]
except KeyError as ex:
print(f'No key {ex} in country dictionary')
return None
def get_country_dict(self):
"""
Getter method for private attribute "country_dict"
:return: dictionary of countries
"""
return self.__country_dict
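# A minimal usage sketch (the geojson path is hypothetical and must contain
# features with 'ADMIN' and 'ISO_A3' properties, e.g. an "Italy" feature):
if __name__ == "__main__":
    countries = CountryList("countries.geojson")
    uid = countries.get_by_key("italy")          # lookup by ADMIN name
    same_uid = countries.get_by_key("ita")       # lookup by ISO_A3 code
    geometry = countries.get_by_key(uid)         # the uid resolves to the geometry
    print(uid, uid == same_uid, type(geometry))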
|
170867
|
import kdtree
def nonmaximalsuppression(tensor, threshold):
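    # Scan a square (stride x stride) score map stored in `tensor`, keeping the
    # coordinates of local maxima above `threshold`; corners, borders and inner
    # pixels are handled separately so every neighbour comparison stays in bounds.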
pred_data = tensor.storage()
offset = tensor.storage_offset()
stride = int(tensor.stride()[0])
numel = tensor.numel()
points = []
# Corners
val = pred_data[0 + offset]
if val >= threshold and val >= pred_data[1 + offset] and val >= pred_data[stride + offset]:
points.append([0, 0])
val = pred_data[stride - 1 + offset]
if val >= threshold and val >= pred_data[stride - 2 + offset] and val >= pred_data[2 * stride - 1 + offset]:
points.append([stride - 1, 0])
val = pred_data[numel - stride + offset]
if val > threshold and val >= pred_data[numel - stride + 1 + offset] and val >= pred_data[numel - 2 * stride + offset]:
points.append([0, stride - 1])
val = pred_data[numel - 1 + offset]
if val > threshold and val >= pred_data[numel -2 + offset] and val >= pred_data[numel - 1 - stride + offset]:
points.append([stride - 1, stride - 1])
# Top y==0
for i in range(1,stride-1):
i += offset
val = pred_data[i]
if val >= threshold and val >= pred_data[i-1] and val >= pred_data[i+1] and val >= pred_data[i+stride]:
points.append([i - offset, 0])
# Bottom y==stride-1
for i in range(numel-stride+1,numel-1):
i += offset
val = pred_data[i]
if val >= threshold and val >= pred_data[i-1] and val >= pred_data[i+1] and val >= pred_data[i-stride]:
points.append([i - numel + stride - offset, stride - 1])
# Front x==0
for i in range(stride, stride * (stride - 1), stride):
i += offset
val = pred_data[i]
if val >= threshold and val >= pred_data[i+stride] and val >= pred_data[i-stride] and val >= pred_data[i+1]:
points.append([0, (i - offset) // stride])
# Back x == stride-1
for i in range(stride - 1, stride * (stride - 1), stride):
i += offset
val = pred_data[i]
if val >= threshold and val >= pred_data[i+stride] and val >= pred_data[i-stride] and val >= pred_data[i-1]:
points.append([stride - 1, (i - offset) // stride])
# Remaining inner pixels
for i in range(stride+1, stride * (stride - 1), stride):
for j in range(i,i+stride-2):
j += offset
val = pred_data[j]
if val >= threshold and val >= pred_data[j+1] and val >= pred_data[j-1] and val >= pred_data[j+stride] and val >= pred_data[j-stride]:
points.append([(j - offset) % stride, i // stride])
return points
def euclid(pt1, pt2):
return (pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2
def rrtree(lat, threshold):
if lat is None or len(lat) == 0:
return []
tree = kdtree.create(dimensions=2)
distance_threshold = threshold # 8^2
for i,pt in enumerate(lat):
t_pt = (float(pt[0]), float(pt[1]))
search_result = tree.search_nn(t_pt, dist=euclid)
if search_result is None:
tree.add(t_pt)
else:
node, dist = search_result[0], search_result[1]
if dist >= distance_threshold:
tree.add(t_pt)
filtered_points = [(int(pt.data[0]), int(pt.data[1])) for pt in kdtree.level_order(tree)]
return filtered_points
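# A minimal sketch exercising the point-merging step on synthetic detections;
# the squared-distance threshold of 64 corresponds to the 8-pixel radius noted
# in rrtree() above.
if __name__ == "__main__":
    raw_points = [(10, 10), (12, 11), (40, 40), (41, 42), (100, 5)]
    # Nearby peaks collapse to a single representative point each.
    print(rrtree(raw_points, 64))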
|
170881
|
import timeit
import argparse
import numpy as np
from core.bamnet.bamnet import BAMnetAgent
from core.matchnn.matchnn import MatchNNAgent
from core.bow.bow import BOWnetAgent
from core.bow.pbow import PBOWnetAgent
from core.build_data.utils import vectorize_data
from core.utils.utils import *
from core.config import *
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-config', '--config', required=True, type=str, help='path to the config file')
parser.add_argument('--sample', action='store_true', help='flag: run on sample data')
cfg = vars(parser.parse_args())
opt = get_config(cfg['config'])
print_config(opt)
# Ensure data is built
train_vec = load_json(os.path.join(opt['data_dir'], opt['train_data']))
valid_vec = load_json(os.path.join(opt['data_dir'], opt['valid_data']))
if cfg['sample']:
train_vec = [x[:5] for x in train_vec]
valid_vec = [x[:5] for x in valid_vec]
vocab2id = load_json(os.path.join(opt['data_dir'], 'vocab2id.json'))
train_queries, train_raw_queries, train_query_mentions, train_query_marks, train_memories, _, train_gold_ans_inds, _, _, _ = train_vec
train_queries, train_query_words, train_query_marks, train_query_lengths, train_memories, _ = vectorize_data(train_queries, train_query_mentions, train_query_marks, \
train_memories, max_query_size=opt['query_size'], \
max_ans_path_bow_size=opt['ans_path_bow_size'], \
vocab2id=vocab2id)
valid_queries, valid_raw_queries, valid_query_mentions, valid_query_marks, valid_memories, valid_cand_labels, valid_gold_ans_inds, valid_gold_ans_labels, _, _ = valid_vec
valid_queries, valid_query_words, valid_query_marks, valid_query_lengths, valid_memories, _ = vectorize_data(valid_queries, valid_query_mentions, valid_query_marks, \
valid_memories, max_query_size=opt['query_size'], \
max_ans_path_bow_size=opt['ans_path_bow_size'], \
vocab2id=vocab2id)
start = timeit.default_timer()
model_name = opt.get('model_name', 'bamnet')
if model_name == 'bamnet':
Agent = BAMnetAgent
elif model_name == 'matchnn':
Agent = MatchNNAgent
elif model_name == 'bow':
Agent = BOWnetAgent
elif model_name == 'pbow':
Agent = PBOWnetAgent
else:
raise RuntimeError('Unknown model_name: {}'.format(model_name))
model = Agent(opt, STOPWORDS, vocab2id)
model.train([train_memories, train_queries, train_query_words, train_raw_queries, train_query_mentions, train_query_marks, train_query_lengths], train_gold_ans_inds, \
[valid_memories, valid_queries, valid_query_words, valid_raw_queries, valid_query_mentions, valid_query_marks, valid_query_lengths], \
valid_gold_ans_inds, valid_cand_labels, valid_gold_ans_labels)
print('Runtime: %ss' % (timeit.default_timer() - start))
|
170882
|
import re
import xml.etree.ElementTree as ET
from os import path
from collections import Counter
import argparse
import csv
import json
import os
import random
# package local imports
import sys
import uuid
import matplotlib.pyplot as plt
import math
from dateutil.parser import parse
from tdigest import TDigest
import numpy as np
import boto3
from tqdm import tqdm
# package local imports
sys.path.append(os.getcwd() + "/..")
field_tokenization = ",.<>{}[]\"':;!@#$%^&*()-+=~"
from common_datagen import (
download_url,
generate_setup_json,
compress_files,
generate_inputs_dict_item,
humanized_bytes,
del_non_use_case_specific_keys,
add_key_metric,
upload_dataset_artifacts_s3,
add_deployment_requirements_redis_server_module,
add_deployment_requirements_benchmark_tool,
add_deployment_requirements_utilities,
init_deployment_requirement,
remove_file_if_exists,
decompress_file,
)
from pathlib import Path
origin = "https://dumps.wikimedia.org/enwiki/20210501/enwiki-20210501-pages-articles1.xml-p1p41242.bz2"
filename = "enwiki-20210501-pages-articles1.xml-p1p41242.bz2"
decompressed_fname = "enwiki-20210501-pages-articles1.xml-p1p41242"
def generate_enwiki_pages_index_type():
types = {}
for f in ["title", "text", "comment"]:
types[f] = "text"
for f in ["username"]:
types[f] = "tag"
for f in ["timestamp"]:
types[f] = "numeric"
return types
def generate_lognormal_dist(n_elements):
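    # Draw n_elements log-normal samples and rescale them into [0, 1]; the long
    # right tail is later used to skew the simulated query time ranges so most
    # queries touch a narrow, "hot" slice of the keyspace while a few reach back
    # over "cold" data.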
mu, sigma = 0.0, 1
s = np.random.lognormal(mu, sigma, n_elements)
min_s = min(s)
max_s = max(s)
diff = max_s - min_s
s = s - min_s
s = s / diff
return s
def generate_ft_create_row(index, index_types, use_ftadd, no_index_list):
if use_ftadd:
cmd = ['"FT.CREATE"', '"{index}"'.format(index=index), '"SCHEMA"']
else:
cmd = [
'"FT.CREATE"',
'"{index}"'.format(index=index),
'"ON"',
'"HASH"',
'"SCHEMA"',
]
for f, v in index_types.items():
cmd.append('"{}"'.format(f))
cmd.append('"{}"'.format(v))
if f in no_index_list:
cmd.append('"NOINDEX"')
        else:
            cmd.append('"SORTABLE"')
return cmd
def generate_ft_drop_row(index):
cmd = ["FT.DROP", "{index}".format(index=index), "DD"]
return cmd
def EscapeTextFileString(field):
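    # Backslash-escape every RediSearch tokenization character and turn real
    # newlines into literal "\n" so the text survives as a single field value
    # in the generated command files.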
for char_escape in field_tokenization:
field = field.replace(char_escape, "\\{}".format(char_escape))
field = field.replace("\n", " \\n")
return field
def use_case_to_cmd(use_ftadd, title, text, comment, username, timestamp, total_docs):
escaped_title = EscapeTextFileString(title)
escaped_text = EscapeTextFileString(text)
escaped_comment = EscapeTextFileString(comment)
size = len(escaped_title) + len(escaped_text) + len(escaped_comment) + len(username)
unprunned_hash = {
"title": title,
"text": text,
"comment": comment,
"username": username,
"timestamp": timestamp,
}
# print(len(text),size)
hash = {
"title": escaped_title,
"text": escaped_text,
"comment": escaped_comment,
"username": username,
"timestamp": timestamp,
}
docid_str = "doc:{hash}:{n}".format(hash=uuid.uuid4().hex, n=total_docs)
fields = []
for f, v in hash.items():
if v is not None:
fields.append(f)
fields.append(v)
if use_ftadd is False:
cmd = ["WRITE", "W1", 1, "HSET", docid_str]
else:
cmd = ["WRITE", "W1", 2, "FT.ADD", indexname, docid_str, "1.0", "FIELDS"]
for x in fields:
cmd.append(x)
return cmd, size
def getQueryWords(doc, stop_words, size):
words = doc["comment"]
words = re.sub("[^0-9a-zA-Z]+", " ", words)
words = words.split(" ")
queryWords = []
totalQueryWords = 0
for word in words:
word = word.lstrip().rstrip()
if len(word) > 3 and word not in stop_words and word != "Wikipedia":
queryWords.append(word)
totalQueryWords = totalQueryWords + 1
if totalQueryWords > size:
break
return queryWords, totalQueryWords
def generate_benchmark_commands(
total_benchmark_commands,
bench_fname,
all_fname,
indexname,
docs,
stop_words,
use_numeric_range_searchs,
ts_digest,
p_writes,
query_choices,
):
total_benchmark_reads = 0
total_benchmark_writes = 0
all_csvfile = open(all_fname, "a", newline="")
bench_csvfile = open(bench_fname, "w", newline="")
all_csv_writer = csv.writer(all_csvfile, delimiter=",", quoting=csv.QUOTE_ALL)
bench_csv_writer = csv.writer(bench_csvfile, delimiter=",", quoting=csv.QUOTE_ALL)
progress = tqdm(unit="docs", total=total_benchmark_commands)
total_docs = len(docs)
## timestamp related
timestamps_pdist = generate_lognormal_dist(total_benchmark_commands)
min_ts = ts_digest.percentile(0.0)
max_ts = ts_digest.percentile(100.0)
query_range_digest = TDigest()
generated_commands = 0
while generated_commands < total_benchmark_commands:
query_ts_pdist = timestamps_pdist[generated_commands]
percentile = (1.0 - query_ts_pdist) * 100.0
query_min_ts = ts_digest.percentile(percentile)
random_doc_pos = random.randint(0, total_docs - 1)
doc = docs[random_doc_pos]
# decide read or write
p_cmd = random.random()
if p_cmd < p_writes:
## WRITE
total_benchmark_writes = total_benchmark_writes + 1
generated_row, doc_size = use_case_to_cmd(
use_ftadd,
doc["title"],
doc["text"],
doc["comment"],
doc["username"],
doc["timestamp"],
generated_commands,
)
else:
## READ
total_benchmark_reads = total_benchmark_reads + 1
words, totalW = getQueryWords(doc, stop_words, 2)
choice = random.choices(query_choices)[0]
generated_row = None
numeric_range_str = ""
if use_numeric_range_searchs:
numeric_range_str = "@timestamp:[{} {}] ".format(query_min_ts, max_ts)
query_range_digest.update(int(max_ts - query_min_ts))
if choice == "simple-1word-query" and len(words) >= 1:
generated_row = generate_ft_search_row(
indexname,
"simple-1word-query",
"{}{}".format(numeric_range_str, words[0]),
)
elif choice == "2word-union-query" and len(words) >= 2:
generated_row = generate_ft_search_row(
indexname,
"2word-union-query",
"{}{} {}".format(numeric_range_str, words[0], words[1]),
)
elif choice == "2word-intersection-query" and len(words) >= 2:
generated_row = generate_ft_search_row(
indexname,
"2word-intersection-query",
"{}{}|{}".format(numeric_range_str, words[0], words[1]),
)
        if generated_row is not None:
            all_csv_writer.writerow(generated_row)
            bench_csv_writer.writerow(generated_row)
            progress.update()
            generated_commands = generated_commands + 1
progress.close()
bench_csvfile.close()
all_csvfile.close()
# print()
xx = []
yy = []
p90 = query_range_digest.percentile(90.0)
dataset_percent = ts_digest.cdf(p90)
print(
"90% of the read queries target at max {} percent o keyspace".format(
dataset_percent
)
)
print(
"100% of the read queries target at max {} percent o keyspace".format(
ts_digest.cdf(max_ts - min_ts)
)
)
for centroid in query_range_digest.centroids_to_list():
ts_m = centroid["m"]
xx.append(ts_m)
yy.append(query_range_digest.cdf(ts_m))
plt.scatter(xx, yy)
plt.title("EnWiki pages Query time range")
plt.xlabel("Query time range")
plt.ylabel("cdf")
plt.xscale("log")
plt.show()
return total_benchmark_reads, total_benchmark_writes
def generate_ft_search_row(index, query_name, query):
cmd = [
"READ",
query_name,
1,
"FT.SEARCH",
"{index}".format(index=index),
"{query}".format(query=query),
]
return cmd
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="RediSearch FTSB data generator.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--project", type=str, default="redisearch", help="the project being tested"
)
parser.add_argument(
"--seed",
type=int,
default=12345,
help="the random seed used to generate random deterministic outputs",
)
parser.add_argument(
"--read-ratio", type=int, default=10, help="query time read ratio"
)
parser.add_argument(
"--write-ratio", type=int, default=1, help="query time write ratio"
)
parser.add_argument(
"--min-doc-len",
type=int,
default=1024,
help="Discard any generated document bellow the specified value",
)
parser.add_argument(
"--doc-limit",
type=int,
default=100000,
help="the total documents to generate to be added in the setup stage",
)
parser.add_argument(
"--total-benchmark-commands",
type=int,
default=100000,
help="the total commands to generate to be issued in the benchmark stage",
)
parser.add_argument(
"--stop-words",
type=str,
default="a,is,the,an,and,are,as,at,be,but,by,for,if,in,into,it,no,not,of,on,or,such,that,their,then,there,these,they,this,to,was,will,with",
help="When searching, stop-words are ignored and treated as if they were not sent to the query processor. Therefore, to be 100% correct we need to prevent those words to enter a query",
)
parser.add_argument(
"--index-name",
type=str,
default="enwiki_pages",
help="the name of the RediSearch index to be used",
)
parser.add_argument(
"--test-name",
type=str,
default="100K-enwiki_pages-hashes",
help="the name of the test",
)
parser.add_argument(
"--test-description",
type=str,
default="benchmark focused on full text search queries performance, making usage of English-language Wikipedia:Database page revisions",
help="the full description of the test",
)
parser.add_argument(
"--upload-artifacts-s3",
default=False,
action="store_true",
help="uploads the generated dataset files and configuration file to public benchmarks.redislabs bucket. Proper credentials are required",
)
parser.add_argument(
"--use-ftadd",
default=False,
action="store_true",
help="Use FT.ADD instead of HSET",
)
parser.add_argument(
"--query-use-ts-numeric-range-filter",
default=False,
action="store_true",
help="Use a numeric range filter on queries to simulate searchs that imply a log-normal keyspace access (very hot data and some cold data)",
)
parser.add_argument(
"--big-text-field-noindex",
default=False,
action="store_true",
help="On index creation mark the largest text field as no index. If a field has NOINDEX and doesn't have SORTABLE, it will just be ignored by the index. This is usefull to test RoF for example.",
)
parser.add_argument(
"--temporary-work-dir",
type=str,
default="./tmp",
help="The temporary dir to use as working directory for file download, compression,etc... ",
)
parser.add_argument(
"--query-choices",
type=str,
default="simple-1word-query,2word-union-query,2word-intersection-query",
help="comma separated list of queries to produce. one of: simple-1word-query,2word-union-query,2word-intersection-query",
)
parser.add_argument(
"--upload-artifacts-s3-uncompressed",
action="store_true",
help="uploads the generated dataset files and configuration file to public benchmarks.redislabs bucket. Proper credentials are required",
)
args = parser.parse_args()
query_choices = args.query_choices.split(",")
use_case_specific_arguments = del_non_use_case_specific_keys(dict(args.__dict__))
# generate the temporary working dir if required
working_dir = args.temporary_work_dir
Path(working_dir).mkdir(parents=True, exist_ok=True)
seed = args.seed
project = args.project
doc_limit = args.doc_limit
stop_words = args.stop_words.split(",")
indexname = args.index_name
test_name = args.test_name
use_numeric_range_searchs = args.query_use_ts_numeric_range_filter
no_index_list = []
big_text_field_noindex = args.big_text_field_noindex
if big_text_field_noindex:
test_name += "-big-text-field-noindex"
no_index_list = ["text"]
if use_numeric_range_searchs:
test_name += "-lognormal-numeric-range-searchs"
min_doc_len = args.min_doc_len
description = args.test_description
s3_bucket_name = "benchmarks.redislabs"
s3_bucket_path = "redisearch/datasets/{}/".format(test_name)
s3_uri = "https://s3.amazonaws.com/{bucket_name}/{bucket_path}".format(
bucket_name=s3_bucket_name, bucket_path=s3_bucket_path
)
benchmark_output_file = "{test_name}.{project}.commands".format(
test_name=test_name, project=project
)
benchmark_config_file = "{test_name}.{project}.cfg.json".format(
test_name=test_name, project=project
)
all_fname = "{}.ALL.csv".format(benchmark_output_file)
setup_fname = "{}.SETUP.csv".format(benchmark_output_file)
bench_fname = "{}.BENCH.QUERY_{}_write_{}_to_read_{}.csv".format(
benchmark_output_file,
"__".join(query_choices),
args.write_ratio,
args.read_ratio,
)
all_fname_compressed = "{}.ALL.tar.gz".format(benchmark_output_file)
setup_fname_compressed = "{}.SETUP.tar.gz".format(benchmark_output_file)
bench_fname_compressed = "{}.BENCH.tar.gz".format(benchmark_output_file)
remote_url_all = "{}{}".format(s3_uri, all_fname_compressed)
remote_url_setup = "{}{}".format(s3_uri, setup_fname_compressed)
remote_url_bench = "{}{}".format(s3_uri, bench_fname_compressed)
## remove previous files if they exist
all_artifacts = [
all_fname,
setup_fname,
bench_fname,
all_fname_compressed,
setup_fname_compressed,
bench_fname_compressed,
benchmark_config_file,
]
for artifact in all_artifacts:
remove_file_if_exists(artifact)
use_ftadd = args.use_ftadd
total_benchmark_commands = args.total_benchmark_commands
used_indices = [indexname]
setup_commands = []
teardown_commands = []
key_metrics = []
add_key_metric(
key_metrics,
"setup",
"throughput",
"OverallRates.overallOpsRate",
"Overall writes query rate",
"docs/sec",
"numeric",
"higher-better",
1,
)
add_key_metric(
key_metrics,
"setup",
"latency",
"OverallQuantiles.allCommands.q50",
"Overall writes query q50 latency",
"ms",
"numeric",
"lower-better",
2,
)
add_key_metric(
key_metrics,
"benchmark",
"throughput",
"OverallRates.overallOpsRate",
"Overall writes query rate",
"docs/sec",
"numeric",
"higher-better",
1,
)
add_key_metric(
key_metrics,
"benchmark",
"latency",
"OverallQuantiles.allCommands.q50",
"Overall writes query q50 latency",
"ms",
"numeric",
"lower-better",
2,
)
total_writes = 0
total_reads = 0
total_updates = 0
total_deletes = 0
# 1:10
p_writes = float(args.write_ratio) / (
float(args.read_ratio) + float(args.write_ratio)
)
json_version = "0.1"
benchmark_repetitions_require_teardown_and_resetup = False
print("-- Benchmark: {} -- ".format(test_name))
print("-- Description: {} -- ".format(description))
total_docs = 0
print("Using random seed {0}".format(args.seed))
random.seed(args.seed)
print("Using the following stop-words: {0}".format(stop_words))
index_types = generate_enwiki_pages_index_type()
print("-- generating the ft.create commands -- ")
ft_create_cmd = generate_ft_create_row(
indexname, index_types, use_ftadd, no_index_list
)
print("FT.CREATE command: {}".format(" ".join(ft_create_cmd)))
setup_commands.append(ft_create_cmd)
print("-- generating the ft.drop commands -- ")
ft_drop_cmd = generate_ft_drop_row(indexname)
teardown_commands.append(ft_drop_cmd)
csv_filenames = []
print(
"Retrieving the required English-language Wikipedia:Database page edition data"
)
if path.exists(filename) is False:
print("Downloading {} to {}".format(origin, filename))
download_url(origin, filename)
else:
print("{} exists, no need to download again".format(filename))
if path.exists(decompressed_fname) is False:
print("Decompressing {}".format(filename))
decompress_file(filename)
docs = []
tree = ET.iterparse(decompressed_fname)
print("Reading {}\n".format(decompressed_fname))
progress = tqdm(unit="docs")
doc = {}
text = None
comment = None
username = None
timestamp = None
ts_digest = TDigest()
for event, elem in tree:
if elem.tag == "{http://www.mediawiki.org/xml/export-0.10/}page":
doc = {}
doc["title"] = elem.findtext(
"{http://www.mediawiki.org/xml/export-0.10/}title"
)
doc["text"] = text
doc["comment"] = comment
doc["username"] = username
doc["timestamp"] = int(timestamp)
ts_digest.update(int(timestamp))
if (
doc["text"] is not None
and doc["comment"] is not None
and doc["username"] is not None
and doc["timestamp"] is not None
):
total_docs = total_docs + 1
docs.append(doc)
progress.update()
elem.clear() # won't need the children any more
if elem.tag == "{http://www.mediawiki.org/xml/export-0.10/}revision":
text = elem.findtext("{http://www.mediawiki.org/xml/export-0.10/}text")
comment = elem.findtext(
"{http://www.mediawiki.org/xml/export-0.10/}comment"
)
ts = elem.findtext("{http://www.mediawiki.org/xml/export-0.10/}timestamp")
dt = parse(ts)
timestamp = dt.timestamp()
if elem.tag == "{http://www.mediawiki.org/xml/export-0.10/}contributor":
username = elem.findtext(
"{http://www.mediawiki.org/xml/export-0.10/}username"
)
progress.close()
print("\n")
setup_csvfile = open(setup_fname, "w", newline="")
all_csvfile = open(all_fname, "a", newline="")
all_csv_writer = csv.writer(all_csvfile, delimiter=",", quoting=csv.QUOTE_ALL)
setup_csv_writer = csv.writer(setup_csvfile, delimiter=",", quoting=csv.QUOTE_ALL)
print("\n")
print("-- generating the setup commands -- \n")
progress = tqdm(unit="docs", total=args.doc_limit)
doc_limit = args.doc_limit
docs_sizes = []
total_docs = 0
if doc_limit == 0:
doc_limit = len(docs)
while total_docs < doc_limit:
random_doc_pos = random.randint(0, len(docs) - 1)
doc = docs[random_doc_pos]
cmd, doc_size = use_case_to_cmd(
use_ftadd,
doc["title"],
doc["text"],
doc["comment"],
doc["username"],
doc["timestamp"],
total_docs,
)
if doc_size >= min_doc_len:
total_docs = total_docs + 1
docs_sizes.append(doc_size)
progress.update()
            setup_csv_writer.writerow(cmd)
            all_csv_writer.writerow(cmd)
# fixed bin size
bins = np.linspace(
math.ceil(min(docs_sizes)), math.floor(max(docs_sizes)), 200
) # fixed number of bins
plt.xlim([1, max(docs_sizes) + 5])
plt.hist(docs_sizes, bins=bins, alpha=0.5)
plt.title(
"EnWiki pages document size frequency. Avg document size: {} Bytes".format(
int(np.average(docs_sizes))
)
)
plt.xlabel("Document Size in Bytes")
plt.ylabel("count")
plt.xscale("log")
plt.show()
xx = []
yy = []
for centroid in ts_digest.centroids_to_list():
# print(centroid)
ts_m = centroid["m"]
xx.append(ts_m)
yy.append(ts_digest.cdf(ts_m))
plt.scatter(xx, yy)
plt.title("EnWiki pages timestamp range")
plt.xlabel("timestamp")
plt.ylabel("cdf")
# plt.xscale('log')
plt.show()
progress.close()
all_csvfile.close()
setup_csvfile.close()
print(
"-- generating {} full text search commands -- ".format(
total_benchmark_commands
)
)
print("\t saving to {} and {}".format(bench_fname, all_fname))
total_benchmark_reads, total_benchmark_writes = generate_benchmark_commands(
total_benchmark_commands,
bench_fname,
all_fname,
indexname,
docs,
stop_words,
use_numeric_range_searchs,
ts_digest,
p_writes,
query_choices,
)
total_commands = total_docs
total_setup_commands = total_docs
cmd_category_all = {
"setup-writes": total_docs,
"writes": total_writes,
"updates": total_updates,
"reads": total_reads,
"deletes": total_deletes,
}
cmd_category_setup = {
"setup-writes": total_docs,
"writes": 0,
"updates": 0,
"reads": 0,
"deletes": 0,
}
cmd_category_benchmark = {
"setup-writes": 0,
"writes": total_benchmark_writes,
"updates": total_updates,
"reads": total_benchmark_reads,
"deletes": total_deletes,
}
status, uncompressed_size, compressed_size = compress_files(
[all_fname], all_fname_compressed
)
inputs_entry_all = generate_inputs_dict_item(
"all",
all_fname,
"contains both setup and benchmark commands",
remote_url_all,
uncompressed_size,
all_fname_compressed,
compressed_size,
total_commands,
cmd_category_all,
)
status, uncompressed_size, compressed_size = compress_files(
[setup_fname], setup_fname_compressed
)
inputs_entry_setup = generate_inputs_dict_item(
"setup",
setup_fname,
"contains only the commands required to populate the dataset",
remote_url_setup,
uncompressed_size,
setup_fname_compressed,
compressed_size,
total_setup_commands,
cmd_category_setup,
)
status, uncompressed_size, compressed_size = compress_files(
[bench_fname], bench_fname_compressed
)
inputs_entry_benchmark = generate_inputs_dict_item(
"benchmark",
bench_fname,
"contains only the benchmark commands (requires the dataset to have been previously populated)",
remote_url_bench,
uncompressed_size,
bench_fname_compressed,
compressed_size,
total_benchmark_commands,
cmd_category_benchmark,
)
inputs = {
"all": inputs_entry_all,
"setup": inputs_entry_setup,
"benchmark": inputs_entry_benchmark,
}
deployment_requirements = init_deployment_requirement()
add_deployment_requirements_redis_server_module(
deployment_requirements, "search", {}
)
add_deployment_requirements_utilities(
deployment_requirements, "ftsb_redisearch", {}
)
add_deployment_requirements_benchmark_tool(
deployment_requirements, "ftsb_redisearch"
)
run_stages = ["benchmark"]
run_stages = ["setup", "benchmark"]
with open(benchmark_config_file, "w") as setupf:
setup_json = generate_setup_json(
json_version,
project,
use_case_specific_arguments,
test_name,
description,
run_stages,
deployment_requirements,
key_metrics,
inputs,
setup_commands,
teardown_commands,
used_indices,
total_commands,
total_setup_commands,
total_benchmark_commands,
total_docs,
total_writes,
total_updates,
total_reads,
total_deletes,
benchmark_repetitions_require_teardown_and_resetup,
["setup"],
["benchmark"],
)
json.dump(setup_json, setupf, indent=2)
if args.upload_artifacts_s3:
artifacts = [
benchmark_config_file,
all_fname_compressed,
setup_fname_compressed,
bench_fname_compressed,
]
upload_dataset_artifacts_s3(s3_bucket_name, s3_bucket_path, artifacts)
if args.upload_artifacts_s3_uncompressed:
artifacts = [setup_fname, bench_fname]
upload_dataset_artifacts_s3(s3_bucket_name, s3_bucket_path, artifacts)
print("############################################")
print("All artifacts generated.")
|
170894
|
from . import model_zoo
from .resnet import *
from .fcn import *
from .pspnet import *
from .deeplab import *
|
170936
|
import httpretty
from nose.tools import raises
from tests import FulcrumTestCase
class PhotoTest(FulcrumTestCase):
@httpretty.activate
    def test_find(self):
httpretty.register_uri(httpretty.GET, self.api_root + '/photos/abc-123',
body='{"photo": {"id": "abc-123"}}',
status=200)
photo = self.fulcrum_api.photos.find('abc-123')
self.assertIsInstance(photo, dict)
self.assertEqual(photo['photo']['id'], 'abc-123')
@raises(AttributeError)
def test_missing_delete(self):
self.fulcrum_api.photos.delete('abc-123')
@raises(AttributeError)
def test_missing_create(self):
self.fulcrum_api.photos.create({'id': 'abc-123'})
@raises(AttributeError)
def test_missing_update(self):
self.fulcrum_api.photos.update('abc-123', {'id': 'abc-123'})
|
170952
|
import re
from src.commons.exceptions import ParameterValidationException
class WrongDatasetNameException(Exception):
pass
class WrongProjectNameException(Exception):
pass
class WrongWriteDispositionException(Exception):
pass
class WrongCreateDispositionException(Exception):
pass
project_id_pattern = re.compile("^[a-zA-Z0-9-]+$")
dataset_id_pattern = re.compile("^[a-zA-Z0-9_]+$")
available_create_dispositions = ["CREATE_IF_NEEDED", "CREATE_NEVER"]
available_write_dispositions = ["WRITE_TRUNCATE", "WRITE_APPEND", "WRITE_EMPTY"]
def validate_restore_request_params(
source_project_id=None, source_dataset_id=None,
target_project_id=None, target_dataset_id=None,
create_disposition=None, write_disposition=None):
try:
if source_project_id:
validate_project_id(source_project_id)
if source_dataset_id:
validate_dataset_id(source_dataset_id)
if target_project_id:
validate_project_id(target_project_id)
if target_dataset_id:
validate_dataset_id(target_dataset_id)
if write_disposition:
validate_write_disposition(write_disposition)
if create_disposition:
validate_create_disposition(create_disposition)
    except (WrongDatasetNameException,
            WrongProjectNameException,
            WrongWriteDispositionException,
            WrongCreateDispositionException) as e:
        raise ParameterValidationException(str(e))
def validate_project_id(project_id):
if not project_id or not project_id_pattern.match(project_id):
error_message = "Invalid project value: '{}'. Project IDs may " \
"contain letters, numbers, and " \
"dash".format(project_id)
raise WrongProjectNameException(error_message)
def validate_dataset_id(dataset_id):
if not dataset_id or not dataset_id_pattern.match(dataset_id):
error_message = "Invalid dataset value: '{}'. Dataset IDs may " \
"contain letters, numbers, and " \
"underscores".format(dataset_id)
raise WrongDatasetNameException(error_message)
def validate_write_disposition(write_disposition):
if write_disposition not in available_write_dispositions:
error_message = "Invalid write disposition: '{}'. " \
"The following values are supported: {}." \
.format(write_disposition, ', '.join(available_write_dispositions))
raise WrongWriteDispositionException(error_message)
def validate_create_disposition(create_disposition):
if create_disposition not in available_create_dispositions:
error_message = "Invalid create disposition: '{}'. " \
"The following values are supported: {}." \
.format(create_disposition,
', '.join(available_create_dispositions))
raise WrongCreateDispositionException(error_message)
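
if __name__ == '__main__':
    # A minimal usage sketch with illustrative values: valid parameters pass
    # silently, while an unsupported disposition surfaces as the
    # ParameterValidationException raised by validate_restore_request_params.
    validate_restore_request_params(source_project_id="my-project",
                                    source_dataset_id="my_dataset",
                                    write_disposition="WRITE_APPEND")
    try:
        validate_restore_request_params(write_disposition="WRITE_SOMETIMES")
    except ParameterValidationException as e:
        print(e)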
|
170985
|
import pyhecdss
import pandas as pd
import numpy as np
import os
def test_read_write_cycle_rts():
'''
Test reading and writing of period time stamped data so
that reads and writes don't result in shifting the data
'''
fname = "test2.dss"
if os.path.exists(fname):
os.remove(fname)
path = '/SAMPLE/SIN/WAVE/01JAN1990 - 01JAN1990/15MIN/SAMPLE1/'
sina = np.sin(np.linspace(-np.pi, np.pi, 201))
dfr = pd.DataFrame(sina,
index=pd.period_range('01jan1990 0100', periods=len(sina), freq='15T'),
columns=[path])
d = pyhecdss.DSSFile(fname, create_new=True)
unit2, ptype2 = 'UNIT-X', 'PER-VAL'
d.write_rts(path, dfr, unit2, ptype2)
d.close()
#
d2 = pyhecdss.DSSFile(fname)
plist2 = d2.get_pathnames()
path = plist2[0]
    dfr2, unit2, ptype2 = d2.read_rts(path)
pd.testing.assert_frame_equal(dfr, dfr2)
|
170999
|
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY2:
from httplib import HTTPConnection, HTTPSConnection
if PY3:
from http.client import HTTPConnection, HTTPSConnection
def basic():
conn = HTTPConnection('example.com')
conn.request('GET', '/path')
def indirect_caller():
conn = HTTPSConnection('example.com')
indirect_callee(conn)
def indirect_callee(conn):
conn.request('POST', '/path')
def method_not_known(method):
conn = HTTPConnection('example.com')
conn.request(method, '/path')
def sneaky_setting_host():
# We don't handle that the host is overwritten directly.
# A contrived example; you're not supposed to do this, but you certainly can.
fake = 'fakehost.com'
real = 'realhost.com'
conn = HTTPConnection(fake)
conn.host = real
conn.request('GET', '/path')
def tricky_not_attribute_node():
# A contrived example; you're not supposed to do this, but you certainly can.
conn = HTTPConnection('example.com')
req_meth = conn.request
req_meth('HEAD', '/path')
|
171004
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = y = np.arange(-15, 15, 0.5)
X, Y = np.meshgrid(x, y)
sigma = 4
Z = np.exp(-(X**2 + Y**2)/(2*sigma**2)) / (2*np.pi*sigma**2)
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm)
plt.savefig("data/dst/matplotlib_mplot3d_surface.png")
ax.clear()
ax.plot_wireframe(X, Y, Z, rstride=2, cstride=2)
plt.savefig("data/dst/matplotlib_mplot3d_wireframe.png")
ax.clear()
ax.scatter(X, Y, Z, s=1)
plt.savefig("data/dst/matplotlib_mplot3d_scatter.png")
|
171037
|
import os
from random import seed
import numpy as np
from hyperopt import hp, tpe, rand
import pytest
from sklearn.metrics import mean_squared_error as mse, roc_auc_score as roc
from fedot.core.data.data import InputData
from fedot.core.data.data_split import train_test_data_setup
from fedot.core.pipelines.node import PrimaryNode, SecondaryNode
from fedot.core.pipelines.pipeline import Pipeline
from fedot.core.pipelines.tuning.sequential import SequentialTuner
from fedot.core.pipelines.tuning.unified import PipelineTuner
from fedot.core.pipelines.tuning.search_space import SearchSpace
from fedot.core.repository.tasks import Task, TaskTypesEnum
from test.unit.tasks.test_forecasting import get_ts_data
seed(1)
np.random.seed(1)
@pytest.fixture()
def regression_dataset():
test_file_path = str(os.path.dirname(__file__))
file = os.path.join('../../data', 'advanced_regression.csv')
return InputData.from_csv(os.path.join(test_file_path, file), task=Task(TaskTypesEnum.regression))
@pytest.fixture()
def classification_dataset():
test_file_path = str(os.path.dirname(__file__))
file = os.path.join('../../data', 'advanced_classification.csv')
return InputData.from_csv(os.path.join(test_file_path, file), task=Task(TaskTypesEnum.classification))
def get_simple_regr_pipeline():
final = PrimaryNode(operation_type='xgbreg')
pipeline = Pipeline(final)
return pipeline
def get_complex_regr_pipeline():
node_scaling = PrimaryNode(operation_type='scaling')
node_ridge = SecondaryNode('ridge', nodes_from=[node_scaling])
node_linear = SecondaryNode('linear', nodes_from=[node_scaling])
final = SecondaryNode('xgbreg', nodes_from=[node_ridge, node_linear])
pipeline = Pipeline(final)
return pipeline
def get_simple_class_pipeline():
final = PrimaryNode(operation_type='logit')
pipeline = Pipeline(final)
return pipeline
def get_complex_class_pipeline():
first = PrimaryNode(operation_type='xgboost')
second = PrimaryNode(operation_type='pca')
final = SecondaryNode(operation_type='logit',
nodes_from=[first, second])
pipeline = Pipeline(final)
return pipeline
def get_not_default_search_space():
custom_search_space = {
'logit': {
'C': (hp.uniform, [0.01, 5.0])
},
'ridge': {
'alpha': (hp.uniform, [0.01, 5.0])
},
'xgbreg': {
'n_estimators': (hp.choice, [[100]]),
'max_depth': (hp.choice, [range(1, 7)]),
'learning_rate': (hp.choice, [[1e-3, 1e-2, 1e-1]]),
'subsample': (hp.choice, [np.arange(0.15, 1.01, 0.05)])
},
'xgboost': {
'max_depth': (hp.choice, [range(1, 5)]),
'subsample': (hp.uniform, [0.1, 0.9]),
'min_child_weight': (hp.choice, [range(1, 15)])
},
'ar': {
'lag_1': (hp.uniform, [2, 100]),
'lag_2': (hp.uniform, [2, 500])
},
'pca': {
'n_components': (hp.uniform, [0.2, 0.8])
}
}
return SearchSpace(custom_search_space=custom_search_space)
@pytest.mark.parametrize('data_fixture', ['classification_dataset'])
def test_custom_params_setter(data_fixture, request):
data = request.getfixturevalue(data_fixture)
pipeline = get_complex_class_pipeline()
custom_params = dict(C=10)
pipeline.root_node.custom_params = custom_params
pipeline.fit(data)
params = pipeline.root_node.fitted_operation.get_params()
assert params['C'] == 10
@pytest.mark.parametrize('data_fixture', ['regression_dataset'])
def test_pipeline_tuner_regression_correct(data_fixture, request):
""" Test PipelineTuner for pipeline based on hyperopt library """
data = request.getfixturevalue(data_fixture)
train_data, test_data = train_test_data_setup(data=data)
# Pipelines for regression task
pipeline_simple = get_simple_regr_pipeline()
pipeline_complex = get_complex_regr_pipeline()
for pipeline in [pipeline_simple, pipeline_complex]:
for search_space in [SearchSpace(), get_not_default_search_space()]:
for algo in [tpe.suggest, rand.suggest]:
# Pipeline tuning
pipeline_tuner = PipelineTuner(pipeline=pipeline,
task=train_data.task,
iterations=1,
search_space=search_space,
algo=algo)
# Optimization will be performed on RMSE metric, so loss params are defined
tuned_pipeline = pipeline_tuner.tune_pipeline(input_data=train_data,
loss_function=mse,
loss_params={'squared': False})
is_tuning_finished = True
assert is_tuning_finished
@pytest.mark.parametrize('data_fixture', ['classification_dataset'])
def test_pipeline_tuner_classification_correct(data_fixture, request):
""" Test PipelineTuner for pipeline based on hyperopt library """
data = request.getfixturevalue(data_fixture)
train_data, test_data = train_test_data_setup(data=data)
# Pipelines for classification task
pipeline_simple = get_simple_class_pipeline()
pipeline_complex = get_complex_class_pipeline()
for pipeline in [pipeline_simple, pipeline_complex]:
for search_space in [SearchSpace(), get_not_default_search_space()]:
for algo in [tpe.suggest, rand.suggest]:
# Pipeline tuning
pipeline_tuner = PipelineTuner(pipeline=pipeline,
task=train_data.task,
iterations=1,
search_space=search_space,
algo=algo)
tuned_pipeline = pipeline_tuner.tune_pipeline(input_data=train_data,
loss_function=roc)
is_tuning_finished = True
assert is_tuning_finished
@pytest.mark.parametrize('data_fixture', ['regression_dataset'])
def test_sequential_tuner_regression_correct(data_fixture, request):
""" Test SequentialTuner for pipeline based on hyperopt library """
data = request.getfixturevalue(data_fixture)
train_data, test_data = train_test_data_setup(data=data)
# Pipelines for regression task
pipeline_simple = get_simple_regr_pipeline()
pipeline_complex = get_complex_regr_pipeline()
for pipeline in [pipeline_simple, pipeline_complex]:
for search_space in [SearchSpace(), get_not_default_search_space()]:
for algo in [tpe.suggest, rand.suggest]:
# Pipeline tuning
sequential_tuner = SequentialTuner(pipeline=pipeline,
task=train_data.task,
iterations=1,
search_space=search_space,
algo=algo)
# Optimization will be performed on RMSE metric, so loss params are defined
tuned_pipeline = sequential_tuner.tune_pipeline(input_data=train_data,
loss_function=mse,
loss_params={'squared': False})
is_tuning_finished = True
assert is_tuning_finished
@pytest.mark.parametrize('data_fixture', ['classification_dataset'])
def test_sequential_tuner_classification_correct(data_fixture, request):
""" Test SequentialTuner for pipeline based on hyperopt library """
data = request.getfixturevalue(data_fixture)
train_data, test_data = train_test_data_setup(data=data)
# Pipelines for classification task
pipeline_simple = get_simple_class_pipeline()
pipeline_complex = get_complex_class_pipeline()
for pipeline in [pipeline_simple, pipeline_complex]:
for search_space in [SearchSpace(), get_not_default_search_space()]:
for algo in [tpe.suggest, rand.suggest]:
# Pipeline tuning
sequential_tuner = SequentialTuner(pipeline=pipeline,
task=train_data.task,
iterations=2,
search_space=search_space,
algo=algo)
tuned_pipeline = sequential_tuner.tune_pipeline(input_data=train_data,
loss_function=roc)
is_tuning_finished = True
assert is_tuning_finished
@pytest.mark.parametrize('data_fixture', ['regression_dataset'])
def test_certain_node_tuning_regression_correct(data_fixture, request):
""" Test SequentialTuner for particular node based on hyperopt library """
data = request.getfixturevalue(data_fixture)
train_data, test_data = train_test_data_setup(data=data)
# Pipelines for regression task
pipeline_simple = get_simple_regr_pipeline()
pipeline_complex = get_complex_regr_pipeline()
for pipeline in [pipeline_simple, pipeline_complex]:
for search_space in [SearchSpace(), get_not_default_search_space()]:
for algo in [tpe.suggest, rand.suggest]:
# Pipeline tuning
sequential_tuner = SequentialTuner(pipeline=pipeline,
task=train_data.task,
iterations=1,
search_space=search_space,
algo=algo)
tuned_pipeline = sequential_tuner.tune_node(input_data=train_data,
node_index=0,
loss_function=mse)
is_tuning_finished = True
assert is_tuning_finished
@pytest.mark.parametrize('data_fixture', ['classification_dataset'])
def test_certain_node_tuning_classification_correct(data_fixture, request):
""" Test SequentialTuner for particular node based on hyperopt library """
data = request.getfixturevalue(data_fixture)
train_data, test_data = train_test_data_setup(data=data)
# Pipelines for classification task
pipeline_simple = get_simple_class_pipeline()
pipeline_complex = get_complex_class_pipeline()
for pipeline in [pipeline_simple, pipeline_complex]:
for search_space in [SearchSpace(), get_not_default_search_space()]:
for algo in [tpe.suggest, rand.suggest]:
# Pipeline tuning
sequential_tuner = SequentialTuner(pipeline=pipeline,
task=train_data.task,
iterations=1,
search_space=search_space,
algo=algo)
tuned_pipeline = sequential_tuner.tune_node(input_data=train_data,
node_index=0,
loss_function=roc)
is_tuning_finished = True
assert is_tuning_finished
def test_ts_pipeline_with_stats_model():
""" Tests PipelineTuner for time series forecasting task with AR model """
train_data, test_data = get_ts_data(n_steps=200, forecast_length=5)
ar_pipeline = Pipeline(PrimaryNode('ar'))
for search_space in [SearchSpace(), get_not_default_search_space()]:
for algo in [tpe.suggest, rand.suggest]:
# Tune AR model
tuner_ar = PipelineTuner(pipeline=ar_pipeline, task=train_data.task, iterations=3,
search_space=search_space, algo=algo)
tuned_ar_pipeline = tuner_ar.tune_pipeline(input_data=train_data,
loss_function=mse)
is_tuning_finished = True
assert is_tuning_finished
def test_search_space_correctness_after_customization():
default_search_space = SearchSpace()
custom_search_space = {'gbr': {'max_depth': (hp.choice, [[3, 7, 31, 127, 8191, 131071]])}}
custom_search_space_without_replace = SearchSpace(custom_search_space=custom_search_space,
replace_default_search_space=False)
custom_search_space_with_replace = SearchSpace(custom_search_space=custom_search_space,
replace_default_search_space=True)
default_params = default_search_space.get_node_params(node_id=0,
operation_name='gbr')
custom_without_replace_params = custom_search_space_without_replace.get_node_params(node_id=0,
operation_name='gbr')
custom_with_replace_params = custom_search_space_with_replace.get_node_params(node_id=0,
operation_name='gbr')
assert default_params.keys() == custom_without_replace_params.keys()
assert default_params.keys() != custom_with_replace_params.keys()
assert default_params['0 || gbr | max_depth'] != custom_without_replace_params['0 || gbr | max_depth']
assert default_params['0 || gbr | max_depth'] != custom_with_replace_params['0 || gbr | max_depth']
def test_search_space_get_operation_parameter_range():
default_search_space = SearchSpace()
gbr_operations = ['n_estimators', 'loss', 'learning_rate', 'max_depth', 'min_samples_split',
'min_samples_leaf', 'subsample', 'max_features', 'alpha']
custom_search_space = {'gbr': {'max_depth': (hp.choice, [[3, 7, 31, 127, 8191, 131071]])}}
custom_search_space_without_replace = SearchSpace(custom_search_space=custom_search_space,
replace_default_search_space=False)
custom_search_space_with_replace = SearchSpace(custom_search_space=custom_search_space,
replace_default_search_space=True)
default_operations = default_search_space.get_operation_parameter_range('gbr')
custom_without_replace_operations = custom_search_space_without_replace.get_operation_parameter_range('gbr')
custom_with_replace_operations = custom_search_space_with_replace.get_operation_parameter_range('gbr')
assert default_operations == gbr_operations
assert custom_without_replace_operations == gbr_operations
assert custom_with_replace_operations == ['max_depth']
|
171042
|
from functools import lru_cache
from typing import List, Set, Union, Optional
import yaml
from .root import HConfig
from .options import options_for
class Host:
"""
    A host object is a convenient way to load host inventory
items into a single object.
The default is to load "hostname", "os", and "options" to the host object,
however, it can easily be extended for developer needs.
.. code:: python
import yaml
from hier_config.host import Host
        options = yaml.load(open("./tests/fixtures/options_ios.yml"), Loader=yaml.SafeLoader)
host = Host("example.rtr", "ios", options)
# Example of loading running config and generated configs into a host object
        host.load_running_config_from_file("./tests/files/running_config.conf")
        host.load_generated_config_from_file("./tests/files/generated_config.conf")
# Example of loading hier-config tags into a host object
host.load_tags("./tests/fixtures/tags_ios.yml")
# Example of creating a remediation config without a tag targeting specific config
host.remediation_config()
# Example of creating a remediation config with a tag ("safe") targeting a specific config.
        host.remediation_config_filtered_text({"safe"}, set())
"""
def __init__(
self,
hostname: str,
os: str,
hconfig_options: dict = None,
):
self.hostname = hostname
self.os = os
self.hconfig_options = (
hconfig_options if hconfig_options else options_for(self.os)
)
self._hconfig_tags: List[dict] = []
self._running_config: Optional[HConfig] = None
self._generated_config: Optional[HConfig] = None
def __repr__(self) -> str:
return f"Host(hostname={self.hostname})"
@property
def running_config(self) -> Optional[HConfig]:
"""running configuration property"""
if self._running_config is None:
self._running_config = self._get_running_config()
return self._running_config
@property
def generated_config(self) -> Optional[HConfig]:
"""generated configuration property"""
if self._generated_config is None:
self._generated_config = self._get_generated_config()
return self._generated_config
@lru_cache()
def remediation_config(self) -> HConfig:
"""
Once self.running_config and self.generated_config have been created,
create self.remediation_config
"""
if self.running_config and self.generated_config:
remediation = self.running_config.config_to_get_to(self.generated_config)
else:
raise AttributeError("Missing host.running_config or host.generated_config")
remediation.add_sectional_exiting()
remediation.set_order_weight()
remediation.add_tags(self.hconfig_tags)
return remediation
@lru_cache()
def rollback_config(self) -> HConfig:
"""
Once a self.running_config and self.generated_config have been created,
generate a self.rollback_config
"""
if self.running_config and self.generated_config:
rollback = self.generated_config.config_to_get_to(self.running_config)
else:
raise AttributeError("Missing host.running_config or host.generated_config")
rollback.add_sectional_exiting()
rollback.set_order_weight()
rollback.add_tags(self.hconfig_tags)
return rollback
@property
def hconfig_tags(self) -> List[dict]:
"""hier-config tags property"""
return self._hconfig_tags
def load_running_config_from_file(self, file: str) -> None:
config = self._load_from_file(file)
if not isinstance(config, str):
raise TypeError
self.load_running_config(config)
def load_running_config(self, config_text: str) -> None:
self._running_config = self._load_config(config_text)
def load_generated_config_from_file(self, file: str) -> None:
config = self._load_from_file(file)
if not isinstance(config, str):
raise TypeError
self.load_generated_config(config)
def load_generated_config(self, config_text: str) -> None:
self._generated_config = self._load_config(config_text)
def remediation_config_filtered_text(
self, include_tags: Set[str], exclude_tags: Set[str]
) -> str:
config = self.remediation_config()
if include_tags or exclude_tags:
children = config.all_children_sorted_by_tags(include_tags, exclude_tags)
else:
children = config.all_children_sorted()
return "\n".join(c.cisco_style_text() for c in children)
def load_tags(self, tags: list) -> None:
"""
Loads lineage rules that set tags
Example:
Specify to load lineage rules from a dictionary.
.. code:: python
tags = [{"lineage": [{"startswith": "interface"}], "add_tags": "interfaces"}]
host.load_tags(tags)
:param tags: tags
"""
self._hconfig_tags = tags
def load_tags_from_file(self, file: str) -> None:
tags_from_file = self._load_from_file(file, True)
if not isinstance(tags_from_file, list):
raise TypeError
self.load_tags(tags_from_file)
def _load_config(self, config_text: str) -> HConfig:
hier = HConfig(host=self)
hier.load_from_string(config_text)
return hier
@staticmethod
def _load_from_file(name: str, parse_yaml: bool = False) -> Union[list, dict, str]:
"""Opens a config file and loads it as a string."""
with open(name) as file: # pylint: disable=unspecified-encoding
content = file.read()
if parse_yaml:
content = yaml.safe_load(content)
return content
def _get_running_config(self) -> HConfig: # pylint: disable=no-self-use
return NotImplemented
def _get_generated_config(self) -> HConfig: # pylint: disable=no-self-use
return NotImplemented
|
171050
|
from pathlib import Path
import shutil
import glob
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pyarrow.dataset
import vaex
path = Path(__file__).parent.parent
data_path = path / 'data'
countries = ['US', 'US', 'NL', 'FR', 'NL', 'NL']
years = [2020, 2021, 2020, 2020, 2019, 2020]
values = [1, 2, 3, 4, 5, 6]
table = pa.table({
'country': countries,
'year': years,
'value': values,
})
def test_partitioning_basics_hive():
shutil.rmtree(data_path / 'parquet_dataset_partitioned_hive', ignore_errors=True)
pq.write_to_dataset(table, data_path / 'parquet_dataset_partitioned_hive', partition_cols=['year', 'country'])
ds = pa.dataset.dataset(data_path / 'parquet_dataset_partitioned_hive', partitioning="hive") #, format="parquet", )
# import pdb; pdb.set_trace()
df = vaex.open(data_path / 'parquet_dataset_partitioned_hive', partitioning="hive")
# import pdb; pdb.set_trace()
assert set(df.value.tolist()) == set(values)
assert set(df.year.tolist()) == set(years)
assert set(df.country.tolist()) == set(countries)
def test_partitioning_write_parquet():
shutil.rmtree(data_path / 'parquet_dataset_partitioned_vaex', ignore_errors=True)
df = vaex.from_arrow_table(table)
df.export_partitioned(data_path / 'parquet_dataset_partitioned_vaex', ['country', 'year'])
df = vaex.open(data_path / 'parquet_dataset_partitioned_vaex', partitioning="hive")
assert len(glob.glob(str(data_path / 'parquet_dataset_partitioned_vaex/*/*/*.parquet'))) == 5 # 5 unique values
assert len(glob.glob(str(data_path / 'parquet_dataset_partitioned_vaex/country=US/year=2020/*.parquet'))) == 1
assert len(glob.glob(str(data_path / 'parquet_dataset_partitioned_vaex/country=NL/year=2020/*.parquet'))) == 1
# import pdb; pdb.set_trace()
assert set(df.value.tolist()) == set(values)
assert set(df.year.tolist()) == set(years)
assert set(df.country.tolist()) == set(countries)
def test_partitioning_write_hdf5():
    shutil.rmtree(data_path / 'parquet_dataset_partitioned_vaex_my_choice', ignore_errors=True)
df = vaex.from_arrow_table(table)
df.export_partitioned(data_path / 'parquet_dataset_partitioned_vaex_my_choice/{subdir}/{i}.hdf5', ['country'])
assert len(glob.glob(str(data_path / 'parquet_dataset_partitioned_vaex_my_choice/*/*.hdf5'))) == 3 # 3 unique values
assert len(glob.glob(str(data_path / 'parquet_dataset_partitioned_vaex_my_choice/country=US/[012].hdf5'))) == 1
assert len(glob.glob(str(data_path / 'parquet_dataset_partitioned_vaex_my_choice/country=NL/[012].hdf5'))) == 1
assert len(glob.glob(str(data_path / 'parquet_dataset_partitioned_vaex_my_choice/country=FR/[012].hdf5'))) == 1
def test_partitioning_write_directory():
shutil.rmtree(data_path / 'parquet_dataset_partitioned_directory1', ignore_errors=True)
shutil.rmtree(data_path / 'parquet_dataset_partitioned_directory2', ignore_errors=True)
partitioning = pa.dataset.partitioning(
pa.schema([("country", pa.string())])
)
df = vaex.from_arrow_table(table)
df.export_partitioned(data_path / 'parquet_dataset_partitioned_directory1', ['country'], directory_format='{value}')
assert len(glob.glob(str(data_path / 'parquet_dataset_partitioned_directory1/*/*.parquet'))) == 3 # 3 unique values
assert len(glob.glob(str(data_path / 'parquet_dataset_partitioned_directory1/US/*.parquet'))) == 1
assert len(glob.glob(str(data_path / 'parquet_dataset_partitioned_directory1/NL/*.parquet'))) == 1
assert len(glob.glob(str(data_path / 'parquet_dataset_partitioned_directory1/FR/*.parquet'))) == 1
assert set(df.value.tolist()) == set(values)
assert set(df.year.tolist()) == set(years)
assert set(df.country.tolist()) == set(countries)
# now with 2 keys
partitioning = pa.dataset.partitioning(
pa.schema([("year", pa.int64()), ("country", pa.string())])
)
df.export_partitioned(data_path / 'parquet_dataset_partitioned_directory2', ['year', 'country'], directory_format='{value}')
assert len(glob.glob(str(data_path / 'parquet_dataset_partitioned_directory2/*/*/*.parquet'))) == 5 # 5 unique values
assert len(glob.glob(str(data_path / 'parquet_dataset_partitioned_directory2/2020/US/*.parquet'))) == 1
assert len(glob.glob(str(data_path / 'parquet_dataset_partitioned_directory2/2020/NL/*.parquet'))) == 1
df = vaex.open(data_path / 'parquet_dataset_partitioned_directory2', partitioning=partitioning)
assert set(df.value.tolist()) == set(values)
assert set(df.year.tolist()) == set(years)
assert set(df.country.tolist()) == set(countries)
|
171064
|
import math
from maya import OpenMaya, OpenMayaMPx
class Circler(OpenMayaMPx.MPxNode): #(1)
def compute(self, *args):
pass
def create(): #(2)
return OpenMayaMPx.asMPxPtr(Circler())
def init(): #(3)
return
nodeName = 'circler' #(4)
nodeTypeID = OpenMaya.MTypeId(0x60005) #(5)
def _toplugin(mobject): #(6)
return OpenMayaMPx.MFnPlugin(
mobject, '<NAME>', '0.01')
def initializePlugin(mobject):
plugin = _toplugin(mobject)
plugin.registerNode(nodeName, nodeTypeID, create, init)
def uninitializePlugin(mobject):
plugin = _toplugin(mobject)
plugin.deregisterNode(nodeTypeID)
# Fleshing out in later sections
class Circler(OpenMayaMPx.MPxNode):
    input = OpenMaya.MObject()  # input
    frames = OpenMaya.MObject()  # input
scale = OpenMaya.MObject() # input
outSine = OpenMaya.MObject() # output
outCosine = OpenMaya.MObject() # output
def compute(self, plug, data): #(1)
if plug not in (Circler.outSine, Circler.outCosine): #(2)
return OpenMaya.MStatus.kUnknownParameter
inputData = data.inputValue(Circler.input) #(3)
scaleData = data.inputValue(Circler.scale)
framesData = data.inputValue(Circler.frames)
inputVal = inputData.asFloat() #(4)
scaleFactor = scaleData.asFloat()
framesPerCircle = framesData.asFloat()
angle = 6.2831853 * (inputVal/framesPerCircle) #(5)
sinResult = math.sin(angle) * scaleFactor
cosResult = math.cos(angle) * scaleFactor
sinHandle = data.outputValue(Circler.outSine) #(6)
cosHandle = data.outputValue(Circler.outCosine)
sinHandle.setFloat(sinResult) #(7)
cosHandle.setFloat(cosResult)
data.setClean(plug) #(8)
return OpenMaya.MStatus.kSuccess #(9)
def init():
nAttr = OpenMaya.MFnNumericAttribute() #(1)
kFloat = OpenMaya.MFnNumericData.kFloat
#(2) Setup the input attributes
Circler.input = nAttr.create('input', 'in', kFloat, 0.0)
nAttr.setStorable(True)
Circler.scale = nAttr.create('scale', 'sc', kFloat, 10.0)
nAttr.setStorable(True)
Circler.frames = nAttr.create('frames', 'fr', kFloat, 48.0)
nAttr.setStorable(True)
#(3) Setup the output attributes
Circler.outSine = nAttr.create('outSine', 'so', kFloat, 0.0)
nAttr.setWritable(False)
nAttr.setStorable(False)
Circler.outCosine = nAttr.create(
'outCosine', 'co', kFloat, 0.0)
nAttr.setWritable(False)
nAttr.setStorable(False)
#(4) Add the attributes to the node
Circler.addAttribute(Circler.input)
Circler.addAttribute(Circler.scale)
Circler.addAttribute(Circler.frames)
Circler.addAttribute(Circler.outSine)
Circler.addAttribute(Circler.outCosine)
#(5) Set the attribute dependencies
Circler.attributeAffects(Circler.input, Circler.outSine)
Circler.attributeAffects(Circler.input, Circler.outCosine)
Circler.attributeAffects(Circler.scale, Circler.outSine)
Circler.attributeAffects(Circler.scale, Circler.outCosine)
Circler.attributeAffects(Circler.frames, Circler.outSine)
Circler.attributeAffects(Circler.frames, Circler.outCosine)
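
# A minimal usage sketch (to be run inside a Maya session; the node type and
# attribute names come from nodeName and init() above, everything else is
# illustrative):
#
#     import maya.cmds as cmds
#     cmds.loadPlugin('circler.py')                        # this file, on the plug-in path
#     node = cmds.createNode('circler')
#     cmds.connectAttr('time1.outTime', node + '.input')   # drive the input from the timeline
#     print(cmds.getAttr(node + '.outSine'), cmds.getAttr(node + '.outCosine'))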
|
171067
|
import random
from random import choice
import numpy as np
import pandas as pd
from pyspark.sql import SparkSession
from ydot.spark import smatrices
random.seed(37)
np.random.seed(37)
def get_spark_dataframe(spark):
n = 100
data = {
'a': [choice(['left', 'right']) for _ in range(n)],
'b': [choice(['high', 'mid', 'low']) for _ in range(n)],
'x1': np.random.normal(20, 1, n),
'x2': np.random.normal(3, 1, n),
'y': [choice([1.0, 0.0]) for _ in range(n)]
}
pdf = pd.DataFrame(data)
sdf = spark.createDataFrame(pdf)
return sdf
if __name__ == '__main__':
try:
spark = (SparkSession.builder
.master('local[4]')
.appName('local-testing-pyspark')
.getOrCreate())
sdf = get_spark_dataframe(spark)
y, X = smatrices('y ~ (x1 + x2 + a + b)**2', sdf)
y = y.toPandas()
X = X.toPandas()
print(X.head(10))
X.head(10).to_csv('two-way-interactions.csv', index=False)
except Exception as e:
print(e)
finally:
try:
spark.stop()
print('closed spark')
except Exception as e:
print(e)
|
171081
|
from threading import Thread, Lock
c = 0
lock = Lock()
def count_300():
global c
lock.acquire()
try:
while c < 30000:
c += 1
print(c)
finally:
lock.release()
def count_10000():
global c
while c < 100000:
c += 1
print(c)
t_0 = Thread(target=count_300, name='1000', daemon=True)
t_0.start()
t_1 = Thread(target=count_10000, name='10')
t_1.start()
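
# The acquire()/try/finally/release() pattern in count_300 can also be written
# with the lock as a context manager; an equivalent sketch:
def count_300_with_context_manager():
    global c
    with lock:  # acquired on entry, released on exit, even if an exception is raised
        while c < 30000:
            c += 1
            print(c)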
|
171101
|
from Rules.RuleTextTooBigCompWords import RuleTextTooBigCompWords
from Rules.RuleVisionBoxHasSoManyInvalidTexts import RuleVisionBoxHasSoManyInvalidTexts
from Rules.RuleBaseOnNeighbour import RuleBaseOnNeighbour
from Rules.RuleAlignVertically import RuleAlignVertically
from Rules.RuleBigLotChildren import RuleBigLotChildren
from Rules.RuleBoxHasOneWordTooSmall import RuleBoxHasOneWordTooSmall
from Rules.RuleBoxHasWordTooSmall import RuleBoxHasWordTooSmall
from Rules.RuleCharacterDistance import RuleCharacterDistance
from Rules.RuleWordOnlyOneTinyChild import RuleWordOnlyOneTinyChild
from Rules.RuleAllSpace import RuleAllSpace
from Rules.RuleLowConfidenceAndBoundaryCheck import RuleLowConfidenceAndBoundaryCheck
from Rules.RuleNoChildren import RuleNoChildren
from Rules.RuleNoHeight import RuleNoHeight
from Rules.RuleNoWidth import RuleNoWidth
from Rules.RuleOutOfBound import RuleOutOfBound
from Rules.RuleSmallHorizontalShape import RuleSmallHorizontalShape
from Rules.RuleSmallVericalShape import RuleSmallVericalShape
from Rules.RuleValidByList import RuleValidByList
from Utils import Environment
from Utils import Logger
class RuleManager:
def __init__(self, dipCalculator, ocrTesseractOCR, matLog, ocrTextWrappers, views):
self.mOCRRules = []
self.mVisionRules = []
self.mDipCalculator = dipCalculator
self.mViews = views
self.mOcrTesseractOCR = ocrTesseractOCR
self.mMatLog = matLog
self.mOcrTextWrappers = ocrTextWrappers
self.initOCRRules()
self.initVisionRules()
def initVisionRules(self):
self.mVisionRules.append( RuleTextTooBigCompWords(self.mDipCalculator, self.mOcrTesseractOCR, self.mMatLog, self.mOcrTextWrappers, self.mViews))
self.mVisionRules.append( RuleVisionBoxHasSoManyInvalidTexts(self.mDipCalculator, self.mOcrTesseractOCR, self.mMatLog, self.mOcrTextWrappers, self.mViews))
self.mVisionRules.append( RuleBaseOnNeighbour(self.mDipCalculator, self.mOcrTesseractOCR, self.mMatLog, self.mOcrTextWrappers, self.mViews))
def initOCRRules(self):
self.mOCRRules.append(RuleLowConfidenceAndBoundaryCheck(self.mDipCalculator, self.mOcrTesseractOCR, self.mMatLog, self.mOcrTextWrappers, self.mViews))
self.mOCRRules.append(RuleValidByList(self.mDipCalculator, self.mOcrTesseractOCR, self.mMatLog, self.mOcrTextWrappers, self.mViews))
self.mOCRRules.append(RuleNoWidth(self.mDipCalculator, self.mOcrTesseractOCR, self.mMatLog, self.mOcrTextWrappers, self.mViews))
self.mOCRRules.append(RuleNoHeight(self.mDipCalculator, self.mOcrTesseractOCR, self.mMatLog, self.mOcrTextWrappers, self.mViews))
self.mOCRRules.append(RuleOutOfBound(self.mDipCalculator, self.mOcrTesseractOCR,self.mMatLog, self.mOcrTextWrappers, self.mViews))
self.mOCRRules.append(RuleSmallVericalShape(self.mDipCalculator, self.mOcrTesseractOCR, self.mMatLog, self.mOcrTextWrappers, self.mViews))
self.mOCRRules.append(RuleSmallHorizontalShape(self.mDipCalculator, self.mOcrTesseractOCR, self.mMatLog, self.mOcrTextWrappers, self.mViews))
self.mOCRRules.append(RuleAllSpace(self.mDipCalculator, self.mOcrTesseractOCR, self.mMatLog, self.mOcrTextWrappers, self.mViews))
self.mOCRRules.append(RuleAlignVertically(self.mDipCalculator, self.mOcrTesseractOCR, self.mMatLog, self.mOcrTextWrappers, self.mViews))
self.mOCRRules.append(RuleCharacterDistance(self.mDipCalculator, self.mOcrTesseractOCR, self.mMatLog, self.mOcrTextWrappers, self.mViews))
self.mOCRRules.append(RuleWordOnlyOneTinyChild(self.mDipCalculator, self.mOcrTesseractOCR, self.mMatLog, self.mOcrTextWrappers, self.mViews))
self.mOCRRules.append(RuleBigLotChildren(self.mDipCalculator, self.mOcrTesseractOCR, self.mMatLog, self.mOcrTextWrappers, self.mViews))
self.mOCRRules.append(RuleBoxHasWordTooSmall(self.mDipCalculator, self.mOcrTesseractOCR, self.mMatLog, self.mOcrTextWrappers, self.mViews))
self.mOCRRules.append(RuleBoxHasOneWordTooSmall(self.mDipCalculator, self.mOcrTesseractOCR, self.mMatLog, self.mOcrTextWrappers, self.mViews))
self.mOCRRules.append(RuleNoChildren(self.mDipCalculator, self.mOcrTesseractOCR, self.mMatLog, self.mOcrTextWrappers, self.mViews))
def acceptOCRRules(self,ocr):
firstTextValidator = None
for rule in self.mOCRRules:
textValidator = rule.accept(ocr)
            if textValidator is not None and not textValidator.valid:
firstTextValidator = textValidator
# print(textValidator.log)
break
for rule in self.mOCRRules:
textValidator = rule.accept(ocr)
            if textValidator is not None and not textValidator.valid:
Logger.append(Logger.RULE_INFO_LOG,"\t" + type(rule).__name__)
return firstTextValidator
def acceptVisionRules(self,invalidTexts, acceptedOcrTextWrappers):
for rule in self.mVisionRules:
match = rule.run(invalidTexts, acceptedOcrTextWrappers)
if match:
Logger.append(Logger.RULE_INFO_LOG, "\t" + type(rule).__name__)
|
171106
|
import asyncio
from multiprocessing.util import register_after_fork
from queue import Queue
from threading import (
Barrier,
BoundedSemaphore,
Condition,
Event,
Lock,
RLock,
Semaphore,
)
from aioprocessing.locks import _ContextManager
from .executor import _ExecutorMixin
from .mp import managers as _managers
AioBaseQueueProxy = _managers.MakeProxyType(
"AioQueueProxy",
(
"task_done",
"get",
"qsize",
"put",
"put_nowait",
"get_nowait",
"empty",
"join",
"_qsize",
"full",
),
)
class _AioProxyMixin(_ExecutorMixin):
_obj = None
def _async_call(self, method, *args, loop=None, **kwargs):
return asyncio.ensure_future(
self.run_in_executor(
self._callmethod, method, args, kwargs, loop=loop
)
)
class ProxyCoroBuilder(type):
""" Build coroutines to proxy functions. """
def __new__(cls, clsname, bases, dct):
coro_list = dct.get("coroutines", [])
existing_coros = set()
def find_existing_coros(d):
for attr in d:
if attr.startswith("coro_") or attr.startswith("thread_"):
existing_coros.add(attr)
# Determine if any bases include the coroutines attribute, or
# if either this class or a base class provides an actual
# implementation for a coroutine method.
find_existing_coros(dct)
for b in bases:
b_dct = b.__dict__
coro_list.extend(b_dct.get("coroutines", []))
find_existing_coros(b_dct)
bases += (_AioProxyMixin,)
for func in coro_list:
coro_name = "coro_{}".format(func)
if coro_name not in existing_coros:
dct[coro_name] = cls.coro_maker(func)
return super().__new__(cls, clsname, bases, dct)
@staticmethod
def coro_maker(func):
def coro_func(self, *args, loop=None, **kwargs):
return self._async_call(func, *args, loop=loop, **kwargs)
return coro_func
class AioQueueProxy(AioBaseQueueProxy, metaclass=ProxyCoroBuilder):
""" A Proxy object for AioQueue.
Provides coroutines for calling 'get' and 'put' on the
proxy.
"""
coroutines = ["get", "put"]
class AioAcquirerProxy(_managers.AcquirerProxy, metaclass=ProxyCoroBuilder):
pool_workers = 1
coroutines = ["acquire", "release"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._threaded_acquire = False
def _after_fork(obj):
obj._threaded_acquire = False
register_after_fork(self, _after_fork)
def coro_acquire(self, *args, **kwargs):
""" Non-blocking acquire.
We need a custom implementation here, because we need to
set the _threaded_acquire attribute to True once we have
the lock. This attribute is used by release() to determine
whether the lock should be released in the main thread,
or in the Executor thread.
"""
def lock_acquired(fut):
if fut.result():
self._threaded_acquire = True
out = self.run_in_executor(self.acquire, *args, **kwargs)
out.add_done_callback(lock_acquired)
return out
def __getstate__(self):
state = super().__getstate__()
state["_threaded_acquire"] = False
return state
def __setstate__(self, state):
super().__setstate__(state)
def release(self):
""" Release the lock.
If the lock was acquired in the same process via
coro_acquire, we need to release the lock in the
ThreadPoolExecutor's thread.
"""
if self._threaded_acquire:
out = self.run_in_thread(super().release)
else:
out = super().release()
self._threaded_acquire = False
return out
async def __aenter__(self):
await self.coro_acquire()
return None
async def __aexit__(self, *args, **kwargs):
self.release()
def __iter__(self):
yield from self.coro_acquire()
return _ContextManager(self)
class AioBarrierProxy(_managers.BarrierProxy, metaclass=ProxyCoroBuilder):
coroutines = ["wait"]
class AioEventProxy(_managers.EventProxy, metaclass=ProxyCoroBuilder):
coroutines = ["wait"]
class AioConditionProxy(_managers.ConditionProxy, metaclass=ProxyCoroBuilder):
coroutines = ["wait", "wait_for"]
class AioSyncManager(_managers.SyncManager):
""" A mp.Manager that provides asyncio-friendly objects. """
pass
AioSyncManager.register("AioQueue", Queue, AioQueueProxy)
AioSyncManager.register("AioBarrier", Barrier, AioQueueProxy)
AioSyncManager.register(
"AioBoundedSemaphore", BoundedSemaphore, AioAcquirerProxy
)
AioSyncManager.register("AioCondition", Condition, AioConditionProxy)
AioSyncManager.register("AioEvent", Event, AioQueueProxy)
AioSyncManager.register("AioLock", Lock, AioAcquirerProxy)
AioSyncManager.register("AioRLock", RLock, AioAcquirerProxy)
AioSyncManager.register("AioSemaphore", Semaphore, AioAcquirerProxy)
|
171181
|
from django.conf import settings
from django.core.management.base import BaseCommand
from qfieldcloud.core import geodb_utils, utils
class Command(BaseCommand):
help = "Check qfieldcloud status"
def handle(self, *args, **options):
results = {}
results["redis"] = "ok"
# Check if redis is visible
if not utils.redis_is_running():
results["redis"] = "error"
results["geodb"] = "ok"
# Check geodb
if not geodb_utils.geodb_is_running():
results["geodb"] = "error"
results["storage"] = "ok"
# Check if bucket exists (i.e. the connection works)
try:
s3_client = utils.get_s3_client()
s3_client.head_bucket(Bucket=settings.STORAGE_BUCKET_NAME)
except Exception:
results["storage"] = "error"
self.stdout.write(
self.style.SUCCESS("Everything seems to work properly: {}".format(results))
)
|
171187
|
import matplotlib.pyplot as plt
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import numpy as np
import skimage.io as io
import ipdb;pdb=ipdb.set_trace
from collections import OrderedDict
# markdown format output
def _print_name_value(name_value, full_arch_name):
names = name_value.keys()
values = name_value.values()
num_values = len(name_value)
print(
'| Arch ' +
' '.join(['| {}'.format(name) for name in names]) +
' |'
)
print('|---' * (num_values+1) + '|')
if len(full_arch_name) > 15:
full_arch_name = full_arch_name[:8] + '...'
print(
'| ' + full_arch_name + ' ' +
' '.join(['| {:.3f}'.format(value) for value in values]) +
' |'
)
gt_anns = 'data/coco/annotations/person_keypoints_val2017.json'
# dt_anns = '/home/xyliu/2D_pose/deep-high-resolution-net.pytorch/person_keypoints.json'
dt_anns = '/home/xyliu/2D_pose/simple-pose-estimation/person_keypoints.json'
annType = 'keypoints'
cocoGt=COCO(gt_anns)
cocoDt=cocoGt.loadRes(dt_anns)
cocoEval = COCOeval(cocoGt,cocoDt,annType)
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
stats_names = ['AP', 'Ap .5', 'AP .75', 'AP (M)', 'AP (L)', 'AR', 'AR .5', 'AR .75', 'AR (M)', 'AR (L)']
info_str = []
for ind, name in enumerate(stats_names):
info_str.append((name, cocoEval.stats[ind]))
name_values = OrderedDict(info_str)
model_name = 'openpose'
if isinstance(name_values, list):
for name_value in name_values:
_print_name_value(name_value, model_name)
else:
_print_name_value(name_values, model_name)
|
171199
|
from .single_stage import SingleStageDetectorWraper
from .two_stage import TwoStageDetectorWraper
__all__ = ['SingleStageDetectorWraper', 'TwoStageDetectorWraper']
|
171226
|
import getpass
import logging
import os
import pathlib
import pytest
from libtmux import exc
from libtmux.server import Server
from libtmux.test import TEST_SESSION_PREFIX, get_test_session_name, namer
logger = logging.getLogger(__name__)
@pytest.fixture(autouse=True, scope="session")
def home_path(tmp_path_factory: pytest.TempPathFactory):
return tmp_path_factory.mktemp("home")
@pytest.fixture(autouse=True, scope="session")
def user_path(home_path: pathlib.Path):
p = home_path / getpass.getuser()
p.mkdir()
return p
@pytest.fixture(autouse=True)
def home_path_default(user_path: pathlib.Path):
os.environ["HOME"] = str(user_path)
@pytest.fixture(scope="function")
def monkeypatch_plugin_test_packages(monkeypatch):
paths = [
"tests/fixtures/pluginsystem/plugins/tmuxp_test_plugin_bwb/",
"tests/fixtures/pluginsystem/plugins/tmuxp_test_plugin_bs/",
"tests/fixtures/pluginsystem/plugins/tmuxp_test_plugin_r/",
"tests/fixtures/pluginsystem/plugins/tmuxp_test_plugin_owc/",
"tests/fixtures/pluginsystem/plugins/tmuxp_test_plugin_awf/",
"tests/fixtures/pluginsystem/plugins/tmuxp_test_plugin_fail/",
]
for path in paths:
monkeypatch.syspath_prepend(os.path.abspath(os.path.relpath(path)))
@pytest.fixture(scope="function")
def socket_name(request):
return "tmuxp_test%s" % next(namer)
@pytest.fixture(scope="function")
def server(request, socket_name):
t = Server()
t.socket_name = socket_name
def fin():
t.kill_server()
request.addfinalizer(fin)
return t
@pytest.fixture(scope="function")
def session(server):
session_name = "tmuxp"
if not server.has_session(session_name):
server.cmd(
"-f",
"/dev/null", # use a blank config to reduce side effects
"new-session",
"-d", # detached
"-s",
session_name,
"/bin/sh", # use /bin/sh as a shell to reduce side effects
# normally, it'd be -c, but new-session is special
)
# find current sessions prefixed with tmuxp
old_test_sessions = [
s.get("session_name")
for s in server._sessions
if s.get("session_name").startswith(TEST_SESSION_PREFIX)
]
TEST_SESSION_NAME = get_test_session_name(server=server)
try:
session = server.new_session(session_name=TEST_SESSION_NAME)
except exc.LibTmuxException as e:
raise e
"""
Make sure that tmuxp can :ref:`test_builder_visually` and switches to
the newly created session for that testcase.
"""
try:
server.switch_client(session.get("session_id"))
except exc.LibTmuxException:
# server.attach_session(session.get('session_id'))
pass
for old_test_session in old_test_sessions:
logger.debug("Old test test session %s found. Killing it." % old_test_session)
server.kill_session(old_test_session)
assert TEST_SESSION_NAME == session.get("session_name")
assert TEST_SESSION_NAME != "tmuxp"
return session
|
171259
|
import asyncio
import logging
from typing import Dict, Callable, List, Optional, Union, Pattern
from .command import Command
from .lexer import Lexer
from .parser import Parser
from .. import AsyncRunnable, MessageTypes, EventTypes # interfaces & basics
from .. import Cert, HTTPRequester, WebhookReceiver, WebsocketReceiver, Gateway, Client # net related
from .. import User, Channel, PublicChannel, PublicTextChannel, Guild, Event, Message # concepts
log = logging.getLogger(__name__)
class Bot(AsyncRunnable):
"""
    Represents an entity that handles messages/events and interacts with users and the khl server as programmed.
"""
client: Client
_me: Optional[User]
_cmd_index: Dict[str, Command]
_event_index: Dict[EventTypes, List[Callable]]
def __init__(self, *, token: str = '', cert: Cert = None, client: Client = None, gate: Gateway = None,
out: HTTPRequester = None, compress: bool = True, port=5000, route='/khl-wh'):
"""
The most common usage: ``Bot(token='xxxxxx')``
That's enough.
:param cert: used to build requester and receiver
:param client: the bot relies on
:param gate: the client relies on
:param out: the gate's component
:param compress: used to tune the receiver
:param port: used to tune the WebhookReceiver
:param route: used to tune the WebhookReceiver
"""
if not token and not cert:
raise ValueError('require token or cert')
self._init_client(cert or Cert(token=token), client, gate, out, compress, port, route)
self._me = None
self._cmd_index = {}
self._event_index = {}
self.client.register(MessageTypes.TEXT, self._make_msg_handler())
self.client.register(MessageTypes.SYS, self._make_event_handler())
def _init_client(self, cert: Cert, client: Client, gate: Gateway, out: HTTPRequester, compress: bool, port, route):
"""
        construct self.client from the given args.
        the client can be initialized in several ways, with the following priority:
        client > gate > out = compress = port = route
:param cert: used to build requester and receiver
:param client: the bot relies on
:param gate: the client relies on
:param out: the gate's component
:param compress: used to tune the receiver
:param port: used to tune the WebhookReceiver
:param route: used to tune the WebhookReceiver
:return:
"""
if client:
self.client = client
return
if gate:
self.client = Client(gate)
return
# client and gate not in args, build them
_out = out if out else HTTPRequester(cert)
if cert.type == Cert.Types.WEBSOCKET:
_in = WebsocketReceiver(cert, compress)
elif cert.type == Cert.Types.WEBHOOK:
_in = WebhookReceiver(cert, port=port, route=route, compress=compress)
else:
raise ValueError(f'cert type: {cert.type} not supported')
self.client = Client(Gateway(_out, _in))
def _make_msg_handler(self) -> Callable:
"""
        construct a handler that receives messages from the client and dispatches them via _cmd_index
"""
# use closure
async def handler(msg: Message):
for name, cmd in self._cmd_index.items():
try:
args = cmd.prepare(msg)
except Lexer.LexerException: # TODO: a more reasonable exception handle in lex and parse
continue
await cmd.execute(msg, *args)
return handler
def _make_event_handler(self) -> Callable:
async def handler(event: Event):
if event.event_type not in self._event_index:
return
if not self._event_index[event.event_type]:
return
for h in self._event_index[event.event_type]:
await h(self, event)
return handler
def add_command(self, cmd: Command) -> Command:
"""
register the cmd on current Bot
:param cmd: the Command going to be registered
:return: the cmd
"""
if cmd.name in self._cmd_index:
raise ValueError(f'cmd: {cmd.name} already exists')
self._cmd_index[cmd.name] = cmd
log.debug(f'cmd: {cmd.name} added')
return cmd
def get_command(self, name: str) -> Optional[Command]:
return self._cmd_index.get(name, None)
def remove_command(self, name: str):
if name in self._cmd_index:
del self._cmd_index[name]
def command(self, name: str = '', *, help: str = '', desc: str = '',
aliases: List[str] = (), prefixes: List[str] = ('/',), regex: Union[str, Pattern] = '',
lexer: Lexer = None, parser: Parser = None, rules: List[Callable] = ()):
"""
decorator, wrap a function in Command and register it on current Bot
:param name: the name of this Command, also used to trigger command in DefaultLexer
:param aliases: (DefaultLexer only) you can also trigger the command with aliases
:param prefixes: (DefaultLexer only) command prefix, default use '/'
:param regex: (RELexer only) pattern for the command
:param help: detailed manual
:param desc: short introduction
:param lexer: (Advanced) explicitly set the lexer
:param parser: (Advanced) explicitly set the parser
:param rules: only be executed if all rules are met
:return: wrapped Command
"""
args = {'help': help, 'desc': desc,
'aliases': aliases, 'prefixes': prefixes, 'regex': regex,
'lexer': lexer, 'parser': parser, 'rules': rules}
        # use a lambda to avoid declaring an explicit decorator() function
        # the Lexer is not initialized in advance because it needs func.__name__
        # this duplicates work done in the Command constructor; there may be a cleaner way
return lambda func: self.add_command(Command.command(name, **args)(func))
def add_event_handler(self, type: EventTypes, handler: Callable):
if type not in self._event_index:
self._event_index[type] = []
self._event_index[type].append(handler)
log.debug(f'event_handler {handler.__qualname__} for {type} added')
return handler
def on_event(self, type: EventTypes):
"""
decorator, register a function to handle events of the type
:param type: the type
:return: original func
"""
return lambda func: self.add_event_handler(type, func)
async def fetch_me(self, force_update: bool = False) -> User:
"""fetch detail of the bot it self as a ``User``"""
if force_update or not self._me or not self._me.is_loaded():
self._me = await self.client.fetch_me()
return self._me
@property
def me(self) -> User:
"""
        get the bot's own data
        RECOMMEND: use ``await fetch_me()``
        CAUTION: please call ``await fetch_me()`` first to load data from the khl server;
        the property is designed as 'empty-then-fetch' since fetching lazily here would break the rule 'net-related code is async'
:return: the bot's underlying User
"""
if self._me and self._me.is_loaded():
return self._me
raise ValueError('not loaded, please call `await fetch_me()` first')
async def fetch_public_channel(self, channel_id: str) -> PublicChannel:
"""fetch details of a public channel from khl"""
return await self.client.fetch_public_channel(channel_id)
async def fetch_guild(self, guild_id: str) -> Guild:
"""fetch details of a guild from khl"""
guild = Guild(_gate_=self.client.gate, id=guild_id)
await guild.load()
return guild
async def list_guild(self) -> List[Guild]:
"""list guilds the bot joined"""
return await self.client.list_guild()
@staticmethod
async def send(target: Channel, content: Union[str, List], *, temp_target_id: str = '', **kwargs):
"""
send a msg to a channel
``temp_target_id`` is only available in ChannelPrivacyTypes.GROUP
"""
if isinstance(target, PublicTextChannel):
kwargs['temp_target_id'] = temp_target_id
return await target.send(content, **kwargs)
async def upload_asset(self, file: str) -> str:
"""upload ``file`` to khl, and return the url to the file, alias for ``create_asset``"""
return await self.client.create_asset(file)
async def create_asset(self, file: str) -> str:
"""upload ``file`` to khl, and return the url to the file"""
return await self.client.create_asset(file)
async def kickout(self, guild: Guild, user: Union[User, str]):
"""kick ``user`` out from ``guild``"""
if guild.gate.requester != self.client.gate.requester:
raise ValueError('can not modify guild from other gate')
return await guild.kickout(user)
async def leave(self, guild: Guild):
"""leave from ``guild``"""
if guild.gate.requester != self.client.gate.requester:
raise ValueError('can not modify guild from other gate')
return await guild.leave()
def run(self):
try:
if not self.loop:
self.loop = asyncio.get_event_loop()
self.loop.run_until_complete(self.client.run())
except KeyboardInterrupt:
pass
except Exception as e:
log.error(e)
log.info('see you next time')
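
# A minimal usage sketch (illustrative only; the token is a placeholder and
# ``msg.ctx.channel`` is an assumption about where the Message exposes the
# originating channel):
#
#     bot = Bot(token='xxxxxx')
#
#     @bot.command(name='echo')
#     async def echo(msg: Message, *args):
#         await bot.send(msg.ctx.channel, ' '.join(args))
#
#     bot.run()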
|
171298
|
import requests
import rdflib
from whyis import nanopub
import datetime
import pytz
import dateutil.parser
from dateutil.tz import tzlocal
from werkzeug.datastructures import FileStorage
from werkzeug.http import http_date
from setlr import FileLikeFromIter
import re
import os
from requests_testadapter import Resp
import magic
import mimetypes
import traceback
import sys
from whyis.namespace import np, prov, dc, sio
class Importer:
min_modified = 0
import_once = False
def last_modified(self, entity_name, db, nanopubs):
old_nps = [nanopubs.get(x) for x, in db.query('''select ?np where {
?np np:hasAssertion ?assertion.
?assertion a np:Assertion; prov:wasQuotedFrom ?mapped_uri.
}''', initNs=dict(np=np, prov=prov), initBindings=dict(mapped_uri=rdflib.URIRef(entity_name)))]
modified = None
for old_np in old_nps:
m = old_np.modified
            # m = pytz.utc.localize(m)
if m is None:
continue
if modified is None or m > modified:
print(m, modified, old_np.modified)
modified = m
return modified
def load(self, entity_name, db, nanopubs):
entity_name = rdflib.URIRef(entity_name)
print("Fetching", entity_name)
old_nps = [nanopubs.get(x) for x, in db.query('''select ?np where {
?np np:hasAssertion ?assertion.
?assertion a np:Assertion; prov:wasQuotedFrom ?mapped_uri.
}''', initNs=dict(np=np, prov=prov), initBindings=dict(mapped_uri=rdflib.URIRef(entity_name)))]
updated = self.modified(entity_name)
if updated is None:
updated = datetime.datetime.now(pytz.utc)
#try:
g = self.fetch(entity_name)
#except Exception as e:
# print("Error loading %s: %s" % (entity_name, e))
# traceback.print_exc(file=sys.stdout)
# return
for new_np in nanopubs.prepare(g):
print("Adding new nanopub:", new_np.identifier)
self.explain(new_np, entity_name)
new_np.add((new_np.identifier, sio.isAbout, entity_name))
if updated is not None:
new_np.pubinfo.add(
(new_np.assertion.identifier, dc.modified, rdflib.Literal(updated, datatype=rdflib.XSD.dateTime)))
for old_np in old_nps:
new_np.pubinfo.add((old_np.assertion.identifier, prov.invalidatedAtTime,
rdflib.Literal(updated, datatype=rdflib.XSD.dateTime)))
nanopubs.publish(new_np)
for old_np in old_nps:
print("retiring", old_np.identifier)
nanopubs.retire(old_np.identifier)
def explain(self, new_np, entity_name):
activity = rdflib.BNode()
new_np.provenance.add((activity, rdflib.RDF.type, self.app.NS.whyis.KnowledgeImport))
new_np.provenance.add((new_np.assertion.identifier, prov.wasGeneratedBy, activity))
new_np.provenance.add((activity, prov.used, rdflib.URIRef(entity_name)))
new_np.provenance.add((new_np.assertion.identifier, prov.wasQuotedFrom, rdflib.URIRef(entity_name)))
new_np.provenance.add((new_np.assertion.identifier, prov.wasDerivedFrom, rdflib.URIRef(entity_name)))
|
171324
|
input_data = '1901,12.3\n1902,45.6\n1903,78.9'
print('input data is:')
print(input_data)
as_lines = input_data.split('\n')
print('as lines:')
print(as_lines)
for line in as_lines:
fields = line.split(',')
year = int(fields[0])
value = float(fields[1])
print(year, ':', value)
|
171350
|
from __future__ import division
from __future__ import print_function
import json
import pdb
import math
all_params = json.load(open('config.json'))
dataset_name = all_params['dataset_name']
locals().update(all_params['experiment_setup'])
locals().update(all_params[dataset_name])
tcn_params['model_params']['encoder_params']['kernel_size'] //= sample_rate
tcn_params['model_params']['decoder_params']['kernel_size'] //= sample_rate
tcn_params['model_params']['mid_lstm_params'] = None
temp = []
for k in rl_params['k_steps']:
temp.append(math.ceil(k / sample_rate))
rl_params['k_steps'] = temp
temp = []
for k in rl_params['glimpse']:
temp.append(math.ceil(k / sample_rate))
rl_params['glimpse'] = temp
#tcn_params['train_params'] = None
|
171360
|
import zmq.backend.cython._device
import zmq.backend.cython._poll
import zmq.backend.cython._version
import zmq.backend.cython.constants
import zmq.backend.cython.context
import zmq.backend.cython.error
import zmq.backend.cython.message
import zmq.backend.cython.socket
import zmq.backend.cython.utils
import zmq.devices.monitoredqueue
|
171406
|
from .light_frontend import LightFrontend
from .full_frontend import FullFrontend
from .hybrid_frontend import HybridFrontend
from .composite_frontend import CompositeFrontend
from .replacement_frontend import ReplacementFrontend
|
171429
|
from django.db import models
class CustomField(models.Field):
description = "A custom field type"
class DescriptionLackingField(models.Field):
pass
|
171432
|
from autogoal.contrib import find_classes
from autogoal.kb import (
algorithm,
Sentence,
Seq,
Word,
Stem,
build_pipeline_graph,
Supervised,
Label,
)
from autogoal.grammar import generate_cfg, Symbol
class Algorithm:
def __init__(
self,
tokenizer: algorithm(Sentence, Seq[Word]),
stemmer: algorithm(Word, Stem),
stopword: algorithm(Seq[Word], Seq[Word]),
) -> None:
self.tokenizer = tokenizer
self.stemmer = stemmer
self.stopword = stopword
def test_find_nltk_implementations():
grammar = generate_cfg(Algorithm, find_classes(include=["*.nltk.*"]))
assert Symbol("Algorithm[[Sentence],Seq[Word]]") in grammar._productions
assert Symbol("Algorithm[[Word],Stem]") in grammar._productions
assert Symbol("Algorithm[[Seq[Word]],Seq[Word]]") in grammar._productions
from autogoal.contrib.nltk import FeatureSeqExtractor
from autogoal.contrib.sklearn import CRFTagger
def test_crf_pipeline():
graph = build_pipeline_graph(
input_types=(Seq[Seq[Word]], Supervised[Seq[Seq[Label]]]),
output_type=Seq[Seq[Label]],
registry=[FeatureSeqExtractor, CRFTagger],
)
pipeline = graph.sample()
|
171446
|
class Pipeline:
def __init__(self, ready):
self._queue = []
self._ready = ready
@property
def empty(self):
return not self._queue
def queue(self, task):
print("queued")
self._queue.append(task)
task.add_done_callback(self._task_done)
def _task_done(self, task):
print('Done', task.result())
pop_idx = 0
for task in self._queue:
if not task.done():
break
self.write(task)
pop_idx += 1
if pop_idx:
self._queue[:pop_idx] = []
def write(self, task):
self._ready(task)
print('Written', task.result())
if __name__ == '__main__':
import asyncio
async def coro(sleep):
await asyncio.sleep(sleep)
return sleep
from uvloop import new_event_loop
loop = new_event_loop()
asyncio.set_event_loop(loop)
    pipeline = Pipeline(lambda task: None)  # Pipeline.__init__ requires a 'ready' callback; use a no-op here
def queue(x):
t = loop.create_task(coro(x))
pipeline.queue(t)
loop.call_later(2, lambda: queue(2))
loop.call_later(12, lambda: queue(2))
queue(1)
queue(10)
queue(5)
queue(1)
loop.run_forever()
|
171480
|
from MiscUtils.DataTable import DataTable
from MiscUtils.Funcs import hostName as HostName
from SitePage import SitePage
class SelectDatabase(SitePage):
def writeSideBar(self):
self.writeln('<a href="?showHelp=1" class="SideBarLink">Help</a>')
def writeContent(self):
self.saveFieldsToCookies()
self.writeDBForm(action='BrowseClasses')
self.writeRecentDatabases()
self.writeKnownDatabases()
if self.request().hasField('showHelp'):
self.writeHelp()
def writeDBForm(self, method='get', action=''):
if method:
method = 'method="%s"' % method
if action:
action = 'action="%s"' % action
source = '''\
name,type,comment,value
database,text,"e.g., MySQL"
host,text
user,text
password,password
'''
fields = DataTable()
fields.readString(source)
req = self.request()
wr = self.writeln
self.writeHeading('Enter database connection info:')
wr('<form %(method)s %(action)s>' % locals())
wr('<table>')
for field in fields:
field['value'] = req.value(field['name'], '')
wr('<tr><td>%(name)s:</td><td></td><td>'
'<input type="%(type)s" name="%(name)s" value="%(value)s">'
'</td><td>%(comment)s</td></tr>' % field)
wr('<tr><td colspan="2"> </td><td style="text-align:right">'
'<input type="submit" value="OK"></td><td> </td></tr>')
wr('</table></form>')
def writeRecentDatabases(self):
self.writeHeading('Select a recent database:')
self.writeln('<p>None</p>')
def writeKnownDatabases(self):
self.writeHeading('Select a known database:')
knownDBs = self.setting('KnownDatabases')
hostName = HostName()
if not hostName:
hostName = '_default_'
dbs = knownDBs.get(hostName, []) + knownDBs.get('_all_', [])
if dbs:
for db in dbs:
self.writeDB(db)
else:
self.writeln('<p>None</p>')
def writeDB(self, db):
# Set title
title = '%(database)s on %(host)s' % db
if db.get('user', ''):
title += ' with ' + db['user']
# Build up args for links
args = []
for key in self.dbKeys():
if key in db:
args.append('%s=%s' % (key, db[key]))
args = '&'.join(args)
# If the db connection info specifies a password, then
# the user can click through immediately.
# Otherwise, the click goes back to the same page with
# the fields filled out so that the user can enter the password.
if db.get('password'):
self.write('<p><a href="BrowseClasses?%s">%s</a>'
' (password included)' % (args, title))
else:
self.writeln('<p><a href="?%s">%s</a>'
' (password required)' % (args, title))
def dbKeys(self):
"""Get keys for database connections.
Returns a list of the valid keys that can be used for a
"database connection dictionary". These dictionaries are
found in the config file and in the recent list.
"""
return ['database', 'host', 'user', 'password']
|
171505
|
class EventRecorder(object):
def __init__(self):
super(EventRecorder, self).__init__()
self.events = {}
self.timestamp = 0
def record(self, event_name, **kwargs):
assert event_name not in self.events, "Event {} already recorded".format(event_name)
self.timestamp += 1
self.events[event_name] = Event(self.timestamp, kwargs)
def __getitem__(self, event_name):
return self.events[event_name]
class Event(object):
happened = True
def __init__(self, timestamp, info):
super(Event, self).__init__()
self.timestamp = timestamp
self.info = info
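# A minimal, hedged usage sketch (not part of the original module): it exercises the
# EventRecorder/Event classes defined above and shows that timestamps increase with
# each recorded event and that keyword arguments are stored on the Event.
if __name__ == "__main__":
    recorder = EventRecorder()
    recorder.record("started", pid=1234)
    recorder.record("finished", status="ok")
    assert recorder["started"].timestamp < recorder["finished"].timestamp
    assert recorder["finished"].info == {"status": "ok"}
    print(recorder["started"].info, recorder["finished"].info)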
|
171516
|
from sqlalchemy import *
import sqlalchemy.schema
import uuid
from sqlalchemy.sql import select
from migrate import *
import migrate.changeset
from migrate.changeset.constraint import ForeignKeyConstraint, PrimaryKeyConstraint
metadata = MetaData()
def make_uuid():
return unicode(uuid.uuid4())
## Tables and columns changed in the model
## Versioned:
## ('package', 'id'),
## ('package_tag', ('id', 'package_id', 'tag_id')),
## ('package_extra', ('id', 'package_id')),
## ('package_resource', ('id', 'package_id')),
## Versions:
## ('package_revision', 'id'),
## ('package_tag_revision', ('id', 'package_id', 'tag_id')),
## ('package_extra_revision', ('id', 'package_id')),
## ('package_resource_revision', ('id', 'package_id')),
## Non-versioned:
## ('tag', 'id'),
## ('rating', 'package_id'),
## ('package_search', 'package_id'),
## ('package_role', 'package_id'),
## ('package_group', 'package_id'),
def upgrade(migrate_engine):
global metadata
metadata = MetaData()
metadata.bind = migrate_engine
primary_table_name = 'package'
foreign_tables = ['package_revision',
'package_tag', 'package_tag_revision',
'package_extra', 'package_extra_revision',
'package_resource', 'package_resource_revision',
'rating', 'package_search',
'package_role', 'package_group']
revision_table_name = 'package_revision'
convert_to_uuids(migrate_engine, primary_table_name, foreign_tables, revision_table_name)
primary_table_name = 'package_resource'
foreign_tables = ['package_resource_revision']
revision_table_name = 'package_resource_revision'
convert_to_uuids(migrate_engine, primary_table_name, foreign_tables, revision_table_name)
primary_table_name = 'package_tag'
foreign_tables = ['package_tag_revision']
revision_table_name = 'package_tag_revision'
convert_to_uuids(migrate_engine, primary_table_name, foreign_tables, revision_table_name)
primary_table_name = 'package_extra'
foreign_tables = ['package_extra_revision']
revision_table_name = 'package_extra_revision'
convert_to_uuids(migrate_engine, primary_table_name, foreign_tables, revision_table_name)
primary_table_name = 'tag'
foreign_tables = ['package_tag', 'package_tag_revision']
revision_table_name = None
convert_to_uuids(migrate_engine, primary_table_name, foreign_tables, revision_table_name)
drop_sequencies(migrate_engine)
def convert_to_uuids(migrate_engine, primary_table_name, foreign_tables, revision_table_name=None):
'''Convert an id column in Primary Table to string type UUIDs.
How it works:
1 drop all foreign key constraints
2 alter type of revision id and foreign keys
3 create foreign key constraints (using cascade!)
4 create uuids for revisions (auto cascades elsewhere!)
@param primary_table_name - table containing the primary key id column
@param foreign_tables - names of tables which have this same key as a
foreign key constraint
@param revision_table_name - if primary_table is versioned, supply the name
of its related revision table, so that it can be updated at the same
time.
'''
#print('** Processing %s' % primary_table_name)
#print('*** Dropping fk constraints')
dropped_fk_constraints = drop_constraints_and_alter_types(primary_table_name, foreign_tables, revision_table_name)
#print('*** Adding fk constraints (with cascade)')
add_fk_constraints(migrate_engine, dropped_fk_constraints, primary_table_name)
#print('*** Creating UUIDs')
create_uuids(migrate_engine, primary_table_name, revision_table_name)
def drop_constraints_and_alter_types(primary_table_name, foreign_tables, revision_table_name):
# 1 drop all foreign key constraints
dropped_fk_constraints = []
primary_table = Table(primary_table_name, metadata, autoload=True)
for table_name in foreign_tables:
table = Table(table_name, metadata, autoload=True)
for constraint in table.constraints.copy():
if isinstance(constraint, sqlalchemy.schema.ForeignKeyConstraint):
foreign_key_cols = [key.column for key in constraint.elements]
fk_col = foreign_key_cols[0]
if fk_col.table == primary_table:
orig_fk = ForeignKeyConstraint(constraint.columns, foreign_key_cols, name=constraint.name, table=table)
orig_fk.drop()
dropped_fk_constraints.append((constraint.columns, foreign_key_cols, constraint.name, table.name))
#print 'CON', dropped_fk_constraints[-1]
# 2 alter type of primary table id and foreign keys
id_col = constraint.table.columns[constraint.columns[0]]
id_col.alter(type=UnicodeText)
primary_table = Table(primary_table_name, metadata, autoload=True)
id_col = primary_table.c['id']
id_col.alter(type=UnicodeText)
if revision_table_name:
# Revision table id column type changed as well
revision_table = Table(revision_table_name, metadata, autoload=True)
id_col = revision_table.c['id']
id_col.alter(type=UnicodeText)
return dropped_fk_constraints
def add_fk_constraints(migrate_engine, dropped_fk_constraints, primary_table_name):
# 3 create foreign key constraints
for fk_constraint in dropped_fk_constraints:
# cascade doesn't work
# see http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=48
# new_fk = ForeignKeyConstraint(*fk_constraint, onupdate='CASCADE')
# new_fk = ForeignKeyConstraint(*fk_constraint)
# new_fk.create()
# So we create via hand ...
constraint_columns, foreign_key_cols, constraint_name, table_name = fk_constraint
oursql = '''ALTER TABLE %(table)s
ADD CONSTRAINT %(fkeyname)s
FOREIGN KEY (%(col_name)s)
REFERENCES %(primary_table_name)s (id)
''' % {'table':table_name, 'fkeyname':constraint_name,
'col_name':constraint_columns[0],
'primary_table_name':primary_table_name}
migrate_engine.execute(oursql)
def create_uuids(migrate_engine, primary_table_name, revision_table_name):
# have changed type of cols so recreate metadata
metadata = MetaData(migrate_engine)
# 4 create uuids for primary entities and in related tables
primary_table = Table(primary_table_name, metadata, autoload=True)
if revision_table_name:
revision_table = Table(revision_table_name, metadata, autoload=True)
# fetchall wouldn't be optimal with really large sets of data but here <20k
ids = [ res[0] for res in
migrate_engine.execute(select([primary_table.c.id])).fetchall() ]
for count,id in enumerate(ids):
# if count % 100 == 0: print(count, id)
myuuid = make_uuid()
update = primary_table.update().where(primary_table.c.id==id).values(id=myuuid)
migrate_engine.execute(update)
if revision_table_name:
        # ensure each id in the revision table matches its continuity id.
q = revision_table.update().values(id=revision_table.c.continuity_id)
migrate_engine.execute(q)
def drop_sequencies(migrate_engine):
sequencies = ['package_extra', 'package_extra_revision', 'package',
'package_resource', 'package_resource_revision',
                  'package_revision', 'package_tag', 'package_tag_revision',
'revision', 'tag']
for sequence in sequencies:
migrate_engine.execute('ALTER TABLE %s ALTER COLUMN id DROP DEFAULT;' % sequence)
for sequence in sequencies:
migrate_engine.execute('drop sequence %s_id_seq;' % sequence)
def downgrade(migrate_engine):
raise NotImplementedError()
|
171525
|
import warnings
from pymysql.tests import base
import pymysql.cursors
class CursorTest(base.PyMySQLTestCase):
def setUp(self):
super(CursorTest, self).setUp()
conn = self.connections[0]
self.safe_create_table(
conn,
"test", "create table test (data varchar(10))",
)
cursor = conn.cursor()
cursor.execute(
"insert into test (data) values "
"('row1'), ('row2'), ('row3'), ('row4'), ('row5')")
cursor.close()
self.test_connection = pymysql.connect(**self.databases[0])
self.addCleanup(self.test_connection.close)
def test_cleanup_rows_unbuffered(self):
conn = self.test_connection
cursor = conn.cursor(pymysql.cursors.SSCursor)
cursor.execute("select * from test as t1, test as t2")
for counter, row in enumerate(cursor):
if counter > 10:
break
del cursor
self.safe_gc_collect()
c2 = conn.cursor()
with warnings.catch_warnings(record=True) as log:
warnings.filterwarnings("always")
c2.execute("select 1")
self.assertGreater(len(log), 0)
self.assertEqual(
"Previous unbuffered result was left incomplete",
str(log[-1].message))
self.assertEqual(
c2.fetchone(), (1,)
)
self.assertIsNone(c2.fetchone())
def test_cleanup_rows_buffered(self):
conn = self.test_connection
cursor = conn.cursor(pymysql.cursors.Cursor)
cursor.execute("select * from test as t1, test as t2")
for counter, row in enumerate(cursor):
if counter > 10:
break
del cursor
self.safe_gc_collect()
c2 = conn.cursor()
c2.execute("select 1")
self.assertEqual(
c2.fetchone(), (1,)
)
self.assertIsNone(c2.fetchone())
|
171613
|
import numpy as np
def trust_region_solver(M, g, d_max, max_iter=2000, stepsize=1.0e-3):
"""Solves trust region problem with gradient descent
maximize 1/2 * x^T M x + g^T x
s.t. |x|_2 <= d_max
initialize x = g / |g| * d_max
"""
x = g / np.linalg.norm(g) * d_max
for _ in range(max_iter):
# gradient ascent
x = x + stepsize * (M @ x + g)
# projection to sphere
x = x / np.linalg.norm(x) * d_max
## debug
#loss = 0.5 * x.T @ M @ x + g.T @ x
#print(f'Loss: {loss}')
return x
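# A small, hedged usage sketch (not from the original file): build a random symmetric M
# and gradient g, run the projected-ascent solver above, and check that the returned
# point stays within the trust-region radius d_max. All names below are illustrative.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    M = rng.standard_normal((5, 5))
    M = 0.5 * (M + M.T)  # symmetrize so x^T M x is a well-defined quadratic form
    g = rng.standard_normal(5)
    d_max = 2.0
    x_opt = trust_region_solver(M, g, d_max)
    print("objective:", 0.5 * x_opt @ M @ x_opt + g @ x_opt)
    print("constraint |x|_2 =", np.linalg.norm(x_opt), "<= d_max =", d_max)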
|
171635
|
class InvalidWithdrawal(Exception):
pass
raise InvalidWithdrawal("You don't have $50 in your account")
|
171657
|
from datetime import timedelta
from django.core.management import call_command
from jcasts.users.factories import UserFactory
class TestNewEpisodesEmails:
def test_command(self, db, mocker):
yes = UserFactory(send_email_notifications=True)
UserFactory(send_email_notifications=False)
UserFactory(send_email_notifications=True, is_active=False)
mock_send = mocker.patch("jcasts.episodes.emails.send_new_episodes_email.delay")
call_command("send_new_episodes_emails")
assert len(mock_send.mock_calls) == 1
assert mock_send.call_args == (
(
yes,
timedelta(days=7),
),
)
|
171687
|
from __future__ import annotations
from .abs import abs_val
def abs_min(x: list[int]) -> int:
"""
>>> abs_min([0,5,1,11])
0
>>> abs_min([3,-10,-2])
-2
>>> abs_min([])
Traceback (most recent call last):
...
ValueError: abs_min() arg is an empty sequence
"""
if len(x) == 0:
raise ValueError("abs_min() arg is an empty sequence")
j = x[0]
for i in x:
if abs_val(i) < abs_val(j):
j = i
return j
def main():
a = [-3, -1, 2, -11]
print(abs_min(a)) # = -1
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
|
171712
|
import importlib
def create_storage(app):
"""
Load specified storage and return the object.
"""
if 'STORAGE' not in app.config:
raise Exception("Missing STORAGE config key")
storage = importlib.import_module('.' + app.config['STORAGE'], __name__)
return storage.create_storage(app)
|
171735
|
import http.client
import json
import subprocess
import os
tempDir = os.getenv('XDG_RUNTIME_DIR', '.')
conn = http.client.HTTPConnection('localhost', 8888)
params = """{
"level": "debug",
"media": "image/png",
"input_type": "text"
}"""
def getCaptcha():
conn.request("POST", "/v1/captcha", body=params)
response = conn.getresponse()
if response:
responseStr = response.read()
return json.loads(responseStr)
def getAndSolve(idStr):
conn.request("GET", "/v1/media?id=" + idStr)
response = conn.getresponse()
if response:
responseBytes = response.read()
fileName = tempDir + "/captcha.png"
with open(fileName, "wb") as f:
f.write(responseBytes)
ocrResult = subprocess.Popen("gocr " + fileName, shell=True, stdout=subprocess.PIPE)
ocrAnswer = ocrResult.stdout.readlines()[0].strip().decode()
return ocrAnswer
def postAnswer(captchaId, ans):
reply = {"answer": ans, "id" : captchaId}
conn.request("POST", "/v1/answer", json.dumps(reply))
response = conn.getresponse()
    if response:
        return response.read()
for i in range(0, 10000):
captcha = getCaptcha()
captchaId = captcha["id"]
ans = getAndSolve(captchaId)
print(i, postAnswer(captchaId, ans))
|
171808
|
import sys, os
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(CURRENT_DIR, "..", ".."))
import constants
import requests
HTTP_ERROR_CODE_START = 400
HTTP_ERROR_MESSAGE_FORMAT= "Site '%s' returned error '%d'"
REQUEST_ERROR_FORMAT = "Requesting connection to '%s' errored!"
HYPERTEXT_FORMAT_CODES = [ "http://", "https://" ]
OPEN_WEB_BROWSER = "sensible-browser \"%s\" 2>&1 /dev/null &"
if sys.platform.startswith(constants.MAC_OS_X_IDENTIFIER):
OPEN_WEB_BROWSER = "open \"%s\" 2>&1 /dev/null &"
SYS_CMD = lambda x : os.system(OPEN_WEB_BROWSER % (x,))
def open_web_browser(query, **kwargs):
sites = []
parsed = kwargs["nlp"](unicode(query.replace(" dot ", ".")))
for token in parsed:
if token.like_url:
sites.append(str(token.text))
kwargs["log_func"]( sites, tolerance=2 )
for site in sites:
for hypertext_code in HYPERTEXT_FORMAT_CODES:
try:
url = "%s%s" % (hypertext_code, site)
response = requests.get(url)
except:
kwargs["log_func"](REQUEST_ERROR_FORMAT % (site,), tolerance=1)
continue
if response.status_code < HTTP_ERROR_CODE_START:
SYS_CMD(site)
break
else:
error_msg = HTTP_ERROR_MESSAGE_FORMAT % (site, response.status_code)
kwargs["log_func"](error_msg, tolerance=1)
TRIGGER_MODEL = "OPEN_WEB_BROWSER.model"
FUNC = open_web_browser
|
171813
|
from flask_login import current_user
from flask_wtf import FlaskForm
from flask_wtf.file import FileAllowed, FileField, FileRequired
import pytz
from wtforms import ValidationError
from wtforms.fields import IntegerField, SelectField, StringField
from wtforms.validators import DataRequired, NumberRange
from wtforms_components import TimeField
from busy_beaver.clients import meetup
from busy_beaver.common.datetime_utilities import add_gmt_offset_to_timezone
from busy_beaver.models import UpcomingEventsGroup
ALLOWED_FILE_TYPES = ["jpg", "jpeg", "png"]
TIMEZONES = [(pytz.timezone(tz), tz) for tz in pytz.common_timezones]
TZ_CHOICES = sorted(
add_gmt_offset_to_timezone(TIMEZONES), key=lambda x: int(x[1][3:5]), reverse=True
)
WEEKDAYS = [
("Sunday",) * 2,
("Monday",) * 2,
("Tuesday",) * 2,
("Wednesday",) * 2,
("Thursday",) * 2,
("Friday",) * 2,
("Saturday",) * 2,
]
class GitHubSummaryConfigurationForm(FlaskForm):
channel = SelectField(label="Channel")
summary_post_time = TimeField("Time to post", validators=[DataRequired()])
summary_post_timezone = SelectField(
label="Timezone", choices=TZ_CHOICES, default="UTC"
)
class UpcomingEventsConfigurationForm(FlaskForm):
channel = SelectField(label="Channel")
post_day_of_week = SelectField("Day to post", choices=WEEKDAYS, default="Monday")
post_time = TimeField("Time to post", validators=[DataRequired()])
post_timezone = SelectField(label="Timezone", choices=TZ_CHOICES, default="UTC")
post_num_events = IntegerField(
label="Number of events to show", validators=[NumberRange(min=1)]
)
class AddNewGroupConfigurationForm(FlaskForm):
meetup_urlname = StringField("URL identifer", validators=[DataRequired()])
def validate_meetup_urlname(form, field):
group_to_add = field.data
matching_group = (
UpcomingEventsGroup.query.filter(
UpcomingEventsGroup.meetup_urlname.ilike(group_to_add)
)
.filter_by(configuration=current_user.installation.upcoming_events_config)
.first()
)
if matching_group:
raise ValidationError("Group already added")
group_name = meetup.get_urlname(group_to_add)
if not group_name:
raise ValidationError("Group does not exist")
field.data = group_name
class OrganizationNameForm(FlaskForm):
organization_name = StringField("Organization Name", validators=[DataRequired()])
class OrganizationLogoForm(FlaskForm):
logo = FileField(
"Upload Logo",
validators=[
FileRequired(),
FileAllowed(ALLOWED_FILE_TYPES, "PNG / JPG Images only!"),
],
)
|
171818
|
from random import randint
from pynput.keyboard import Key, Listener
output = 'kld' + str(randint(0, 10000)) + '.txt'
with open(output, 'w'):
    pass  # create the log file
def on_press(key):
    with open(output, 'a') as f:
        f.write('{0} pressed\n'.format(key))
def on_release(key):
    with open(output, 'a') as f:
        f.write('{0} released\n'.format(key))
if key == Key.esc:
return False
with Listener(on_press=on_press, on_release=on_release) as listener:
listener.join()
|
171881
|
import six
import json
from prompt_toolkit.application import get_app
from prompt_toolkit.filters import to_filter
from prompt_toolkit.mouse_events import MouseEventType
from prompt_toolkit.key_binding.key_bindings import KeyBindings
from prompt_toolkit.layout.containers import Window
from prompt_toolkit.layout.controls import FormattedTextControl
from freud.model import db
from freud.ui.text_buffers import summary_buffer
class ButtonManager:
"""
Button logic flows through here. Button Manager saves the current button,
previous button, list of buttons, and provides a click handler.
"""
current_button = None
prev_button = None
buttons = None
def __init__(self, name):
self.name = name
def click_handler(self):
result = db.fetch_one(name=self.name)
output = {
'name': result.name,
'method': result.method,
'url': result.url
}
if result.headers:
output.update({
'headers': json.loads(result.headers)
})
if result.body:
try:
body = json.loads(result.body)
except json.decoder.JSONDecodeError:
body = str(result.body).splitlines()
output.update({
'body': body
})
if result.auth:
auth = json.loads(result.auth)
output.update({
'auth': {
'type': auth['type'],
'user': auth['user']
}
})
summary_buffer.read_only = to_filter(False)
summary_buffer.text = json.dumps(output, indent=2)
summary_buffer.read_only = to_filter(True)
type(self).current_button = self.name
app = get_app()
type(self).prev_button = app.layout.current_window
@classmethod
def update_buttons(cls, app):
windows = [w for w in app.layout.find_all_windows()]
cls.buttons = []
for window in windows:
if 'CustomButton' in str(window):
cls.buttons.append(window)
if cls.prev_button is None and len(cls.buttons) > 0:
cls.prev_button = cls.buttons[0]
return cls.buttons
class SortOrder:
""" Saves the sort order for reference by application """
sort_by = None
order = None
class CustomButton:
"""
Taken from Python Prompt Toolkit's Button class for customization
Clickable button.
:param text: The caption for the button.
:param handler: `None` or callable. Called when the button is clicked.
:param width: Width of the button.
"""
def __init__(self, text, handler=None):
assert isinstance(text, six.text_type)
assert handler is None or callable(handler)
self.text = text
self.handler = handler
self.control = FormattedTextControl(
self._get_text_fragments,
key_bindings=self._get_key_bindings(),
show_cursor=False,
focusable=True)
def get_style():
if get_app().layout.has_focus(self):
return 'class:button.focused'
return 'class:button'
self.window = Window(
self.control,
height=1,
style=get_style,
dont_extend_width=True,
dont_extend_height=True)
def _get_text_fragments(self):
text = self.text
def handler(mouse_event):
if mouse_event.event_type == MouseEventType.MOUSE_UP:
self.handler()
return [
('class:button.arrow', '', handler),
('class:button.text', text, handler),
('class:button.arrow', '', handler),
]
def _get_key_bindings(self):
kb = KeyBindings()
@kb.add(' ')
@kb.add('enter')
def _(event):
if self.handler is not None:
self.handler()
return kb
def __pt_container__(self):
return self.window
def on_startup(app):
""" Run from __main__ after render """
ButtonManager.update_buttons(app)
if not summary_buffer.text:
# When starting app, select first server if none selected
select_item(app)
class SingleClick:
""" Provides mouse click key """
def __init__(self):
self.event_type = MouseEventType.MOUSE_UP
def select_item(event):
""" Simulate mouse click """
# Don't try to select button if there are none
if len(ButtonManager.buttons) > 0:
try:
event.app.layout.current_window.content.text()[1][2](SingleClick())
# If app is passed in, rather than event
except AttributeError:
event.layout.current_window.content.text()[1][2](SingleClick())
|
171892
|
import tensorflow as tf
from experiment.mapping.model.transform_both import FixSpaceModel
from experiment.utils.variables import weight_variable, bias_variable
class FixSpaceSeparateTransformationModel(FixSpaceModel):
def __init__(self, config, config_global, logger):
super(FixSpaceSeparateTransformationModel, self).__init__(config, config_global, logger)
def build(self, data, sess):
self.build_input(data, sess)
W1_src = weight_variable('W1_src', [data.embedding_size, data.embedding_size])
b1_src = bias_variable('b1_src', [data.embedding_size])
W1_target = weight_variable('W1_target', [data.embedding_size, data.embedding_size])
b1_target = bias_variable('b1_target', [data.embedding_size])
source_rep = tf.nn.tanh(tf.nn.xw_plus_b(self.input_source, W1_src, b1_src))
translation_rep = tf.nn.tanh(tf.nn.xw_plus_b(self.input_translation, W1_target, b1_target))
random_other_rep = tf.nn.tanh(tf.nn.xw_plus_b(self.input_random_other, W1_target, b1_target))
self.create_outputs(source_rep, translation_rep, random_other_rep)
component = FixSpaceSeparateTransformationModel
|
171932
|
import unittest
from common_utils import VerboseTestCase
import subprocess
class TestLinearReorder(VerboseTestCase):
def test_linear_reorder(self):
with subprocess.Popen('DNNL_VERBOSE=1 python -u linear_reorder.py', shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as p:
segmentation = {
'fp32': {'reorder_for_pack': 2, 'reorder_for_dtype': 0, 'reorder_for_format': 0, 'redundent_reorder' : 0,},
'bf16': {'reorder_for_pack': 3, 'reorder_for_dtype': 0, 'reorder_for_format': 0, 'redundent_reorder' : 0,},
            }  # only pre-pack reorders are expected; any additional reorder will fail the test
seg = None
for line in p.stdout.readlines():
line = str(line, 'utf-8').strip()
if line.endswith('***************'):
seg = line.strip().split(',')[0]
continue
                # Check whether the number of each kind of reorder matches the expected counts
if self.is_dnnl_verbose(line) and self.ReorderForPack(line):
segmentation[seg]['reorder_for_pack'] -= 1
self.assertTrue(segmentation[seg]['reorder_for_pack'] >=0, "show unexpected reorder for pack")
if self.is_dnnl_verbose(line) and self.OnlyReorderDtype(line):
segmentation[seg]['reorder_for_dtype'] -= 1
self.assertTrue(segmentation[seg]['reorder_for_dtype'] >=0, "show unexpected reorder for dtype")
if self.is_dnnl_verbose(line) and self.OnlyReorderFormat(line):
segmentation[seg]['reorder_for_format'] -= 1
self.assertTrue(segmentation[seg]['reorder_for_format'] >=0, "show unexpected reorder for format")
if self.is_dnnl_verbose(line) and self.RedundantReorder(line):
segmentation[seg]['redundent_reorder'] -= 1
self.assertTrue(segmentation[seg]['redundent_reorder'] >=0, "show unexpected redundent reorder")
if __name__ == '__main__':
test = unittest.main()
|
171964
|
import numpy as np
from .Layer import Layer
class Sign(Layer):
"""Sign Layer
f(x) = 1 for x > 0
f(x) = 0 for x = 0
f(x) = -1 for x < 0
Attributes:
input_shape = [N, C, H, W]: The shape of the input tensor
output_shape = [N, C, H, W]: The shape of the resulting output tensor, must match the input shape
"""
def __init__(self, graph, node, input_shape):
super().__init__(input_shape, input_shape, "sign")
# self.input_shape = get_tensor_shape(graph, node.output[0])
# self.output_shape = input_shape
# self.name = "sign"
class Sigmoid(Layer):
"""Sigmoid activation
f(x) = 1 / (1 + exp(-x))
Attributes:
input_shape = [N, C, H, W]: The shape of the input tensor
output_shape = [N, C, H, W]: The shape of the resulting output tensor, must match the input shape
"""
def __init__(self, graph, node, input_shape):
super().__init__(input_shape, input_shape, "sigmoid")
class Relu(Layer):
"""Rectified Linear Unit
f(x) = max(0, x)
Attributes:
input_shape = [N, C, H, W]: The shape of the input tensor
output_shape = [N, C, H, W]: The shape of the resulting output tensor, must match the input shape
"""
def __init__(self, graph, node, input_shape):
super().__init__(input_shape, input_shape, "relu")
class LeakyRelu(Layer):
"""Leaky Rectified Linear Unit
f(x) = alpha * x for x < 0
f(x) = x for x >= 0
Attributes:
input_shape = [N, C, H, W]: The shape of the input tensor
output_shape = [N, C, H, W]: The shape of the resulting output tensor, must match the input shape
alpha: Coefficient of leakage
"""
def __init__(self, graph, node, input_shape):
super().__init__(input_shape, input_shape, "leakyrelu")
class Step(Layer):
"""Step Layer
f(x) = high for x > threshold
f(x) = high for x = threshold and threshold_is_high
f(x) = low for x = threshold and not threshold_is_high
f(x) = low for x < threshold
This is the Activation Layer in a binary neural net as it has only two distinct outputs (in comparison
to the three outputs of Sign Layers). There is no official support for Step Layers in ONNX.
To generate a net with Step Layers, use the following ONNX structure:
Greater + Where or
Less + Where
The code generator will convert this into a Step Layer if the binary argument is passed.
Example in PyTorch:
x = torch.where(x > 0, torch.tensor([1.0]), torch.tensor([-1.0]))
When a BatchNormalization Layer follows directly afterwards, the scales and biases are embedded as thresholds
of the Step Layer. The following holds since x is an integer:
x * s - b > 0
x > int(b / s)
The output is directly packed into ints of size binary_word_size. This is done by setting each bit individually.
The following sets the c'th leftmost bit to 1 or 0:
output |= (1U << ((binary_word_size-1) - c % binary_word_size))
output &= ~(1U << ((binary_word_size-1) - c % binary_word_size))
Attributes:
input_shape = [N, C, H, W]: The shape of the input tensor
output_shape = [N, C, H, W]: The shape of the resulting output tensor, must match the input shape
threshold: The threshold, can be scalar or numpy array
low: Value selected at indices where x < threshold
high: Value selected at indices where x > threshold
threshold_is_high: Whether high value is selected where x = threshold
"""
def __init__(self, input_shape, threshold, low, high):
super().__init__(input_shape, input_shape, "step")
self.threshold = threshold
self.low = low
self.high = high
self.threshold_is_high = True
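# A hedged numeric check (not in the original module) of the BatchNorm-folding claim in
# the Step docstring above: for a positive scale s, x * s - b > 0 is equivalent to
# comparing x against the folded threshold b / s. The names s and b are illustrative.
def _demo_folded_threshold(s=0.5, b=3.0):
    folded = b / s
    for x in range(-5, 10):
        assert (x * s - b > 0) == (x > folded)
    return folded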
# class Softmax(Layer):
# """Softmax (normalized exponential)
# To combat numerical issues when doing softmax computation, a common trick is used that shifts
# the input vector by subtracting the maximum element in it from all elements.
# z = x - max(x)
# numerator = np.exp(z)
# denominator = np.sum(numerator)
# softmax = numerator/denominator
# Attributes:
# output_shape = [N, D]: The dimension of the output tensor
# """
# def __init__(self, output_shape):
# self.input_shape = self.output_shape = output_shape
# def render(self, backend, **kwargs):
# code_init = ''
# code_alloc = super(Softmax, self).render('alloc', output_shape=self.output_shape, backend=backend, **kwargs)
# code_predict = super(Softmax, self).render('softmax', output_size=self.output_shape[1], backend=backend,**kwargs)
# return code_init, code_alloc, code_predict
# def output_type(self, input_type, backend):
# return 'float'
class LogSoftmax(Layer):
"""Log of Softmax
To combat numerical issues when doing softmax computation, a common trick is used that shifts
the input vector by subtracting the maximum element in it from all elements.
z = x - max(x)
numerator = np.exp(z)
denominator = np.sum(numerator)
softmax = numerator/denominator
logsoftmax = np.log(softmax)
Attributes:
output_shape = [N, D]: The dimension of the output tensor
"""
def __init__(self, graph, node, input_shape):
super().__init__(input_shape, input_shape, "logsoftmax")
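# A hedged, illustrative sketch (not part of the original module) of the numerically
# stable softmax/log-softmax computation described in the docstrings above: shifting
# the input by its maximum leaves the result unchanged but avoids overflow in exp().
def _demo_stable_softmax(x=np.array([1000.0, 1001.0, 1002.0])):
    z = x - np.max(x)  # a naive np.exp(x) would overflow for these values
    numerator = np.exp(z)
    denominator = np.sum(numerator)
    softmax = numerator / denominator
    logsoftmax = np.log(softmax)
    return softmax, logsoftmax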
|
171972
|
import dateutil.parser
import freezegun
from behaving import environment as benv
PERSONAS = {}
def before_all(context):
benv.before_all(context)
def after_all(context):
benv.after_all(context)
def before_feature(context, feature):
benv.before_feature(context, feature)
def after_feature(context, feature):
benv.after_feature(context, feature)
def before_scenario(context, scenario):
benv.before_scenario(context, scenario)
context.personas = PERSONAS
def after_scenario(context, scenario):
benv.after_scenario(context, scenario)
def before_step(context, step):
if hasattr(context, 'frozen_current_time'):
if not hasattr(context, 'freezer'):
context.freezer = freezegun.freeze_time(
context.frozen_current_time, tick=context.freeze_time_with_tick
)
else:
context.freezer.time_to_freeze = dateutil.parser.parse(
context.frozen_current_time
)
context.freezer.tick = context.freeze_time_with_tick
context.freezer.start()
def after_step(context, step):
if hasattr(context, 'freezer') and hasattr(context, 'frozen_current_time'):
context.freezer.stop()
|
172001
|
from minos.common.testing import (
MockedDatabaseClient,
)
from minos.networks import (
BrokerPublisherQueueDatabaseOperationFactory,
)
from ..collections import (
MockedBrokerQueueDatabaseOperationFactory,
)
class MockedBrokerPublisherQueueDatabaseOperationFactory(
BrokerPublisherQueueDatabaseOperationFactory, MockedBrokerQueueDatabaseOperationFactory
):
"""For testing purposes"""
MockedDatabaseClient.set_factory(
BrokerPublisherQueueDatabaseOperationFactory, MockedBrokerPublisherQueueDatabaseOperationFactory
)
|
172015
|
import functools
from .common import InfoExtractor
from ..utils import (
OnDemandPagedList,
traverse_obj,
unified_strdate,
)
class GronkhIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?gronkh\.tv/(?:watch/)?stream/(?P<id>\d+)'
_TESTS = [{
'url': 'https://gronkh.tv/stream/536',
'info_dict': {
'id': '536',
'ext': 'mp4',
'title': 'GTV0536, 2021-10-01 - MARTHA IS DEAD #FREiAB1830 !FF7 !horde !archiv',
'view_count': 19491,
'thumbnail': 'https://01.cdn.vod.farm/preview/6436746cce14e25f751260a692872b9b.jpg',
'upload_date': '20211001'
},
'params': {'skip_download': True}
}, {
'url': 'https://gronkh.tv/watch/stream/546',
'only_matching': True,
}]
def _real_extract(self, url):
id = self._match_id(url)
data_json = self._download_json(f'https://api.gronkh.tv/v1/video/info?episode={id}', id)
m3u8_url = self._download_json(f'https://api.gronkh.tv/v1/video/playlist?episode={id}', id)['playlist_url']
formats, subtitles = self._extract_m3u8_formats_and_subtitles(m3u8_url, id)
if data_json.get('vtt_url'):
subtitles.setdefault('en', []).append({
'url': data_json['vtt_url'],
'ext': 'vtt',
})
self._sort_formats(formats)
return {
'id': id,
'title': data_json.get('title'),
'view_count': data_json.get('views'),
'thumbnail': data_json.get('preview_url'),
'upload_date': unified_strdate(data_json.get('created_at')),
'formats': formats,
'subtitles': subtitles,
}
class GronkhFeedIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?gronkh\.tv(?:/feed)?/?(?:#|$)'
IE_NAME = 'gronkh:feed'
_TESTS = [{
'url': 'https://gronkh.tv/feed',
'info_dict': {
'id': 'feed',
},
'playlist_count': 16,
}, {
'url': 'https://gronkh.tv',
'only_matching': True,
}]
def _entries(self):
for type_ in ('recent', 'views'):
info = self._download_json(
f'https://api.gronkh.tv/v1/video/discovery/{type_}', 'feed', note=f'Downloading {type_} API JSON')
for item in traverse_obj(info, ('discovery', ...)) or []:
yield self.url_result(f'https://gronkh.tv/watch/stream/{item["episode"]}', GronkhIE, item.get('title'))
def _real_extract(self, url):
return self.playlist_result(self._entries(), 'feed')
class GronkhVodsIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?gronkh\.tv/vods/streams/?(?:#|$)'
IE_NAME = 'gronkh:vods'
_TESTS = [{
'url': 'https://gronkh.tv/vods/streams',
'info_dict': {
'id': 'vods',
},
'playlist_mincount': 150,
}]
_PER_PAGE = 25
def _fetch_page(self, page):
items = traverse_obj(self._download_json(
'https://api.gronkh.tv/v1/search', 'vods', query={'offset': self._PER_PAGE * page, 'first': self._PER_PAGE},
note=f'Downloading stream video page {page + 1}'), ('results', 'videos', ...))
for item in items or []:
yield self.url_result(f'https://gronkh.tv/watch/stream/{item["episode"]}', GronkhIE, item['episode'], item.get('title'))
def _real_extract(self, url):
entries = OnDemandPagedList(functools.partial(self._fetch_page), self._PER_PAGE)
return self.playlist_result(entries, 'vods')
|
172032
|
import pytest
from munch import DefaultMunch
from market_maker.orders_manager import OrdersManager
from market_maker.gateways import gateway_interface
from market_maker.definitions import (
ApiResult,
OrderRequest,
OrderType,
OrderSide,
)
class bittest_storage():
def __init__(self):
self.uid_to_eid = {}
self.eid_to_uid = {}
class bittest_adapter(gateway_interface.GatewayInterface):
def __init__(self):
super().__init__()
self.is_ready_flag = True
self.storage = bittest_storage()
self.config = DefaultMunch()
self.config.name = "bittest"
self.orders_sent = 0
self.orders_amended = 0
self.orders_cancelled = 0
self.amend_orders_data = []
self.new_orders_data = []
def set_order_update_callback(self, callback):
pass
async def send_order(self, order_request):
res = ApiResult()
res.success = True
self.orders_sent += 1
self.new_orders_data.append(order_request)
return res
async def send_orders(self, orders_request):
res = ApiResult()
res.success = True
self.orders_sent += len(orders_request)
for _request in orders_request:
self.new_orders_data.append(_request)
return res
async def amend_orders(self, new, old):
res = ApiResult()
res.success = True
self.orders_amended += len(new)
return res
async def amend_order(self, i, j):
res = ApiResult()
res.success = True
self.orders_amended += 1
self.amend_orders_data.append((i, j))
return res
async def cancel_order(self, cancel_request):
res = ApiResult()
res.success = True
self.orders_cancelled += 1
return res
async def cancel_orders(self, cancel_requests):
res = ApiResult()
res.success = True
self.orders_cancelled += len(cancel_requests)
return res
async def cancel_active_orders(self):
pass
async def start(self):
pass
async def stop(self):
pass
def is_ready(self):
return self.is_ready_flag
async def listen(self):
pass
@pytest.mark.asyncio
async def test_place_orders_1():
adapter = bittest_adapter()
om = OrdersManager(adapter)
buy_order = OrderRequest()
buy_order.side = OrderSide.buy
buy_order.type = OrderType.limit
buy_order.price = 100.0
buy_order.quantity = 1.0
sell_order = OrderRequest()
sell_order.side = OrderSide.sell
sell_order.type = OrderType.limit
sell_order.price = 100.0
sell_order.quantity = 1.0
await om.place_orders([buy_order, sell_order])
assert om.exchange_adapter.orders_sent == 2
assert len(om.orders.values()) == 2
|
172050
|
from django.utils.translation import gettext as _
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from onadata.apps.api.models.project import Project
from onadata.apps.api.models.project_xform import ProjectXForm
from onadata.apps.logger.models.xform import XForm
from onadata.libs.utils.model_tools import queryset_iterator
class Command(BaseCommand):
help = _(u"Check for forms not in a project"
u" and move them to the default project")
def handle(self, *args, **options):
print "Task started ..."
# Get all the users
for user in queryset_iterator(User.objects.all()):
# For each user get the forms which are projectless
for form in queryset_iterator(XForm.objects
# JNM TEMPORARY
#.select_related('projectxform')
.filter(projectxform=None,
user=user)):
# Create the default project
self.create_and_assign_project(user, form)
print "Task completed ..."
def create_and_assign_project(self, user, form):
name = user.username + '\'s Project'
# Check if exists first
projects = Project.objects.filter(organization=user, name=name)
if not len(projects):
metadata = {'description': 'Default Project'}
project = Project.objects.create(name=name,
organization=user,
created_by=user,
metadata=metadata)
print "Created project " + project.name
else:
project = projects[0]
# Link the project to the form
ProjectXForm.objects.create(xform=form,
project=project,
created_by=user)
print "Added " + form.id_string + " to project " + project.name
|
172168
|
import matplotlib.pyplot as plt
import numpy as np
from keras.datasets import mnist
from keras.layers import BatchNormalization, Input, Dense, Reshape, Flatten
from keras.layers.advanced_activations import LeakyReLU
from keras.models import Sequential, Model
from keras.optimizers import Adam
def build_generator(latent_dim: int):
"""
    Build generator network
:param latent_dim: latent vector size
"""
model = Sequential([
Dense(128, input_dim=latent_dim),
LeakyReLU(alpha=0.2),
BatchNormalization(momentum=0.8),
Dense(256),
LeakyReLU(alpha=0.2),
BatchNormalization(momentum=0.8),
Dense(512),
LeakyReLU(alpha=0.2),
BatchNormalization(momentum=0.8),
Dense(np.prod((28, 28, 1)), activation='tanh'),
# reshape to MNIST image size
Reshape((28, 28, 1))
])
model.summary()
# the latent input vector z
z = Input(shape=(latent_dim,))
generated = model(z)
# build model from the input and output
return Model(z, generated)
def build_discriminator():
"""
Build discriminator network
"""
model = Sequential([
Flatten(input_shape=(28, 28, 1)),
Dense(256),
LeakyReLU(alpha=0.2),
Dense(128),
LeakyReLU(alpha=0.2),
Dense(1, activation='sigmoid'),
], name='discriminator')
model.summary()
image = Input(shape=(28, 28, 1))
output = model(image)
return Model(image, output)
def train(generator, discriminator, combined, steps, batch_size):
"""
Train the GAN system
:param generator: generator
:param discriminator: discriminator
:param combined: stacked generator and discriminator
we'll use the combined network when we train the generator
:param steps: number of alternating steps for training
:param batch_size: size of the minibatch
"""
# Load the dataset
(x_train, _), _ = mnist.load_data()
# Rescale in [-1, 1] interval
x_train = (x_train.astype(np.float32) - 127.5) / 127.5
x_train = np.expand_dims(x_train, axis=-1)
# Discriminator ground truths
real = np.ones((batch_size, 1))
fake = np.zeros((batch_size, 1))
latent_dim = generator.input_shape[1]
for step in range(steps):
# Train the discriminator
# Select a random batch of images
real_images = x_train[np.random.randint(0, x_train.shape[0], batch_size)]
# Random batch of noise
noise = np.random.normal(0, 1, (batch_size, latent_dim))
# Generate a batch of new images
generated_images = generator.predict(noise)
# Train the discriminator
discriminator_real_loss = discriminator.train_on_batch(real_images, real)
discriminator_fake_loss = discriminator.train_on_batch(generated_images, fake)
discriminator_loss = 0.5 * np.add(discriminator_real_loss, discriminator_fake_loss)
# Train the generator
# random latent vector z
noise = np.random.normal(0, 1, (batch_size, latent_dim))
# Train the generator
# Note that we use the "valid" labels for the generated images
# That's because we try to maximize the discriminator loss
generator_loss = combined.train_on_batch(noise, real)
# Display progress
print("%d [Discriminator loss: %.4f%%, acc.: %.2f%%] [Generator loss: %.4f%%]" %
(step, discriminator_loss[0], 100 * discriminator_loss[1], generator_loss))
def plot_generated_images(generator):
"""
Display a nxn 2D manifold of digits
:param generator: the generator
"""
n = 10
digit_size = 28
# big array containing all images
figure = np.zeros((digit_size * n, digit_size * n))
latent_dim = generator.input_shape[1]
# n*n random latent distributions
noise = np.random.normal(0, 1, (n * n, latent_dim))
# generate the images
generated_images = generator.predict(noise)
# fill the big array with images
for i in range(n):
for j in range(n):
slice_i = slice(i * digit_size, (i + 1) * digit_size)
slice_j = slice(j * digit_size, (j + 1) * digit_size)
figure[slice_i, slice_j] = np.reshape(generated_images[i * n + j], (28, 28))
# plot the results
plt.figure(figsize=(6, 5))
plt.axis('off')
plt.imshow(figure, cmap='Greys_r')
plt.show()
if __name__ == '__main__':
print("GAN for new MNIST images with Keras")
latent_dim = 64
# Build and compile the discriminator
discriminator = build_discriminator()
discriminator.compile(loss='binary_crossentropy',
optimizer=Adam(lr=0.0002, beta_1=0.5),
metrics=['accuracy'])
# Build the generator
generator = build_generator(latent_dim)
# Generator input z
z = Input(shape=(latent_dim,))
generated_image = generator(z)
# Only train the generator for the combined model
discriminator.trainable = False
# The discriminator takes generated image as input and determines validity
real_or_fake = discriminator(generated_image)
# Stack the generator and discriminator in a combined model
# Trains the generator to deceive the discriminator
combined = Model(z, real_or_fake)
combined.compile(loss='binary_crossentropy',
optimizer=Adam(lr=0.0002, beta_1=0.5))
# train the GAN system
train(generator=generator,
discriminator=discriminator,
combined=combined,
steps=15000,
batch_size=128)
# display some random generated images
plot_generated_images(generator)
|
172201
|
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import scipy.linalg as la
def dist(A,B,C,D,alpha,beta):
"""
Parameters
----------
A
B
C
D
alpha
beta
Return
------
f
g
"""
AC=C-A
CD=D-C
BA=A-B
u0 = np.dot(AC,AC)
u4 = np.dot(BA,BA)
u5 = np.dot(CD,CD)
u1 = np.dot(CD,AC)
u2 = np.dot(BA,AC)
u3 = np.dot(CD,BA)
f = u0 + 2*(alpha*u1+beta*u2+alpha*beta*u3)+alpha*alpha*u4+ beta*beta*u5
M = A - alpha*BA
N = C + beta*CD
g = np.dot(M-N,M-N)
return(f,g)
#A = rand(3)
#B = rand(3)
#C = rand(3)
#D = rand(3)
A = np.array([-199.92987677, 19.85458989, 78.87541506])
B = np.array([-138.412703 , 74.27783155, 96.88739959])
C = np.array([-170.65697276, 65.39777929, 86.91978279])
D = np.array([-194.55622849, 65.83291724, 42.14616457])
AC = C-A
CD = D-C
BA = A-B
u0 = np.dot(AC,AC)
u4 = np.dot(BA,BA)
u5 = np.dot(CD,CD)
u1 = np.dot(CD,AC)
u2 = np.dot(BA,AC)
u3 = np.dot(CD,BA)
a = np.linspace(-2,2,100)
b = np.linspace(-2.5,2.5,100)
alpha,beta = np.meshgrid(a,b)
f = u0 + 2*(alpha*u1+beta*u2+alpha*beta*u3)+alpha*alpha*u4+ beta*beta*u5
Z = np.array([[u4,u3],[u3,u5]])
y = np.array([-u1,-u2])
print(Z)
print(y)
#print(la.det(Z))
d2,d3 = dist(A,B,C,D,0.5,0.5)
print(d2, d3)
# <codecell>
# <codecell>
x = la.solve(Z,y)
yr = np.dot(Z,x)
print(yr)
print("alpha : ", x[0])
print("beta : ", x[1])
al = x[0]
be = x[1]
if al<0:
al = 0
if al>1:
al = 1
if be<0:
be = 0
if be>1:
be = 1
M = A + al*(B-A)
N = C + be*(D-C)
MO = A - 2*(B-A)
MI = A + 2*(B-A)
NO = C - 2*(D-C)
NI = C + 2*(D-C)
print "alpha : ",al
print "beta : ",be
# <codecell>
# <codecell>
# <markdowncell>
# \\( \pmatrix{\alpha \\\\ \beta }= \frac{1}{u_4 u_5 - u_3^2} \pmatrix{u_2 u_3 - u_1 u_5 \\\\ u_1 u_3-u_2 u_4 }\\)
# <codecell>
def dmin3d(A,B,C,D):
"""
    dmin3d evaluates the minimal distance between two sets of segments
    (this should be vectorized)
    A : (3xN) start point of segment 1
    B : (3xN) end point of segment 1
    C : (3xN) start point of segment 2
    D : (3xN) end point of segment 2
"""
AC=C-A
CD=D-C
BA=A-B
u0 = np.dot(AC,AC)
u4 = np.dot(BA,BA)
u5 = np.dot(CD,CD)
u1 = np.dot(CD,AC)
u2 = np.dot(BA,AC)
u3 = np.dot(CD,BA)
den = u4*u5-u3*u3
alpha = (u2*u3-u1*u5)/(1.*den)
beta = (u1*u3-u2*u4)/(1.*den)
dmin = np.sqrt(u0 + 2*(alpha*u1+beta*u2+alpha*beta*u3)+alpha*alpha*u4+ beta*beta*u5)
return(alpha,beta,dmin)
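# A hedged usage sketch (not in the original notebook export): reuse the four points
# A, B, C, D defined above and compare dmin3d's closed-form alpha/beta with the values
# obtained earlier from la.solve.
alpha_cf, beta_cf, dmin_cf = dmin3d(A, B, C, D)
print("closed form -> alpha:", alpha_cf, "beta:", beta_cf, "dmin:", dmin_cf)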
|
172215
|
import logging
from colorlog import ColoredFormatter
def setup_logging():
logger = logging.getLogger()
logger.setLevel(logging.INFO)
color_formatter = ColoredFormatter(
"%(log_color)s[%(asctime)s] [%(levelname)-4s]%(reset)s - %(message)s",
datefmt='%d-%m-%y %H:%M:%S',
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'bold_yellow',
'ERROR': 'bold_red',
'CRITICAL': 'bold_red',
},
secondary_log_colors={},
style='%')
logging_handler = logging.StreamHandler()
logging_handler.setFormatter(color_formatter)
logger.addHandler(logging_handler)
    # also log errors to a file
file_handler = logging.FileHandler('errors.log')
file_handler.setLevel(logging.ERROR)
file_format = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
file_handler.setFormatter(file_format)
logger.addHandler(file_handler)
# setup logging for script
setup_logging()
logger = logging.getLogger(__name__)
|
172263
|
class ActivePlan(object):
def __init__(self, join_observer_list, on_next, on_completed):
self.join_observer_list = join_observer_list
self.on_next = on_next
self.on_completed = on_completed
self.join_observers = {}
for join_observer in self.join_observer_list:
self.join_observers[join_observer] = join_observer
def dequeue(self):
for join_observer in self.join_observers.values():
join_observer.queue.pop(0)
def match(self):
has_values = True
for join_observer in self.join_observer_list:
if not len(join_observer.queue):
has_values = False
break
if has_values:
first_values = []
is_completed = False
for join_observer in self.join_observer_list:
first_values.append(join_observer.queue[0])
if join_observer.queue[0].kind == 'C':
is_completed = True
if is_completed:
self.on_completed()
else:
self.dequeue()
values = []
for value in first_values:
values.append(value.value)
self.on_next(*values)
|
172318
|
import cv2
import os
import numpy as np
import av
from torchvision.transforms import Compose, Resize, ToTensor
from PIL import Image
import matplotlib.pyplot as plt
import torch
from torch.utils.data import DataLoader
from dataset import MaskDataset, get_img_files, get_img_files_eval
from nets.MobileNetV2_unet import MobileNetV2_unet
__author__ = 'roeiherz'
FILE_EXISTS_ERROR = (17, 'File exists')
N_CV = 5
IMG_SIZE = 224
RANDOM_STATE = 1
FPS = 5
def get_data_loaders(val_files):
val_transform = Compose([
Resize((IMG_SIZE, IMG_SIZE)),
ToTensor(),
])
val_loader = DataLoader(MaskDataset(val_files, val_transform),
batch_size=1,
                            shuffle=False,
pin_memory=True,
num_workers=4)
return val_loader
def create_folder(path):
"""
Checks if the path exists, if not creates it.
:param path: A valid path that might not exist
:return: An indication if the folder was created
"""
folder_missing = not os.path.exists(path)
if folder_missing:
# Using makedirs since the path hierarchy might not fully exist.
try:
os.makedirs(path)
except OSError as e:
if (e.errno, e.strerror) == FILE_EXISTS_ERROR:
print(e)
else:
raise
print('Created folder {0}'.format(path))
return folder_missing
def rotate_bound(image, angle):
# grab the dimensions of the image and then determine the
# center
(h, w) = image.shape[:2]
(cX, cY) = (w // 2, h // 2)
# grab the rotation matrix (applying the negative of the
# angle to rotate clockwise), then grab the sine and cosine
# (i.e., the rotation components of the matrix)
M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
# compute the new bounding dimensions of the image
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# adjust the rotation matrix to take into account translation
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
# perform the actual rotation and return the image
return cv2.warpAffine(image, M, (nW, nH))
def video_to_frames(input_video, out_dir, refinement=1, fps=1):
"""
:param input_video: path for input video
:param out_dir: output path directory
    :param refinement: keep only every `refinement`-th frame
    :param fps:
        1: default fps
        -1: derive the fps automatically from the video duration
        any other integer: write one frame every `fps` frames
    :return:
"""
video = av.open(input_video)
rotation = int(video.streams[0].metadata.get('rotate', 0))
vidcap = cv2.VideoCapture(input_video)
# Jump using the fps inputs
if fps == -1:
duration = float(video.streams[0].duration * video.streams[0].time_base)
frames = video.streams[0].frames
fps = int(round(frames / duration))
count = 0
image_files = []
counter = 0
index = 0
while True:
success, image = vidcap.read()
if not success:
print("Finished/Error in video: {}".format(input_video))
break
counter += 1
        if ((counter - 1) % refinement) > 0:
continue
image = rotate_bound(image, rotation)
outpath = os.path.join(out_dir, "%.6d.jpg" % (index))
if count % fps == 0:
cv2.imwrite(outpath, image)
image_files.append(outpath)
index += 1
count = count + 1
def images_to_video(outvid_path, input_folder):
"""
Create video from images
:param outvid_path: output path
:param input_folder:
:return:
"""
outvid = cv2.VideoWriter(outvid_path, cv2.VideoWriter_fourcc(*'MJPG'), 5.0, (224, 224))
for i in range(1, 1000):
if os.path.isfile(os.path.join(input_folder, 'frame' + str(i) + '.jpg')):
I = cv2.imread(os.path.join(input_folder, 'frame' + str(i) + '.jpg'))
outvid.write(I)
outvid.release()
return
if __name__ == '__main__':
# input_video = "/home/roei/Datasets/Accidents1K/Videos/0d1f5146-858f-48a5-8c9a-47b87fc8b6a8.mov"
input_video = "/home/roei/Downloads/incident-865ba5029fb5fefaae91b3e1e354f403.mp4"
output_video = "/home/roei/mobile-semantic-segmentation/outputs/"
model_path = "/home/roei/mobile-semantic-segmentation/outputs/UNET_224_weights_100000_days/0-best.pth"
uuid = os.path.basename(input_video).split('.')[0]
output_path = os.path.join(output_video, "{}_masked".format(os.path.basename(input_video).split('.')[0]))
output_shape = (720, 1280)
# Creates frames if they don't exists
if not os.path.exists(output_path):
create_folder(output_path)
# Process the network
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = "cpu"
# data_loader = get_data_loaders(frames)
model = MobileNetV2_unet(mode="eval")
model.load_state_dict(torch.load(model_path))
model.to(device)
model.eval()
transform = Compose([Resize((IMG_SIZE, IMG_SIZE)), ToTensor()])
# # Process the Video
video = av.open(input_video)
rotation = int(video.streams[0].metadata.get('rotate', 0))
# Video Reader
vidcap = cv2.VideoCapture(input_video)
# Jump using the fps inputs
fps = FPS
if fps == -1:
duration = float(video.streams[0].duration * video.streams[0].time_base)
frames = video.streams[0].frames
fps = int(round(frames / duration))
# Video Writer
outvid = cv2.VideoWriter(os.path.join(output_path, "{}.avi".format(uuid)),
cv2.VideoWriter_fourcc(*'MJPG'), float(fps), (output_shape[1], output_shape[0]))
count = 0
image_files = []
counter = 0
index = 0
while True:
success, image = vidcap.read()
if not success:
print("Finished/Error in video: {}".format(input_video))
break
counter += 1
if ((counter - 1) % 1) > 0:
continue
image = rotate_bound(image, rotation)
if count % FPS == 0:
with torch.no_grad():
img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Apply transform to img
img_trf = Image.fromarray(img)
img_trf = transform(img_trf)
img_trf = img_trf.unsqueeze(0)
inputs = img_trf.to(device)
# Apply model to get output
outputs = model(inputs)
# Prepare image input and output mask for blending
i = inputs[0]
i = i.cpu().numpy().transpose((1, 2, 0)) * 255
i = i.astype(np.uint8)
o = outputs[0]
o = o.cpu().numpy().reshape(int(IMG_SIZE / 2), int(IMG_SIZE / 2)) * 255
o = cv2.resize(o.astype(np.uint8), (output_shape[1], output_shape[0]))
# Red color
mask = np.zeros((output_shape[0], output_shape[1], 3)).astype(np.uint8)
mask[:, :, 2] = o
# Blend both mask and image
org_resized_img = cv2.resize(image.astype(np.uint8), (output_shape[1], output_shape[0]))
blend = cv2.addWeighted(mask, 0.3, org_resized_img, 0.7, 0)
outvid.write(blend)
index += 1
count = count + 1
outvid.release()
print("Finished to processed video.")
|
172322
|
from datetime import datetime
import os.path
import functools
import collections
import copy
import re
import time
import logging
import numpy as np
import tensorflow as tf
from train_common import get_global_step, get_lr_and_max_steps, get_ops, run_op
import prune_algorithm.prune_common as pc
logging.basicConfig(level=logging.ERROR)
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/tmp_train', """Directory where to write event logs and checkpoint.""")
tf.app.flags.DEFINE_string('data_dir', './data', """Path to the data directory.""")
tf.app.flags.DEFINE_string('dataset', 'cifar10', """Dataset name""")
tf.app.flags.DEFINE_string('network', 'vgg', """Network name""")
tf.app.flags.DEFINE_string('impt_method', 'correlation', """should be one of `correlation`, `cosine` and `inner_product`""")
tf.app.flags.DEFINE_string('normalize_method', 'max', """should be one of `max`, `l1` and `l2`""")
tf.app.flags.DEFINE_bool('conv_dense_separate', False, """whether to prune conv and dense layers separately""")
tf.app.flags.DEFINE_bool('merge_all', False, """only for networks with residual design, whether to average the importance within the same block""")
tf.app.flags.DEFINE_float('prune_rate', 0.01, """The global pruned ratio for network""")
tf.app.flags.DEFINE_integer('top_k', 3, """The top-k value used when computing channel importance""")
tf.app.flags.DEFINE_bool('weight_decay_growing', False, """Whether to use a larger weight_decay when finetuning than when training""")
tf.app.flags.DEFINE_float('alpha', 1.0, """The weight of 'correlation' when calculating the importance""")
tf.app.flags.DEFINE_float('beta', 1.0, """The weight of 'computational cost' when calculating the importance""")
tf.app.flags.DEFINE_float('gamma', 1.0, """The weight of 'parameters' when calculating the importance""")
import config
# import prune_config
network, dataset, top_name, PruneAlg = config.parse_net_and_dataset()
def train_with_graph(weights_dict, channel_num_after_pruned, weight_decay, store_model_path):
train_args = config.args
num_gpus = train_args.num_gpus
train_batch_size = train_args.train_batch_size
test_batch_size = train_args.test_batch_size
init_lr = train_args.initial_learning_rate
epochs_per_decay = train_args.num_epochs_per_decay
lr_decay_factor = train_args.learning_rate_decay_factor
lr_staircase = train_args.get("staircase")
max_epochs = train_args.max_epochs
num_classes = dataset.num_classes
image_size = [dataset.height, dataset.width]
examples_for_train = dataset.num_examples_for_train
examples_for_test = dataset.num_examples_for_test
with tf.Graph().as_default(), tf.device('/cpu:0'):
###### get global step
global_step = get_global_step(store_model_path)
###### get learning rate and max_steps
lr, max_steps = get_lr_and_max_steps(examples_for_train, train_batch_size, num_gpus,
lr_decay_factor, epochs_per_decay, init_lr, global_step, lr_staircase, max_epochs)
###### get optimizer
opt = config.args.optimizer(lr)
###### Get data
tf_training = tf.placeholder(tf.bool, shape=())
train_dataset = dataset.train_input_fn(FLAGS.data_dir, train_batch_size, max_epochs, **config.args.data_augmentation_args).make_one_shot_iterator()
test_dataset = dataset.test_input_fn(FLAGS.data_dir, test_batch_size, **config.args.data_augmentation_args).make_one_shot_iterator()
###### put op on different GPU
train_args.learning_rate = lr
train_args.data_queue = [train_dataset, test_dataset]
train_args.global_step = global_step
train_args.max_steps = max_steps
train_args.examples_per_epoch_for_test = examples_for_test
train_args.weights_dict = weights_dict
train_args.channels_num = channel_num_after_pruned
train_args.weight_decay = weight_decay
ops = get_ops(opt, tf_training, network, dataset, num_classes, top_name, train_args)
###### run on session
run_op(ops, tf_training, store_model_path, train_args)
def train(_):
# corr_normal_factor = collections.OrderedDict()
ckpt = tf.train.get_checkpoint_state(os.path.join(FLAGS.train_dir))
store_model_path = ckpt.model_checkpoint_path
init_weight_decay = config.args.weight_decay
importance_coefficient = [FLAGS.alpha, FLAGS.beta, FLAGS.gamma]
print("store_model_path: " + store_model_path)
print("importance_coefficient: alpha %.2f, beta %.2f, gamma %.2f" % (FLAGS.alpha, FLAGS.beta, FLAGS.gamma))
print("weight decay growing: %d" % FLAGS.weight_decay_growing)
print("top_k: %d" % FLAGS.top_k)
print("prune rate: %.2f" % FLAGS.prune_rate)
## get old weights
if "resnet" in top_name:
weights_dict = network.get_weights_from_model(store_model_path, config.args.resnet_version)
else:
weights_dict = network.get_weights_from_model(store_model_path)
## get pruned channels
prune_args = {
"image_size": [dataset.height, dataset.width],
"importance_method": FLAGS.impt_method,
"importance_coefficient": [FLAGS.alpha, FLAGS.beta, FLAGS.gamma],
"top_k": FLAGS.top_k,
"num_classes": dataset.num_classes,
"normalize_method": FLAGS.normalize_method,
"conv_dense_separate": False if FLAGS.conv_dense_separate == 0 else True,
"merge_all": FLAGS.merge_all
}
prune_alg = PruneAlg(weights_dict, **prune_args)
cut_channels = prune_alg.get_prune_channels(FLAGS.prune_rate)
## get pruned weights
if "resnet" in top_name:
pruned_weights_dict = prune_alg.get_pruned_weights(cut_channels, config.args.resnet_version)
else:
pruned_weights_dict = prune_alg.get_pruned_weights(cut_channels)
cal_ratio, params_ratio = prune_alg.get_pruned_ratio()
pruned_cared_weights = prune_alg.get_pruned_cared_weights(pruned_weights_dict)
channel_num_after_pruned = prune_alg.get_channels_nums(pruned_cared_weights, channel_type='output')
## cal weight_decay
weight_decay = 1.1e-3 if FLAGS.weight_decay_growing else init_weight_decay
print("The number of channels after pruned: ", channel_num_after_pruned.values())
# print("Use correlation normalization factor: " + str(corr_normal_factor.values()))
print("Use weight decay: " + str(weight_decay))
## finetune the model
store_model_path = os.path.join(FLAGS.train_dir, "prune%.2f" % FLAGS.prune_rate) # model dir
train_with_graph(pruned_weights_dict, channel_num_after_pruned, weight_decay, store_model_path)
if __name__ == '__main__':
tf.app.run(main=train)
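# Illustrative invocation (the script name and flag values below are assumptions, not taken from this file):
#   python prune_finetune.py --train_dir=/tmp/vgg_cifar10 --data_dir=./data --dataset=cifar10 \
#       --network=vgg --impt_method=correlation --normalize_method=max --prune_rate=0.30 --top_k=3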
|
172346
|
COINS = (
(25, 'Quarters'),
(10, 'Dimes'),
(5, 'Nickels'),
(1, 'Pennies')
)
def loose_change(cents):
change = {'Pennies': 0, 'Nickels': 0, 'Dimes': 0, 'Quarters': 0}
cents = int(cents)
if cents <= 0:
return change
for coin_value, coin_name in COINS:
q, r = divmod(cents, coin_value)
change[coin_name] = q
cents = r
return change
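# Minimal usage sketch: 87 cents breaks down into 3 quarters, 1 dime, 0 nickels and 2 pennies.
# print(loose_change(87))   # -> {'Pennies': 2, 'Nickels': 0, 'Dimes': 1, 'Quarters': 3}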
|
172367
|
import mistune
from mistune import InlineLexer, BlockLexer
import re
try:
from .renderer_base import Block_Quote_Renderer, Header_Renderer
from .renderer_math import MathInlineMixin, MathRendererMixin, MathBlockMixin
except Exception:
from renderer_base import Block_Quote_Renderer, Header_Renderer
from renderer_math import MathInlineMixin, MathRendererMixin, MathBlockMixin
class TasklistRenderMixin:
def list_item(self, text):
"""render list item with task list support"""
# list_item implementation in mistune.Renderer
old_list_item = mistune.Renderer.list_item
new_list_item = lambda _, text: '<li class="task-list-item">%s</li>\n' % text
task_list_re = re.compile(r'\[[xX ]\] ')
m = task_list_re.match(text)
if m is None:
return old_list_item(self, text)
prefix = m.group()
checked = False
if prefix[1].lower() == 'x':
checked = True
if checked:
checkbox = '<input type="checkbox" class="task-list-item-checkbox" checked disabled/> '
else:
checkbox = '<input type="checkbox" class="task-list-item-checkbox" disabled /> '
return new_list_item(self, checkbox + text[m.end():])
class MathInlineLexer(MathInlineMixin, InlineLexer):
def __init__(self, *args, **kwargs):
super(MathInlineLexer, self).__init__(*args, **kwargs)
self.enable_math()
class MathBlockLexer(MathBlockMixin, BlockLexer):
def __init__(self, *args, **kwargs):
BlockLexer.__init__(self, *args, **kwargs)
self.enable_math()
class MDRenderer(
# HighlightMixin,
MathRendererMixin,
TasklistRenderMixin,
Block_Quote_Renderer,
Header_Renderer,
mistune.Renderer):
def __init__(self):
mistune.Renderer.__init__(self, escape = False, hard_wrap = True)
class MarkdownWithMath(mistune.Markdown):
def __init__(self, renderer, **kwargs):
if 'inline' not in kwargs:
kwargs['inline'] = MathInlineLexer
if 'block' not in kwargs:
kwargs['block'] = MathBlockLexer
super().__init__(renderer, **kwargs)
def output_block_math(self):
return self.inline(self.token["text"])
def create_markdown_parser():
class MathRendrerer(MathRendererMixin, mistune.Renderer):
def __init__(self, *args, **kwargs):
super(MathRendrerer, self).__init__(*args, **kwargs)
renderer = MDRenderer()
# inline_lexer = MathInlineLexer(renderer)
    # block_lexer = MathBlockLexer() # FIXME: does not work!!
parser = MarkdownWithMath(renderer=renderer)
return parser
if __name__ == "__main__":
math_test = '''
假设 $z = f(u,v)$ 在点,求 $z$ 在 $t$ 点的导数。 $**hello**$
$$
**hello**{f[g(x)]}'=2[g(x)] \\times g'(x)=2[2x+1] \\times 2=8x+4
$$
'''
# math_test = '''
# $$
# **hello**{f[g(x)]}'=2[g(x)] \\times g'(x)=2[2x+1] \\times 2=8x+4
# $$
# '''
parser = create_markdown_parser()
html = parser(math_test)
print(html)
|
172390
|
import torch
import torch.nn as nn
import numpy as np
import sys
from sdf import SDF
import pdb
class SDFLoss(nn.Module):
def __init__(self, right_faces, left_faces, grid_size=32, robustifier=None):
super(SDFLoss, self).__init__()
self.sdf = SDF()
self.register_buffer('right_face', torch.tensor(right_faces.astype(np.int32)))
self.register_buffer('left_face', torch.tensor(left_faces.astype(np.int32)))
self.grid_size = grid_size
self.robustifier = robustifier
@torch.no_grad()
def get_bounding_boxes(self, vertices):
bs = vertices.shape[0]
boxes = torch.zeros(bs, 2, 2, 3, device=vertices.device)
boxes[:, :, 0, :] = vertices.min(dim=2)[0]
boxes[:, :, 1, :] = vertices.max(dim=2)[0]
return boxes
def forward(self, vertices, scale_factor=0.2, return_per_vert_loss=False, return_origin_scale_loss=False):
assert not (return_origin_scale_loss and (not return_per_vert_loss))
# vertices: (bs, 2, 778, 3)
bs = vertices.shape[0]
num_hand = 2
boxes = self.get_bounding_boxes(vertices) # (bs, 2, 2, 3)
loss = torch.tensor(0., device=vertices.device)
# re-scale the input vertices
boxes_center = boxes.mean(dim=2).unsqueeze(dim=2) # (bs, 2, 1, 3)
boxes_scale = (1+scale_factor) * 0.5*(boxes[:,:,1] - boxes[:,:,0]).max(dim=-1)[0][:, :, None,None] # (bs, 2, 1, 1)
with torch.no_grad():
vertices_centered = vertices - boxes_center
vertices_centered_scaled = vertices_centered / boxes_scale
assert(vertices_centered_scaled.min() >= -1)
assert(vertices_centered_scaled.max() <= 1)
right_verts = vertices_centered_scaled[:, 0].contiguous()
left_verts = vertices_centered_scaled[:, 1].contiguous()
right_phi = self.sdf(self.right_face, right_verts, self.grid_size)
left_phi = self.sdf(self.left_face, left_verts, self.grid_size)
assert(right_phi.min() >= 0) # (bs, 32, 32, 32)
assert(left_phi.min() >= 0) # (bs, 32, 32, 32)
# concat left & right phi
        # be aware of the order: in the input vertices the order is (right, left)
phi = [right_phi, left_phi]
losses = list()
losses_origin_scale = list()
for i in [0, 1]:
# vertices_local: (bs, 1, 778, 3)
vertices_local = (vertices[:, i:i+1] - boxes_center[:, 1-i].unsqueeze(dim=1)) / boxes_scale[:, i].unsqueeze(dim=1)
# vertices_grid: (bs, 778, 1, 1, 3)
vertices_grid = vertices_local.view(bs,-1,1,1,3)
# Sample from the phi grid
phi_val = nn.functional.grid_sample(
phi[1-i].unsqueeze(dim=1), vertices_grid, align_corners=True).view(bs, -1)
cur_loss = phi_val # (10, 778)
# robustifier: cur_loss = cur_loss^2 / (cur_loss^2 + robust^2)
if self.robustifier:
frac = (cur_loss / self.robustifier) ** 2
cur_loss = frac / (frac + 1)
cur_loss_bp = cur_loss / num_hand ** 2
cur_loss_os = cur_loss * boxes_scale[:, i, 0]
losses.append(cur_loss_bp)
losses_origin_scale.append(cur_loss_os)
loss = (losses[0] + losses[1])
loss = loss.sum(dim=1)
loss_per_vert = torch.cat((losses[0], losses[1]), dim=1)
loss_origin_scale = torch.cat((losses_origin_scale[0], losses_origin_scale[1]), dim=1)
if not return_per_vert_loss:
return loss
else:
if not return_origin_scale_loss:
return loss, loss_per_vert
else:
return loss, loss_per_vert, loss_origin_scale
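# Minimal usage sketch (illustrative; right_faces/left_faces are assumed (F, 3) triangle-index arrays for the two hand meshes):
# sdf_loss = SDFLoss(right_faces, left_faces, grid_size=32, robustifier=0.01)
# vertices = torch.randn(4, 2, 778, 3) * 0.1          # (batch, num_hands, num_vertices, xyz)
# loss = sdf_loss(vertices)                            # per-sample penetration penalty, shape (4,)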
|
172454
|
import argparse
import json
import logging
import _jsonnet
import tqdm
# These imports are needed for registry.lookup
# noinspection PyUnresolvedReferences
from src.datasets import yahoo_dataset, ag_news_dataset
# noinspection PyUnresolvedReferences
from src.models import han
# noinspection PyUnresolvedReferences
from src.models.preprocessors import han_preprocessor, bert_preprocessor
# noinspection PyUnresolvedReferences
from src.nlp import glove_embeddings, spacynlp
# noinspection PyUnresolvedReferences
from src.utils import registry
# noinspection PyUnresolvedReferences
from src.utils import vocab
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class Preprocessor:
def __init__(self, config):
self.config = config
self.model_preprocessor = registry.instantiate(
callable=registry.lookup("model", config["model"]).Preprocessor,
config=config["model"],
unused_keys=("sentence_attention", "word_attention", "name", "final_layer_dim", "final_layer_dropout"),
)
def preprocess(self):
self.model_preprocessor.clear_items()
need_to_create_validation_set = False
for section in self.config["dataset"]:
data = registry.construct("dataset", self.config["dataset"][section])
if section == "val" and len(data) == 0:
need_to_create_validation_set = True
continue
for item in tqdm.tqdm(data, desc=f"pre-processing {section} section", dynamic_ncols=True):
to_add, validation_info = self.model_preprocessor.validate_item(item, section)
if to_add:
self.model_preprocessor.add_item(item, section, validation_info)
if need_to_create_validation_set:
self.model_preprocessor.create_validation_set(
val_split=self.config["dataset"]["val"].get("val_split", 0.1),
path=self.config["dataset"]["val"]["path"]
)
self.model_preprocessor.save()
def add_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--config", required=True)
parser.add_argument("--config-args")
args = parser.parse_args()
return args
def main(args):
if args.config_args:
config = json.loads(_jsonnet.evaluate_file(args.config, tla_codes={"args": args.config_args}))
else:
config = json.loads(_jsonnet.evaluate_file(args.config))
preprocessor = Preprocessor(config)
preprocessor.preprocess()
if __name__ == '__main__':
main(add_parser())
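# Illustrative invocations (the script name and config paths are assumptions):
#   python preprocess.py --config configs/han_yahoo.jsonnet
#   python preprocess.py --config configs/han_yahoo.jsonnet --config-args '{"batch_size": 32}'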
|
172486
|
from mesa.datacollection import DataCollector
from mesa import Model
from mesa.time import RandomActivation
from mesa_geo.geoagent import GeoAgent, AgentCreator
from mesa_geo import GeoSpace
import random
class SchellingAgent(GeoAgent):
"""Schelling segregation agent."""
def __init__(self, unique_id, model, shape, agent_type=None):
"""Create a new Schelling agent.
Args:
unique_id: Unique identifier for the agent.
agent_type: Indicator for the agent's type (minority=1, majority=0)
"""
super().__init__(unique_id, model, shape)
self.atype = agent_type
def step(self):
"""Advance agent one step."""
similar = 0
different = 0
neighbors = self.model.grid.get_neighbors(self)
if neighbors:
for neighbor in neighbors:
if neighbor.atype is None:
continue
elif neighbor.atype == self.atype:
similar += 1
else:
different += 1
# If unhappy, move:
if similar < different:
# Select an empty region
empties = [a for a in self.model.grid.agents if a.atype is None]
# Switch atypes and add/remove from scheduler
new_region = random.choice(empties)
new_region.atype = self.atype
self.model.schedule.add(new_region)
self.atype = None
self.model.schedule.remove(self)
else:
self.model.happy += 1
def __repr__(self):
return "Agent " + str(self.unique_id)
class SchellingModel(Model):
"""Model class for the Schelling segregation model."""
def __init__(self, density, minority_pc):
self.density = density
self.minority_pc = minority_pc
self.schedule = RandomActivation(self)
self.grid = GeoSpace()
self.happy = 0
self.datacollector = DataCollector({"happy": "happy"})
self.running = True
# Set up the grid with patches for every NUTS region
AC = AgentCreator(SchellingAgent, {"model": self})
agents = AC.from_file("nuts_rg_60M_2013_lvl_2.geojson")
self.grid.add_agents(agents)
# Set up agents
for agent in agents:
if random.random() < self.density:
if random.random() < self.minority_pc:
agent.atype = 1
else:
agent.atype = 0
self.schedule.add(agent)
def step(self):
"""Run one step of the model.
        If all agents are happy, halt the model.
"""
self.happy = 0 # Reset counter of happy agents
self.schedule.step()
# self.datacollector.collect(self)
if self.happy == self.schedule.get_agent_count():
self.running = False
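# Illustrative run (assumes "nuts_rg_60M_2013_lvl_2.geojson" sits next to this script):
# model = SchellingModel(density=0.6, minority_pc=0.2)
# while model.running:
#     model.step()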
|
172509
|
from .start_manifest import StartManifest
from .vod_start_manifest import VodStartManifest
from .vod_dash_start_manifest import VodDashStartManifest
from .vod_hls_start_manifest import VodHlsStartManifest
|
172583
|
import torch
from torch import nn
from onconet.models.inflate import inflate_model
from onconet.models.blocks.factory import get_block
import pdb
MODEL_REGISTRY = {}
STRIPPING_ERR = 'Trying to strip the model although last layer is not FC.'
NO_MODEL_ERR = 'Model {} not in MODEL_REGISTRY! Available models are {} '
NO_OPTIM_ERR = 'Optimizer {} not supported!'
INVALID_NUM_BLOCKS_ERR = 'Invalid block_layout. Must be length 4. Received {}'
INVALID_BLOCK_SPEC_ERR = 'Invalid block specification. Must be length 2 with (block_name, num_repeats). Received {}'
NUM_MATCHING_LAYERS_MESSAGE = 'Loaded pretrained_weights for {} out of {} parameters.'
def RegisterModel(model_name):
"""Registers a configuration."""
def decorator(f):
MODEL_REGISTRY[model_name] = f
return f
return decorator
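# Illustrative registration (the model name and constructor below are hypothetical, not part of this registry):
# @RegisterModel('toy_cnn')
# def toy_cnn(args):
#     return nn.Sequential(nn.Conv2d(args.num_chan, 8, 3), nn.ReLU(), nn.AdaptiveAvgPool2d(1))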
def get_model(args):
return get_model_by_name(args.model_name, True, args)
def get_model_by_name(name, allow_wrap_model, args):
'''
Get model from MODEL_REGISTRY based on args.model_name
args:
    - name: Name of model, must exist in registry
    - allow_wrap_model: whether or not to override args.wrap_model and disable model wrapping.
    - args: runtime args from parsing
returns:
- model: an instance of some torch.nn.Module
'''
if not name in MODEL_REGISTRY:
raise Exception(
NO_MODEL_ERR.format(
name, MODEL_REGISTRY.keys()))
model = MODEL_REGISTRY[name](args)
allow_data_parallel = 'discriminator' not in name and ('mirai_full' not in args.model_name or allow_wrap_model)
return wrap_model(model, allow_wrap_model, args, allow_data_parallel)
def wrap_model(model, allow_wrap_model, args, allow_data_parallel=True):
try:
model._model.args.use_precomputed_hiddens = args.use_precomputed_hiddens
except:
pass
if args.multi_image and not args.model_name in ['mirai_full']:
model = inflate_model(model)
if allow_wrap_model and args.wrap_model:
model._model = strip_model(model._model)
if args.patch_size[0] > -1:
img_size = args.patch_size
else:
img_size = args.img_size
if args.multi_image:
img_size = ( args.num_images, *args.img_size)
args.hidden_dim = get_output_size(model, img_size, args.num_chan, args.cuda)
wrapped_model = ModelWrapper(model, args)
else:
wrapped_model = model
if args.state_dict_path is not None:
load_pretrained_weights(wrapped_model, torch.load(args.state_dict_path))
if args.num_gpus > 1 and args.data_parallel and not isinstance(wrapped_model, nn.DataParallel) and allow_data_parallel:
wrapped_model = nn.DataParallel(wrapped_model,
device_ids=range(args.num_gpus))
return wrapped_model
def load_model(path, args, do_wrap_model = True):
print('\nLoading model from [%s]...' % path)
try:
model = torch.load(path, map_location='cpu')
if isinstance(model, dict):
model = model['model']
if isinstance(model, nn.DataParallel):
model = model.module.cpu()
try:
model.args.use_pred_risk_factors_at_test = args.use_pred_risk_factors_at_test
except:
pass
try:
if hasattr(model, '_model'):
_model = model._model
else:
_model = model
_model.args.use_pred_risk_factors_at_test = args.use_pred_risk_factors_at_test
_model.args.use_precomputed_hiddens = args.use_precomputed_hiddens
_model.args.use_pred_risk_factors_if_unk = args.use_pred_risk_factors_if_unk
_model.args.pred_risk_factors = args.pred_risk_factors
_model.args.use_spatial_transformer = args.use_spatial_transformer
except:
pass
try:
args.img_only_dim = model._model.args.img_only_dim
except:
pass
if do_wrap_model:
model = {'model': wrap_model(model, True, args)}
except:
raise Exception(
"Sorry, snapshot {} does not exist!".format(path))
return model
def validate_block_layout(block_layout):
"""Confirms that a block layout is in the right format.
Arguments:
block_layout(list): A length n list where each of the n elements
is a list of lists where each inner list is of length 2 and
contains (block_name, num_repeats). This specifies the blocks
in each of the n layers of the ResNet.
Raises:
Exception if the block layout is formatted incorrectly.
"""
# Confirm that each layer is a list of block specifications where
# each block specification has length 2 (i.e. (block_name, num_repeats))
for layer_layout in block_layout:
for block_spec in layer_layout:
if len(block_spec) != 2:
raise Exception(INVALID_BLOCK_SPEC_ERR.format(block_spec))
def get_layers(block_layout):
"""Gets the layers for a ResNet given the desired layout of blocks.
Arguments:
block_layout(list): A length n list where each of the n elements
is a list of lists where each inner list is of length 2 and
contains (block_name, num_repeats). This specifies the blocks
in each of the n layers of the ResNet.
Returns:
layers(list): A list of list of block types conforming to num blocks.
"""
validate_block_layout(block_layout)
layers = []
for layer_layout in block_layout:
layer = []
for block_name, num_repeats in layer_layout:
block = get_block(block_name)
layer.extend([block]*num_repeats)
layers.append(layer)
return layers
def get_params(model):
'''
Helper function to get parameters of a model.
## TODO: specify parameters to get rather than getting all
'''
return model.parameters()
def get_optimizer(model, args):
'''
Helper function to fetch optimizer based on args.
'''
params = [param for param in model.parameters() if param.requires_grad]
if args.optimizer == 'adam':
return torch.optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay)
elif args.optimizer == 'adagrad':
return torch.optim.Adagrad(params, lr=args.lr, weight_decay=args.weight_decay)
elif args.optimizer == 'sgd':
return torch.optim.SGD(params,
lr=args.lr,
weight_decay=args.weight_decay,
momentum=args.momentum )
else:
raise Exception(NO_OPTIM_ERR.format(args.optimizer))
def load_pretrained_weights(model, pretrained_state_dict):
"""Loads pretrained weights into a model (even if not all layers match).
Arguments:
model(Model): A PyTorch model.
pretrained_state_dict(dict): A dictionary mapping layer names
to pretrained weights.
"""
model_state_dict = model.state_dict()
# Filter out pretrained layers not in our model
matching_pretrained_state_dict = {
layer_name: weights
for layer_name, weights in pretrained_state_dict.items()
if (layer_name in model_state_dict and
pretrained_state_dict[layer_name].size() == model_state_dict[layer_name].size())
}
print(NUM_MATCHING_LAYERS_MESSAGE.format(len(matching_pretrained_state_dict),
len(model_state_dict)))
# Overwrite weights in existing state dict
model_state_dict.update(matching_pretrained_state_dict)
# Load the updated state dict
model.load_state_dict(model_state_dict)
def strip_model(model, num_layers_strip = 1):
"""
    Remove the last pooling and fc layers from the model.
:model: model to strip
:returns: stripped model
"""
all_children = list(model.named_children() )
layers_to_strip = all_children[ -1 * num_layers_strip: ]
for layer_name, layer in layers_to_strip:
if not type(layer) in [nn.modules.linear.Linear,
nn.modules.conv.Conv1d,
ModLinear, ModConv1d]:
            raise Exception(STRIPPING_ERR)
model._modules[layer_name] = ModelNOP()
return model
def get_output_size(model, shape, channels, cuda):
"""
Get the size of the output of the last layer of the model.
:model: the model
:shape: shape of the input image tuple(width, height)
:channels: amount of channels of input image (int)
    :cuda: whether or not to use GPU
:returns: the size of the output of the last layer of the model.
"""
bs = 1
input = torch.rand(bs, channels, *shape)
if cuda:
input = input.cuda()
model = model.cuda()
output_feat = model.forward(input)
n_size = output_feat.data.view(bs, -1).size(1)
return n_size
class ModelNOP(nn.Module):
def __init__(self):
'''
Placeholder nn module. Returns input.
'''
super(ModelNOP, self).__init__()
def forward(self, x):
return x
class ModelWrapper(nn.Module):
def __init__(self, model, args):
'''
        Given some model, add a linear layer and a softmax to fit it to the task defined by args.dataset
'''
super(ModelWrapper, self).__init__()
self._model = model
self.args = args
self.dropout = nn.Dropout(args.dropout)
if args.make_fc:
self.last_hidden = nn.Conv1d(1, args.num_classes, args.hidden_dim )
else:
self.last_hidden = nn.Linear(args.hidden_dim, args.num_classes)
def cuda(self, device=None):
self._model = self._model.cuda(device)
self.last_hidden = self.last_hidden.cuda(device)
return self
def forward(self, x):
'''
param x: a batch of image tensors
returns logit: logits over args.num_classes for x
'''
hidden = self._model(x)
hidden = self.dropout(hidden)
hidden = hidden.view(hidden.size()[0], -1)
if self.args.make_fc:
logit = self.last_hidden( hidden.unsqueeze(0).transpose(0,1)).squeeze(-1)
else:
logit = self.last_hidden(hidden)
# TODO: It looks like all wrapped models will not work with the current model_step because the current version
# of the model_step requires that output of a model to be logit, hidden, activ
return logit, hidden
|
172593
|
import gc
import numpy as np
import pandas as pd
def drop_duplicates_pharma(df):
"""
df: long-format dataframe of a patient
    varref: variable reference table that contains the mean and standard deviation of values for a subset of variables
"""
df_dup = df[df.duplicated(["givenat", "pharmaid", "infusionid"], keep=False)]
for pharmaid in df_dup.pharmaid.unique():
for infusionid in df_dup[df_dup.pharmaid == pharmaid].infusionid.unique():
tmp = df_dup[(df_dup.pharmaid == pharmaid) & (df_dup.infusionid == infusionid)]
if len(tmp.recordstatus.unique()) == 1 and tmp.recordstatus.unique()[0] == 780:
for i in range(len(tmp)):
df.loc[tmp.index[i], "infusionid"] = "%s_%s" % (int(df.loc[tmp.index[i], "infusionid"]), i)
tmp = df[(df.pharmaid == pharmaid) & (
df.infusionid.apply(lambda x: "%s_" % (infusionid) in x if type(x) == str else False))]
elif len(tmp.recordstatus.unique()) == 1 and tmp.recordstatus.unique()[0] == 776:
if (tmp.givendose != 0).sum() == 1:
df.drop(tmp.index[tmp.givendose == 0], inplace=True)
else:
df.drop(tmp.index[:-1], inplace=True)
elif len(tmp.recordstatus.unique()) == 2 and 776 in tmp.recordstatus.unique():
df.drop(tmp.index[tmp.recordstatus != 776], inplace=True)
else:
raise Exception("Debug needed")
return df
def process_status780(df, acting_period):
'''
Convert the infusion channel with status injection/tablet to "infusion-like" channel.
'''
infusionid = int(df.iloc[0].infusionid)
start_code = 524
stop_code = 776
df.set_index("givenat", inplace=True)
drug_giventime_780 = df.index.tolist()
df_new = []
for i, dt in enumerate(drug_giventime_780):
tmp = df.loc[[dt]].copy()
endtime_780 = dt + np.timedelta64(acting_period, "m")
tmp.loc[endtime_780, "givendose"] = tmp.loc[dt, "givendose"]
tmp.loc[endtime_780, "recordstatus"] = stop_code
tmp.loc[endtime_780, "infusionid"] = "%d_%d" % (infusionid, i)
tmp.loc[dt, "givendose"] = 0
tmp.loc[dt, "recordstatus"] = start_code
tmp.loc[dt, "infusionid"] = "%d_%d" % (infusionid, i)
df_new.append(tmp.reset_index())
df_new = pd.concat(df_new).sort_values("givenat")
return df_new
def process_single_infusion(df, acting_period):
'''
Convert given dose from a single infusion channel to rate
'''
infusionid = int(df.iloc[0].infusionid)
if len(df.recordstatus.unique()) == 1 and df.recordstatus.unique()[0] == 780:
df = process_status780(df, acting_period)
df_rate = []
for sub_infusionid in df.infusionid.unique():
tmp = df[df.infusionid == sub_infusionid].copy()
try:
assert ((tmp.recordstatus == 524).sum() == 1)
except AssertionError:
tmp.set_index("givenat", inplace=True)
beg_time = tmp.index[0] - np.timedelta64(acting_period, "m")
tmp.loc[beg_time, "givendose"] = 0
tmp.loc[beg_time, "recordstatus"] = 524
tmp.loc[beg_time, "infusionid"] = sub_infusionid
tmp.sort_index(inplace=True)
tmp.reset_index(inplace=True)
try:
assert ((tmp.recordstatus == 776).sum() == 1)
except AssertionError:
pass
tmp.loc[:, "rate"] = 0
tmp.loc[tmp.index[:-1], "rate"] = tmp.givendose.values[1:] / (tmp.givenat.diff() / np.timedelta64(1,
"m")).values[
1:]
tmp.rename(columns={"rate": str(sub_infusionid)}, inplace=True)
df_rate.append(tmp[["givenat", str(sub_infusionid)]].set_index("givenat"))
df_rate = pd.concat(df_rate, axis=1).sum(axis=1).to_frame(name=str(infusionid))
return df_rate
def convert_cumul_value_to_rate(df, cumul_urine_id_lst, general_table):
pid = df.iloc[0].patientid
short_gap = 5 / 60
rec_adm_time = general_table.loc[pid].admissiontime
# if the first HR measuremet time is earlier than recorded admission time, then we estimated
# the "true" admission time to be the earlier of these two time points.
if df[df.variableid == 200]["value"].notnull().sum() > 0:
hr_first_meas_time = df.loc[df[df.variableid == 200]["value"].notnull().index[0], "datetime"]
esti_adm_time = min(rec_adm_time, hr_first_meas_time)
else:
esti_adm_time = rec_adm_time
df_urine = df[df.variableid.isin(cumul_urine_id_lst)]
if len(df_urine) == 0:
return df
else:
for vid in df_urine.variableid.unique():
df_tmp = df_urine[df_urine.variableid == vid] # table of a single urine variable
index_pre_general_table = df_tmp.index[df_tmp.datetime < esti_adm_time - np.timedelta64(15 * 60 + 30,
"s")] # number of records before general_tableission time
if len(index_pre_general_table) == 0:
pass
elif len(index_pre_general_table) == 1:
                # if there's one record before admission, reset its datetime from the system reset time 12pm to the admission time
index_pre_general_table = df_tmp.index[df_tmp.datetime < esti_adm_time]
df.loc[index_pre_general_table[0], 'datetime'] = esti_adm_time
else:
index_pre_general_table = df_tmp.index[df_tmp.datetime < esti_adm_time]
df.drop(index_pre_general_table[:-1], inplace=True)
df.loc[index_pre_general_table[-1], 'datetime'] = esti_adm_time
df_tmp = df[df.variableid == vid]
if df_tmp.duplicated(["datetime"]).sum() == 0:
pass
else:
df.drop(df_tmp.index[df_tmp.duplicated(["datetime"])], inplace=True)
            # delete the urine record if there's only one left
if (df.variableid == vid).sum() < 2:
df.drop(df.index[df.variableid == vid], inplace=True)
continue
# compute the cumulative values over the entire icu stay
df_tmp = df[df.variableid == vid]
t_reset = df_tmp[(df_tmp["value"].diff() < 0) | (
df_tmp.index == df_tmp.index[0])].datetime # reset time for the cumulative counting
idx_not_reset = df_tmp[(df_tmp["value"].diff() >= 0) & (df_tmp.index != df_tmp.index[0])].index
for i in np.arange(1, len(t_reset)):
tmp = df_tmp[df_tmp.datetime >= t_reset.iloc[i]]
if i < len(t_reset) - 1:
tmp = tmp[tmp.datetime < t_reset.iloc[i + 1]]
df.loc[tmp.index, 'value'] += df.loc[df_tmp.index[df_tmp.datetime < t_reset.iloc[i]][-1], 'value']
            # drop time points whose gap from the previous time point is shorter than short_gap (5 minutes)
df_tmp = df[df.variableid == vid]
tdiff = (df_tmp.datetime.diff().iloc[1:] / np.timedelta64(3600, 's'))
if (tdiff < short_gap).sum() > 0:
df.drop(df_tmp.index[1:][tdiff.values < short_gap], inplace=True)
if (df.variableid == vid).sum() < 2:
df.drop(df.index[df.variableid == vid], inplace=True)
continue
# debug if the cumulative value is not strictly increasing
df_tmp = df[df.variableid == vid]
vdiff = df_tmp["value"].diff()
try:
assert ((vdiff < 0).sum() == 0)
except AssertionError:
import ipdb
ipdb.set_trace()
gc.collect()
for vid in df_urine.variableid.unique():
df_tmp = df[df.variableid == vid]
if len(df_tmp) == 0:
continue
elif len(df_tmp) == 1:
continue
else:
tdiff = (df_tmp.datetime.diff() / np.timedelta64(3600, 's'))
df.loc[df_tmp.index[1:], 'value'] = (df_tmp["value"].diff().iloc[1:] / tdiff.iloc[1:]).values
# logging.info(tdiff.loc[df.loc[df_tmp.index,'value']>1e+4]*np.timedelta64(3600, 's'))
# df.loc[df_tmp.index[0],'value'] = df.loc[df_tmp.index[1],'value']
df.loc[df_tmp.index[0], 'value'] = 0
for vid in df_urine.variableid.unique():
df_tmp = df[df.variableid == vid]
# df.drop(df_tmp.index[df_tmp['value'] > 1e+6], inplace=True)
return df
|
172597
|
import io
import os
import logging
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.filedialog as filedialog
import tkinter.messagebox as tkmessagebox
import libs.CFCrypto as CFCrypto
import libs.CFCryptoX as CFCryptoX
from libs.CFCanvas import CFCanvas
from libs.Util import set_combobox_item
from libs.Util import IMG_EXT_LIST
logging.basicConfig(level=logging.INFO)
# Window class
class Window(ttk.Frame):
def __init__(self, master=None, **kwargs):
super().__init__(master, padding=2)
        # Choose which encryption mode to use, ECB or CBC
self.cryptModeOption = tk.StringVar()
self.cryptModeCombobox = ttk.Combobox(self, width=10, textvariable=self.cryptModeOption)
self.cryptModeCombobox.grid(row=0, column=0, sticky=('w', 'e'))
self.cryptModeCombobox.state(('readonly',))
self.cryptModeCombobox.config(values=["ECB", "CBC"])
set_combobox_item(self.cryptModeCombobox, "ECB", True)
self.cryptoOptionCombobox = ttk.Combobox(self, state="readonly", values=["解密文件", "不需解密", "解密保名"], width=10)
self.cryptoOption = tk.StringVar()
self.cryptoOptionCombobox['textvariable'] = self.cryptoOption
self.cryptoOptionCombobox.grid(sticky=('w', 'e'), row=0, column=1)
self.passwordEntry = tk.Entry(self, show="*", width=40)
self.password = tk.StringVar()
self.passwordEntry['textvariable'] = self.password
self.passwordEntry.grid(sticky=('w', 'e'), row=0, column=2)
self.pageOptionCombobox = ttk.Combobox(self, state="readonly", values=["单页", "双页"], width=10)
self.pageOption = tk.StringVar()
self.pageOptionCombobox['textvariable'] = self.pageOption
self.pageOptionCombobox.grid(sticky=('w', 'e'), row=0, column=3)
self.orderOptionCombobox = ttk.Combobox(self, state="readonly", values=["左开", "右开"], width=10)
self.orderOption = tk.StringVar()
self.orderOptionCombobox['textvariable'] = self.orderOption
self.orderOptionCombobox.grid(sticky=('w', 'e'), row=0, column=4)
self.fileFromButton = ttk.Button(self, text="选择文件", width=10)
self.fileFromButton.grid(sticky=('w', 'e'), row=0, column=5)
self.fileFromButton['command'] = self.file_from_button_callback
self.refreshButton = ttk.Button(self, text="重新加载", width=10)
self.refreshButton.grid(sticky=('w', 'e'), row=0, column=6)
self.refreshButton['command'] = self.refresh_button_callback
self.imgCanvas = CFCanvas(500, 500, self)
self.imgCanvas.grid(sticky=('w', 'e', 'n', 's'), row=1, column=0, columnspan=7)
self.imgSizeNameLabel = tk.Label(self, text="调整大小", width=10)
self.imgSizeNameLabel.grid(sticky=('e',), row=2, column=0)
self.imgSizeScale = ttk.Scale(self, orient="horizontal", from_=1, to=100)
self.imgSizeScale.grid(sticky=('w', 'e'), row=2, column=1, columnspan=2)
self.imgSizeScale.bind('<ButtonRelease-1>', self.set_img_size)
self.imgSizeScale.bind('<B1-Motion>', self.set_img_size_info)
self.imgSizeInfoLabel = tk.Label(self, width=10)
self.imgSizeInfo = tk.StringVar()
self.imgSizeInfoLabel['textvariable'] = self.imgSizeInfo
self.imgSizeInfoLabel.grid(sticky=('w', 'e'), row=2, column=3)
self.prevImgButton = ttk.Button(self, text="<")
self.prevImgButton.grid(sticky=('w', 'n', 's'), row=2, column=4)
self.prevImgButton['command'] = self.prev_img_button_callback
self.nextImgButton = ttk.Button(self, text=">")
self.nextImgButton.grid(sticky=('w', 'n', 's'), row=2, column=5)
self.nextImgButton['command'] = self.next_img_button_callback
self.rotateImgButton = ttk.Button(self, text="旋转")
self.rotateImgButton.grid(sticky=('w',), row=2, column=6)
self.rotateImgButton['command'] = self.rotate_img_button_callback
self.imgInfoLabel = tk.Label(self, text="图片信息")
self.imgInfo = tk.StringVar()
self.imgInfoLabel['textvariable'] = self.imgInfo
self.imgInfoLabel.grid(sticky=('w',), row=3, column=1)
self.jumpPageNumberLabel = tk.Label(self, text="跳转页码:")
self.jumpPageNumberLabel.grid(sticky=('e',), row=3, column=4)
self.jumpPageNumberEntry = tk.Entry(self, width=10)
self.jumpPageNumber = tk.StringVar()
self.jumpPageNumberEntry['textvariable'] = self.jumpPageNumber
self.jumpPageNumberEntry.grid(sticky=('w', 'e'), row=3, column=5)
self.jumpPageNumberButton = ttk.Button(self, text="GO", width=10)
self.jumpPageNumberButton.grid(sticky=('w', 'e'), row=3, column=6)
self.jumpPageNumberButton['command'] = self.jump_page_callback
        # Store the list of image paths, used for paging back and forth
self.img_list = []
        # Keep the path of the current image
self.current_img_path = ""
        # Initialize the comboboxes and set default values
self.init_default_combobox_item()
        # Set the maximum image width
self.img_max_width = 1960
        # Set the default image width and the position of the image-size slider
self.zoom_width = self.img_max_width * 0.22
        # Angle by which the image should be rotated counterclockwise
self.rotate_angle = 0
self.imgSizeScale.set(self.zoom_width * 100 / self.img_max_width)
self.imgSizeInfo.set(str(self.zoom_width * 100 // self.img_max_width) + "%")
        # Bind keyboard events
self.master.bind("<Key>", self.key_event)
self.jumpPageNumberEntry.bind("<Return>", self.jump_page_callback)
        # Re-center the image when the main window is resized
self.master.bind("<Configure>", self.img_center)
        # Bind the mouse wheel to image zooming
self.master.bind("<MouseWheel>", self.process_wheel)
self.master.columnconfigure(0, weight=1)
self.master.rowconfigure(0, weight=1)
self.grid(row=0, column=0, sticky=(tk.N, tk.S, tk.E, tk.W))
self.columnconfigure(2, weight=1)
self.rowconfigure(1, weight=1)
        # Keep the initial arguments passed in; if arguments were given, open the image directly
self.kwargs = dict(**kwargs)
if self.kwargs:
self.open_img(**self.kwargs)
    # Select the mode used for encryption/decryption
def choose_crypt_mode(self):
if self.cryptModeOption.get() == "ECB":
return CFCrypto
elif self.cryptModeOption.get() == "CBC":
return CFCryptoX
    # Jump to the specified page number
def jump_page_callback(self, event=None):
try:
page_number = int(self.jumpPageNumber.get())
if 0 < page_number <= len(self.img_list):
self.current_img_path = self.img_list[page_number-1]
self.img_show()
self.set_img_info()
except Exception as e:
logging.error("Jump page number error!")
    # Bind the mouse wheel to image zooming
def process_wheel(self, event=None):
img_size_scale = self.imgSizeScale.get()
if event.delta > 0:
if img_size_scale * 1.2 <= 100:
self.imgSizeScale.set(img_size_scale * 1.2)
else:
self.imgSizeScale.set(100.0)
else:
if img_size_scale * 0.8 >= 5:
self.imgSizeScale.set(img_size_scale * 0.8)
else:
self.imgSizeScale.set(5.0)
self.set_img_size_info()
self.set_img_size()
    # Initialize the comboboxes and set default values
def init_default_combobox_item(self):
        # Set the default decryption option
set_combobox_item(self.cryptoOptionCombobox, "不需解密", True)
        # Set single-page display
set_combobox_item(self.pageOptionCombobox, "单页", True)
        # Set the double-page reading order
set_combobox_item(self.orderOptionCombobox, "左开", True)
    # Based on the image path, collect all images in the current folder into the image list for paging
def set_img_list(self):
crypto_algorithm = self.choose_crypt_mode()
img_dir_path = self.current_img_path[:self.current_img_path.rindex("/") + 1]
crypto_option = self.cryptoOption.get()
if crypto_option == "解密文件":
self.img_list = []
            # Temporary list of decrypted image names, used for sorting
decrypt_img_name_list = []
for img_name in os.listdir(img_dir_path):
try:
decrypt_img_name = crypto_algorithm.StringCrypto(self.password.get()).decrypt(img_name)
if os.path.splitext(decrypt_img_name.lower())[1][1:] in IMG_EXT_LIST:
decrypt_img_name_list.append(decrypt_img_name)
except Exception as e:
logging.error("Decrypt img name error!")
            # Sort the decrypted image names, then re-encrypt them into img_list so pages are shown in order
decrypt_img_name_list.sort()
for decrypt_img_name in decrypt_img_name_list:
img_name = crypto_algorithm.StringCrypto(self.password.get()).encrypt(decrypt_img_name)
self.img_list.append(os.path.join(img_dir_path, img_name))
elif crypto_option == "解密保名" or crypto_option == "不需解密":
self.img_list = [os.path.join(img_dir_path, img_name) for img_name in os.listdir(img_dir_path)
if os.path.splitext(img_name.lower())[1][1:] in IMG_EXT_LIST]
    # Decrypt a string
def decrypt_string(self, str):
crypto_algorithm = self.choose_crypt_mode()
try:
decrypt_str = crypto_algorithm.StringCrypto(self.password.get()).decrypt(str)
except Exception as e:
logging.error("Decrypt img name error!")
decrypt_str = ""
return decrypt_str
    # Set the displayed image information
def set_img_info(self):
page_option = self.pageOption.get()
crypto_option = self.cryptoOption.get()
if crypto_option == "解密文件":
img_name = self.decrypt_string(os.path.basename(self.current_img_path))
print(img_name)
else:
img_name = os.path.basename(self.current_img_path)
if not self.img_list or self.current_img_path not in self.img_list:
self.imgInfo.set("")
elif page_option == "单页":
img_index = self.img_list.index(self.current_img_path)
index_str = str(img_index + 1) + "/" + str(len(self.img_list))
self.imgInfo.set(index_str + " : " + img_name)
elif page_option == "双页":
img_index = self.img_list.index(self.current_img_path)
index_str = str(img_index + 1) + "/" + str(len(self.img_list))
if img_index < len(self.img_list) - 1:
img_index_next = img_index + 1
index_str_next = str(img_index_next + 1) + "/" + str(len(self.img_list))
if crypto_option == "解密文件":
img_name_next = self.decrypt_string(os.path.basename(self.img_list[img_index_next]))
else:
img_name_next = os.path.basename(self.img_list[img_index_next])
order_option = self.orderOption.get()
if order_option == "左开":
self.imgInfo.set(index_str + ", " + index_str_next + " : " + img_name + " | " + img_name_next)
else:
self.imgInfo.set(index_str_next + ", " + index_str + " : " + img_name_next + " | " + img_name)
else:
self.imgInfo.set(index_str + " : " + img_name)
def key_event(self, event=None):
        # Right arrow key: next image
if event.keycode == 39:
self.next_img_button_callback()
        # Left arrow key: previous image
elif event.keycode == 37:
self.prev_img_button_callback()
    # Choose the image to display, fill in the image path, and build the image path list
def file_from_button_callback(self, event=None):
img_path = filedialog.askopenfilename()
if img_path:
self.current_img_path = img_path
self.set_img_list()
self.img_show()
self.set_img_info()
    # Open an image given its path, the decryption option and the password
def open_img(self, img_path="", password="", crypto_option="不需解密", crypto_mode="ECB", page_option="单页", order_option="左开"):
if img_path and os.path.isfile(img_path):
self.current_img_path = os.path.abspath(img_path).replace("\\", "/")
self.password.set(str(password))
if page_option in ["单页", "双页"]:
self.pageOption.set(page_option)
if order_option in ["左开", "右开"]:
self.orderOption.set(order_option)
if crypto_option in ["解密文件", "不需解密", "解密保名"]:
self.cryptoOption.set(crypto_option)
if crypto_mode in ["ECB", "CBC"]:
self.cryptModeOption.set(crypto_mode)
self.set_img_list()
self.img_show()
self.set_img_info()
    # Reload the image
def refresh_button_callback(self, event=None):
self.set_img_list()
self.img_show()
self.set_img_info()
    # Show or hide the contents of the password entry
def password_show_button_callback(self, event=None):
if self.passwordEntry["show"] == "*":
self.passwordEntry["show"] = ""
else:
self.passwordEntry["show"] = "*"
    # Page backward and display the image
def prev_img_button_callback(self, event=None):
page_option = self.pageOption.get()
self.rotate_angle = 0
if not self.img_list:
return
elif self.current_img_path not in self.img_list:
index = len(self.img_list)
else:
index = self.img_list.index(self.current_img_path)
if page_option == "单页":
if index == 0:
return
else:
self.current_img_path = self.img_list[index - 1]
elif page_option == "双页":
if index == 0:
return
elif index == 1:
self.current_img_path = self.img_list[index - 1]
else:
self.current_img_path = self.img_list[index - 2]
self.img_show()
self.set_img_info()
    # Page forward and display the image
def next_img_button_callback(self, event=None):
page_option = self.pageOption.get()
self.rotate_angle = 0
if not self.img_list:
return
elif self.current_img_path not in self.img_list:
index = -1
else:
index = self.img_list.index(self.current_img_path)
if page_option == "单页":
if index >= len(self.img_list) - 1:
return
else:
self.current_img_path = self.img_list[index + 1]
elif page_option == "双页":
if index >= len(self.img_list) - 2:
return
else:
self.current_img_path = self.img_list[index + 2]
self.img_show()
self.set_img_info()
    # Rotate the image counterclockwise
def rotate_img_button_callback(self, event=None):
        # Rotate 90 degrees counterclockwise
self.rotate_angle += 90
        # Wrap around when exceeding 360 degrees
self.rotate_angle %= 360
self.img_show()
def img_center(self, event=None):
if self.imgCanvas:
self.imgCanvas.img_center()
    # While dragging the image-size slider, show the image-size percentage
def set_img_size_info(self, event=None):
self.zoom_width = int(self.imgSizeScale.get() * self.img_max_width / 100)
self.imgSizeInfo.set(str(self.zoom_width * 100 // self.img_max_width) + "%")
    # Set the size of the currently displayed image, scaling while keeping the aspect ratio
def set_img_size(self, event=None):
self.set_img_size_info()
self.img_show()
    # Show a static image
def default_img_show(self, img_path):
self.imgCanvas.default_img_show(img_path, self.rotate_angle, self.zoom_width)
    # Show two static images side by side
def default_double_img_show(self, img_path, next_img_path, order_option):
self.imgCanvas.default_double_img_show(img_path, next_img_path,
order_option, self.rotate_angle, self.zoom_width)
def default_gif_show(self, img_path):
self.imgCanvas.default_gif_show(img_path, self.rotate_angle, self.zoom_width)
    # Show an encrypted static image
def crypto_img_show(self, img_path):
crypto_algorithm = self.choose_crypt_mode()
img_file_like = io.BytesIO(crypto_algorithm.ByteCrypto(self.password.get()).decrypt(img_path))
self.imgCanvas.default_img_show(img_file_like, self.rotate_angle, self.zoom_width)
    # Show two encrypted static images side by side
def crypto_double_img_show(self, img_path, next_img_path, order_option):
crypto_algorithm = self.choose_crypt_mode()
img_file_like = io.BytesIO(crypto_algorithm.ByteCrypto(self.password.get()).decrypt(img_path))
next_img_file_like = io.BytesIO(crypto_algorithm.ByteCrypto(self.password.get()).decrypt(next_img_path))
self.imgCanvas.default_double_img_show(img_file_like, next_img_file_like, order_option,
self.rotate_angle, self.zoom_width)
    # Show an encrypted animated image (GIF)
def crypto_gif_show(self, img_path):
crypto_algorithm = self.choose_crypt_mode()
img_file_like = io.BytesIO(crypto_algorithm.ByteCrypto(self.password.get()).decrypt(img_path))
self.imgCanvas.default_gif_show(img_file_like, self.rotate_angle, self.zoom_width)
def cancel_img(self):
self.imgCanvas.cancel_img()
self.imgCanvas = None
    # Display the image according to its type and the decryption option
def img_show(self, event=None):
crypto_algorithm = self.choose_crypt_mode()
page_option = self.pageOption.get()
self.imgCanvas.cancel_img()
crypto_option = self.cryptoOption.get()
        # Order setting for double-page display
order_option = self.orderOption.get()
        # Return immediately if the path does not exist
if not self.current_img_path or not os.path.exists(self.current_img_path):
return
img_name = os.path.basename(self.current_img_path)
if crypto_option == "解密文件":
try:
decrypt_img_name = crypto_algorithm.StringCrypto(self.password.get()).decrypt(img_name)
                # If the image extension is not supported, return immediately
if os.path.splitext(decrypt_img_name.lower())[1][1:] not in IMG_EXT_LIST:
tkmessagebox.showerror("错误", "文件格式不支持")
return
if page_option == "单页":
if os.path.splitext(decrypt_img_name)[1] == ".gif":
self.crypto_gif_show(self.current_img_path)
else:
self.crypto_img_show(self.current_img_path)
elif page_option == "双页":
index = self.img_list.index(self.current_img_path)
                    # If already at the last page, show only the last two pages of the list
if index == len(self.img_list) - 1:
next_img_path = self.current_img_path
self.current_img_path = self.img_list[index - 1]
else:
next_img_path = self.img_list[index + 1]
self.crypto_double_img_show(self.current_img_path, next_img_path, order_option)
except ValueError as e:
logging.error("Decrypt img error!")
tkmessagebox.showerror("错误", "图片解密失败")
elif crypto_option == "不需解密":
try:
                # If the image extension is not supported, return immediately
if os.path.splitext(img_name.lower())[1][1:] not in IMG_EXT_LIST:
tkmessagebox.showerror("错误", "文件格式不支持")
return
if page_option == "单页":
if os.path.splitext(self.current_img_path)[1] == ".gif":
self.default_gif_show(self.current_img_path)
else:
self.default_img_show(self.current_img_path)
elif page_option == "双页":
index = self.img_list.index(self.current_img_path)
                    # If already at the last page, show only the last two pages of the list
if index == len(self.img_list) - 1:
next_img_path = self.current_img_path
self.current_img_path = self.img_list[index - 1]
else:
next_img_path = self.img_list[index + 1]
self.default_double_img_show(self.current_img_path, next_img_path, order_option)
except OSError as e:
logging.error("Img format error!")
tkmessagebox.showerror("错误", "图片格式错误")
elif crypto_option == "解密保名":
try:
                # If the image extension is not supported, return immediately
if os.path.splitext(img_name.lower())[1][1:] not in IMG_EXT_LIST:
tkmessagebox.showerror("错误", "文件格式不支持")
return
if page_option == "单页":
if os.path.splitext(self.current_img_path)[1] == ".gif":
self.crypto_gif_show(self.current_img_path)
else:
self.crypto_img_show(self.current_img_path)
elif page_option == "双页":
index = self.img_list.index(self.current_img_path)
                    # If already at the last page, show only the last two pages of the list
if index == len(self.img_list) - 1:
next_img_path = self.current_img_path
self.current_img_path = self.img_list[index - 1]
else:
next_img_path = self.img_list[index + 1]
self.crypto_double_img_show(self.current_img_path, next_img_path, order_option)
except ValueError as e:
logging.error("Decrypt img error!")
tkmessagebox.showerror("错误", "图片解密失败")
# Open the image window directly from external arguments
def main_window(img_path="", password="", crypto_option="不需解密", crypto_mode="ECB", page_option="单页", order_option="左开"):
app = Window(master=None, img_path=img_path, password=password,
crypto_option=crypto_option, crypto_mode=crypto_mode, page_option=page_option, order_option=order_option)
app.master.title("图片查看器")
app.master.minsize(600, 600)
app.mainloop()
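# Illustrative call (the path and password below are assumptions):
# main_window(img_path="./pics/page_001.jpg", password="secret", crypto_option="解密文件", crypto_mode="CBC")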
if __name__ == '__main__':
app = Window()
    # Set the window title:
app.master.title("图片查看器")
app.master.minsize(600, 600)
    # Main message loop:
app.mainloop()
|
172605
|
from __future__ import division
import numpy as np
import covariance as cov
from gp import GaussianProcess
#import com.ntraft.covariance as cov
#from com.ntraft.gp import GaussianProcess
import matplotlib
# The 'MacOSX' backend appears to have some issues on Mavericks.
import sys
if sys.platform.startswith('darwin'):
matplotlib.use('TkAgg')
import matplotlib.pyplot as pl
# This is the true unknown function we are trying to approximate
x1 = lambda x: x.flatten()
x2 = lambda x: x.flatten() # y = x
# x2 = lambda x: 2*np.ones_like(x) # constant
# x2 = lambda x: np.sin(0.9*x).flatten() # sin
# Sample some input points and noisy versions of the function evaluated at
# these points.
N = 20 # number of training points
n = 40 # number of test points
s = 0.00000 # noise variance
# T = np.random.uniform(-5, 0, size=(N,))
T = np.linspace(-10, -5, N)
# T = np.linspace(-90, 0, N)
T[-1] = 19.6 # set a goal point
# T[-1] = 175 # set a goal point
x = x1(T) + s*np.random.randn(N)
y = x2(T) + s*np.random.randn(N)
# points we're going to make predictions at.
Ttest = np.linspace(-5, 20, n)
#Ttest = np.linspace(0, 180, n)
axis = [-20, 35, -10, 25]
#axis = [-200, 400, -90, 200]
# Build our Gaussian process.
# xkernel = cov.sq_exp_kernel(2.5, 1)
# ykernel = cov.sq_exp_kernel(2.5, 1)
# kernel = cov.matern_kernel(2.28388, 2.52288)
# kernel = cov.linear_kernel(-2.87701)
# xkernel = cov.summed_kernel(cov.sq_exp_kernel(2.5, 1), cov.noise_kernel(0.01))
# ykernel = cov.summed_kernel(cov.sq_exp_kernel(2.5, 1), cov.noise_kernel(0.01))
# Cafeteria Hyperparams (pre-evaluated)
# xkernel = cov.summed_kernel(
# cov.matern_kernel(33.542, 47517),
# cov.linear_kernel(315.46),
# cov.noise_kernel(0.53043)
# )
# ykernel = cov.summed_kernel(
# cov.matern_kernel(9.8147, 155.36),
# cov.linear_kernel(17299),
# cov.noise_kernel(0.61790)
# )
# Cafeteria Hyperparams
xkernel = cov.summed_kernel(
#cov.sq_exp_kernel(-1),
cov.matern_kernel(np.exp(1.9128), np.exp(2*5.3844)),
cov.linear_kernel(np.exp(-.5*-2.8770)),
cov.noise_kernel(np.exp(2*-0.3170))
)
ykernel = cov.summed_kernel(
#cov.sq_exp_kernel(-1),
cov.matern_kernel(np.exp(1.2839), np.exp(2*2.5229)),
cov.linear_kernel(np.exp(-3.2*-4.8792)),
cov.noise_kernel(np.exp(2*-0.2407))
)
xgp = GaussianProcess(T, x, Ttest, xkernel)
ygp = GaussianProcess(T, y, Ttest, ykernel)
# PLOTS:
# draw samples from the prior at our test points.
xs = xgp.sample_prior(10)
ys = ygp.sample_prior(10)
pl.figure(1)
pl.plot(xs, ys)
pl.title('Ten samples from the GP prior')
# draw samples from the posterior
ns = 100
xs = xgp.sample(ns)
ys = ygp.sample(ns)
# illustrate the possible paths.
'''pl.figure(2)
pl.subplots_adjust(0.05, 0.1, 0.95, 0.9)
pl.subplot(2,2,1)
pl.plot(x, y, 'yo', ms=8)
ne = 10
pl.plot(xs[:,0:ne], ys[:,0:ne], 'g-')
pl.title('{} samples from the GP posterior'.format(ne))
pl.axis(axis)
pl.subplot(2,2,2)
pl.plot(x, y, 'yo', ms=8)
pl.plot(xs, ys, 'g-')
pl.title('{} samples from the GP posterior'.format(ns))
pl.axis(axis)
pl.subplot(2,2,3)
pl.plot(x, y, 'yo', ms=8)
pl.plot(x1(Ttest), x2(Ttest), 'b-')
pl.plot(xgp.mu, ygp.mu, 'r--', lw=2)
pl.title('Predictive mean and ground truth')
pl.axis(axis)
pl.subplot(2,2,4)
pl.plot(x, y, 'yo', ms=8)
xmean = np.mean(xs, 1)
ymean = np.mean(ys, 1)
pl.plot(xmean, ymean, 'r--', lw=2)
pl.title('Mean of {} samples'.format(ns))
pl.axis(axis)'''
pl.show()
|
172635
|
import torch
from torch.utils.data import Dataset
import glob
import tifffile as T
from libtiff import TIFF
import numpy as np
def range_normalize(v):
v = (v - v.mean(axis=(1, 2), keepdims=True)) / (v.std(axis=(1, 2), keepdims=True) + 1e-12)
v_min, v_max = v.min(axis=(1, 2), keepdims=True), v.max(axis=(1, 2), keepdims=True)
v = (v - v_min) / (v_max - v_min + 1e-5)
return v
def smart_padding(img, data_shape, lables_shape, stride):
if img.shape[0] < data_shape[0]:
img = np.pad(img, ((0, data_shape[0] - img.shape[0]), (0, 0), (0, 0)), mode='reflect')
if img.shape[1] < data_shape[1]:
img = np.pad(img, ((0, 0), (0, data_shape[1] - img.shape[1]), (0, 0)), mode='reflect')
if img.shape[2] < data_shape[2]:
        img = np.pad(img, ((0, 0), (0, 0), (0, data_shape[2] - img.shape[2])), mode='reflect')
dz = int(np.floor((img.shape[0] - data_shape[0]) / stride[0] + 1))
dy = int(np.floor((img.shape[1] - data_shape[1]) / stride[1] + 1))
dx = int(np.floor((img.shape[2] - data_shape[2]) / stride[2] + 1))
effective_data_shape = (
data_shape[0] * dz - (data_shape[0] - stride[0]) * (dz - 1),
data_shape[1] * dy - (data_shape[1] - stride[1]) * (dy - 1),
data_shape[2] * dx - (data_shape[2] - stride[2]) * (dx - 1)
)
if effective_data_shape[0] < img.shape[0]:
img = np.pad(img,
(
(0, (data_shape[0] * (dz + 1) - (data_shape[0] - stride[0]) * dz) - img.shape[0]),
(0, 0),
(0, 0)),
mode='reflect')
if effective_data_shape[1] < img.shape[1]:
img = np.pad(img,
(
(0, 0),
(0, (data_shape[1] * (dy + 1) - (data_shape[1] - stride[1]) * dy) - img.shape[1]),
(0, 0)),
mode='reflect')
if effective_data_shape[2] < img.shape[2]:
img = np.pad(img,
(
(0, 0),
(0, 0),
(0, (data_shape[2] * (dx + 1) - (data_shape[2] - stride[2]) * dx) - img.shape[2])),
mode='reflect')
effective_data_shape = img.shape
effective_lable_shape = (
effective_data_shape[0] - (data_shape[0] - lables_shape[0]),
effective_data_shape[1] - (data_shape[1] - lables_shape[1]),
effective_data_shape[2] - (data_shape[2] - lables_shape[2])
)
if effective_lable_shape[0] < img.shape[0]:
img = np.pad(img, (((data_shape[0] - lables_shape[0]) // 2,
(data_shape[0] - lables_shape[0]) // 2 + (data_shape[0] - lables_shape[0]) % 2),
(0, 0),
(0, 0)),
mode='reflect')
if effective_lable_shape[1] < img.shape[1]:
img = np.pad(img, ((0, 0),
((data_shape[1] - lables_shape[1]) // 2,
(data_shape[1] - lables_shape[1]) // 2 + (data_shape[1] - lables_shape[1]) % 2),
(0, 0)),
mode='reflect')
if effective_lable_shape[2] < img.shape[2]:
img = np.pad(img, ((0, 0),
(0, 0),
((data_shape[2] - lables_shape[2]) // 2,
(data_shape[2] - lables_shape[2]) // 2 + (
data_shape[2] - lables_shape[2]) % 2)),
mode='reflect')
return img
class Single_Image_Eval(Dataset):
def __init__(self,
image_path='HaftJavaherian_DeepVess2018_GroundTruthImage.tif',
label_path='HaftJavaherian_DeepVess2018_GroundTruthLabel.tif',
data_shape=(7, 33, 33),
lables_shape=(1, 4, 4),
stride=(1, 1, 1),
range_norm=False):
self.range_norm = range_norm
try:
img = T.imread(image_path)
except:
img = []
tif = TIFF.open(image_path)
for _image in tif.iter_images():
img.append(_image)
img = np.stack(img, 0)
try:
lbl = T.imread(label_path)
except:
lbl = []
tif = TIFF.open(label_path)
for _lable in tif.iter_images():
lbl.append(_lable)
lbl = np.stack(lbl, 0)
img = smart_padding(img, data_shape, lables_shape, stride)
lbl = smart_padding(lbl, data_shape, lables_shape, stride)
self.org_shape = img.shape
self.img = img.astype(np.float32)
self.lbl = lbl.astype(np.float32)
self.shape = self.img.shape
self.data_shape = data_shape
self.lables_shape = lables_shape
self.stride = stride
self.dz = int(np.floor((self.shape[0] - data_shape[0]) / stride[0] + 1))
self.dy = int(np.floor((self.shape[1] - data_shape[1]) / stride[1] + 1))
self.dx = int(np.floor((self.shape[2] - data_shape[2]) / stride[2] + 1))
self.effective_data_shape = (
data_shape[0] * self.dz - (data_shape[0] - stride[0]) * (self.dz - 1),
data_shape[1] * self.dy - (data_shape[1] - stride[1]) * (self.dy - 1),
data_shape[2] * self.dx - (data_shape[2] - stride[2]) * (self.dx - 1)
)
self.effective_lable_shape = (
self.effective_data_shape[0] - (data_shape[0] - lables_shape[0]),
self.effective_data_shape[1] - (data_shape[1] - lables_shape[1]),
self.effective_data_shape[2] - (data_shape[2] - lables_shape[2])
)
self.effective_lable_idx = (
((data_shape[0] - lables_shape[0]) // 2,
self.effective_data_shape[0] - (
(data_shape[0] - lables_shape[0]) // 2 + (data_shape[0] - lables_shape[0]) % 2)),
((data_shape[1] - lables_shape[1]) // 2,
self.effective_data_shape[1] - (
(data_shape[1] - lables_shape[1]) // 2 + (data_shape[1] - lables_shape[1]) % 2)),
((data_shape[2] - lables_shape[2]) // 2,
self.effective_data_shape[2] - (
(data_shape[2] - lables_shape[2]) // 2 + (data_shape[2] - lables_shape[2]) % 2))
)
self.lbl_z = ((data_shape[0] - lables_shape[0]) // 2,
(data_shape[0] - lables_shape[0]) // 2 + lables_shape[0])
self.lbl_y = ((data_shape[1] - lables_shape[1]) // 2,
(data_shape[1] - lables_shape[1]) // 2 + lables_shape[1])
self.lbl_x = ((data_shape[2] - lables_shape[2]) // 2,
(data_shape[2] - lables_shape[2]) // 2 + lables_shape[2])
self.max_iter = self.dz * self.dy * self.dx
def __len__(self):
return self.max_iter
def __getitem__(self, index):
z, y, x = np.unravel_index(index, (self.dz, self.dy, self.dx))
z = z * self.stride[0]
y = y * self.stride[1]
x = x * self.stride[2]
v = self.img[z: z + self.data_shape[0],
y: y + self.data_shape[1],
x: x + self.data_shape[2]]
lbl = self.lbl[z: z + self.data_shape[0],
y: y + self.data_shape[1],
x: x + self.data_shape[2]]
lbl = lbl[self.lbl_z[0]: self.lbl_z[1],
self.lbl_y[0]: self.lbl_y[1],
self.lbl_x[0]: self.lbl_x[1]]
# Normalize
if self.range_norm:
v = range_normalize(v)
else:
v = (v - v.mean(axis=(1, 2), keepdims=True)) / (v.std(axis=(1, 2), keepdims=True) + 1e-12)
# To Tensor
data = torch.Tensor(v).unsqueeze(0)
lables = torch.Tensor(lbl // self.lbl.max()).long()
return data, lables
class Directory_Image_Train(Dataset):
def __init__(self,
images_path,
labels_path,
max_iter=1000,
data_shape=(7, 33, 33),
lables_shape=(1, 4, 4),
stride=(1, 1, 1),
range_norm=False):
self.range_norm = range_norm
images = sorted(glob.glob(images_path + '/*tif'))
labels = sorted(glob.glob(labels_path + '/*tif'))
self.org_shape = []
self.shape = []
self.img = []
self.lbl = []
self.data_shape = data_shape
self.lables_shape = lables_shape
self.stride = stride
self.dz = []
self.dy = []
self.dx = []
self.effective_data_shape = []
self.effective_lable_shape = []
self.effective_lable_idx = []
self.lbl_z = []
self.lbl_y = []
self.lbl_x = []
for img_path, lbl_path in zip(images, labels):
try:
img = T.imread(img_path)
except:
img = []
tif = TIFF.open(img_path)
for _image in tif.iter_images():
img.append(_image)
img = np.stack(img, 0)
try:
lbl = T.imread(lbl_path)
except:
lbl = []
tif = TIFF.open(lbl_path)
for _lable in tif.iter_images():
lbl.append(_lable)
lbl = np.stack(lbl, 0)
img = smart_padding(img, data_shape, lables_shape, stride)
lbl = smart_padding(lbl, data_shape, lables_shape, stride)
self.org_shape.append(img.shape)
self.img.append(img.astype(np.float32))
self.lbl.append(lbl.astype(np.float32))
shape = img.shape
self.shape.append(shape)
dz = int(np.floor((shape[0] - data_shape[0]) / stride[0] + 1))
dy = int(np.floor((shape[1] - data_shape[1]) / stride[1] + 1))
dx = int(np.floor((shape[2] - data_shape[2]) / stride[2] + 1))
effective_data_shape = (
data_shape[0] * dz - (data_shape[0] - stride[0]) * (dz - 1),
data_shape[1] * dy - (data_shape[1] - stride[1]) * (dy - 1),
data_shape[2] * dx - (data_shape[2] - stride[2]) * (dx - 1)
)
effective_lable_shape = (
effective_data_shape[0] - (data_shape[0] - lables_shape[0]),
effective_data_shape[1] - (data_shape[1] - lables_shape[1]),
effective_data_shape[2] - (data_shape[2] - lables_shape[2])
)
effective_lable_idx = (
((data_shape[0] - lables_shape[0]) // 2,
effective_data_shape[0] - (
(data_shape[0] - lables_shape[0]) // 2 + (data_shape[0] - lables_shape[0]) % 2)),
((data_shape[1] - lables_shape[1]) // 2,
effective_data_shape[1] - (
(data_shape[1] - lables_shape[1]) // 2 + (data_shape[1] - lables_shape[1]) % 2)),
((data_shape[2] - lables_shape[2]) // 2,
effective_data_shape[2] - (
(data_shape[2] - lables_shape[2]) // 2 + (data_shape[2] - lables_shape[2]) % 2))
)
lbl_z = ((data_shape[0] - lables_shape[0]) // 2,
(data_shape[0] - lables_shape[0]) // 2 + lables_shape[0])
lbl_y = ((data_shape[1] - lables_shape[1]) // 2,
(data_shape[1] - lables_shape[1]) // 2 + lables_shape[1])
lbl_x = ((data_shape[2] - lables_shape[2]) // 2,
(data_shape[2] - lables_shape[2]) // 2 + lables_shape[2])
self.dz.append(dz)
self.dy.append(dy)
self.dx.append(dx)
self.effective_data_shape.append(effective_data_shape)
self.effective_lable_shape.append(effective_lable_shape)
self.effective_lable_idx.append(effective_lable_idx)
self.lbl_z.append(lbl_z)
self.lbl_y.append(lbl_y)
self.lbl_x.append(lbl_x)
self.max_iter = max_iter
def __len__(self):
return self.max_iter
def __getitem__(self, index):
i = np.random.randint(0, len(self.img))
z = np.random.randint(0, self.dz[i])
y = np.random.randint(0, self.dy[i])
x = np.random.randint(0, self.dx[i])
z = z * self.stride[0]
y = y * self.stride[1]
x = x * self.stride[2]
v = self.img[i][z: z + self.data_shape[0],
y: y + self.data_shape[1],
x: x + self.data_shape[2]]
lbl = self.lbl[i][z: z + self.data_shape[0],
y: y + self.data_shape[1],
x: x + self.data_shape[2]]
lbl = lbl[self.lbl_z[i][0]: self.lbl_z[i][1],
self.lbl_y[i][0]: self.lbl_y[i][1],
self.lbl_x[i][0]: self.lbl_x[i][1]]
# Normalize
if self.range_norm:
v = range_normalize(v)
else:
v = (v - v.mean(axis=(1, 2), keepdims=True)) / (v.std(axis=(1, 2), keepdims=True) + 1e-12)
# To Tensor
data = torch.Tensor(v).unsqueeze(0)
lables = torch.Tensor(lbl // 255).long()
return data, lables
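# Minimal usage sketch (not part of the original module; assumes the default
# DeepVess ground-truth TIFFs are available at the paths above): iterate
# patches through a torch DataLoader.
if __name__ == '__main__':
    from torch.utils.data import DataLoader

    eval_set = Single_Image_Eval()
    loader = DataLoader(eval_set, batch_size=8, shuffle=False)
    for data, labels in loader:
        # data: (8, 1, 7, 33, 33), labels: (8, 1, 4, 4)
        print(data.shape, labels.shape)
        break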
|
172686
|
import argparse
import numpy as np
import torch
import os, sys
import math
from subspace_inference import models, losses, posteriors, utils
# from swag.posteriors import SWAG, EllipticalSliceSampling, BenchmarkPyro, BenchmarkVIModel
from regression import run
from bayesian_benchmarks.data import get_regression_data
from bayesian_benchmarks.models.nnet.neural_linear import NLRegressionRunner
parser = argparse.ArgumentParser()
parser.add_argument("--model", default='RegNet', nargs='?', type=str)
parser.add_argument("--dataset", default='energy', nargs='?', type=str)
parser.add_argument("--split", default=0, nargs='?', type=int)
parser.add_argument("--seed", default=0, nargs='?', type=int)
parser.add_argument('--database_path', default='', help='output database')
parser.add_argument('--dir', type=str, default=None, required=True, help='training directory (default: None)')
parser.add_argument('--epochs', type=int, default=50, metavar='N', help='number of epochs to train (default: 50)')
parser.add_argument('--save_freq', type=int, default=25, metavar='N', help='save frequency (default: 25)')
parser.add_argument('--eval_freq', type=int, default=5, metavar='N', help='evaluation frequency (default: 5)')
parser.add_argument('--lr_init', type=float, default=0.01, metavar='LR', help='initial learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)')
parser.add_argument('--wd', type=float, default=1e-4, help='weight decay (default: 1e-4)')
parser.add_argument('--batch_size', type=int, default=400, metavar='N', help='input batch size (default: 400)')
parser.add_argument('--model_variance', action='store_true', help='whether NN should also model variance')
parser.add_argument('--noise_var', action='store_true', help='whether NN should have a noise variance term')
parser.add_argument('--no_schedule', action='store_true', help='use a constant learning rate (no schedule)')
parser.add_argument('--uci-small', action='store_true')
args = parser.parse_args()
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
#torch.set_default_tensor_type(torch.cuda.FloatTensor)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
args.device = None
if torch.cuda.is_available():
args.device = torch.device('cuda')
else:
args.device = torch.device('cpu')
print('Preparing directory %s' % args.dir)
os.makedirs(args.dir, exist_ok=True)
with open(os.path.join(args.dir, 'command.sh'), 'w') as f:
f.write(' '.join(sys.argv))
f.write('\n')
torch.backends.cudnn.benchmark = True
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
print('Preparing dataset %s' % args.dataset)
dataset = get_regression_data(args.dataset, split=args.split)
print(dataset.N, dataset.D, dataset.name)
print('Using model %s' % args.model)
model_cfg = getattr(models, args.model)
print('Preparing model')
print(*model_cfg.args)
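# Choose hidden-layer widths heuristically from the dataset size: larger
# datasets get wider/deeper networks (the uci-small variant mirrors the DVI paper setup).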
if not args.uci_small:
if dataset.N > 6000:
model_cfg.kwargs['dimensions'] = [1000, 1000, 500, 50]
else:
model_cfg.kwargs['dimensions'] = [1000, 500, 50,]
else:
# similarly to DVI paper;
# protein dataset case
if dataset.N > 40000:
model_cfg.kwargs['dimensions'] = [100]
else:
model_cfg.kwargs['dimensions'] = [50]
if args.batch_size is None:
args.batch_size = dataset.N // 10
print('Using batch size', args.batch_size)
if args.epochs == 0:
args.epochs = int(np.ceil(6000 * args.batch_size / dataset.N))
print('Number of epochs is: ', args.epochs)
print(model_cfg.kwargs)
if args.model_variance:
print('Model has heteroscedastic regression')
output_dim = 2
noise_var = None
else:
output_dim = 1
noise_var = 0.5
#todo: incorporate into command line args
criterion = losses.GaussianLikelihood
# define a regressionrunner class to fit w/in confines of regression.py
regression_model = NLRegressionRunner(
base = model_cfg.base,
epochs = args.epochs,
criterion = criterion,
batch_size=args.batch_size,
momentum = args.momentum, wd=args.wd, lr_init=args.lr_init,
use_cuda = torch.cuda.is_available(),
const_lr=args.no_schedule, double_bias_lr=True,
model_variance=args.model_variance,
input_dim=dataset.D, output_dim=output_dim, apply_var=args.noise_var, **model_cfg.kwargs
)
mname = args.model + 'NL_LP'
bb_args = argparse.Namespace(model=mname, dataset=args.dataset, split=args.split, seed=args.seed, database_path=args.database_path)
bb_result = run(bb_args, data=dataset, model=regression_model, is_test=args.database_path=='')
#print(bb_result)
print([(k, bb_result[k]) for k in sorted(bb_result)])
utils.save_checkpoint(
args.dir,
args.epochs,
model_state_dict=regression_model.model.state_dict(),
optimizer=regression_model.optimizer.state_dict(),
result=bb_result
)
|
172710
|
import copy
import logging
import numpy
import theano
from theano import tensor
from theano.gradient import disconnected_grad
from blocks.bricks import (
Bias, Identity, Initializable, MLP, Tanh, Softmax, Random)
from blocks.bricks.attention import SequenceContentAttention
from blocks.bricks.base import application
from blocks.bricks.recurrent import (
BaseRecurrent, RecurrentStack, recurrent)
from blocks_extras.bricks.sequence_generator2 import (
SequenceGenerator, SoftmaxReadout, Feedback)
from blocks_extras.bricks.attention2 import AttentionRecurrent
from blocks.bricks.lookup import LookupTable
from blocks.graph import ComputationGraph
from blocks.model import Model
from blocks.filter import VariableFilter
from blocks.serialization import load_parameters
from blocks.utils import dict_union, dict_subset
from lvsr.bricks import (
Encoder, InitializableSequence, EditDistanceReward, BleuReward,
RecurrentWithExtraInput, ConvEncoder)
from lvsr.bricks.readouts import (
ReinforceReadout, CriticReadout, ActorCriticReadout)
from lvsr.bricks.attention import SequenceContentAndConvAttention
from lvsr.utils import global_push_initialization_config
from lvsr.beam_search import BeamSearch
logger = logging.getLogger(__name__)
class Bottom(Initializable):
"""
A bottom class that merges possibly many input sources into one
sequence.
The bottom is responsible for allocating variables for single and
multiple sequences in a batch.
In speech recognition this will typically be the identity transformation
or a small MLP.
Attributes
----------
vector_input_sources : list of str
discrete_input_sources : list of str
Parameters
----------
input_dims : dict
Maps input source to their dimensions, only for vector sources.
input_num_chars : dict
Maps input source to their range of values, only for discrete sources.
"""
vector_input_sources = []
discrete_input_sources = []
def __init__(self, input_dims, input_num_chars, **kwargs):
super(Bottom, self).__init__(**kwargs)
self.input_dims = input_dims
self.input_num_chars = input_num_chars
class LookupBottom(Bottom):
discrete_input_sources = ['inputs']
def __init__(self, dim, **kwargs):
super(LookupBottom, self).__init__(**kwargs)
self.dim = dim
self.mask = tensor.matrix('inputs_mask')
self.batch_inputs = {
'inputs': tensor.lmatrix('inputs')}
self.single_inputs = {
'inputs': tensor.lvector('inputs')}
self.children = [LookupTable(self.input_num_chars['inputs'], self.dim)]
@application(inputs=['inputs'], outputs=['outputs'])
def apply(self, inputs):
return self.children[0].apply(inputs)
def batch_size(self, inputs):
return inputs.shape[1]
def num_time_steps(self, inputs):
return inputs.shape[0]
def single_to_batch_inputs(self, inputs):
# Note: this code supports many inputs, which are all sequences
inputs = {n: v[:, None, :] if v.ndim == 2 else v[:, None]
for (n, v) in inputs.items()}
inputs_mask = tensor.ones((self.num_time_steps(**inputs),
self.batch_size(**inputs)))
return inputs, inputs_mask
def get_dim(self, name):
if name == 'outputs':
return self.dim
return super(LookupBottom, self).get_dim(name)
class SpeechBottom(Bottom):
"""
A Bottom specialized for speech recognition that accepts only one input
- the recordings.
"""
vector_input_sources = ['recordings']
def __init__(self, activation, dims=None, **kwargs):
super(SpeechBottom, self).__init__(**kwargs)
self.num_features = self.input_dims['recordings']
if activation is None:
activation = Tanh()
if dims:
child = MLP([activation] * len(dims),
[self.num_features] + dims,
name="bottom")
self.output_dim = child.output_dim
else:
child = Identity(name='bottom')
self.output_dim = self.num_features
self.children.append(child)
self.mask = tensor.matrix('recordings_mask')
self.batch_inputs = {
'recordings': tensor.tensor3('recordings')}
self.single_inputs = {
'recordings': tensor.matrix('recordings')}
@application(inputs=['recordings'], outputs=['outputs'])
def apply(self, recordings):
return self.children[0].apply(recordings)
def batch_size(self, recordings):
return recordings.shape[1]
def num_time_steps(self, recordings):
return recordings.shape[0]
def single_to_batch_inputs(self, inputs):
# Note: this code supports many inputs, which are all sequences
inputs = {n: v[:, None, :] if v.ndim == 2 else v[:, None]
for (n, v) in inputs.items()}
inputs_mask = tensor.ones((self.num_time_steps(**inputs),
self.batch_size(**inputs)))
return inputs, inputs_mask
def get_dim(self, name):
if name == 'outputs':
return self.output_dim
return super(SpeechBottom, self).get_dim(name)
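# Helpers for building a smaller critic network: divide the relevant dimension
# settings in a config dict by a constant factor.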
def _downsize_dim(value, times):
if isinstance(value, int):
return value / times
elif isinstance(value, list):
value = list(value)
for i in range(len(value)):
value[i] /= times
return value
raise ValueError
def _downsize_config(config, times):
for option in ['dim_dec', 'dim_matcher', 'dim_output_embedding',
'dims_bidir', 'post_merge_dims']:
value = config.get(option)
if value is not None:
config[option] = _downsize_dim(value, times)
for option in ['dim', 'dims']:
value = config['bottom'].get(option)
if value is not None:
config['bottom'][option] = _downsize_dim(value, times)
return config
class EncoderDecoder(Initializable, Random):
"""Encapsulate all reusable logic.
This class plays a few roles: (a) it's a top brick that knows
how to combine bottom, bidirectional and recognizer network, (b)
it holds the input variables and can build whole computation graphs
starting with them, (c) it hides compilation of Theano functions
and initialization of beam search. I find it simpler to have it all
in one place for research code.
Parameters
----------
All defining the structure and the dimensions of the model. Typically
receives everything from the "net" section of the config.
"""
def __init__(self,
input_dims,
input_num_chars,
bos_label, eos_label,
num_labels,
dim_dec, dims_bidir,
enc_transition, dec_transition,
use_states_for_readout,
attention_type,
criterion,
bottom,
lm=None, token_map=None,
bidir=True, window_size=None,
max_length=None, subsample=None,
dims_top=None, extra_input_dim=None,
prior=None, conv_n=None,
post_merge_activation=None,
post_merge_dims=None,
dim_matcher=None,
embed_outputs=True,
dim_output_embedding=None,
reuse_bottom_lookup_table=False,
dec_stack=1,
conv_num_filters=1,
data_prepend_eos=True,
# softmax is the default set in SequenceContentAndConvAttention
energy_normalizer=None,
# for speech this is the approximate phoneme duration in frames
max_decoded_length_scale=1,
# for criteria involving generation of outputs, whether
# or not they should be generated by the recognizer itself
generate_predictions=True,
compute_targets=True,
extra_generation_steps=3,
**kwargs):
all_arguments = copy.deepcopy(locals())
all_arguments.update(copy.deepcopy(kwargs))
del all_arguments['kwargs']
del all_arguments['self']
if post_merge_activation is None:
post_merge_activation = Tanh()
super(EncoderDecoder, self).__init__(**kwargs)
self.bos_label = bos_label
self.eos_label = eos_label
self.data_prepend_eos = data_prepend_eos
self.rec_weights_init = None
self.initial_states_init = None
self.enc_transition = enc_transition
self.dec_transition = dec_transition
self.dec_stack = dec_stack
self.criterion = criterion
self.generate_predictions = generate_predictions
self.extra_generation_steps = extra_generation_steps
self.compute_targets = compute_targets
self.max_decoded_length_scale = max_decoded_length_scale
post_merge_activation = post_merge_activation
if dim_matcher is None:
dim_matcher = dim_dec
# The bottom part, before BiRNN
bottom_class = bottom.pop('bottom_class')
bottom = bottom_class(
input_dims=input_dims, input_num_chars=input_num_chars,
name='bottom',
**bottom)
# BiRNN
if dims_bidir:
if not subsample:
subsample = [1] * len(dims_bidir)
encoder = Encoder(self.enc_transition, dims_bidir,
bottom.get_dim(bottom.apply.outputs[0]),
subsample, bidir=bidir)
elif window_size:
encoder = ConvEncoder(
max_length, bottom.get_dim(bottom.apply.outputs[0]), window_size)
else:
raise ValueError("Don't know which Encoder to use")
dim_encoded = encoder.get_dim(encoder.apply.outputs[0])
# The top part, on top of BiRNN but before the attention
if dims_top:
top = MLP([Tanh()],
[dim_encoded] + dims_top + [dim_encoded], name="top")
else:
top = Identity(name='top')
if dec_stack == 1:
transition = self.dec_transition(
dim=dim_dec, activation=Tanh(), name="transition")
else:
assert not extra_input_dim
transitions = [self.dec_transition(dim=dim_dec,
activation=Tanh(),
name="transition_{}".format(trans_level))
for trans_level in xrange(dec_stack)]
transition = RecurrentStack(transitions=transitions,
skip_connections=True)
# Choose attention mechanism according to the configuration
if attention_type == "content":
attention = SequenceContentAttention(
state_names=transition.apply.states,
attended_dim=dim_encoded, match_dim=dim_matcher,
name="cont_att")
elif attention_type == "content_and_conv":
attention = SequenceContentAndConvAttention(
state_names=transition.apply.states,
conv_n=conv_n,
conv_num_filters=conv_num_filters,
attended_dim=dim_encoded, match_dim=dim_matcher,
prior=prior,
energy_normalizer=energy_normalizer,
name="conv_att")
else:
raise ValueError("Unknown attention type {}"
.format(attention_type))
if not embed_outputs:
raise ValueError("embed_outputs=False is not supported any more")
if not reuse_bottom_lookup_table:
embedding = LookupTable(num_labels + 1,
dim_dec if
dim_output_embedding is None
else dim_output_embedding)
else:
embedding = bottom.children[0]
feedback = Feedback(
embedding=embedding,
output_names=[s for s in transition.apply.sequences
if s != 'mask'])
# Create a readout
readout_config = dict(
num_tokens=num_labels,
input_names=(transition.apply.states if use_states_for_readout else [])
+ [attention.take_glimpses.outputs[0]],
name="readout")
if post_merge_dims:
readout_config['merge_dim'] = post_merge_dims[0]
readout_config['post_merge'] = InitializableSequence([
Bias(post_merge_dims[0]).apply,
post_merge_activation.apply,
MLP([post_merge_activation] * (len(post_merge_dims) - 1) + [Identity()],
# MLP was not designed to support Maxout as an activation
# (because Maxout, in a way, is not one). However,
# a single-layer Maxout network works with the trick below.
# For a deeper Maxout network one has to use the
# Sequence brick.
[d//getattr(post_merge_activation, 'num_pieces', 1)
for d in post_merge_dims] + [num_labels]).apply,
], name='post_merge')
if 'reward' in criterion and criterion['name'] != 'log_likelihood':
if criterion['reward'] == 'edit_distance':
readout_config['reward_brick'] = EditDistanceReward(
self.bos_label, self.eos_label)
elif criterion['reward'] == 'delta_edit_distance':
readout_config['reward_brick'] = EditDistanceReward(
self.bos_label, self.eos_label, deltas=True)
elif criterion['reward'] == 'bleu':
readout_config['reward_brick'] = BleuReward(
self.bos_label, self.eos_label, deltas=False)
elif criterion['reward'] == 'delta_bleu':
readout_config['reward_brick'] = BleuReward(
self.bos_label, self.eos_label, deltas=True)
else:
raise ValueError("Unknown reward type")
if criterion['name'] == 'log_likelihood':
readout_class = SoftmaxReadout
elif criterion['name'] == 'critic':
readout_class = CriticReadout
criterion_copy = dict(criterion)
del criterion_copy['name']
readout_config.update(**criterion_copy)
elif criterion['name'] == 'reinforce':
readout_class = ReinforceReadout
readout_config['merge_names'] = list(readout_config['input_names'])
readout_config['entropy'] = criterion.get('entropy')
readout_config['input_names'] += ['attended', 'attended_mask']
elif criterion['name'] in ['sarsa', 'actor_critic']:
readout_class = ActorCriticReadout
if criterion['name'] == 'actor_critic':
critic_arguments = dict(all_arguments)
# No worries, critic will not compute log likelihood values.
# We
critic_arguments['criterion'] = {
'name': 'critic',
'value_softmax': criterion.get('value_softmax'),
'same_value_for_wrong': criterion.get('same_value_for_wrong'),
'groundtruth_word_bonus': criterion.get('groundtruth_word_bonus'),
'dueling_outputs': criterion.get('dueling_outputs')}
critic_arguments['name'] = 'critic'
if criterion.get('critic_uses_actor_states'):
critic_arguments['extra_input_dim'] = dim_dec
if (criterion.get('value_softmax')
or criterion.get('same_value_for_wrong')
or criterion.get('dueling_outputs')):
# Add an extra output for the critic
critic_arguments['num_labels'] = num_labels + 1
if criterion.get('force_bidir'):
critic_arguments['dims_bidir'] = [dim_dec]
critic_arguments['reuse_bottom_lookup_table'] = True
critic_arguments['input_num_chars'] = {'inputs': num_labels}
if criterion.get('downsize_critic'):
critic_arguments = _downsize_config(
critic_arguments, criterion['downsize_critic'])
critic = EncoderDecoder(**critic_arguments)
readout_config['critic'] = critic
readout_config['merge_names'] = list(readout_config['input_names'])
readout_config['freeze_actor'] = criterion.get('freeze_actor')
readout_config['freeze_critic'] = criterion.get('freeze_critic')
readout_config['critic_uses_actor_states'] = criterion.get('critic_uses_actor_states')
readout_config['critic_uses_groundtruth'] = criterion.get('critic_uses_groundtruth')
readout_config['critic_burnin_steps'] = criterion.get('critic_burnin_steps')
readout_config['critic_loss'] = criterion.get('critic_loss')
readout_config['discount'] = criterion.get('discount')
readout_config['entropy_reward_coof'] = criterion.get('entropy_reward_coof')
readout_config['cross_entropy_reward_coof'] = criterion.get('cross_entropy_reward_coof')
readout_config['value_penalty'] = criterion.get('value_penalty')
readout_config['value_penalty_type'] = criterion.get('value_penalty_type')
readout_config['critic_policy_t'] = criterion.get('critic_policy_t')
readout_config['bos_token'] = bos_label
readout_config['accumulate_outputs'] = criterion.get('accumulate_outputs')
readout_config['use_value_biases'] = criterion.get('use_value_biases')
readout_config['actor_grad_estimate'] = criterion.get('actor_grad_estimate')
readout_config['input_names'] += ['attended', 'attended_mask']
# Note that the settings below are for the "clean" mode.
# When get_cost_graph() is run with training=True, they
# are temporarily overridden with the "real" settings from
# "criterion"
readout_config['compute_targets'] = True
readout_config['trpo_coef'] = 0.0
readout_config['solve_bellman'] = True
else:
raise ValueError("Unknown criterion {}".format(criterion['name']))
readout = readout_class(**readout_config)
if lm:
raise ValueError("LM is currently not supported")
recurrent = AttentionRecurrent(transition, attention)
if extra_input_dim:
recurrent = RecurrentWithExtraInput(
recurrent, "extra_inputs", extra_input_dim, name="with_extra_inputs")
generator = SequenceGenerator(
recurrent=recurrent, readout=readout, feedback=feedback,
name="generator")
# Remember child bricks
self.encoder = encoder
self.bottom = bottom
self.top = top
self.generator = generator
self.softmax = Softmax()
self.children = [encoder, top, bottom, generator, self.softmax]
# Create input variables
self.inputs = self.bottom.batch_inputs
self.inputs_mask = self.bottom.mask
self.labels = tensor.lmatrix('labels')
self.labels_mask = tensor.matrix("labels_mask")
self.predicted_labels = tensor.lmatrix('predicted_labels')
self.predicted_mask = tensor.matrix('predicted_mask')
self.prefix_labels = tensor.lmatrix('prefix_labels')
self.prefix_steps = tensor.lscalar('prefix_steps')
self.single_inputs = self.bottom.single_inputs
self.single_labels = tensor.lvector('labels')
self.single_predicted_labels = tensor.lvector('predicted_labels')
self.n_steps = tensor.lscalar('n_steps')
# Configure mixed_generate
if criterion['name'] == 'actor_critic':
critic = self.generator.readout.critic
self.mixed_generate.sequences = []
self.mixed_generate.states = (
['step'] +
self.generator.recurrent.apply.states +
['critic_' + name for name in critic.generator.recurrent.apply.states])
self.mixed_generate.outputs = (
['samples', 'step'] +
self.generator.recurrent.apply.outputs +
['critic_' + name for name in critic.generator.recurrent.apply.outputs])
self.mixed_generate.contexts = (
self.generator.recurrent.apply.contexts +
['critic_' + name for name in critic.generator.recurrent.apply.contexts]
+ ['groundtruth', 'groundtruth_mask'])
self.initial_states.outputs = self.mixed_generate.states
self.prefix_generate.sequences = []
self.prefix_generate.states = ['step'] + self.generator.recurrent.apply.states
self.prefix_generate.outputs = ['samples', 'step'] + self.generator.recurrent.apply.outputs
self.prefix_generate.contexts = self.generator.recurrent.apply.contexts
def push_initialization_config(self):
super(EncoderDecoder, self).push_initialization_config()
if self.rec_weights_init:
rec_weights_config = {'weights_init': self.rec_weights_init,
'recurrent_weights_init': self.rec_weights_init}
global_push_initialization_config(self,
rec_weights_config,
BaseRecurrent)
if self.initial_states_init:
global_push_initialization_config(self,
{'initial_states_init': self.initial_states_init})
@application
def costs(self, **kwargs):
# pop inputs we know about
prediction = kwargs.pop('prediction')
prediction_mask = kwargs.pop('prediction_mask')
groundtruth = kwargs.pop('groundtruth', None)
groundtruth_mask = kwargs.pop('groundtruth_mask', None)
inputs_mask = kwargs.pop('inputs_mask')
extra_inputs = kwargs.pop('extra_inputs', None)
# the rest is for bottom
bottom_processed = self.bottom.apply(**kwargs)
encoded, encoded_mask = self.encoder.apply(
input_=bottom_processed, mask=inputs_mask)
encoded = self.top.apply(encoded)
costs_kwargs = dict(
prediction=prediction, prediction_mask=prediction_mask,
groundtruth=groundtruth, groundtruth_mask=groundtruth_mask,
attended=encoded, attended_mask=encoded_mask)
if extra_inputs:
costs_kwargs['extra_inputs'] = extra_inputs
return self.generator.costs(**costs_kwargs)
@application
def generate(self, return_initial_states=False, **kwargs):
inputs_mask = kwargs.pop('inputs_mask')
n_steps = kwargs.pop('n_steps')
encoded, encoded_mask = self.encoder.apply(
input_=self.bottom.apply(**kwargs),
mask=inputs_mask)
encoded = self.top.apply(encoded)
return self.generator.generate(
n_steps=n_steps if n_steps is not None else self.n_steps,
batch_size=encoded.shape[1],
attended=encoded,
attended_mask=encoded_mask,
return_initial_states=return_initial_states,
as_dict=True)
@recurrent
def prefix_generate(self, return_initial_states=True, **kwargs):
step = kwargs.pop('step')
sampling_inputs = dict_subset(
kwargs, self.generator.readout.sample.inputs)
samples, scores = self.generator.readout.sample(**sampling_inputs)
prefix_mask = tensor.lt(step, self.prefix_steps)
samples = (prefix_mask * self.prefix_labels[step[0]]
+ (1 - prefix_mask) * samples)
feedback = self.generator.feedback.apply(samples, as_dict=True)
states_contexts = dict_subset(
kwargs,
self.generator.recurrent.apply.states
+ self.generator.recurrent.apply.contexts)
states_outputs = self.generator.recurrent.apply(
as_dict=True, iterate=False,
**dict_union(feedback, states_contexts))
return ([samples, step + 1]
+ states_outputs.values())
@recurrent
def mixed_generate(self, return_initial_states=True, **kwargs):
critic = self.generator.readout.critic
groundtruth = kwargs.pop('groundtruth')
groundtruth_mask = kwargs.pop('groundtruth_mask')
step = kwargs.pop('step')
sampling_inputs = dict_subset(
kwargs, self.generator.readout.sample.inputs)
actor_scores = self.generator.readout.scores(**sampling_inputs)
critic_inputs = {
name: kwargs['critic_' + name]
for name in critic.generator.readout.merge_names}
critic_outputs = critic.generator.readout.outputs(
groundtruth, groundtruth_mask, **critic_inputs)
epsilon = numpy.array(self.generator.readout.epsilon,
dtype=theano.config.floatX)
actor_probs = tensor.exp(actor_scores)
# This is a poor man's 1-hot argmax
critic_probs = self.softmax.apply(critic_outputs * 1000)
probs = (actor_probs * (tensor.constant(1) - epsilon)
+ critic_probs * epsilon)
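# Sample from the mixed actor/critic distribution by inverse-CDF sampling:
# count how many cumulative-probability bins fall below a uniform draw.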
x = self.theano_rng.uniform(size=(probs.shape[0],))
samples = (tensor.gt(x[:, None], tensor.cumsum(probs, axis=1))
.astype(theano.config.floatX)
.sum(axis=1)
.astype('int64'))
samples = tensor.minimum(samples, probs.shape[1] - 1)
actor_feedback = self.generator.feedback.apply(samples, as_dict=True)
actor_states_contexts = dict_subset(
kwargs,
self.generator.recurrent.apply.states
+ self.generator.recurrent.apply.contexts)
actor_states_outputs = self.generator.recurrent.apply(
as_dict=True, iterate=False,
**dict_union(actor_feedback, actor_states_contexts))
critic_feedback = critic.generator.feedback.apply(samples, as_dict=True)
critic_states_contexts = {
name: kwargs['critic_' + name]
for name in
critic.generator.recurrent.apply.states
+ critic.generator.recurrent.apply.contexts}
critic_apply_kwargs = dict(
as_dict=True, iterate=False,
**dict_union(critic_feedback, critic_states_contexts))
if self.generator.readout.critic_uses_actor_states:
critic_apply_kwargs['extra_inputs'] = actor_states_outputs['states']
critic_states_outputs = critic.generator.recurrent.apply(**critic_apply_kwargs)
return ([samples, step + 1]
+ actor_states_outputs.values()
+ critic_states_outputs.values())
@application
def initial_states(self, batch_size, *args, **kwargs):
critic = self.generator.readout.critic
result = ([tensor.zeros((batch_size,), dtype='int64')]
+ self.generator.initial_states(batch_size, *args, **kwargs))
critic_kwargs = {name[7:]: kwargs[name] for name in kwargs if name.startswith('critic_')}
# This method can be called for two different recurrent application methods,
# "mixed_generate" and "prefix_generate". That's why this dirty hack is needed.
if critic_kwargs:
result += critic.generator.initial_states(batch_size, **critic_kwargs)
return result
def get_dim(self, name):
critic = self.generator.readout.critic
if name.startswith('critic_'):
return critic.generator.get_dim(name[7:])
elif name == 'step':
return 0
else:
return self.generator.get_dim(name)
@application
def mask_for_prediction(self, prediction, groundtruth_mask=None,
extra_generation_steps=None):
prediction_mask = tensor.lt(
tensor.cumsum(tensor.eq(prediction, self.eos_label)
.astype(theano.config.floatX), axis=0),
1).astype(theano.config.floatX)
prediction_mask = tensor.roll(prediction_mask, 1, 0)
prediction_mask = tensor.set_subtensor(
prediction_mask[0, :], tensor.ones_like(prediction_mask[0, :]))
if groundtruth_mask:
max_lengths = groundtruth_mask.sum(axis=0) + extra_generation_steps
prediction_mask *= tensor.lt(
tensor.arange(prediction.shape[0])[:, None], max_lengths[None, :])
return prediction_mask
def load_params(self, path):
cg = self.get_cost_graph()
with open(path, 'r') as src:
param_values = load_parameters(src)
Model(cg.outputs).set_parameter_values(param_values)
def get_generate_graph(self, use_mask=True, n_steps=None,
return_initial_states=False,
use_softmax_t=False):
if use_softmax_t:
self.generator.readout.softmax_t = self.criterion.get('softmax_t', 1.0)
inputs_mask = None
if use_mask:
inputs_mask = self.inputs_mask
result = self.generate(
n_steps=n_steps, inputs_mask=inputs_mask,
return_initial_states=return_initial_states,
**self.inputs)
self.generator.readout.softmax_t = 1.
return result
def get_mixed_generate_graph(self, n_steps=None,
return_initial_states=False):
critic = self.generator.readout.critic
attended, attended_mask = self.encoder.apply(
input_=self.bottom.apply(**self.inputs),
mask=self.inputs_mask)
attended = self.top.apply(attended)
critic_attended, critic_attended_mask = critic.encoder.apply(
input_=critic.bottom.apply(inputs=self.labels),
mask=self.labels_mask)
critic_attended = critic.top.apply(critic_attended)
return self.mixed_generate(
n_steps=n_steps, batch_size=attended.shape[1],
return_initial_states=return_initial_states, as_dict=True,
attended=attended, attended_mask=attended_mask,
critic_attended=critic_attended, critic_attended_mask=critic_attended_mask,
groundtruth=self.labels, groundtruth_mask=self.labels_mask)
def get_prefix_generate_graph(self, n_steps=None,
return_initial_states=False):
attended, attended_mask = self.encoder.apply(
input_=self.bottom.apply(**self.inputs),
mask=self.inputs_mask)
attended = self.top.apply(attended)
return self.prefix_generate(
n_steps=n_steps, batch_size=attended.shape[1],
return_initial_states=return_initial_states, as_dict=True,
attended=attended, attended_mask=attended_mask)
def get_cost_graph(self, batch=True, use_prediction=False,
training=False, groundtruth_as_predictions=False,
with_mixed_generation=False):
# "use_predictions" means use the Theano input variable
# for predictions.
readout = self.generator.readout
if training and self.criterion['name'] == 'actor_critic':
logger.debug("Switching to training mode")
readout.compute_targets = self.compute_targets
readout.trpo_coef = self.criterion.get('trpo_coef', 0.0)
if 'solve_bellman' in self.criterion:
readout.solve_bellman = self.criterion['solve_bellman']
if with_mixed_generation and 'epsilon' in self.criterion:
readout.epsilon = self.criterion['epsilon']
if batch:
inputs, inputs_mask = self.inputs, self.inputs_mask
groundtruth, groundtruth_mask = self.labels, self.labels_mask
prediction, prediction_mask = self.predicted_labels, self.predicted_mask
else:
inputs, inputs_mask = self.bottom.single_to_batch_inputs(
self.single_inputs)
groundtruth = self.single_labels[:, None]
groundtruth_mask = self.mask_for_prediction(groundtruth)
prediction = self.single_predicted_labels[:, None]
prediction_mask = self.mask_for_prediction(prediction)
if self.cost_involves_generation() and not groundtruth_as_predictions:
if ((training and self.generate_predictions) or
(not training and not use_prediction)):
generation_routine = (self.get_mixed_generate_graph
if with_mixed_generation
else self.get_generate_graph)
generated = generation_routine(
n_steps=self.labels.shape[0] + self.extra_generation_steps)
prediction = disconnected_grad(generated['samples'])
prediction_mask = self.mask_for_prediction(
prediction, groundtruth_mask, self.extra_generation_steps)
else:
logger.debug("Using provided predictions")
cost = self.costs(inputs_mask=inputs_mask,
prediction=prediction, prediction_mask=prediction_mask,
groundtruth=groundtruth, groundtruth_mask=groundtruth_mask,
**inputs)
else:
if use_prediction:
cost = self.costs(inputs_mask=inputs_mask,
prediction=prediction, prediction_mask=prediction_mask,
**inputs)
else:
cost = self.costs(inputs_mask=inputs_mask,
prediction=groundtruth, prediction_mask=groundtruth_mask,
groundtruth=groundtruth, groundtruth_mask=groundtruth_mask,
**inputs)
cost_cg = ComputationGraph(cost)
# This *has to* be done only when
# "training" or "with_mixed_generation" is True,
# but it does not hurt to do it every time.
logger.debug("Switching back to the normal mode")
readout = self.generator.readout
readout.compute_targets = True
readout.trpo_coef = 0.0
readout.solve_bellman = True
readout.epsilon = 0.
return cost_cg
def analyze(self, inputs, groundtruth, prediction):
"""Compute cost and aligment."""
if not hasattr(self, "_analyze"):
input_variables = list(self.single_inputs.values())
input_variables.append(self.single_labels)
input_variables.append(self.single_predicted_labels)
cg = self.get_cost_graph(batch=False, use_prediction=True)
costs = cg.outputs[0]
weights, = VariableFilter(
bricks=[self.generator], name="weights")(cg)
energies = VariableFilter(
bricks=[self.generator], name="energies")(cg)
energies_output = [energies[0][:, 0, :] if energies
else tensor.zeros_like(weights)]
self._analyze = theano.function(
input_variables,
[costs[0], weights[:, 0, :]] + energies_output,
on_unused_input='warn')
input_values_dict = dict(inputs)
input_values_dict['labels'] = groundtruth
input_values_dict['predicted_labels'] = prediction
return self._analyze(**input_values_dict)
def init_beam_search(self, beam_size):
"""Compile beam search and set the beam size.
See Blocks issue #500.
"""
if hasattr(self, '_beam_search') and self.beam_size == beam_size:
# Only recompile if the user wants a different beam size
return
self.beam_size = beam_size
generated = self.get_generate_graph(use_mask=False, n_steps=3)
cg = ComputationGraph(generated.values())
samples, = VariableFilter(
applications=[self.generator.generate], name="samples")(cg)
self._beam_search = BeamSearch(beam_size, samples)
self._beam_search.compile()
def beam_search(self, inputs, **kwargs):
# When a recognizer is unpickled, self.beam_size is available
# but beam search has to be recompiled.
self.init_beam_search(self.beam_size)
inputs = dict(inputs)
max_length = int(self.bottom.num_time_steps(**inputs) /
self.max_decoded_length_scale)
search_inputs = {}
for var in self.inputs.values():
search_inputs[var] = inputs.pop(var.name)[:, numpy.newaxis, ...]
if inputs:
raise Exception(
'Unknown inputs passed to beam search: {}'.format(
inputs.keys()))
outputs, search_costs = self._beam_search.search(
search_inputs, self.eos_label,
max_length,
ignore_first_eol=self.data_prepend_eos,
**kwargs)
return outputs, search_costs
def init_generate(self):
generated = self.get_generate_graph(use_mask=False)
cg = ComputationGraph(generated['samples'])
self._do_generate = cg.get_theano_function()
def sample(self, inputs, n_steps=None):
if not hasattr(self, '_do_generate'):
self.init_generate()
batch, unused_mask = self.bottom.single_to_batch_inputs(inputs)
batch['n_steps'] = n_steps if n_steps is not None \
else int(self.bottom.num_time_steps(**batch) /
self.max_decoded_length_scale)
sample = self._do_generate(**batch)[0]
sample = list(sample[:, 0])
if self.eos_label in sample:
sample = sample[:sample.index(self.eos_label) + 1]
return sample
def __getstate__(self):
state = dict(self.__dict__)
for attr in ['_analyze', '_beam_search']:
state.pop(attr, None)
return state
def __setstate__(self, state):
self.__dict__.update(state)
# To use bricks used on a GPU first on a CPU later
try:
emitter = self.generator.readout.emitter
del emitter._theano_rng
except:
pass
def cost_involves_generation(self):
return self.criterion['name'] in ['reinforce', 'sarsa', 'actor_critic']
|
172777
|
import sqlite3
import xml.etree.ElementTree as et
from pymzml import spec
from pymzml.run import Reader
def create_database_from_file(db_name, file_path):
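# Read an mzML file and store each spectrum's XML string in an SQLite table.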
conn = sqlite3.connect(db_name + ".db")
Run = Reader("./tests/data/example.mzML")
with conn:
cursor = conn.cursor()
cursor.execute("CREATE TABLE Spectra(ID INT, xml TEXT)")
for spectrum in Run:
params = (spectrum.ID, spectrum.to_string())
cursor.execute("INSERT INTO Spectra VALUES(?, ?)", params)
return True
class SQLiteDatabase(object):
"""
Example implementation of a database Connector,
which can be used to make :py:func:`pymzml.run.Reader` accept paths to
sqlite db files.
We initialize with a path to a database and implement
a custom __getitem__ function to retrieve the spectra
"""
def __init__(self, path):
"""
"""
connection = sqlite3.connect(path)
self.cursor = connection.cursor()
def __getitem__(self, key):
"""
Execute a SQL request, process the data and return a spectrum object.
Args:
key (str or int): unique identifier for the given spectrum in the
database
"""
self.cursor.execute("SELECT * FROM spectra WHERE id=?", key)
ID, element = self.cursor.fetchone()
element = et.XML(element)
if "spectrum" in element.tag:
spectrum = spec.Spectrum(element)
elif "chromatogram" in element.tag:
spectrum = spec.Chromatogram(element)
return spectrum
def get_spectrum_count(self):
self.cursor.execute("SELECT COUNT(*) from spectra")
num = self.cursor.fetchone()[0]
return num
def read(self, size=-1):
# TODO: implement read so that it starts reading at the first ID and,
# once the end is reached, switches to the next ID, and so on ...
return '<spectrum index="0" id="controllerType=0 controllerNumber=1 scan=1" defaultArrayLength="917"></spectrum>\n'
if __name__ == "__main__":
# This is what the Reader class does
my_iter = iter(et.iterparse(SQLiteDatabase("test.db")))
# Now you can iter your database
for x in my_iter:
print(x)
# Retrieve a specific spectrum from your database
db = SQLiteDatabase("test.db")
unique_id = 5
my_spec = db[unique_id]
|
172793
|
import os
import yaml
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
from telegram.ext import ConversationHandler
from commons import authentication, checkAdmin, checkId
from config import config
from translations import i18n
config = config["transmission"]
TSL_LIMIT = 'limited'
TSL_NORMAL = 'normal'
def transmission(update, context):
if not config["enable"]:
context.bot.send_message(
chat_id=update.effective_message.chat_id,
text=i18n.t("addarr.Transmission.NotEnabled"),
)
return ConversationHandler.END
if not checkId(update):
context.bot.send_message(
chat_id=update.effective_message.chat_id, text=i18n.t("addarr.Authorize")
)
return TSL_NORMAL
if not checkAdmin(update):
context.bot.send_message(
chat_id=update.effective_message.chat_id,
text=i18n.t("addarr.NotAdmin"),
)
return TSL_NORMAL
keyboard = [[
InlineKeyboardButton(
'\U0001F40C '+i18n.t("addarr.Transmission.TSL"),
callback_data=TSL_LIMIT
),
InlineKeyboardButton(
'\U0001F406 '+i18n.t("addarr.Transmission.Normal"),
callback_data=TSL_NORMAL
),
]]
markup = InlineKeyboardMarkup(keyboard)
update.message.reply_text(
i18n.t("addarr.Transmission.Speed"), reply_markup=markup
)
return TSL_NORMAL
def changeSpeedTransmission(update, context):
if not checkId(update):
if (
authentication(update, context) == "added"
): # To also stop the beginning command
return ConversationHandler.END
choice = update.callback_query.data
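# Build the transmission-remote command; credentials are appended only when
# authentication is enabled in the config.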
command = f"transmission-remote {config['host']}"
if config["authentication"]:
command += (
" --auth "
+ config["username"]
+ ":"
+ config["password"]
)
message = None
if choice == TSL_NORMAL:
command += ' --no-alt-speed'
message = i18n.t("addarr.Transmission.ChangedToNormal")
elif choice == TSL_LIMIT:
command += ' --alt-speed'
message = i18n.t("addarr.Transmission.ChangedToTSL")
os.system(command)
context.bot.send_message(
chat_id=update.effective_message.chat_id,
text=message,
)
return ConversationHandler.END
|
172853
|
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic.base import RedirectView
from django.contrib.auth import views as auth_views
urlpatterns = [
# Examples:
# url(r'^$', 'bhr_site.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', admin.site.urls),
url(r'^bhr/', include('bhr.urls')),
url(r'^accounts/login/$', auth_views.LoginView.as_view(template_name='login.html'), name='accounts_login'),
url(r'^accounts/logout/$', auth_views.LogoutView.as_view(), name='logout'),
url(r'^$', RedirectView.as_view(url='/bhr', permanent=False), name='siteroot'),
]
|
172856
|
import numpy
import csb.test as test
from csb.numeric import log
from csb.statistics.pdf.parameterized import ParameterizedDensity
from csb.statistics.pdf.parameterized import ParameterValueError, ParameterizationError
from csb.statistics.pdf.parameterized import AbstractParameter, Parameter, NonVirtualParameter
class Location(NonVirtualParameter):
def _validate(self, value):
return float(value)
class Scale(Parameter):
def _validate(self, value):
return float(value)
def _compute(self, base_value):
if base_value == 0.0:
return numpy.inf
else:
return 1.0 / base_value ** 0.5
def bind_to(self, base):
if base.name != "precision":
raise ValueError(base)
super(Scale, self).bind_to(base)
class DoubleScale(Parameter):
def _validate(self, value):
return float(value)
def _compute(self, base_value):
return base_value * 2.0
class Precision(Parameter):
def _validate(self, value):
if value < 0:
raise ParameterValueError(self.name, value)
return float(value)
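# Gaussian density parameterized by location (mu) and precision; sigma is a
# virtual parameter bound to precision and computed as 1 / sqrt(precision).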
class FancyGaussian(ParameterizedDensity):
def __init__(self, mu=0, precision=1):
super(FancyGaussian, self).__init__()
self._register('mu')
self._register('sigma')
self._register('precision')
loc = Location(mu)
prec = Precision(precision)
sigma = Scale(0)
sigma.bind_to(prec)
self.set_params(mu=loc, sigma=sigma, precision=prec)
@property
def mu(self):
return self['mu'].value
@property
def sigma(self):
return self['sigma'].value
@property
def precision(self):
return self['precision'].value
def log_prob(self, x):
mu = self.mu
sigma = self.sigma
return log(1.0 / numpy.sqrt(2 * numpy.pi * sigma ** 2)) - (x - mu) ** 2 / (2 * sigma ** 2)
@test.unit
class TestAbstractGenericParameter(test.Case):
"""
Use AbstractParameter as a generic class which accepts values
of any type.
"""
def setUp(self):
class Value(object):
pass
class Param(AbstractParameter):
def _validate(self, value):
if not isinstance(value, Value):
raise TypeError(value)
return value
self.value = Value()
self.param = Param(self.value)
def testValue(self):
self.assertIs(self.param.value, self.value)
def testSet(self):
self.assertRaises(TypeError, self.param.set, 3)
@test.unit
class TestParameter(test.Case):
"""
This is the main test case with complete coverage for AbstractParameter's
methods and behavior. It also covers Parameter.

Parameter dependency graph used in the tests:

           computed -- leaf
          /
    base -- computed2
          \
           computed3
"""
def setUp(self):
self.base = Precision(1.2)
self.computed = Scale(100, base=self.base)
self.computed2 = Scale(200, base=self.base)
self.computed3 = Scale(300, base=self.base)
self.leaf = DoubleScale(400, base=self.computed)
def testConstructor(self):
# make sure newly constructed parameters are left in a consistent state
# to avoid unnecessary consistency updates
self.assertTrue(self.base._consistent)
self.assertTrue(Scale(1)._consistent)
def testName(self):
self.assertEqual(self.base.name, "precision")
self.assertEqual(self.computed.name, "scale")
self.assertEqual(Scale(name="TesT").name, "TesT")
def testValue(self):
self.assertEqual(self.base.value, 1.2)
self.assertEqual(self.computed.value, 1.0 / numpy.sqrt(self.base.value))
self.assertEqual(self.computed2.value, 1.0 / numpy.sqrt(self.base.value))
self.assertEqual(self.leaf.value, self.computed.value * 2)
# turn self.base into a virtual parameter
self.base.bind_to(Precision(12.2))
self.assertEqual(self.base.value, 12.2)
def testIsVirtual(self):
self.assertFalse(self.base.is_virtual)
self.assertTrue(self.computed.is_virtual)
self.base.bind_to(Precision(12.2))
self.assertTrue(self.base.is_virtual)
def testSet(self):
base_initial_value = self.base._value
# recompute all derivatives from the initial value of base
self.assertEqual(self.computed._value, 100)
self.leaf._ensure_consistency()
self.computed2._ensure_consistency()
self.computed3._ensure_consistency()
# set self.base - it should remain consistent because it is not computed
self.assertTrue(self.base._consistent)
self.base.set(2.2)
self.assertTrue(self.base._consistent)
self.assertEqual(self.base.value, 2.2)
# self.computed and self.leaf should be inconsistent now that their base is updated
self.assertFalse(self.computed._consistent)
self.assertFalse(self.leaf._consistent)
self.assertEqual(self.computed._value, 1.0 / numpy.sqrt(base_initial_value))
self.assertEqual(self.leaf._value, 2.0 / numpy.sqrt(base_initial_value))
# retrieving self.computed's value should trigger updates up to self.computed
recomputed = self.computed.value
self.assertTrue(self.computed._consistent)
self.assertEqual(recomputed, 1.0 / numpy.sqrt(self.base._value))
# self.leaf is still inconsistent
self.assertFalse(self.leaf._consistent)
self.assertEqual(self.leaf._value, 2.0 / numpy.sqrt(base_initial_value))
self.assertIs(self.leaf._nearest_consistent_base()[-1], self.computed)
# until we request its value
recomputed = self.leaf.value
self.assertTrue(self.leaf._consistent)
self.assertEqual(recomputed, 2.0 / numpy.sqrt(self.base._value))
self.assertEqual(recomputed, 2.0 * self.computed._value)
# make sure the other two branches are still inconsistent
initial_value = 1.0 / numpy.sqrt(base_initial_value)
self.assertEqual(self.computed2._value, initial_value)
self.assertEqual(self.computed3._value, initial_value)
# until they get used
recomputed = self.computed2.value
self.assertTrue(self.computed2._consistent)
self.assertEqual(recomputed, 1.0 / numpy.sqrt(self.base._value))
# attempt to set self.computed - not allowed
self.assertRaises(ParameterizationError, self.computed.set, 2)
# attempt to set a negative Precision
self.assertRaises(ParameterValueError, self.base.set, -2)
# attempt to assign a non-float - not allowed in the Parameter specialization
self.assertRaises(ParameterValueError, Parameter().set, object())
def testBindTo(self):
# can't bind self.base to itself
self.assertRaises(ParameterizationError, self.base.bind_to, self.base)
# deeper circular dependency
self.assertRaises(ParameterizationError, self.base.bind_to, self.computed)
# self.base is not virtual and therefore must be consistent
self.assertTrue(self.base._consistent)
# make it virtual - should get inconsistent now
self.base.bind_to(Precision(12.2))
self.assertFalse(self.base._consistent)
self.assertTrue(self.base.is_virtual)
# retrieving its value should trigger the consistency cascade
self.assertEqual(self.base.value, 12.2)
self.assertTrue(self.base._consistent)
def testFindBaseParameter(self):
self.assertIs(self.base.find_base_parameter(), self.base)
self.assertIs(self.computed.find_base_parameter(), self.base)
@test.unit
class TestNonVirtualParameter(test.Case):
"""
Make sure explicit NonVirtualParameter-s are updatable and
refuse binding requests
"""
def setUp(self):
self.param = Location()
def testConstructor(self):
base = Parameter()
self.assertRaises(ParameterizationError, lambda: Location(base=base))
def testIsVirtual(self):
self.assertFalse(self.param.is_virtual)
def testBindTo(self):
base = Parameter()
self.assertRaises(ParameterizationError, self.param.bind_to, base)
def testSet(self):
self.param.set(22)
self.assertEqual(self.param.value, 22)
@test.unit
class TestParameterizedDensity(test.Case):
def setUp(self):
self.pdf = FancyGaussian(2, 5)
def testConstructor(self):
class Density(ParameterizedDensity):
def __init__(self, p):
super(Density, self).__init__()
self._register('p')
self.set_params(p=p)
def log_prob(self, x):
return x
self.assertRaises(TypeError, Density, 4)
def testProperties(self):
self.assertEqual(self.pdf.mu, 2)
self.assertEqual(self.pdf.precision, 5)
self.assertAlmostEqual(self.pdf.sigma, 0.4472, places=3)
def testParameterChaining(self):
self.assertEqual(self.pdf.precision, 5)
self.assertAlmostEqual(self.pdf.sigma, 0.4472, places=3)
self.pdf['precision'].set(2)
self.assertEqual(self.pdf.precision, 2)
self.assertAlmostEqual(self.pdf.sigma, 0.7071, places=3)
def testAssignment(self):
self.pdf['sigma'] = Scale(55)
self.assertEqual(self.pdf.sigma, 55)
self.assertEqual(self.pdf['sigma'].name, 'scale')
def assign(i):
self.pdf['sigma'] = i
self.assertRaises(TypeError, assign, 55)
if __name__ == '__main__':
test.Console()
|
172858
|
import os
from click.testing import CliRunner
from twine.utils import TEST_REPOSITORY
from hatch.cli import hatch
from hatch.env import install_packages
from hatch.settings import SETTINGS_FILE, copy_default_settings, save_settings
from hatch.utils import env_vars, temp_chdir, temp_move_path
from hatch.venv import create_venv, venv
from ..utils import requires_internet
PACKAGE_NAME = 'e00f69943529ccc38058'
USERNAME = '__token__'
PASSWORD = (
'<KEY>'
'<KEY>'
)
ENV_VARS = {'TWINE_PASSWORD': PASSWORD}
@requires_internet
def test_cwd():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['init', PACKAGE_NAME, '--basic', '-ne'])
runner.invoke(hatch, ['build'])
os.chdir(os.path.join(d, 'dist'))
with env_vars(ENV_VARS):
result = runner.invoke(hatch, ['release', '-u', USERNAME, '-t'])
assert result.exit_code == 0
@requires_internet
def test_username_env():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['init', PACKAGE_NAME, '--basic', '-ne'])
runner.invoke(hatch, ['build'])
os.chdir(os.path.join(d, 'dist'))
with temp_move_path(SETTINGS_FILE, d):
settings = copy_default_settings()
settings['pypi_username'] = ''
save_settings(settings)
extra_env_vars = {'TWINE_USERNAME': USERNAME, **ENV_VARS}
with env_vars(extra_env_vars):
result = runner.invoke(hatch, ['release', '-t'])
assert result.exit_code == 0
@requires_internet
def test_cwd_dist_exists():
with temp_chdir():
runner = CliRunner()
runner.invoke(hatch, ['init', PACKAGE_NAME, '--basic', '-ne'])
runner.invoke(hatch, ['build'])
with env_vars(ENV_VARS):
result = runner.invoke(hatch, ['release', '-u', USERNAME, '-t'])
assert result.exit_code == 0
@requires_internet
def test_package():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['new', PACKAGE_NAME, '--basic', '-ne'])
runner.invoke(hatch, ['build', '-p', PACKAGE_NAME])
package_dir = os.path.join(d, PACKAGE_NAME)
venv_dir = os.path.join(d, 'venv')
create_venv(venv_dir)
with venv(venv_dir, evars=ENV_VARS):
os.chdir(package_dir)
install_packages(['-e', '.'])
os.chdir(d)
result = runner.invoke(hatch, ['release', PACKAGE_NAME, '-u', USERNAME, '-t'])
assert result.exit_code == 0
def test_package_not_exist():
with temp_chdir() as d:
runner = CliRunner()
venv_dir = os.path.join(d, 'venv')
create_venv(venv_dir)
with venv(venv_dir, evars=ENV_VARS):
result = runner.invoke(hatch, ['release', PACKAGE_NAME, '-u', USERNAME, '-t'])
assert result.exit_code == 1
assert '`{}` is not an editable package.'.format(PACKAGE_NAME) in result.output
@requires_internet
def test_local():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['new', PACKAGE_NAME, '--basic', '-ne'])
runner.invoke(hatch, ['build', '-p', PACKAGE_NAME])
package_dir = os.path.join(d, PACKAGE_NAME)
venv_dir = os.path.join(d, 'venv')
create_venv(venv_dir)
with venv(venv_dir, evars=ENV_VARS):
install_packages(['-e', package_dir])
result = runner.invoke(hatch, ['release', '-l', '-u', USERNAME, '-t'])
assert result.exit_code == 0
def test_local_not_exist():
with temp_chdir() as d:
runner = CliRunner()
venv_dir = os.path.join(d, 'venv')
create_venv(venv_dir)
with venv(venv_dir):
result = runner.invoke(hatch, ['release', '-l'])
assert result.exit_code == 1
assert 'There are no local packages available.' in result.output
@requires_internet
def test_local_multiple():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['new', 'ok', '--basic', '-ne'])
runner.invoke(hatch, ['new', 'ko', '--basic', '-ne'])
venv_dir = os.path.join(d, 'venv')
create_venv(venv_dir)
with venv(venv_dir):
install_packages(['-e', os.path.join(d, 'ok')])
install_packages(['-e', os.path.join(d, 'ko')])
result = runner.invoke(hatch, ['release', '-l'])
assert result.exit_code == 1
assert (
'There are multiple local packages available. '
'Select one with the optional argument.'
) in result.output
@requires_internet
def test_path_relative():
with temp_chdir():
runner = CliRunner()
runner.invoke(hatch, ['init', PACKAGE_NAME, '--basic', '-ne'])
runner.invoke(hatch, ['build'])
with env_vars(ENV_VARS):
result = runner.invoke(hatch, ['release', '-p', 'dist', '-u', USERNAME, '-t'])
print(result.output)
assert result.exit_code == 0
@requires_internet
def test_path_full():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['new', PACKAGE_NAME, '--basic', '-ne'])
runner.invoke(hatch, ['new', 'ko', '--basic', '-ne'])
runner.invoke(hatch, ['build', '-p', PACKAGE_NAME])
build_dir = os.path.join(d, PACKAGE_NAME, 'dist')
os.chdir(os.path.join(d, 'ko'))
with env_vars(ENV_VARS):
result = runner.invoke(hatch, ['release', '-p', build_dir, '-u', USERNAME, '-t'])
assert result.exit_code == 0
def test_path_full_not_exist():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['new', PACKAGE_NAME, '--basic', '-ne'])
full_path = os.path.join(d, 'dist')
result = runner.invoke(hatch, ['release', '-p', full_path])
assert result.exit_code == 1
assert 'Directory `{}` does not exist.'.format(full_path) in result.output
@requires_internet
def test_config_username():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['init', PACKAGE_NAME, '--basic', '-ne'])
runner.invoke(hatch, ['build'])
with temp_move_path(SETTINGS_FILE, d):
settings = copy_default_settings()
settings['pypi_username'] = USERNAME
save_settings(settings)
with env_vars(ENV_VARS):
result = runner.invoke(hatch, ['release', '-p', 'dist', '-t'])
assert result.exit_code == 0
def test_config_not_exist():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['init', PACKAGE_NAME, '--basic', '-ne'])
runner.invoke(hatch, ['build'])
with temp_move_path(SETTINGS_FILE, d):
with env_vars(ENV_VARS):
result = runner.invoke(hatch, ['release', '-p', 'dist', '-t'])
assert result.exit_code == 1
assert 'Unable to locate config file. Try `hatch config --restore`.' in result.output
def test_config_username_empty():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['init', PACKAGE_NAME, '--basic', '-ne'])
runner.invoke(hatch, ['build'])
with temp_move_path(SETTINGS_FILE, d):
settings = copy_default_settings()
settings['pypi_username'] = ''
save_settings(settings)
with env_vars(ENV_VARS):
result = runner.invoke(hatch, ['release', '-p', 'dist', '-t'])
assert result.exit_code == 1
assert (
'A username must be supplied via -u/--username, '
'in {} as pypi_username, or in the TWINE_USERNAME environment variable.'.format(SETTINGS_FILE)
) in result.output
def test_strict():
with temp_chdir():
runner = CliRunner()
runner.invoke(hatch, ['init', PACKAGE_NAME, '--basic', '-ne'])
runner.invoke(hatch, ['build'])
with env_vars(ENV_VARS):
result = runner.invoke(hatch, ['release', '-p', 'dist', '-u', USERNAME, '-t', '-s'])
assert result.exit_code == 1
def test_repository_local():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['new', PACKAGE_NAME, '--basic', '-ne'])
runner.invoke(hatch, ['build', '-p', PACKAGE_NAME])
package_dir = os.path.join(d, PACKAGE_NAME)
venv_dir = os.path.join(d, 'venv')
create_venv(venv_dir)
# Make sure there's no configuration
with temp_move_path(os.path.expanduser("~/.pypirc"), d):
with venv(venv_dir, evars=ENV_VARS):
install_packages(['-e', package_dir])
# Will error, since there's no configuration parameter for
# this URL
result = runner.invoke(hatch, ['release', '-l', '-u', USERNAME, '-r', TEST_REPOSITORY])
assert result.exit_code == 1
@requires_internet
def test_repository_url_local():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['new', PACKAGE_NAME, '--basic', '-ne'])
runner.invoke(hatch, ['build', '-p', PACKAGE_NAME])
package_dir = os.path.join(d, PACKAGE_NAME)
venv_dir = os.path.join(d, 'venv')
create_venv(venv_dir)
with venv(venv_dir, evars=ENV_VARS):
install_packages(['-e', package_dir])
result = runner.invoke(hatch, ['release', '-l', '-u', USERNAME,
'--repo-url', TEST_REPOSITORY])
assert result.exit_code == 0
@requires_internet
def test_repository_and_repository_url_local():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['new', PACKAGE_NAME, '--basic', '-ne'])
runner.invoke(hatch, ['build', '-p', PACKAGE_NAME])
package_dir = os.path.join(d, PACKAGE_NAME)
venv_dir = os.path.join(d, 'venv')
create_venv(venv_dir)
with venv(venv_dir, evars=ENV_VARS):
install_packages(['-e', package_dir])
result = runner.invoke(hatch, ['release', '-l', '-u', USERNAME,
'--repo', TEST_REPOSITORY,
'--repo-url', TEST_REPOSITORY])
assert result.exit_code == 0
@requires_internet
def test_repository_env_vars():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['new', PACKAGE_NAME, '--basic', '-ne'])
runner.invoke(hatch, ['build', '-p', PACKAGE_NAME])
package_dir = os.path.join(d, PACKAGE_NAME)
venv_dir = os.path.join(d, 'venv')
create_venv(venv_dir)
extra_env_vars = {'TWINE_REPOSITORY': TEST_REPOSITORY, 'TWINE_REPOSITORY_URL': TEST_REPOSITORY, **ENV_VARS}
with venv(venv_dir, evars=extra_env_vars):
install_packages(['-e', package_dir])
result = runner.invoke(hatch, ['release', '-l', '-u', USERNAME])
assert result.exit_code == 0
@requires_internet
def test_repository_and_test():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['new', PACKAGE_NAME, '--basic', '-ne'])
runner.invoke(hatch, ['build', '-p', PACKAGE_NAME])
package_dir = os.path.join(d, PACKAGE_NAME)
venv_dir = os.path.join(d, 'venv')
create_venv(venv_dir)
with venv(venv_dir, evars=ENV_VARS):
install_packages(['-e', package_dir])
result = runner.invoke(hatch, ['release', '-l', '-u', USERNAME,
'-r', TEST_REPOSITORY,
'-t'])
assert result.exit_code == 1
assert "Cannot specify both --test and --repo." in result.output
with venv(venv_dir, evars=ENV_VARS):
result = runner.invoke(hatch, ['release', '-l', '-u', USERNAME,
'--repo-url', TEST_REPOSITORY,
'-t'])
assert result.exit_code == 1
assert "Cannot specify both --test and --repo-url." in result.output
with venv(venv_dir, evars=ENV_VARS):
result = runner.invoke(hatch, ['release', '-l', '-u', USERNAME,
'-r', TEST_REPOSITORY,
'-ru', TEST_REPOSITORY,
'-t'])
assert result.exit_code == 1
assert "Cannot specify both --test and --repo." in result.output
assert "Cannot specify both --test and --repo-url." in result.output
|
172863
|
def file_url(input_dict):
from discomll import dataset
if input_dict["range"] == "true":
urls = [url.strip() for url in input_dict["url"].split("\n") if url != ""]
else:
urls = [[url.strip()] for url in input_dict["url"].split("\n") if url != ""]
    for url in urls:
        url_str = url if isinstance(url, str) else url[0]
        if url_str.split("://")[0] == "https":
            raise Exception("Dataset should be accessible over HTTP.")
del (input_dict["url"])
X_indices_splited = input_dict["X_indices"].replace(" ", "").split("-")
if len(X_indices_splited) == 2:
a, b = X_indices_splited
if not a.isdigit() or not b.isdigit():
raise Exception("Feature indices should be integers. Example: 1-10")
X_indices = range(int(a), int(b))
else:
X_indices = [int(v) for v in input_dict["X_indices"].replace(" ", "").split(",") if v != ""]
del (input_dict["X_indices"])
input_dict["data_type"] = "gzip" if input_dict["data_type"] == "true" else ""
if input_dict["atr_meta"] == "numeric":
X_meta = ["c" for i in range(len(X_indices))]
elif input_dict["atr_meta"] == "discrete":
X_meta = ["d" for i in range(len(X_indices))]
else:
X_meta = input_dict["custom"]
data = dataset.Data(data_tag=urls,
X_indices=X_indices,
X_meta=X_meta,
generate_urls=True if input_dict["range"] == "true" else False,
**input_dict)
    print(data.params)
return {"dataset": data}
def big_data_apply_classifier(input_dict):
if "naivebayes_fitmodel" in input_dict["fitmodel_url"]:
return naivebayes_predict(input_dict)
elif "logreg_fitmodel" in input_dict["fitmodel_url"]:
return logreg_predict(input_dict)
elif "linsvm_fitmodel" in input_dict["fitmodel_url"]:
return linsvm_predict(input_dict)
elif "kmeans_fitmodel" in input_dict["fitmodel_url"]:
return kmeans_predict(input_dict)
elif "fddt_fitmodel" in input_dict["fitmodel_url"]:
return dt_predict(input_dict)
elif "drf_fitmodel" in input_dict["fitmodel_url"]:
return rf_predict(input_dict)
elif "dwfr_fitmodel" in input_dict["fitmodel_url"]:
return wrf_predict(input_dict)
elif "linreg_fitmodel" in input_dict["fitmodel_url"]:
return linreg_predict(input_dict)
def lwlr_fit_predict(input_dict):
from discomll.regression import locally_weighted_linear_regression
predictions_url = locally_weighted_linear_regression.fit_predict(
fitting_data=input_dict["fitting_dataset"],
training_data=input_dict["training_dataset"],
tau=input_dict["tau"],
save_results=True)
return {"string": predictions_url}
def dt_fit(input_dict):
from discomll.ensemble import forest_distributed_decision_trees
random_state = None if input_dict["seed"] == "None" else int(input_dict["seed"])
fitmodel_url = forest_distributed_decision_trees.fit(input_dict["dataset"],
trees_per_chunk=input_dict["trees_per_subset"],
max_tree_nodes=input_dict["tree_nodes"],
min_samples_leaf=input_dict["min_samples_leaf"],
min_samples_split=input_dict["min_samples_split"],
class_majority=input_dict["majority"],
bootstrap=input_dict["bootstrap"] == "true",
measure=input_dict["measure"],
accuracy=input_dict["accuracy"],
separate_max=input_dict["separate_max"] == "true",
random_state=random_state,
save_results=True)
return {"fitmodel_url": fitmodel_url}
def dt_predict(input_dict):
from discomll.ensemble import forest_distributed_decision_trees
predictions_url = forest_distributed_decision_trees.predict(input_dict["dataset"],
fitmodel_url=input_dict["fitmodel_url"],
save_results=True)
return {"string": predictions_url}
def rf_fit(input_dict):
from discomll.ensemble import distributed_random_forest
random_state = None if input_dict["seed"] == "None" else int(input_dict["seed"])
fitmodel_url = distributed_random_forest.fit(input_dict["dataset"],
trees_per_chunk=input_dict["trees_per_subset"],
max_tree_nodes=input_dict["tree_nodes"],
min_samples_leaf=input_dict["min_samples_leaf"],
min_samples_split=input_dict["min_samples_split"],
class_majority=input_dict["majority"],
measure=input_dict["measure"],
accuracy=input_dict["accuracy"],
separate_max=input_dict["separate_max"] == "true",
random_state=random_state,
save_results=True)
return {"fitmodel_url": fitmodel_url}
def rf_predict(input_dict):
from discomll.ensemble import distributed_random_forest
predictions_url = distributed_random_forest.predict(input_dict["dataset"],
fitmodel_url=input_dict["fitmodel_url"],
save_results=True)
return {"string": predictions_url}
def wrf_fit(input_dict):
from discomll.ensemble import distributed_weighted_forest_rand
random_state = None if input_dict["seed"] == "None" else int(input_dict["seed"])
fitmodel_url = distributed_weighted_forest_rand.fit(input_dict["dataset"],
trees_per_chunk=input_dict["trees_per_subset"],
max_tree_nodes=input_dict["tree_nodes"],
num_medoids=input_dict["num_medoids"],
min_samples_leaf=input_dict["min_samples_leaf"],
min_samples_split=input_dict["min_samples_split"],
class_majority=input_dict["majority"],
measure=input_dict["measure"],
accuracy=input_dict["accuracy"],
separate_max=input_dict["separate_max"] == "true",
random_state=random_state,
save_results=True)
return {"fitmodel_url": fitmodel_url}
def wrf_predict(input_dict):
from discomll.ensemble import distributed_weighted_forest_rand
predictions_url = distributed_weighted_forest_rand.predict(input_dict["dataset"],
fitmodel_url=input_dict["fitmodel_url"],
coeff=input_dict["coeff"],
save_results=True)
return {"string": predictions_url}
def linsvm_fit(input_dict):
from discomll.classification import linear_svm
fitmodel_url = linear_svm.fit(input_dict["dataset"],
nu=input_dict["nu"],
save_results=True)
return {"fitmodel_url": fitmodel_url}
def linsvm_predict(input_dict):
from discomll.classification import linear_svm
predictions_url = linear_svm.predict(input_dict["dataset"],
fitmodel_url=input_dict["fitmodel_url"],
save_results=True)
return {"string": predictions_url}
def linreg_fit(input_dict):
from discomll.regression import linear_regression
fitmodel_url = linear_regression.fit(input_dict["dataset"],
save_results=True)
return {"fitmodel_url": fitmodel_url}
def linreg_predict(input_dict):
from discomll.regression import linear_regression
predictions_url = linear_regression.predict(input_dict["dataset"],
fitmodel_url=input_dict["fitmodel_url"],
save_results=True)
return {"string": predictions_url}
def kmeans_fit(input_dict):
from discomll.clustering import kmeans
random_state = None if input_dict["seed"] == "None" else int(input_dict["seed"])
fitmodel_url = kmeans.fit(input_dict["dataset"],
n_clusters=input_dict["clusters"],
max_iterations=input_dict["itr"],
random_state=random_state,
save_results=True)
return {"fitmodel_url": fitmodel_url}
def kmeans_predict(input_dict):
from discomll.clustering import kmeans
predictions_url = kmeans.predict(input_dict["dataset"],
fitmodel_url=input_dict["fitmodel_url"],
save_results=True)
return {"string": predictions_url}
def logreg_fit(input_dict):
from discomll.classification import logistic_regression
fitmodel_url = logistic_regression.fit(input_dict["dataset"],
alpha=input_dict["alpha"],
max_iterations=input_dict["itr"],
save_results=True)
return {"fitmodel_url": fitmodel_url}
def logreg_predict(input_dict):
from discomll.classification import logistic_regression
predictions_url = logistic_regression.predict(input_dict["dataset"],
fitmodel_url=input_dict["fitmodel_url"],
save_results=True)
return {"string": predictions_url}
def naivebayes_fit(input_dict):
from discomll.classification import naivebayes
fitmodel_url = naivebayes.fit(input_dict["dataset"], save_results=True)
return {"fitmodel_url": fitmodel_url}
def naivebayes_predict(input_dict):
from discomll.classification import naivebayes
m = 1 if input_dict["m"] == "" else input_dict["m"]
    predictions_url = naivebayes.predict(input_dict["dataset"],
                                         fitmodel_url=input_dict["fitmodel_url"],
                                         m=m,
                                         save_results=True)
return {"string": predictions_url}
def results_to_file(input_dict):
# implementation is in visualization_views.py
return {}
def measure_distribution(input_dict):
# implementation is in visualization_views.py
return {}
def model_view(input_dict):
# implementation is in visualization_views.py
return {}
def bigdata_ca(input_dict):
# implementation is in visualization_views.py
return {}
def bigdata_mse(input_dict):
# implementation is in visualization_views.py
return {}
|
172901
|
import argparse
import os.path as osp
import random
from time import perf_counter as t
import yaml
from yaml import SafeLoader
import torch
import torch_geometric.transforms as T
import torch.nn.functional as F
import torch.nn as nn
from torch_geometric.datasets import Planetoid, CitationFull
from torch_geometric.utils import dropout_adj, to_undirected, is_undirected
from torch_geometric.nn import GCNConv
import numpy as np
from torch_geometric.utils import to_undirected, to_scipy_sparse_matrix
from datasets import get_citation_dataset
from model_digcl import Encoder, Model, drop_feature
from eval_digcl import label_classification
from get_adj import *
import warnings
warnings.filterwarnings('ignore')
def train(model: Model, x, edge_index):
model.train()
optimizer.zero_grad()
edge_index_1, edge_weight_1 = cal_fast_appr(
alpha_1, edge_index, x.shape[0], x.dtype)
edge_index_2, edge_weight_2 = cal_fast_appr(
alpha_2, edge_index, x.shape[0], x.dtype)
x_1 = drop_feature(x, drop_feature_rate_1)
x_2 = drop_feature(x, drop_feature_rate_2)
z1 = model(x_1, edge_index_1, edge_weight_1)
z2 = model(x_2, edge_index_2, edge_weight_2)
loss = model.loss(z1, z2, batch_size=0)
loss.backward()
optimizer.step()
return loss.item()
def test(model: Model, dataset, x, edge_index, edge_weight, y, final=False):
model.eval()
z = model(x, edge_index, edge_weight)
label_classification(z, y, data)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='DBLP')
parser.add_argument('--gpu_id', type=int, default=0)
parser.add_argument('--config', type=str, default='config_digcl.yaml')
parser.add_argument('--alpha', type=float, default=0.1)
parser.add_argument('--recache', action="store_true",
help="clean up the old adj data", default=True)
parser.add_argument('--normalize-features',
action="store_true", default=True)
parser.add_argument('--adj-type', type=str, default='or')
parser.add_argument('--curr-type', type=str, default='log')
args = parser.parse_args()
assert args.gpu_id in range(0, 8)
torch.cuda.set_device(args.gpu_id)
config = yaml.load(open(args.config), Loader=SafeLoader)[args.dataset]
torch.manual_seed(config['seed'])
random.seed(2021)
learning_rate = config['learning_rate']
num_hidden = config['num_hidden']
num_proj_hidden = config['num_proj_hidden']
activation = ({'relu': F.relu, 'prelu': nn.PReLU(), 'rrelu': nn.RReLU()})[
config['activation']]
base_model = ({'GCNConv': GCNConv})[config['base_model']]
num_layers = config['num_layers']
alpha_1 = 0.1
drop_feature_rate_1 = config['drop_feature_rate_1']
drop_feature_rate_2 = config['drop_feature_rate_2']
tau = config['tau']
num_epochs = config['num_epochs']
weight_decay = config['weight_decay']
path = osp.join(osp.expanduser('.'), 'datasets')
print(args.normalize_features)
dataset = get_citation_dataset(
args.dataset, args.alpha, args.recache, args.normalize_features, args.adj_type)
print("Num of edges ", dataset[0].num_edges)
data = dataset[0]
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
data = data.to(device)
edge_index_init, edge_weight_init = cal_fast_appr(
alpha_1, data.edge_index, data.x.shape[0], data.x.dtype)
encoder = Encoder(dataset.num_features, num_hidden, activation,
base_model=base_model, k=num_layers).to(device)
model = Model(encoder, num_hidden, num_proj_hidden, tau).to(device)
optimizer = torch.optim.Adam(
model.parameters(), lr=learning_rate, weight_decay=weight_decay)
start = t()
prev = start
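    # alpha_2 follows a curriculum from a = 0.9 down to b = 0.1 over training; the shape of the
    # schedule (linear, exponential, logarithmic, or fixed) is chosen with --curr-type.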
for epoch in range(1, num_epochs + 1):
a = 0.9
b = 0.1
if args.curr_type == 'linear':
alpha_2 = a-(a-b)/(num_epochs+1)*epoch
elif args.curr_type == 'exp':
alpha_2 = a - (a-b)/(np.exp(3)-1) * \
(np.exp(3*epoch/(num_epochs+1))-1)
elif args.curr_type == 'log':
alpha_2 = a - (a-b)*(1/3*np.log(epoch/(num_epochs+1)+np.exp(-3)))
elif args.curr_type == 'fixed':
alpha_2 = 0.9
else:
print('wrong curr type')
exit()
loss = train(model, data.x, data.edge_index)
now = t()
print(f'(T) | Epoch={epoch:03d}, loss={loss:.4f}, '
f'this epoch {now - prev:.4f}, total {now - start:.4f}')
prev = now
print("=== Final ===")
test(model, dataset, data.x, edge_index_init,
edge_weight_init, data.y, final=True)
|
172931
|
from oidcmsg.oauth2 import AccessTokenResponse
import pytest
from oidcrp.entity import Entity
from oidcrp.util import rndstr
KEYDEF = [{"type": "EC", "crv": "P-256", "use": ["sig"]}]
class TestRP():
@pytest.fixture(autouse=True)
def create_service(self):
client_config = {
'client_id': 'client_id',
'client_secret': 'another password'
}
services = {
'token': {
'class': 'oidcrp.oauth2.client_credentials.cc_access_token.CCAccessToken'
},
'refresh_token': {
'class': 'oidcrp.oauth2.client_credentials.cc_refresh_access_token'
'.CCRefreshAccessToken'
}
}
self.entity = Entity(config=client_config, services=services)
self.entity.client_get("service",'accesstoken').endpoint = 'https://example.com/token'
self.entity.client_get("service",'refresh_token').endpoint = 'https://example.com/token'
def test_token_get_request(self):
request_args = {'grant_type': 'client_credentials'}
_srv = self.entity.client_get("service",'accesstoken')
_info = _srv.get_request_parameters(request_args=request_args)
assert _info['method'] == 'POST'
assert _info['url'] == 'https://example.com/token'
assert _info['body'] == 'grant_type=client_credentials'
assert _info['headers'] == {
'Authorization': 'Basic Y2xpZW50X2lkOmFub3RoZXIrcGFzc3dvcmQ=',
'Content-Type': 'application/x-www-form-urlencoded'
}
def test_token_parse_response(self):
request_args = {'grant_type': 'client_credentials'}
_srv = self.entity.client_get("service",'accesstoken')
_request_info = _srv.get_request_parameters(request_args=request_args)
response = AccessTokenResponse(**{
"access_token": "2Y<PASSWORD>FZFE<PASSWORD>",
"token_type": "example",
"expires_in": 3600,
"refresh_token": "<PASSWORD>",
"example_parameter": "example_value"
})
_response = _srv.parse_response(response.to_json(), sformat="json")
# since no state attribute is involved, a key is minted
_key = rndstr(16)
_srv.update_service_context(_response, key=_key)
info = _srv.client_get("service_context").state.get_item(AccessTokenResponse, 'token_response', _key)
assert '__expires_at' in info
def test_refresh_token_get_request(self):
_srv = self.entity.client_get("service",'accesstoken')
_srv.update_service_context({
"access_token": "2YotnFZFEjr1zCs<PASSWORD>AA",
"token_type": "example",
"expires_in": 3600,
"refresh_token": "<PASSWORD>",
"example_parameter": "example_value"
})
_srv = self.entity.client_get("service",'refresh_token')
_id = rndstr(16)
_info = _srv.get_request_parameters(state_id=_id)
assert _info['method'] == 'POST'
assert _info['url'] == 'https://example.com/token'
assert _info[
'body'] == 'grant_type=refresh_token'
assert _info['headers'] == {
'Authorization': 'Bearer tGzv3JOkF0XG5Qx2TlKWIA',
'Content-Type': 'application/x-www-form-urlencoded'
}
def test_refresh_token_parse_response(self):
request_args = {'grant_type': 'client_credentials'}
_srv = self.entity.client_get("service",'accesstoken')
_request_info = _srv.get_request_parameters(request_args=request_args)
response = AccessTokenResponse(**{
"access_token": "<KEY>",
"token_type": "example",
"expires_in": 3600,
"refresh_token": "tGzv<PASSWORD>",
"example_parameter": "example_value"
})
_response = _srv.parse_response(response.to_json(), sformat="json")
# since no state attribute is involved, a key is minted
_key = rndstr(16)
_srv.update_service_context(_response, key=_key)
info = _srv.client_get("service_context").state.get_item(AccessTokenResponse, 'token_response', _key)
assert '__expires_at' in info
# Move from token to refresh token service
_srv = self.entity.client_get("service",'refresh_token')
_request_info = _srv.get_request_parameters(request_args=request_args, state=_key)
refresh_response = AccessTokenResponse(**{
"access_token": '<KEY>',
"token_type": "example",
"expires_in": 3600,
"refresh_token": 'lhNX<PASSWORD>',
})
_response = _srv.parse_response(refresh_response.to_json(), sformat="json")
_srv.update_service_context(_response, key=_key)
info = _srv.client_get("service_context").state.get_item(AccessTokenResponse, 'token_response', _key)
assert '__expires_at' in info
def test_2nd_refresh_token_parse_response(self):
request_args = {'grant_type': 'client_credentials'}
_srv = self.entity.client_get("service",'accesstoken')
_request_info = _srv.get_request_parameters(request_args=request_args)
response = AccessTokenResponse(**{
"access_token": "<KEY>",
"token_type": "example",
"expires_in": 3600,
"refresh_token": "<PASSWORD>",
"example_parameter": "example_value"
})
_response = _srv.parse_response(response.to_json(), sformat="json")
# since no state attribute is involved, a key is minted
_key = rndstr(16)
_srv.update_service_context(_response, key=_key)
info = _srv.client_get("service_context").state.get_item(AccessTokenResponse, 'token_response', _key)
assert '__expires_at' in info
# Move from token to refresh token service
_srv = self.entity.client_get("service",'refresh_token')
_request_info = _srv.get_request_parameters(request_args=request_args, state=_key)
refresh_response = AccessTokenResponse(**{
"access_token": '<KEY>',
"token_type": "example",
"expires_in": 3600,
"refresh_token": '<PASSWORD>',
})
_response = _srv.parse_response(refresh_response.to_json(), sformat="json")
_srv.update_service_context(_response, key=_key)
info = _srv.client_get("service_context").state.get_item(AccessTokenResponse, 'token_response', _key)
assert '__expires_at' in info
_request_info = _srv.get_request_parameters(request_args=request_args, state=_key)
assert _request_info['headers'] == {
'Authorization': 'Bearer {}'.format(refresh_response["refresh_token"]),
'Content-Type': 'application/x-www-form-urlencoded'
}
|
173009
|
import redis
import logging
from django.conf.urls import url
from django.conf import settings
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
from . import consumers
logger = logging.getLogger('awx.main.routing')
class AWXProtocolTypeRouter(ProtocolTypeRouter):
def __init__(self, *args, **kwargs):
try:
r = redis.Redis.from_url(settings.BROKER_URL)
for k in r.scan_iter('asgi:*', 500):
logger.debug(f"cleaning up Redis key {k}")
r.delete(k)
except redis.exceptions.RedisError as e:
logger.warn("encountered an error communicating with redis.")
raise e
super().__init__(*args, **kwargs)
websocket_urlpatterns = [
url(r'websocket/$', consumers.EventConsumer),
url(r'websocket/broadcast/$', consumers.BroadcastConsumer),
]
application = AWXProtocolTypeRouter(
{
'websocket': AuthMiddlewareStack(URLRouter(websocket_urlpatterns)),
}
)
|
173013
|
from rest_framework import status
from rest_framework.reverse import reverse
from resource_tracker.models import ResourceGroupAttributeDefinition
from tests.test_resource_tracker.test_api.base_test_api import BaseTestAPI
class TestAttributeDefinitionCreate(BaseTestAPI):
def setUp(self):
super(TestAttributeDefinitionCreate, self).setUp()
self.url = reverse('api_attribute_definition_list_create', args=[self.rg_physical_servers.id])
def _check_attribute_definition_create(self, data):
number_attribute_before = ResourceGroupAttributeDefinition.objects.all().count()
response = self.client.post(self.url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(ResourceGroupAttributeDefinition.objects.latest('id').name, data["name"])
self.assertEqual(ResourceGroupAttributeDefinition.objects.latest('id').resource_group.id,
self.rg_physical_servers.id)
try:
self.assertEqual(ResourceGroupAttributeDefinition.objects.latest('id').consume_from.id, data["consume_from"])
self.assertEqual(ResourceGroupAttributeDefinition.objects.latest('id').produce_for.id, data["produce_for"])
except AttributeError: # consumer and producer may be None
pass
self.assertEqual(ResourceGroupAttributeDefinition.objects.latest('id').help_text, data["help_text"])
self.assertEqual(number_attribute_before + 1,
ResourceGroupAttributeDefinition.objects.all().count())
def test_attribute_definition_create(self):
data = {
"name": "new_attribute",
"consume_from": None,
"produce_for": None,
"help_text": "help"
}
self._check_attribute_definition_create(data)
def test_attribute_definition_create_with_attached_pool(self):
data = {
"name": "new_attribute",
"consume_from": self.rp_vcenter_memory_attribute.id,
"produce_for": self.rp_vcenter_vcpu_attribute.id,
"help_text": ""
}
self._check_attribute_definition_create(data)
def test_cannot_create_attribute_definition_when_non_existing_consumer(self):
data = {
"name": "new_attribute",
"consume_from": 99999,
"produce_for": None,
"help_text": ""
}
response = self.client.post(self.url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
|
173071
|
from social_peewee.storage import database_proxy, BaseModel, PeeweeUserMixin, \
PeeweeNonceMixin, PeeweeAssociationMixin, PeeweeCodeMixin, BasePeeweeStorage
|
173118
|
import re
from nb_utils import line_macros
"""
TODO: Separation of concerns between LessonPreprocessor and MacroProcessor is
muddled. For historical reasons, LessonPreprocessor currently owns the logic for
expander macros. Eventually, would like to move all macro stuff here (and in
modules like line_macros.py)
"""
class RmCellException(Exception):
pass
class MacroProcessor(object):
def __init__(self, cfg):
self.cfg = cfg
def process_cell(self, cell):
src = cell['source']
try:
self.apply_cell_macros(src)
except RmCellException:
return None # Indicator to remove this cell from nb
src = self.apply_line_macros(src)
cell['source'] = src
return cell
def apply_cell_macros(self, src):
cell_macro_pattern = r'#%%(.+)%%\s*$'
topline = src.split('\n')[0]
match = re.match(cell_macro_pattern, topline)
if match:
# TODO: Quick hack for now (only current cell-level macros in use
# are RM and RM_IF). Later, should break out into per-macro functions,
# as in line_macros.py
name, args = self._parse_inner_macro_string(match.group(1))
if name == 'RM':
raise RmCellException
elif name == 'RM_IF':
assert len(args) == 1
if args[0]:
raise RmCellException
else:
assert False, "Unrecognized cell-level macro name: {}".format(name)
def apply_line_macros(self, src):
# NB: + is greedy, so macro names can still include underscores.
# (Might need to restrict inner match character set to avoid false
# positives)
line_macro_pattern = r'\s*#_(.+)_\s*$'
lines = src.split('\n')
i = 0
newlines = []
while i < len(lines):
l = lines[i]
match = re.match(line_macro_pattern, l)
if match:
assert i+1 < len(lines), ("Macro {} has no following line to "
"act on").format(l)
nextline = lines[i+1]
macro_name, args = self._parse_inner_macro_string(match.group(1))
fn = getattr(line_macros, macro_name)
res = fn(nextline, *args)
if res is not None:
newlines.append(res)
# Jump ahead by 2 (moving past the macro line, and the following
# line that it transformed)
i += 2
else:
newlines.append(l)
i += 1
return '\n'.join(newlines)
def _parse_inner_macro_string(self, macro):
args = []
if macro.endswith(')'):
macro, argstr = macro[:-1].split('(')
# XXX: I guess this is assuming <= 1 arg? Which hasn't been violated so far.
args = [argstr.strip()] if argstr.strip() else []
return macro, self._transform_macro_args(args)
def _transform_macro_args(self, args):
def transform(arg):
if arg == 'PROD':
return not self.cfg.get('testing', False)
if arg == 'DAILY':
return self.cfg.get('daily', False)
if arg == 'NOTDAILY':
return not self.cfg.get('daily', False)
else:
return arg
return list(map(transform, args))
# Not used?
def _delete_macro_line(src, match):
    a, b = match.span()
    try:
        line_start = src.rindex('\n', 0, a) + 1
    except ValueError:
# The macro was on the first line of the cell
line_start = 0
line_end = src.find('\n', b)
# Remove up to and including the newline
return src[:line_start] + src[line_end+1:]
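# Illustrative sketch (added, not part of the module): a cell whose first line carries the RM
# cell-level macro is dropped entirely by process_cell(). The cfg values below are arbitrary.
#
#     mp = MacroProcessor(cfg={'testing': True})
#     cell = {'source': '#%%RM%%\nprint("scratch work")'}
#     assert mp.process_cell(cell) is None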
|
173119
|
class CompressString(object):
def compress(self, string):
if string is None or not string:
return string
result = ''
prev_char = string[0]
count = 0
for char in string:
if char == prev_char:
count += 1
else:
result += self._calc_partial_result(prev_char, count)
prev_char = char
count = 1
result += self._calc_partial_result(prev_char, count)
return result if len(result) < len(string) else string
def _calc_partial_result(self, prev_char, count):
return prev_char + (str(count) if count > 1 else '')
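# Illustrative usage (added sketch, not part of the original class): the run-length encoded form is
# returned only when it is strictly shorter than the input, otherwise the input comes back unchanged.
if __name__ == '__main__':
    cs = CompressString()
    print(cs.compress('aabcccccaaa'))  # -> 'a2bc5a3' (shorter, so the compressed form is returned)
    print(cs.compress('abc'))          # -> 'abc' (compression would not save space)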
|
173163
|
data = (
((-0.195090, 0.980785), (0.000000, 1.000000)),
((-0.382683, 0.923880), (-0.195090, 0.980785)),
((-0.555570, 0.831470), (-0.382683, 0.923880)),
((-0.707107, 0.707107), (-0.555570, 0.831470)),
((-0.831470, 0.555570), (-0.707107, 0.707107)),
((-0.923880, 0.382683), (-0.831470, 0.555570)),
((-0.980785, 0.195090), (-0.923880, 0.382683)),
((-0.651678, 0.500014), (0.831491, 0.344416)),
((0.831491, 0.344416), (-0.817293, 0.175582)),
((-0.882707, 0.175581), (0.768508, 0.344415)),
((0.768508, 0.344415), (-0.748323, 0.500013)),
((-0.748323, 0.500013), (0.563604, 0.636396)),
((0.563604, 0.636396), (-0.500013, 0.748323)),
((-0.500013, 0.748323), (0.255585, 0.831492)),
((0.923879, 0.382684), (0.980785, 0.195091)),
((0.831469, 0.555571), (0.923879, 0.382684)),
((0.707106, 0.707108), (0.831469, 0.555571)),
((0.555569, 0.831470), (0.707106, 0.707108)),
((0.382682, 0.923880), (0.555569, 0.831470)),
((0.195089, 0.980786), (0.382682, 0.923880)),
((0.000000, 1.000000), (0.195089, 0.980786)),
((0.255585, 0.831492), (-0.175581, 0.882707)),
((-0.175581, 0.882707), (-0.000000, 0.900000)),
((-0.399988, 0.748323), (0.636395, 0.636397)),
((0.344414, 0.831492), (-0.399988, 0.748323)),
((-0.124420, 0.882707), (0.344414, 0.831492)),
((-0.195090, -0.980785), (0.000000, -1.000000)),
((-0.382683, -0.923880), (-0.195090, -0.980785)),
((-0.555570, -0.831470), (-0.382683, -0.923880)),
((-0.707107, -0.707107), (-0.555570, -0.831470)),
((-0.831470, -0.555570), (-0.707107, -0.707107)),
((-0.923880, -0.382683), (-0.831470, -0.555570)),
((-0.980785, -0.195090), (-0.923880, -0.382683)),
((-1.000000, -0.000000), (-0.980785, -0.195090)),
((-0.651678, -0.500014), (0.831491, -0.344416)),
((0.831491, -0.344416), (-0.817293, -0.175582)),
((-0.817293, -0.175582), (0.900000, -0.000001)),
((0.800000, -0.000000), (-0.882707, -0.175581)),
((-0.882707, -0.175581), (0.768508, -0.344415)),
((0.768508, -0.344415), (-0.748323, -0.500013)),
((-0.748323, -0.500013), (0.563604, -0.636396)),
((0.563604, -0.636396), (-0.500013, -0.748323)),
((-0.500013, -0.748323), (0.255585, -0.831492)),
((0.980785, -0.195091), (1.000000, -0.000001)),
((0.923879, -0.382684), (0.980785, -0.195091)),
((0.831469, -0.555571), (0.923879, -0.382684)),
((0.707106, -0.707108), (0.831469, -0.555571)),
((0.555569, -0.831470), (0.707106, -0.707108)),
((0.382682, -0.923880), (0.555569, -0.831470)),
((0.195089, -0.980786), (0.382682, -0.923880)),
((0.000000, -1.000000), (0.195089, -0.980786)),
((0.255585, -0.831492), (-0.175581, -0.882707)),
((-0.175581, -0.882707), (-0.000000, -0.900000)),
((-0.399988, -0.748323), (0.636395, -0.636397)),
((0.344414, -0.831492), (-0.399988, -0.748323)),
((-0.124420, -0.882707), (0.344414, -0.831492)),
((-1.000000, -0.000000), (-0.980785, 0.195090)),
((-0.000000, 0.900000), (-0.124420, 0.882707)),
((0.636395, 0.636397), (-0.651678, 0.500014)),
((-0.817293, 0.175582), (0.900000, -0.000001)),
((0.800000, -0.000000), (-0.882707, 0.175581)),
((0.980785, 0.195091), (1.000000, -0.000001)),
((-0.000000, -0.900000), (-0.124420, -0.882707)),
((0.636395, -0.636397), (-0.651678, -0.500014)),
)
|
173169
|
from contextlib import contextmanager
import os
from math import sqrt
from fontTools.pens.basePen import BasePen
from fontTools.pens.recordingPen import RecordingPen
from fontTools.ttLib.tables.otTables import CompositeMode, ExtendMode
import cairo
from .base import Canvas, Surface
from .sweepGradient import buildSweepGradientPatches
_compositeModeMap = {
CompositeMode.CLEAR: cairo.OPERATOR_CLEAR,
CompositeMode.SRC: cairo.OPERATOR_SOURCE,
CompositeMode.DEST: cairo.OPERATOR_DEST,
CompositeMode.SRC_OVER: cairo.OPERATOR_OVER,
CompositeMode.DEST_OVER: cairo.OPERATOR_DEST_OVER,
CompositeMode.SRC_IN: cairo.OPERATOR_IN,
CompositeMode.DEST_IN: cairo.OPERATOR_DEST_IN,
CompositeMode.SRC_OUT: cairo.OPERATOR_OUT,
CompositeMode.DEST_OUT: cairo.OPERATOR_DEST_OUT,
CompositeMode.SRC_ATOP: cairo.OPERATOR_ATOP,
CompositeMode.DEST_ATOP: cairo.OPERATOR_DEST_ATOP,
CompositeMode.XOR: cairo.OPERATOR_XOR,
CompositeMode.PLUS: cairo.OPERATOR_ADD,
CompositeMode.SCREEN: cairo.OPERATOR_SCREEN,
CompositeMode.OVERLAY: cairo.OPERATOR_OVERLAY,
CompositeMode.DARKEN: cairo.OPERATOR_DARKEN,
CompositeMode.LIGHTEN: cairo.OPERATOR_LIGHTEN,
CompositeMode.COLOR_DODGE: cairo.OPERATOR_COLOR_DODGE,
CompositeMode.COLOR_BURN: cairo.OPERATOR_COLOR_BURN,
CompositeMode.HARD_LIGHT: cairo.OPERATOR_HARD_LIGHT,
CompositeMode.SOFT_LIGHT: cairo.OPERATOR_SOFT_LIGHT,
CompositeMode.DIFFERENCE: cairo.OPERATOR_DIFFERENCE,
CompositeMode.EXCLUSION: cairo.OPERATOR_EXCLUSION,
CompositeMode.MULTIPLY: cairo.OPERATOR_MULTIPLY,
CompositeMode.HSL_HUE: cairo.OPERATOR_HSL_HUE,
CompositeMode.HSL_SATURATION: cairo.OPERATOR_HSL_SATURATION,
CompositeMode.HSL_COLOR: cairo.OPERATOR_HSL_COLOR,
CompositeMode.HSL_LUMINOSITY: cairo.OPERATOR_HSL_LUMINOSITY,
}
_extendModeMap = {
ExtendMode.PAD: cairo.Extend.PAD,
ExtendMode.REPEAT: cairo.Extend.REPEAT,
ExtendMode.REFLECT: cairo.Extend.REFLECT,
}
class CairoPen(BasePen):
def __init__(self, context):
super().__init__(None)
self.context = context
def _moveTo(self, pt):
self.context.move_to(*pt)
def _lineTo(self, pt):
self.context.line_to(*pt)
def _curveToOne(self, pt1, pt2, pt3):
self.context.curve_to(*pt1, *pt2, *pt3)
def _closePath(self):
self.context.close_path()
class CairoCanvas(Canvas):
def __init__(self, context):
self.context = context
self._pen = CairoPen(context)
@staticmethod
def newPath():
return RecordingPen()
@contextmanager
def savedState(self):
self.context.save()
yield
self.context.restore()
@contextmanager
def compositeMode(self, compositeMode):
self.context.push_group()
yield
self.context.pop_group_to_source()
self.context.set_operator(_compositeModeMap[compositeMode])
self.context.paint()
def transform(self, transform):
m = cairo.Matrix()
m.xx, m.yx, m.xy, m.yy, m.x0, m.y0 = transform
self.context.transform(m)
def clipPath(self, path):
self.context.new_path()
path.replay(self._pen)
self.context.clip()
def drawPathSolid(self, path, color):
self.context.set_source_rgba(*color)
self.context.new_path()
path.replay(self._pen)
self.context.fill()
def drawPathLinearGradient(
self, path, colorLine, pt1, pt2, extendMode, gradientTransform
):
gr = cairo.LinearGradient(pt1[0], pt1[1], pt2[0], pt2[1])
gr.set_extend(_extendModeMap[extendMode])
for stop, color in colorLine:
gr.add_color_stop_rgba(stop, *color)
self._drawGradient(path, gr, gradientTransform)
def drawPathRadialGradient(
self,
path,
colorLine,
startCenter,
startRadius,
endCenter,
endRadius,
extendMode,
gradientTransform,
):
gr = cairo.RadialGradient(
startCenter[0],
startCenter[1],
startRadius,
endCenter[0],
endCenter[1],
endRadius,
)
gr.set_extend(_extendModeMap[extendMode])
for stop, color in colorLine:
gr.add_color_stop_rgba(stop, *color)
self._drawGradient(path, gr, gradientTransform)
def drawPathSweepGradient(
self,
path,
colorLine,
center,
startAngle,
endAngle,
extendMode,
gradientTransform,
):
self.context.save()
self.context.new_path()
path.replay(self._pen)
self.context.clip()
self.transform(gradientTransform)
# alloc the mesh pattern
pat = cairo.MeshPattern()
# find current path' extent
x1, y1, x2, y2 = self.context.clip_extents()
maxX = max(d * d for d in (x1 - center[0], x2 - center[0]))
maxY = max(d * d for d in (y1 - center[1], y2 - center[1]))
R = sqrt(maxX + maxY)
patches = buildSweepGradientPatches(
colorLine, center, R, startAngle, endAngle, useGouraudShading=False
)
for (P0, color0), C0, C1, (P1, color1) in patches:
# draw patch
pat.begin_patch()
pat.move_to(center[0], center[1])
pat.line_to(P0[0], P0[1])
pat.curve_to(C0[0], C0[1], C1[0], C1[1], P1[0], P1[1])
pat.line_to(center[0], center[1])
pat.set_corner_color_rgba(0, *color0)
pat.set_corner_color_rgba(1, *color0)
pat.set_corner_color_rgba(2, *color1)
pat.set_corner_color_rgba(3, *color1)
pat.end_patch()
self.context.set_source(pat)
self.context.paint()
self.context.restore()
# TODO: blendMode for PaintComposite)
def _drawGradient(self, path, gradient, gradientTransform):
self.context.new_path()
path.replay(self._pen)
self.context.save()
self.transform(gradientTransform)
self.context.set_source(gradient)
self.context.fill()
self.context.restore()
class CairoPixelSurface(Surface):
fileExtension = ".png"
def __init__(self):
self._surfaces = []
@contextmanager
def canvas(self, boundingBox):
x, y, xMax, yMax = boundingBox
width = xMax - x
height = yMax - y
surface = self._setupCairoSurface(width, height)
self._surfaces.append((surface, (width, height)))
context = cairo.Context(surface)
context.translate(-x, height + y)
context.scale(1, -1)
yield CairoCanvas(context)
def _setupCairoSurface(self, width, height):
return cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
def saveImage(self, path):
surface, _ = self._surfaces[-1]
surface.flush()
surface.write_to_png(os.fspath(path))
surface.finish()
class CairoPDFSurface(CairoPixelSurface):
fileExtension = ".pdf"
def _setupCairoSurface(self, width, height):
return cairo.RecordingSurface(cairo.CONTENT_COLOR_ALPHA, (0, 0, width, height))
def saveImage(self, path):
_, (width, height) = self._surfaces[0]
pdfSurface = cairo.PDFSurface(path, width, height)
pdfContext = None
for surface, (width, height) in self._surfaces:
pdfSurface.set_size(width, height)
if pdfContext is None:
# It's important to call the first set_size() *before*
# the context is created, or we'll get an additional
# empty page
pdfContext = cairo.Context(pdfSurface)
pdfContext.set_source_surface(surface, 0.0, 0.0)
pdfContext.paint()
pdfContext.show_page()
pdfSurface.flush()
class CairoSVGSurface(CairoPDFSurface):
fileExtension = ".svg"
def saveImage(self, path):
surface, (width, height) = self._surfaces[-1]
svgSurface = cairo.SVGSurface(path, width, height)
pdfContext = cairo.Context(svgSurface)
pdfContext.set_source_surface(surface, 0.0, 0.0)
pdfContext.paint()
svgSurface.flush()
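# Minimal usage sketch (added for illustration; the bounding box, color, and output file name are
# arbitrary choices, not part of the module): render a filled triangle to a PNG with CairoPixelSurface.
if __name__ == "__main__":
    surface = CairoPixelSurface()
    with surface.canvas((0, 0, 200, 200)) as canvas:
        path = canvas.newPath()  # a fontTools RecordingPen
        path.moveTo((20, 20))
        path.lineTo((180, 20))
        path.lineTo((100, 180))
        path.closePath()
        canvas.drawPathSolid(path, (0.2, 0.4, 0.8, 1.0))  # RGBA components in 0..1
    surface.saveImage("triangle.png")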
|
173194
|
import click
import pathlib
import sys
import sqlalchemy as sa
from dynaconf import settings
from passlib.handlers.sha2_crypt import sha256_crypt
BASE_DIR = pathlib.Path(__file__).parent.parent
sys.path.append(str(BASE_DIR.absolute()))
from auth.models import users # noqa
@click.command()
@click.option('--login', help='user login', required=True)
@click.option('--password', help='user password', required=True)
@click.option('--is_superuser', is_flag=True, default=False, help='user is superuser')
def create_user(login: str, password: str, is_superuser: bool) -> None:
engine = sa.create_engine(settings.DATABASE.DSN)
with engine.connect() as conn:
conn.execute(
users.insert(
{
'login': login,
                    'passwd': sha256_crypt.hash(password),  # assumed: store a sha256_crypt hash of the given password
'is_superuser': is_superuser,
},
),
)
if __name__ == '__main__':
create_user()
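# Example invocation (hypothetical script name and values, for illustration only):
#   python create_user.py --login admin --password s3cret --is_superuser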
|
173206
|
import unittest
import numpy as np
from spn.algorithms.Inference import log_likelihood
from spn.algorithms.MPE import mpe
from spn.io.CPP import get_cpp_function, setup_cpp_bridge, get_cpp_mpe_function
from spn.io.Graphics import plot_spn
from spn.structure.Base import get_nodes_by_type
from spn.structure.leaves.parametric.Inference import add_parametric_inference_support
from spn.structure.leaves.parametric.Parametric import Gaussian, Bernoulli
class TestCPP(unittest.TestCase):
def setUp(self):
add_parametric_inference_support()
def test_binary(self):
A = 0.4 * (
Bernoulli(p=0.8, scope=0)
* (
0.3 * (Bernoulli(p=0.7, scope=1) * Bernoulli(p=0.6, scope=2))
+ 0.7 * (Bernoulli(p=0.5, scope=1) * Bernoulli(p=0.4, scope=2))
)
) + 0.6 * (Bernoulli(p=0.8, scope=0) * Bernoulli(p=0.7, scope=1) * Bernoulli(p=0.6, scope=2))
setup_cpp_bridge(A)
spn_cc_eval_func_bernoulli = get_cpp_function(A)
num_data = 200000
data = (
np.random.binomial(1, 0.3, size=(num_data)).astype("float32").tolist()
+ np.random.binomial(1, 0.3, size=(num_data)).astype("float32").tolist()
+ np.random.binomial(1, 0.3, size=(num_data)).astype("float32").tolist()
)
data = np.array(data).reshape((-1, 3))
num_nodes = len(get_nodes_by_type(A))
lls_matrix = np.zeros((num_data, num_nodes))
        # Test every single lls_matrix element.
_ = log_likelihood(A, data, lls_matrix=lls_matrix)
c_ll = spn_cc_eval_func_bernoulli(data)
self.assertTrue(np.allclose(lls_matrix, c_ll))
### Testing for MPE.
spn_cc_mpe_func_bernoulli = get_cpp_mpe_function(A)
# drop some data.
for i in range(data.shape[0]):
drop_data = np.random.binomial(data.shape[1] - 1, 0.5)
data[i, drop_data] = np.nan
cc_completion = spn_cc_mpe_func_bernoulli(data)
py_completion = mpe(A, data)
self.assertTrue(np.allclose(py_completion, cc_completion))
if __name__ == "__main__":
unittest.main()
|
173223
|
class TestScheduleJobFileData:
test_job_connection = {
"Name": "TestIntegrationConnection",
"ConnectorTypeName": "POSTGRESQL",
"Host": "localhost",
"Port": 5432,
"Sid": "",
"DatabaseName": "test_pdi_integration",
"User": "postgres",
"Password": "<PASSWORD>"
}
test_file_connection = {
"Name": "TestIntegrationConnectionFile",
"ConnectorTypeName": "CSV",
"Host": "",
"Port": 0,
"User": "",
"Password": ""
}
test_data_operation = {
"Name": "TEST_JOB_FILE_DATA_OPERATION",
"Contacts": [
{"Email": "<EMAIL>"}
],
"Integrations": [
{
"Limit": 100,
"ProcessCount": 1,
"Integration": {
"Code": "TEST_CSV_TO_DB_INTEGRATION",
"SourceConnections": {
"ConnectionName": "TestIntegrationConnectionFile",
"File": {
"Folder": "",
"FileName": "test.csv",
"Csv": {
"HasHeader": True,
"Header": "Id;Name",
"Separator": ";",
}
},
"Columns": "Id,Name",
},
"TargetConnections": {
"ConnectionName": "TestIntegrationConnection",
"Database": {
"Schema": "test",
"TableName": "test_integration_target",
"Query": ""
},
"Columns": "Id,Name",
},
"IsTargetTruncate": True,
"IsDelta": True,
"Comments": "Test data_integration record",
}
},
{
"Limit": 100,
"ProcessCount": 1,
"Integration": {
"Code": "TEST_DB_TO_CSV_NONE_HEADER_INTEGRATION",
"SourceConnections": {
"ConnectionName": "TestIntegrationConnection",
"Database": {
"Schema": "test",
"TableName": "test_integration_target",
"Query": ""
},
"Columns": "Id,Name",
},
"TargetConnections": {
"ConnectionName": "TestIntegrationConnectionFile",
"File": {
"Folder": "",
"FileName": "test_new_none_header.csv",
"Csv": {
"HasHeader": False,
"Header": "",
"Separator": ",",
}
},
"Columns": "Id,Name",
},
"IsTargetTruncate": True,
"IsDelta": True,
"Comments": "Test data_integration record",
}
},
{
"Limit": 100,
"ProcessCount": 1,
"Integration": {
"Code": "TEST_CSV_TO_CSV_INTEGRATION_WITHOUT_HEADER",
"SourceConnections": {
"ConnectionName": "TestIntegrationConnectionFile",
"File": {
"Folder": "",
"FileName": "test_new_none_header.csv",
"Csv": {
"HasHeader": False,
"Header": "Id,Name",
"Separator": ",",
}
},
"Columns": "Name,Id",
},
"TargetConnections": {
"ConnectionName": "TestIntegrationConnectionFile",
"File": {
"Folder": "",
"FileName": "test_new_change_column_order.csv",
"Csv": {
"HasHeader": True,
"Header": "Name;Id",
"Separator": ";",
}
},
"Columns": "Name,Id",
},
"IsTargetTruncate": True,
"IsDelta": True,
"Comments": "Test data_integration record",
}
},
{
"Limit": 100,
"ProcessCount": 1,
"Integration": {
"Code": "TEST_CSV_TO_CSV_INTEGRATION",
"SourceConnections": {
"ConnectionName": "TestIntegrationConnectionFile",
"File": {
"Folder": "",
"FileName": "test_new_none_header.csv",
"Csv": {
"HasHeader": False,
"Header": "Id,Name",
"Separator": ",",
}
},
"Columns": "Id",
},
"TargetConnections": {
"ConnectionName": "TestIntegrationConnectionFile",
"File": {
"Folder": "",
"FileName": "test_new_only_id.csv",
"Csv": {
"HasHeader": True,
"Header": "Id",
"Separator": ";",
}
},
"Columns": "Id",
},
"IsTargetTruncate": True,
"IsDelta": True,
"Comments": "Test data_integration record",
}
},
]
}
|
173231
|
from .mnist import MNIST, FashionMNIST
from .caltech import Caltech101, Caltech256
from .celeba import CelebA
from .cifar import CIFAR10, CIFAR100
from .cityscapes import Cityscapes
from .clevr import CLEVRClassification
from .country211 import Country211
from .coco import CocoCaptions, CocoDetection
from .dtd import DTD
from .eurosat import EuroSAT
from .fer2013 import FER2013
from .gtsrb import GTSRB
from .imagenet import ImageNet
from .voc import VOCDetection, VOCSegmentation
from .folder import DatasetFolder, ImageFolder
from .fakedata import FakeData
from .fgvc_aircraft import FGVCAircraft
from .flowers102 import Flowers102
from .flickr import Flickr8k, Flickr30k
from .food101 import Food101
from .inaturalist import INaturalist
from .kitti import Kitti
from .lfw import LFWPairs, LFWPeople
from .lsun import LSUN, LSUNClass
from .omniglot import Omniglot
from .oxford_iiit_pet import OxfordIIITPet
from .pcam import PCAM
from .phototour import PhotoTour
from .places365 import Places365
from .rendered_sst2 import RenderedSST2
from .sbd import SBDataset
from .sbu import SBU
from .semeion import SEMEION
from .stanford_cars import StanfordCars
from .stl10 import STL10
from .svhn import SVHN
from .sun397 import SUN397
from .usps import USPS
from .vision import VisionDataset
from .widerface import WIDERFace
__all__ = [
"MNIST",
"FashionMNIST",
"CIFAR10",
"CIFAR100",
"Caltech101",
"Caltech256",
"CelebA",
"Cityscapes",
"CLEVRClassification",
"Country211",
"CocoCaptions",
"CocoDetection",
"ImageNet",
"VOCDetection",
"VOCSegmentation",
"DatasetFolder",
"DTD",
"EuroSAT",
"GTSRB",
"ImageFolder",
"FakeData",
"FER2013",
"FGVCAircraft",
"Flowers102",
"Flickr8k",
"Flickr30k",
"Food101",
"INaturalist",
"Kitti",
"LFWPairs",
"LFWPeople",
"LSUN",
"LSUNClass",
"Omniglot",
"OxfordIIITPet",
"PCAM",
"PhotoTour",
"Places365",
"RenderedSST2",
"SBDataset",
"SBU",
"SUN397",
"SEMEION",
"StanfordCars",
"STL10",
"SVHN",
"USPS",
"VisionDataset",
"WIDERFace",
]
|
173241
|
import django.dispatch
#: Sent after a user has registered an account with the store.
#:
#: :param sender: The form which was submitted.
#: :type sender: ``satchmo_store.accounts.forms.RegistrationForm``
#:
#: :param contact: The contact that was saved to the database.
#: :type contact: ``satchmo_store.contact.models.Contact``
#:
#: :param subscribed: A boolean reflecting whether or not the user subscribed
#: to a newsletter
#:
#: :default: False
#:
#: :param data: The ``cleaned_data`` dictionary of the submitted form.
satchmo_registration = django.dispatch.Signal()
#: Sent after a user account has been verified. This signal is also sent right
#: after an account is created if account verification is disabled.
#:
#: :param sender: An instance of ``satchmo_store.models.Contact`` if the account
#: was verified via email (Note: this is the same argument as ``contact``), or
#: an instance of ``satchmo_store.accounts.forms.RegistrationForm`` if account
#: verification is disabled.
#:
#: :param contact: The contact that was registered.
#: :type contact: ``satchmo_store.models.Contact``
satchmo_registration_verified = django.dispatch.Signal()
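# Example receiver wiring (illustrative sketch; the handler name and body are not part of satchmo):
#
#     from django.dispatch import receiver
#
#     @receiver(satchmo_registration)
#     def handle_registration(sender, contact, subscribed=False, **kwargs):
#         if subscribed:
#             pass  # e.g. queue the contact for the newsletter sign-up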
|
173243
|
import os
import re
from django.conf import settings
from statsd.defaults.django import statsd
DATADOG_METRICS = False
DATADOG_TAGS = None
if settings.DATADOG_API_KEY:
from datadog import initialize
options = {
'api_key': settings.DATADOG_API_KEY,
'app_key': settings.DATADOG_APP_KEY
}
initialize(**options)
from datadog import statsd as datadog_statsd
DATADOG_METRICS = True
DATADOG_TAGS = [f"env:{os.environ.get('ENVIRONMENT')}"]
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, Helium Edu"
__version__ = "1.4.46"
def increment(metric, request=None, ignore_staff=True, ignore_anonymous=False):
if request and ignore_staff and request.user.is_authenticated and request.user.is_staff:
return
if request and ignore_anonymous and not request.user.is_authenticated:
return
statsd.incr(f"platform.{metric}")
if DATADOG_METRICS:
datadog_statsd.increment(f"platform.{metric}", tags=DATADOG_TAGS)
def request_start(request):
metric_id = f"platform.request.{re.sub('[^a-zA-Z]+', '', request.path)}.{request.method}"
timer = statsd.timer(metric_id, rate=1)
timer.start()
return {
'Request-Timer': timer,
'Request-Metric-ID': metric_id,
'Request-Metric-Start': int(round(timer._start_time * 1000))
}
def request_stop(metrics, response):
metrics['Request-Timer'].stop()
metrics['Request-Metric-Millis'] = metrics['Request-Timer'].ms
statsd.incr(metrics['Request-Metric-ID'])
statsd.incr(f"{metrics['Request-Metric-ID']}.{response.status_code}")
if DATADOG_METRICS:
datadog_statsd.increment(metrics['Request-Metric-ID'], tags=DATADOG_TAGS)
datadog_statsd.increment(f"{metrics['Request-Metric-ID']}.{response.status_code}", tags=DATADOG_TAGS)
datadog_statsd.timing(metrics['Request-Metric-ID'], metrics['Request-Timer'].ms, tags=DATADOG_TAGS)
metrics.pop('Request-Timer')
for name, value in metrics.items():
response._headers[name] = (name, str(value))
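# Illustrative middleware sketch (added; the class name is hypothetical and not part of this module).
# It shows how request_start()/request_stop() are intended to bracket a request/response cycle.
class MetricsMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        metrics = request_start(request)
        response = self.get_response(request)
        request_stop(metrics, response)
        return response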
|
173260
|
import struct
import numpy
import GLWindow
import ModernGL
# This example does not work with NPOT (non-power-of-two) textures.
width, height = 640, 460
pixels = numpy.round(numpy.random.rand(width, height)).astype('float32')
grid = numpy.dstack(numpy.mgrid[0:height, 0:width][::-1]).astype('int32')
wnd = GLWindow.create_window(width, height)
ctx = ModernGL.create_context()
prog = ctx.program(
ctx.vertex_shader('''
#version 330
in vec2 vert;
out vec2 text;
void main() {
text = vert;
gl_Position = vec4((vert * 2.0 - 1.0) * 1, 0.0, 1.0);
}
'''),
ctx.fragment_shader('''
#version 330
uniform sampler2D Texture;
in vec2 text;
out vec4 color;
void main() {
color = texture(Texture, text);
}
'''),
)
trans = ctx.program(
ctx.vertex_shader('''
#version 330
uniform sampler2D Texture;
uniform int Width;
uniform int Height;
in ivec2 text;
out float vert;
#define LIVING 0.0
#define DEAD 1.0
bool cell(int x, int y) {
return texelFetch(Texture, ivec2((x + Width) % Width, (y + Height) % Height), 0).r < 0.5;
}
void main() {
bool living = cell(text.x, text.y);
int neighbours = 0;
if (cell(text.x - 1, text.y - 1)) neighbours++;
if (cell(text.x - 1, text.y + 0)) neighbours++;
if (cell(text.x - 1, text.y + 1)) neighbours++;
if (cell(text.x + 1, text.y - 1)) neighbours++;
if (cell(text.x + 1, text.y + 0)) neighbours++;
if (cell(text.x + 1, text.y + 1)) neighbours++;
if (cell(text.x + 0, text.y + 1)) neighbours++;
if (cell(text.x + 0, text.y - 1)) neighbours++;
if (living) {
vert = (neighbours == 2 || neighbours == 3) ? LIVING : DEAD;
} else {
vert = (neighbours == 3) ? LIVING : DEAD;
}
}
'''),
varyings=['vert']
)
trans.uniforms['Width'].value = width
trans.uniforms['Height'].value = height
texture = ctx.texture((width, height), 1, pixels.tobytes(), floats=True)
texture.filter = ModernGL.NEAREST
texture.swizzle = 'RRR1'
texture.use()
vbo = ctx.buffer(struct.pack('8f', 0, 0, 0, 1, 1, 0, 1, 1))
vao = ctx.simple_vertex_array(prog, vbo, ['vert'])
text = ctx.buffer(grid.tobytes())
tao = ctx.simple_vertex_array(trans, text, ['text'])
pbo = ctx.buffer(reserve=pixels.nbytes)
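# Each frame: run the life rules on the GPU via transform feedback (tao.transform fills pbo with the
# next generation), upload that buffer back into the texture, then draw the texture on a full-screen quad.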
while wnd.update():
ctx.viewport = wnd.viewport
tao.transform(pbo)
texture.write(pbo)
vao.render(ModernGL.TRIANGLE_STRIP)
|
173298
|
import argparse
import numpy as np
from pathlib import Path
import cv2
from model import get_model
from noise_model import get_noise_model
import sys
import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
import tensorflow_datasets as tfds
def get_args():
parser = argparse.ArgumentParser(description="Test trained model",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--image_dir", type=str, required=True, help="test image dir")
parser.add_argument("--model", type=str, default="srresnet", help="model architecture ('srresnet' or 'unet')")
# parser.add_argument("--weight_file", type=str, required=True, help="trained weight file")
parser.add_argument("--test_noise_model", type=str, default="gaussian,25,25", help="noise model for test images")
parser.add_argument("--output_dir", type=str, default=None, help="if set, save resulting images otherwise show result using imshow")
args = parser.parse_args()
return args
def get_image(image):
image = np.clip(image, 0, 255)
return image.astype(dtype=np.uint8)
def main():
height = 512
width = 512
noise = 'gauss'
mode = 'clean'
args = get_args()
image_dir = args.image_dir
weight_file = 'weights_{}_{}.hdf5'.format(noise, mode) #args.weight_file
if mode != 'clean':
val_noise_model = get_noise_model(args.test_noise_model)
else:
model = get_model(height, width, args.model)
model.load_weights(weight_file)
model.summary()
# saved_model
tf.saved_model.save(model, 'saved_model_{}_{}_{}_{}x{}'.format(args.model, noise, mode, height, width))
# pb
full_model = tf.function(lambda inputs: model(inputs))
full_model = full_model.get_concrete_function(inputs=[tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype)])
frozen_func = convert_variables_to_constants_v2(full_model, lower_control_flow=False)
frozen_func.graph.as_graph_def()
tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
logdir=".",
name="noise2noise_{}_{}_{}_{}x{}_float32.pb".format(args.model, noise, mode, height, width),
as_text=False)
# No Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
with open('noise2noise_{}_{}_{}_{}x{}_float32.tflite'.format(args.model, noise, mode, height, width), 'wb') as w:
w.write(tflite_model)
print("tflite convert complete! - noise2noise_{}_{}_{}_{}x{}_float32.tflite".format(args.model, noise, mode, height, width))
# Weight Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
tflite_model = converter.convert()
with open('noise2noise_{}_{}_{}_{}x{}_weight_quant.tflite'.format(args.model, noise, mode, height, width), 'wb') as w:
w.write(tflite_model)
print('Weight Quantization complete! - noise2noise_{}_{}_{}_{}x{}_weight_quant.tflite'.format(args.model, noise, mode, height, width))
# Float16 Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
tflite_quant_model = converter.convert()
with open('noise2noise_{}_{}_{}_{}x{}_float16_quant.tflite'.format(args.model, noise, mode, height, width), 'wb') as w:
w.write(tflite_quant_model)
print('Float16 Quantization complete! - noise2noise_{}_{}_{}_{}x{}_float16_quant.tflite'.format(args.model, noise, mode, height, width))
def representative_dataset_gen():
for data in raw_test_data.take(10):
image = data['image'].numpy()
image = tf.image.resize(image, (height, width))
image = image[np.newaxis,:,:,:]
# image = image / 127.5 - 1.0
yield [image]
raw_test_data, info = tfds.load(name="coco/2017", with_info=True, split="test", data_dir="~/TFDS", download=False)
# Integer Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset_gen
tflite_quant_model = converter.convert()
with open('noise2noise_{}_{}_{}_{}x{}_integer_quant.tflite'.format(args.model, noise, mode, height, width), 'wb') as w:
w.write(tflite_quant_model)
print('Integer Quantization complete! - noise2noise_{}_{}_{}_{}x{}_integer_quant.tflite'.format(args.model, noise, mode, height, width))
# Full Integer Quantization - Input/Output=int8
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
converter.representative_dataset = representative_dataset_gen
tflite_quant_model = converter.convert()
with open('noise2noise_{}_{}_{}_{}x{}_full_integer_quant.tflite'.format(args.model, noise, mode, height, width), 'wb') as w:
w.write(tflite_quant_model)
        print('Full Integer Quantization complete! - noise2noise_{}_{}_{}_{}x{}_full_integer_quant.tflite'.format(args.model, noise, mode, height, width))
# # EdgeTPU
# import subprocess
# result = subprocess.check_output(["edgetpu_compiler", "-s", "noise2noise_{}_{}_{}_{}x{}_full_integer_quant.tflite".format(args.model, noise, mode, height, width)])
# print(result)
sys.exit(0)
if args.output_dir:
output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
image_paths = list(Path(image_dir).glob("*.*"))
for image_path in image_paths:
image = cv2.imread(str(image_path))
h, w, _ = image.shape
image = image[:(h // 16) * 16, :(w // 16) * 16] # for stride (maximum 16)
h, w, _ = image.shape
out_image = np.zeros((h, w * 3, 3), dtype=np.uint8)
noise_image = val_noise_model(image)
pred = model.predict(np.expand_dims(noise_image, 0))
denoised_image = get_image(pred[0])
out_image[:, :w] = image
out_image[:, w:w * 2] = noise_image
out_image[:, w * 2:] = denoised_image
if args.output_dir:
cv2.imwrite(str(output_dir.joinpath(image_path.name))[:-4] + ".png", out_image)
else:
cv2.imshow("result", out_image)
key = cv2.waitKey(-1)
# "q": quit
if key == 113:
return 0
if __name__ == '__main__':
main()
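# Added sketch (not part of the original script): loading one of the generated .tflite files with the
# TFLite interpreter. The file name below just mirrors the float32 naming pattern used above and is
# otherwise an assumption.
#
#     import numpy as np
#     import tensorflow as tf
#
#     interpreter = tf.lite.Interpreter(model_path="noise2noise_srresnet_gauss_clean_512x512_float32.tflite")
#     interpreter.allocate_tensors()
#     input_details = interpreter.get_input_details()
#     output_details = interpreter.get_output_details()
#     dummy = np.zeros(input_details[0]['shape'], dtype=input_details[0]['dtype'])
#     interpreter.set_tensor(input_details[0]['index'], dummy)
#     interpreter.invoke()
#     denoised = interpreter.get_tensor(output_details[0]['index'])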
|