| id | content |
|---|---|
1652499
|
import os
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim
from torch.optim import lr_scheduler
from torch.utils.tensorboard import SummaryWriter
import augmentations
import torch_resizer
import utils
class Network: # The base network
def __init__(self, config, device, upsample_scale=2):
self.config = config
self.upsample_scale = upsample_scale
self.channels_in = 3
self.channels_out = 3
self.device = device
self.net = self.build_network()
self.optimizer = self.define_opt()
self.loss_mask_spatial = self.config['data']['params']['augmentation_params']['crop_sizes']['loss_mask_spatial']
self.loss_mask_temporal = self.config['data']['params']['augmentation_params']['crop_sizes']['loss_mask_temporal']
self.lit_pixels = self.calc_lit_pixels()
assert self.lit_pixels > 0, 'no unmasked pixels left after applying the loss masks'
self.loss_fn = self.define_loss()
self.writer = SummaryWriter(os.path.join(config['trainer']['working_dir'], 'logs_dir'))
# total number of epochs
self.epochs = self.config['num_epochs']
# current or start epoch number
self.epoch = 0
self.iter_per_epoch = self.config['num_iter_per_epoch']
self.save_every = self.config['save_every']
self.scheduler = self.define_lr_sched()
def build_network(self): # BASE version. Other modes override this function
"""
take the network flag or parameters from config and create network
:return: net - a torch class/object that can be trained
"""
net = nn.Sequential(
nn.ConvTranspose3d(in_channels=self.channels_in, out_channels=128, kernel_size=3, padding=1, stride=(self.upsample_scale, 1, 1),
output_padding=(self.upsample_scale - 1, 0, 0)),
nn.Conv3d(in_channels=128, out_channels=128, kernel_size=(3, 3, 3), padding=1, padding_mode='replicate'),
nn.ReLU(),
nn.Conv3d(in_channels=128, out_channels=128, kernel_size=(3, 3, 3), padding=1, padding_mode='replicate'),
nn.ReLU(),
nn.Conv3d(in_channels=128, out_channels=128, kernel_size=(1, 3, 3), padding=(0, 1, 1), padding_mode='replicate'),
nn.ReLU(),
nn.Conv3d(in_channels=128, out_channels=128, kernel_size=(1, 3, 3), padding=(0, 1, 1), padding_mode='replicate'),
nn.ReLU(),
nn.Conv3d(in_channels=128, out_channels=128, kernel_size=(1, 3, 3), padding=(0, 1, 1), padding_mode='replicate'),
nn.ReLU(),
nn.Conv3d(in_channels=128, out_channels=128, kernel_size=(1, 3, 3), padding=(0, 1, 1), padding_mode='replicate'),
nn.ReLU(),
nn.Conv3d(in_channels=128, out_channels=128, kernel_size=3, padding=1, padding_mode='replicate'),
nn.ReLU(),
nn.Conv3d(in_channels=128, out_channels=128, kernel_size=3, padding=1, padding_mode='replicate'),
nn.ReLU(),
nn.Conv3d(in_channels=128, out_channels=self.channels_out, kernel_size=3, padding=1, padding_mode='replicate'),
nn.ReLU(),
).to(self.device)
return net
def define_loss(self):
loss_name = self.config['loss']['name']
if loss_name == 'MSE':
return torch.nn.MSELoss(reduction='sum')
else:
assert False, f'assertion error in define_loss(), loss does not exist, is {loss_name}'
def define_opt(self):
opt_name = self.config['optimization']['name']
learning_rate = self.config['optimization']['params']['lr']
if opt_name == 'SGD':
momentum = self.config['optimization']['params']['SGD_momentum']
return torch.optim.SGD(self.net.parameters(), lr=learning_rate, momentum=momentum)
elif opt_name == 'Adam':
return torch.optim.Adam(self.net.parameters(), lr=learning_rate)
else:
assert False, f'assertion error in define_opt(), optimizer does not exist, is {opt_name}'
def define_lr_sched(self):
gamma = self.config['lr_sched']['params']['gamma']
milestones = self.config['lr_sched']['params']['milestones']
step_size = self.config['lr_sched']['params']['step_size']
if self.config['lr_sched']['name'] == 'MultiStepLR':
return lr_scheduler.MultiStepLR(self.optimizer, milestones=milestones, gamma=gamma)
elif self.config['lr_sched']['name'] == 'StepLR':
return lr_scheduler.StepLR(self.optimizer, step_size=int(self.epochs * step_size), gamma=gamma)
else:
print('****************** NO LR_SCHED DEFINED, SETTING DEFAULT *****************************')
return lr_scheduler.StepLR(self.optimizer, step_size=self.epochs // 10, gamma=1 / 1.5)
def calc_lit_pixels(self):
spatial = self.config['data']['params']['augmentation_params']['crop_sizes']['crop_size_spatial']
temporal = self.config['data']['params']['augmentation_params']['crop_sizes']['crop_size_temporal']
lit_mask = [temporal - 2 * self.loss_mask_temporal, spatial - 2 * self.loss_mask_spatial,
spatial - 2 * self.loss_mask_spatial, 3]
return np.prod(lit_mask)
def forward_zstsr(self, input_tensor): # BASE version. Other modes override this function
return self.net(input_tensor)
def calc_loss(self, output, hr_gt):
"""
calc loss according to the flags in config
:param output: the output from the net. May need to add input if residual
:param hr_gt: the hr ground truth from the data tuple
:return: the loss
"""
loss_name = self.config['loss']['name']
# Crop away the spatial and temporal loss-mask borders
t = self.loss_mask_temporal
t_end = output.shape[2] - t
s = self.loss_mask_spatial
s_end_ver = output.shape[3] - s
s_end_hor = output.shape[4] - s
shape_masked = np.prod(
output[:, :, t:t_end, s:s_end_ver, s:s_end_hor].shape)
if loss_name == 'MSE':
return torch.sum(
(output[:, :, t:t_end, s:s_end_ver, s:s_end_hor].to(self.device) -
hr_gt[:, :, t:t_end, s:s_end_ver, s:s_end_hor].to(self.device)) ** 2.0) / shape_masked
else:
assert False, f'assertion error in calc_loss(), loss not MSE, is {loss_name}'
def train(self, data_loader_object, cumulative_scale):
"""
:param data_loader_object: data_handler object that holds the video tensor and can make all necessary augmentations
:param cumulative_scale: indicates the current training location in the global config. Needed for saving the model.
:return: None. Loss and learning rate are logged to TensorBoard each epoch.
"""
# epochs
for e in range(self.epoch, self.epochs):
t = time.time()
np.random.seed()
self.optimizer.zero_grad()
if e % self.config['val_every'] == self.config['val_every'] - 1:
if self.config['debug']:
print('Debug!\nDebug!\nNo validation!\nDebug!\nDebug!\n')
else:
print(f'applying val at epoch {e}')
self.validation(data_loader_object, cumulative_scale=cumulative_scale, epoch=e)
if e % self.config['save_every'] == self.config['save_every'] - 1:
print(f'saved model at epoch {e}')
self.save_model(epoch=e, overwrite=False, cumulative_scale=cumulative_scale)
# iterations per epochs
it = 0
for (hr_gt, lr) in data_loader_object:
hr_prediction = self.forward_zstsr(lr.to(self.device))
loss = self.calc_loss(hr_prediction, hr_gt)
it += 1
print(f'epoch:{e}, loss:{loss.item():.7f}. Time: {(time.time() - t):.2f}, lr={self.optimizer.param_groups[0]["lr"]}')
loss.backward()
self.optimizer.step()
self.scheduler.step()
self.writer.add_scalars('loss', {'loss': loss.item()}, e)
self.writer.add_scalars('lr', {'lr': self.optimizer.param_groups[0]["lr"]}, e)
# save final trained model as well
self.save_model(epoch=self.epochs, overwrite=False, cumulative_scale=cumulative_scale)
self.writer.close()
return
def validation(self, data_loader_object, cumulative_scale, epoch):
"""
run eval on the video temporally downscaled by the working scale and test recovery of the original video
:param epoch: to save with the current epoch number
:return: None, but creates the files in output folder
"""
HTR_val_tensor = data_loader_object.dataset.video_tensor # input in this training, but for val it's the HTR
# clip trailing frames so the frame count is divisible by upsample_scale (e.g. even when upsample_scale == 2)
HTR_val_tensor = HTR_val_tensor[:HTR_val_tensor.shape[0] - HTR_val_tensor.shape[0] % self.upsample_scale, ...]
LTR_val_tensor = augmentations.blur_sample_tensor(HTR_val_tensor, sample_axis=0,
sample_jump=self.upsample_scale,
blur_flag=data_loader_object.dataset.blur_flag)
predicted_val = self.eval(LTR_val_tensor)
val_loss = self.calc_loss(torch.from_numpy(np.expand_dims(predicted_val, 0)).float(), torch.from_numpy(np.expand_dims(HTR_val_tensor, 0)).float())
self.writer.add_scalars('val_loss', {'val_loss': val_loss})
print(f'VALIDATION AFTER epoch:{epoch}, loss:{val_loss:.5f}')
val_dir = os.path.join(self.config['trainer']['working_dir'], 'validation', f'cumulative_scale_{cumulative_scale}', f'epoch_{epoch}_loss_{val_loss:.5f}')
utils.save_output_result(predicted_val, val_dir)
def eval(self, video_tensor):
"""
take the input video and temporally upscale it
:param video_tensor: the whole video, on which we run the network to produce an upsampled video
:return: the upsampled prediction video
"""
video_tensor = np.copy(video_tensor)
# this tensor will be filled with crops and returned
prediction_video = np.zeros([self.upsample_scale * video_tensor.shape[0], video_tensor.shape[1], video_tensor.shape[2], video_tensor.shape[3]])
if self.config['debug']:
prediction_video = self.debug_eval(prediction_video, video_tensor)
return prediction_video
# Helper function for calculating the sizes needed for operating in crops
f_pad, f_pad_output, f_starts_input, f_starts_outputs, h_pad, h_starts, net_f_output, net_h, net_w, \
size_frames, size_height, size_width, w_pad, w_starts = self.eval_calc_param_sizes(video_tensor)
# Pad the video on all sides by needed factor
video_tensor = np.pad(video_tensor, [(f_pad, f_pad), (h_pad, h_pad), (w_pad, w_pad), (0, 0)], 'symmetric')
# create a [f,h,w,c] block of size defined above
for f_ind, f_start in enumerate(f_starts_input):
print(f'EVAL: frame start:{f_start}')
for h_ind, h_start in enumerate(h_starts):
for w_ind, w_start in enumerate(w_starts):
if (f_start + size_frames - 1) > (video_tensor.shape[0]) or (h_start + size_height - 1) > \
video_tensor.shape[1] or (w_start + size_width - 1) > video_tensor.shape[2]:
print('eval error: should not reach here - size issue')
continue
crop = video_tensor[f_start:f_start + size_frames, h_start:h_start + size_height,
w_start:w_start + size_width, :]
net_output = self.eval_forward_crop(crop)
# snip and save in the entire output video
try:
# snip edges - according to the padding parameter
net_output = net_output[f_pad_output:-f_pad_output, h_pad:-h_pad, w_pad:-w_pad, :]
# Notice: the "frames" axis of the output is upsample_scale times the net size of the input
prediction_video[f_starts_outputs[f_ind]:f_starts_outputs[f_ind] + net_f_output,
h_start:h_start + net_h, w_start:w_start + net_w, :] = net_output.detach().cpu().numpy()
except Exception:
print('eval error: should not reach here - cropping/stitching issue')
return prediction_video
def debug_eval(self, prediction_video, video_tensor):
print(f'Debug!\nDebug!\nDebug!\nDebug!\nDebug!\nDebug!\nDebug!\nDebug!\nDebug!\nDebug!\n')
debug_method = 'copy_frame' # 'copy_frame' or 'interpolate'. If neither, returns zeros
if debug_method == 'copy_frame':
for frame_up_idx in range(prediction_video.shape[0]):
prediction_video[frame_up_idx, :, :, :] = video_tensor[int(frame_up_idx / self.upsample_scale), :, :, :]
elif debug_method == 'interpolate':
resizer = torch_resizer.Resizer(video_tensor.shape[:], scale_factor=(self.upsample_scale, 1, 1, 1),
output_shape=[video_tensor.shape[0] * self.upsample_scale, video_tensor.shape[1], video_tensor.shape[2], video_tensor.shape[3]],
kernel='cubic', antialiasing=True, device=self.device)
prediction_video = resizer.forward(torch.tensor(video_tensor).to(self.device)).cpu().numpy()
return prediction_video.squeeze()
def eval_calc_param_sizes(self, video_tensor):
size_frames = self.config['data']['params']['eval_params']['size_frames']
size_height = self.config['data']['params']['eval_params']['size_height']
size_width = self.config['data']['params']['eval_params']['size_width']
f_pad = self.config['data']['params']['eval_params']['pad_frames']
h_pad = self.config['data']['params']['eval_params']['pad_height']
w_pad = self.config['data']['params']['eval_params']['pad_width']
f_pad_output = self.upsample_scale * f_pad
net_f = size_frames - 2 * f_pad # The actual size added by each forward, need to remove the padding. 2 because each side
net_f_output = self.upsample_scale * net_f
net_h = size_height - 2 * h_pad
net_w = size_width - 2 * w_pad
# The start points for crops, advance in each axis by its net_size each crop
f_starts_input = np.arange(0, video_tensor.shape[0], net_f)
f_starts_input[-1] = video_tensor.shape[0] - net_f # For final crop at each dim
f_starts_outputs = self.upsample_scale * f_starts_input # output is *scale the frames
h_starts = np.arange(0, video_tensor.shape[1], net_h)
h_starts[-1] = video_tensor.shape[1] - net_h
w_starts = np.arange(0, video_tensor.shape[2], net_w)
w_starts[-1] = video_tensor.shape[2] - net_w
return f_pad, f_pad_output, f_starts_input, f_starts_outputs, h_pad, h_starts, \
net_f_output, net_h, net_w, size_frames, size_height, size_width, w_pad, w_starts
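# Worked example (hypothetical eval_params): with size_frames=8 and f_pad=2,
# net_f = 8 - 2*2 = 4, so the crop window advances 4 input frames per crop;
# with upsample_scale=2 each crop contributes net_f_output = 8 output frames,
# and f_pad_output = 4 frames are trimmed from each temporal edge of the output.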
def eval_forward_crop(self, crop):
"""
helper function for eval - prepares and forwards the crop
"""
# prep to send to torch (GPU)
permutation_np_to_torch = (3, 0, 1, 2) # move channels to first
crop = np.transpose(crop, permutation_np_to_torch)
video_tensor_torch = torch.unsqueeze(torch.from_numpy(crop).float(), dim=0).to(self.device)
# EVAL current block
self.net.eval()
with torch.no_grad():
# squeeze the batch dimension to get [c,f,h,w]
net_output = torch.squeeze(self.forward_zstsr(video_tensor_torch).to(self.device))
# transpose back to [f,h,w,c]
net_output = net_output.permute((1, 2, 3, 0))
return net_output
def save_model(self, epoch=None, scale=None, overwrite=False, cumulative_scale=2):
"""
Saves the model (state dict, optimizer and lr_sched).
:return:
"""
if overwrite:
checkpoint_list = sorted(i for i in os.listdir(os.path.join(self.config['trainer']['working_dir'])) if i.endswith('.pth.tar'))
if len(checkpoint_list) != 0:
os.remove(os.path.join(self.config['trainer']['working_dir'], checkpoint_list[-1]))
filename = 'checkpoint{}{}.pth.tar'.format('' if epoch is None else '-e{:05d}'.format(epoch),
'' if scale is None else '-s{:02d}'.format(scale))
folder = os.path.join(self.config['trainer']['working_dir'], 'saved_models', f'cumulative_scale_{cumulative_scale}')
os.makedirs(folder, exist_ok=True)
torch.save({'epoch': epoch,
'sd': self.net.state_dict(),
'opt': self.optimizer.state_dict()},
# 'lr_sched': self.scheduler.state_dict()},
os.path.join(folder, filename))
def load_model(self, filename):
checkpoint = torch.load(filename)
self.net.load_state_dict(checkpoint['sd'], strict=False)
self.optimizer.load_state_dict(checkpoint['opt'])
self.epoch = checkpoint['epoch']
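# --- Minimal usage sketch (illustrative, not part of the original module). ---
# It shows the config keys Network reads; all values below are hypothetical.
if __name__ == '__main__':
demo_config = {
'num_epochs': 10, 'num_iter_per_epoch': 1, 'save_every': 5, 'val_every': 5,
'debug': True,
'loss': {'name': 'MSE'},
'optimization': {'name': 'Adam', 'params': {'lr': 1e-4}},
'lr_sched': {'name': 'StepLR', 'params': {'gamma': 0.5, 'milestones': [5], 'step_size': 0.5}},
'trainer': {'working_dir': '/tmp/zstsr_demo'},
'data': {'params': {'augmentation_params': {'crop_sizes': {
'loss_mask_spatial': 2, 'loss_mask_temporal': 1,
'crop_size_spatial': 32, 'crop_size_temporal': 8}}}},
}
demo_net = Network(demo_config, device='cpu')
lr_clip = torch.randn(1, 3, 8, 32, 32)  # [batch, channels, frames, h, w]
hr_clip = demo_net.forward_zstsr(lr_clip)
print(hr_clip.shape)  # frames doubled: torch.Size([1, 3, 16, 32, 32])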
|
1652523
|
import os
from airflow.configuration import conf
from ewah.dag_factories import dags_from_yml_file
for dag in dags_from_yml_file(conf.get("core", "dags_folder") + os.sep + "dags.yml"):
# Must add the individual DAGs to the global namespace,
# otherwise airflow does not find the DAGs!
globals()[dag._dag_id] = dag
|
1652590
|
from ..base import BaseTopazTest
class TestTrueObject(BaseTopazTest):
def test_name(self, space):
space.execute("TrueClass")
def test_to_s(self, space):
w_res = space.execute("return true.to_s")
assert space.str_w(w_res) == "true"
def test_inspect(self, space):
w_res = space.execute("return true.inspect")
assert space.str_w(w_res) == "true"
def test_eql(self, space):
w_res = space.execute("return true == false")
assert self.unwrap(space, w_res) is False
w_res = space.execute("return true == true")
assert self.unwrap(space, w_res) is True
def test_and(self, space):
w_res = space.execute("return true & 3")
assert w_res is space.w_true
w_res = space.execute("return true & false")
assert w_res is space.w_false
def test_or(self, space):
w_res = space.execute("return true | 3")
assert w_res is space.w_true
w_res = space.execute("return true | nil")
assert w_res is space.w_true
def test_xor(self, space):
assert space.execute("return true ^ nil") is space.w_true
assert space.execute("return true ^ false") is space.w_true
assert space.execute("return true ^ true") is space.w_false
assert space.execute("return true ^ 1") is space.w_false
def test_singleton_class(self, space):
w_res = space.execute("return true.singleton_class == TrueClass")
assert w_res is space.w_true
class TestFalseObject(BaseTopazTest):
def test_name(self, space):
space.execute("FalseClass")
def test_to_s(self, space):
w_res = space.execute("return false.to_s")
assert space.str_w(w_res) == "false"
def test_inspect(self, space):
w_res = space.execute("return false.inspect")
assert space.str_w(w_res) == "false"
def test_eql(self, space):
w_res = space.execute("return false == false")
assert self.unwrap(space, w_res) is True
w_res = space.execute("return false == true")
assert self.unwrap(space, w_res) is False
def test_and(self, space):
w_res = space.execute("return false & 3")
assert w_res is space.w_false
w_res = space.execute("return false & false")
assert w_res is space.w_false
def test_or(self, space):
w_res = space.execute("return false | 3")
assert w_res is space.w_true
w_res = space.execute("return false | nil")
assert w_res is space.w_false
def test_xor(self, space):
assert space.execute("return false ^ nil") is space.w_false
assert space.execute("return false ^ false") is space.w_false
assert space.execute("return false ^ true") is space.w_true
assert space.execute("return false ^ 1") is space.w_true
def test_singleton_class(self, space):
w_res = space.execute("return false.singleton_class == FalseClass")
assert w_res is space.w_true
|
1652599
|
from haystack import indexes
from mozdns.mx.models import MX
from mozdns.mozdns_index import MozdnsIndex
class MXIndex(MozdnsIndex, indexes.Indexable):
server = indexes.CharField(model_attr='server')
def get_model(self):
return MX
|
1652650
|
from lumen.sources import Source
def test_resolve_module_type():
assert Source._get_type('lumen.sources.base.Source') is Source
|
1652664
|
import asyncio
import logging
import aiohttp
from demonhunter.core.loggers.logfile import FileLogger
class BaseHandler:
# https://svn.nmap.org/nmap/nmap-service-probes
def save_data(self, data):
if self.honeypot.sqlite:
self.save_in_sqlite(data)
if self.honeypot.logfile:
self.save_logfile(data)
self.alter_agents(data)
def alter_agents(self, data):
for agent in self.honeypot.agents:
asyncio.ensure_future(agent.send_data(data))
def save_in_sqlite(self, data):
pass
def save_logfile(self, data):
self.honeypot.file_logger.log(data)
class BaseHoneypot(object):
active_attacks = 0
def __init__(self, logfile=None, sqlite=None, interfaces=None):
self.logfile = logfile
self.sqlite = sqlite
# avoid a shared mutable default argument
self.interfaces = interfaces if interfaces is not None else ['0.0.0.0']
if self.logfile:
self.file_logger = FileLogger(self.logfile)
self.agents = []
def add_agent(self, agent):
self.agents.append(agent)
def create_server(self, loop):
coro = loop.create_server(lambda: self.handler(self), self.interfaces, self.port)
server = loop.run_until_complete(coro)
for socket in server.sockets:
logging.info('Serving on {0}'.format(socket.getsockname()))
return server
class Agent():
def __init__(self, manager_address, honeypots, token):
for honeypot in honeypots:
honeypot.agents.append(self)
self.manager_address = manager_address
if manager_address.endswith('/'):
self.manager_address = manager_address[:-1]
self.token = token
@property
def _address(self):
return "%s/agents/call/%s/" % (self.manager_address, self.token)
async def send_data(self, data):
async with aiohttp.ClientSession() as session:
async with session.post(self._address, json=data) as resp:
pass
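# --- Minimal wiring sketch (illustrative, not part of the original module). ---
# A concrete honeypot must supply `handler` (an asyncio protocol factory taking
# the honeypot instance, see create_server) and `port`; names here are hypothetical.
class EchoHoneypot(BaseHoneypot):
port = 2323
class handler(asyncio.Protocol):
def __init__(self, honeypot):
self.honeypot = honeypot
def connection_made(self, transport):
self.transport = transport
def data_received(self, data):
self.transport.write(data)  # echo back; a real handler would call save_data()
# loop = asyncio.get_event_loop()
# server = EchoHoneypot().create_server(loop)
# loop.run_forever()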
|
1652721
|
import argparse
import logging
from pathlib import Path
from subs2cia.sources import get_and_partition_streams, AVSFile, Stream
from pprint import pprint
def get_args():
parser = argparse.ArgumentParser(description='frame capture manual testing')
parser.add_argument('-V', '--video', metavar='<input file>', dest='infiles', default=None, required=True,
nargs='+', type=str, help='Input files to probe')
parser.add_argument('-v', '--verbose', action='store_true', dest='verbose', default=False,
help='Verbose output if set.')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = get_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
sources = [AVSFile(Path(source)) for source in args.infiles]
for s in sources:
s.probe() # run ffprobe
s.get_type() # determine filetype from ffprobe results (video, audio, subtitle, unknown)
pprint(sources)
partitioned_streams = get_and_partition_streams(sources) # dict of lists of Streams
pprint(partitioned_streams)
for k in ['subtitle', 'audio', 'video']:
print(f"Available {k} streams:")
for idx, stream in enumerate(partitioned_streams[k]):
desc_str = ''
if "codec_name" in stream.stream_info:
desc_str = desc_str + "codec: " + stream.stream_info['codec_name'] + ", "
if "tags" in stream.stream_info:
tags = stream.stream_info['tags']
if "language" in tags:
desc_str = desc_str + "lang_code: " + tags['language'] + ", "
if "title" in tags:
desc_str = desc_str + "title: " + tags['title'] + ", "
if desc_str == '':
desc_str = f"Stream {idx: 3}: no information found"
else:
desc_str = f"Stream {idx: 3}: {desc_str}"
print(desc_str)
print("")
|
1652731
|
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
from __future__ import print_function
from collections import defaultdict as dd
import numpy as np
import sklearn
from torch.utils.data import Dataset
from core.utils import feature_utils
from core.utils import data_utils
from core.utils import settings
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') # include timestamp
class CNNMatchDataset(Dataset):
def __init__(self, file_dir, matrix_size1, matrix_size2, build_index_window, seed, shuffle):
self.file_dir = file_dir
self.build_index_window = build_index_window
self.matrix_title_size = matrix_size1
self.matrix_author_size = matrix_size2
# load training pairs
pos_pairs = data_utils.load_json(file_dir, 'pos-pairs-train.json')
pos_pairs = [(p['c'], p['n']) for p in pos_pairs]
neg_pairs = data_utils.load_json(file_dir, 'neg-pairs-train.json')
neg_pairs = [(p['c'], p['n']) for p in neg_pairs]
labels = [1] * len(pos_pairs) + [0] * len(neg_pairs)
pairs = pos_pairs + neg_pairs
# each pair yields two matrices: the original and its transpose
n_matrix = len(pairs) * 2
self.X_title = np.zeros((n_matrix, self.matrix_title_size, self.matrix_title_size))
self.X_author = np.zeros((n_matrix, self.matrix_author_size, self.matrix_author_size))
self.Y = np.zeros(n_matrix, dtype=np.int64)
count = 0
for i, pair in enumerate(pairs):
if i % 100 == 0:
logger.info('pairs to matrices %d', i)
cpaper, npaper = pair
cur_y = labels[i]
matrix1 = self.titles_to_matrix(cpaper['title'], npaper['title'])
self.X_title[count] = feature_utils.scale_matrix(matrix1)
matrix2 = self.authors_to_matrix(cpaper['authors'], npaper['authors'])
self.X_author[count] = feature_utils.scale_matrix(matrix2)
self.Y[count] = cur_y
count += 1
# transpose
self.X_title[count] = feature_utils.scale_matrix(matrix1.transpose())
self.X_author[count] = feature_utils.scale_matrix(matrix2.transpose())
self.Y[count] = cur_y
count += 1
if shuffle:
self.X_title, self.X_author, self.Y = sklearn.utils.shuffle(
self.X_title, self.X_author, self.Y,
random_state=seed
)
self.N = len(self.Y)
def __len__(self):
return self.N
def __getitem__(self, idx):
return self.X_title[idx], self.X_author[idx], self.Y[idx]
def get_noisy_papers_test(self):
return data_utils.load_json_lines(self.file_dir, 'noisy-papers-test.dat')
def titles_to_matrix(self, title1, title2):
twords1 = feature_utils.get_words(title1)[: self.matrix_title_size]
twords2 = feature_utils.get_words(title2)[: self.matrix_title_size]
matrix = -np.ones((self.matrix_title_size, self.matrix_title_size))
for i, word1 in enumerate(twords1):
for j, word2 in enumerate(twords2):
matrix[i][j] = (1 if word1 == word2 else -1)
return matrix
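# Example (assuming feature_utils.get_words is a plain word tokenizer):
# titles_to_matrix("deep learning", "deep nets") with matrix_title_size=3 yields
# [[ 1, -1, -1],
#  [-1, -1, -1],
#  [-1, -1, -1]]
# since only "deep" matches "deep" at (0, 0); all other cells stay -1.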
def authors_to_matrix(self, authors1, authors2):
matrix = -np.ones((self.matrix_author_size, self.matrix_author_size))
author_num = int(self.matrix_author_size/2)
try:
for i in range(author_num):
row = 2 * i
a1 = authors1[i].lower().split()
first_name1 = a1[0][0]
last_name1 = a1[-1][0]
col = row
a2 = authors2[i].lower().split()
first_name2 = a2[0][0]
last_name2 = a2[-1][0]
matrix[row][col] = feature_utils.name_equal(first_name1, first_name2)
matrix[row][col+1] = feature_utils.name_equal(first_name1, last_name2)
matrix[row+1][col] = feature_utils.name_equal(last_name1, first_name2)
matrix[row+1][col+1] = feature_utils.name_equal(last_name1, last_name2)
except Exception:
# typically fewer than author_num authors; leave the remaining cells at -1
pass
return matrix
def get_id2cpapers(self):
cpapers_train = data_utils.load_json_lines(self.file_dir, 'clean-papers-train.dat')
cpapers_test = data_utils.load_json_lines(self.file_dir, 'clean-papers-test.dat')
cpapers = cpapers_train + cpapers_test
id2paper = {}
for paper in cpapers:
paper['id'] = str(paper['id'])
pid = paper['id']
id2paper[pid] = paper
# data_utils.dump_json(id2paper, self.file_dir, 'clean-id2paper.json')
return id2paper
def build_cpapers_inverted_index(self):
logger.info('build inverted index for cpapers')
cpapers_train = data_utils.load_json_lines(self.file_dir, 'clean-papers-train.dat')
cpapers_test = data_utils.load_json_lines(self.file_dir, 'clean-papers-test.dat')
papers = cpapers_train + cpapers_test
word2ids = dd(list)
for paper in papers:
pid = str(paper['id'])
title = paper['title']
words = feature_utils.get_words(title.lower(), window=self.build_index_window)
for word in words:
word2ids[word].append(pid)
for word in word2ids:
word2ids[word] = list(set(word2ids[word]))
# data_utils.dump_json(word2ids, self.file_dir, 'clean-papers-inverted-index.json')
logger.info('building inverted index completed')
return word2ids
def get_candidates_by_inverted_index(self, npaper, word2ids):
title = npaper['title'].lower()
words = feature_utils.get_words(title, window=self.build_index_window)
cids_to_freq = dd(int)
for word in words:
if word in word2ids:
cur_cids = word2ids[word]
for cid in cur_cids:
cids_to_freq[cid] += 1
sorted_items = sorted(cids_to_freq.items(), key=lambda kv: kv[1], reverse=True)[:20]
cand_cids = [item[0] for item in sorted_items]
return cand_cids
if __name__ == '__main__':
dataset = CNNMatchDataset(file_dir=settings.PAPER_DATA_DIR,
matrix_size1=7, matrix_size2=4, build_index_window=5,
seed=42, shuffle=True)
|
1652736
|
import sys
from pathlib import Path
sys.path.append("..")
from energym import make
from energym.envs.utils.kpi import KPI
from energym.envs.utils.weather import EPW, MOS
def run_gym_interface(env_name, episodes=2, n_steps_per_episode=100):
"""Helper shared by the environment tests below: steps a random policy."""
env = make(env_name)
for _ in range(episodes):
observation = env.get_output()
print(observation)
for _ in range(n_steps_per_episode):
action = env.sample_random_action()
observation = env.step(action)
print("Episode finished")
env.reset()
env.close()
def test_can_run_gym_interface_on_apartments_thermal():
run_gym_interface("ApartmentsThermal-v0")
def test_can_run_gym_interface_on_apartments_grid():
run_gym_interface("ApartmentsGrid-v0")
def test_can_run_gym_interface_on_apartments2_thermal():
run_gym_interface("Apartments2Thermal-v0")
def test_can_run_gym_interface_on_apartments2_grid():
run_gym_interface("Apartments2Grid-v0")
def test_can_run_gym_interface_on_offices_thermostat():
run_gym_interface("OfficesThermostat-v0")
def test_can_run_gym_interface_on_mixeduse_fan_fcu():
run_gym_interface("MixedUseFanFCU-v0")
def test_can_run_gym_interface_on_seminarcenter_thermostat():
run_gym_interface("SeminarcenterThermostat-v0")
def test_can_run_gym_interface_on_seminarcenter_full():
run_gym_interface("SeminarcenterFull-v0")
def full_test():
test_can_run_gym_interface_on_apartments_thermal()
test_can_run_gym_interface_on_apartments_grid()
test_can_run_gym_interface_on_apartments2_thermal()
test_can_run_gym_interface_on_apartments2_grid()
test_can_run_gym_interface_on_offices_thermostat()
test_can_run_gym_interface_on_mixeduse_fan_fcu()
test_can_run_gym_interface_on_seminarcenter_thermostat()
test_can_run_gym_interface_on_seminarcenter_full()
def epw_test():
energym_path = Path(__file__).resolve().parent.parent.parent
print(energym_path)
weather_eplus = EPW()
path_eplus = (
energym_path
/ "simulation" / "energyplus" / "offices" / "wf" / "GRC_Athens.167160_IWEC.epw"
)
print(path_eplus)
weather_eplus.read(path_eplus)
print(weather_eplus.get_forecast(1, 1, 1, 24))
weather_mod = MOS()
path_mod = energym_path / "simulation" / "modelica" / "dummy" / "wf" / "Basel_Fixed.mos"
weather_mod.read(path_mod)
print(weather_mod.get_forecast(3600, 24))
def test_KPI_construct():
kpi_opts1 = {
"kpi1": {"name": "Z01_T", "type": "avg_dev", "target": [19, 23]},
"kpi2": {"name": "Z01_T", "type": "tot_viol", "target": [19, 23]},
"kpi3": {"name": "Fa_Pw_All", "type": "avg"},
"kpi4": {"name": "Fa_Pw_All", "type": "sum"},
}
kpi = KPI(kpi_opts1)
assert len(kpi.kpi_dict) == 4
def test_KPI_add():
kpi_opts1 = {
"kpi1": {"name": "Z01_T", "type": "avg_dev", "target": 22.0},
"kpi2": {
"name": "Z01_T",
"type": "tot_viol",
"target": [19.0, 23.0],
},
"kpi3": {"name": "Fa_Pw_All", "type": "avg"},
"kpi4": {"name": "Fa_Pw_All", "type": "sum"},
}
kpi = KPI(kpi_opts1)
obs = {
"Fa_Pw_All": [100.0],
"Z01_T": [21.0],
"Ext_T": [15.1],
"Something_else": [3],
}
kpi.add_observation(obs)
res_dict = kpi.get_kpi()
assert res_dict["kpi1"]["kpi"] == 1.0
assert res_dict["kpi2"]["kpi"] == 0
assert res_dict["kpi3"]["kpi"] == 100.0
assert res_dict["kpi4"]["kpi"] == 100.0
obs = {
"Fa_Pw_All": [300.0],
"Z01_T": [25.0],
"Ext_T": [15.1],
"Something_else": [3],
}
kpi.add_observation(obs)
res_dict = kpi.get_kpi()
assert res_dict["kpi1"]["kpi"] == 2.0
assert res_dict["kpi2"]["kpi"] == 1
assert res_dict["kpi3"]["kpi"] == 200.0
assert res_dict["kpi4"]["kpi"] == 400.0
def test_KPI_cumulative():
kpi_opts1 = {
"kpi1": {"name": "Z01_T", "type": "tot_viol", "target": [19, 23]},
"kpi2": {"name": "Z02_T", "type": "tot_viol", "target": [19, 23]},
"kpi3": {"name": "Z03_T", "type": "tot_viol", "target": [19, 23]},
"kpi4": {"name": "Fa_Pw_All", "type": "sum"},
}
kpi = KPI(kpi_opts1)
obs = {
"Fa_Pw_All": [100.0],
"Z01_T": [21.0],
"Z02_T": [15.1],
"Z03_T": [3],
}
kpi.add_observation(obs)
kpi_val = kpi.get_cumulative_kpi("_T", "tot_viol", "sum")
assert kpi_val == 2
if __name__ == "__main__":
full_test()
epw_test()
test_KPI_construct()
test_KPI_add()
test_KPI_cumulative()
|
1652760
|
import pytest
from fragile.core.utils import get_plangym_env
class TestUtils:
def test_get_plangym_env(self):
class dummy_shape:
shape = (2, 2)
class DummyEnv:
def __init__(self):
class dummy_n:
n = 1
self.action_space = dummy_n
self.observation_space = dummy_shape
def get_state(self):
return dummy_shape
class DummySwarm:
@property
def env(self):
class dummy_env:
_env = DummyEnv
return dummy_env
swarm = DummySwarm()
with pytest.raises(TypeError):
get_plangym_env(swarm)
class DummySwarm:
@property
def env(self):
from fragile.core.env import DiscreteEnv
return DiscreteEnv(DummyEnv())
swarm = DummySwarm()
with pytest.raises(TypeError):
get_plangym_env(swarm)
|
1652824
|
from periphery import I2C
def main():
print("i2c connection test")
# Open i2c-2 controller
i2c = I2C("/dev/i2c-2")
print("Write to I2C")
readbuffer = bytearray(19)
# Create a list of messages to send to slave
# First message is example of writing string
# Second message reads bytes up to size of readbuffer
msg = [ I2C.Message("Hello from Atlas\n".encode("utf-8")),
I2C.Message(readbuffer, read=True) ]
# Send messages to address 4
i2c.transfer(4, msg)
print(f'Read from I2C: {msg[1].data.decode("utf-8")}')
i2c.close()
if __name__ == "__main__":
main()
|
1652833
|
import torch.nn as nn
import torch
import math
class GCN(nn.Module):
def __init__(self, hid_size=256):
super(GCN, self).__init__()
self.hid_size = hid_size
self.W = nn.Parameter(torch.FloatTensor(self.hid_size, self.hid_size//2).cuda())
self.b = nn.Parameter(torch.FloatTensor(self.hid_size//2, ).cuda())
self.linear_gcn = nn.Linear(hid_size // 2 * 2, hid_size // 2)
self.init()
def init(self):
stdv = 1/math.sqrt(self.hid_size//2)
self.W.data.uniform_(-stdv, stdv)
self.b.data.uniform_(-stdv, stdv)
def forward(self, inp, adj, is_relu=True):
inp = torch.matmul(inp, self.W)+self.b # [BS, SL, HS]
out = torch.matmul(adj, inp) # adj [BS, SL, SL]
batch_size, seq_len, _ = inp.size()
if len(adj.size()) > 3:
out = self.linear_gcn(out.transpose(0, 1).contiguous().transpose(1, 2).contiguous().view(batch_size, seq_len, -1))
if is_relu:
out = nn.functional.relu(out)
return out
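# --- Minimal forward sketch (illustrative, not part of the original module). ---
# The parameters above are created with .cuda(), so a CUDA device is required.
if __name__ == '__main__':
gcn = GCN(hid_size=256)
inp = torch.randn(2, 10, 256).cuda()  # [BS, SL, HS]
adj = torch.softmax(torch.randn(2, 10, 10), dim=-1).cuda()  # row-normalized adjacency
out = gcn(inp, adj)
print(out.shape)  # torch.Size([2, 10, 128])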
|
1652841
|
from Engine.GUI import *
EnabledHealerFriend = False
class HealerFriend:
def __init__(self, root):
self.HealerFriend = GUI('HealerFriend', 'Module: Healer Friend')
self.HealerFriend.DefaultWindow('DefaultWindow')
def SetHealerFriend():
global EnabledHealerFriend
if not EnabledHealerFriend:
EnabledHealerFriend = True
ButtonEnabled.configure(text='HealerFriend: ON')
ScanHealerFriend()
else:
EnabledHealerFriend = False
ButtonEnabled.configure(text='HealerFriend: OFF')
def ScanHealerFriend():
if EnabledHealerFriend:
print("Try Lock HealerFriend")
print("Try This")
root.after(300, ScanHealerFriend)
CheckPrint = tk.BooleanVar()
LowMana = tk.BooleanVar()
self.HealerFriend.addButton('Ok', self.HealerFriend.destroyWindow, [84, 29, 130, 504], [127, 17, 8], [123, 13, 5])
global EnabledHealerFriend
if not EnabledHealerFriend:
ButtonEnabled = self.HealerFriend.addButton('HealerFriend: OFF', SetHealerFriend, [328, 29, 12, 469],
[127, 17, 8], [123, 13, 5])
else:
ButtonEnabled = self.HealerFriend.addButton('HealerFriend: ON', SetHealerFriend, [328, 29, 12, 469],
[127, 17, 8], [123, 13, 5])
ButtonPrint = self.HealerFriend.addCheck(CheckPrint, [10, 408], [120, 98, 51], 0, "Print on Tibia's screen")
ButtonLowMana = self.HealerFriend.addCheck(LowMana, [10, 440], [120, 98, 51], 0, "Low Mana Warnings")
self.HealerFriend.loop()
|
1652869
|
from __future__ import division
import numpy as np
from numpy import pi, exp
from ..Contour import Contour
from ..Paths import ComplexLine, ComplexArc
class AnnulusSector(Contour):
"""
A sector of an annulus in the complex plane.
Parameters
----------
center : complex
The center of the annulus sector.
radii : tuple
Tuple of length two of the form (inner_radius, outer_radius)
phiRange : tuple
Tuple of length two of the form (phi0, phi1).
The segment of the contour containing inner and outer circular
arcs will be joined, counter clockwise from phi0 to phi1.
Examples
--------
.. plot::
:include-source:
from numpy import pi
from cxroots import AnnulusSector
annulusSector = AnnulusSector(center=0.2, radii=(0.5, 1.25), phiRange=(-pi/4, pi/4))
annulusSector.show()
.. plot::
:include-source:
from numpy import pi
from cxroots import AnnulusSector
annulusSector = AnnulusSector(center=0.2, radii=(0.5, 1.25), phiRange=(pi/4, -pi/4))
annulusSector.show()
"""
def __init__(self, center, radii, phiRange):
self.center = center
self.axisName = ('r', 'phi')
if phiRange[0] > phiRange[1]:
phiRange = (phiRange[0], phiRange[1]+2*pi)
phi0, phi1 = self.phiRange = phiRange
# r > 0
r0, r1 = self.radii = radii
if r0 < 0 or r1 <= 0:
raise ValueError('Radii must be positive: inner >= 0 and outer > 0')
# vertices: [[r0,phi0],[r1,phi0],[r1,phi1],[r0,phi1]]
self.z1 = z1 = center + r0*exp(1j*phi0)
self.z2 = z2 = center + r1*exp(1j*phi0)
self.z3 = z3 = center + r1*exp(1j*phi1)
self.z4 = z4 = center + r0*exp(1j*phi1)
segments = [ComplexLine(z1,z2),
ComplexArc(center,r1,phi0,phi1-phi0),
ComplexLine(z3,z4),
ComplexArc(center,r0,phi1,phi0-phi1)]
super(AnnulusSector, self).__init__(segments)
def __str__(self):
return 'Annulus sector: center={center.real:.3f}{center.imag:+.3f}i, r0={radii[0]:.3f}, r1={radii[1]:.3f}, phi0={phiRange[0]:.3f}, phi1={phiRange[1]:.3f}'.format(center=self.center, radii=self.radii, phiRange=self.phiRange)
@property
def centralPoint(self):
# get the central point within the contour
r = (self.radii[0] + self.radii[1])/2
phi = (self.phiRange[0] + self.phiRange[1])/2
return self.center + r*exp(1j*phi)
@property
def area(self):
# area = (r1^2 - r0^2) * dphi / 2, with dphi wrapped into [0, 2*pi)
return (self.radii[1]**2 - self.radii[0]**2)*(abs(self.phiRange[1] - self.phiRange[0])%(2*pi))/2
def contains(self, z):
""" Returns True if the point z lies within the contour, False if otherwise """
angle = np.angle(z - self.center)%(2*pi) # np.angle maps to [-pi,pi]
radiusCorrect = self.radii[0] < abs(z - self.center) < self.radii[1]
phi = np.mod(self.phiRange, 2*pi)
if phi[0] > phi[1]:
angleCorrect = phi[0] < angle <= 2*pi or 0 <= angle < phi[1]
else:
angleCorrect = phi[0] < angle < phi[1]
return radiusCorrect and angleCorrect
def subdivide(self, axis, divisionFactor=0.5):
"""
Subdivide the contour
Parameters
----------
axis : str, can be either 'r' or 'phi'
The axis along which the line subdividing the contour is a constant.
divisionFactor : float in range (0,1), optional
Determines the point along 'axis' at which the line dividing the box is placed
Returns
-------
box1 : AnnulusSector
If axis is 'r' then phiRange and the inner radius is the same as original AnnulusSector
with the outer radius determined by the divisionFactor.
If axis is 'phi' then the radii and phiRange[0] is the same as the original AnnulusSector
with phiRange[1] determined by the divisionFactor.
box2 : AnnulusSector
If axis is 'r' then phiRange and the outer radius is the same as original AnnulusSector
with the inner radius determined equal to the outer radius of box1.
If axis is 'phi' then the radii and phiRange[1] is the same as the original AnnulusSector
with phiRange[0] equal to phiRange[1] of box1.
"""
r0, r1 = self.radii
phi0, phi1 = self.phiRange
if axis == 0 or axis == self.axisName[0]:
divisionPoint = r0 + divisionFactor*(r1-r0)
box1 = AnnulusSector(self.center, [r0, divisionPoint], self.phiRange)
box2 = AnnulusSector(self.center, [divisionPoint, r1], self.phiRange)
# reuse line segments from original box where possible
# this allows the cached integrals to be used
box1.segments[3] = self.segments[3]
box2.segments[1] = self.segments[1]
box1.segments[1]._reversePath = box2.segments[3]
box2.segments[3]._reversePath = box1.segments[1]
elif axis == 1 or axis == self.axisName[1]:
divisionPoint = phi0 + divisionFactor*(phi1-phi0)
box1 = AnnulusSector(self.center, self.radii, [phi0, divisionPoint])
box2 = AnnulusSector(self.center, self.radii, [divisionPoint, phi1])
box1.segments[0] = self.segments[0]
box2.segments[2] = self.segments[2]
box1.segments[2]._reversePath = box2.segments[0]
box2.segments[0]._reversePath = box1.segments[2]
for box in [box1, box2]:
box._createdBySubdivisionAxis = axis
box._parentBox = self
self._childBoxes = [box1, box2]
return box1, box2
def randomPoint(self):
"""Returns a random point inside the contour of the AnnulusSector."""
r = np.random.uniform(*self.radii)
phiRange = np.mod(self.phiRange, 2*pi)
if phiRange[0] > phiRange[1]:
phi = np.random.choice([np.random.uniform(phiRange[0], 2*pi),
np.random.uniform(0, phiRange[1])])
else:
phi = np.random.uniform(*phiRange)
return r*exp(1j*phi) + self.center
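# --- Minimal usage sketch (illustrative, not part of the original module). ---
if __name__ == '__main__':
sector = AnnulusSector(center=0.2, radii=(0.5, 1.25), phiRange=(-pi/4, pi/4))
print(sector.area)  # (1.25**2 - 0.5**2) * (pi/2) / 2
print(sector.contains(sector.centralPoint))  # True
inner, outer = sector.subdivide('r', divisionFactor=0.5)  # split along the radial axis
print(inner.radii, outer.radii)  # [0.5, 0.875], [0.875, 1.25]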
|
1652895
|
import numpy as np
from nilabels.tools.detections.get_segmentation import intensity_segmentation, otsu_threshold, MoG_array
# ----- Test get segmentation ----
def test_intensity_segmentation_1():
im_array = np.random.randint(0, 5, [10, 10], np.uint8)
output_segm = intensity_segmentation(im_array)
# if the input is a segmentation with 5 labels, the segmentation is the input.
np.testing.assert_array_equal(im_array, output_segm)
def test_intensity_segmentation_2():
seed_segm = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5])
seed_image = np.linspace(0, 5, len(seed_segm))
segm = np.stack([seed_segm, ]*6)
image = np.stack([seed_image, ]*6)
output_segm = intensity_segmentation(image, num_levels=6)
np.testing.assert_array_equal(segm, output_segm)
segm_transposed = segm.T
image_transposed = image.T
output_segm_transposed = intensity_segmentation(image_transposed, num_levels=6)
np.testing.assert_array_equal(segm_transposed, output_segm_transposed)
def test_otsu_threshold_bad_input():
with np.testing.assert_raises(IOError):
otsu_threshold(np.random.rand(40, 40), side='spam')
def test_otsu_threshold_side_above():
arr = np.zeros([20, 20])
arr[:10, :] = 1
arr[10:, :] = 2
arr_thr = otsu_threshold(arr, side='above', return_as_mask=False)
expected_arr_thr = np.zeros([20, 20])
expected_arr_thr[10:, :] = 2
np.testing.assert_array_equal(arr_thr, expected_arr_thr)
def test_otsu_threshold_side_below():
arr = np.zeros([20, 20])
arr[:10, :] = 1
arr[10:, :] = 2
arr_thr = otsu_threshold(arr, side='below', return_as_mask=False)
expected_arr_thr = np.zeros([20, 20])
expected_arr_thr[:10, :] = 1
np.testing.assert_array_equal(arr_thr, expected_arr_thr)
def test_otsu_threshold_as_mask():
arr = np.zeros([20, 20])
arr[:10, :] = 1
arr[10:, :] = 2
arr_thr = otsu_threshold(arr, side='above', return_as_mask=True)
expected_arr_thr = np.zeros([20, 20])
expected_arr_thr[10:, :] = 1
np.testing.assert_array_equal(arr_thr, expected_arr_thr)
def test_MoG_array_1():
arr = np.zeros([20, 20, 20])
arr[:10, ...] = 1
arr[10:, ...] = 2
crisp, prob = MoG_array(arr, K=2)
expected_crisp = np.zeros([20, 20, 20])
expected_crisp[:10, ...] = 0
expected_crisp[10:, ...] = 1
expected_prob = np.zeros([20, 20, 20, 2])
expected_prob[:10, ..., 0] = 1
expected_prob[10:, ..., 1] = 1
np.testing.assert_array_equal(crisp, expected_crisp)
np.testing.assert_array_equal(prob, expected_prob)
if __name__ == '__main__':
test_intensity_segmentation_1()
test_intensity_segmentation_2()
test_otsu_threshold_bad_input()
test_otsu_threshold_side_above()
test_otsu_threshold_side_below()
test_otsu_threshold_as_mask()
test_MoG_array_1()
|
1652935
|
import argparse
import collections
import json
import operator
import pickle
import re
import sklearn.preprocessing
# Taken from
# https://github.com/tensorflow/tensorflow/blob/16254e75e2fe4bb0f879b45fbad0c4b62c028011/tensorflow/models/rnn/translate/data_utils.py#L43
_WORD_SPLIT = re.compile(r"([.,!?\"':;)(])")
def basic_tokenizer(split_sentence):
"""Very basic tokenizer: split the sentence into a list of tokens."""
words = []
for space_separated_fragment in split_sentence:
words.extend(re.split(_WORD_SPLIT, space_separated_fragment))
return [w for w in words if w]
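# e.g. basic_tokenizer("Tweet new items!".lower().split()) -> ['tweet', 'new', 'items', '!']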
def parse_recipes(input_data):
# Example entry:
#"title": "Tweet New RSS Feed Item",
#"description": "Automatically tweet new RSS feed items.",
#"action_channel": "TwitterV2API",
#"event_channel": "RSSAPI",
#"action": "tweet",
#"event": "new_feed",
#"rule": "{u'message': u'{{title}}: {{link}}'}"
recipes = []
raw_data = json.load(input_data)
for item in raw_data:
if item["event_channel"] == None:
item["event_channel"]="None"
if item["action_channel"] == None:
item["action_channel"]="None"
if item["event"] == None:
item["event"]="None"
if item["action"] == None:
item["action"]="None"
recipes.append({
'recipe': item["title"],
'trigger_chan': item["event_channel"],
'trigger_func': item["event_channel"] + '.' + item["event"],
'trigger_func_pure': item["event"],
'action_chan': item["action_channel"],
'action_func': item["action_channel"] + '.' + item["action"],
'action_func_pure': item["action"],
'rule': item["rule"]
})
return recipes
def parse_Zapier(input_train, input_dev, input_test):
training_recipes = parse_recipes(input_train)
dev_recipes = parse_recipes(input_dev)
test_recipes = parse_recipes(input_test)
data = {}
for section_name, section_data in zip(
['train', 'dev','test'], [training_recipes, dev_recipes, test_recipes]):
data[section_name] = {}
for m in ['trigger', 'action']:
data[section_name][m] = []
for item in section_data:
data[section_name][m].append({'recipe': basic_tokenizer(item['recipe'].lower().split()),
'chan': item[m + '_chan'],
'func': item[m + '_func'],
})
return data
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--output', required=True)
parser.add_argument('--input-train', help='Path to Zapier training data(json file)')
parser.add_argument('--input-dev', help='Path to Zapier dev data(json file)')
parser.add_argument('--input-test', help='Path to Zapier test data(json file)')
parser.add_argument('--word-list', help='Path to word list(json file)')
args = parser.parse_args()
with open(args.input_train) as input_train, open(args.input_dev) as input_dev, open(args.input_test) as input_test:
data = parse_Zapier(input_train, input_dev, input_test)
# Assign IDs to each word and label in the training data
vocab = collections.Counter()
for item in data['train']['action']:
for word in item['recipe']:
vocab[word] += 1
words_dec_freq = sorted(vocab.items(),
key=operator.itemgetter(1),
reverse=True)
word_list = [{"word": word, "freq": freq} for word, freq in words_dec_freq]
with open('Zapier_words.json', 'w') as fout:
fout.write(json.dumps(word_list, indent=1))
if args.word_list:
with open(args.word_list) as f:
word_list = json.load(f)
word_ids = {}
for item in word_list:
word_ids[item["word"]] = len(word_ids)
else:
word_ids = {k: i for i, (k, count) in enumerate(words_dec_freq)}
# Make {train,test}_{trigger,action}_{channels,functions}.
all_labels = {}
labelers = {}
for m in ['trigger', 'action']:
for n in ['chans', 'funcs']:
label_type = m + '_' + n
labels = {}
for section in ['train', 'dev', 'test']:
if section not in data: continue
labels[section] = list(map(operator.itemgetter(n[:-1]), data[section][m]))
labeler = sklearn.preprocessing.LabelEncoder()
labeler.fit(labels['train'] + labels['test'])
for section in ['train', 'dev', 'test']:
if section not in data: continue
labels[section] = labeler.transform(labels[section])
all_labels[label_type] = labels
labelers[label_type] = labeler
label_types = ('trigger_chans', 'trigger_funcs', 'action_chans',
'action_funcs')
outputs = {}
for section in ['train', 'dev', 'test']:
output = []
for i in range(len(data[section]['action'])):
words = data[section]['action'][i]['recipe']
item = {'ids': [word_ids.get(word, len(word_ids)) for word in words],
'labels': [all_labels[t][section][i] for t in label_types],
'label_names': [labelers[t].classes_[all_labels[t][section][i]]
for t in label_types],
'words': words}
output.append(item)
#output.append({'words': words, 'ids': ids, 'labels': labels})
outputs[section] = output
outputs['label_types'] = label_types
outputs['labelers'] = labelers
outputs['word_ids'] = word_ids
outputs['num_labels'] = [len(labelers[t].classes_) for t in label_types]
with open(args.output, 'wb') as f:
pickle.dump(outputs, f, pickle.HIGHEST_PROTOCOL)
|
1652951
|
from datetime import datetime
from eth_utils import to_checksum_address
import datetime as dt
from src.queries import getAllAllocations, getActiveAllocations, getClosedAllocations, getAllocationDataById, \
getCurrentBlock
from src.helpers import initialize_rpc, initializeRewardManagerContract, ANYBLOCK_ANALYTICS_ID
import pandas as pd
import numpy as np
def calculateRewardsActiveAllocation(allocation_id, interval=1):
# initialize rewardManager Contract
reward_manager_contract = initializeRewardManagerContract()
# initialize web3 client
web3 = initialize_rpc()
# Grab allocation data by allocation_id
allocation = getAllocationDataById(allocation_id)
current_block = getCurrentBlock()
allocation_id = to_checksum_address(allocation['id'])
subgraph_id = allocation['subgraphDeployment']['id']
allocation_creation_block = allocation['createdAtBlockNumber']
subgraph_name = allocation['subgraphDeployment']['originalName']
# If depreciated / broken and has no name, use ipfsHash as name
if subgraph_name is None:
subgraph_name = allocation['subgraphDeployment']['ipfsHash']
# calculate the number of hours since the allocation took place
allocation_created_at = allocation['createdAt']
hours_since_allocation = dt.datetime.now() - datetime.fromtimestamp(allocation_created_at)
hours_since_allocation = hours_since_allocation.total_seconds() / 3600
# calculate the number of hours since the subgraph was created (age in hours)
subgraph_created_at = allocation['subgraphDeployment']['createdAt']
subgraph_hours_since_creation = dt.datetime.now() - datetime.fromtimestamp(subgraph_created_at)
subgraph_hours_since_creation = subgraph_hours_since_creation.total_seconds() / 3600
# get the amount of GRT allocated
allocated_tokens = int(allocation['allocatedTokens']) / 10 ** 18
# get the subgraph signal and stake
subgraph_signal = int(allocation['subgraphDeployment']['signalledTokens']) / 10 ** 18
subgraph_stake = int(allocation['subgraphDeployment']['stakedTokens']) / 10 ** 18
# get the subgraph IPFS hash
subgraph_ipfs_hash = allocation['subgraphDeployment']['ipfsHash']
# Initialize a delta reward between current and previous interval reward
accumulated_reward_minus_interval = 0
# iterate from the allocation creation block to the current block in daily steps
# (~270 blocks per hour, so 24 * 270 blocks per day)
temp_data = []
for block in range(allocation_creation_block, current_block + 1, (24 * 270)):
datetime_block = datetime.utcfromtimestamp(web3.eth.get_block(block).get('timestamp')).strftime(
'%Y-%m-%d')
try:
accumulated_reward = reward_manager_contract.functions.getRewards(allocation_id).call(
block_identifier=block) / 10 ** 18
except Exception:
accumulated_reward = 0
# calculate the difference between the accumulated reward and the reward from last interval and calc
# the hourly rewards
reward_rate_day = (accumulated_reward - accumulated_reward_minus_interval) / interval
reward_rate_hour = reward_rate_day / 24
reward_rate_hour_per_token = reward_rate_hour / allocated_tokens
# set the currently accumulated reward as the previous interval reward for the next iteration
accumulated_reward_minus_interval = accumulated_reward
earnings_rate_all_indexers = reward_rate_hour / allocated_tokens * subgraph_stake
try:
stake_signal_ratio = subgraph_signal / subgraph_stake
except ZeroDivisionError:
stake_signal_ratio = 0
# create list with entries
temp_data.append({
"datetime": datetime_block,
"subgraph_name": subgraph_name,
"subgraph_ipfs_hash": subgraph_ipfs_hash,
"accumulated_reward": accumulated_reward,
"reward_rate_day": reward_rate_day,
"reward_rate_hour": reward_rate_hour,
"reward_rate_hour_per_token": reward_rate_hour_per_token,
"earnings_rate_all_indexers": earnings_rate_all_indexers,
"subgraph_age_in_hours": subgraph_hours_since_creation,
"subgraph_age_in_days": subgraph_hours_since_creation / 24,
"subgraph_created_at": datetime.utcfromtimestamp(
allocation['subgraphDeployment']['createdAt']).strftime('%Y-%m-%d'),
"subgraph_signal": subgraph_signal,
"subgraph_stake": subgraph_stake,
"subgraph_signal_ratio": stake_signal_ratio,
"block_height": block,
"allocated_tokens": allocated_tokens,
"allocation_id": allocation_id,
"allocation_created_timestamp": datetime.utcfromtimestamp(allocation_created_at).strftime('%Y-%m-%d'),
"allocation_created_epoch": allocation['createdAtEpoch'],
"allocation_status": "Open",
"timestamp": datetime.utcfromtimestamp(
web3.eth.get_block(block).get('timestamp')).strftime('%Y-%m-%d'),
})
df = pd.DataFrame(temp_data)
return df
def calculateRewardsAllActiveAllocations(indexer_id, interval=1):
"""Calculates the pending rewards in given interval for all active allocation
Parameters
-------
interval (int): supply interval for pending rewards calculation in hours. Standard is 1h
indexer_id (str) : supply indexer id for reward calculation on all allocations
"""
# grab all active allocations
active_allocations = getActiveAllocations(indexer_id=indexer_id)
columns = ["datetime", "subgraph_name", "subgraph_ipfs_hash", "accumulated_reward",
"reward_rate_day", "reward_rate_hour", "reward_rate_hour_per_token",
"earnings_rate_all_indexers", "subgraph_age_in_hours", "subgraph_age_in_days",
"subgraph_created_at", "subgraph_signal", "subgraph_stake", "subgraph_signal_ratio",
"block_height", "allocated_tokens", "allocation_id", "allocation_created_timestamp",
"allocation_created_epoch", "allocation_status", "timestamp"]
df = pd.DataFrame(columns=columns)
if active_allocations:
# calculate the rewards for each active allocation and collect them in one dataframe
for allocation in active_allocations['allocations']:
df_temp = calculateRewardsActiveAllocation(allocation_id=allocation['id'], interval=interval)
df = pd.concat([df, df_temp], ignore_index=True)
return df
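# Example, using the indexer id imported above:
# df = calculateRewardsAllActiveAllocations(indexer_id=ANYBLOCK_ANALYTICS_ID, interval=1)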
def calculateRewardsAllClosedAllocations(indexer_id):
"""Calculates the rewards and data for all closed Allocations.
Parameters
-------
indexer_id (str) : supply indexer id for reward calculation on all allocations
"""
# grab all active allocations
closed_allocations = getClosedAllocations(indexer_id=indexer_id)
temp_data = []
if closed_allocations:
for allocation in closed_allocations['totalAllocations']:
if allocation.get('subgraphDeployment').get('signalledTokens'):
subgraph_signal = int(allocation.get('subgraphDeployment').get('signalledTokens')) / 10 ** 18
else:
subgraph_signal = 0
if allocation.get('subgraphDeployment').get('stakedTokens'):
subgraph_stake = int(allocation.get('subgraphDeployment').get('stakedTokens')) / 10 ** 18
else:
subgraph_stake = 0
try:
subgraph_signal_ratio = subgraph_stake / subgraph_signal
except ZeroDivisionError:
subgraph_signal_ratio = 0
subgraph_created_at = allocation['subgraphDeployment']['createdAt']
subgraph_hours_since_creation = dt.datetime.now() - datetime.fromtimestamp(subgraph_created_at)
subgraph_hours_since_creation = subgraph_hours_since_creation.total_seconds() / 3600
created_at = datetime.utcfromtimestamp(
allocation.get('createdAt')).strftime('%Y-%m-%d')
closed_at = datetime.utcfromtimestamp(
allocation.get('closedAt')).strftime('%Y-%m-%d')
allocation_duration_days = max((datetime.strptime(closed_at, "%Y-%m-%d")
- datetime.strptime(created_at, "%Y-%m-%d")).days, 1)
reward_rate_day = (int(allocation.get('indexingRewards')) / 10 ** 18) / allocation_duration_days
temp_data.append({
'created_at': created_at,
'closed_at': closed_at,
"subgraph_name": allocation.get('subgraphDeployment').get('originalName'),
"subgraph_ipfs_hash": allocation.get('subgraphDeployment').get('ipfsHash'),
"accumulated_reward": int(allocation.get('indexingRewards')) / 10 ** 18,
"reward_rate_day": reward_rate_day,
"reward_rate_hour": reward_rate_day / 24,
"reward_rate_hour_per_token": (reward_rate_day / 24) / (
int(allocation.get('allocatedTokens')) / 10 ** 18),
"earnings_rate_all_indexers": np.nan,
"subgraph_age_in_hours": subgraph_hours_since_creation,
"subgraph_age_in_days": subgraph_hours_since_creation / 24,
"subgraph_created_at": datetime.utcfromtimestamp(
allocation['subgraphDeployment']['createdAt']).strftime('%Y-%m-%d'),
"subgraph_signal": subgraph_signal,
"subgraph_stake": subgraph_stake,
"subgraph_signal_ratio": subgraph_signal_ratio,
"block_height": np.nan,
"allocation_id": allocation.get('id'),
"allocated_tokens": int(allocation.get('allocatedTokens')) / 10 ** 18,
"allocation_created_timestamp": datetime.utcfromtimestamp(allocation.get('createdAt')).strftime(
'%Y-%m-%d'),
"allocation_created_epoch": allocation.get('createdAtEpoch'),
"allocation_status": "Closed",
"timestamp": datetime.utcfromtimestamp(
allocation.get('closedAt')).strftime('%Y-%m-%d'),
})
df = pd.DataFrame(temp_data)
        # expand the dataframe: create one row per day between each created_at and closed_at
df['day'] = df.apply(lambda row: pd.date_range(row['created_at'], row['closed_at'], freq='d'), axis=1)
df = df.explode('day').reset_index() \
.rename(columns={'day': 'datetime'}) \
.drop(columns=['created_at', 'closed_at', 'index'])
# Move Datetime to First column
col = df.pop("datetime")
df.insert(0, col.name, col)
# Calculate accumulated reward from reward rate day
df.sort_values(['allocation_id', 'datetime'], inplace=True)
# get cumulative sum of rewards
df_cumsum = df.groupby(by=['allocation_id', 'datetime'])['reward_rate_day'].sum() \
.groupby(level='allocation_id').cumsum().reset_index(name='accumulated_reward')
# drop previous accumulated_reward column
df.drop(columns=['accumulated_reward'], inplace=True)
# merge with main dataframe
df = pd.merge(left=df, right=df_cumsum, how="left", left_on=['allocation_id', 'datetime'],
right_on=["allocation_id", "datetime"])
        # move the accumulated_reward column to position 3
col = df.pop("accumulated_reward")
df.insert(3, col.name, col)
# change datetime format
df['datetime'] = df['datetime'].dt.strftime("%Y-%m-%d")
else:
df = pd.DataFrame(columns=['created_at',
'closed_at',
"subgraph_name",
"subgraph_ipfs_hash",
"accumulated_reward",
"reward_rate_day",
"reward_rate_hour",
"reward_rate_hour_per_token",
"earnings_rate_all_indexers",
"subgraph_age_in_hours",
"subgraph_age_in_days",
"subgraph_created_at",
"subgraph_signal",
"subgraph_stake",
"subgraph_signal_ratio",
"block_height",
"allocation_id",
"allocated_tokens",
"allocation_created_timestamp",
"allocation_created_epoch",
"allocation_status",
"timestamp"])
return df
# calculateRewardsAllClosedAllocations(ANYBLOCK_ANALYTICS_ID)
|
1652961
|
import os
from optparse import make_option
from uliweb.core.commands import Command, get_answer
class DirCommand(Command):
name = 'dir'
args = 'directory [,...]'
help = 'Clear all files or file patterns in dirs.'
option_list = (
        make_option('-e', '--extension', dest='extensions', action='append', default=[],
                    help='Only match extensions, e.g. .txt'),
        make_option('-p', '--pattern', dest='pattern', action='append', default=[],
                    help='Only match patterns, e.g. "kq*"'),
        make_option('-x', '--exclude_extensions', dest='exclude_extensions', action='append', default=[],
                    help='Exclude matching extensions.'),
        make_option('-r', '--recursion', dest='recursion', action='store_true', default=False,
                    help='Recurse into the directory.'),
        make_option('-d', '--days', dest='days', type='int', default=7,
                    help='Delta days before now.'),
        make_option('-t', '--test', dest='test', default=False, action='store_true',
                    help='Only display filenames, do not really delete them.'),
)
def handle(self, options, global_options, *args):
self.get_application(global_options)
for d in args:
self.clean_dir(d, extensions=options.extensions,
exclude_extensions=options.exclude_extensions,
days=options.days, recursion=options.recursion,
pattern=options.pattern,
verbose=global_options.verbose,
test=options.test)
def clean_dir(self, dir, extensions, exclude_extensions, recursion,
days, pattern, verbose=False, test=False):
from uliweb.utils.common import walk_dirs
import datetime
from uliweb.utils import date
import shutil
now = date.now()
i = 0
for f in walk_dirs(dir, include_ext=extensions, exclude_ext=exclude_extensions,
recursion=recursion, file_only=False,
use_default_pattern=False, patterns=pattern):
t = datetime.datetime.fromtimestamp(os.path.getmtime(f))
if not days or (days and (now-t).days >= days):
try:
if os.path.isfile(f):
if not test:
os.unlink(f)
if test or verbose:
print 'Clean filename {}...'.format(f)
else:
if not test:
shutil.rmtree(f)
if test or verbose:
print 'Clean directory {}...'.format(f)
i += 1
except:
import traceback
traceback.print_exc()
print 'Cleaned {} files'.format(i)
class ModelCommand(Command):
name = 'model'
args = 'model [,...]'
    help = 'Clear all models. Model should have a clear_data(days, count) method.'
option_list = (
make_option('-d', '--days', dest='days', type='int', default=7,
help='Delta days before now.'),
make_option('-c', '--count', dest='count', type='int', default=5000,
help='Records count of cleaning at one time. Default is 5000.'),
)
def handle(self, options, global_options, *args):
self.get_application(global_options)
for d in args:
self.clean_model(d,
days=options.days,
count=options.count,
verbose=global_options.verbose)
def clean_model(self, model, days, count, verbose=False):
from uliweb import functions
import time
import logging
import types
from uliweb.orm import Begin, Commit, Rollback
log = logging.getLogger(__name__)
if verbose:
print 'Clean {}, days={}, count={} ...'.format(model, days, count)
M = functions.get_model(model)
if hasattr(M, 'clear_data'):
b = time.time()
t = 0
Begin()
try:
ret = M.clear_data(days, count)
Commit()
except Exception as e:
Rollback()
log.exception(e)
return
if isinstance(ret, types.GeneratorType):
while 1:
Begin()
try:
n = ret.next()
t += n
Commit()
except StopIteration:
break
except Exception as e:
Rollback()
log.exception(e)
break
else:
t = ret
print 'Used {} seconds to clean the {}, total records is {}'.format(time.time()-b, model, t)
else:
print 'There is no clear_data() function defined for {}'.format(model)
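# Usage sketch (hypothetical paths and model names; assumes these commands are
# registered with the uliweb command runner):
#   uliweb dir -e .log -d 30 -r /var/log/myapp
#   uliweb model -d 7 -c 5000 mylog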
|
1652986
|
import requests
import rdflib
from whyis import nanopub
import datetime
import pytz
import dateutil.parser
from dateutil.tz import tzlocal
from werkzeug.datastructures import FileStorage
from werkzeug.http import http_date
from setlr import FileLikeFromIter
import re
import os
from requests_testadapter import Resp
import magic
import mimetypes
import traceback
import sys
from whyis.namespace import np, prov, dc, sio
class LocalFileAdapter(requests.adapters.HTTPAdapter):
def build_response_from_file(self, request):
file_path = request.url[7:]
mtime = os.path.getmtime(file_path)
dt = datetime.datetime.fromtimestamp(mtime, tzlocal())
mimetype = mimetypes.guess_type(file_path)[0]
if mimetype is None:
mimetype = magic.from_file(file_path, mime=True)
headers = {"Last-Modified": http_date(dt)}
if mimetype is not None:
headers['Content-Type'] = mimetype
with open(file_path, 'rb') as file:
buff = bytearray(os.path.getsize(file_path))
file.readinto(buff)
resp = Resp(buff, headers=headers)
r = self.build_response(request, resp)
return r
def send(self, request, stream=False, timeout=None,
verify=True, cert=None, proxies=None):
return self.build_response_from_file(request)
|
1653010
|
import tensorflow as tf
def training(loss, learning_rate, global_step):
#This motif is needed to hook up the batch_norm updates to the training
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss=loss, global_step=global_step)
return train_op
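# Minimal usage sketch (TF1-style graph; assumes a scalar `loss` tensor is
# already built elsewhere):
#   global_step = tf.train.get_or_create_global_step()
#   train_op = training(loss, learning_rate=1e-3, global_step=global_step)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       sess.run(train_op)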
|
1653036
|
import glob
import os
import random
import numpy as np
from scipy.misc import imread
from refinement_net.core.Measures import compute_measures_for_binary_segmentation_single_image, IOU
from refinement_net.datasets import DataKeys
from refinement_net.datasets.DAVIS import DAVIS
from refinement_net.datasets.Dataset import FileListDataset
from refinement_net.scripts.eval.Datasets.EvalPascalMasked import EvalPascalMaskedDataset
NAME = "OSVOSworst"
DAVIS_PATH = DAVIS.DAVIS_DEFAULT_PATH
def get_fn_with_worst_iou(seq):
result_fn = None
result_gt = None
result_measure = None
files = glob.glob(seq + "/*.png")
seq_name = seq.split("/")[-1]
for file in files:
fname = file.split("/")[-1]
img = imread(file)
img = img / 255
gt_file = DAVIS_PATH + "/Annotations/480p/" + seq_name + "/" + fname
gt = imread(gt_file)
gt = gt / 255
measure = compute_measures_for_binary_segmentation_single_image(img, gt)
        if measure is None:
            print(file, gt_file, measure)
if result_measure is None or measure[IOU] < result_measure[IOU]:
result_measure = measure
result_fn = DAVIS_PATH + "/JPEGImages/480p/" + seq_name + "/" + fname.replace(".png", ".jpg")
result_gt = gt_file
return result_fn, result_gt, result_measure
class OSVOSWorst(FileListDataset):
def __init__(self, config, subset, name=NAME):
super(OSVOSWorst, self).__init__(config, name, subset, num_classes=2, default_path=DAVIS_PATH)
self.iterative_training = config.bool("iterative_training", True)
self.eval_pascal_dataset = EvalPascalMaskedDataset(config, subset)
self.previous_epoch_data = self.eval_pascal_dataset.previous_epoch_data
self.save_images = config.bool("save_images", False)
self.img_dir = config.string("img_dir", str(random.randrange(1, 10000)))
def get_extraction_keys(self):
return self.eval_pascal_dataset.get_extraction_keys()
def postproc_example_before_assembly(self, tensors):
return self.eval_pascal_dataset.postproc_example_before_assembly(tensors)
def postproc_annotation(self, ann_filename, ann):
mask = super().postproc_annotation(ann_filename, ann)
mask = mask / 255
return {DataKeys.SEGMENTATION_LABELS: mask, DataKeys.RAW_SEGMENTATION_LABELS: mask,
DataKeys.IMAGE_FILENAMES: ann_filename}
def use_segmentation_mask(self, res):
self.eval_pascal_dataset.use_segmentation_mask(res)
def read_inputfile_lists(self):
pre_computed = DAVIS_PATH + "/pre_computed/"
imgs = []
gts = []
measures = []
# get all video sequences
seqs = [os.path.join(pre_computed, f) for f in os.listdir(pre_computed) if os.path.isdir(os.path.join(pre_computed, f))]
for seq in seqs:
fn, gt, measure = get_fn_with_worst_iou(seq)
measures += [measure]
imgs += [fn]
gts += [gt]
print(measures)
ious = [m[IOU] for m in measures]
print("Average IOU Initial: ", np.average(ious))
return imgs, gts
|
1653050
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import numpy as np
import cv2
from lib.show_images import debugShowBoxes
class BaseContoursHeatmap(object):
cv_thresh = cv2.THRESH_BINARY
cv_contour_method = cv2.CHAIN_APPROX_NONE
contour_mode = cv2.RETR_TREE
def __init__(self):
pass
def determenistic_boxes(self, orig, hmap, thresh=0.7, draw=False):
dfunc = partial(self._deterministic_threshold, thresh=thresh)
return self._base_get_bboxes(thresh_func=dfunc, orig=orig, hmap=hmap, draw=draw)
def edge_boxes(self, orig, hmap, draw=False):
return self._base_get_bboxes(thresh_func=self._edges_thresh, orig=orig, hmap=hmap, draw=draw)
def _base_get_bboxes(self, thresh_func, orig, hmap, draw=False):
o_shape = orig.shape
h_shape = hmap.shape
edges = thresh_func(hmap=hmap)
conts = self._get_contours(threshed_hmap=edges)
boxes = self._bboxes_from_contours(conts=conts)
if boxes.shape[0] > 0:
scales = [o_shape[0] / float(h_shape[0]), o_shape[1]/float(h_shape[1])]
scales = np.array(scales+scales)
boxes = boxes*scales
if draw:
debugShowBoxes(orig, boxes=boxes, wait=3000)
return boxes
return np.zeros(shape=(1, 4))
def _deterministic_threshold(self, hmap, thresh=0.7, scale=255):
hmap = (hmap*scale).astype(np.uint8)
_, thresh = cv2.threshold(hmap, int(scale * thresh), scale, self.cv_thresh)
return thresh
def _edges_thresh(self, hmap, thresh=0.5, scale=255):
hmap = (hmap * scale).astype(np.uint8)
edges = cv2.Canny(hmap, scale*thresh, scale)
return edges
def _binomial_threshold(self, hmap):
orig_shape = hmap.shape
p = hmap.flatten()
        thresh = np.random.binomial(n=1, p=p).reshape(orig_shape).astype(np.uint8)
return thresh
def _get_contours(self, threshed_hmap):
        # support different versions of cv2.findContours (OpenCV 3 returns 3 values,
        # OpenCV 2/4 return 2, so the first unpacking raises ValueError there)
        try:
            _, poly, _ = cv2.findContours(threshed_hmap, self.contour_mode, self.cv_contour_method)
        except ValueError:
            poly, _ = cv2.findContours(threshed_hmap, self.contour_mode, self.cv_contour_method)
return poly
    def _bboxes_from_contours(self, conts):
        # materialize the maps into lists so np.array builds a proper 2-D array on Python 3
        xywh = list(map(cv2.boundingRect, conts))
        xyxy = list(map(xywh_to_xyxy, xywh))
        return np.array(xyxy)
def xywh_to_xyxy(box):
x2 = box[0] + box[2]
y2 = box[1] + box[3]
return np.array([box[0], box[1], x2, y2])
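# e.g. xywh_to_xyxy((10, 20, 30, 40)) -> array([10, 20, 40, 60])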
|
1653081
|
from _groupsig import lib, ffi
from . import constants
import base64
def gml_init(code):
"""
Initializes a Group Membership List (GML) for schemes of the given type.
Parameters:
code: The code of the scheme.
Returns:
A native object representing the GML. Throws an Exception on error.
"""
gml = lib.gml_init(code)
if gml == ffi.NULL:
raise Exception('Error initializing GML.')
return gml
def gml_free(gml):
"""
Frees the native memory used to represent the given GML.
Parameters:
gml: The GML structure to free.
Returns:
IOK (1) or IERROR (0)
"""
return lib.gml_free(gml)
def gml_export(gml):
"""
Exports a GML to a Base64 string.
Parameters:
gml: The GML to export.
Returns:
A Base64 string. On error, an Exception is thrown.
"""
bgml = ffi.new("byte_t **")
bgml[0] = ffi.NULL
size = ffi.new("uint32_t *")
if lib.gml_export(bgml, size, gml) == constants.IERROR:
raise Exception('Error exporting GML.')
b64gml = base64.b64encode(ffi.buffer(bgml[0],size[0]))
b64gml = b64gml.decode('utf-8').replace('\n', '')
# lib.free(bgml[0])
return b64gml
def gml_import(code, b64gml):
"""
Imports a GML from a Base64 string.
Parameters:
code: The code of the scheme related to this GML.
b64gml: The Base64 string.
Returns:
The imported GML native data structure. Throws an Exception on error.
"""
b = base64.b64decode(b64gml)
gml = lib.gml_import(code, b, len(b))
if gml == ffi.NULL:
raise Exception('Error importing GML.')
return gml
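# Round-trip sketch (assumes `code` is a valid scheme code for the underlying
# libgroupsig build):
#   gml = gml_init(code)
#   b64 = gml_export(gml)
#   gml2 = gml_import(code, b64)
#   gml_free(gml)
#   gml_free(gml2)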
|
1653096
|
import torch
from loguru import logger
def pad_batch(h_node, batch, max_input_len, get_mask=False):
num_batch = batch[-1] + 1
num_nodes = []
masks = []
for i in range(num_batch):
mask = batch.eq(i)
masks.append(mask)
num_node = mask.sum()
num_nodes.append(num_node)
# logger.info(max(num_nodes))
max_num_nodes = min(max(num_nodes), max_input_len)
padded_h_node = h_node.data.new(max_num_nodes, num_batch, h_node.size(-1)).fill_(0)
src_padding_mask = h_node.data.new(num_batch, max_num_nodes).fill_(0).bool()
for i, mask in enumerate(masks):
num_node = num_nodes[i]
if num_node > max_num_nodes:
num_node = max_num_nodes
padded_h_node[-num_node:, i] = h_node[mask][-num_node:]
src_padding_mask[i, : max_num_nodes - num_node] = True # [b, s]
if get_mask:
return padded_h_node, src_padding_mask, num_nodes, masks, max_num_nodes
return padded_h_node, src_padding_mask
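# Shape sketch: with h_node of shape [n, f] and batch = tensor([0, 0, 1, 1, 1]),
# num_nodes is [2, 3], so padded_h_node has shape [max_num_nodes, 2, f] and
# src_padding_mask has shape [2, max_num_nodes]; nodes are right-aligned and the
# True (padding) positions sit at the front of each sequence.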
def unpad_batch(padded_h_node, prev_h_node, num_nodes, origin_mask, max_num_nodes):
"""
padded_h_node: [s, b, f]
prev_h_node: [bxs, f]
batch: [n]
pad_mask: [b, s]
"""
for i, mask in enumerate(origin_mask):
num_node = num_nodes[i]
if num_node > max_num_nodes:
num_node = max_num_nodes
# cutoff mask
indices = mask.nonzero()
indices = indices[-num_node:]
mask = torch.zeros_like(mask)
mask[indices] = True
# logger.info("prev_h_node:", prev_h_node.size())
# logger.info("padded_h_node:", padded_h_node.size())
# logger.info("mask:", mask.size())
prev_h_node = prev_h_node.masked_scatter(mask.unsqueeze(-1), padded_h_node[-num_node:, i])
return prev_h_node
|
1653126
|
import networkx as nx
import numpy as np
class ClusteringCoeff:
"""
Concatenates to each node attribute the clustering coefficient of the
corresponding node.
"""
def __call__(self, graph):
if "a" not in graph:
raise ValueError("The graph must have an adjacency matrix")
clustering_coeff = nx.clustering(nx.Graph(graph.a))
clustering_coeff = np.array(
[clustering_coeff[i] for i in range(graph.n_nodes)]
)[:, None]
if "x" not in graph:
graph.x = clustering_coeff
else:
graph.x = np.concatenate((graph.x, clustering_coeff), axis=-1)
return graph
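# Usage sketch (assumes a spektral-style Graph with fields `a`, `x` and
# `n_nodes`, e.g. from spektral.data):
#   transform = ClusteringCoeff()
#   graph = transform(graph)  # graph.x gains one extra column per node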
|
1653143
|
import click
from commands.load_metadata.all import all
from commands.load_metadata.continents import continents
from commands.load_metadata.countries import countries
from commands.load_metadata.economic_blocs import economic_blocs
from commands.load_metadata.establishments import establishments
from commands.load_metadata.hedu_course import hedu_course
from commands.load_metadata.industries import industries
from commands.load_metadata.inflections import inflections
from commands.load_metadata.metadata_command import metadata_command
from commands.load_metadata.municipalities import municipalities
from commands.load_metadata.occupations import occupations
from commands.load_metadata.ports import ports
from commands.load_metadata.products import products
from commands.load_metadata.regions import regions
from commands.load_metadata.sc_course import sc_course
from commands.load_metadata.states import states
from commands.load_metadata.territories import territories
from commands.load_metadata.universities import universities
@click.group()
def load_metadata():
pass
load_metadata.add_command(sc_course)
load_metadata.add_command(ports)
load_metadata.add_command(countries)
load_metadata.add_command(occupations)
load_metadata.add_command(products)
load_metadata.add_command(states)
load_metadata.add_command(regions)
load_metadata.add_command(continents)
load_metadata.add_command(territories)
load_metadata.add_command(economic_blocs)
load_metadata.add_command(municipalities)
load_metadata.add_command(industries)
load_metadata.add_command(hedu_course)
load_metadata.add_command(establishments)
load_metadata.add_command(inflections)
load_metadata.add_command(metadata_command)
load_metadata.add_command(universities)
load_metadata.add_command(all)
|
1653175
|
from itertools import combinations_with_replacement
import numpy as np
from scipy.stats import chisquare
from pandas_genomics.arrays.utils import required_ploidy
from pandas_genomics.scalars import MISSING_IDX
class InfoMixin:
"""
Genotype Mixin containing functions for calculating various information
"""
@property
def is_missing(self):
"""
Boolean array: True if the sample is missing all alleles
"""
return (self.allele_idxs == MISSING_IDX).all(axis=1)
@property
def is_homozygous(self):
"""
Boolean array: True if the sample is homozygous for any allele
"""
return (self.allele_idxs == self.allele_idxs[:, 0, np.newaxis]).all(axis=1)
@property
def is_heterozygous(self):
"""
Boolean array: True if the sample is heterozygous for any alleles
"""
return ~self.is_homozygous
@property
def is_homozygous_ref(self):
"""
Boolean array: True if the sample is homozygous for the reference allele
"""
return (self.allele_idxs == 0).all(axis=1)
@property
def is_homozygous_alt(self):
"""
Boolean array: True if the sample is homozygous for any non-reference allele
"""
return self.is_homozygous & ~self.is_homozygous_ref
@property
def maf(self) -> float:
"""
Calculate the Minor Allele Frequency (MAF) for the most-frequent alternate allele.
Missing alleles are ignored.
"""
total_nonmissing_alleles = (self.allele_idxs != MISSING_IDX).sum().sum()
if total_nonmissing_alleles == 0:
# All genotypes missing
return np.nan
allele_counts = np.bincount(self.allele_idxs.flatten())
if len(allele_counts) == 1:
# All reference
return 0.0
        elif len(allele_counts) == MISSING_IDX + 1:
# At least one missing allele is present: don't count them
allele_counts = allele_counts[:-1]
# Use highest alternate allele value
return allele_counts[1:].max() / total_nonmissing_alleles
@property
@required_ploidy(2, np.nan)
def hwe_pval(self) -> float:
"""
Calculate the probability that the samples are in HWE for diploid variants
Notes
-----
Generate expected counts using the allele frequencies and perform a chi-square test.
Ignore any samples with missing alleles.
Uses a typical number of degrees of freedom (the number of observed genotypes minus 1).
Returns np.nan if any expected counts are < 5
"""
# Take nonmissing allele indexes
nonmissing_aidxs = self.allele_idxs[self.allele_idxs.max(axis=1) != MISSING_IDX]
if len(nonmissing_aidxs) == 0:
return np.nan
# Get allele counts and frequency
allele_counts = np.bincount(nonmissing_aidxs.flatten())
total_gt = len(nonmissing_aidxs)
total_alleles = total_gt * 2
if total_gt < 2:
return np.nan # Too few samples to calculate
if len(allele_counts) == 1:
return 1.0 # All Reference
allele_freqs = allele_counts / (total_alleles)
# Chisq test
expected = []
observed = []
for a1, a2 in combinations_with_replacement(range(len(allele_freqs)), 2):
a1_freq = allele_freqs[a1]
a2_freq = allele_freqs[a2]
if a1 != a2:
# Heterozygous
expected.append(int(a1_freq * a2_freq * total_gt * 2))
else:
# Homozygous
expected.append(int(a1_freq * a2_freq * total_gt))
observed.append((nonmissing_aidxs == (a1, a2)).all(axis=1).sum())
# Return NaN if any expected counts are < 5
if min(expected) < 5:
return np.nan
chi, pval = chisquare(observed, expected)
return pval
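    # Worked example for the expected counts built in hwe_pval above: with two
    # alleles at frequencies p and q = 1 - p over N genotypes, the loop yields
    # p^2 * N (hom ref), 2 * p * q * N (het) and q^2 * N (hom alt), i.e. the
    # standard Hardy-Weinberg expectations.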
|
1653215
|
import doctest
from insights.parsers import ls_ocp_cni_openshift_sdn
from insights.parsers.ls_ocp_cni_openshift_sdn import LsOcpCniOpenshiftSdn
from insights.tests import context_wrap
LS_CNI_OPENSHIFT_SDN = """
total 52
-rw-r--r--. 1 root root 64 Aug 5 23:26 10.130.0.102
-rw-r--r--. 1 root root 64 Aug 5 23:26 10.130.0.103
-rw-r--r--. 1 root root 64 Aug 6 22:52 10.130.0.116
-rw-r--r--. 1 root root 64 Aug 6 22:52 10.130.0.117
-rw-r--r--. 1 root root 64 Aug 5 06:59 10.130.0.15
-rw-r--r--. 1 root root 64 Aug 5 07:02 10.130.0.20
-rw-r--r--. 1 root root 12 Aug 6 22:52 last_reserved_ip.0
""".strip()
def test_ls_ocp_cni_openshift_sdn():
ls_ocp_cni_openshift_sdn = LsOcpCniOpenshiftSdn(
context_wrap(LS_CNI_OPENSHIFT_SDN, path='insights_commands/ls_-l_.var.lib.cni.networks.openshift-sdn'))
assert len(ls_ocp_cni_openshift_sdn.files_of("/var/lib/cni/networks/openshift-sdn")) == 7
assert ls_ocp_cni_openshift_sdn.files_of("/var/lib/cni/networks/openshift-sdn") == ['10.130.0.102', '10.130.0.103',
'10.130.0.116', '10.130.0.117',
'10.130.0.15', '10.130.0.20',
'last_reserved_ip.0']
def test_ls_ocp_cni_openshift_sdn_doc_examples():
env = {
'ls_ocp_cni_openshift_sdn': LsOcpCniOpenshiftSdn(
context_wrap(LS_CNI_OPENSHIFT_SDN, path='insights_commands/ls_-l_.var.lib.cni.networks.openshift-sdn')),
}
failed, total = doctest.testmod(ls_ocp_cni_openshift_sdn, globs=env)
assert failed == 0
|
1653274
|
from __future__ import division, print_function, absolute_import
import time
import os.path
import datetime
from sklearn.linear_model import LogisticRegression, LinearRegression, SGDClassifier, SGDRegressor
from rep.metaml._cache import CacheHelper
from rep.metaml.cache import CacheClassifier, CacheRegressor, cache_helper
from rep.test.test_estimators import generate_classification_data, check_classifier, check_regression
__author__ = '<NAME>'
def test_cache_helper():
cache = CacheHelper(folder='./.cache/rep', expiration_in_seconds=1000)
cache.store_in_cache('first', 'hash', 24)
cache.store_in_cache('first', 'hash', 42)
cache.store_in_cache('second', 'hash', 45)
assert cache.get_from_cache('first', 'hash') == (True, 42)
assert cache.get_from_cache('first', 'wrong_hash')[0] == False
cache.clear_cache()
assert cache.get_from_cache('first', 'hash')[0] == False
assert cache.get_from_cache('first', 'wrong_hash')[0] == False
cache.clear_cache()
def test_cache_expiration(folder='./.cache/rep'):
cache = CacheHelper(folder=folder, expiration_in_seconds=1000)
cache.store_in_cache('first', 'hash', 42)
assert cache.get_from_cache('first', 'hash') == (True, 42)
for file_name in os.listdir(cache.folder):
new_time = datetime.datetime.now() - datetime.timedelta(seconds=10)
new_time = time.mktime(new_time.timetuple())
file_path = os.path.join(cache.folder, file_name)
os.utime(file_path, (new_time, new_time))
# should be able to find
assert cache.get_from_cache('first', 'hash') == (True, 42)
for file_name in os.listdir(cache.folder):
new_time = datetime.datetime.now() - datetime.timedelta(seconds=2000)
new_time = time.mktime(new_time.timetuple())
file_path = os.path.join(cache.folder, file_name)
os.utime(file_path, (new_time, new_time))
# should not be able to find
assert cache.get_from_cache('first', 'hash')[0] == False
cache.clear_cache()
def test_cache_classifier():
cache_helper.clear_cache()
for Wrapper, Model in [(CacheClassifier, LogisticRegression), (CacheRegressor, LinearRegression)]:
X, y, weights = generate_classification_data(n_classes=2)
clf = Wrapper('first', Model()).fit(X, y)
assert clf._used_cache == False
clf = Wrapper('first', Model()).fit(X + 0, y + 0)
assert clf._used_cache == True
# changed name
clf = Wrapper('second', Model()).fit(X, y)
assert clf._used_cache == False
# changed data
X_new = X.copy()
X_new.iloc[0, 0] += 1
clf = Wrapper('first', Model()).fit(X_new, y)
assert clf._used_cache == False
# changed labels
y_new = y.copy()
y_new[0] += 1
clf = Wrapper('first', Model()).fit(X, y_new)
assert clf._used_cache == False
# added weights
clf = Wrapper('first', Model()).fit(X, y, sample_weight=None)
assert clf._used_cache == False
# changed parameters
clf = Wrapper('first', Model(n_jobs=2)).fit(X, y)
assert clf._used_cache == False
# fitting previous once again. Checking that overwriting is correct.
clf = Wrapper('first', Model(n_jobs=2)).fit(X, y)
assert clf._used_cache == True
cache_helper.clear_cache()
def test_models():
for _ in range(3):
clf = CacheClassifier('clf', SGDClassifier(loss='log'))
check_classifier(clf, has_staged_pp=False, has_importances=False)
reg = CacheRegressor('reg', SGDRegressor())
check_regression(reg, has_staged_predictions=False, has_importances=False)
cache_helper.clear_cache()
|
1653292
|
import pylab
#set line width
pylab.rcParams['lines.linewidth'] = 6
#set general font size
pylab.rcParams['font.size'] = 12
#set font size for labels on axes
pylab.rcParams['axes.labelsize'] = 18
#set size of major tick marks on x-axis
pylab.rcParams['xtick.major.size'] = 5
#set size of major tick marks on y-axis
pylab.rcParams['ytick.major.size'] = 5
#set size of markers
pylab.rcParams['lines.markersize'] = 10
def minkowskiDist(v1, v2, p):
#Assumes v1 and v2 are equal length arrays of numbers
dist = 0
for i in range(len(v1)):
dist += abs(v1[i] - v2[i])**p
return dist**(1/p)
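#e.g. minkowskiDist([0, 0], [3, 4], 2) returns 5.0 (the Euclidean distance)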
class Example(object):
def __init__(self, name, features, label = None):
#Assumes features is an array of floats
self.name = name
self.features = features
self.label = label
def dimensionality(self):
return len(self.features)
def getFeatures(self):
return self.features[:]
def getLabel(self):
return self.label
def getName(self):
return self.name
def distance(self, other):
return minkowskiDist(self.features, other.getFeatures(), 2)
def __str__(self):
return self.name +':'+ str(self.features) + ':'\
+ str(self.label)
class Cluster(object):
def __init__(self, examples):
"""Assumes examples a non-empty list of Examples"""
self.examples = examples
self.centroid = self.computeCentroid()
def update(self, examples):
"""Assume examples is a non-empty list of Examples
Replace examples; return amount centroid has changed"""
oldCentroid = self.centroid
self.examples = examples
self.centroid = self.computeCentroid()
return oldCentroid.distance(self.centroid)
def computeCentroid(self):
vals = pylab.array([0.0]*self.examples[0].dimensionality())
for e in self.examples: #compute mean
vals += e.getFeatures()
centroid = Example('centroid', vals/len(self.examples))
return centroid
def getCentroid(self):
return self.centroid
def variability(self):
totDist = 0
for e in self.examples:
totDist += (e.distance(self.centroid))**2
return totDist
def members(self):
for e in self.examples:
yield e
def __str__(self):
names = []
for e in self.examples:
names.append(e.getName())
names.sort()
result = 'Cluster with centroid '\
+ str(self.centroid.getFeatures()) + ' contains:\n '
for e in names:
result = result + e + ', '
return result[:-2] #remove trailing comma and space
def dissimilarity(clusters):
"""Assumes clusters a list of clusters
Returns a measure of the total dissimilarity of the
clusters in the list"""
totDist = 0
for c in clusters:
totDist += c.variability()
return totDist
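#Usage sketch: two 2-D examples form a cluster whose centroid is their mean
#   c = Cluster([Example('a', pylab.array([0.0, 0.0])),
#                Example('b', pylab.array([2.0, 2.0]))])
#   c.getCentroid().getFeatures()  #-> array([1., 1.])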
|
1653439
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.painters import painters
def test_painters():
"""Test module painters.py by downloading
painters.csv and testing shape of
extracted data has 54 rows and 5 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = painters(test_path)
  try:
    assert x_train.shape == (54, 5)
  except Exception:
    shutil.rmtree(test_path)
    raise
|
1653457
|
import numpy as np
# ParaMol imports
from ParaMol.System.system import *
from ParaMol.MM_engines.openmm import *
# ParaMol Tasks imports
from ParaMol.Tasks.parametrization import *
from ParaMol.Tasks.ab_initio_properties import *
from ParaMol.Utils.settings import *
# --------------------------------------------------------- #
# Preparation #
# --------------------------------------------------------- #
# Create the OpenMM engine for carbon monoxide
openmm_engine = OpenMMEngine(True, "AMBER", "co.prmtop", "AMBER", "co.inpcrd")
# Create the ParaMol System
co = ParaMolSystem("carbon_monoxide", openmm_engine, 2)
# Create ParaMol's force field representation and ask to optimize bonds's parameters
co.force_field.create_force_field(opt_bonds=True)
# Create ParaMol settings instance
paramol_settings = Settings()
# --------------------------------------------------------- #
# Perform the conformational sampling manually #
# --------------------------------------------------------- #
# Generate conformations; ParaMol uses nanometers for the length
n_atoms = 2
n_conformations = 100
conformations = np.zeros((n_conformations, n_atoms, 3))
# Change the z distance of atom 2
conformations[:, 1, 2] = np.linspace(0.1, 0.12, n_conformations)
# Set this data in the ParaMol system instance
co.ref_coordinates = conformations
co.n_structures = len(co.ref_coordinates)
# --------------------------------------------------------- #
# Calculate QM energies and forces #
# --------------------------------------------------------- #
# Create the ASE calculator
from ase.calculators.dftb import *
calc = Dftb(Hamiltonian_='DFTB',
Hamiltonian_MaxAngularMomentum_='',
Hamiltonian_MaxAngularMomentum_O='p',
Hamiltonian_MaxAngularMomentum_C='p',
Hamiltonian_SCC='Yes',
Hamiltonian_SCCTolerance=1e-8,
Hamiltonian_MaxSCCIterations=10000)
# Set the calculator in the settings; alternatively the QM engine could be created manually
paramol_settings.qm_engine["ase"]["calculator"] = calc
# Calculate Ab initio properties
ab_initio = AbInitioProperties()
ab_initio.run_task(paramol_settings, [co])
# Save coordinates, energies and forces into .nc file
co.write_data("co_scan.nc")
# --------------------------------------------------------- #
# Parametrize the CO bond #
# --------------------------------------------------------- #
parametrization = Parametrization()
optimal_parameters = parametrization.run_task(paramol_settings, [co])
|
1653471
|
import threading

from kivy.app import App
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import NumericProperty
Builder.load_string('''
<Thread>:
Button:
text: "use thread"
on_release: root.First_thread()
Button:
text: "Hit me"
on_release: root.Counter_function()
Label:
id: lbl
text: "Numbers"''')
class Thread(BoxLayout):
counter = NumericProperty(0)
def Counter_function(self):
self.counter += 1
self.ids.lbl.text = "{}".format(self.counter)
def First_thread(self):
threading.Thread(target = self.Counter_function).start()
self.counter += 1
self.ids.lbl.text = "{}".format(self.counter)
class MyApp(App):
def build(self):
self.load_kv('thread.kv')
return Thread()
if __name__ == "__main__":
app = MyApp()
app.run()
|
1653482
|
t = int(input())
while t:
    S = input().split(' ')
for i in S:
if i == 'not':
print('Real Fancy')
break
else:
print('regularly fancy')
t = t-1
|
1653483
|
import pexpect
p = pexpect.spawn("login")
p.expect("login:")
p.sendline("wrong")
p.expect("Password:")
p.sendline("wrong")
p.expect("Login incorrect")
|
1653513
|
def f():
some_global: int
print(some_global)
# EXPECTED:
[
...,
CODE_START('f'),
~LOAD_CONST('int'),
]
|
1653518
|
import os
# Set the default data directory to be the one this file's directory + data/
data_dir = os.path.dirname(os.path.realpath(__file__))
data_dir = os.path.realpath(os.path.join(data_dir, 'data'))
# Spacy model name
spacy_model_name = 'en_core_web_sm'
|
1653535
|
import logging
import inspect
import json
import os
import urlparse
import pylons.config as config
import ckan.plugins as plugins
import ckan.plugins.toolkit as toolkit
import ckanext.hdx_theme.helpers.auth as auth
from ckanext.hdx_theme.helpers.redirection_middleware import RedirectionMiddleware
from ckanext.hdx_theme.helpers.custom_validator import doesnt_exceed_max_validity_period
from ckanext.hdx_theme.util.http_exception_helper import FlaskEmailFilter
from ckanext.hdx_theme.views.colored_page import hdx_colored_page
from ckanext.hdx_theme.views.faqs import hdx_faqs
# def run_on_startup():
# cache_on_startup = config.get('hdx.cache.onstartup', 'true')
# if 'true' == cache_on_startup:
# _generate_license_list()
# caching.cached_get_group_package_stuff()
# def _generate_license_list():
# package.Package._license_register = license.LicenseRegister()
# package.Package._license_register.licenses = [
# license.License(hdx_licenses.LicenseCreativeCommonsIntergovernmentalOrgs()),
# license.License(license.LicenseCreativeCommonsAttribution()),
# license.License(license.LicenseCreativeCommonsAttributionShareAlike()),
# license.License(hdx_licenses.LicenseOtherPublicDomainNoRestrictions()),
# license.License(hdx_licenses.LicenseHdxMultiple()),
# license.License(hdx_licenses.LicenseHdxOther())
# ]
class HDXThemePlugin(plugins.SingletonPlugin):
plugins.implements(plugins.IConfigurer)
plugins.implements(plugins.IRoutes, inherit=True)
plugins.implements(plugins.ITemplateHelpers)
plugins.implements(plugins.IActions)
plugins.implements(plugins.IAuthFunctions)
plugins.implements(plugins.IApiToken, inherit=True)
plugins.implements(plugins.IMiddleware, inherit=True)
plugins.implements(plugins.IValidators, inherit=True)
plugins.implements(plugins.IBlueprint)
def _add_resource(cls, path, name):
'''OVERRIDE toolkit.add_resource in order to allow adding a resource library
without the dependency to the base/main resource library
See: toolkit.py:_add_resource(cls,path,name) for more details
'''
        # we want the filename of the function caller, but they will
        # have used one of the available helper functions
frame, filename, line_number, function_name, lines, index = \
inspect.getouterframes(inspect.currentframe())[1]
this_dir = os.path.dirname(filename)
absolute_path = os.path.join(this_dir, path)
import ckan.lib.fanstatic_resources
ckan.lib.fanstatic_resources.create_library(name, absolute_path, False)
# IConfigurer
def update_config(self, config):
toolkit.add_template_directory(config, 'templates')
toolkit.add_template_directory(config, 'templates_legacy')
toolkit.add_public_directory(config, 'public')
#self._add_resource('fanstatic', 'hdx_theme')
toolkit.add_public_directory(config, 'fanstatic')
toolkit.add_resource('fanstatic', 'hdx_theme')
# Add configs needed for checks
self.__add_dataproxy_url_for_checks(config)
self.__add_gis_layer_config_for_checks(config)
self.__add_spatial_config_for_checks(config)
self.__add_hxl_proxy_url_for_checks(config)
self.__add_wp_faq_url_for_checks(config)
def __add_dataproxy_url_for_checks(self, config):
dataproxy_url = config.get('ckan.recline.dataproxy_url', '')
dataproxy_url = self._create_full_URL(dataproxy_url)
config['hdx_checks.dataproxy_url'] = dataproxy_url
def __add_gis_layer_config_for_checks(self, config):
gis_layer_api = config.get('hdx.gis.layer_import_url', '')
api_index = gis_layer_api.find('/api')
gis_layer_base_api = gis_layer_api[0:api_index]
gis_layer_base_api = self._create_full_URL(gis_layer_base_api)
config['hdx_checks.gis_layer_base_url'] = gis_layer_base_api
def __add_spatial_config_for_checks(self, config):
search_str = '/services'
spatial_url = config.get('hdx.gis.resource_pbf_url', '')
url_index = spatial_url.find(search_str)
spatial_check_url = spatial_url[0:url_index+len(search_str)]
spatial_check_url = self._create_full_URL(spatial_check_url)
config['hdx_checks.spatial_checks_url'] = spatial_check_url
def __add_hxl_proxy_url_for_checks(self, config):
hxl_proxy_url = self._create_full_URL('/hxlproxy/data.json?url=sample.test')
config['hdx_checks.hxl_proxy_url'] = hxl_proxy_url
def __add_wp_faq_url_for_checks(self, config):
wp_url = '{0}/wp-json/wp/v2/ufaq-category?parent={1}&per_page=100'\
.format(config.get('hdx.wordpress.url'), config.get('hdx.wordpress.category.faq'))
config['hdx_checks.wp_faq_url'] = wp_url
basic_auth = config.get('hdx.wordpress.auth.basic')
if not basic_auth:
basic_auth = "None"
config['hdx_checks.wp_basic_auth'] = basic_auth
def _create_full_URL(self, url):
'''
Different URLs specified in prod.ini might be relative URLs or be
protocol independent.
This function tries to guess the full URL.
:param url: the url to be modified if needed
:type url: str
'''
urlobj = urlparse.urlparse(url)
if not urlobj.netloc:
base_url = config.get('ckan.site_url')
base_urlobj = urlparse.urlparse(base_url)
urlobj = urlobj._replace(scheme=base_urlobj.scheme)
urlobj = urlobj._replace(netloc=base_urlobj.netloc)
if not urlobj.scheme:
urlobj = urlobj._replace(scheme='https')
return urlobj.geturl()
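    # Sketch of _create_full_URL: '/hxlproxy/data.json' becomes
    # '<site scheme>://<site host>/hxlproxy/data.json' (scheme and host taken
    # from ckan.site_url), while a protocol-relative '//cdn.example.org/x'
    # only gets its scheme filled in, falling back to 'https'.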
def before_map(self, map):
map.connect(
'hdx_home', '/', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='index')
map.connect(
'/count/dataset', controller='ckanext.hdx_theme.helpers.count:CountController', action='dataset')
map.connect(
'/count/country', controller='ckanext.hdx_theme.helpers.count:CountController', action='country')
map.connect(
'/count/source', controller='ckanext.hdx_theme.helpers.count:CountController', action='source')
#map.connect('/user/logged_in', controller='ckanext.hdx_theme.login:LoginController', action='logged_in')
#map.connect('/contribute', controller='ckanext.hdx_theme.login:LoginController', action='contribute')
map.connect(
'/count/test', controller='ckanext.hdx_theme.helpers.count:CountController', action='test')
map.connect(
'/about/{page}', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='about')
map.connect(
'/about/license/legacy_hrinfo', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='about_hrinfo')
map.connect(
'/widget/topline', controller='ckanext.hdx_theme.controllers.widget_topline:WidgetToplineController', action='show')
map.connect(
'/widget/3W', controller='ckanext.hdx_theme.controllers.widget_3W:Widget3WController', action='show')
map.connect(
'/widget/WFP', controller='ckanext.hdx_theme.controllers.widget_WFP:WidgetWFPController', action='show')
map.connect('about', '/about', controller='ckanext.hdx_theme.controllers.faq:FaqController', action='about')
map.connect('/documentation',
controller='ckanext.hdx_theme.controllers.documentation_controller:DocumentationController',
action='show')
map.connect('/documentation/resources-for-developers',
controller='ckanext.hdx_theme.controllers.documentation_controller:DocumentationController',
action='show')
map.connect('/faq', controller='ckanext.hdx_theme.controllers.faq:FaqController', action='show')
map.connect('/archive',
controller='ckanext.hdx_theme.controllers.archived_quick_links_controller:ArchivedDatavizController',
action='show')
map.connect(
'/faq/contact_us', controller='ckanext.hdx_theme.controllers.faq:FaqController', action='contact_us')
# map.connect('/explore', controller='ckanext.hdx_theme.controllers.explorer:ExplorerController', action='show')
#map.connect('resource_edit', '/dataset/{id}/resource_edit/{resource_id}', controller='ckanext.hdx_theme.package_controller:HDXPackageController', action='resource_edit', ckan_icon='edit')
map.connect('carousel_settings', '/ckan-admin/carousel/show',
controller='ckanext.hdx_theme.controllers.custom_settings:CustomSettingsController', action='show')
map.connect('quick_links_settings', '/ckan-admin/dataviz/show',
controller='ckanext.hdx_theme.controllers.quick_links_custom_settings:'
'DatavizCustomSettingsController',
action='show')
map.connect('package_links_settings', '/ckan-admin/packagelinks/show',
controller='ckanext.hdx_theme.controllers.package_links_custom_settings:'
'PackageLinksCustomSettingsController',
action='show')
map.connect('pages_show', '/ckan-admin/pages/show',
controller='ckanext.hdx_theme.controllers.custom_settings:CustomSettingsController', action='show_pages')
map.connect('global_file_download', '/global/{filename}',
controller='ckanext.hdx_theme.controllers.global_file_server:GlobalFileController',
action='global_file_download')
map.connect('update_carousel_settings', '/ckan-admin/carousel/update',
controller='ckanext.hdx_theme.controllers.custom_settings:CustomSettingsController', action='update')
map.connect('delete_carousel_settings', '/ckan-admin/carousel/delete/{id}',
controller='ckanext.hdx_theme.controllers.custom_settings:CustomSettingsController',
action='delete')
map.connect('update_quick_links_settings', '/ckan-admin/quick-links/update',
controller='ckanext.hdx_theme.controllers.quick_links_custom_settings:DatavizCustomSettingsController',
action='update')
map.connect('delete_quick_links_settings', '/ckan-admin/quick-links/delete/{id}',
controller='ckanext.hdx_theme.controllers.quick_links_custom_settings:DatavizCustomSettingsController',
action='delete')
map.connect('update_package_links_settings', '/ckan-admin/package-links/update',
controller='ckanext.hdx_theme.controllers.package_links_custom_settings:PackageLinksCustomSettingsController',
action='update')
map.connect('delete_package_links_settings', '/ckan-admin/package-links/delete/{id}',
controller='ckanext.hdx_theme.controllers.package_links_custom_settings:PackageLinksCustomSettingsController',
action='delete')
map.connect('image_serve', '/image/{label}',
controller='ckanext.hdx_theme.controllers.image_controller:ImageController', action='org_file')
map.connect('dataset_image_serve', '/dataset_image/{label}',
controller='ckanext.hdx_theme.controllers.image_controller:ImageController', action='dataset_file')
map.connect('test_dataviz_gallery', '/datavis', controller='ckanext.hdx_theme.controllers.dataviz:DatavizController', action='show')
return map
def get_helpers(self):
from ckanext.hdx_theme.helpers import helpers as hdx_helpers
from ckanext.hdx_theme.helpers.constants import const
return {
'is_downloadable': hdx_helpers.is_downloadable,
'is_not_zipped': hdx_helpers.is_not_zipped,
'is_not_hxl_format': hdx_helpers.is_not_hxl_format,
'get_facet_items_dict': hdx_helpers.get_facet_items_dict,
'get_last_modifier_user': hdx_helpers.get_last_modifier_user,
'get_filtered_params_list': hdx_helpers.get_filtered_params_list,
# 'get_last_revision_package': hdx_helpers.get_last_revision_package,
# 'get_last_revision_group': hdx_helpers.get_last_revision_group,
'get_group_followers': hdx_helpers.get_group_followers,
'get_group_members': hdx_helpers.get_group_members,
'markdown_extract_strip': hdx_helpers.markdown_extract_strip,
'render_markdown_strip': hdx_helpers.render_markdown_strip,
'render_date_from_concat_str': hdx_helpers.render_date_from_concat_str,
'hdx_version': hdx_helpers.hdx_version,
'hdx_build_nav_icon_with_message': hdx_helpers.hdx_build_nav_icon_with_message,
'hdx_build_nav_no_icon': hdx_helpers.hdx_build_nav_no_icon,
'hdx_num_of_new_related_items': hdx_helpers.hdx_num_of_new_related_items,
'hdx_get_extras_element': hdx_helpers.hdx_get_extras_element,
'hdx_get_user_info': hdx_helpers.hdx_get_user_info,
'hdx_linked_user': hdx_helpers.hdx_linked_user,
'hdx_show_singular_plural': hdx_helpers.hdx_show_singular_plural,
'hdx_member_roles_list': hdx_helpers.hdx_member_roles_list,
'hdx_organizations_available_with_roles': hdx_helpers.hdx_organizations_available_with_roles,
'hdx_group_followee_list': hdx_helpers.hdx_group_followee_list,
'hdx_follow_link': hdx_helpers.hdx_follow_link,
'hdx_remove_schema_and_domain_from_url': hdx_helpers.hdx_remove_schema_and_domain_from_url,
'hdx_get_ckan_config': hdx_helpers.hdx_get_ckan_config,
'get_group_name_from_list': hdx_helpers.get_group_name_from_list,
'one_active_item': hdx_helpers.one_active_item,
'hdx_follow_button': hdx_helpers.hdx_follow_button,
'get_last_revision_timestamp_group': hdx_helpers.get_last_revision_timestamp_group,
'feature_count': hdx_helpers.feature_count,
'follow_status': hdx_helpers.follow_status,
'hdx_add_url_param': hdx_helpers.hdx_add_url_param,
'methodology_bk_compat': hdx_helpers.methodology_bk_compat,
'count_public_datasets_for_group': hdx_helpers.count_public_datasets_for_group,
'hdx_resource_preview': hdx_helpers.hdx_resource_preview,
'load_json': hdx_helpers.load_json,
'escaped_dump_json': hdx_helpers.escaped_dump_json,
'json_dumps': json.dumps,
'hdx_less_default': hdx_helpers.hdx_less_default,
'hdx_popular': hdx_helpers.hdx_popular,
'get_dataset_date_format': hdx_helpers.get_dataset_date_format,
'hdx_methodology_list': hdx_helpers.hdx_methodology_list,
'hdx_license_list': hdx_helpers.hdx_license_list,
'hdx_location_list': hdx_helpers.hdx_location_list,
'hdx_organisation_list': hdx_helpers.hdx_organisation_list,
'hdx_tag_list': hdx_helpers.hdx_tag_list,
'hdx_frequency_list': hdx_helpers.hdx_frequency_list,
# 'hdx_get_layer_info': hdx_helpers.hdx_get_layer_info,
'hdx_get_carousel_list': hdx_helpers.hdx_get_carousel_list,
'hdx_get_quick_links_list': hdx_helpers.hdx_get_quick_links_list,
'hdx_get_frequency_by_value': hdx_helpers.hdx_get_frequency_by_value,
'hdx_is_current_user_a_maintainer': hdx_helpers.hdx_is_current_user_a_maintainer,
'hdx_dataset_follower_count': hdx_helpers.hdx_dataset_follower_count,
'hdx_organization_list_for_user': hdx_helpers.hdx_organization_list_for_user,
'hdx_is_sysadmin': hdx_helpers.hdx_is_sysadmin,
'hdx_dataset_preview_values_list': hdx_helpers.hdx_dataset_preview_values_list,
'hdx_dataset_is_hxl': hdx_helpers.hdx_dataset_is_hxl,
'hdx_switch_url_path': hdx_helpers.switch_url_path,
'hdx_munge_title': hdx_helpers.hdx_munge_title,
'hdx_url_for': hdx_helpers.hdx_url_for,
'hdx_check_http_response': hdx_helpers.hdx_check_http_response,
'hdx_get_request_param': hdx_helpers.hdx_get_request_param,
'HDX_CONST': const
}
def get_actions(self):
from ckanext.hdx_theme.helpers import actions as hdx_actions
return {
'hdx_organization_list_for_user': hdx_actions.hdx_organization_list_for_user,
'cached_group_list': hdx_actions.cached_group_list,
'cached_organization_list': hdx_actions.cached_organization_list,
'invalidate_cache_for_groups': hdx_actions.invalidate_cache_for_groups,
'invalidate_cache_for_organizations': hdx_actions.invalidate_cache_for_organizations,
'invalidate_cached_resource_id_apihighways': hdx_actions.invalidate_cached_resource_id_apihighways,
'invalidate_region': hdx_actions.invalidate_region,
'hdx_basic_user_info': hdx_actions.hdx_basic_user_info,
'member_list': hdx_actions.member_list,
# 'hdx_get_sys_admins': hdx_actions.hdx_get_sys_admins,
# 'hdx_send_new_org_request': hdx_actions.hdx_send_new_org_request,
'hdx_send_editor_request_for_org': hdx_actions.hdx_send_editor_request_for_org,
# 'hdx_send_request_membership': hdx_actions.hdx_send_request_membership,
# 'hdx_user_show': hdx_actions.hdx_user_show,
'hdx_get_indicator_values': hdx_actions.hdx_get_indicator_values,
# 'hdx_get_shape_geojson': hdx_actions.hdx_get_shape_geojson,
# 'hdx_get_shape_info': hdx_actions.hdx_get_shape_info,
# 'hdx_get_indicator_available_periods': hdx_actions.hdx_get_indicator_available_periods,
'hdx_carousel_settings_show': hdx_actions.hdx_carousel_settings_show,
'hdx_carousel_settings_update': hdx_actions.hdx_carousel_settings_update,
# 'hdx_get_json_from_resource':hdx_actions.hdx_get_json_from_resource
#'hdx_get_activity_list': hdx_actions.hdx_get_activity_list
'hdx_general_statistics': hdx_actions.hdx_general_statistics,
'hdx_user_statistics': hdx_actions.hdx_user_statistics,
'hdx_organization_statistics': hdx_actions.hdx_organization_statistics,
'hdx_quick_links_settings_show': hdx_actions.hdx_quick_links_settings_show,
'hdx_quick_links_settings_update': hdx_actions.hdx_quick_links_settings_update,
'hdx_package_links_settings_show': hdx_actions.package_links_settings_show,
'hdx_package_links_settings_update': hdx_actions.package_links_settings_update,
'hdx_package_links_by_id_list': hdx_actions.package_links_by_id_list,
'activity_detail_list': hdx_actions.hdx_activity_detail_list,
}
def get_auth_functions(self):
return {
'hdx_basic_user_info': auth.hdx_basic_user_info,
'group_member_create': auth.group_member_create,
# 'hdx_send_new_org_request': auth.hdx_send_new_org_request,
'hdx_send_editor_request_for_org': auth.hdx_send_editor_request_for_org,
'invalidate_cache_for_groups': auth.invalidate_cache_for_groups,
'invalidate_cache_for_organizations': auth.invalidate_cache_for_organizations,
'invalidate_cached_resource_id_apihighways': auth.invalidate_cached_resource_id_apihighways,
'invalidate_region': auth.invalidate_region,
'hdx_user_statistics': auth.hdx_user_statistics,
'hdx_carousel_update': auth.hdx_carousel_update,
'hdx_request_data_admin_list': auth.hdx_request_data_admin_list,
'hdx_quick_links_update': auth.hdx_quick_links_update,
}
# IMiddleware
def make_middleware(self, app, config):
new_app = RedirectionMiddleware(app, config)
if app.app_name == 'flask_app':
from logging.handlers import SMTPHandler
for handler in app.logger.handlers:
if isinstance(handler, SMTPHandler):
handler.setLevel(logging.ERROR)
app.logger.addFilter(FlaskEmailFilter())
return new_app
# IValidators
def get_validators(self):
return {
'doesnt_exceed_max_validity_period': doesnt_exceed_max_validity_period,
}
# IApiToken
def create_api_token_schema(self, schema):
# add to the schema from expire_api_token plugin
schema['expires_in'].append(toolkit.get_validator('doesnt_exceed_max_validity_period'))
return schema
# IBlueprint
def get_blueprint(self):
return [hdx_colored_page, hdx_faqs]
|
1653573
|
import os
from contextlib import contextmanager
import requests_mock
mydir = os.path.dirname(__file__)
URL_TO_RESPONSE_FILE_MAP = {
('https://kartta.hel.fi/ws/geoserver/avoindata/wfs'
'?service=WFS&version=2.0.0&request=GetCapabilities'): (
'wfs_capabilities_response.xml'),
('https://kartta.hel.fi/ws/geoserver/avoindata/wfs'
'?service=WFS&version=2.0.0&request=GetFeature'
'&typenames=avoindata%3Aliikennemerkkipilotti_pysakointipaikat'): (
'parking_area_importer_data.xml'),
('https://kartta.hel.fi/ws/geoserver/avoindata/wfs'
'?service=WFS&version=2.0.0&request=GetFeature'
'&typenames=Asukas_ja_yrityspysakointivyohykkeet_alue'): (
'permit_area_importer_data.xml'),
('https://kartta.hel.fi/ws/geoserver/avoindata/wfs'
'?service=WFS&version=2.0.0&request=GetFeature'
'&typenames=Pysakoinnin_maksuvyohykkeet_alue'): (
'payment_zone_importer_data.xml'),
}
@contextmanager
def mocked_requests():
with requests_mock.Mocker(real_http=False) as requests_mocker:
for (url, response_file) in URL_TO_RESPONSE_FILE_MAP.items():
with open(os.path.join(mydir, response_file), 'rb') as fp:
response_content = fp.read()
requests_mocker.get(url, content=response_content)
yield requests_mocker
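# Usage sketch (the importer call is hypothetical): HTTP GETs issued inside
# the block are answered from the local XML fixtures instead of kartta.hel.fi:
#   with mocked_requests():
#       run_parking_area_import()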
|
1653579
|
import os
import time
import h5py
from collections import OrderedDict
import numpy as np
from env.jaco.two_jaco import TwoJacoEnv
from env.transform_utils import quat_dist
class TwoJacoPlaceEnv(TwoJacoEnv):
def __init__(self, **kwargs):
self.name = 'two-jaco-place'
super().__init__('two_jaco_pick.xml', **kwargs)
# config
self._env_config.update({
"train_left": True,
"train_right": True,
"success_reward": 500,
"target_xy_reward": 500,
"target_z_reward": 500,
"move_finish_reward": 50,
"grasp_reward": 100,
"inair_reward": 0,
"init_randomness": 0.005,
"dest_pos": [0.3, -0.02, 0.86],
"dest_center": True, # set destination to center
"ctrl_reward": 1e-4,
"max_episode_steps": 100,
"init_qpos_dir": None
})
self._env_config.update({ k:v for k,v in kwargs.items() if k in self._env_config })
# target position
if not self._env_config['dest_center']:
if self._env_config['train_left'] and not self._env_config['train_right']:
self._env_config['dest_pos'] = [0.05, 0.2, 0.86] # 0.15
elif not self._env_config['train_left'] and self._env_config['train_right']:
self._env_config['dest_pos'] = [0.05, -0.2, 0.86] # 0.15
# state
self._hold_duration = [0, 0]
self._box_z = [0, 0]
self._min_height = self._env_config['dest_pos'][-1] + 0.08
self._get_reference()
def _get_reference(self):
self.cube_body_id = [self.sim.model.body_name2id("cube{}".format(i)) for i in [1, 2]]
self.cube_geom_id = [self.sim.model.geom_name2id("cube{}".format(i)) for i in [1, 2]]
def _compute_reward(self):
        ''' Environment reward consists of a place (z) reward and an xy-position reward
            Place reward = -|current z position - target z position|
            Xy-position reward = -||current xy position - target xy position||
        '''
done = False
off_table = [False, False]
info = {}
# compute gripper centers
cube_z = [self._get_pos("cube1")[-1], self._get_pos("cube2")[-1]]
gripper_centers = self._get_gripper_centers()
# get init box pos
if self._t == 0:
self._init_box_pos = [self._get_pos('cube{}'.format(i + 1)) for i in range(2)]
# object placing reward before placing is finished
in_hand = [True, True]
target_xy_rewards = [0, 0]
target_z_rewards = [0, 0]
target_dist_xy = [0, 0]
target_dist_z = [0, 0]
grasp_count = [0, 0]
grasp_rewards = [0, 0]
inair_rewards = [1, 1]
move_finish_rewards = [0, 0]
for i in range(2):
dist_cube_hand = np.linalg.norm(self._get_pos('cube{}'.format(i + 1)) - gripper_centers[i])
in_hand[i] = dist_cube_hand < 0.08
off_table[i] = cube_z[i] < 0.8
# place reward
cube_pos = self._get_pos('cube{}'.format(i + 1))
target_dist_xy[i] = float(np.linalg.norm(cube_pos[:2] - self._env_config['dest_pos'][:2]))
target_dist_z[i] = float(np.abs(cube_pos[-1] - self._env_config['dest_pos'][-1]))
if self._stage[i] == 'move':
height_decrease = self._init_box_pos[i][-1] - cube_pos[-1]
target_z_rewards[i] = -self._env_config["target_z_reward"] * max(0, height_decrease)
if target_dist_xy[i] < 0.06 and cube_pos[-1] > self._min_height:
move_finish_rewards[i] += self._env_config['move_finish_reward']
self._stage[i] = 'place'
if self._stage[i] == 'place':
target_z_rewards[i] = -self._env_config["target_z_reward"] * target_dist_z[i]
target_xy_rewards[i] -= self._env_config["target_xy_reward"] * target_dist_xy[i]
# grasp reward
contact_flag = [False] * 3
geom_name_prefix = 'jaco_{}_link_finger'.format('l' if i == 0 else 'r')
for j in range(self.sim.data.ncon):
c = self.sim.data.contact[j]
for k in range(3):
geom_name = '{}_{}'.format(geom_name_prefix, k + 1)
geom_id = self.sim.model.geom_name2id(geom_name)
if c.geom1 == geom_id and c.geom2 == self.cube_geom_id[i]:
contact_flag[k] = True
if c.geom2 == geom_id and c.geom1 == self.cube_geom_id[i]:
contact_flag[k] = True
grasp_count[i] = np.array(contact_flag).astype(int).sum()
if self._t == 0:
self._init_grasp_count[i] = grasp_count[i]
grasp_rewards[i] = -self._env_config["grasp_reward"] * max(0, self._init_grasp_count[i] - grasp_count[i])
# in air reward
table_geom_id = self.sim.model.geom_name2id("table_collision")
for j in range(self.sim.data.ncon):
c = self.sim.data.contact[j]
if c.geom1 == table_geom_id and c.geom2 == self.cube_geom_id[i]:
inair_rewards[i] = 0
if c.geom2 == table_geom_id and c.geom1 == self.cube_geom_id[i]:
inair_rewards[i] = 0
if target_dist_xy[i] < 0.06:
inair_rewards[i] = 1
inair_rewards[i] *= self._env_config["inair_reward"]
# success criteria
self._success = True
if self._env_config['train_left']:
self._success &= in_hand[0] and target_dist_xy[0] < 0.04 and target_dist_z[0] < 0.03 and self._stage[0] == 'place'
if self._env_config['train_right']:
self._success &= in_hand[1] and target_dist_xy[1] < 0.04 and target_dist_z[1] < 0.03 and self._stage[1] == 'place'
# success reward
success_reward = 0
if self._success:
print('All places success!')
success_reward = self._env_config["success_reward"]
done = self._success
if self._env_config['train_left']:
done |= (not in_hand[0])
if self._env_config['train_right']:
done |= (not in_hand[1])
reward = success_reward
in_hand_rewards = [0, 0]
if self._env_config["train_left"]:
done |= off_table[0]
reward += target_xy_rewards[0] + target_z_rewards[0] + grasp_rewards[0] + inair_rewards[0] + move_finish_rewards[0]
if self._env_config["train_right"]:
done |= off_table[1]
reward += target_xy_rewards[1] + target_z_rewards[1] + grasp_rewards[1] + inair_rewards[1] + move_finish_rewards[1]
info = {"reward_xy_1": target_xy_rewards[0],
"reward_xy_2": target_xy_rewards[1],
"reward_z_1": target_z_rewards[0],
"reward_z_2": target_z_rewards[1],
"reward_grasp_1": grasp_rewards[0],
"reward_grasp_2": grasp_rewards[1],
"reward_inair_1": inair_rewards[0],
"reward_inair_2": inair_rewards[1],
"reward_move_finish_1": move_finish_rewards[0],
"reward_move_finish_2": move_finish_rewards[1],
"grasp_count_1": grasp_count[0],
"grasp_count_2": grasp_count[1],
"in_hand": in_hand,
"target_pos": self._env_config['dest_pos'],
"cube1_pos": self._get_pos("cube1"),
"cube2_pos": self._get_pos("cube2"),
"gripper1_pos": gripper_centers[0],
"gripper2_pos": gripper_centers[1],
"target_dist_xy": np.round(target_dist_xy, 3),
"target_dist_z": np.round(target_dist_z, 3),
"curr_qpos_l": np.round(self.data.qpos[1:10], 1).tolist(),
"curr_qpos_r": np.round(self.data.qpos[10:19], 1).tolist(),
"stage": self._stage,
"success": self._success }
return reward, done, info
def _step(self, a):
prev_reward, _, _ = self._compute_reward()
# build action from policy output
filled_action = np.zeros((self.action_space.size,))
inclusion_dict = { 'right_arm': self._env_config['train_right'],
'left_arm': self._env_config['train_left'] }
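# copy policy outputs only for the arms being trained; untrained arms keep zero actions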
dest_idx, src_idx = 0, 0
for k in self.action_space.shape.keys():
new_dest_idx = dest_idx + self.action_space.shape[k]
if inclusion_dict[k]:
new_src_idx = src_idx + self.action_space.shape[k]
filled_action[dest_idx:new_dest_idx] = a[src_idx:new_src_idx]
src_idx = new_src_idx
dest_idx = new_dest_idx
a = filled_action
# scale actions from [-1, 1] range to actual control range
mins = self.action_space.minimum
maxs = self.action_space.maximum
scaled_action = np.zeros_like(a)
for i in range(self.action_space.size):
scaled_action[i] = mins[i] + (maxs[i] - mins[i]) * (a[i] / 2 + 0.5)
self.do_simulation(scaled_action)
self._t += 1
ob = self._get_obs()
reward, done, info = self._compute_reward()
ctrl_reward = self._ctrl_reward(scaled_action)
info['reward_ctrl'] = ctrl_reward
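# report the change in shaped reward (potential-style difference) plus the control penalty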
self._reward = reward - prev_reward + ctrl_reward
return ob, self._reward, done, info
def reset_box(self):
self.cube1_target_reached = False
self.cube2_target_reached = False
super().reset_box()
qpos = self.data.qpos.ravel().copy()
qvel = self.data.qvel.ravel().copy()
# set agent's and box's initial position from saved poses
if self._env_config['init_qpos_dir']:
filepath = os.path.join(self._env_config['init_qpos_dir'], 'success_qpos.p')
with h5py.File(filepath, 'r', libver='latest', swmr=True) as f:
select_success = False
while not select_success:
ix = np.random.randint(len(f))
qpos = f[str(ix)].value
cube_l_ok = qpos[20] > self._min_height or (not self._env_config['train_left'])
cube_r_ok = qpos[27] > self._min_height or (not self._env_config['train_right'])
if cube_l_ok and cube_r_ok:
select_success = True
self.set_state(qpos, qvel)
self._hold_duration = [0, 0]
self._t = 0
self._placed = False
self._init_grasp_count = [0, 0]
self._stage = ['move'] * 2
|
1653597
|
from vk_raid_defender.cli import cli
if __name__ == '__main__':
cli.main()
else:
raise ImportError('this module must only be used to launch the program')
|
1653605
|
import os
import sys
import shutil
import unittest
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
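# make the repository root importable when the tests are run from a checkout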
import chazutsu.datasets
from tests.dataset_base_test import DatasetTestCase
class TestSquad(DatasetTestCase):
def test_download_v1_1(self):
r_train = chazutsu.datasets.SQuAD(
kind="train", version="v1.1").download(self.test_dir)
r_dev = chazutsu.datasets.SQuAD(
kind="dev", version="v1.1").download(self.test_dir)
train = r_train.data()
dev = r_dev.data()
self.assertEqual(train.shape[0], 87599)
self.assertEqual(dev.shape[0], 10570)
self.assertEqual(train.shape[1], dev.shape[1])
def test_download_v2_0(self):
r_train = chazutsu.datasets.SQuAD(
kind="train", version="v2.0").download(self.test_dir)
r_dev = chazutsu.datasets.SQuAD(
kind="dev", version="v2.0").download(self.test_dir)
train = r_train.data()
dev = r_dev.data()
self.assertEqual(train.shape[0], 130319)
self.assertEqual(dev.shape[0], 11873)
self.assertEqual(train.shape[1], dev.shape[1])
if __name__ == "__main__":
unittest.main()
|
1653652
|
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import numpy as np
n_iter=0
def group_weight(module):
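# split parameters into a weight-decay group and a no-decay group (biases and batchnorm parameters are exempt from decay)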
group_decay = []
group_no_decay = []
for m in module.modules():
if isinstance(m, nn.Linear):
group_decay.append(m.weight)
if m.bias is not None:
group_no_decay.append(m.bias)
elif isinstance(m, nn.modules.conv._ConvNd):
group_decay.append(m.weight)
if m.bias is not None:
group_no_decay.append(m.bias)
elif isinstance(m, nn.modules.batchnorm._BatchNorm):
if m.weight is not None:
group_no_decay.append(m.weight)
if m.bias is not None:
group_no_decay.append(m.bias)
assert len(list(module.parameters())) == len(group_decay) + len(group_no_decay)
groups = [dict(params=group_decay), dict(params=group_no_decay, weight_decay=.0)]
return groups
def update_ema_variables(model, ema_model, alpha, global_step):
# Use the true average until the exponential average is more correct
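# ema_param <- alpha * ema_param + (1 - alpha) * param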
alpha = min(1 - 1 / (global_step + 1), alpha)
for ema_param, param in zip(ema_model.parameters(), model.parameters()):
ema_param.data.mul_(alpha).add_(1 - alpha, param.data)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.initialized = False
self.val = None
self.avg = None
self.sum = None
self.count = None
def initialize(self, val, weight):
self.val = val
self.avg = val
self.sum = val * weight
self.count = weight
self.initialized = True
def update(self, val, weight=1):
if not self.initialized:
self.initialize(val, weight)
else:
self.add(val, weight)
def add(self, val, weight):
self.val = val
self.sum += val * weight
self.count += weight
self.avg = self.sum / self.count
def value(self):
return self.val
def average(self):
return self.avg
def IOU_Score(y_pred,y_val):
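# mean precision of per-image IoU over the thresholds 0.5, 0.55, ..., 0.95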
def IoUOld(a,b):
intersection = ((a==1) & (a==b)).sum()
union = ((a==1) | (b==1)).sum()
if union > 0:
return intersection / union
elif union == 0 and intersection == 0:
return 1
else:
return 0
y_pred=y_pred[:,1,:,:]#.view(batch_size,1,101,101)
t=0.5
IOU_list=[]
for j in range(y_pred.shape[0]):
y_pred_ = np.array(y_pred[j,:,:] > t, dtype=bool)
y_val_=np.array(y_val[j,:,:], dtype=bool)
IOU = IoUOld(y_pred_, y_val_)
IOU_list.append(IOU)
# now we take different thresholds; these thresholds
# determine whether our IOU counts as a "true positive"
# or not
prec_list=[]
for IOU_t in np.arange(0.5, 1.0, 0.05):
# get true positives, i.e. all examples where the IOU is larger than the threshold
TP=np.sum(np.asarray(IOU_list)>IOU_t)
# calculate the current precision by dividing by the total number of examples
# the reference writes the denominator as TP+FP+FN, but with this per-image counting there are no separate false positives, so it reduces to the total number of examples
Prec=TP/len(IOU_list)
prec_list.append(Prec)
return np.mean(prec_list)
#Main Training Function
from losses import lovasz_softmax,FocalLoss
from training_functions import IOU_Score
focal=FocalLoss(size_average=True)
def train(train_loader,segmentation_module,segmentation_ema,optimizer
,writer
,lovasz_scaling=0.1
,focal_scaling=0.9
,unsupervised_scaling=0.1
,ema_scaling=0.2
,non_ema_scaling=1
,second_batch_size=2
,train=True
,test=False
,writer_name_list=None
):
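# mean-teacher style loop: supervised lovasz + focal losses on the labeled data plus an MSE consistency loss between student and EMA-teacher predictions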
global n_iter
#Training Loop
cudnn.benchmark = True
lovasz_scaling=torch.tensor(lovasz_scaling).float().cuda()
focal_scaling=torch.tensor(focal_scaling).float().cuda()
unsupervised_scaling=torch.tensor(unsupervised_scaling).float().cuda()
ema_scaling=torch.tensor(ema_scaling).float().cuda()
non_ema_scaling=torch.tensor(non_ema_scaling).float().cuda()
# average meters for all the losses we keep track of
ave_non_ema_loss = AverageMeter()
ave_ema_loss = AverageMeter()
ave_total_loss = AverageMeter()  # total loss
ave_lovasz_loss = AverageMeter()
ave_focal_loss = AverageMeter()
ave_lovasz_loss_ema = AverageMeter()
ave_focal_loss_ema = AverageMeter()
ave_unsupervised_loss = AverageMeter()
ave_iou_score = AverageMeter()
if train:
segmentation_module.train()
segmentation_ema.train()
else:
segmentation_module.eval()
segmentation_ema.eval()
for batch_data in train_loader:
batch_data["img_data"]=batch_data["img_data"].cuda()
batch_data["seg_label"]=batch_data["seg_label"].cuda().long().squeeze()
# normal prediction and prediction from the self-ensembled (EMA) model
pred = segmentation_module(batch_data)
pred_ema = segmentation_ema(batch_data)
# we don't want gradients to flow into the EMA model
pred_ema=Variable(pred_ema.detach().data, requires_grad=False)
### UNSUPERVISED LOSS ####
unsupervised_loss = torch.mean((pred - pred_ema)**2).cuda()
### SUPERVISED LOSS ####
# we just drop the unlabeled examples for the supervised loss
pred=pred[:-second_batch_size,:,:]
pred_ema=pred_ema[:-second_batch_size,:,:]
batch_data["seg_label"]=batch_data["seg_label"][:-second_batch_size,:,:]
lovasz_loss=lovasz_softmax(pred, batch_data['seg_label'],ignore=-1,only_present=True).cuda()
focal_loss=focal(pred, batch_data['seg_label'],)
lovasz_loss_ema=lovasz_softmax(pred_ema, batch_data['seg_label'],ignore=-1,only_present=True).cuda()
focal_loss_ema=focal(pred_ema, batch_data['seg_label'],)
#### Loss Combinations #####
non_ema_loss=(lovasz_loss*lovasz_scaling+focal_loss*focal_scaling).cuda()
ema_loss=(lovasz_loss_ema*lovasz_scaling+focal_loss_ema*focal_scaling).cuda()
total_loss=(non_ema_loss*non_ema_scaling+ema_loss*ema_scaling+unsupervised_scaling*unsupervised_loss).cuda()
# IOU_Score expects softmax probabilities
pred = nn.functional.softmax(pred, dim=1)
iou_score=IOU_Score(pred,batch_data["seg_label"])
### BACKWARD PASS ####
if train:
optimizer.zero_grad()
total_loss.backward()
optimizer.step()
n_iter=n_iter+1
update_ema_variables(segmentation_module, segmentation_ema, 0.999, n_iter)
### TENSORBOARD LOGGING #########
ave_non_ema_loss.update(non_ema_loss.data.item())
ave_ema_loss.update(ema_loss.data.item())
ave_total_loss.update(total_loss.data.item())
ave_lovasz_loss.update(lovasz_loss.data.item())
ave_focal_loss.update(focal_loss.data.item())
ave_lovasz_loss_ema.update(lovasz_loss_ema.data.item())
ave_focal_loss_ema.update(focal_loss_ema.data.item())
ave_unsupervised_loss.update(unsupervised_loss.data.item())
ave_iou_score.update(iou_score.item())
if test:
print(n_iter)
break
writer.add_scalar(writer_name_list[0], ave_non_ema_loss.average(), n_iter)
writer.add_scalar(writer_name_list[1], ave_ema_loss.average(), n_iter)
writer.add_scalar(writer_name_list[2], ave_total_loss.average(), n_iter)
writer.add_scalar(writer_name_list[3], ave_lovasz_loss.average(), n_iter)
writer.add_scalar(writer_name_list[4], ave_focal_loss.average(), n_iter)
writer.add_scalar(writer_name_list[5], ave_lovasz_loss_ema.average(), n_iter)
writer.add_scalar(writer_name_list[6], ave_focal_loss_ema.average(), n_iter)
writer.add_scalar(writer_name_list[7], ave_unsupervised_loss.average(), n_iter)
writer.add_scalar(writer_name_list[8], ave_iou_score.average(), n_iter)
if not train:
return ave_iou_score.average()
|
1653693
|
from typing import Dict
from typing import List
from botocore.paginate import Paginator
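# Usage sketch (hypothetical setup; these stub classes only document the
# paginator interfaces that boto3 exposes via client.get_paginator()):
#   client = boto3.client('appstream')
#   paginator = client.get_paginator('describe_fleets')
#   for page in paginator.paginate():
#       fleets = page['Fleets']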
class DescribeDirectoryConfigs(Paginator):
def paginate(self, DirectoryNames: List = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`AppStream.Client.describe_directory_configs`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeDirectoryConfigs>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
DirectoryNames=[
'string',
],
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'DirectoryConfigs': [
{
'DirectoryName': 'string',
'OrganizationalUnitDistinguishedNames': [
'string',
],
'ServiceAccountCredentials': {
'AccountName': 'string',
'AccountPassword': '<PASSWORD>'
},
'CreatedTime': datetime(2015, 1, 1)
},
],
}
**Response Structure**
- *(dict) --*
- **DirectoryConfigs** *(list) --*
Information about the directory configurations. Note that although the response syntax in this topic includes the account password, this password is not returned in the actual response.
- *(dict) --*
Describes the configuration information for the directory used to join a streaming instance to a Microsoft Active Directory domain.
- **DirectoryName** *(string) --*
The fully qualified name of the directory (for example, corp.example.com).
- **OrganizationalUnitDistinguishedNames** *(list) --*
The distinguished names of the organizational units for computer accounts.
- *(string) --*
- **ServiceAccountCredentials** *(dict) --*
The credentials for the service account used by the streaming instance to connect to the directory.
- **AccountName** *(string) --*
The user name of the account. This account must have the following privileges: create computer objects, join computers to the domain, and change/reset the password on descendant computer objects for the organizational units specified.
- **AccountPassword** *(string) --*
The password for the account.
- **CreatedTime** *(datetime) --*
The time the directory configuration was created.
:type DirectoryNames: list
:param DirectoryNames:
The directory names.
- *(string) --*
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class DescribeFleets(Paginator):
def paginate(self, Names: List = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`AppStream.Client.describe_fleets`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeFleets>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
Names=[
'string',
],
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Fleets': [
{
'Arn': 'string',
'Name': 'string',
'DisplayName': 'string',
'Description': 'string',
'ImageName': 'string',
'ImageArn': 'string',
'InstanceType': 'string',
'FleetType': 'ALWAYS_ON'|'ON_DEMAND',
'ComputeCapacityStatus': {
'Desired': 123,
'Running': 123,
'InUse': 123,
'Available': 123
},
'MaxUserDurationInSeconds': 123,
'DisconnectTimeoutInSeconds': 123,
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED',
'VpcConfig': {
'SubnetIds': [
'string',
],
'SecurityGroupIds': [
'string',
]
},
'CreatedTime': datetime(2015, 1, 1),
'FleetErrors': [
{
'ErrorCode': 'IAM_SERVICE_ROLE_MISSING_ENI_DESCRIBE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_CREATE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_DELETE_ACTION'|'NETWORK_INTERFACE_LIMIT_EXCEEDED'|'INTERNAL_SERVICE_ERROR'|'IAM_SERVICE_ROLE_IS_MISSING'|'SUBNET_HAS_INSUFFICIENT_IP_ADDRESSES'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION'|'SUBNET_NOT_FOUND'|'IMAGE_NOT_FOUND'|'INVALID_SUBNET_CONFIGURATION'|'SECURITY_GROUPS_NOT_FOUND'|'IGW_NOT_ATTACHED'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SECURITY_GROUPS_ACTION'|'DOMAIN_JOIN_ERROR_FILE_NOT_FOUND'|'DOMAIN_JOIN_ERROR_ACCESS_DENIED'|'DOMAIN_JOIN_ERROR_LOGON_FAILURE'|'DOMAIN_JOIN_ERROR_INVALID_PARAMETER'|'DOMAIN_JOIN_ERROR_MORE_DATA'|'DOMAIN_JOIN_ERROR_NO_SUCH_DOMAIN'|'DOMAIN_JOIN_ERROR_NOT_SUPPORTED'|'DOMAIN_JOIN_NERR_INVALID_WORKGROUP_NAME'|'DOMAIN_JOIN_NERR_WORKSTATION_NOT_STARTED'|'DOMAIN_JOIN_ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED'|'DOMAIN_JOIN_NERR_PASSWORD_EXPIRED'|'DOMAIN_JOIN_INTERNAL_SERVICE_ERROR',
'ErrorMessage': 'string'
},
],
'EnableDefaultInternetAccess': True|False,
'DomainJoinInfo': {
'DirectoryName': 'string',
'OrganizationalUnitDistinguishedName': 'string'
}
},
],
}
**Response Structure**
- *(dict) --*
- **Fleets** *(list) --*
Information about the fleets.
- *(dict) --*
Describes the parameters for a fleet.
- **Arn** *(string) --*
The ARN for the fleet.
- **Name** *(string) --*
The name of the fleet.
- **DisplayName** *(string) --*
The fleet name to display.
- **Description** *(string) --*
The description to display.
- **ImageName** *(string) --*
The name of the image used to create the fleet.
- **ImageArn** *(string) --*
The ARN for the public, private, or shared image.
- **InstanceType** *(string) --*
The instance type to use when launching fleet instances.
- **FleetType** *(string) --*
The fleet type.
ALWAYS_ON
Provides users with instant-on access to their apps. You are charged for all running instances in your fleet, even if no users are streaming apps.
ON_DEMAND
Provide users with access to applications after they connect, which takes one to two minutes. You are charged for instance streaming when users are connected and a small hourly fee for instances that are not streaming apps.
- **ComputeCapacityStatus** *(dict) --*
The capacity status for the fleet.
- **Desired** *(integer) --*
The desired number of streaming instances.
- **Running** *(integer) --*
The total number of simultaneous streaming instances that are running.
- **InUse** *(integer) --*
The number of instances in use for streaming.
- **Available** *(integer) --*
The number of currently available instances that can be used to stream sessions.
- **MaxUserDurationInSeconds** *(integer) --*
The maximum time that a streaming session can run, in seconds. Specify a value between 600 and 360000.
- **DisconnectTimeoutInSeconds** *(integer) --*
The time after disconnection when a session is considered to have ended, in seconds. If a user who was disconnected reconnects within this time interval, the user is connected to their previous session. Specify a value between 60 and 360000. By default, this value is 900 seconds (15 minutes).
- **State** *(string) --*
The current state for the fleet.
- **VpcConfig** *(dict) --*
The VPC configuration for the fleet.
- **SubnetIds** *(list) --*
The identifiers of the subnets to which a network interface is attached from the fleet instance or image builder instance. Fleet instances use one or two subnets. Image builder instances use one subnet.
- *(string) --*
- **SecurityGroupIds** *(list) --*
The identifiers of the security groups for the fleet or image builder.
- *(string) --*
- **CreatedTime** *(datetime) --*
The time the fleet was created.
- **FleetErrors** *(list) --*
The fleet errors.
- *(dict) --*
Describes a fleet error.
- **ErrorCode** *(string) --*
The error code.
- **ErrorMessage** *(string) --*
The error message.
- **EnableDefaultInternetAccess** *(boolean) --*
Indicates whether default internet access is enabled for the fleet.
- **DomainJoinInfo** *(dict) --*
The name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain.
- **DirectoryName** *(string) --*
The fully qualified name of the directory (for example, corp.example.com).
- **OrganizationalUnitDistinguishedName** *(string) --*
The distinguished name of the organizational unit for computer accounts.
:type Names: list
:param Names:
The names of the fleets to describe.
- *(string) --*
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class DescribeImageBuilders(Paginator):
def paginate(self, Names: List = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`AppStream.Client.describe_image_builders`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeImageBuilders>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
Names=[
'string',
],
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'ImageBuilders': [
{
'Name': 'string',
'Arn': 'string',
'ImageArn': 'string',
'Description': 'string',
'DisplayName': 'string',
'VpcConfig': {
'SubnetIds': [
'string',
],
'SecurityGroupIds': [
'string',
]
},
'InstanceType': 'string',
'Platform': 'WINDOWS',
'State': 'PENDING'|'UPDATING_AGENT'|'RUNNING'|'STOPPING'|'STOPPED'|'REBOOTING'|'SNAPSHOTTING'|'DELETING'|'FAILED',
'StateChangeReason': {
'Code': 'INTERNAL_ERROR'|'IMAGE_UNAVAILABLE',
'Message': 'string'
},
'CreatedTime': datetime(2015, 1, 1),
'EnableDefaultInternetAccess': True|False,
'DomainJoinInfo': {
'DirectoryName': 'string',
'OrganizationalUnitDistinguishedName': 'string'
},
'ImageBuilderErrors': [
{
'ErrorCode': 'IAM_SERVICE_ROLE_MISSING_ENI_DESCRIBE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_CREATE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_DELETE_ACTION'|'NETWORK_INTERFACE_LIMIT_EXCEEDED'|'INTERNAL_SERVICE_ERROR'|'IAM_SERVICE_ROLE_IS_MISSING'|'SUBNET_HAS_INSUFFICIENT_IP_ADDRESSES'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION'|'SUBNET_NOT_FOUND'|'IMAGE_NOT_FOUND'|'INVALID_SUBNET_CONFIGURATION'|'SECURITY_GROUPS_NOT_FOUND'|'IGW_NOT_ATTACHED'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SECURITY_GROUPS_ACTION'|'DOMAIN_JOIN_ERROR_FILE_NOT_FOUND'|'DOMAIN_JOIN_ERROR_ACCESS_DENIED'|'DOMAIN_JOIN_ERROR_LOGON_FAILURE'|'DOMAIN_JOIN_ERROR_INVALID_PARAMETER'|'DOMAIN_JOIN_ERROR_MORE_DATA'|'DOMAIN_JOIN_ERROR_NO_SUCH_DOMAIN'|'DOMAIN_JOIN_ERROR_NOT_SUPPORTED'|'DOMAIN_JOIN_NERR_INVALID_WORKGROUP_NAME'|'DOMAIN_JOIN_NERR_WORKSTATION_NOT_STARTED'|'DOMAIN_JOIN_ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED'|'DOMAIN_JOIN_NERR_PASSWORD_EXPIRED'|'DOMAIN_JOIN_INTERNAL_SERVICE_ERROR',
'ErrorMessage': 'string',
'ErrorTimestamp': datetime(2015, 1, 1)
},
],
'AppstreamAgentVersion': 'string'
},
],
}
**Response Structure**
- *(dict) --*
- **ImageBuilders** *(list) --*
Information about the image builders.
- *(dict) --*
Describes a virtual machine that is used to create an image.
- **Name** *(string) --*
The name of the image builder.
- **Arn** *(string) --*
The ARN for the image builder.
- **ImageArn** *(string) --*
The ARN of the image from which this builder was created.
- **Description** *(string) --*
The description to display.
- **DisplayName** *(string) --*
The image builder name to display.
- **VpcConfig** *(dict) --*
The VPC configuration of the image builder.
- **SubnetIds** *(list) --*
The identifiers of the subnets to which a network interface is attached from the fleet instance or image builder instance. Fleet instances use one or two subnets. Image builder instances use one subnet.
- *(string) --*
- **SecurityGroupIds** *(list) --*
The identifiers of the security groups for the fleet or image builder.
- *(string) --*
- **InstanceType** *(string) --*
The instance type for the image builder.
- **Platform** *(string) --*
The operating system platform of the image builder.
- **State** *(string) --*
The state of the image builder.
- **StateChangeReason** *(dict) --*
The reason why the last state change occurred.
- **Code** *(string) --*
The state change reason code.
- **Message** *(string) --*
The state change reason message.
- **CreatedTime** *(datetime) --*
The time stamp when the image builder was created.
- **EnableDefaultInternetAccess** *(boolean) --*
Enables or disables default internet access for the image builder.
- **DomainJoinInfo** *(dict) --*
The name of the directory and organizational unit (OU) to use to join the image builder to a Microsoft Active Directory domain.
- **DirectoryName** *(string) --*
The fully qualified name of the directory (for example, corp.example.com).
- **OrganizationalUnitDistinguishedName** *(string) --*
The distinguished name of the organizational unit for computer accounts.
- **ImageBuilderErrors** *(list) --*
The image builder errors.
- *(dict) --*
Describes a resource error.
- **ErrorCode** *(string) --*
The error code.
- **ErrorMessage** *(string) --*
The error message.
- **ErrorTimestamp** *(datetime) --*
The time the error occurred.
- **AppstreamAgentVersion** *(string) --*
The version of the AppStream 2.0 agent that is currently being used by the image builder.
:type Names: list
:param Names:
The names of the image builders to describe.
- *(string) --*
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class DescribeImages(Paginator):
def paginate(self, Names: List = None, Arns: List = None, Type: str = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`AppStream.Client.describe_images`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeImages>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
Names=[
'string',
],
Arns=[
'string',
],
Type='PUBLIC'|'PRIVATE'|'SHARED',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Images': [
{
'Name': 'string',
'Arn': 'string',
'BaseImageArn': 'string',
'DisplayName': 'string',
'State': 'PENDING'|'AVAILABLE'|'FAILED'|'COPYING'|'DELETING',
'Visibility': 'PUBLIC'|'PRIVATE'|'SHARED',
'ImageBuilderSupported': True|False,
'Platform': 'WINDOWS',
'Description': 'string',
'StateChangeReason': {
'Code': 'INTERNAL_ERROR'|'IMAGE_BUILDER_NOT_AVAILABLE'|'IMAGE_COPY_FAILURE',
'Message': 'string'
},
'Applications': [
{
'Name': 'string',
'DisplayName': 'string',
'IconURL': 'string',
'LaunchPath': 'string',
'LaunchParameters': 'string',
'Enabled': True|False,
'Metadata': {
'string': 'string'
}
},
],
'CreatedTime': datetime(2015, 1, 1),
'PublicBaseImageReleasedDate': datetime(2015, 1, 1),
'AppstreamAgentVersion': 'string',
'ImagePermissions': {
'allowFleet': True|False,
'allowImageBuilder': True|False
}
},
],
}
**Response Structure**
- *(dict) --*
- **Images** *(list) --*
Information about the images.
- *(dict) --*
Describes an image.
- **Name** *(string) --*
The name of the image.
- **Arn** *(string) --*
The ARN of the image.
- **BaseImageArn** *(string) --*
The ARN of the image from which this image was created.
- **DisplayName** *(string) --*
The image name to display.
- **State** *(string) --*
The image starts in the ``PENDING`` state. If image creation succeeds, the state is ``AVAILABLE`` . If image creation fails, the state is ``FAILED`` .
- **Visibility** *(string) --*
Indicates whether the image is public or private.
- **ImageBuilderSupported** *(boolean) --*
Indicates whether an image builder can be launched from this image.
- **Platform** *(string) --*
The operating system platform of the image.
- **Description** *(string) --*
The description to display.
- **StateChangeReason** *(dict) --*
The reason why the last state change occurred.
- **Code** *(string) --*
The state change reason code.
- **Message** *(string) --*
The state change reason message.
- **Applications** *(list) --*
The applications associated with the image.
- *(dict) --*
Describes an application in the application catalog.
- **Name** *(string) --*
The name of the application.
- **DisplayName** *(string) --*
The application name to display.
- **IconURL** *(string) --*
The URL for the application icon. This URL might be time-limited.
- **LaunchPath** *(string) --*
The path to the application executable in the instance.
- **LaunchParameters** *(string) --*
The arguments that are passed to the application at launch.
- **Enabled** *(boolean) --*
If there is a problem, the application can be disabled after image creation.
- **Metadata** *(dict) --*
Additional attributes that describe the application.
- *(string) --*
- *(string) --*
- **CreatedTime** *(datetime) --*
The time the image was created.
- **PublicBaseImageReleasedDate** *(datetime) --*
The release date of the public base image. For private images, this date is the release date of the base image from which the image was created.
- **AppstreamAgentVersion** *(string) --*
The version of the AppStream 2.0 agent to use for instances that are launched from this image.
- **ImagePermissions** *(dict) --*
The permissions to provide to the destination AWS account for the specified image.
- **allowFleet** *(boolean) --*
Indicates whether the image can be used for a fleet.
- **allowImageBuilder** *(boolean) --*
Indicates whether the image can be used for an image builder.
:type Names: list
:param Names:
The names of the public or private images to describe.
- *(string) --*
:type Arns: list
:param Arns:
The ARNs of the public, private, and shared images to describe.
- *(string) --*
:type Type: string
:param Type:
The type of image (public, private, or shared) to describe.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class DescribeSessions(Paginator):
def paginate(self, StackName: str, FleetName: str, UserId: str = None, AuthenticationType: str = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`AppStream.Client.describe_sessions`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeSessions>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
StackName='string',
FleetName='string',
UserId='string',
AuthenticationType='API'|'SAML'|'USERPOOL',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Sessions': [
{
'Id': 'string',
'UserId': 'string',
'StackName': 'string',
'FleetName': 'string',
'State': 'ACTIVE'|'PENDING'|'EXPIRED',
'ConnectionState': 'CONNECTED'|'NOT_CONNECTED',
'StartTime': datetime(2015, 1, 1),
'MaxExpirationTime': datetime(2015, 1, 1),
'AuthenticationType': 'API'|'SAML'|'USERPOOL',
'NetworkAccessConfiguration': {
'EniPrivateIpAddress': 'string',
'EniId': 'string'
}
},
],
}
**Response Structure**
- *(dict) --*
- **Sessions** *(list) --*
Information about the streaming sessions.
- *(dict) --*
Describes a streaming session.
- **Id** *(string) --*
The identifier of the streaming session.
- **UserId** *(string) --*
The identifier of the user for whom the session was created.
- **StackName** *(string) --*
The name of the stack for the streaming session.
- **FleetName** *(string) --*
The name of the fleet for the streaming session.
- **State** *(string) --*
The current state of the streaming session.
- **ConnectionState** *(string) --*
Specifies whether a user is connected to the streaming session.
- **StartTime** *(datetime) --*
The time when a streaming instance is dedicated for the user.
- **MaxExpirationTime** *(datetime) --*
The time when the streaming session is set to expire. This time is based on the ``MaxUserDurationinSeconds`` value, which determines the maximum length of time that a streaming session can run. A streaming session might end earlier than the time specified in ``SessionMaxExpirationTime`` , when the ``DisconnectTimeOutInSeconds`` elapses or the user chooses to end his or her session. If the ``DisconnectTimeOutInSeconds`` elapses, or the user chooses to end his or her session, the streaming instance is terminated and the streaming session ends.
- **AuthenticationType** *(string) --*
The authentication method. The user is authenticated using a streaming URL (``API`` ), SAML 2.0 federation (``SAML`` ), or the AppStream 2.0 user pool (``USERPOOL`` ). The default is to authenticate users using a streaming URL.
- **NetworkAccessConfiguration** *(dict) --*
The network details for the streaming session.
- **EniPrivateIpAddress** *(string) --*
The private IP address of the elastic network interface that is attached to instances in your VPC.
- **EniId** *(string) --*
The resource identifier of the elastic network interface that is attached to instances in your VPC. All network interfaces have the eni-xxxxxxxx resource identifier.
:type StackName: string
:param StackName: **[REQUIRED]**
The name of the stack. This value is case-sensitive.
:type FleetName: string
:param FleetName: **[REQUIRED]**
The name of the fleet. This value is case-sensitive.
:type UserId: string
:param UserId:
The user identifier.
:type AuthenticationType: string
:param AuthenticationType:
The authentication method. Specify ``API`` for a user authenticated using a streaming URL, ``SAML`` for a SAML 2.0-federated user, or ``USERPOOL`` for a user in the AppStream 2.0 user pool. The default is to authenticate users using a streaming URL.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class DescribeStacks(Paginator):
def paginate(self, Names: List = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`AppStream.Client.describe_stacks`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeStacks>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
Names=[
'string',
],
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Stacks': [
{
'Arn': 'string',
'Name': 'string',
'Description': 'string',
'DisplayName': 'string',
'CreatedTime': datetime(2015, 1, 1),
'StorageConnectors': [
{
'ConnectorType': 'HOMEFOLDERS'|'GOOGLE_DRIVE'|'ONE_DRIVE',
'ResourceIdentifier': 'string',
'Domains': [
'string',
]
},
],
'RedirectURL': 'string',
'FeedbackURL': 'string',
'StackErrors': [
{
'ErrorCode': 'STORAGE_CONNECTOR_ERROR'|'INTERNAL_SERVICE_ERROR',
'ErrorMessage': 'string'
},
],
'UserSettings': [
{
'Action': 'CLIPBOARD_COPY_FROM_LOCAL_DEVICE'|'CLIPBOARD_COPY_TO_LOCAL_DEVICE'|'FILE_UPLOAD'|'FILE_DOWNLOAD'|'PRINTING_TO_LOCAL_DEVICE',
'Permission': 'ENABLED'|'DISABLED'
},
],
'ApplicationSettings': {
'Enabled': True|False,
'SettingsGroup': 'string',
'S3BucketName': 'string'
}
},
],
}
**Response Structure**
- *(dict) --*
- **Stacks** *(list) --*
Information about the stacks.
- *(dict) --*
Describes a stack.
- **Arn** *(string) --*
The ARN of the stack.
- **Name** *(string) --*
The name of the stack.
- **Description** *(string) --*
The description to display.
- **DisplayName** *(string) --*
The stack name to display.
- **CreatedTime** *(datetime) --*
The time the stack was created.
- **StorageConnectors** *(list) --*
The storage connectors to enable.
- *(dict) --*
Describes a connector to enable persistent storage for users.
- **ConnectorType** *(string) --*
The type of storage connector.
- **ResourceIdentifier** *(string) --*
The ARN of the storage connector.
- **Domains** *(list) --*
The names of the domains for the account.
- *(string) --* GSuite domain for GDrive integration.
- **RedirectURL** *(string) --*
The URL that users are redirected to after their streaming session ends.
- **FeedbackURL** *(string) --*
The URL that users are redirected to after they click the Send Feedback link. If no URL is specified, no Send Feedback link is displayed.
- **StackErrors** *(list) --*
The errors for the stack.
- *(dict) --*
Describes a stack error.
- **ErrorCode** *(string) --*
The error code.
- **ErrorMessage** *(string) --*
The error message.
- **UserSettings** *(list) --*
The actions that are enabled or disabled for users during their streaming sessions. By default these actions are enabled.
- *(dict) --*
Describes an action and whether the action is enabled or disabled for users during their streaming sessions.
- **Action** *(string) --*
The action that is enabled or disabled.
- **Permission** *(string) --*
Indicates whether the action is enabled or disabled.
- **ApplicationSettings** *(dict) --*
The persistent application settings for users of the stack.
- **Enabled** *(boolean) --*
Specifies whether persistent application settings are enabled for users during their streaming sessions.
- **SettingsGroup** *(string) --*
The path prefix for the S3 bucket where users’ persistent application settings are stored.
- **S3BucketName** *(string) --*
The S3 bucket where users’ persistent application settings are stored. When persistent application settings are enabled for the first time for an account in an AWS Region, an S3 bucket is created. The bucket is unique to the AWS account and the Region.
:type Names: list
:param Names:
The names of the stacks to describe.
- *(string) --*
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class DescribeUserStackAssociations(Paginator):
def paginate(self, StackName: str = None, UserName: str = None, AuthenticationType: str = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`AppStream.Client.describe_user_stack_associations`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeUserStackAssociations>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
StackName='string',
UserName='string',
AuthenticationType='API'|'SAML'|'USERPOOL',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'UserStackAssociations': [
{
'StackName': 'string',
'UserName': 'string',
'AuthenticationType': 'API'|'SAML'|'USERPOOL',
'SendEmailNotification': True|False
},
],
}
**Response Structure**
- *(dict) --*
- **UserStackAssociations** *(list) --*
The UserStackAssociation objects.
- *(dict) --*
Describes a user in the user pool and the associated stack.
- **StackName** *(string) --*
The name of the stack that is associated with the user.
- **UserName** *(string) --*
The email address of the user who is associated with the stack.
- **AuthenticationType** *(string) --*
The authentication type for the user.
- **SendEmailNotification** *(boolean) --*
Specifies whether a welcome email is sent to a user after the user is created in the user pool.
:type StackName: string
:param StackName:
The name of the stack that is associated with the user.
:type UserName: string
:param UserName:
The email address of the user who is associated with the stack.
:type AuthenticationType: string
:param AuthenticationType:
The authentication type for the user who is associated with the stack. You must specify USERPOOL.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class DescribeUsers(Paginator):
def paginate(self, AuthenticationType: str, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`AppStream.Client.describe_users`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeUsers>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
AuthenticationType='API'|'SAML'|'USERPOOL',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Users': [
{
'Arn': 'string',
'UserName': 'string',
'Enabled': True|False,
'Status': 'string',
'FirstName': 'string',
'LastName': 'string',
'CreatedTime': datetime(2015, 1, 1),
'AuthenticationType': 'API'|'SAML'|'USERPOOL'
},
],
}
**Response Structure**
- *(dict) --*
- **Users** *(list) --*
Information about users in the user pool.
- *(dict) --*
Describes a user in the user pool.
- **Arn** *(string) --*
The ARN of the user.
- **UserName** *(string) --*
The email address of the user.
- **Enabled** *(boolean) --*
Specifies whether the user in the user pool is enabled.
- **Status** *(string) --*
The status of the user in the user pool. The status can be one of the following:
* UNCONFIRMED – The user is created but not confirmed.
* CONFIRMED – The user is confirmed.
* ARCHIVED – The user is no longer active.
* COMPROMISED – The user is disabled because of a potential security threat.
* UNKNOWN – The user status is not known.
- **FirstName** *(string) --*
The first name, or given name, of the user.
- **LastName** *(string) --*
The last name, or surname, of the user.
- **CreatedTime** *(datetime) --*
The date and time the user was created in the user pool.
- **AuthenticationType** *(string) --*
The authentication type for the user.
:type AuthenticationType: string
:param AuthenticationType: **[REQUIRED]**
The authentication type for the users in the user pool to describe. You must specify USERPOOL.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListAssociatedFleets(Paginator):
def paginate(self, StackName: str, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`AppStream.Client.list_associated_fleets`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/ListAssociatedFleets>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
StackName='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Names': [
'string',
],
}
**Response Structure**
- *(dict) --*
- **Names** *(list) --*
The name of the fleet.
- *(string) --*
:type StackName: string
:param StackName: **[REQUIRED]**
The name of the stack.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListAssociatedStacks(Paginator):
def paginate(self, FleetName: str, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`AppStream.Client.list_associated_stacks`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/ListAssociatedStacks>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
FleetName='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Names': [
'string',
],
}
**Response Structure**
- *(dict) --*
- **Names** *(list) --*
The name of the stack.
- *(string) --*
:type FleetName: string
:param FleetName: **[REQUIRED]**
The name of the fleet.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
|
1653696
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
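# base configuration shared by all environments; the subclasses below override it per environment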
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or '<KEY>'
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
SQLALCHEMY_TRACK_MODIFICATIONS = True
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI') or 'sqlite:///' + os.path.join(basedir, 'data.sqlite')
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI') or 'sqlite:///' + os.path.join(basedir, 'product.sqlite')
config = {
'development' : DevelopmentConfig,
'product' : ProductionConfig,
}
|
1653724
|
import torch
import torch.nn.functional as F
from models.baseModule import BaseNet
from models.EventUnet import EventUnet
from models.FrameUnet import FrameUnet
from models.FuseNet import FuseNet
import math
class Generator(BaseNet):
def __init__(self, cfg):
super(Generator, self).__init__(cfg.netInitType, cfg.netInitGain)
self.cfg = cfg
self.netScale = 16
self.eventUnet = EventUnet(cfg)
self.frameUnet = FrameUnet(cfg)
self.fuseNet = FuseNet()
if cfg.step in [1, 2, 3]:
self.initPreweight(cfg.pathWeight)
def getWeight(self, pathPreWeight: str = None):
checkpoints = torch.load(pathPreWeight, map_location=torch.device('cpu'))
try:
weightDict = checkpoints['Generator']
except KeyError:
weightDict = checkpoints['model_state_dict']
return weightDict
def adap2Net(self, tensor: torch.Tensor):
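# pad the spatial dims up to the next multiple of netScale so the U-Nets' downsampling divides evenly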
Height, Width = tensor.size(2), tensor.size(3)
Height_ = int(math.floor(math.ceil(Height / self.netScale) * self.netScale))
Width_ = int(math.floor(math.ceil(Width / self.netScale) * self.netScale))
if any([Height_ != Height, Width_ != Width]):
tensor = F.pad(tensor, [0, Width_ - Width, 0, Height_ - Height])
return tensor
def forward(self, I0t, I1t, Et):
N, C, H, W = I0t.shape
I0t = self.adap2Net(I0t)
I1t = self.adap2Net(I1t)
Et = self.adap2Net(Et)
z_e = self.eventUnet(Et)
z_f = self.frameUnet(torch.cat([I0t, I1t], dim=1))
fusedOut = self.fuseNet(z_e, z_f)
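# crop the padded borders off to restore the original input resolution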
output = fusedOut[:, :, 0:H, 0:W]
return output
|
1653736
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
DEPENDENCIES = [] # 'scipy', 'numpy', 'vtk']
TEST_DEPENDENCIES = [] # 'pytest']
VERSION = "1.0"
URL = "https://github.com/KVSlab/morphMan.git"
setuptools.setup(
name="morphman",
version=VERSION,
license="GPL",
author="<NAME>, <NAME>",
author_email="<EMAIL>",
url=URL,
project_urls={
"Documentation": "https://morphman.readthedocs.io/",
"Source Code": URL,
},
description="morphman - morphlogical manipulation",
long_description=long_description,
long_description_content_type="text/markdown",
# Dependencies
install_requires=DEPENDENCIES,
tests_require=TEST_DEPENDENCIES,
dependency_links=['https://github.com/vmtk/vmtk'],
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
"Programming Language :: Python :: 3",
],
packages=["morphman",
"morphman.common",
"morphman.misc",
"morphman.automated_landmarking"],
package_dir={"morphman": "morphman"},
entry_points={'console_scripts':
['morphman-area=morphman.manipulate_area:main_area',
'morphman-bend=morphman.manipulate_bend:main_bend',
'morphman-bifurcation=morphman.manipulate_bifurcation:main_bifurcation',
'morphman-curvature=morphman.manipulate_curvature:main_curvature',
'morphman-branch=morphman.manipulate_branch:main_branch',
'morphman-surface=morphman.manipulate_surface:main_surface']
}
)
|
1653744
|
from .zenipy import (
message,
error,
warning,
question,
entry,
password,
zlist,
file_selection,
calendar,
color_selection,
scale
)
|
1653755
|
import sys
from markdown import markdown as _m
PREAMBLE = r"""<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Mate">
<link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Verdana">
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.4.1/jquery.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.16.0/umd/popper.min.js"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.4.1/js/bootstrap.min.js"></script>
<link rel="apple-touch-icon" sizes="180x180" href="https://web.evanchen.cc/icons/apple-touch-icon.png">
<link rel="icon" type="image/png" href="https://web.evanchen.cc/icons/favicon-32x32.png" sizes="32x32">
<link rel="icon" type="image/png" href="https://web.evanchen.cc/icons/favicon-16x16.png" sizes="16x16">
<link rel="manifest" href="https://web.evanchen.cc/icons/manifest.json">
<link rel="mask-icon" href="https://web.evanchen.cc/icons/safari-pinned-tab.svg" color="#5bbad5">
<link rel="shortcut icon" href="https://web.evanchen.cc/icons/favicon.ico">
<meta name="msapplication-config" content="https://web.evanchen.cc/icons/browserconfig.xml">
<meta name="theme-color" content="#ffffff">
<link type="text/css" rel="stylesheet" href="https://web.evanchen.cc/css/simple-53544.css">
<script type="text/javascript">
MathJax = {
tex: {
inlineMath: [['$','$'], ['\\(','\\)']],
displayMath: [['\\[', '\\]']],
}
};
</script>
<script type="text/javascript" id="MathJax-script" async
src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js">
</script>
</head>
<body>
<div id="container">
<div id="content">"""
def markdown(content):
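# 'extra' enables tables, footnotes, etc.; 'sane_lists' tightens list parsing; 'smarty' converts quotes and dashes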
return _m(content,
extensions=['extra', 'sane_lists', 'smarty'])
if __name__ == "__main__":
if len(sys.argv) > 1:
with open(sys.argv[1]) as f:
content = ''.join(f.readlines())
else:
content = ''.join(sys.stdin.readlines())
print(PREAMBLE)
print(markdown(content))
print('</div></div>')
print(r'</body></html>')
|
1653770
|
expected_output = {
'vrfs':{
'VRF':{
'vrf_id':5,
'vrf_state':'Up',
'reason':'--'
},
'VRF1':{
'vrf_id':3,
'vrf_state':'Up',
'reason':'--'
},
'VRF2':{
'vrf_id':4,
'vrf_state':'Up',
'reason':'--'
},
'default':{
'vrf_id':1,
'vrf_state':'Up',
'reason':'--'
}
}
}
|
1653781
|
import argparse
import numpy as np
from paz.backend.image import load_image, show_image, resize_image
from paz.backend.camera import Camera
from paz.pipelines import DetectMiniXceptionFER
from paz.pipelines import DetectFaceKeypointNet2D32
from paz.pipelines import HeadPoseKeypointNet2D32
from paz.pipelines import SSD300FAT, SSD300VOC, SSD512COCO, SSD512YCBVideo
parser = argparse.ArgumentParser(description='Real-time face classifier')
parser.add_argument('-o', '--offset', type=float, default=0.1,
help='Scaled offset to be added to bounding boxes')
parser.add_argument('-s', '--score_thresh', type=float, default=0.6,
help='Box/class score threshold')
parser.add_argument('-n', '--nms_thresh', type=float, default=0.45,
help='non-maximum suppression threshold')
parser.add_argument('-p', '--image_path', type=str,
help='full image path used for the pipelines')
parser.add_argument('-c', '--camera_id', type=str,
help='Camera/device ID')
parser.add_argument('-d', '--dataset', type=str, default='COCO',
choices=['VOC', 'COCO', 'YCBVideo', 'FAT'],
help='Dataset name')
args = parser.parse_args()
name_to_model = {'VOC': SSD300VOC, 'FAT': SSD300FAT, 'COCO': SSD512COCO,
'YCBVideo': SSD512YCBVideo}
image = load_image(args.image_path)
H = 1000
W = int((H / image.shape[0]) * image.shape[1])
# image = resize_image(image, (W, H))
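# approximate pinhole intrinsics: focal length ~= image width, principal point at the image center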
focal_length = image.shape[1]
image_center = (image.shape[1] / 2.0, image.shape[0] / 2.0)
camera = Camera(args.camera_id)
camera.distortion = np.zeros((4, 1))
camera.intrinsics = np.array([[focal_length, 0, image_center[0]],
[0, focal_length, image_center[1]],
[0, 0, 1]])
pipeline_A = DetectMiniXceptionFER([args.offset, args.offset])
pipeline_B = DetectFaceKeypointNet2D32()
pipeline_C = HeadPoseKeypointNet2D32(camera)
pipeline_D = name_to_model[args.dataset](args.score_thresh, args.nms_thresh)
pipelines = [pipeline_A, pipeline_B, pipeline_C, pipeline_D]
for pipeline in pipelines:
predictions = pipeline(image.copy())
show_image(predictions['image'])
|
1653784
|
from ._pymimkl import *
# importing wrapped models
from .average_mkl import AverageMKL
from .easy_mkl import EasyMKL
from .umkl_knn import UMKLKNN
# cleanup unused objects
del EasyMKL_
del UMKLKNN_
del AverageMKL_
|
1653812
|
import os
import json
import random
src_path = 'data/original/twisty'
meta_path = os.path.join(src_path, 'TwiSty-NL.json')
data_dir = os.path.join(src_path, 'users_id')
out_dir = 'data/prepared/twisty'
num_tweets = 200
def tweet_text(tweet, replace_ents=True):
text = tweet['text']
if replace_ents:
ents = []
for tag in tweet['entities']['hashtags']:
ents.append((tuple(tag['indices']), tag['text']))
for url in tweet['entities']['urls']:
ents.append((tuple(url['indices']), 'URL')) # url['display_url']
for user in tweet['entities']['user_mentions']:
ents.append((tuple(user['indices']), user['name']))
offset = 0
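# rewrite entity spans in place; offset tracks the cumulative length change from earlier replacements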
for (i, j), txt in sorted(ents):
if i + offset <= 0:
text = text[j + offset + 1:]
offset = - (j + 1)
continue
text = text[:i + offset] + txt + text[j + offset:]
offset += len(txt) - (j - i)
return text.replace('\n', ' ').replace('\r', '').replace('\t', ' ')
def get_user_tweet_texts(user_id, tweet_ids):
user_path = os.path.join(data_dir, '{}.json'.format(user_id))
with open(user_path) as f:
tweet_data = json.load(f)['tweets']
user_tweets = []
for tweet_id in tweet_ids:
if tweet_id not in tweet_data:
print('skipping tweet id "{}"'.format(tweet_id))
continue
user_tweets.append(tweet_text(tweet_data[tweet_id]))
return user_tweets
def save(data, path, filename):
with open(os.path.join(path, filename), 'w') as f:
for user_data in data:
for item in user_data:
if len(item) > 0:
f.write('\t'.join(item) + '\n')
def main():
with open(meta_path) as f:
meta = json.load(f)
random.seed(7684)
dev1, dev2 = [], []
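# dev1 collects users with fewer than num_tweets tweets; dev2 collects the surplus tweets of users with more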
data = []
for user_id in meta.keys():
user = meta[user_id]
tweet_ids = user['confirmed_tweet_ids']
tweet_texts = get_user_tweet_texts(user_id, tweet_ids)
random.shuffle(tweet_texts)
user_data = [(txt, user['gender'], user_id) for txt in tweet_texts]
if len(user_data) < num_tweets:
dev1.append(user_data)
continue
if len(user_data) > num_tweets:
dev2.append(user_data[num_tweets:])
data.append(user_data[:num_tweets])
random.shuffle(data)
fold_size = len(data) // 10
test = data[:fold_size]
train = data[fold_size:]
os.makedirs(out_dir, exist_ok=True)
print('train={} dev1={} dev2={} test={}'.format(len(train), len(dev1), len(dev2), len(test)))
save(train, out_dir, 'train.tsv')
save(test, out_dir, 'test.tsv')
save(dev1, out_dir, 'dev.tsv')
save(dev2, out_dir, 'dev2.tsv')
if __name__ == '__main__':
main()
|
1653820
|
import logging
import os
import subprocess
from git.exc import NoSuchPathError
from assigner.roster_util import get_filtered_roster
from assigner.backends import RepoError
from assigner.backends.decorators import requires_config_and_backend
from assigner import progress
help = "Add and commit changes to student repos"
logger = logging.getLogger(__name__)
@requires_config_and_backend
def push(conf, backend, args):
_push(conf, backend, args)
def _push(conf, backend, args):
backend_conf = conf.backend
namespace = conf.namespace
semester = conf.semester
hw_name = args.name
hw_path = args.path
message = args.message
branch = args.branch
add = args.add
remove = args.remove
update = args.update
allow_empty = args.allow_empty
gpg_sign = args.gpg_sign
# Default behavior: commit changes to all tracked files
if (add == []) and (remove == []):
logging.debug("Nothing explicitly added or removed; defaulting to git add --update")
update = True
path = os.path.join(hw_path, hw_name)
roster = get_filtered_roster(conf.roster, args.section, args.student)
for student in progress.iterate(roster):
username = student["username"]
student_section = student["section"]
full_name = backend.student_repo.build_name(semester, student_section,
hw_name, username)
has_changes = False
try:
repo = backend.student_repo(backend_conf, namespace, full_name)
repo_dir = os.path.join(path, username)
repo.add_local_copy(repo_dir)
logging.debug("%s: checking out branch %s", full_name, branch)
repo.get_head(branch).checkout()
index = repo.get_index()
if update:
# Stage modified and deleted files for commit
# This exactly mimics the behavior of git add --update
# (or the effect of doing git commit -a)
for change in index.diff(None):
has_changes = True
if change.deleted_file:
logging.debug("%s: git rm %s", full_name, change.b_path)
index.remove([change.b_path])
else:
logging.debug("%s: git add %s", full_name, change.b_path)
index.add([change.b_path])
if add:
has_changes = True
logging.debug("%s: adding %s", full_name, add)
index.add(add)
if remove:
has_changes = True
logging.debug("%s: removing %s", full_name, remove)
index.remove(remove)
if has_changes or allow_empty:
logging.debug("%s: committing changes with message %s", full_name, message)
if gpg_sign:
# The GitPython interface does not support signed commits, and
# launching via repo.git.commit will launch an inaccessible
# interactive prompt in the background
index.write(ignore_extension_data=True)
                        # list args need no shell quoting; pass the message through unmodified
                        subprocess.check_call(["git", "commit", "-S", "-m", message],
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
cwd=repo_dir)
else:
index.commit(message)
else:
logging.warning("%s: No changes in repo; skipping commit.", full_name)
except NoSuchPathError:
logging.warning("Local repo for %s does not exist; skipping...", username)
except RepoError as e:
logging.warning(e)
def setup_parser(parser):
parser.add_argument("name",
help="Name of the assignment to commit to.")
parser.add_argument("message",
help="Commit message")
parser.add_argument("path", default=".", nargs="?",
help="Path of student repositories to commit to")
parser.add_argument("--branch", nargs="?", default="master",
help="Local branch to commit to")
parser.add_argument("-a", "--add", nargs="+", dest="add", default=[],
help="Files to add before committing")
parser.add_argument("-r", "--remove", nargs="+", dest="remove", default=[],
help="Files to remove before committing")
parser.add_argument("-u", "--update", action="store_true", dest="update",
help="Include all changed files (i.e., git add -u or git commit -a)")
parser.add_argument("-e", "--allow-empty", action="store_true", dest="allow_empty",
help="Commit even if there are no changes to commit")
parser.add_argument("-S", "--gpg-sign", action="store_true", dest="gpg_sign",
help="GPG-sign the commits using the committer identity")
parser.add_argument("--section", nargs="?",
help="Section to commit to")
parser.add_argument("--student", metavar="id",
help="ID of student whose assignment is to be committed to.")
parser.set_defaults(run=push)
|
1653830
|
from .neobaseextractor import NeoBaseRecordingExtractor
try:
import neo
HAVE_NEO = True
except ImportError:
HAVE_NEO = False
class MCSRawRecordingExtractor(NeoBaseRecordingExtractor):
    extractor_name = 'mcsrawRecording'
    mode = 'file'
    NeoRawIOClass = 'RawMCSRawIO'
|
1653834
|
import numpy as np
from typing import Callable
import lcs.agents.xcs as xcs
class Configuration(xcs.Configuration):
def __init__(self,
                 number_of_actions: int,  # theta_mna; it is usually sensible to set this equal to the number of actions
lmc: int = 100,
lem: float = 1,
classifier_wildcard: str = '#',
max_population: int = 200, # n
learning_rate: float = 0.1, # beta
alpha: float = 0.1,
epsilon_0: float = 10,
v: int = 5,
gamma: float = 0.71,
ga_threshold: int = 25,
chi: float = 0.5,
mutation_chance: float = 0.01, # mu
deletion_threshold: int = 20, # theta_del
delta: float = 0.1,
subsumption_threshold: int = 20, # theta_sub
covering_wildcard_chance: float = 0.33, # population wildcard
initial_prediction: float = float(np.finfo(np.float32).tiny), # p_i
initial_error: float = float(np.finfo(np.float32).tiny), # epsilon_i
initial_fitness: float = float(np.finfo(np.float32).tiny), # f_i
epsilon: float = 0.5, # p_exp, exploration probability
do_ga_subsumption: bool = False,
do_action_set_subsumption: bool = False,
metrics_trial_frequency: int = 5,
user_metrics_collector_fcn: Callable = None
) -> None:
self.lmc = lmc
self.lem = lem
self.classifier_wildcard = classifier_wildcard
self.max_population = max_population
self.learning_rate = learning_rate
self.alpha = alpha
self.epsilon_0 = epsilon_0
self.v = v
self.gamma = gamma
self.ga_threshold = ga_threshold
self.chi = chi
self.mutation_chance = mutation_chance
self.deletion_threshold = deletion_threshold
self.delta = delta
self.subsumption_threshold = subsumption_threshold
self.covering_wildcard_chance = covering_wildcard_chance
self.initial_prediction = initial_prediction
self.initial_error = initial_error
self.initial_fitness = initial_fitness
self.epsilon = epsilon # p_exp, probability of exploration
self.number_of_actions = number_of_actions
self.do_GA_subsumption = do_ga_subsumption
self.do_action_set_subsumption = do_action_set_subsumption
self.metrics_trial_frequency = metrics_trial_frequency
self.user_metrics_collector_fcn = user_metrics_collector_fcn
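# Minimal usage sketch (illustrative): construct a configuration for a
# two-action problem. Assumes the `lcs` package (lcs.agents.xcs) is importable;
# the overridden values below are arbitrary.
if __name__ == "__main__":
    cfg = Configuration(number_of_actions=2, max_population=400, epsilon=0.3)
    print(cfg.number_of_actions, cfg.max_population, cfg.epsilon)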
|
1653842
|
import bin_plots
import sys
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
path, output = sys.argv[1:]
ds = xr.open_dataset(path)
fig, axs = plt.subplots(1, 3, figsize=(7, 2), constrained_layout=True)
bin_plots.plot_row(ds, axs=axs)
bin_plots.set_row_titles(axs, ["a) Count\n", "b) Predicted\nP-E (mm/day)", "c) P-E Error\n(mm/day)"])
bin_plots.label_axes(axs[np.newaxis,:])
fig.savefig(output)
|
1653868
|
from click.testing import CliRunner
import responses
from ugoira.cli import ugoira
from ugoira.lib import get_metadata_url
def test_download(
ugoira_id,
meta_body,
small_zip_url,
big_zip_url,
small_image_zip,
big_image_zip,
):
"""Test for command download"""
@responses.activate
def case1():
"""
original head - good
original get - good
common head - not reached
common get - not reached
"""
responses.reset()
responses.add(**{
'method': responses.GET,
'url': get_metadata_url(ugoira_id),
'body': meta_body,
'content_type': 'application/json',
'status': 200,
'match_querystring': True,
})
responses.add(**{
'method': responses.HEAD,
'url': big_zip_url,
'status': 200,
})
responses.add(**{
'method': responses.GET,
'url': big_zip_url,
'body': big_image_zip,
'content_type': 'application/zip',
'status': 200,
})
runner = CliRunner()
result = runner.invoke(
ugoira,
[str(ugoira_id)]
)
assert result.exit_code == 0
assert result.output.strip() == (
'Downloading {} (0/1)\n'.format(ugoira_id) +
'Download was completed successfully.'
' format is {} and output path is {}{}'.format(
'gif',
ugoira_id,
'.gif',
)
)
@responses.activate
def case2():
"""
original head - good
original get - bad
common head - good
common get - good
"""
responses.reset()
responses.add(**{
'method': responses.GET,
'url': get_metadata_url(ugoira_id),
'body': meta_body,
'content_type': 'application/json',
'status': 200,
'match_querystring': True,
})
responses.add(**{
'method': responses.HEAD,
'url': big_zip_url,
'status': 200,
})
responses.add(**{
'method': responses.GET,
'url': big_zip_url,
'status': 403,
})
responses.add(**{
'method': responses.HEAD,
'url': small_zip_url,
'status': 200,
})
responses.add(**{
'method': responses.GET,
'url': small_zip_url,
'body': small_image_zip,
'content_type': 'application/zip',
'status': 200,
})
runner = CliRunner()
result = runner.invoke(
ugoira,
[str(ugoira_id)]
)
assert result.exit_code == 0
assert result.output.strip() == (
'Downloading {} (0/1)\n'.format(ugoira_id) +
'Download was completed successfully.'
' format is {} and output path is {}{}'.format(
'gif',
ugoira_id,
'.gif',
)
)
@responses.activate
def case3():
"""
original head - bad
original get - not reached
common head - good
common get - good
"""
responses.reset()
responses.add(**{
'method': responses.GET,
'url': get_metadata_url(ugoira_id),
'body': meta_body,
'content_type': 'application/json',
'status': 200,
'match_querystring': True,
})
responses.add(**{
'method': responses.HEAD,
'url': big_zip_url,
'status': 403,
})
responses.add(**{
'method': responses.HEAD,
'url': small_zip_url,
'status': 200,
})
responses.add(**{
'method': responses.GET,
'url': small_zip_url,
'body': small_image_zip,
'content_type': 'application/zip',
'status': 200,
})
runner = CliRunner()
result = runner.invoke(
ugoira,
[str(ugoira_id)]
)
assert result.exit_code == 0
assert result.output.strip() == (
'Downloading {} (0/1)\n'.format(ugoira_id) +
'Download was completed successfully.'
' format is {} and output path is {}{}'.format(
'gif',
ugoira_id,
'.gif',
)
)
case1()
case2()
case3()
def test_error(
ugoira_id,
meta_body,
small_zip_url,
big_zip_url,
):
    """Test for encountering PixivError"""
@responses.activate
def case1():
"""
original head - bad
original get - not reached
common head - bad
common get - not reached
"""
responses.reset()
responses.add(**{
'method': responses.GET,
'url': get_metadata_url(ugoira_id),
'body': meta_body,
'content_type': 'application/json',
'status': 200,
'match_querystring': True,
})
responses.add(**{
'method': responses.HEAD,
'url': big_zip_url,
'status': 503,
})
responses.add(**{
'method': responses.HEAD,
'url': small_zip_url,
'status': 503,
})
runner = CliRunner()
result = runner.invoke(
ugoira,
[str(ugoira_id)]
)
assert result.output.strip().startswith(
'Downloading {} (0/1)\nError: '.format(ugoira_id)
)
@responses.activate
def case2():
"""
original head - good
original get - bad
common head - bad
common get - not reached
"""
responses.reset()
responses.add(**{
'method': responses.GET,
'url': get_metadata_url(ugoira_id),
'body': meta_body,
'content_type': 'application/json',
'status': 200,
'match_querystring': True,
})
responses.add(**{
'method': responses.HEAD,
'url': big_zip_url,
'status': 200,
})
responses.add(**{
'method': responses.GET,
'url': big_zip_url,
'status': 403,
})
responses.add(**{
'method': responses.HEAD,
'url': small_zip_url,
'status': 503,
})
runner = CliRunner()
result = runner.invoke(
ugoira,
[str(ugoira_id)]
)
assert result.output.strip().startswith(
'Downloading {} (0/1)\nError: '.format(ugoira_id)
)
@responses.activate
def case3():
"""
original head - bad
original get - not reached
common head - good
common get - bad
"""
responses.reset()
responses.add(**{
'method': responses.GET,
'url': get_metadata_url(ugoira_id),
'body': meta_body,
'content_type': 'application/json',
'status': 200,
'match_querystring': True,
})
responses.add(**{
'method': responses.HEAD,
'url': big_zip_url,
'status': 503,
})
responses.add(**{
'method': responses.HEAD,
'url': small_zip_url,
'status': 200,
})
responses.add(**{
'method': responses.GET,
'url': small_zip_url,
'status': 403,
})
runner = CliRunner()
result = runner.invoke(
ugoira,
[str(ugoira_id)]
)
assert result.output.strip().startswith(
'Downloading {} (0/1)\nError: '.format(ugoira_id)
)
case1()
case2()
case3()
def test_is_not_ugoira(
non_ugoira_id,
error_meta_body,
):
    """Test for an illust ID that is not an ugoira"""
@responses.activate
def test():
responses.reset()
responses.add(**{
'method': responses.GET,
'url': get_metadata_url(non_ugoira_id),
'body': error_meta_body,
'content_type': 'application/json',
'status': 200,
'match_querystring': True,
})
runner = CliRunner()
result = runner.invoke(
ugoira,
[str(non_ugoira_id)]
)
assert result.output.strip() == (
'Downloading {} (0/1)\n'.format(non_ugoira_id) +
'Error: Illust ID {} is not ugoira.'.format(non_ugoira_id)
)
test()
|
1654028
|
import json
import numpy as np
import tensorflow as tf
from .xlnet import build_xlnet
__all__ = [
'build_model_from_config',
'load_model_weights_from_checkpoint',
'load_trained_model_from_checkpoint',
]
def checkpoint_loader(checkpoint_file):
def _loader(name):
return tf.train.load_variable(checkpoint_file, name)
return _loader
def build_model_from_config(config_path,
batch_size,
memory_len,
target_len,
in_train_phase,
**kwargs):
"""Build the model from config file.
:param config_path: The path to the JSON configuration file.
:param batch_size: Batch size.
:param memory_len: Maximum size of memory.
:param target_len: Length of target.
:param in_train_phase: Whether in training phase.
:return: model and config
"""
with open(config_path, 'r') as reader:
config = json.loads(reader.read())
model = build_xlnet(
units=config['d_model'],
training=in_train_phase,
num_token=config['n_token'],
num_block=config['n_layer'],
num_head=config['n_head'],
hidden_dim=config['d_inner'],
batch_size=batch_size,
memory_len=memory_len,
target_len=target_len,
dropout=0.0,
attention_dropout=0.0,
feed_forward_activation=config['ff_activation'],
clamp_len=None,
shared_biases=not config['untie_r'],
**kwargs)
return model, config
def load_model_weights_from_checkpoint(model,
config,
checkpoint_path,
in_train_phase):
"""Load trained official model from checkpoint.
:param model: Built keras model.
:param config: Loaded configuration file.
:param checkpoint_path: The path to the checkpoint files, should end with '.ckpt'.
:param in_train_phase: Whether in training phase.
"""
units = config['d_model']
loader = checkpoint_loader(checkpoint_path)
model.get_layer(name='Embed-Token').set_weights([
loader('model/transformer/word_embedding/lookup_table'),
])
if in_train_phase:
model.get_layer(name='Embed-Mask').set_weights([
loader('model/transformer/mask_emb/mask_emb'),
])
r_w_bias = loader('model/transformer/r_w_bias')
r_r_bias = loader('model/transformer/r_r_bias')
r_s_bias = loader('model/transformer/r_s_bias')
segment_embed = loader('model/transformer/seg_embed')
if config.get('untie_r', False):
for i in range(config['n_layer']):
model.get_layer(name='Relative-Bias-{}'.format(i + 1)).set_weights([
r_w_bias[i].flatten(),
r_r_bias[i].flatten(),
])
model.get_layer(name='Segment-Bias-{}'.format(i + 1)).set_weights([
r_s_bias[i].flatten(),
])
else:
model.get_layer(name='Relative-Bias').set_weights([
r_w_bias.flatten(),
r_r_bias.flatten(),
])
model.get_layer(name='Segment-Bias').set_weights([
r_s_bias.flatten(),
])
for i in range(config['n_layer']):
model.get_layer(name='Embed-Segment-{}'.format(i + 1)).set_weights([
segment_embed[i].reshape((2, units))
])
att_kernel_name = 'model/transformer/layer_{}/rel_attn/{}/kernel'
model.get_layer(name='Attention-{}'.format(i + 1)).set_weights([
np.concatenate(
[
loader(att_kernel_name.format(i, 'q')).reshape((units, units)),
loader(att_kernel_name.format(i, 'k')).reshape((units, units)),
loader(att_kernel_name.format(i, 'v')).reshape((units, units)),
loader(att_kernel_name.format(i, 'r')).reshape((units, units)),
loader(att_kernel_name.format(i, 'o')).reshape((units, units)).transpose(),
],
axis=1,
),
])
model.get_layer(name='Attention-Normal-{}'.format(i + 1)).set_weights([
loader('model/transformer/layer_{}/rel_attn/LayerNorm/gamma'.format(i)),
loader('model/transformer/layer_{}/rel_attn/LayerNorm/beta'.format(i)),
])
model.get_layer(name='FeedForward-{}'.format(i + 1)).set_weights([
loader('model/transformer/layer_{}/ff/layer_1/kernel'.format(i)),
loader('model/transformer/layer_{}/ff/layer_1/bias'.format(i)),
loader('model/transformer/layer_{}/ff/layer_2/kernel'.format(i)),
loader('model/transformer/layer_{}/ff/layer_2/bias'.format(i)),
])
model.get_layer(name='FeedForward-Normal-{}'.format(i + 1)).set_weights([
loader('model/transformer/layer_{}/ff/LayerNorm/gamma'.format(i)),
loader('model/transformer/layer_{}/ff/LayerNorm/beta'.format(i)),
])
if in_train_phase:
model.get_layer(name='Softmax').set_weights([
loader('model/lm_loss/bias'),
])
def load_trained_model_from_checkpoint(config_path,
checkpoint_path,
batch_size,
memory_len,
target_len,
in_train_phase=False,
**kwargs):
"""Load trained official model from checkpoint.
:param config_path: The path to the JSON configuration file.
:param checkpoint_path: The path to the checkpoint files, should end with '.ckpt'.
:param batch_size: Batch size.
:param memory_len: Maximum size of memory.
:param target_len: Length of target.
:param in_train_phase: Whether in training phase.
:return: model
"""
model, config = build_model_from_config(
config_path=config_path,
batch_size=batch_size,
memory_len=memory_len,
target_len=target_len,
in_train_phase=in_train_phase,
**kwargs)
load_model_weights_from_checkpoint(
model=model,
config=config,
checkpoint_path=checkpoint_path,
in_train_phase=in_train_phase,
)
return model
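# Minimal usage sketch (illustrative): the paths below are hypothetical
# placeholders and the lengths are arbitrary example values.
if __name__ == '__main__':
    xlnet_model = load_trained_model_from_checkpoint(
        config_path='xlnet_config.json',     # hypothetical path
        checkpoint_path='xlnet_model.ckpt',  # hypothetical path
        batch_size=16,
        memory_len=512,
        target_len=128,
        in_train_phase=False,
    )
    xlnet_model.summary()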
|
1654040
|
class error(Exception):
pass
def as_fd(f):
if not isinstance(f, (int, long)):
try:
fileno = f.fileno
except AttributeError:
raise TypeError("argument must be an int, or have a fileno() method.")
f = f.fileno()
if not isinstance(f, (int, long)):
raise TypeError("fileno() returned a non-integer")
fd = int(f)
if fd < 0 or isinstance(fd, long):
raise ValueError("file descriptor cannot be a negative integer (%i)"%fd)
return fd
def select(iwtd, owtd, ewtd, timeout=None):
"""Wait until one or more file descriptors are ready for some kind of I/O.
The first three arguments are sequences of file descriptors to be waited for:
rlist -- wait until ready for reading
wlist -- wait until ready for writing
xlist -- wait for an ``exceptional condition''
If only one kind of condition is required, pass [] for the other lists.
A file descriptor is either a socket or file object, or a small integer
gotten from a fileno() method call on one of those.
The optional 4th argument specifies a timeout in seconds; it may be
a floating point number to specify fractions of seconds. If it is absent
or None, the call will never time out.
The return value is a tuple of three lists corresponding to the first three
arguments; each contains the subset of the corresponding file descriptors
that are ready.
*** IMPORTANT NOTICE ***
On Windows, only sockets are supported; on Unix, all file descriptors.
"""
from select import poll, POLLIN, POLLOUT, POLLPRI, POLLERR, POLLHUP
fddict = {}
polldict = {}
fd = 0
for f in iwtd + owtd + ewtd:
fddict[id(f)] = as_fd(f)
for f in iwtd:
fd = fddict[id(f)]
polldict[fd] = polldict.get(fd, 0) | POLLIN
for f in owtd:
fd = fddict[id(f)]
polldict[fd] = polldict.get(fd, 0) | POLLOUT
for f in ewtd:
fd = fddict[id(f)]
polldict[fd] = polldict.get(fd, 0) | POLLPRI
p = poll()
for fd, mask in polldict.iteritems():
p.register(fd, mask)
if timeout is not None:
if (not hasattr(timeout, '__int__') and
not hasattr(timeout, '__float__')):
raise TypeError('timeout must be a float or None')
ret = dict(p.poll(int(float(timeout) * 1000)))
else:
ret = dict(p.poll())
iretd = [ f for f in iwtd if ret.get(fddict[id(f)], 0) & (POLLIN|POLLHUP|POLLERR)]
oretd = [ f for f in owtd if ret.get(fddict[id(f)], 0) & POLLOUT]
eretd = [ f for f in ewtd if ret.get(fddict[id(f)], 0) & (POLLERR|POLLPRI)]
return iretd, oretd, eretd
|
1654051
|
import time
class FixedBucket:
'''
Fixed size FIFO container.
'''
def __init__(self, size):
self._data = [None] * size
self._cur = 0
self._size = size
self._flag_full = False
def put(self, v):
self._data[self._cur] = v
self._cur += 1
if self._cur == self._size:
self._cur = 0
self._flag_full = True
@property
def data(self):
if not self._flag_full:
return self._data[: self._cur]
return self._data
def __len__(self):
if not self._flag_full:
return self._cur
return self._size
def __getitem__(self, sl):
if not self._flag_full:
return self._data[: self._cur][sl]
return (self._data[self._cur :] + self._data[: self._cur])[sl]
class TokenBucket:
'''
Dynamic token bucket
'''
def __init__(self, init_amount=0):
self._amount = init_amount
self._last_consume_time = time.time()
def consume(self, take_amount, avg_rate, burst_size):
now = time.time()
inc = (now - self._last_consume_time) * avg_rate
current_amount = min(inc + self._amount, burst_size)
if take_amount > current_amount:
return False
self._amount, self._last_consume_time = current_amount - take_amount, now
return True
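# Minimal usage sketch (illustrative, arbitrary values): FixedBucket keeps the
# last `size` items; TokenBucket rate-limits consumption against an average
# refill rate and a burst ceiling.
if __name__ == '__main__':
    bucket = FixedBucket(3)
    for v in range(5):
        bucket.put(v)
    print(bucket.data)    # raw ring buffer once full: [3, 4, 2]
    print(bucket[-2:])    # slicing restores FIFO order: [3, 4]
    limiter = TokenBucket(init_amount=5)
    print(limiter.consume(take_amount=3, avg_rate=1, burst_size=5))  # True, ~2 tokens remain
    print(limiter.consume(take_amount=5, avg_rate=1, burst_size=5))  # False when called immediately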
|
1654060
|
import torch
import numpy as np
import matplotlib.pyplot as plt
def plot_curve(train_matrix, test_matrix, t_grid, train_grid):
plt.figure(figsize=(5, 5))
truth_grid = t_grid[:,t_grid[0,:].argsort()]
t_truth_grid = train_grid[:,train_grid[0,:].argsort()]
x = truth_grid[0, :]
y = truth_grid[1, :]
y2 = train_matrix[:, 0].squeeze()
print(torch.max(x), torch.max(t_truth_grid[0, :]))
plt.plot(x, y, marker='', ls='-', label='Test', linewidth=4, color='gold')
plt.plot(t_truth_grid[0, :], t_truth_grid[1, :], marker='', ls='-', label='Train',alpha=0.5, linewidth=1, color='grey')
#plt.yticks(np.arange(-2.0, 2.1, 0.5), fontsize=4, family='Times New Roman')
#plt.xticks(np.arange(0, 2.1, 0.2), fontsize=4, family='Times New Roman')
plt.legend()
plt.xlabel('Treatment')
plt.ylabel('Response')
plt.savefig( "news.pdf", bbox_inches='tight')
|
1654068
|
import subprocess
import pysurvive
import sys
import os
from gooey import Gooey, GooeyParser
@Gooey(tabbed_groups=True,
image_dir=os.path.dirname(os.path.realpath(__file__)) + "/images",
use_cmd_args=True,
program_name="pysurvive",
richtext_controls=True,
clear_before_run=True)
def main():
parser = pysurvive.create_argument_parser(GooeyParser())
args = parser.parse_args()
if args.websocketd:
subprocess.run(["survive-websocketd"] + sys.argv[1:])
return
print(" ".join(sys.argv))
actx = pysurvive.SimpleContext(sys.argv)
while actx.Running():
pass
if __name__ == '__main__':
main()
|
1654071
|
from sklearn.datasets import load_digits
from MulticoreTSNE import MulticoreTSNE as TSNE
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot as plt
import scipy.io  # scipy.io.loadmat is used below and is not exposed by a bare `import scipy`
import torch
import numpy as np
#digits = load_digits()
query_path = '.'
result_n = scipy.io.loadmat(query_path+'/query_result_normal.mat')
query_n = torch.FloatTensor(result_n['query_f'])
label_n = result_n['query_label'][0]
result_q = scipy.io.loadmat(query_path+'/query_result.mat')
query_q = torch.FloatTensor(result_q['query_f'])
label_q = result_q['query_label'][0]
data = torch.cat( (query_n, query_q), 0)
flag = -1
label_t1 = torch.zeros(label_n.shape)
for index, xx in enumerate(label_n):
if index == 0:
flag = xx
continue
if xx !=flag:
flag = xx
label_t1[index] = label_t1[index-1] +1
else:
label_t1[index] = label_t1[index-1]
flag = -1
label_t2 = torch.zeros(label_q.shape)
for index, xx in enumerate(label_q):
if index == 0:
flag = xx
continue
if xx !=flag:
flag = xx
label_t2[index] = label_t2[index-1] +1
else:
label_t2[index] = label_t2[index-1]
label = np.concatenate( (label_t1, label_t2), 0)
print(label)
#label = torch.cat( (torch.zeros(label_n.shape), torch.ones(label_q.shape)), 0)
print(data.shape, label.shape)
embeddings = TSNE(n_jobs=16).fit_transform(data)
fig = plt.figure(dpi=1200)
top = 10
vis_x = [] #embeddings[0:first20, 0]
vis_y = [] #embeddings[0:first20, 1]
label_t = []
for i in range(500):
if label_t1[i] == top:
break
if i==0 or label_t1[i] != label_t1[i-1]:
vis_x.append(embeddings[i, 0])
vis_y.append(embeddings[i, 1])
label_t.append(label_t1[i])
print(label_t)
plt.scatter(vis_x, vis_y, c=label_t, cmap=plt.cm.get_cmap("jet", top), marker='.')
start = len(label_t1)
vis_x = [] #embeddings[0:first20, 0]
vis_y = [] #embeddings[0:first20, 1]
label_t = []
for i in range(500):
if label_t2[i] == top:
break
if i==0 or label_t2[i] != label_t2[i-1]:
vis_x.append(embeddings[start+i, 0])
vis_y.append(embeddings[start+i, 1])
label_t.append(label_t2[i])
print(label_t)
plt.scatter(vis_x, vis_y, c=label_t, cmap=plt.cm.get_cmap("jet", top), marker='*')
plt.colorbar(ticks=range(top))
plt.clim(-0.5, top-0.5)
plt.show()
fig.savefig( 'tsne.jpg')
|
1654074
|
from datetime import datetime
from sqlalchemy import Column, DateTime, ForeignKey, Integer, String
from sqlalchemy.schema import Index
from freight.config import db
from freight.db.types.json import JSONEncodedDict
class TaskStatus(object):
unknown = 0
pending = 1
in_progress = 2
finished = 3
failed = 4
cancelled = 5
@classmethod
def get_label(cls, status):
return STATUS_LABELS[status]
@classmethod
def label_to_id(cls, label):
return STATUS_LABELS_REV[label]
STATUS_LABELS = {
TaskStatus.unknown: "unknown",
TaskStatus.pending: "pending",
TaskStatus.in_progress: "in_progress",
TaskStatus.finished: "finished",
TaskStatus.failed: "failed",
TaskStatus.cancelled: "cancelled",
}
STATUS_LABELS_REV = {v: k for k, v in list(STATUS_LABELS.items())}
class Task(db.Model):
__tablename__ = "task"
__table_args__ = (
Index("idx_task_app_id", "app_id"),
Index("idx_task_user_id", "user_id"),
)
id = Column(Integer, primary_key=True)
app_id = Column(Integer, ForeignKey("app.id", ondelete="CASCADE"), nullable=False)
user_id = Column(Integer, ForeignKey("user.id", ondelete="CASCADE"), nullable=False)
ref = Column(String(128), nullable=False)
sha = Column(String(40))
provider = Column(String(64), nullable=False)
status = Column(Integer, nullable=False)
params = Column(JSONEncodedDict, nullable=True)
data = Column(JSONEncodedDict)
date_created = Column(DateTime, default=datetime.utcnow, nullable=False)
# represents the start of the task (or the last time it was attempted)
date_started = Column(DateTime)
date_finished = Column(DateTime)
@property
def was_forced(self):
return self.data.get("force", False)
@property
def checks(self):
return self.data.get("checks", [])
@property
def notifiers(self):
return self.data.get("notifiers", [])
@property
def provider_config(self):
return self.data.get("provider_config", {})
@property
def status_label(self):
return STATUS_LABELS.get(self.status, "unknown")
@property
def duration(self):
if not (self.date_finished and self.date_started):
return
return float("%.2f" % (self.date_finished - self.date_started).total_seconds())
|
1654080
|
import os
from setuptools import setup
install_requires = [
'django>1.11',
'six'
]
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
CHANGELOG = open(os.path.join(os.path.dirname(__file__), 'CHANGELOG.md')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-sortable-listview',
version='0.43',
packages=['sortable_listview'],
include_package_data=True,
license='License :: OSI Approved :: MIT License',
description='An extension of django\'s ListView that provides sorting',
long_description_content_type='text/markdown',
long_description=README + CHANGELOG,
url='https://github.com/aptivate/django-sortable-listview',
author='Aptivate',
author_email='<EMAIL>',
install_requires=install_requires,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
test_suite='tests.runtests.runtests'
)
|
1654110
|
from functools import singledispatchmethod
from typing import Union
from UE4Parse.BinaryReader import BinaryStream
# TODO Implement EGuidFormats
class FGuid:
position: int
A: int
B: int
C: int
D: int
@singledispatchmethod
def __init__(self, reader: BinaryStream):
self.A = reader.readUInt32()
self.B = reader.readUInt32()
self.C = reader.readUInt32()
self.D = reader.readUInt32()
@__init__.register
def _from_str(self, string: str):
self.A = int(string[0:8], 16)
self.B = int(string[8:16], 16)
self.C = int(string[16:24], 16)
self.D = int(string[24:32], 16)
@__init__.register
def from_int_(self, A: int, B: int, C: int, D: int):
self.A = A
self.B = B
self.C = C
self.D = D
def __eq__(self, o: Union['FGuid', str]) -> bool:
if isinstance(o, str):
return str(self).lower() == o.lower()
return ((self.A ^ o.A) | (self.B ^ o.B) | (self.C ^ o.C) | (self.D ^ o.D)) == 0
def GetValue(self):
def formatter(a):
return format(a, '08x')
return f"{formatter(self.A)}{formatter(self.B)}{formatter(self.C)}{formatter(self.D)}".upper()
def __str__(self):
return self.GetValue()
def __hash__(self) -> int:
return hash(self.GetValue())
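# Illustrative sketch (assumes the UE4Parse package is importable, since the
# module-level BinaryStream import must succeed): each component is rendered
# as 8 hex digits, so a GUID built from small ints stringifies to a
# 32-character hex string and compares case-insensitively against strings.
if __name__ == "__main__":
    guid = FGuid(1, 2, 3, 4)  # dispatches to the int overload
    print(guid)               # 00000001000000020000000300000004
    print(guid == "00000001000000020000000300000004")  # True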
|
1654135
|
from django.contrib.auth import get_user_model
from django.db import models
from baserow.contrib.database.table.models import Table
from baserow.core.mixins import CreatedAndUpdatedOnMixin
User = get_user_model()
class RowComment(CreatedAndUpdatedOnMixin, models.Model):
"""
A user made comment on a specific row in a user table in Baserow.
"""
table = models.ForeignKey(
Table,
on_delete=models.CASCADE,
        help_text="The table containing the row this comment is for.",
)
row_id = models.PositiveIntegerField(
help_text="The id of the row the comment is for."
)
user = models.ForeignKey(
User, on_delete=models.CASCADE, help_text="The user who made the comment."
)
    comment = models.TextField(help_text="The user's comment.")
class Meta:
db_table = "database_rowcomment"
ordering = ("-created_on",)
indexes = [models.Index(fields=["table", "row_id", "-created_on"])]
|
1654144
|
from qttable import QTable
from qt import SIGNAL,PYSIGNAL
class TableEdit(QTable):
def __init__(self, parent=None):
QTable.__init__(self, parent)
self.data = None
def setData(self, data):
self.removeColumns(range(self.numCols()))
self.removeRows(range(self.numRows()))
self.setNumCols(len(data.header))
for j,(h,t) in enumerate(data.header):
self.horizontalHeader().setLabel(j,h)
self.setNumRows(len(data))
for i,row in enumerate(data):
for j,h in enumerate(row):
if h is not None:
if type(h)==str or type(h)==unicode:
self.setText(i,j,h)
else:
self.setText(i,j,str(h))
for j,_ in enumerate(data.header):
self.adjustColumn(j)
if data != self.data:
for sig in ('setHeader','cellChanged','insertRow','insertColumn','takeRow','takeColumn','sort'):
self.connect(data.emitter,PYSIGNAL(sig),eval("self._%s"%sig))
self.connect(self,SIGNAL("valueChanged(int,int)"),self.__cellChanged)
self.data = data
# incoming; model-to-gui
def _setHeader(self, col, header):
self.horizontalHeader().setLabel(col,header[0])
def _cellChanged(self, i, j, val):
if val is None:
val = ''
self.disconnect(self,SIGNAL("valueChanged(int,int)"),self.__cellChanged)
self.setText(i,j,unicode(val))
self.connect(self,SIGNAL("valueChanged(int,int)"),self.__cellChanged)
def _insertRow(self, i, row):
self.insertRows(i)
if row is not None:
for j,c in enumerate(row):
if c is not None:
self.setText(i,j,str(c))
def _insertColumn(self, j, col):
self.insertColumns(j)
if col is not None:
self.horizontalHeader().setLabel(j,col[0][0])
for i,c in enumerate(col[1:]):
if c is not None:
self.setText(i,j,str(c))
def _takeRow(self, i, r):
self.removeRow(i)
def _takeColumn(self, j):
self.removeColumn(j)
def _sort(self):
self.setData(self.data)
# outgoing; gui-to-model
def __cellChanged(self, row, col):
self.disconnect(self,SIGNAL("valueChanged(int,int)"),self.__cellChanged)
self.disconnect(self.data.emitter,PYSIGNAL("cellChanged"),self._cellChanged)
self.data[row][col] = self.item(row,col).text()
self.setText(row,col,self.data[row][col])
self.connect(self,SIGNAL("valueChanged(int,int)"),self.__cellChanged)
self.connect(self.data.emitter,PYSIGNAL("cellChanged"),self._cellChanged)
if __name__ == '__main__':
import qt
from table_qt import TableModel
class Demo(qt.QVBox):
def __init__(self):
qt.QVBox.__init__(self)
self.b1 = qt.QPushButton('reset / load table',self)
self.connect(self.b1,qt.SIGNAL("clicked()"),self.action)
self.tab = TableEdit(self)
self.stage = 0
self.b2 = qt.QPushButton('print table at console',self)
self.connect(self.b2,qt.SIGNAL("clicked()"),self.printTable)
def printTable(self):
self.data.printTable()
def action(self):
if self.stage == 0:
t = [[('start',float),('end',float),('ch',str),('transcript',str)],
[1.23,2.34,'A','hello'],
[2.45,2.67,'B','hi'],
[2.88,3.09,'A','how are you']]
self.data = TableModel.importList(t)
self.tab.setData(self.data)
self.stage = 1
self.b1.setText('add row')
elif self.stage == 1:
self.data.insertRow(len(self.data))
self.stage = 2
self.b1.setText('take row 4')
elif self.stage == 2:
self.tmprow = self.data.takeRow(3)
self.stage = 3
self.b1.setText('insert row at the top')
elif self.stage == 3:
self.data.insertRow(0,self.tmprow)
self.stage = 4
self.b1.setText('sort by start')
elif self.stage == 4:
self.data.sort()
self.stage = 5
                self.b1.setText('add column at the beginning')
elif self.stage == 5:
self.data.insertColumn(0)
self.data.setHeader(0,('review',str))
self.stage = 6
self.b1.setText('take review column')
elif self.stage == 6:
self.tmpcol = self.data.takeColumn(0)
self.stage = 7
self.b1.setText('insert the column before ch column')
elif self.stage == 7:
self.data.insertColumn(2,self.tmpcol)
self.stage = 8
self.b1.setText('change start time of row 1 to 9.99')
elif self.stage == 8:
self.data[0][0] = 9.99
self.stage = 0
self.b1.setText('reset / load table')
app = qt.QApplication([])
w = Demo()
app.setMainWidget(w)
w.show()
app.exec_loop()
|
1654205
|
from .docusaurus_writer import DocusaurusWriter, DocusaurusTranslator
from docutils.io import StringOutput
from io import open
from munch import munchify
from os import path
from sphinx.builders import Builder
from sphinx.locale import __
from sphinx.util import logging
from sphinx.util.osutil import ensuredir, os_path
import datetime
from urllib.parse import quote
logger = logging.getLogger(__name__)
class DocusaurusBuilder(Builder):
"""Class to build Docusaurus-compatible MDX documentation from Sphinx.
"""
name = 'docusaurus'
format = 'docusaurus'
epilog = __('The Docusaurus files are in %(outdir)s.')
allow_parallel = True
default_translator_class = DocusaurusTranslator
current_docname = None # the current doc being processed
out_suffix = '.mdx' # file extensions for output
link_suffix = '/' # end of URL, e.g. a trailing slash
markdown_http_base = '/modeling' # base URL path for all files
# formatting of frontmatter for the API references
# first level specifies the document path, e.g. 'models/api-reference'
# second level specifies how far back the path should show:
# * ``title_tree_levels``: e.g. 'models.abc.xyz' with title_tree_levels 2 becomes 'abc.xyz' in the title
# * ``slug_tree_levels``: e.g. 'models.abc' with slug_tree_levels 1 becomes 'abc' in the slug
# TODO: make configurable instead of hardcoded
api_frontmatter = {
'open-model-hub/models': {
'title_tree_levels': 1,
'slug_tree_levels': 1
},
'open-model-hub/api-reference': {
'title_tree_levels': 1,
'slug_tree_levels': 1
},
'bach/api-reference': {
'title_tree_levels': 1,
'slug_tree_levels': 1
}
}
def init(self):
self.secnumbers = {}
def get_outdated_docs(self):
"""
Find documents that are outdated and should be built.
:returns: generator with each docname that is outdated.
"""
for docname in self.env.found_docs:
if docname not in self.env.all_docs:
yield docname
continue
targetname = path.join(self.outdir, docname + self.out_suffix)
try:
targetmtime = path.getmtime(targetname)
except Exception:
targetmtime = 0
try:
srcmtime = path.getmtime(self.env.doc2path(docname))
if srcmtime > targetmtime:
yield docname
except EnvironmentError:
pass
def get_target_uri(self, docname: str, typ: str = None) -> str:
"""
Generate a URI from the docname and config.
:returns: Formatted URI
"""
return quote(docname) + self.link_suffix
def prepare_writing(self, docnames):
"""
Prepares documents for writing. Instantiates the writer and context.
"""
self.writer = DocusaurusWriter(self)
self.ctx = self.create_context()
def create_context(self):
"""
Creates the context for writing documents; currently just with the datetime.
"""
ctx = munchify({
'date': datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
})
return ctx
def write_doc(self, docname, doctree):
"""
        Writes a document to the filesystem.
"""
self.current_docname = docname
self.secnumbers = self.env.toc_secnumbers.get(docname, {})
destination = StringOutput(encoding='utf-8')
self.writer.write(doctree, destination)
outfilename = path.join(self.outdir, os_path(docname) + self.out_suffix)
ensuredir(path.dirname(outfilename))
try:
with open(outfilename, 'w', encoding='utf-8') as f: # type: ignore
f.write(self.writer.output)
except (IOError, OSError) as err:
logger.warning(__("error writing file %s: %s"), outfilename, err)
def finish(self):
pass
|
1654209
|
from lyrebird import application
from .. import checker
class OnResponseUpstreamHandler:
def __call__(self, rules=None, *args, **kw):
def func(origin_func):
func_type = checker.TYPE_ON_RESPONSE_UPSTREAM
if not checker.scripts_tmp_storage.get(func_type):
checker.scripts_tmp_storage[func_type] = []
checker.scripts_tmp_storage[func_type].append({
'name': origin_func.__name__,
'func': origin_func,
'rules': rules
})
return origin_func
return func
@staticmethod
def register(func_info):
application.on_response_upstream.append(func_info)
@staticmethod
def unregister(func_info):
if func_info in application.on_response_upstream:
application.on_response_upstream.remove(func_info)
on_response_upstream = OnResponseUpstreamHandler()
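# Illustrative usage sketch (the exact rule format and handler signature are
# defined by Lyrebird's checker runtime; the shapes below are hypothetical):
#
#     @on_response_upstream(rules={'request.url': 'example.com'})
#     def modify_response(flow):
#         ...
#
# The decorator only records the handler in checker.scripts_tmp_storage;
# register()/unregister() later attach or detach it on
# application.on_response_upstream.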
|
1654221
|
import pytest
import helpers
import pqclean
@pytest.mark.parametrize(
'implementation',
pqclean.Scheme.all_implementations(),
ids=str,
)
@helpers.filtered_test
def test_preprocessor(implementation: pqclean.Implementation):
cfiles = implementation.cfiles()
hfiles = implementation.hfiles()
errors = []
for file in hfiles + cfiles:
with open(file) as f:
for i, line in enumerate(f):
line = line.strip()
if file in hfiles and i == 0 and line.startswith('#ifndef'):
continue
if line.startswith('#if'):
errors.append("\n at {}:{}".format(file, i+1))
if errors:
raise AssertionError(
"Prohibited use of preprocessor conditional" + "".join(errors)
)
if __name__ == '__main__':
import sys
pytest.main(sys.argv)
|
1654266
|
from django.contrib import admin
from . import models
admin.site.register(models.Device)
admin.site.register(models.Port)
|
1654296
|
import bpy
from bpy.props import *
from ...nodes.BASE.node_base import RenderNodeBase
from mathutils import Color, Vector
def update_node(self, context):
if self.operate_type == 'COMBINE':
self.remove_output('x')
self.remove_output('y')
self.remove_output('z')
self.remove_input('input')
self.create_input('RenderNodeSocketFloat', 'x', 'X')
self.create_input('RenderNodeSocketFloat', 'y', 'Y')
self.create_input('RenderNodeSocketFloat', 'z', 'Z')
self.create_output('RenderNodeSocketXYZ', 'output', "Output")
else:
self.remove_input('x')
self.remove_input('y')
self.remove_input('z')
self.remove_output('output')
self.create_input('RenderNodeSocketXYZ', 'input', 'Input')
self.create_output('RenderNodeSocketFloat', 'x', 'X')
self.create_output('RenderNodeSocketFloat', 'y', 'Y')
self.create_output('RenderNodeSocketFloat', 'z', 'Z')
self.execute_tree()
class RenderNodeVectorConvert(RenderNodeBase):
bl_idname = 'RenderNodeVectorConvert'
bl_label = 'Vector Convert'
operate_type: EnumProperty(
name='Type',
items=[
('COMBINE', 'Combine XYZ', ''),
('SEPARATE', 'Separate XYZ', ''),
],
update=update_node
)
def init(self, context):
self.create_input('RenderNodeSocketFloat', 'x', 'X')
self.create_input('RenderNodeSocketFloat', 'y', 'Y')
self.create_input('RenderNodeSocketFloat', 'z', 'Z')
self.create_output('RenderNodeSocketXYZ', 'output', "Output")
def draw_label(self):
name = self.bl_rna.properties['operate_type'].enum_items[self.operate_type].name
return name
def draw_buttons(self, context, layout):
layout.prop(self, 'operate_type', text='')
def process(self, context, id, path):
if self.operate_type == 'COMBINE':
res = Vector((
self.inputs[0].get_value(),
self.inputs[1].get_value(),
self.inputs[2].get_value(),
))
self.outputs[0].set_value(res)
else:
input_value = list(self.inputs[0].get_value())
self.outputs[0].set_value(input_value[0])
self.outputs[1].set_value(input_value[1])
self.outputs[2].set_value(input_value[2])
def register():
bpy.utils.register_class(RenderNodeVectorConvert)
def unregister():
bpy.utils.unregister_class(RenderNodeVectorConvert)
|
1654299
|
import pytest
import json
from easier68k.core.models.list_file import ListFile
from easier68k.assembler.assembler import parse
import os.path
def test_basic_test_input():
script_dir = os.path.dirname(__file__) # The directory the current file is in
# Test
with open(os.path.join(script_dir, 'basic_test_input.x68')) as x68:
assembled, issues = parse(x68.read(-1))
with open(os.path.join(script_dir, 'temp_output_file.txt'), 'w') as out: # Temporary output file for testing results
pretty_json = json.loads(assembled.to_json())
out.write(json.dumps(pretty_json, indent=4, sort_keys=True))
if issues:
out.write('\r\n----- ISSUES -----\r\n')
for issue in issues:
out.write('{}: {}\r\n'.format(issue[1], issue[0]))
assert isinstance(assembled, ListFile)
assert assembled.starting_execution_address == 1024
assert len(assembled.symbols) == 1
assert assembled.symbols['magic'] == 1046
assert len(assembled.data) == 5
assert assembled.data['1024'] == '303cfffd'
assert assembled.data['1028'] == '33fcabcd00aaaaaa'
assert assembled.data['1036'] == '41f900000416'
assert assembled.data['1042'] == 'ffffffff'
assert assembled.data['1046'] == 'abcd'
assert not issues
|
1654320
|
def name_func(func, _, params):
return f'{func.__name__}_{"_".join(str(arg) for arg in params.args)}'
def get_enc_params(dtype):
if dtype == "float32":
return "PCM_F", 32
if dtype == "int32":
return "PCM_S", 32
if dtype == "int16":
return "PCM_S", 16
if dtype == "uint8":
return "PCM_U", 8
raise ValueError(f"Unexpected dtype: {dtype}")
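# Illustrative mapping, e.g. get_enc_params("int16") -> ("PCM_S", 16) and
# get_enc_params("float32") -> ("PCM_F", 32).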
|
1654354
|
import operator
from functools import reduce  # reduce is no longer a builtin in Python 3
def factorial(n):
"""Calculate n factorial"""
return reduce(operator.mul, range(2, n+1), 1)
def intersection(*sets):
"""Get the intersection of all input sets"""
return reduce(set.intersection, sets)
def union(*sets):
"""Get the union of all input sets"""
return reduce(set.union, sets)
def join(*seqs):
"""Join any input sequences that support concatenation"""
return reduce(operator.concat, seqs)
"""
Some usage:
>>> factorial(3)
6
>>> factorial(10)
3628800
>>> a = set([1, 2, 3, 4, 5])
>>> b = set([5, 6, 3, 7])
>>> c = set([8, 7, 5])
>>> intersection(a, b, c)
set([5])
>>> union(a, b, c)
set([1, 2, 3, 4, 5, 6, 7, 8])
>>> join("one", "two", "three", "four")
'onetwothreefour'
>>> join([1, 2, 3], [4, 5], [6, 7])
[1, 2, 3, 4, 5, 6, 7]
"""
|
1654367
|
from compas.geometry import distance_point_point
from compas_cem.optimization.constraints import FloatConstraint
__all__ = ["DeviationEdgeLengthConstraint"]
class DeviationEdgeLengthConstraint(FloatConstraint):
"""
Make a deviation edge reach a target length.
"""
def __init__(self, edge=None, length=None, weight=1.0):
# TODO: needs different serialization mechanism
super(DeviationEdgeLengthConstraint, self).__init__(edge, length, weight)
def reference(self, data):
"""
"""
u, v = self.key()
point_a = data["node_xyz"][u]
point_b = data["node_xyz"][v]
try:
length = distance_point_point(point_a, point_b)
except TypeError:
# TODO: This import should not happen here
import autograd.numpy as np
length = np.linalg.norm(point_a - point_b)
return length
if __name__ == "__main__":
pass
|
1654388
|
import torch
from torch.nn import ReLU, Tanh, CrossEntropyLoss, Sigmoid, SELU, MSELoss, L1Loss, SmoothL1Loss, NLLLoss, BCELoss
from torch.optim import Adam, SGD, RMSprop, Adagrad
from torch.utils.data.dataset import Dataset
activations = {
"relu": ReLU(),
"tanh": Tanh(),
"sigmoid": Sigmoid(),
"selu": SELU(),
}
optimizers = {
"adam": Adam,
"sgd": SGD,
"rmsprop": RMSprop,
"adagrad": Adagrad,
}
losses = {
"negative log likelihood": NLLLoss(),
"nll": NLLLoss(),
"binary cross entropy": BCELoss(),
"bce": BCELoss(),
"categorical cross entropy": CrossEntropyLoss(),
"cce": CrossEntropyLoss(),
"mean squared error": MSELoss(),
"mse": MSELoss(),
"mean absolute error": L1Loss(),
"mae": L1Loss(),
"huber loss": SmoothL1Loss(),
}
class RawDataset(Dataset):
""" Simple Wrapper for torch dataset (used by classes such as TorchModel and Dataloader) """
def __init__(self, X, y, output_dtype):
self.X, self.y = assure_tensor(X, torch.float32), assure_tensor(y, output_dtype)
def __len__(self):
return len(self.X)
def __getitem__(self, idx):
return self.X[idx], self.y[idx]
def unflatten(data, dims):
""" Given a batch of flattened 1-D tensors, reshapes them into specified N-Dims. """
B = data.size(0)
return data.view(B, *dims)
def flatten(data):
""" Given a batch of N-D tensors, reshapes them into 1-Dim flat tensor. """
B = data.size(0)
return data.view(B, -1)
def vectorize(data, dtype=torch.uint8):
""" Vectorizes a tuple or int into a torch tensor. """
    # No need to vectorize an int; it's broadcast automatically
if isinstance(data, int):
return data
elif isinstance(data, tuple):
        # If all elements were equal an int would suffice, but keep it general
return torch.tensor(data, dtype=dtype)
else:
        raise ValueError("Invalid argument for vectorize: data must be an int or a tuple of ints.")
def assure_tensor(data, dtype):
if not isinstance(data, torch.Tensor): data = torch.tensor(data, dtype=dtype)
elif data.dtype != dtype: data = data.clone().detach().type(dtype)
return data
def compute_output(dims, kernel_size, stride, padding=0):
""" Computes the output shape given an input shape, kernel size, stride and padding. """
dims = vectorize(dims)
padding = vectorize(padding)
kernel_size = vectorize(kernel_size)
stride = vectorize(stride)
# Vectorized computation
dims = (dims - kernel_size + 2 * padding) / stride + 1
return tuple(dims.numpy())
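# Worked example (illustrative): a 28x28 input with kernel_size=3, stride=1 and
# padding=1 keeps its spatial size, since (28 - 3 + 2*1) / 1 + 1 = 28, so
# compute_output((28, 28), kernel_size=3, stride=1, padding=1) yields
# (28.0, 28.0) (floats, because the formula uses true division).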
# TODO - Remove. Callbacks from now on
class TorchDataset(Dataset):
def __init__(self, X, y, output_dtype, validation_split=0.0, X_val=None, y_val=None):
super(TorchDataset, self).__init__()
from sklearn.model_selection import train_test_split
X, y = assure_tensor(X, torch.float32), assure_tensor(y, output_dtype)
self.no_validation_data = True
if validation_split > 0.0:
if X_val is not None and y_val is not None:
                print("WARN: When passing the validation_split argument, don't pass X_val or y_val.", flush=True)
X, X_val, y, y_val = train_test_split(X, y, test_size=validation_split)
self.no_validation_data = False
elif X_val is not None and y_val is not None:
X_val, y_val = assure_tensor(X_val, torch.float32), assure_tensor(y_val, output_dtype)
self.no_validation_data = False
else:
X_val, y_val = X, y
self.X = X
self.y = y
self.X_val = X_val
self.y_val = y_val
def __len__(self):
return len(self.X)
def __getitem__(self, idx):
return self.X[idx], self.y[idx]
|
1654391
|
from spotdl.download.tracking_file_handler import DownloadTracker
from spotdl.download.progress_ui_handler import DisplayManager
from spotdl.download.ffmpeg import convert, has_correct_version
from spotdl.download.embed_metadata import set_id3_data
from spotdl.download.downloader import DownloadManager
|
1654396
|
from django import template
register = template.Library()
@register.simple_tag(takes_context=True)
def render_plugin_preview(context, plugin):
request = context['request']
try:
content_renderer = request.toolbar.content_renderer
except AttributeError:
from cms.plugin_rendering import ContentRenderer
content_renderer = ContentRenderer(request)
content = content_renderer.render_plugin(
instance=plugin,
context=context,
editable=False,
)
return content
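# Illustrative template usage (the library name to load depends on this
# module's file name, shown here as a placeholder):
#
#     {% load <templatetag_module> %}
#     {% render_plugin_preview plugin %}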
|
1654434
|
from examples.datasets.data.train_example_data import train_example_data
from neuralogic.core import Relation, Template, Var, Term, Dataset
dataset = Dataset()
template = Template()
# Naive trains - one big example
# fmt: off
shapes = [Term.ellipse, Term.rectangle, Term.bucket, Term.hexagon, Term.u_shaped]
roofs = [Term.jagged, Term.arc, Term.none, Term.flat, Term.peaked]
loadshapes = [Term.hexagon, Term.triangle, Term.diamond, Term.rectangle, Term.circle]
vagon_atoms = [Relation.shape, Relation.length, Relation.sides, Relation.wheels, Relation.loadnum, Relation.loadshape, Relation.roof]
X = Var.X
Y = Var.Y
template.add_rules(
[
*[Relation.shape(X, Y) <= Relation.shape(X, Y, s)[1,] for s in shapes],
*[Relation.length(X, Y) <= Relation.length(X, Y, s)[1,] for s in [Term.short, Term.long]],
*[Relation.sides(X, Y) <= Relation.sides(X, Y, s)[1,] for s in [Term.not_double, Term.double]],
*[Relation.roof(X, Y) <= Relation.roof(X, Y, s)[1,] for s in roofs],
*[Relation.wheels(X, Y) <= Relation.wheels(X, Y, s)[1,] for s in [2, 3]],
*[Relation.loadnum(X, Y) <= Relation.loadnum(X, Y, s)[1,] for s in [0, 1, 2, 3]],
*[Relation.loadshape(X, Y) <= Relation.loadshape(X, Y, s)[1,] for s in loadshapes],
Relation.vagon(X, Y) <= (atom(X, Y)[1,] for atom in vagon_atoms),
*[Relation.train(X) <= Relation.vagon(X, i)[1,] for i in [1, 2, 3, 4]],
Relation.direction(X) <= Relation.train(X)[1,],
]
)
dataset.add_example(
[
atom
for _, id, pos, shape, length, sides, roof, wheels, load, loadnum in train_example_data
for atom in [
Relation.shape(id, pos, shape),
Relation.length(id, pos, length),
Relation.sides(id, pos, sides),
Relation.roof(id, pos, roof),
Relation.wheels(id, pos, wheels),
Relation.loadshape(id, pos, load),
Relation.loadnum(id, pos, loadnum),
]
]
)
dataset.add_queries(
[*[Relation.direction(i)[1.0] for i in range(1, 11)], *[Relation.direction(i)[-1.0] for i in range(11, 21)]]
)
|
1654460
|
import re
from threading import Thread
from flask import current_app
from flask_mail import Message
from .extensions import mail
base_prefix = '/api'
default_page_size = 10
default_chat_page_size = 25
response_delay = 1.5
def send_email_async(app, message):
with app.app_context():
mail.send(message)
def send_email(subject, recipients, html):
message = Message(subject=subject, recipients=recipients, html=html)
return Thread(target=send_email_async, args=(current_app._get_current_object(), message)).start() # noqa
def validate_password(password):
return get_password_regex().match(password)
# [8; 25] characters, at least 1 digit, at least 1 lowercase letter, at least 1 uppercase letter, no whitespaces,
# at least 1 special character
def get_password_regex():
return re.compile(r'^(?=\S{8,25}$)(?=.*?\d)(?=.*?[a-z])(?=.*?[A-Z])(?=\S+$)(?=.*?[^A-Za-z\s0-9])')
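# Illustrative examples: 'Str0ng!pass' satisfies every lookahead above, while
# 'weakpass1' fails because it lacks an uppercase letter and a special character.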
def is_allowed_image(filename):
return '.' in filename and filename.split('.')[-1].lower() in {'png', 'jpg', 'jpeg'}
|
1654464
|
from mermaid_demos.rdmm_synth_data_generation.create_poly import Poly
import numpy as np
class Rectangle(Poly):
def __init__(self,setting,scale=1.):
name, img_sz, center_pos, height, width, rotation = setting['name'],setting['img_sz'], setting['center_pos'], setting['height'], setting['width'], setting['rotation']
self.center_pos = center_pos
height,width = self.rescale(height*2,width*2,scale)
self.height = height
self.width = width
vertices = self.get_point()
setting_for_poly = dict(name=name, img_sz=img_sz, vertices=vertices,rotation=rotation)
super(Rectangle,self).__init__(setting_for_poly)
self.name = setting['name']
        self.type = 'rect'
        self.shape_info = {'center_pos': center_pos, 'height': height, 'width': width}
def rescale(self,height,width,scale):
return height*scale, width*scale
def get_point(self):
        r = self.height
c = self.width
point = self.center_pos
points = [[point[0]-r/2,point[0] + r/2,point[0] + r/2,point[0]-r/2],
[point[1]-c/2, point[1]-c/2,point[1] + c/2,point[1]+c/2]]
points = np.array(points)
return points
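# Illustrative settings dict (keys taken from __init__ above, values arbitrary):
#   setting = {'name': 'rect1', 'img_sz': [200, 200], 'center_pos': [100, 100],
#              'height': 40, 'width': 60, 'rotation': 0}
#   rect = Rectangle(setting, scale=1.)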
|
1654495
|
from datetime import timedelta
from sqlalchemy.sql import func
from flask import current_app, request
from typing import List
from zeus.api.utils import stats
from zeus.config import db
from zeus.models import Build, Repository, Revision
from zeus.utils import timezone
from zeus.vcs import vcs_client
from .base_repository import BaseRepositoryResource
STAT_CHOICES = frozenset(
(
"builds.aborted",
"builds.failed",
"builds.passed",
"builds.errored",
"builds.total",
"builds.duration",
"tests.count",
"tests.count_unique",
"tests.duration",
"coverage.lines_covered",
"coverage.lines_uncovered",
"coverage.diff_lines_covered",
"coverage.diff_lines_uncovered",
"style_violations.count",
"bundle.total_asset_size",
)
)
def get_revisions(repo: Repository, branch: str = None, limit: int = 200) -> List[str]:
if current_app.config.get("MOCK_REVISIONS"):
return (
db.session.query(Revision.sha)
.filter(Revision.repository_id == repo.id)
.order_by(Revision.date_created.desc())
.limit(limit)
.all()
)
if branch is None:
branch = "!default"
return [r["sha"] for r in vcs_client.log(repo.id, limit=limit, branch=branch)]
class RepositoryStatsResource(BaseRepositoryResource):
def get(self, repo: Repository):
"""
Return various stats per-day for the given repository.
"""
stat = request.args.get("stat")
if not stat:
return self.error("invalid stat")
if stat not in STAT_CHOICES:
return self.error("invalid stat")
aggregate = request.args.get("aggregate", "time")
if aggregate not in ("time", "build"):
return self.error("invalid aggregate")
branch = request.args.get("branch")
since = request.args.get("since")
if since:
date_end = timezone.fromtimestamp(float(since))
else:
date_end = timezone.now() + timedelta(days=1)
date_end = date_end.replace(minute=0, second=0, microsecond=0)
if aggregate == "time":
resolution = request.args.get("resolution", "1d")
points = int(request.args.get("points") or stats.POINTS_DEFAULT[resolution])
if resolution == "1h":
grouper = func.date_trunc("hour", Build.date_created)
decr_res = stats.decr_hour
elif resolution == "1d":
grouper = func.date_trunc("day", Build.date_created)
date_end = date_end.replace(hour=0)
decr_res = stats.decr_day
elif resolution == "1w":
grouper = func.date_trunc("week", Build.date_created)
date_end = date_end.replace(hour=0)
date_end -= timedelta(days=date_end.weekday())
decr_res = stats.decr_week
elif resolution == "1m":
grouper = func.date_trunc("month", Build.date_created)
date_end = date_end.replace(hour=0, day=1)
decr_res = stats.decr_month
elif aggregate == "build":
grouper = Build.number
points = int(request.args.get("points") or 100)
queryset = stats.build_queryset(stat, grouper, repo_id=repo.id)
if aggregate == "time":
date_begin = date_end
for _ in range(points):
date_begin = decr_res(date_begin)
queryset = queryset.filter(
Build.date_created >= date_begin, Build.date_created < date_end
)
elif aggregate == "build":
revision_shas = get_revisions(repo, branch, limit=points * 2)
queryset = queryset.filter(Build.revision_sha.in_(revision_shas)).order_by(
Build.number.desc()
)
queryset = queryset.limit(points)
if aggregate == "time":
results = {
                # HACK(dcramer): force (but don't convert) the timezone to be utc
                # while this isn't correct, we're not looking for correctness yet
k.replace(tzinfo=timezone.utc): v
for k, v in queryset
}
data = []
cur_date = date_end
for _ in range(points):
cur_date = decr_res(cur_date)
data.append(
{
"time": int(float(cur_date.strftime("%s.%f")) * 1000),
"value": (
int(float(results[cur_date]))
if results.get(cur_date)
else (0 if stat in stats.ZERO_FILLERS else None)
),
}
)
elif aggregate == "build":
data = [
{
"build": k,
"value": (
int(float(v))
if v is not None
else (0 if stat in stats.ZERO_FILLERS else None)
),
}
for k, v in sorted(queryset, key=lambda x: -x[0])
]
return self.respond(data)
|
1654513
|
from bitmovin_api_sdk.encoding.encodings.keyframes.keyframes_api import KeyframesApi
from bitmovin_api_sdk.encoding.encodings.keyframes.keyframe_list_query_params import KeyframeListQueryParams
|
1654590
|
from direct.directnotify import DirectNotifyGlobal
from pirates.distributed.DistributedInteractiveAI import DistributedInteractiveAI
from pirates.movement.DistributedMovingObjectAI import DistributedMovingObjectAI
from pirates.quest.DistributedQuestGiverAI import DistributedQuestGiverAI
class DistributedReputationAvatarAI(DistributedMovingObjectAI, DistributedInteractiveAI, DistributedQuestGiverAI):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedReputationAvatarAI')
def __init__(self, air):
DistributedMovingObjectAI.__init__(self, air)
DistributedInteractiveAI.__init__(self, air)
DistributedQuestGiverAI.__init__(self, air)
|
1654604
|
import torch.utils.data
from data.base_data_loader import BaseDataLoader
def CreateDataLoader(args):
data_loader = CustomDatasetDataLoader()
data_loader.initialize(args)
return data_loader
def CreateDataset(args):
dataset = None
from data.single_dataset import TestDataset
dataset = TestDataset()
print("The dataset has been created")
dataset.initialize(args)
return dataset
class CustomDatasetDataLoader(BaseDataLoader):
def name(self):
return 'CustomDatasetDataLoader'
def initialize(self, args):
BaseDataLoader.initialize(self, args)
self.dataset = CreateDataset(args)
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=1,
shuffle=False,
num_workers=1)
def load_data(self):
return self
def __len__(self):
return min(len(self.dataset), self.args.max_dataset_size)
def __iter__(self):
for i, data in enumerate(self.dataloader):
if i >= self.args.max_dataset_size:
break
yield data
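# A minimal usage sketch (not part of the original file; it assumes BaseDataLoader.initialize
# stores `args` as self.args and that `args` carries a `max_dataset_size` attribute, since
# __len__/__iter__ above rely on it):
#   data_loader = CreateDataLoader(args)
#   dataset = data_loader.load_data()
#   for data in dataset:
#       run_inference(data)  # `run_inference` is a hypothetical consumer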
|
1654632
|
import torch
import torch.nn.functional as F
from ..models.mingpt import GPT, CGPT, NoiseInjection
from tools.utils import to_cuda
from models import load_network, save_network, print_network
from tqdm import tqdm
from ..modules.vmf import nll_vMF
class Transformer(torch.nn.Module):
def __init__(self, opt, is_train=True, is_main=True, logger=None):
super().__init__()
self.opt = opt
self.is_main = is_main
self.net_t = self.initialize_networks(is_train)
if is_train:
self.opt_t = self.create_optimizers(self.opt)
self.logger = logger if self.is_main else None
height, width = self.opt.z_shape
self.size = height * width
self.state_size = self.opt.state_size
self.tot_size = self.size + self.state_size
def forward(self, data, prefix='', mode='', total_len=None, log=False, global_iter=None, show_progress=False):
code, state_code, cond_code, delta_length_cond, vid_lbl = self.preprocess_input(data)
if mode == 'transformer':
t_loss = self.compute_transformer_loss(code, state_code, cond_code, delta_length_cond, vid_lbl, prefix, log, global_iter)
return t_loss
if mode == 'eval_transformer':
with torch.no_grad():
                t_loss = self.compute_transformer_loss(code, state_code, cond_code, delta_length_cond, vid_lbl, prefix, log, global_iter, is_eval=True)
return t_loss
if mode == 'inference':
return self.generate_fake(code, state_code, cond_code, delta_length_cond, vid_lbl, total_len, show_progress)
else:
raise ValueError(f"mode '{mode}' is invalid")
def preprocess_input(self, data):
data["code"] = to_cuda(data, "code", flatten_empty=False)
data["state_code"] = to_cuda(data, "state_code", flatten_empty=False)
data["cond_code"] = to_cuda(data, "cond_code")
data["vid_lbl"] = to_cuda(data, "vid_lbl")
data["delta_length_cond"] = to_cuda(data, "delta_length_cond")
return data["code"], data["state_code"], data["cond_code"], data["delta_length_cond"], data["vid_lbl"]
def initialize_networks(self, is_train):
if self.opt.is_continuous:
net_t = CGPT(n_proposals=self.opt.n_proposals, block_size=self.opt.z_len, n_layer=self.opt.n_layer,
n_head=self.opt.n_head, n_embd=self.opt.n_embd, n_in=self.opt.n_in,
resid_noise=self.opt.resid_noise).cuda()
else:
num_lbl = len(self.opt.categories) if self.opt.categories is not None else None
net_t = GPT(vocab_size=self.opt.z_num, block_size=self.opt.z_len, n_layer=self.opt.n_layer,
n_head=self.opt.n_head, n_embd=self.opt.n_embd, emb_mode=self.opt.emb_mode,
shape=self.opt.z_shape, state_vocab_size=self.opt.state_num, num_blocks=self.opt.num_blocks,
state_size=self.opt.state_size, use_start_token=self.opt.use_start_token, use_lbl=self.opt.cat,
num_lbl=num_lbl, state_front=self.opt.state_front).cuda()
if self.is_main:
net_t = load_network(net_t, "transformer_t", self.opt, head_to_n=self.opt.head_to_n)
return net_t
def save_model(self, global_iter, latest=False, best=False):
save_network(self.net_t, "transformer_t", global_iter, self.opt, latest, best)
# Following minGPT:
# This long function is unfortunately doing something very simple and is being very defensive:
# We are separating out all parameters of the model into two buckets: those that will experience
# weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
# We are then returning the PyTorch optimizer object.
def create_optimizers(self, opt):
param_dict = {pn: p for pn, p in self.net_t.named_parameters()}
if opt.finetune_head and opt.finetune_f is None:
optim_groups = [{"params": [param_dict["head.weight"]], "weight_decay": 0.01, "lr": opt.lr}]
else:
# separate out all parameters to those that will and won't experience regularizing weight decay
decay = set()
no_decay = set()
whitelist_weight_modules = (torch.nn.Linear,)
blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding, NoiseInjection)
for mn, m in self.net_t.named_modules():
for pn, p in m.named_parameters():
fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
if pn.endswith('bias'):
# all biases will not be decayed
no_decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
# weights of whitelist modules will be weight decayed
decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
# weights of blacklist modules will NOT be weight decayed
no_decay.add(fpn)
# special case the position embedding parameter in the root GPT module as not decayed
            for emb_name in ('start_tok_emb', 'pos_emb', 'h_emb', 'w_emb', 's_emb',
                             't_emb', 'state_pos_emb', 'state_s_emb'):
                if emb_name in param_dict:
                    no_decay.add(emb_name)
# validate that we considered every parameter
inter_params = decay & no_decay
union_params = decay | no_decay
assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params),)
assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" % (str(param_dict.keys() - union_params),)
# create the pytorch optimizer object
if opt.finetune_head:
optim_groups = [{"params": [param_dict[pn] for pn in sorted(list(decay)) if pn != "head.weight"], "weight_decay": 0.01, "lr": opt.lr * opt.finetune_f},
{"params": [param_dict["head.weight"]], "weight_decay": 0.01, "lr": opt.lr},
{"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0, "lr": opt.lr * opt.finetune_f}]
else:
optim_groups = [{"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": 0.01, "lr": opt.lr},
{"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0, "lr": opt.lr}]
if opt.optimizer == "adamw":
opt_t = torch.optim.AdamW(optim_groups, betas=(opt.beta1, opt.beta2))
else:
raise NotImplementedError
return opt_t
def compute_transformer_loss(self, code, state_code, cond_code, delta_length_cond, vid_lbl, prefix, log, global_iter, is_eval=False):
code = code[:, :self.opt.z_len] # limit input to transformer capacity
state_nll_loss = None
if self.opt.is_continuous:
if self.opt.p2p:
pred = self.net_t(code[:, :-1], cond_code, delta_length_cond, lbl_idx=vid_lbl)
else:
pred = self.net_t(code[:, :-1], lbl_idx=vid_lbl)
tgt = code[:, 1:]
vmf_loss = None
other_vmf_loss = None
cosine_loss = None
other_cosine_loss = None
# nll_loss = None
nll_loss = F.mse_loss(pred, tgt)
t_loss = nll_loss
# if self.opt.n_proposals > 1:
# t_loss = torch.tensor(0., requires_grad=True).cuda()
# logits, proposals = pred
# nm_proposals = proposals / torch.norm(proposals, p=2, dim=3, keepdim=True) if self.opt.normalize_pred else proposals
# nm_tgt = tgt / torch.norm(tgt, p=2, dim=2, keepdim=True) if self.opt.normalize_tgt else tgt
# cosine_dist = - (nm_proposals * nm_tgt.unsqueeze(2)).sum(dim=3)
# closest_proposals = cosine_dist.argmin(dim=2, keepdim=True)
# nll_loss = F.cross_entropy(logits.view(-1, logits.size(-1)), closest_proposals.view(-1))
# t_loss += nll_loss
# if self.opt.knn is not None:
# k_closest = max(1, int(self.opt.knn * (1 - global_iter / self.opt.knn_decay_iter)))
# closest_proposals = (-cosine_dist).topk(dim=2, k=k_closest)[1]
# else:
# k_closest = 1
# closest_onehot = torch.zeros(*closest_proposals.shape[:2], self.opt.n_proposals).cuda().scatter_(2, closest_proposals, 1)
# if self.opt.continuous_loss == "cosine":
# pred = nm_proposals[closest_onehot.bool()].view(*nm_proposals.shape[:2], k_closest, -1)
# cosine_loss = - (pred * tgt.unsqueeze(2)).sum(dim=3).mean()
# if self.opt.knn is not None:
# t_loss += cosine_loss
# else:
# other_preds = nm_proposals[~closest_onehot.bool()].view(*nm_proposals.shape[:2], self.opt.n_proposals - k_closest, -1)
# other_cosine_loss = - (other_preds * tgt.unsqueeze(2)).sum(dim=3).mean()
# t_loss += (1 - self.opt.epsilon_other) * cosine_loss + self.opt.epsilon_other * other_cosine_loss
# elif self.opt.continuous_loss == "vmf":
# pred = proposals[closest_onehot.bool()].view(*nm_proposals.shape[:2], k_closest, -1)
# vmf_loss = nll_vMF(pred, tgt.unsqueeze(2))
# if self.opt.knn is not None:
# t_loss += vmf_loss
# else:
# other_preds = proposals[~closest_onehot.bool()].view(*nm_proposals.shape[:2], self.opt.n_proposals - k_closest, -1)
# other_vmf_loss = nll_vMF(other_preds, tgt.unsqueeze(2))
# t_loss += (1 - self.opt.epsilon_other) * vmf_loss + self.opt.epsilon_other * other_vmf_loss
#
# else:
# if self.opt.continuous_loss == "cosine":
# if self.opt.normalize_pred:
# pred = pred / torch.norm(pred, p=2, dim=2, keepdim=True)
# if self.opt.normalize_tgt:
# tgt = tgt / torch.norm(tgt, p=2, dim=2, keepdim=True)
# cosine_loss = - (pred * tgt).sum(dim=2).mean()
# t_loss = cosine_loss
# elif self.opt.continuous_loss == "vmf":
# vmf_loss = nll_vMF(pred, tgt)
# t_loss = vmf_loss
nrec_loss = None
nrec_momentum_loss = None
else:
logits = self.net_t(code[:, :-1], cond_idx=cond_code, state_idx=state_code, delta_length_cond=delta_length_cond, lbl_idx=vid_lbl)
if 0 not in state_code.size():
if self.opt.state_front:
state_i = [i for i in range(logits.size(1)) if (i + 1) < self.state_size * self.opt.num_blocks]
frame_i = [i for i in range(logits.size(1)) if (i + 1) >= self.state_size * self.opt.num_blocks]
else:
state_i = [i for i in range(logits.size(1)) if (i + 1) % self.tot_size < self.state_size]
frame_i = [i for i in range(logits.size(1)) if (i + 1) % self.tot_size >= self.state_size]
state_logits = logits[:, state_i, :self.opt.state_num]
logits = logits[:, frame_i]
target = code
else:
if self.opt.use_start_token or self.opt.cat:
target = code
else:
target = code[:, 1:]
nll_loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)), target.reshape(-1))
nrec_loss = None
other_vmf_loss = None
cosine_loss = None
other_cosine_loss = None
nrec_momentum_loss = None
vmf_loss = None
t_loss = nll_loss
if 0 not in state_code.size():
state_nll_loss = F.cross_entropy(state_logits.reshape(-1, state_logits.size(-1)), state_code[:, 1:].reshape(-1))
t_loss += state_nll_loss
if self.logger and not is_eval:
# log scalars every step
self.logger.log_scalar(f"transformer/{prefix}nll", nll_loss, global_iter)
self.logger.log_scalar(f"transformer/{prefix}state_nll", state_nll_loss, global_iter)
self.logger.log_scalar(f"transformer/{prefix}cosine", cosine_loss, global_iter)
self.logger.log_scalar(f"transformer/{prefix}other_cosine", other_cosine_loss, global_iter)
self.logger.log_scalar(f"transformer/{prefix}vmf", vmf_loss, global_iter)
self.logger.log_scalar(f"transformer/{prefix}other_vmf", other_vmf_loss, global_iter)
self.logger.log_scalar(f"transformer/{prefix}nrec", nrec_loss, global_iter)
self.logger.log_scalar(f"transformer/{prefix}nrec_momentum", nrec_momentum_loss, global_iter)
return t_loss
def top_k_logits(self, logits, k):
v, _ = torch.topk(logits, k)
out = logits.clone()
out[out < v[..., [-1]]] = -float('Inf')
return out
@torch.no_grad()
def generate_fake(self, code, state_code, cond_code, delta_length_cond, vid_lbl, total_len, show_progress):
''' If 'total_len' is 'None' generate tokens with transformer until the capacity 'z_len' of the transformer has
been reached. Otherwise, fill the code until 'total_len' is reached with a 'z_chunk' stride.
'''
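        # Worked example with hypothetical values (not taken from the original code): with
        # z_len=16 and z_chunk=4, a request for total_len=24 first fills the code up to the
        # 16-token capacity, then runs two chunked passes of 4 tokens each to reach 24.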
if total_len is None:
code, state_code = self.fill_code(code, state_code, cond_code, delta_length_cond, vid_lbl, show_progress=show_progress)
return {"code": code, "state_code": state_code}
if total_len <= self.opt.z_len:
add_len = total_len - code.size(1)
add_len -= cond_code.size(1) if 0 not in cond_code.size() else 0
add_len -= min(state_code.size(1), self.opt.state_size * self.opt.num_blocks) if 0 not in state_code.size() else 0
code, state_code = self.fill_code(code, state_code, cond_code, delta_length_cond, vid_lbl, add_len=add_len, show_progress=show_progress)
return {"code": code, "state_code": state_code}
if show_progress:
pbar = tqdm(total=int(total_len), desc="Processing codes")
# 1. fill until transformer capacity 'z_len' is reached
code, state_code = self.fill_code(code, state_code, cond_code, delta_length_cond, vid_lbl, show_progress=show_progress)
# 2. predict 'z_chunk' by 'z_chunk'
curr_len = self.opt.z_len
if show_progress:
pbar.update(curr_len)
i = 1
while curr_len < total_len:
add_len = total_len - curr_len if total_len - curr_len < self.opt.z_chunk else None
if 0 not in cond_code.size():
delta_length_cond -= 1
# free some capacity for one chunk
tmp_state_code = state_code[:, i * self.state_size:] if 0 not in state_code.size() else state_code
tmp_code = code[:, i * self.size:]
# predict one chunk
pred_code, pred_state_code = self.fill_code(tmp_code, tmp_state_code, cond_code, delta_length_cond, vid_lbl, add_len=add_len, show_progress=show_progress)
# update code
delta_code = pred_code.size(1) - tmp_code.size(1)
code = torch.cat([code, pred_code[:, -delta_code:]], dim=1)
if 0 not in state_code.size():
delta_state_code = pred_state_code.size(1) - tmp_state_code.size(1)
if delta_state_code > 0:
state_code = torch.cat([state_code, pred_state_code[:, -delta_state_code:]], dim=1)
# else:
# curr_len += self.state_size
# keep track of progress
curr_len += add_len if add_len is not None else self.opt.z_chunk
if show_progress:
# if add_len is not None:
# print("add_len", add_len)
# else:
# print("z_chunk", self.opt.z_chunk)
pbar.update(add_len if add_len is not None else self.opt.z_chunk)
i += 1
if show_progress:
pbar.close()
return {"code": code, "state_code": state_code}
def fill_code(self, code, state_code, cond_code, delta_length_cond, vid_lbl, add_len=None, show_progress=False):
bs = code.size(0)
log_p = None
# compute add_len
if add_len is None:
add_len = self.opt.z_len - code.size(1)
add_len -= cond_code.size(1) if 0 not in cond_code.size() else 0
add_len -= min(state_code.size(1), self.opt.state_size * self.opt.num_blocks) if 0 not in state_code.size() else 0
# iterate
pbar = tqdm(range(add_len), desc="Filling codes", leave=False) if show_progress else range(add_len)
for _ in pbar:
if self.opt.is_continuous:
pred = self.net_t(code, single=True)
if self.opt.normalize_pred:
pred = pred / torch.norm(pred, p=2, dim=2, keepdim=True)
code = torch.cat((code, pred), dim=1)
else:
logits = self.net_t(code, cond_idx=cond_code, state_idx=state_code, delta_length_cond=delta_length_cond, lbl_idx=vid_lbl)
# determine if prediction needs to be affected to code or state_code
is_state = 0 not in state_code.size() and logits.size(1) % self.tot_size < self.state_size
if is_state:
logits = logits[:, :, :self.opt.state_num]
icode = self.get_icode(logits, self.opt.temperature_state, self.opt.top_k_state, self.opt.sample_state)[0]
state_code = torch.cat((state_code, icode), dim=1)
else:
if self.opt.beam_size is not None:
if code.size(0) == bs:
# expand
code = code.unsqueeze(1).repeat(1, self.opt.beam_size, 1).view(bs * self.opt.beam_size, -1)
icode, ilog_p = self.get_icode(logits, self.opt.temperature, self.opt.top_k, self.opt.sample, n=self.opt.beam_size)
log_p = ilog_p
icode = icode.view(-1, 1)
else:
if not self.opt.no_sample:
icode, ilog_p = self.get_icode(logits, self.opt.temperature, self.opt.top_k, self.opt.sample, n=1)
log_p += ilog_p.view(bs, self.opt.beam_size)
icode = icode.view(-1, 1)
else:
# expand
icode, ilog_p = self.get_icode(logits, self.opt.temperature, self.opt.top_k, self.opt.sample, n=self.opt.beam_size)
log_p = log_p.unsqueeze(1).repeat(1, self.opt.beam_size, 1)
log_p += ilog_p.view(bs, self.opt.beam_size, self.opt.beam_size)
icode = icode.view(bs, self.opt.beam_size * self.opt.beam_size)
log_p = log_p.view(bs, self.opt.beam_size * self.opt.beam_size)
# prune
log_p, keep = torch.topk(log_p, dim=1, k=self.opt.beam_size)
icode = torch.gather(icode, dim=1, index=keep).view(-1, 1)
code = code.unsqueeze(1).repeat(1, self.opt.beam_size, 1).view(bs, self.opt.beam_size * self.opt.beam_size, -1)
keep = keep.unsqueeze(-1).repeat(1, 1, code.size(-1))
code = torch.gather(code, dim=1, index=keep).view(-1, code.size(-1))
else:
icode = self.get_icode(logits, self.opt.temperature, self.opt.top_k, self.opt.sample)[0]
code = torch.cat((code, icode), dim=1)
if self.opt.beam_size is not None:
# keep best hypothesis
_, best = torch.topk(log_p, dim=1, k=1)
code = code.view(bs, self.opt.beam_size, -1)
best = best.unsqueeze(-1).repeat(1, 1, code.size(-1))
code = torch.gather(code, dim=1, index=best).view(bs, code.size(-1))
return code, state_code
def get_icode(self, logits, temperature, top_k, sample, n=1):
# pluck the logits at the final step and scale by temperature
logits = logits[:, -1] / temperature
# optionally crop probabilities to only the top k options
if top_k is not None:
logits = self.top_k_logits(logits, top_k)
# apply softmax to convert to probabilities
probs = F.softmax(logits, dim=-1)
# sample from the distribution or take the most likely
if sample:
icode = torch.multinomial(probs, num_samples=n)
else:
_, icode = torch.topk(probs, k=n, dim=-1)
ilog_p = torch.log(torch.gather(probs, 1, icode))
return icode, ilog_p
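# Rough driver sketch (an assumption, not from the original code; construction of `opt`,
# `data` and `logger` is omitted):
#   model = Transformer(opt, is_train=True, logger=logger)
#   loss = model(data, mode='transformer', global_iter=step)      # training loss
#   out = model(data, mode='inference', total_len=None)           # fills codes up to opt.z_len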
|
1654638
|
from Orange.classification import (
SVMLearner as SVCLearner,
LinearSVMLearner as LinearSVCLearner,
NuSVMLearner as NuSVCLearner,
)
from Orange.modelling import SklFitter
from Orange.regression import SVRLearner, LinearSVRLearner, NuSVRLearner
__all__ = ["SVMLearner", "LinearSVMLearner", "NuSVMLearner"]
class SVMLearner(SklFitter):
__fits__ = {"classification": SVCLearner, "regression": SVRLearner}
class LinearSVMLearner(SklFitter):
__fits__ = {"classification": LinearSVCLearner, "regression": LinearSVRLearner}
class NuSVMLearner(SklFitter):
__fits__ = {"classification": NuSVCLearner, "regression": NuSVRLearner}
|
1654690
|
from smtplib import SMTPException
from django.core.management.base import BaseCommand
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from agony import settings
from agony.models import QandA
def process():
responses_to_send = QandA.objects.filter(notify_sender=True). \
filter(sender_notified=False)
for response in responses_to_send:
subject = "Response to your question to GroundUp"
html_message = render_to_string('agony/response.html',
{'response': response})
message = strip_tags(html_message)
try:
send_mail(
subject,
message,
settings.AGONY_EMAIL,
[response.sender_email] + settings.AGONY_EMAIL_RECIPIENTS,
html_message=html_message
)
print("Response sent to: ", response.sender_email)
except SMTPException as err:
print("Error sending response to agony writer {0}: {1}".
format(response.sender_email, err))
response.sender_notified = True
response.save()
return len(responses_to_send)
class Command(BaseCommand):
help = 'Send responses to people who write to agony aunt.'
def handle(self, *args, **options):
response_count = process()
print("Agony aunt responses: ", response_count)
|
1654694
|
import os
import csv
__all__ = ["evaluate", "evaluate_dataset"]
def evaluate(
args,
model,
train_loader,
val_loader,
test_loader,
device,
metrics,
custom_header=None,
):
header = []
results = []
loaders = dict(train=train_loader, validation=val_loader, test=test_loader)
for datasplit in args.split:
header += ["{} MAE".format(datasplit), "{} RMSE".format(datasplit)]
derivative = model.output_modules[0].derivative
if derivative is not None:
header += [
"{} MAE ({})".format(datasplit, derivative),
"{} RMSE ({})".format(datasplit, derivative),
]
results += evaluate_dataset(metrics, model, loaders[datasplit], device)
if custom_header:
header = custom_header
eval_file = os.path.join(args.modelpath, "evaluation.txt")
with open(eval_file, "w") as file:
wr = csv.writer(file)
wr.writerow(header)
wr.writerow(results)
def evaluate_dataset(metrics, model, loader, device):
model.eval()
for metric in metrics:
metric.reset()
for batch in loader:
batch = {k: v.to(device) for k, v in batch.items()}
result = model(batch)
for metric in metrics:
metric.add_batch(batch, result)
results = [metric.aggregate() for metric in metrics]
return results
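# Note (a sketch, not in the original file): `metrics` is assumed to be a list of objects
# exposing reset(), add_batch(batch, result) and aggregate(), which is exactly the protocol
# evaluate_dataset() relies on above; any metric classes with that interface would fit.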
|
1654743
|
from torch import optim
import os, sys, argparse
from experiments import APHYNITYExperiment
from networks import *
from forecasters import *
from utils import init_weights
from datasets import init_dataloaders
__doc__ = '''Training APHYNITY.'''
def cmdline_args():
# Make parser object
p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
p.add_argument("dataset", type=str,
help='''choose dataset:
'rd' - Reaction-diffusion equation
'wave' - Wave equation
'pendulum' - Pendulum
''')
p.add_argument("-r", "--root", type=str, default='./exp',
help='''root path for the experiments. (default: ./exp)''')
p.add_argument("-p", "--phy", type=str, default='incomplete',
help='''choose physical model type:
--phy incomplete - Incomplete Param PDE (default)
--phy complete - Complete Param PDE
--phy true - True PDE
--phy none - No physics
''')
p.add_argument("--aug", action=argparse.BooleanOptionalAction, default=True,
help='''enable augmentation:
                            --aug - With NN augmentation (default)
--no-aug - Without NN augmentation
''')
p.add_argument('-d', '--device', type=str, default='cpu',
help='''choose device:
'cpu' - CPU only (default)
'cuda:X' - CUDA device.''')
return p.parse_args()
def train_leads(dataset_name, model_phy_option, model_aug_option, path, device):
train, test = init_dataloaders(dataset_name, os.path.join(path, dataset_name))
if dataset_name == 'rd':
if model_phy_option == 'incomplete':
model_phy = ReactionDiffusionParamPDE(dx=train.dataset.dx, is_complete=False, real_params=None)
elif model_phy_option == 'complete':
model_phy = ReactionDiffusionParamPDE(dx=train.dataset.dx, is_complete=True, real_params=None)
elif model_phy_option == 'true':
model_phy = ReactionDiffusionParamPDE(dx=train.dataset.dx, is_complete=True, real_params=train.dataset.params)
model_aug = ConvNetEstimator(state_c=2, hidden=16)
net = Forecaster(model_phy=model_phy, model_aug=model_aug, is_augmented=model_aug_option)
lambda_0 = 1.0
tau_1 = 1e-3
tau_2 = 1e-3
niter = 1
min_op = 'l2'
if dataset_name == 'wave':
if model_phy_option == 'incomplete':
model_phy = DampedWaveParamPDE(is_complete=False, real_params=None)
elif model_phy_option == 'complete':
model_phy = DampedWaveParamPDE(is_complete=True, real_params=None)
elif model_phy_option == 'true':
model_phy = DampedWaveParamPDE(is_complete=True, real_params=train.dataset.params)
model_aug = ConvNetEstimator(state_c=2, hidden=16)
net = Forecaster(model_phy=model_phy, model_aug=model_aug, is_augmented=model_aug_option)
lambda_0 = 1.0
tau_1 = 1e-4
tau_2 = 1e-3
niter = 3
min_op = 'l2'
if dataset_name == 'pendulum':
if model_phy_option == 'incomplete':
model_phy = DampedPendulumParamPDE(is_complete=False, real_params=None)
elif model_phy_option == 'complete':
model_phy = DampedPendulumParamPDE(is_complete=True, real_params=None)
elif model_phy_option == 'true':
model_phy = DampedPendulumParamPDE(is_complete=True, real_params=train.dataset.params)
model_aug = MLP(state_c=2, hidden=200)
init_weights(model_aug, init_type='orthogonal', init_gain=0.2)
net = Forecaster(model_phy=model_phy, model_aug=model_aug, is_augmented=model_aug_option)
lambda_0 = 1.0
tau_1 = 1e-3
tau_2 = 1
niter = 5
min_op = 'l2_normalized'
optimizer = optim.Adam(net.parameters(), lr=tau_1, betas=(0.9, 0.999))
experiment = APHYNITYExperiment(
train=train, test=test, net=net, optimizer=optimizer,
min_op=min_op, lambda_0=lambda_0, tau_2=tau_2, niter=niter, nlog=10,
nupdate=100, nepoch=50000, path=path, device=device
)
experiment.run()
if __name__ == '__main__':
    if sys.version_info<(3,9,0):
        sys.stderr.write("You need python 3.9 or later to run this script (argparse.BooleanOptionalAction is used).\n")
sys.exit(1)
args = cmdline_args()
path = os.path.join(args.root, args.dataset)
os.makedirs(path, exist_ok=True)
option_dict = {
'incomplete': 'Incomplete Param PDE',
'complete': 'Complete Param PDE',
'true': 'True PDE',
'none': 'No physics'
}
print('#' * 80)
print('#', option_dict[args.phy], 'is used in F_p')
print('#', 'F_a is', 'enabled' if args.aug else 'disabled')
print('#' * 80)
train_leads(args.dataset, model_phy_option=args.phy, model_aug_option=args.aug, path=path, device=args.device)
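# Example invocation (the script filename is an assumption; flags follow the parser above):
#   python train.py pendulum --phy incomplete --aug -d cuda:0 -r ./exp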
|
1654745
|
from django.http import HttpResponse
from django.conf import settings
import base64
class BasicAuthMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
# One-time configuration and initialization.
def __call__(self, request):
# Code to be executed for each request before
# the view (and later middleware) are called.
response = self.process_request(request)
# Code to be executed for each request/response after
# the view is called.
return response
def unauthed(self):
response = HttpResponse("""<html><title>Auth required</title><body>
<h1>Authorization Required</h1></body></html>""", content_type="text/html")
response['WWW-Authenticate'] = 'Basic realm="Development"'
response.status_code = 401
return response
def process_request(self,request):
if settings.PYTHON_ENV in ['admin', 'staging']:
if 'HTTP_AUTHORIZATION' not in request.META:
return self.unauthed()
else:
authentication = request.META['HTTP_AUTHORIZATION']
(authmeth, auth) = authentication.split(' ',1)
if 'basic' != authmeth.lower():
return self.unauthed()
auth = base64.b64decode(auth.strip()).decode('utf-8')
username, password = auth.split(':',1)
if username == settings.BASICAUTH_USERNAME and password == settings.BASICAUTH_PASSWORD:
return self.get_response(request)
return self.unauthed()
else:
return self.get_response(request)
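# Settings this middleware expects (a sketch; the names are taken from the checks above,
# the values are placeholders, and how credentials are stored is left to the deployment):
#   PYTHON_ENV = 'staging'            # basic auth is enforced only for 'admin' and 'staging'
#   BASICAUTH_USERNAME = 'example-user'
#   BASICAUTH_PASSWORD = 'example-pass'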
|
1654757
|
from abc import ABCMeta , abstractmethod
from math import cos , sin
import taichi as ti
from helper_functions import clamp , distance , distance_sqr
from basic_types import Vector , Float , Int
"""
TODO: Not yet complete.
1. bounding_box query
2. ray intersection check
"""
@ti.data_oriented
class Ray:
def __init__(self , origin : Vector , direction :Vector):
self.origin = origin
self.direction = direction
@ti.func
def point_at( self , t : Float ):
return self.origin + self.direction * t
@ti.data_oriented
class BoundingBox:
def __init__(self , point1 , point2 ):
# def __init__(self , lower_corner , upper_corner):
self.lower_corner = ti.Vector([min(point1[0] ,point2[0]) , min(point1[1] , point2[1])])
        self.upper_corner = ti.Vector([max(point1[0] ,point2[0]) , max(point1[1] , point2[1])])
@ti.func
def width(self):
return self.upper_corner[0] - self.lower_corner[0]
@ti.func
def height(self):
return self.upper_corner[1] - self.lower_corner[1]
"""
# def overlaps(self , other) -> bool:
# return not ( \
# (self.upper_corner[0] < other.upper_corner[0] or self.lower_corner[0] > other.lower_corner[0]) \
# or
# (self.upper_corner[1] < other.upper_corner[1] or self.lower_corner[1] > other.upper_corner[1]) \
# )
"""
@ti.func
def contains(self , point)->bool :
return self.upper_corner[0] >= point[0] and self.lower_corner[0] <= point[0] \
and self.upper_corner[1] >= point[1] and self.lower_corner[1] <= point[1]
"""
# def intersects(self , ray)->bool:
# tmin , tmax = 0 , 1e8
# ray_dir_inv = ti.Vector([1.0/ray.direction[0] , 1.0/ray.direction[1]])
# ret = True
# for i in ti.static(range(2)):
# near = (self.lower_corner[i] - ray.origin[i]) * ray_dir_inv[i]
# far = (self.upper_corner[i] - ray.origin[i]) * ray_dir_inv[i]
# if near > far :
# near ,far = far , near
# tmin = max(near , tmin)
# tmax = min(far , tmax)
# if tmin > tmax :
# ret = False
# return ret
"""
@ti.data_oriented
class Transform:
def __init__ (self ):
self.translation = ti.Vector([0.0 ,0.0])
self.orientation = 0.0
@property
def translation (self):
return self._translation
@translation.setter
def translation(self , translation : Vector ):
self._translation = translation
@property
def orientation(self):
return self._orientation
@orientation.setter
def orientation(self , orientation : float):
self._orientation = orientation
self._cos_angle = cos(orientation)
self._sin_angle = sin(orientation)
@ti.func
def to_local(self , point_in_world : Vector) -> Vector:
return self.to_local_direction(point_in_world - self.translation)
@ti.func
def to_local_direction(self , dir_in_world : Vector) -> Vector:
#TODO use rotation matrix
return ti.Vector([
self._cos_angle * dir_in_world[0] + self._sin_angle * dir_in_world[1] ,
-self._sin_angle * dir_in_world[0] + self._cos_angle * dir_in_world[1]
])
@ti.func
def to_local_ray(self , ray : Ray) -> Ray:
return Ray(self.to_local(ray.origin) , self.to_local_direction(ray.direction))
"""
# def to_local_boundingbox(self , box : BoundingBox) -> BoundingBox:
# pass
"""
@ti.func
def to_world(self , point_in_local : Vector)->Vector:
return self.to_world_direction(point_in_local) + self.translation
@ti.func
    def to_world_direction(self , dir_in_local : Vector)->Vector:
        # rotation only; the translation is applied in to_world(), never to directions
        return ti.Vector([
            self._cos_angle * dir_in_local[0] - self._sin_angle * dir_in_local[1] ,
            self._sin_angle * dir_in_local[0] + self._cos_angle * dir_in_local[1]
        ])
@ti.func
def to_world_ray(self, ray : Ray) -> Ray:
return Ray(self.to_world(ray.origin) , self.to_world_direction(ray.direction))
"""
# def to_world_boundingbox(self , box : BoundingBox) -> BoundingBox:
# pass
"""
"""
# class SurfaceRayIntersection:
# def __init__(self):
# self.is_intersecting = False
# self.distance = 1e8
# self.point = [0.0 ,0.0]
# self.normal = [0.0 ,0.0]
# def to_world(self , transform : Transform , is_normal_flipped : bool) :
# self.point = transform.to_world(self.point)
# self.normal = transform.to_world_direction(self.normal)
# if ti.static(is_normal_flipped is True):
# self.normal *= -1,0
"""
@ti.data_oriented
class Surface(metaclass = ABCMeta):
def __init__(self, transfrom : Transform, is_normal_flipped : bool):
self.transform = transfrom
self.is_normal_flipped = is_normal_flipped
@ti.func
def closest_point(self , point : Vector) -> Vector:
return self.transform.to_world(self.closest_point_local(self.transform.to_local(point)))
@ti.func
def closest_normal(self , point : Vector ) -> Vector :
res = self.transform.to_world_direction(self.closest_normal_local(point))
if ti.static(self.is_normal_flipped):
res *= -1.0
return res
"""
# def bounding_box(self):
# pass
# def intersects(self , ray : Ray) ->bool:
# return self.intersects_local(self.transform.to_local_ray(ray))
# def closest_intersection(self , ray : Ray) :
# res = self.closest_interesection_local(self.transform.to_local_ray(ray))
# res.to_world(self.transform , self.is_normal_flipped)
# return res
"""
@ti.func
def closest_distance(self , point : Vector)->float:
return self.closest_distance_local(self.transform.to_local(point))
@ti.func
def is_inside(self , point : Vector) -> bool :
return self.is_normal_flipped is not self.is_inside_local(self.transform.to_local(point))
### ====== abstract methods =================================
# computations in local space
@abstractmethod
def closest_point_local(self , point) -> Vector:
pass
"""
# @abstractmethod
# def bounding_box_local(self)-> BoundingBox:
# pass
# @abstractmethod
# def closest_interesection_local(self , ray : Ray):
# pass
"""
@abstractmethod
def closest_normal_local(self , point) ->Vector:
pass
"""
# def intersects_local(self , ray : Ray) ->bool:
# return self.closest_interesection_local(ray).is_intersecting
"""
@ti.func
def closest_distance_local(self , point:Vector) -> Float:
return distance(point , self.closest_point_local(point))
@ti.func
def is_inside_local(self , point : Vector )->bool :
pl = self.closest_point_local(point)
nl = self.closest_normal_local(point)
return (point - pl).dot(nl) < 0.0
class ImplicitSurface(Surface):
def __init__(self , transform : Transform , is_normal_flipped : bool):
super().__init__(transform, is_normal_flipped)
@abstractmethod
def sign_distance_local(self ,point : Vector) -> Float:
pass
@ti.func
def sign_distance(self , point : Vector) -> Float :
dis = self.sign_distance_local(self.transform.to_local(point))
return -dis if self.is_normal_flipped else dis
@ti.func
def closest_distance_local(self, point :Vector)->Float:
return abs(self.sign_distance_local(point))
@ti.func
def is_inside_local(self ,point: Vector)->bool:
return self.sign_distance_local(point) < 0.0
class SurfaceToImplict(ImplicitSurface):
def __init__(
self ,
surface : Surface ,
transform : Transform = Transform() ,
is_normal_flipped : bool = False):
super().__init__(transform , is_normal_flipped)
self._surface = surface
def surface(self) -> Surface :
return self._surface
@ti.func
def closest_point_local(self , point : Vector)->Vector:
return self._surface.closest_point_local(point)
@ti.func
def closest_distance_local(self , point : Vector)->Float:
return self._surface.closest_distance_local(point)
"""
# def intersects_local(self , ray : Ray) -> bool :
# return self._surface.intersects(ray)
# def bounding_box_local(self) -> BoundingBox:
# return self._surface.bounding_box()
"""
@ti.func
def closest_normal_local(self , point : Vector) -> Vector:
return self._surface.closest_normal_local(point)
@ti.func
def sign_distance_local(self , point : Vector) -> Float:
p = self._surface.closest_point_local(point)
return - distance(p , point) if self._surface.is_inside_local(point) else distance(p , point)
"""
# def closest_interesection_local(self , ray : Ray) -> SurfaceRayIntersection:
# return self._surface.closest_intersection(ray)
"""
@ti.func
def is_inside_local(self , point : Vector) -> bool:
return self._surface.is_inside_local(point)
class Plane(Surface):
def __init__(self , normal , point , transfrom = Transform() , is_normal_flipped = False):
super().__init__(transfrom , is_normal_flipped)
self._normal = normal
self._point = point
@ti.func
def closest_point_local(self ,point : Vector)->Vector:
r = point - self._point
return r - self._normal.dot(r) * self._normal + self._point
"""
# def intersects_local(self ,ray : Ray)->bool:
# return abs(ray.direction.dot(self._normal)) > 0
# def bounding_box_local(self)->BoundingBox:
# pass
"""
@ti.func
def closest_normal_local(self , point :Vector)->Vector:
return self._normal
"""
# def closest_interesection_local(self , ray :Ray)->SurfaceRayIntersection:
# intersect = SurfaceRayIntersection()
# dn = ray.direction.dot(self._normal)
# if abs(dn) > 0:
# t = self._normal.dot(self._point - ray.origin) / dn
# if t >= 0 :
# intersect.is_intersecting = True
# intersect.distance = t
# intersect.point = ray.point_at(t)
# intersect.normal = self._normal
# return intersect
"""
class Box(Surface):
def __init__(
self ,
lower_corner ,
upper_corner ,
transfrom = Transform() ,
is_normal_flipped = False ):
super().__init__(transfrom , is_normal_flipped)
self._bound = BoundingBox(lower_corner , upper_corner)
self._plane = [
Plane(ti.Vector([1,0]), self._bound.upper_corner),
Plane(ti.Vector([0,1]), self._bound.upper_corner),
Plane(ti.Vector([-1,0]),self._bound.lower_corner),
Plane(ti.Vector([0,-1]),self._bound.lower_corner)
]
@ti.func
def closest_point_local(self ,point : Vector)->Vector:
result = point
dis = 1e8
if self._bound.contains(point):
for i in ti.static(range(4)):
p = self._plane[i].closest_point_local(point)
d = distance_sqr(p , point)
if dis > d:
dis = d
result = p
else:
result = clamp(point , self._bound.lower_corner , self._bound.upper_corner)
return result
"""
# def intersects_local(self ,ray : Ray)->bool:
# return self._bound.intersects(ray)
# def bounding_box_local(self)->BoundingBox:
# pass
"""
@ti.func
def closest_normal_local(self , point :Vector)->Vector:
result = self._plane[0]._normal
if self._bound.contains(point):
closest_n = self._plane[0]._normal
dis = 1e8
for i in ti.static(range(4)):
local_p = self._plane[i].closest_point_local(point)
local_dis = distance_sqr(local_p , point)
if local_dis < dis :
closest_n = self._plane[i]._normal
dis = local_dis
result = closest_n
else:
            point_to_closest = point - clamp(point , self._bound.lower_corner,self._bound.upper_corner)
            closest_n = self._plane[0]._normal
            angle = closest_n.dot(point_to_closest)
            for i in ti.static(range(1,4)):
                cos = self._plane[i]._normal.dot(point_to_closest)
if cos > angle :
closest_n = self._plane[i]._normal
angle = cos
result = closest_n
return result
@ti.func
def is_inside_local(self , point : Vector)->bool:
return self._bound.contains(point)
"""
# def closest_interesection_local(self , ray :Ray)->SurfaceRayIntersection:
# pass
"""
class Ball(Surface):
    def __init__(self , mid_point , radius , transfrom = Transform() , is_normal_flipped = False):
        super().__init__(transfrom , is_normal_flipped)
        self._mid = mid_point
        self._radius = abs(radius)
@ti.func
def closest_point_local(self , point : Vector)->Vector:
return self._mid + self.closest_normal_local(point) * self._radius
@ti.func
def is_inside_local(self , point : Vector)->bool:
return distance_sqr(point , self._mid) < (self._radius ** 2)
@ti.func
def closest_normal_local(self , point :Vector )->Vector:
return (point - self._mid).normalized()
@ti.func
def closest_distance_local(self ,point : Vector)->Float:
dis = self._radius - distance(point , self._mid)
return abs(dis)
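# Construction sketch (an assumption, not part of the original file): each surface pairs a
# local-space shape with a Transform, e.g.
#   box = Box(ti.Vector([0.0, 0.0]), ti.Vector([1.0, 1.0]))
#   ball = Ball(ti.Vector([0.5, 0.5]), 0.25)
# and queries such as closest_point() / is_inside() go through the world<->local mapping above.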
|
1654770
|
from a10sdk.common.A10BaseClass import A10BaseClass
class SessionList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param protocol: {"type": "string", "format": "string"}
:param forward_source: {"type": "string", "format": "string"}
:param age: {"type": "number", "format": "number"}
:param app_type: {"type": "string", "format": "string"}
:param forward_dest: {"type": "string", "format": "string"}
:param flags: {"type": "string", "format": "string"}
:param hash: {"type": "number", "format": "number"}
:param reverse_source: {"type": "string", "format": "string"}
:param reverse_dest: {"type": "string", "format": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "session-list"
self.DeviceProxy = ""
self.protocol = ""
self.forward_source = ""
self.age = ""
self.app_type = ""
self.forward_dest = ""
self.flags = ""
self.A10WW_hash = ""
self.reverse_source = ""
self.reverse_dest = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Oper(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param dst_ipv4_addr: {"type": "string", "format": "ipv4-address"}
:param src_ipv6_addr: {"type": "string", "format": "ipv6-address"}
:param src_port: {"type": "number", "format": "number"}
:param dst_ipv6_addr: {"type": "string", "format": "ipv6-address"}
:param name_str: {"type": "string", "format": "string"}
:param total_sessions: {"type": "number", "format": "number"}
:param src_ipv4_addr: {"type": "string", "format": "ipv4-address"}
:param filter_type: {"enum": ["ipv4", "ipv6", "nat44", "nat64", "persist-ipv6-srcp-ip", "persist-ipv6-dst-ip", "persist-ipv6-ssl-id", "persist-dst-ip", "persist-src-ip", "persist-uie", "persist-ssl-id", "radius", "rserver", "vserver", "sip", "sixrd", "filter", "ds-lite", "dns-id-switch"], "type": "string", "format": "enum"}
:param session_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"protocol": {"type": "string", "format": "string"}, "forward-source": {"type": "string", "format": "string"}, "age": {"type": "number", "format": "number"}, "app-type": {"type": "string", "format": "string"}, "forward-dest": {"type": "string", "format": "string"}, "flags": {"type": "string", "format": "string"}, "hash": {"type": "number", "format": "number"}, "reverse-source": {"type": "string", "format": "string"}, "optional": true, "reverse-dest": {"type": "string", "format": "string"}}}]}
:param dest_port: {"type": "number", "format": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "oper"
self.DeviceProxy = ""
self.dst_ipv4_addr = ""
self.src_ipv6_addr = ""
self.src_port = ""
self.dst_ipv6_addr = ""
self.name_str = ""
self.total_sessions = ""
self.src_ipv4_addr = ""
self.filter_type = ""
self.session_list = []
self.dest_port = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Sessions(A10BaseClass):
"""Class Description::
Operational Status for the object sessions.
Class sessions supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/sessions/oper`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "sessions"
self.a10_url="/axapi/v3/sessions/oper"
self.DeviceProxy = ""
self.oper = {}
for keys, value in kwargs.items():
setattr(self,keys, value)
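# Usage sketch (an assumption based on the docstrings above, not verified against the
# a10sdk documentation): the parent object is instantiated with a device proxy and then queried:
#   sessions = Sessions(DeviceProxy=device_proxy)   # device_proxy per common/device_proxy.py
#   print(sessions.a10_url)                         # /axapi/v3/sessions/oper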
|
1654801
|
COLOR_PACK_LIGHT_GREEN_TO_DARK_PURPLE = 1
# lower (light green=59) to max (dark purple=28)
DARK_PURPLE = 28
LIGHT_GREEN = 59
COLOR_PACK_CITRON_TO_VIOLET = 2
CITRON = 0
VIOLET = 28
class ColorPalette(object):
@staticmethod
def get_color_from_percent_between_0_1(pct, color_pack=COLOR_PACK_LIGHT_GREEN_TO_DARK_PURPLE):
if color_pack == COLOR_PACK_LIGHT_GREEN_TO_DARK_PURPLE:
COLOR_START = DARK_PURPLE
COLOR_END = LIGHT_GREEN
elif color_pack == COLOR_PACK_CITRON_TO_VIOLET:
COLOR_START = CITRON
COLOR_END = VIOLET
else:
raise Exception('Bad color pack %s' % color_pack)
# get a degraded color
color_range = COLOR_END - COLOR_START
color = COLOR_START + (pct * color_range)
return int(color)
colorpalette = ColorPalette()
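# Worked example (a sketch, not in the original file): with the default color pack,
# COLOR_START = DARK_PURPLE (28) and COLOR_END = LIGHT_GREEN (59), so the range is 31 and
#   colorpalette.get_color_from_percent_between_0_1(0.0)  # -> 28
#   colorpalette.get_color_from_percent_between_0_1(0.5)  # -> int(28 + 0.5 * 31) == 43
#   colorpalette.get_color_from_percent_between_0_1(1.0)  # -> 59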
|
1654842
|
import logging
import os
import zipfile
from celery import shared_task
from django.conf import settings
from django.db.utils import OperationalError, ProgrammingError, InternalError
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from daiquiri.core.tasks import Task
class RunQueryTask(Task):
def on_failure(self, exc, task_id, args, kwargs, einfo):
super(RunQueryTask, self).on_failure(exc, task_id, args, kwargs, einfo)
# always import daiquiri packages inside the task
from daiquiri.query.models import QueryJob
# get logger
logger = logging.getLogger(__name__)
# get job_id from the original task args
job_id = args[0]
# log raised exception
logger.error('run_query %s raised an exception (%s)' % (job_id, exc))
# set phase and error_summary of the crashed job
job = QueryJob.objects.get(pk=job_id)
job.phase = job.PHASE_ERROR
        job.error_summary = str(_('There has been a server error with your job.'))
job.save()
@shared_task(base=RunQueryTask)
def run_query(job_id):
# always import daiquiri packages inside the task
from daiquiri.core.adapter import DatabaseAdapter
from daiquiri.query.models import QueryJob
from daiquiri.query.utils import get_quota, get_job_sources, get_job_columns, ingest_uploads
from daiquiri.stats.models import Record
# get logger
logger = logging.getLogger(__name__)
# get the job object from the database
job = QueryJob.objects.get(pk=job_id)
if job.phase == job.PHASE_QUEUED:
# get the adapter with the database specific functions
adapter = DatabaseAdapter()
        # create the user's database schema if it does not already exist
try:
adapter.create_user_schema_if_not_exists(job.schema_name)
except OperationalError as e:
job.phase = job.PHASE_ERROR
job.error_summary = str(e)
job.save()
return job.phase
# check if the quota is exceeded
if QueryJob.objects.get_size(job.owner) > get_quota(job.owner):
job.phase = job.PHASE_ERROR
job.error_summary = str(_('Quota is exceeded. Please remove some of your jobs.'))
job.save()
return job.phase
# set database and start time
job.pid = adapter.fetch_pid()
job.actual_query = adapter.build_query(job.schema_name, job.table_name, job.native_query, job.timeout, job.max_records)
job.phase = job.PHASE_EXECUTING
job.start_time = now()
job.save()
logger.info('job %s started' % job.id)
# get the actual query and submit the job to the database
try:
ingest_uploads(job.uploads, job.owner)
            # this is where the work is done (and the time is spent)
adapter.submit_query(job.actual_query)
except (ProgrammingError, InternalError, ValueError) as e:
job.phase = job.PHASE_ERROR
job.error_summary = str(e)
logger.info('job %s failed (%s)' % (job.id, job.error_summary))
except OperationalError as e:
# load the job again and check if the job was killed
job = QueryJob.objects.get(pk=job_id)
if job.phase != job.PHASE_ABORTED:
job.phase = job.PHASE_ERROR
job.error_summary = str(e)
logger.info('job %s failed (%s)' % (job.id, job.error_summary))
else:
# get additional information about the completed job
job.phase = job.PHASE_COMPLETED
logger.info('job %s completed' % job.id)
finally:
# get timing and save the job object
job.end_time = now()
# get additional information about the completed job
if job.phase == job.PHASE_COMPLETED:
job.nrows = adapter.count_rows(job.schema_name, job.table_name)
job.size = adapter.fetch_size(job.schema_name, job.table_name)
# fetch the metadata for used tables
job.metadata['sources'] = get_job_sources(job)
# fetch the metadata for the columns and fetch additional metadata from the metadata store
job.metadata['columns'] = get_job_columns(job)
# remove unneeded metadata
job.metadata.pop('display_columns', None)
job.metadata.pop('tables', None)
# create a stats record for this job
Record.objects.create(
time=job.end_time,
resource_type='QUERY',
resource={
'job_id': job.id,
'job_type': job.job_type,
'query': job.query,
'query_language': job.query_language,
'sources': job.metadata.get('sources', [])
},
client_ip=job.client_ip,
user=job.owner
)
job.save()
return job.phase
@shared_task(base=Task)
def run_ingest(job_id, file_path):
from daiquiri.core.adapter import DatabaseAdapter
from daiquiri.query.models import QueryJob
from daiquiri.stats.models import Record
from daiquiri.query.utils import get_quota, ingest_table
# get logger
logger = logging.getLogger(__name__)
# get the job object from the database
job = QueryJob.objects.get(pk=job_id)
if job.phase == job.PHASE_QUEUED:
# get the adapter with the database specific functions
adapter = DatabaseAdapter()
        # create the user's database schema if it does not already exist
try:
adapter.create_user_schema_if_not_exists(job.schema_name)
except OperationalError as e:
job.phase = job.PHASE_ERROR
job.error_summary = str(e)
job.save()
return job.phase
# check if the quota is exceeded
if QueryJob.objects.get_size(job.owner) > get_quota(job.owner):
job.phase = job.PHASE_ERROR
job.error_summary = str(_('Quota is exceeded. Please remove some of your jobs.'))
job.save()
return job.phase
# set database and start time
job.pid = adapter.fetch_pid()
job.phase = job.PHASE_EXECUTING
job.start_time = now()
job.save()
logger.info('job %s started' % job.id)
# create the table and insert the data
try:
columns = ingest_table(job.schema_name, job.table_name, file_path)
except (ProgrammingError, InternalError, ValueError) as e:
job.phase = job.PHASE_ERROR
job.error_summary = str(e)
logger.info('job %s failed (%s)' % (job.id, job.error_summary))
except OperationalError as e:
# load the job again and check if the job was killed
job = QueryJob.objects.get(pk=job_id)
if job.phase != job.PHASE_ABORTED:
job.phase = job.PHASE_ERROR
job.error_summary = str(e)
logger.info('job %s failed (%s)' % (job.id, job.error_summary))
else:
# get additional information about the completed job
job.phase = job.PHASE_COMPLETED
logger.info('job %s completed' % job.id)
finally:
# get timing and save the job object
job.end_time = now()
# get additional information about the completed job
if job.phase == job.PHASE_COMPLETED:
job.nrows = adapter.count_rows(job.schema_name, job.table_name)
job.size = adapter.fetch_size(job.schema_name, job.table_name)
# store the metadata for the columns from the VOTable
job.metadata = {
'columns': columns
}
# create a stats record for this job
Record.objects.create(
time=job.end_time,
resource_type='UPLOAD',
resource={
'job_id': job.id,
'job_type': job.job_type,
},
client_ip=job.client_ip,
user=job.owner
)
job.save()
return job.phase
@shared_task(base=Task)
def create_download_file(download_id):
# always import daiquiri packages inside the task
from daiquiri.query.models import DownloadJob
# get logger
logger = logging.getLogger(__name__)
# get the job object from the database
download_job = DownloadJob.objects.get(pk=download_id)
if download_job.phase == download_job.PHASE_QUEUED:
# log start
logger.info('download_job %s started' % download_job.file_path)
# create directory if necessary
try:
os.mkdir(os.path.dirname(download_job.file_path))
except OSError:
pass
download_job.phase = download_job.PHASE_EXECUTING
download_job.start_time = now()
download_job.save()
# write file using the generator in the adapter
if download_job.format_key == 'fits':
write_label = 'wb'
else:
write_label = 'w'
try:
with open(download_job.file_path, write_label) as f:
for line in download_job.job.stream(download_job.format_key):
f.write(line)
except Exception as e:
download_job.phase = download_job.PHASE_ERROR
download_job.error_summary = str(e)
download_job.save()
logger.info('download_job %s failed (%s)' % (download_job.id, download_job.error_summary))
raise e
else:
download_job.phase = download_job.PHASE_COMPLETED
logger.info('download_job %s completed' % download_job.file_path)
finally:
download_job.end_time = now()
download_job.save()
@shared_task(track_started=True, base=Task)
def create_archive_file(archive_id):
# always import daiquiri packages inside the task
from daiquiri.query.models import QueryArchiveJob
# get logger
logger = logging.getLogger(__name__)
# get the job object from the database
archive_job = QueryArchiveJob.objects.get(pk=archive_id)
if archive_job.phase == archive_job.PHASE_QUEUED:
# log start
logger.info('create_archive_zip_file %s started' % archive_job.file_path)
# create directory if necessary
try:
os.makedirs(os.path.dirname(archive_job.file_path))
except OSError:
pass
archive_job.phase = archive_job.PHASE_EXECUTING
archive_job.start_time = now()
archive_job.save()
# create a zipfile with all files
with zipfile.ZipFile(archive_job.file_path, 'w') as z:
os.chdir(settings.FILES_BASE_PATH)
for file_path in archive_job.files:
z.write(file_path)
archive_job.end_time = now()
archive_job.phase = archive_job.PHASE_COMPLETED
archive_job.save()
# log completion
logger.info('create_archive_zip_file %s completed' % archive_job.file_path)
@shared_task(base=Task)
def rename_table(schema_name, table_name, new_table_name):
from daiquiri.core.adapter import DatabaseAdapter
DatabaseAdapter().rename_table(schema_name, table_name, new_table_name)
@shared_task(base=Task)
def drop_table(schema_name, table_name):
from daiquiri.core.adapter import DatabaseAdapter
# drop the corresponding database table, but fail silently
try:
DatabaseAdapter().drop_table(schema_name, table_name)
except ProgrammingError:
pass
@shared_task(base=Task)
def abort_query(pid):
from daiquiri.core.adapter import DatabaseAdapter
# abort the job on the database
try:
DatabaseAdapter().abort_query(pid)
except OperationalError:
# the query was probably killed before
pass
|
1654846
|
import sys
from pathlib import Path
file = Path(__file__).resolve()
parent, root = file.parent, file.parents[1]
sys.path.append(str(root))
try:
sys.path.remove(str(parent))
except ValueError: # Already removed
pass
import streamlit as st
from util.release_helper import create_static_notes
VERSION = '.'.join(st.__version__.split('.')[:2])
previous_version = "0.76.0"
demo_pages = {}
st.set_page_config(
page_title=f"New features in Streamlit {VERSION}",
page_icon="random"
)
contributors = [
"yashshah1",
]
intro = f"""
This release focused on stabilizing our code base with bug fixes and visual tweaks.
"""
release_notes = f"""
---
**Features**
- Added a new config option `client.showErrorDetails` allowing the developer to control the granularity of error messages. This is useful for when you deploy an app, and want to conceal from your users potentially-sensitive information contained in tracebacks.
**Notable bug fixes**
- Fixed [bug](https://github.com/streamlit/streamlit/issues/1957) where [`st.image`](https://docs.streamlit.io/en/0.77.0/api.html#streamlit.image) wasn't rendering certain kinds of SVGs correctly.
- Fixed [regression](https://github.com/streamlit/streamlit/issues/2699) where the current value of an [`st.slider`](https://docs.streamlit.io/en/0.77.0/api.html#streamlit.slider) was only shown on hover.
"""
# End release updates
def draw_main_page():
st.write(f"""
# Welcome to Streamlit {VERSION}! 👋
""")
st.write(intro)
st.write(release_notes)
create_static_notes(contributors, previous_version, VERSION)
# Draw sidebar
pages = list(demo_pages.keys())
if len(pages):
pages.insert(0, "Release Notes")
st.sidebar.title(f"Streamlit v{VERSION} Demos")
selected_demo = st.sidebar.radio("", pages)
else:
selected_demo = "Release Notes"
# Draw main page
if selected_demo in demo_pages:
demo_pages[selected_demo]()
else:
draw_main_page()
|
1654889
|
from __future__ import print_function
from future.utils import native_str
from builtins import str
from builtins import range
#import mermaid.set_pyreg_paths
import matplotlib as matplt
from mermaid.config_parser import MATPLOTLIB_AGG
if MATPLOTLIB_AGG:
matplt.use('Agg')
import matplotlib.pyplot as plt
import scipy.ndimage as ndimage
import mermaid.utils as utils
import mermaid.finite_differences as fd
import mermaid.custom_pytorch_extensions_module_version as ce
import mermaid.smoother_factory as sf
import mermaid.deep_smoothers as ds
from mermaid.data_wrapper import AdaptVal, MyTensor
import mermaid.fileio as fio
import mermaid.rungekutta_integrators as rk
import mermaid.forward_models as fm
import mermaid.module_parameters as pars
import torch
import numpy as np
# todo: find out a good way to fix this; this may be related to the MKL libraries
print('WARNING: Disabled square root warning for numpy; this may be an issue of the MKL')
np.warnings.filterwarnings('ignore','invalid value encountered in sqrt')
import os
import random
def subsampled_quiver(u,v,color='red', scale=1, subsample=3):
sz = u.shape
xc,yc = np.meshgrid(range(sz[0]),range(sz[1]))
plt.quiver(xc[::subsample,::subsample],yc[::subsample,::subsample],u[::subsample,::subsample], v[::subsample,::subsample], color=color, scale=scale)
def create_momentum(input_im,centered_map,
randomize_momentum_on_circle,randomize_in_sectors,
smooth_initial_momentum,
sz,spacing,nr_of_angles=10,multiplier_factor=1.0,momentum_smoothing=0.05,visualize=False,
publication_figures_directory=None,
publication_prefix=None,
image_pair_nr=None):
# assumes an input image is given (at least roughly centered in the middle of the image)
# and computes a random momentum
fd_np = fd.FD_np(spacing)
# finite differences expect BxXxYxZ (without image channel)
# gradients for the input image (to determine where the edges are)
dxc = fd_np.dXc(input_im[:, 0, ...])
dyc = fd_np.dYc(input_im[:, 0, ...])
# compute the current distance map (relative to the center)
dist_map = (centered_map[:, 0, ...] ** 2 + centered_map[:, 1, ...] ** 2) ** 0.5
# gradients for the distance map (to get directions)
dxc_d = fd_np.dXc(dist_map)
dyc_d = fd_np.dYc(dist_map)
# zero them out everywhere, where the input image does not have a gradient
ind_zero = (dxc ** 2 + dyc ** 2 == 0)
dxc_d[ind_zero] = 0
dyc_d[ind_zero] = 0
#plt.clf()
#plt.quiver(dyc_d[0,...],dxc_d[0,...],scale=5)
#plt.show()
# and now randomly flip the sign ring by ring
maxr = int(input_im.max())
# identity map to define the sectors
id_c = utils.centered_identity_map_multiN(sz, spacing, dtype='float32')
already_flipped = np.zeros_like(dxc_d)
for r in range(1,maxr+1):
cur_ring_val = r
if randomize_momentum_on_circle:
randomize_over_angles = randomize_in_sectors
if randomize_over_angles:
angles = np.sort(2 * np.pi * np.random.rand(nr_of_angles)).astype('float32')
for a in range(nr_of_angles):
afrom = a
ato = (a + 1) % nr_of_angles
nx_from = -np.sin(angles[afrom])
ny_from = np.cos(angles[afrom])
nx_to = -np.sin(angles[ato])
ny_to = np.cos(angles[ato])
dilated_input_im = ndimage.binary_dilation(input_im[:,0,...]==cur_ring_val)
indx = ((dilated_input_im!=0) & (already_flipped==0) & (dxc ** 2 + dyc ** 2 != 0)
& (id_c[:, 0, ...] * nx_from + id_c[:, 1, ...] * ny_from >= 0)
& (id_c[:, 0, ...] * nx_to + id_c[:, 1, ...] * ny_to < 0))
c_rand_choice = np.random.randint(0, 3)
if c_rand_choice == 0:
multiplier = multiplier_factor
elif c_rand_choice == 1:
multiplier = 0.0
else:
multiplier = -multiplier_factor
c_rand_val_field = multiplier * np.random.rand(*list(indx.shape)).astype('float32')
dxc_d[indx] = dxc_d[indx] * c_rand_val_field[indx]
dyc_d[indx] = dyc_d[indx] * c_rand_val_field[indx]
already_flipped[indx] = 1
#print(c_rand_choice)
#plt.clf()
#plt.subplot(121)
#plt.quiver(dyc_d[0, ...], dxc_d[0, ...], scale=5)
#plt.subplot(122)
#plt.imshow(indx[0,...])
#plt.show()
else:
dilated_input_im = ndimage.binary_dilation(input_im[:, 0, ...] == cur_ring_val)
indx = ((dilated_input_im!=0) & (already_flipped==0) & (dxc ** 2 + dyc ** 2 != 0))
already_flipped[indx] = 1
c_rand_val_field = 2 * 2 * (np.random.rand(*list(indx.shape)).astype('float32') - 0.5)
dxc_d[indx] = dxc_d[indx] * c_rand_val_field[indx]
dyc_d[indx] = dyc_d[indx] * c_rand_val_field[indx]
else:
dilated_input_im = ndimage.binary_dilation(input_im[:, 0, ...] == cur_ring_val)
indx = ((dilated_input_im!=0) & (already_flipped==0) & (dxc ** 2 + dyc ** 2 != 0))
already_flipped[indx] = 1
            # multiply by a random number in [-1,1]; np.random.rand() returns a plain float, so cast explicitly
            c_rand_val = np.float32(2 * (np.random.rand() - 0.5) * multiplier_factor)
dxc_d[indx] = dxc_d[indx] * c_rand_val
dyc_d[indx] = dyc_d[indx] * c_rand_val
# now create desired initial momentum
m_orig = np.zeros_like(id_c)
m_orig[0, 0, ...] = dxc_d
m_orig[0, 1, ...] = dyc_d
if visualize:
plt.clf()
plt.quiver(m_orig[0, 1, ...], m_orig[0, 0, ...],color='red',scale=5)
plt.axis('equal')
plt.show()
if publication_figures_directory is not None:
plt.clf()
plt.imshow(input_im[0, 0, ...],origin='lower')
plt.quiver(m_orig[0, 1, ...], m_orig[0, 0, ...],color='red',scale=5)
plt.axis('image')
plt.axis('off')
plt.savefig(os.path.join(publication_figures_directory, '{:s}_m_orig_{:d}.pdf'.format(publication_prefix,image_pair_nr)),bbox_inches='tight',pad_inches=0)
if smooth_initial_momentum:
s_m_params = pars.ParameterDict()
s_m_params['smoother']['type'] = 'gaussian'
s_m_params['smoother']['gaussian_std'] = momentum_smoothing
s_m = sf.SmootherFactory(sz[2::], spacing).create_smoother(s_m_params)
m = s_m.smooth(AdaptVal(torch.from_numpy(m_orig))).detach().cpu().numpy()
if visualize:
plt.clf()
plt.subplot(121)
plt.imshow(m_orig[0, 0, ...])
plt.subplot(122)
plt.imshow(m[0, 0, ...])
plt.suptitle('smoothed mx')
plt.show()
if publication_figures_directory is not None:
plt.clf()
plt.imshow(input_im[0,0,...],origin='lower')
subsampled_quiver(m[0, 1, ...], m[0, 0, ...], color='red', scale=1,subsample=3)
plt.axis('image')
plt.axis('off')
plt.savefig(os.path.join(publication_figures_directory, '{:s}_m_smoothed_orig_{:d}.pdf'.format(publication_prefix,image_pair_nr)),bbox_inches='tight')
else:
m = m_orig
return m
def compute_overall_std(weights,multi_gaussian_stds,kernel_weighting_type):
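    """Compute the overall standard deviation image implied by the multi-Gaussian weights.

    Uses the squared weights for the 'w_K_w' weighting type and the weights themselves otherwise;
    returns the square root of the weighted sum of squared standard deviations as a 2D numpy array
    (intended for visualization only).
    """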
# standard deviation image (only for visualization; this is the desired ground truth)
sh_std_im = weights.shape[2:]
std_im = np.zeros(sh_std_im, dtype='float32')
nr_of_mg_weights = weights.shape[1]
# now compute the resulting standard deviation image (based on the computed weights)
for g in range(nr_of_mg_weights):
if kernel_weighting_type=='w_K_w':
std_im += (weights[0,g,...]**2)*multi_gaussian_stds[g]**2
else:
std_im += weights[0,g,...]*multi_gaussian_stds[g]**2
std_im = std_im**0.5
return std_im
def create_rings(levels_in,multi_gaussian_weights,default_multi_gaussian_weights,
multi_gaussian_stds,put_weights_between_circles,kernel_weighting_type,
sz,spacing,visualize=False):
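    """Create a concentric-ring image and the associated multi-Gaussian weights.

    Returns (weights, ring_im, std_im): the per-pixel weights of shape B x nr_gaussians x X x Y,
    the ring label image of shape B x 1 x X x Y, and the resulting overall standard deviation image.
    """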
if len(multi_gaussian_weights)+1!=len(levels_in):
raise ValueError('There needs to be one more level than the number of weights, to define this example')
id_c = utils.centered_identity_map_multiN(sz, spacing, dtype='float32')
sh_id_c = id_c.shape
# create the weights that will be used for the multi-Gaussian
nr_of_mg_weights = len(default_multi_gaussian_weights)
# format for weights: B x nr_gaussians x X x Y x Z
sh_weights = [sh_id_c[0]] + [nr_of_mg_weights] + list(sh_id_c[2:])
weights = np.zeros(sh_weights,dtype='float32')
# set the default
for g in range(nr_of_mg_weights):
weights[:,g,...] = default_multi_gaussian_weights[g]
# now get the memory for the ring data
sh_ring_im = list(sh_id_c[2:])
ring_im = np.zeros(sh_ring_im,dtype='float32')
# just add one more level in case we put weights in between (otherwise add a dummy)
levels = np.zeros(len(levels_in)+1,dtype='float32')
levels[0:-1] = levels_in
if put_weights_between_circles:
levels[-1] = levels_in[-1]+levels_in[-1]-levels_in[-2]
else:
levels[-1]=-1.
for i in range(len(levels)-2):
cl = levels[i]
nl = levels[i+1]
nnl = levels[i+2]
cval = i+1
indices_ring = (id_c[0,0,...]**2+id_c[0,1,...]**2>=cl**2) & (id_c[0,0,...]**2+id_c[0,1,...]**2<=nl**2)
ring_im[indices_ring] = cval
# as the momenta will be supported on the ring boundaries, we may want the smoothing to be changing in the middle of the rings
if put_weights_between_circles:
indices_weight = (id_c[0,0,...]**2+id_c[0,1,...]**2>=((cl+nl)/2)**2) & (id_c[0,0,...]**2+id_c[0,1,...]**2<=((nl+nnl)/2)**2)
else:
indices_weight = (id_c[0, 0, ...] ** 2 + id_c[0, 1, ...] ** 2 >= cl ** 2) & (id_c[0, 0, ...] ** 2 + id_c[0, 1, ...] ** 2 <= nl ** 2)
# set the desired weights in this ring
current_desired_weights = multi_gaussian_weights[i]
for g in range(nr_of_mg_weights):
current_weights = weights[0,g,...]
current_weights[indices_weight] = current_desired_weights[g]
std_im = compute_overall_std(weights,multi_gaussian_stds,kernel_weighting_type=kernel_weighting_type)
ring_im = ring_im.view().reshape([1,1] + sh_ring_im)
if visualize:
#grad_norm_sqr = dxc**2 + dyc**2
plt.clf()
plt.subplot(221)
plt.imshow(id_c[0, 0, ...])
plt.colorbar()
plt.subplot(222)
plt.imshow(id_c[0, 1, ...])
plt.colorbar()
plt.subplot(223)
plt.imshow(ring_im[0,0,...])
plt.colorbar()
plt.subplot(224)
#plt.imshow(grad_norm_sqr[0,...]>0)
#plt.colorbar()
plt.show()
plt.clf()
nr_of_weights = weights.shape[1]
for cw in range(nr_of_weights):
            plt.subplot(2,3,1+cw)
plt.imshow(weights[0,cw,...],vmin=0.0,vmax=1.0)
plt.colorbar()
plt.subplot(236)
plt.imshow(std_im)
plt.colorbar()
plt.suptitle('weights')
plt.show()
return weights,ring_im,std_im
def _compute_ring_radii(extent, nr_of_rings, randomize_radii, randomize_factor=0.75):
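    """Compute nr_of_rings+1 ring radii in [0, extent], equally spaced or randomly perturbed; the first radius is always 0."""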
if randomize_radii:
rings_at_default = np.linspace(0., extent, nr_of_rings + 1).astype('float32')
diff_r = rings_at_default[1] - rings_at_default[0]
rings_at = np.sort(rings_at_default + (np.random.random(nr_of_rings + 1).astype('float32') - 0.5) * diff_r * randomize_factor)
else:
rings_at = np.linspace(0., extent, nr_of_rings + 1).astype('float32')
# first one needs to be zero:
rings_at[0] = 0
return rings_at
def compute_localized_velocity_from_momentum(m,weights,multi_gaussian_stds,sz,spacing,kernel_weighting_type='w_K',visualize=False):
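    """Compute a localized velocity field from a momentum via a spatially weighted multi-Gaussian kernel.

    Supports the kernel weighting types 'w_K', 'w_K_w', and 'sqrt_w_K_sqrt_w' and returns the
    velocity as a numpy array of shape batch x dim x X x Y.
    """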
nr_of_gaussians = len(multi_gaussian_stds)
# create a velocity field from this momentum using a multi-Gaussian kernel
gaussian_fourier_filter_generator = ce.GaussianFourierFilterGenerator(sz[2:], spacing, nr_of_slots=nr_of_gaussians)
t_weights = AdaptVal(torch.from_numpy(weights))
t_momentum = AdaptVal(torch.from_numpy(m))
if kernel_weighting_type=='sqrt_w_K_sqrt_w':
sqrt_weights = torch.sqrt(t_weights)
sqrt_weighted_multi_smooth_v = ds.compute_weighted_multi_smooth_v(momentum=t_momentum, weights=sqrt_weights,
gaussian_stds=multi_gaussian_stds,
gaussian_fourier_filter_generator=gaussian_fourier_filter_generator)
elif kernel_weighting_type=='w_K_w':
# now create the weighted multi-smooth-v
weighted_multi_smooth_v = ds.compute_weighted_multi_smooth_v(momentum=t_momentum, weights=t_weights,
gaussian_stds=multi_gaussian_stds,
gaussian_fourier_filter_generator=gaussian_fourier_filter_generator)
elif kernel_weighting_type=='w_K':
multi_smooth_v = ce.fourier_set_of_gaussian_convolutions(t_momentum,
gaussian_fourier_filter_generator=gaussian_fourier_filter_generator,
sigma=AdaptVal(torch.from_numpy(multi_gaussian_stds)),
compute_std_gradients=False)
# now compute the localized_velocity
# compute velocity based on localized weights
sz_m = m.shape
# get the size of the multi-velocity field; multi_v x batch x channels x X x Y
sz_mv = [nr_of_gaussians] + list(sz_m)
# create the output tensor: will be of dimension: batch x channels x X x Y
localized_v = AdaptVal(MyTensor(*sz_m))
dims = localized_v.shape[1]
# now we apply this weight across all the channels; weight output is B x weights x X x Y
for n in range(dims):
# reverse the order so that for a given channel we have batch x multi_velocity x X x Y
# i.e., the multi-velocity field output is treated as a channel
# reminder: # format of multi_smooth_v is multi_v x batch x channels x X x Y
# (channels here are the vector field components); i.e. as many as there are dimensions
# each one of those should be smoothed the same
# let's smooth this on the fly, as the smoothing will be of form
# w_i*K_i*(w_i m)
if kernel_weighting_type == 'sqrt_w_K_sqrt_w':
# roc should be: batch x multi_v x X x Y
roc = sqrt_weighted_multi_smooth_v[:, :, n, ...]
# print(sqrt_weighted_multi_smooth_v.shape, sqrt_weights.shape,roc.shape)
yc = torch.sum(roc * sqrt_weights, dim=1)
elif kernel_weighting_type == 'w_K_w':
# roc should be: batch x multi_v x X x Y
roc = weighted_multi_smooth_v[:, :, n, ...]
yc = torch.sum(roc * t_weights, dim=1)
elif kernel_weighting_type == 'w_K':
# roc should be: batch x multi_v x X x Y
roc = torch.transpose(multi_smooth_v[:, :, n, ...], 0, 1)
yc = torch.sum(roc * t_weights, dim=1)
else:
raise ValueError('Unknown weighting_type: {}'.format(kernel_weighting_type))
localized_v[:, n, ...] = yc # localized_v is: batch x channels x X x Y
localized_v = localized_v.cpu().numpy()
if visualize:
norm_localized_v = (localized_v[0, 0, ...] ** 2 + localized_v[0, 1, ...] ** 2) ** 0.5
plt.clf()
plt.subplot(121)
plt.imshow(norm_localized_v)
plt.axis('image')
plt.colorbar()
        plt.subplot(122)
plt.quiver(m[0,1,...],m[0,0,...])
plt.axis('equal')
plt.show()
return localized_v
def compute_map_from_v(localized_v,sz,spacing):
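    """Integrate the advection equation for the map under the given velocity field.

    Returns (phi0, phi1): the identity map and the map obtained by integrating from t=0 to t=1
    with an RK4 integrator.
    """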
# now compute the deformation that belongs to this velocity field
params = pars.ParameterDict()
params['number_of_time_steps'] = 40
advectionMap = fm.AdvectMap( sz[2:], spacing )
pars_to_pass = utils.combine_dict({'v':AdaptVal(torch.from_numpy(localized_v))}, dict() )
integrator = rk.RK4(advectionMap.f, advectionMap.u, pars_to_pass, params)
tFrom = 0.
tTo = 1.
phi0 = AdaptVal(torch.from_numpy(utils.identity_map_multiN(sz,spacing)))
phi1 = integrator.solve([phi0], tFrom, tTo )[0]
return phi0,phi1
def add_texture(im_orig,texture_gaussian_smoothness=0.1,texture_magnitude=0.3):
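    """Add smoothed random texture to an image, separately for each integer intensity level.

    Note: this relies on the module-level `spacing` variable, which is only defined when the file
    is executed as a script.
    """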
# do this separately for each integer intensity level
levels = np.unique((np.floor(im_orig)).astype('int'))
im = np.zeros_like(im_orig)
for current_level in levels:
sz = im_orig.shape
rand_noise = np.random.random(sz[2:]).astype('float32')-0.5
rand_noise = rand_noise.view().reshape(sz)
r_params = pars.ParameterDict()
r_params['smoother']['type'] = 'gaussian'
r_params['smoother']['gaussian_std'] = texture_gaussian_smoothness
s_r = sf.SmootherFactory(sz[2::], spacing).create_smoother(r_params)
rand_noise_smoothed = s_r.smooth(AdaptVal(torch.from_numpy(rand_noise))).detach().cpu().numpy()
rand_noise_smoothed /= rand_noise_smoothed.max()
rand_noise_smoothed *= texture_magnitude
c_indx = (im_orig>=current_level-0.5)
im[c_indx] = im_orig[c_indx] + rand_noise_smoothed[c_indx]
return im
def create_random_image_pair(weights_not_fluid,weights_fluid,weights_neutral,weight_smoothing_std,multi_gaussian_stds,
kernel_weighting_type,
randomize_momentum_on_circle,randomize_in_sectors,
put_weights_between_circles,
start_with_fluid_weight,
use_random_source,
use_fixed_source,
add_texture_to_image,
texture_gaussian_smoothness,
texture_magnitude,
nr_of_circles_to_generate,
circle_extent,
sz,spacing,
nr_of_angles=10,multiplier_factor=1.0,momentum_smoothing=0.05,
visualize=False,visualize_warped=False,print_warped_name=None,
publication_figures_directory=None,
image_pair_nr=None):
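    """Create one synthetic source/target image pair together with its ground-truth quantities.

    Builds a ring image with per-ring multi-Gaussian weights, creates a random momentum, computes
    the corresponding localized velocity field and map, and warps the source image with it.
    Returns numpy arrays (I0_source, I1_warped, weights, I0_label, I1_label, phi1, m).
    """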
nr_of_rings = nr_of_circles_to_generate
extent = circle_extent
randomize_factor = 0.25
randomize_radii = not use_fixed_source
smooth_initial_momentum = True
# create ordered set of weights
multi_gaussian_weights = []
for r in range(nr_of_rings):
if r%2==0:
if start_with_fluid_weight:
multi_gaussian_weights.append(weights_fluid)
else:
multi_gaussian_weights.append(weights_not_fluid)
else:
if start_with_fluid_weight:
multi_gaussian_weights.append(weights_not_fluid)
else:
multi_gaussian_weights.append(weights_fluid)
rings_at = _compute_ring_radii(extent=extent, nr_of_rings=nr_of_rings, randomize_radii=randomize_radii, randomize_factor=randomize_factor)
weights_orig,ring_im_orig,std_im_orig = create_rings(rings_at,multi_gaussian_weights=multi_gaussian_weights,
default_multi_gaussian_weights=weights_neutral,
multi_gaussian_stds=multi_gaussian_stds,
put_weights_between_circles=put_weights_between_circles,
kernel_weighting_type=kernel_weighting_type,
sz=sz,spacing=spacing,
visualize=visualize)
if weight_smoothing_std is not None:
if weight_smoothing_std>0:
s_m_params = pars.ParameterDict()
s_m_params['smoother']['type'] = 'gaussian'
s_m_params['smoother']['gaussian_std'] = weight_smoothing_std
# smooth the weights
smoother = sf.SmootherFactory(weights_orig.shape[2::], spacing).create_smoother(s_m_params)
#weights_old = np.zeros_like(weights_orig)
#weights_old[:] = weights_orig
weights_orig = (smoother.smooth(AdaptVal(torch.from_numpy(weights_orig)))).detach().cpu().numpy()
# make sure they are strictly positive
weights_orig[weights_orig<0] = 0
if publication_figures_directory is not None:
plt.clf()
plt.imshow(ring_im_orig[0,0,...],origin='lower')
plt.axis('off')
plt.savefig(os.path.join(publication_figures_directory,'ring_im_orig_{:d}.pdf'.format(image_pair_nr)),bbox_inches='tight',pad_inches=0)
plt.clf()
plt.imshow(std_im_orig,origin='lower')
plt.axis('off')
plt.colorbar()
plt.savefig(os.path.join(publication_figures_directory,'std_im_orig_{:d}.pdf'.format(image_pair_nr)),bbox_inches='tight',pad_inches=0)
id_c = utils.centered_identity_map_multiN(sz, spacing, dtype='float32')
m_orig = create_momentum(ring_im_orig, centered_map=id_c, randomize_momentum_on_circle=randomize_momentum_on_circle,
randomize_in_sectors=randomize_in_sectors,
smooth_initial_momentum=smooth_initial_momentum,
sz=sz, spacing=spacing,
nr_of_angles=nr_of_angles,
multiplier_factor=multiplier_factor,
momentum_smoothing=momentum_smoothing,
publication_figures_directory=publication_figures_directory,
publication_prefix='circle_init',
image_pair_nr=image_pair_nr)
localized_v_orig = compute_localized_velocity_from_momentum(m=m_orig,weights=weights_orig,multi_gaussian_stds=multi_gaussian_stds,sz=sz,spacing=spacing,kernel_weighting_type=kernel_weighting_type)
if publication_figures_directory is not None:
plt.clf()
plt.imshow(ring_im_orig[0, 0, ...], origin='lower')
subsampled_quiver(localized_v_orig[0,1,...],localized_v_orig[0,0,...],color='red', scale=1, subsample=3)
plt.axis('image')
plt.axis('off')
plt.savefig(os.path.join(publication_figures_directory,'{:s}_{:d}.pdf'.format('localized_v_orig', image_pair_nr)),bbox_inches='tight',pad_inches=0)
phi0_orig,phi1_orig = compute_map_from_v(localized_v_orig,sz,spacing)
if add_texture_to_image:
ring_im = add_texture(ring_im_orig,texture_gaussian_smoothness=texture_gaussian_smoothness,texture_magnitude=texture_magnitude)
if publication_figures_directory is not None:
plt.clf()
plt.imshow(ring_im[0, 0, ...],origin='lower')
plt.axis('off')
plt.savefig(os.path.join(publication_figures_directory, 'ring_im_orig_textured_{:d}.pdf'.format(image_pair_nr)),bbox_inches='tight',pad_inches=0)
# plt.clf()
# plt.subplot(1,2,1)
# plt.imshow(ring_im[0,0,...],clim=(-0.5,2.5))
# plt.colorbar()
# plt.subplot(1,2,2)
# plt.imshow(ring_im_orig[0, 0, ...], clim=(-0.5, 2.5))
# plt.colorbar()
# plt.show()
else:
ring_im = ring_im_orig
# deform image based on this map
I0_source_orig = AdaptVal(torch.from_numpy(ring_im))
I1_warped_orig = utils.compute_warped_image_multiNC(I0_source_orig, phi1_orig, spacing, spline_order=1)
# define the label images
I0_label_orig = AdaptVal(torch.from_numpy(ring_im_orig))
I1_label_orig = utils.get_warped_label_map(I0_label_orig, phi1_orig, spacing )
if publication_figures_directory is not None:
plt.clf()
plt.imshow(I1_label_orig[0, 0, ...].detach().cpu().numpy(),origin='lower')
plt.axis('off')
plt.savefig(os.path.join(publication_figures_directory, 'ring_im_warped_source_{:d}.pdf'.format(image_pair_nr)),bbox_inches='tight',pad_inches=0)
if use_random_source:
# the initially created target will become the source
id_c_warped_t = utils.compute_warped_image_multiNC(AdaptVal(torch.from_numpy(id_c)), phi1_orig, spacing, spline_order=1)
id_c_warped = id_c_warped_t.detach().cpu().numpy()
weights_warped_t = utils.compute_warped_image_multiNC(AdaptVal(torch.from_numpy(weights_orig)), phi1_orig, spacing, spline_order=1)
weights_warped = weights_warped_t.detach().cpu().numpy()
        # make sure they are strictly positive
weights_warped[weights_warped<0] = 0
warped_source_im_orig = I1_label_orig.detach().cpu().numpy()
m_warped_source = create_momentum(warped_source_im_orig, centered_map=id_c_warped, randomize_momentum_on_circle=randomize_momentum_on_circle,
randomize_in_sectors=randomize_in_sectors,
smooth_initial_momentum=smooth_initial_momentum,
sz=sz, spacing=spacing,
nr_of_angles=nr_of_angles,
multiplier_factor=multiplier_factor,
momentum_smoothing=momentum_smoothing,
publication_figures_directory=publication_figures_directory,
publication_prefix='random_source',
image_pair_nr=image_pair_nr)
localized_v_warped = compute_localized_velocity_from_momentum(m=m_warped_source, weights=weights_warped,
multi_gaussian_stds=multi_gaussian_stds, sz=sz,
spacing=spacing,kernel_weighting_type=kernel_weighting_type)
if publication_figures_directory is not None:
plt.clf()
plt.imshow(warped_source_im_orig[0, 0, ...], origin='lower')
subsampled_quiver(localized_v_warped[0, 1, ...], localized_v_warped[0, 0, ...], color='red', scale=1,subsample=3)
plt.axis('image')
plt.axis('off')
plt.savefig(os.path.join(publication_figures_directory,'{:s}_{:d}.pdf'.format('random_source_localized_v', image_pair_nr)),bbox_inches='tight',pad_inches=0)
phi0_w, phi1_w = compute_map_from_v(localized_v_warped, sz, spacing)
if add_texture_to_image:
warped_source_im = add_texture(warped_source_im_orig,texture_gaussian_smoothness=texture_gaussian_smoothness,texture_magnitude=texture_magnitude)
if publication_figures_directory is not None:
plt.clf()
plt.imshow(ring_im[0, 0, ...],origin='lower')
plt.axis('off')
plt.savefig(os.path.join(publication_figures_directory, 'random_source_im_textured_{:d}.pdf'.format(image_pair_nr)),bbox_inches='tight',pad_inches=0)
else:
warped_source_im = warped_source_im_orig
# deform these images based on the new map
# deform image based on this map
I0_source_w = AdaptVal(torch.from_numpy(warped_source_im))
I1_warped_w = utils.compute_warped_image_multiNC(I0_source_w, phi1_w, spacing, spline_order=1)
# define the label images
I0_label_w = AdaptVal(torch.from_numpy(warped_source_im_orig))
I1_label_w = utils.get_warped_label_map(I0_label_w, phi1_w, spacing)
if use_random_source:
I0_source = I0_source_w
I1_warped = I1_warped_w
I0_label = I0_label_w
I1_label = I1_label_w
m = m_warped_source
phi0 = phi0_w
phi1 = phi1_w
weights = weights_warped
else:
I0_source = I0_source_orig
I1_warped = I1_warped_orig
I0_label = I0_label_orig
I1_label = I1_label_orig
m = m_orig
phi0 = phi0_orig
phi1 = phi1_orig
weights = weights_orig
std_im = compute_overall_std(weights,multi_gaussian_stds,kernel_weighting_type=kernel_weighting_type)
if visualize_warped:
plt.clf()
# plot original image, warped image, and grids
plt.subplot(3,4,1)
plt.imshow(I0_source[0,0,...].detach().cpu().numpy())
plt.title('source')
plt.subplot(3,4,2)
plt.imshow(I1_warped[0,0,...].detach().cpu().numpy())
plt.title('warped = target')
plt.subplot(3,4,3)
plt.imshow(I0_source[0,0,...].detach().cpu().numpy())
plt.contour(phi0[0,0,...].detach().cpu().numpy(), np.linspace(-1, 1, 40), colors='r', linestyles='solid')
plt.contour(phi0[0,1,...].detach().cpu().numpy(), np.linspace(-1, 1, 40), colors='r', linestyles='solid')
plt.subplot(3,4,4)
plt.imshow(I1_warped[0,0,...].detach().cpu().numpy())
plt.contour(phi1[0,0,...].detach().cpu().numpy(), np.linspace(-1, 1, 40), colors='r', linestyles='solid')
plt.contour(phi1[0,1,...].detach().cpu().numpy(), np.linspace(-1, 1, 40), colors='r', linestyles='solid')
nr_of_weights = weights.shape[1]
for cw in range(nr_of_weights):
plt.subplot(3,4,5+cw)
if kernel_weighting_type=='w_K_w':
plt.imshow(weights[0, cw, ...]**2, vmin=0.0, vmax=1.0)
else:
plt.imshow(weights[0, cw, ...], vmin=0.0, vmax=1.0)
plt.title('w: std' + str(multi_gaussian_stds[cw]))
plt.colorbar()
plt.subplot(3,4,12)
plt.imshow(std_im)
plt.title('std')
plt.colorbar()
if print_warped_name is not None:
plt.savefig(print_warped_name)
else:
plt.show()
if publication_figures_directory is not None:
plt.clf()
plt.imshow(I0_source[0, 0, ...].detach().cpu().numpy(),origin='lower')
plt.axis('image')
plt.axis('off')
plt.savefig(os.path.join(publication_figures_directory, '{:s}_{:d}.pdf'.format('source_image', image_pair_nr)),bbox_inches='tight',pad_inches=0)
plt.clf()
plt.imshow(I1_warped[0, 0, ...].detach().cpu().numpy(),origin='lower')
plt.axis('image')
plt.axis('off')
plt.savefig(os.path.join(publication_figures_directory, '{:s}_{:d}.pdf'.format('target_image', image_pair_nr)),bbox_inches='tight',pad_inches=0)
plt.clf()
plt.imshow(I0_source[0, 0, ...].detach().cpu().numpy(),origin='lower')
plt.contour(phi0[0, 0, ...].detach().cpu().numpy(), np.linspace(-1, 1, 40), colors='r', linestyles='solid')
plt.contour(phi0[0, 1, ...].detach().cpu().numpy(), np.linspace(-1, 1, 40), colors='r', linestyles='solid')
plt.axis('image')
plt.axis('off')
plt.savefig(os.path.join(publication_figures_directory, '{:s}_{:d}.pdf'.format('source_image_with_grid', image_pair_nr)),bbox_inches='tight',pad_inches=0)
plt.clf()
plt.imshow(I1_warped[0, 0, ...].detach().cpu().numpy(),origin='lower')
plt.contour(phi1[0, 0, ...].detach().cpu().numpy(), np.linspace(-1, 1, 40), colors='r', linestyles='solid')
plt.contour(phi1[0, 1, ...].detach().cpu().numpy(), np.linspace(-1, 1, 40), colors='r', linestyles='solid')
plt.axis('image')
plt.axis('off')
plt.savefig(os.path.join(publication_figures_directory,'{:s}_{:d}.pdf'.format('target_image_with_grid', image_pair_nr)), bbox_inches='tight',pad_inches=0)
plt.clf()
plt.imshow(std_im,origin='lower')
plt.colorbar()
plt.axis('image')
plt.axis('off')
plt.savefig(os.path.join(publication_figures_directory, '{:s}_{:d}.pdf'.format('std_im_source', image_pair_nr)),bbox_inches='tight',pad_inches=0)
return I0_source.detach().cpu().numpy(), I1_warped.detach().cpu().numpy(), weights, \
I0_label.detach().cpu().numpy(), I1_label.detach().cpu().numpy(), phi1.detach().cpu().numpy(), m
def get_parameter_value(command_line_par,params, params_name, default_val, params_description):
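    """Return the command-line value if one was given (recording it in params); otherwise query params with the default."""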
if command_line_par is None:
ret = params[(params_name, default_val, params_description)]
else:
params[params_name]=command_line_par
ret = command_line_par
return ret
def get_parameter_value_flag(command_line_par,params, params_name, default_val, params_description):
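    """Like get_parameter_value, but for flags: params is only queried if the command-line value equals the default."""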
if command_line_par==default_val:
ret = params[(params_name, default_val, params_description)]
else:
params[params_name]=command_line_par
ret = command_line_par
return ret
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser(description='Creates synthetic registration examples')
parser.add_argument('--config', required=False, default=None, help='The main json configuration file that can be used to define the settings')
    parser.add_argument('--output_directory', required=False, default='synthetic_example_out', help='Directory to which the generated synthetic examples are written')
parser.add_argument('--nr_of_pairs_to_generate', required=False, default=10, type=int, help='number of image pairs to generate')
parser.add_argument('--nr_of_circles_to_generate', required=False, default=None, type=int, help='number of circles to generate in an image') #2
parser.add_argument('--circle_extent', required=False, default=None, type=float, help='Size of largest circle; image is [-0.5,0.5]^2') # 0.25
    parser.add_argument('--seed', required=False, type=int, default=None, help='Sets the random seed so that the generated examples are reproducible')
parser.add_argument('--create_publication_figures', action='store_true', help='If set writes out figures illustrating the generation approach of first example')
parser.add_argument('--use_fixed_source', action='store_true', help='if set the source image is fixed; like a fixed atlas image')
    parser.add_argument('--use_random_source', action='store_true', help='if set then the initial source is warped randomly, otherwise it is circular')
parser.add_argument('--no_texture', action='store_true',help='if set then no texture is used, otherwise (default) texture is generated')
parser.add_argument('--texture_gaussian_smoothness', required=False, type=float, default=None, help='Gaussian standard deviation used to smooth a random image to create texture.')
parser.add_argument('--texture_magnitude', required=False, type=float, default=None, help='Magnitude of the texture')
    parser.add_argument('--do_not_randomize_momentum', action='store_true', help='if set, the momentum is not randomized on the circles (only one random scaling factor per ring)')
parser.add_argument('--do_not_randomize_in_sectors', action='store_true', help='if set and randomize momentum is on, momentum is only randomized uniformly over circles')
parser.add_argument('--put_weights_between_circles', action='store_true', help='if set, the weights will change in-between circles, otherwise they will be colocated with the circles')
    parser.add_argument('--start_with_fluid_weight', action='store_true', help='if set then the innermost circle is fluid, otherwise it is not')
parser.add_argument('--weight_smoothing_std',required=False,default=0.02,type=float,help='Standard deviation to smooth the weights with; to assure sufficient regularity')
parser.add_argument('--stds', required=False,type=str, default=None, help='standard deviations for the multi-Gaussian; default=[0.01,0.05,0.1,0.2]')
parser.add_argument('--weights_not_fluid', required=False,type=str, default=None, help='weights for a non fluid circle; default=[0,0,0,1]')
parser.add_argument('--weights_fluid', required=False,type=str, default=None, help='weights for a fluid circle; default=[0.2,0.5,0.2,0.1]')
parser.add_argument('--weights_background', required=False,type=str, default=None, help='weights for the background; default=[0,0,0,1]')
parser.add_argument('--kernel_weighting_type', required=False, type=str, default=None, help='Which kernel weighting to use for integration. Specify as [w_K|w_K_w|sqrt_w_K_sqrt_w]; w_K is the default')
parser.add_argument('--nr_of_angles', required=False, default=None, type=int, help='number of angles for randomize in sector') #10
parser.add_argument('--multiplier_factor', required=False, default=None, type=float, help='value the random momentum is multiplied by') #1.0
    parser.add_argument('--momentum_smoothing', required=False, default=None, type=float, help='how much the randomly generated momentum is smoothed') #0.05
parser.add_argument('--sz', required=False, type=str, default=None, help='Desired size of synthetic example; default=[128,128]')
args = parser.parse_args()
    if args.seed is not None:
        print('Setting the random seed to {:}'.format(args.seed))
        random.seed(args.seed)
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
params = pars.ParameterDict()
if args.config is not None:
# load the configuration
params.load_JSON(args.config)
visualize = True
visualize_warped = True
print_images = True
nr_of_pairs_to_generate = args.nr_of_pairs_to_generate
nr_of_circles_to_generate = get_parameter_value(args.nr_of_circles_to_generate, params, 'nr_of_circles_to_generate', 2, 'number of circles for the synthetic data')
circle_extent = get_parameter_value(args.circle_extent, params, 'circle_extent', 0.2, 'Size of largest circle; image is [-0.5,0.5]^2')
randomize_momentum_on_circle = get_parameter_value_flag(not args.do_not_randomize_momentum,params=params, params_name='randomize_momentum_on_circle',
default_val=True, params_description='randomizes the momentum on the circles')
    randomize_in_sectors = get_parameter_value_flag(not args.do_not_randomize_in_sectors, params=params, params_name='randomize_in_sectors',
                                                    default_val=True, params_description='randomizes the momentum sector by sector')
put_weights_between_circles = get_parameter_value_flag(args.put_weights_between_circles, params=params, params_name='put_weights_between_circles',
default_val=False, params_description='if set, the weights will change in-between circles, otherwise they will be colocated with the circles')
    start_with_fluid_weight = get_parameter_value_flag(args.start_with_fluid_weight, params=params, params_name='start_with_fluid_weight',
                                                       default_val=False, params_description='if set then the innermost circle is fluid, otherwise it is not')
use_random_source = get_parameter_value_flag(args.use_random_source, params=params, params_name='use_random_source',
default_val=False, params_description='if set then source image is already deformed (and no longer circular)')
    use_fixed_source = get_parameter_value_flag(args.use_fixed_source, params=params, params_name='use_fixed_source',
                                                default_val=False,
                                                params_description='if set then the source image will be fixed; like a fixed atlas image')
add_texture_to_image = get_parameter_value_flag(not args.no_texture, params=params, params_name='add_texture_to_image', default_val=True,
params_description='When set to true, texture is added to the images (based on texture_gaussian_smoothness)')
texture_magnitude = get_parameter_value(args.texture_magnitude, params=params, params_name='texture_magnitude',
default_val=0.3, params_description='Largest magnitude of the added texture')
texture_gaussian_smoothness = get_parameter_value(args.texture_gaussian_smoothness,params=params,params_name='texture_gaussian_smoothness',
default_val=0.02, params_description='How much smoothing is used to create the texture image')
kernel_weighting_type = get_parameter_value(args.kernel_weighting_type, params=params, params_name='kernel_weighting_type',
default_val='sqrt_w_K_sqrt_w', params_description='Which kernel weighting to use for integration. Specify as [w_K|w_K_w|sqrt_w_K_sqrt_w]; w_K is the default')
    if use_random_source and use_fixed_source:
raise ValueError('The source image cannot simultaneously be random and fixed. Aborting')
nr_of_angles = get_parameter_value(args.nr_of_angles,params,'nr_of_angles',10,'number of angles for randomize in sector')
multiplier_factor = get_parameter_value(args.multiplier_factor,params,'multiplier_factor',0.5,'value the random momentum is multiplied by')
momentum_smoothing = get_parameter_value(args.momentum_smoothing,params,'momentum_smoothing',0.05,'how much the randomly generated momentum is smoothed')
if args.stds is None:
multi_gaussian_stds_p = None
else:
mgsl = [float(item) for item in args.stds.split(',')]
multi_gaussian_stds_p = list(np.array(mgsl))
multi_gaussian_stds = get_parameter_value(multi_gaussian_stds_p, params, 'multi_gaussian_stds', list(np.array([0.01, 0.05, 0.1, 0.2])), 'multi gaussian standard deviations')
multi_gaussian_stds = np.array(multi_gaussian_stds).astype('float32')
if args.weights_not_fluid is None:
weights_not_fluid_p = None
else:
cw = [float(item) for item in args.weights_not_fluid.split(',')]
weights_not_fluid_p = list(np.array(cw))
weights_not_fluid = get_parameter_value(weights_not_fluid_p, params, 'weights_not_fluid', list(np.array([0,0,0,1.0])), 'weights for the non-fluid regions')
weights_not_fluid = np.array(weights_not_fluid).astype('float32')
if len(weights_not_fluid)!=len(multi_gaussian_stds):
raise ValueError('Need as many weights as there are standard deviations')
if args.weights_fluid is None:
weights_fluid_p = None
else:
cw = [float(item) for item in args.weights_fluid.split(',')]
weights_fluid_p = list(np.array(cw))
weights_fluid = get_parameter_value(weights_fluid_p, params, 'weights_fluid', list(np.array([0.2,0.5,0.2,0.1])), 'weights for fluid regions')
weights_fluid = np.array(weights_fluid).astype('float32')
if len(weights_fluid)!=len(multi_gaussian_stds):
raise ValueError('Need as many weights as there are standard deviations')
if args.weights_background is None:
weights_neutral_p = None
else:
cw = [float(item) for item in args.weights_background.split(',')]
weights_neutral_p = list(np.array(cw))
weights_neutral = get_parameter_value(weights_neutral_p, params, 'weights_neutral', list(np.array([0,0,0,1.0])), 'weights in the neutral/background region')
weights_neutral = np.array(weights_neutral).astype('float32')
if kernel_weighting_type=='w_K_w':
print('INFO: converting weights to w_K_w format, i.e., taking their square root')
# square of weights needs to sum up to one, so simply take the square root of the specified weights here
weights_fluid = np.sqrt(weights_fluid)
weights_neutral = np.sqrt(weights_neutral)
weights_not_fluid = np.sqrt(weights_not_fluid)
if len(weights_neutral)!=len(multi_gaussian_stds):
raise ValueError('Need as many weights as there are standard deviations')
if args.sz is None:
sz_p = None
else:
cw = [int(item) for item in args.sz.split(',')]
        sz_p = np.array(cw).astype('int')
sz = get_parameter_value(sz_p, params, 'sz', [128,128], 'size of the synthetic example')
if len(sz) != 2:
raise ValueError('Only two dimensional synthetic examples are currently supported for sz parameter')
sz = [1, 1, sz[0], sz[1]]
spacing = 1.0 / (np.array(sz[2:]).astype('float32') - 1)
    output_dir = os.path.normpath(args.output_directory) + '_kernel_weighting_type_' + str(kernel_weighting_type)
image_output_dir = os.path.join(output_dir,'brain_affine_icbm')
label_output_dir = os.path.join(output_dir,'label_affine_icbm')
misc_output_dir = os.path.join(output_dir,'misc')
pdf_output_dir = os.path.join(output_dir,'pdf')
publication_figs = os.path.join(output_dir,'publication_figs')
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
if not os.path.isdir(image_output_dir):
os.makedirs(image_output_dir)
if not os.path.isdir(label_output_dir):
os.makedirs(label_output_dir)
if not os.path.isdir(misc_output_dir):
os.makedirs(misc_output_dir)
if not os.path.isdir(pdf_output_dir):
os.makedirs(pdf_output_dir)
if args.create_publication_figures:
if not os.path.isdir(publication_figs):
os.makedirs(publication_figs)
pt = dict()
pt['source_images'] = []
pt['target_images'] = []
pt['source_ids'] = []
pt['target_ids'] = []
im_io = fio.ImageIO()
# image hdr
hdr = dict()
hdr['space origin'] = np.array([0,0,0])
hdr['spacing'] = np.array(list(spacing) + [spacing[-1]])
hdr['space directions'] = np.array([['1', '0', '0'], ['0', '1', '0'], ['0', '0', '1']])
hdr['dimension'] = 3
hdr['space'] = 'left-posterior-superior'
hdr['sizes'] = list(sz[2:])+[1]
for n in range(nr_of_pairs_to_generate):
print('Writing file pair ' + str(n+1) + '/' + str(nr_of_pairs_to_generate))
if print_images:
print_warped_name = os.path.join(pdf_output_dir,'registration_image_pair_{:05d}.pdf'.format(2*n+1))
else:
print_warped_name = None
publication_figures_directory = None
if args.create_publication_figures and (n==0):
publication_figures_directory = publication_figs
I0Source, I1Target, weights, I0Label, I1Label, gt_map, gt_m = \
create_random_image_pair(weights_not_fluid=weights_not_fluid,
weights_fluid=weights_fluid,
weights_neutral=weights_neutral,
weight_smoothing_std=args.weight_smoothing_std,
multi_gaussian_stds=multi_gaussian_stds,
kernel_weighting_type=kernel_weighting_type,
randomize_momentum_on_circle=randomize_momentum_on_circle,
randomize_in_sectors=randomize_in_sectors,
put_weights_between_circles=put_weights_between_circles,
start_with_fluid_weight=start_with_fluid_weight,
use_random_source=use_random_source,
use_fixed_source=use_fixed_source,
add_texture_to_image=add_texture_to_image,
texture_gaussian_smoothness=texture_gaussian_smoothness,
texture_magnitude=texture_magnitude,
nr_of_circles_to_generate=nr_of_circles_to_generate,
circle_extent=circle_extent,
sz=sz,spacing=spacing,
nr_of_angles=nr_of_angles,
multiplier_factor=multiplier_factor,
momentum_smoothing=momentum_smoothing,
visualize=visualize,
visualize_warped=visualize_warped,
print_warped_name=print_warped_name,
publication_figures_directory=publication_figures_directory,
image_pair_nr=n)
source_filename = os.path.join(image_output_dir,'m{:d}.nii'.format(2*n+1))
target_filename = os.path.join(image_output_dir,'m{:d}.nii'.format(2*n+1+1))
source_label_filename = os.path.join(label_output_dir, 'm{:d}.nii'.format(2 * n + 1))
target_label_filename = os.path.join(label_output_dir, 'm{:d}.nii'.format(2 * n + 1 + 1))
gt_weights_filename = os.path.join(misc_output_dir,'gt_weights_{:05d}.pt'.format(2*n+1))
gt_momentum_filename = os.path.join(misc_output_dir,'gt_momentum_{:05d}.pt'.format(2*n+1))
gt_map_filename = os.path.join(misc_output_dir,'gt_map_{:05d}.pt'.format(2*n+1))
reshape_size = list(sz[2:]) + [1]
# save these files
im_io.write(filename=source_filename,data=I0Source.view().reshape(reshape_size),hdr=hdr)
im_io.write(filename=target_filename,data=I1Target.view().reshape(reshape_size),hdr=hdr)
im_io.write(filename=source_label_filename, data=I0Label.view().reshape(reshape_size), hdr=hdr)
im_io.write(filename=target_label_filename, data=I1Label.view().reshape(reshape_size), hdr=hdr)
torch.save(weights,gt_weights_filename)
torch.save(gt_map,gt_map_filename)
torch.save(gt_m,gt_momentum_filename)
# create source/target configuration
pt['source_images'].append(source_filename)
pt['target_images'].append(target_filename)
pt['source_ids'].append(2*n+1)
pt['target_ids'].append(2*n+1+1)
filename_pt = os.path.join(output_dir,'used_image_pairs.pt')
torch.save(pt,filename_pt)
config_json = os.path.join(output_dir,'config.json')
params.write_JSON(config_json)