arxiv_id stringlengths 0 16 | text stringlengths 10 1.65M |
|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Example of the thermal energy storage (TES) class.
"""
from __future__ import division
import numpy as np
import pycity_base.classes.supply.thermal_energy_storage as tes
import pycity_base.classes.timer
import pycity_base.classes.weather
import pycity_base.classes.prices
import pycity_base.classes.environment
def run_example():
    """Demonstrate creation and usage of a thermal energy storage (TES) unit."""
    # Assemble the simulation environment (timer, weather, prices)
    timer = pycity_base.classes.timer.Timer()
    weather = pycity_base.classes.weather.Weather(timer, use_TRY=True)
    prices = pycity_base.classes.prices.Prices()
    environment = pycity_base.classes.environment.Environment(timer, weather, prices)

    # Storage parameters
    t_init = 20           # initial storage temperature in °C
    capacity = 1000       # water mass in kg
    t_max = 95            # maximum storage temperature in °C
    t_surroundings = 20   # ambient temperature in °C
    k_losses = 3          # heat loss factor in W/K

    thermal_storage = tes.ThermalEnergyStorage(
        environment, t_init, capacity, t_max, t_surroundings, k_losses)

    nominal = thermal_storage.getNominalValues()
    tes_capacity, tes_t_max, tes_t_surroundings, tes_k_losses = nominal

    # Report the configured parameters
    print()
    print("Initial temperature: " + str(thermal_storage.t_init))
    print("Water mass: " + str(tes_capacity))
    print("Maximum temperature: " + str(tes_t_max))
    print("Surroundings temperature: " + str(tes_t_surroundings))
    print("Loss factor: " + str(tes_k_losses))

    # Store a reproducible random temperature trajectory and read it back
    np.random.seed(0)
    span = t_max - t_surroundings
    result = np.random.rand(timer.timesteps_used_horizon) * span + t_surroundings
    thermal_storage.setResults(result)

    print()
    print("Storage temperature: " + str(thermal_storage.getResults(True)))
if __name__ == '__main__':
    # Run the example only when executed as a script
    run_example()
import os
import numpy as np
from datetime import datetime
from tqdm import tqdm
import random
import torch
import torch.optim as optim
from .utils import register_algorithm, Algorithm, acc
from src.data.utils import load_dataset
from src.models.utils import get_model
import numpy as np
def load_data(args):
    """
    Dataloading function. This function can change alg by alg as well.

    Builds one loader per split from the same dataset settings and returns
    them as (trainloader, testloader, valloader).
    """
    loaders = {
        split: load_dataset(name=args.dataset_name,
                            rootdir=args.dataset_root,
                            dset=split,
                            batch_size=args.batch_size,
                            num_workers=args.num_workers)
        for split in ('train', 'test', 'val')
    }
    return loaders['train'], loaders['test'], loaders['val']
@register_algorithm('PlainResNet')
class PlainResNet(Algorithm):
    """
    Plain ResNet training/evaluation algorithm: standard cross-entropy
    training with separate optimizer hyperparameters for the feature
    extractor and the classifier head.
    """

    name = 'PlainResNet'
    # populated later by set_train()/set_eval()
    net = None
    opt_net = None
    scheduler = None

    def __init__(self, args):
        """Store epochs/logging settings and build train/test/val loaders."""
        super(PlainResNet, self).__init__(args=args)
        # Training epochs and logging intervals
        self.num_epochs = args.num_epochs
        self.log_interval = args.log_interval
        # Setup data for training and testing
        self.trainloader, self.testloader, self.valloader = load_data(args)
        # per-class sample counts of the training set (used in eval reports)
        _, self.train_class_counts = self.trainloader.dataset.class_counts_cal()

    def set_train(self):
        """Build the network for training (feature weights only) and its optimizers."""
        # setup network
        self.logger.info('\nGetting {} model.'.format(self.args.model_name))
        self.net = get_model(name=self.args.model_name, num_cls=self.args.num_classes,
                             weights_init=self.args.weights_init, num_layers=self.args.num_layers,
                             init_feat_only=True, parallel=self.args.parallel)
        self.set_optimizers()

    def set_eval(self):
        """Rebuild the network from previously saved weights for evaluation."""
        self.logger.info('\nGetting {} model.'.format(self.args.model_name))
        self.logger.info('\nLoading from {}'.format(self.weights_path))
        self.net = get_model(name=self.args.model_name, num_cls=self.args.num_classes,
                             weights_init=self.weights_path, num_layers=self.args.num_layers,
                             init_feat_only=False)

    def set_optimizers(self):
        """Create SGD with per-component hyperparameters plus a StepLR scheduler."""
        self.logger.info('** SETTING OPTIMIZERS!!! **')
        # Separate parameter groups for the feature extractor and the classifier
        net_optim_params_list = [
            {'params': self.net.feature.parameters(),
             'lr': self.args.lr_feature,
             'momentum': self.args.momentum_feature,
             'weight_decay': self.args.weight_decay_feature},
            {'params': self.net.classifier.parameters(),
             'lr': self.args.lr_classifier,
             'momentum': self.args.momentum_classifier,
             'weight_decay': self.args.weight_decay_classifier}
        ]
        # Setup optimizer and optimizer scheduler
        self.opt_net = optim.SGD(net_optim_params_list)
        self.scheduler = optim.lr_scheduler.StepLR(self.opt_net, step_size=self.args.step_size, gamma=self.args.gamma)

    def train(self):
        """Full training loop: train each epoch, validate, keep the best model."""
        # NOTE(review): project API method name is spelled "critera" (sic)
        self.net.setup_critera()
        best_acc = 0.
        best_epoch = 0
        for epoch in range(self.num_epochs):
            # Training
            self.train_epoch(epoch)
            # Validation: keep the weights with the best macro accuracy
            self.logger.info('\nValidation.')
            val_acc, _ = self.evaluate(self.valloader)#, test=False)
            if val_acc > best_acc:
                self.net.update_best()
                best_acc = val_acc
                best_epoch = epoch
        self.logger.info('\nBest Model Appears at Epoch {} with Mac Acc {:.3f}...'.format(best_epoch, best_acc * 100))
        self.save_model()

    def evaluate(self, loader, eval_output=False):
        """Evaluate on `loader`; return (macro_acc, micro_acc).

        When eval_output is True, also dump preds/labels/logits/file ids to an
        .npz file next to the weights file.
        """
        outputs = self.evaluate_epoch(loader)
        eval_info, mac_acc, mic_acc = self.evaluate_metric(outputs[0], outputs[1])
        self.logger.info(eval_info)
        if eval_output:
            output_path = self.weights_path.replace('.pth', '_{}.npz').format('val' if loader == self.valloader else 'test')
            np.savez(output_path, preds=outputs[0], labels=outputs[1], logits=outputs[2], file_ids=outputs[3])
        return mac_acc, mic_acc

    def train_epoch(self, epoch):
        """Train one cross-entropy epoch over the train loader."""
        self.net.train()
        N = len(self.trainloader)
        tr_iter = iter(self.trainloader)
        for batch_idx in range(N):
            data, labels, _ = next(tr_iter)
            # basic train progress info
            info_str = '[Train {}] Epoch: {} [batch {}/{} ({:.2f}%)] '.format(self.name, epoch, batch_idx,
                                                                              N, 100 * batch_idx / N)
            # Setup data variables (inputs are not optimized)
            data, labels = data.cuda(), labels.cuda()
            data.requires_grad = False
            labels.requires_grad = False
            # forward
            feats = self.net.feature(data)
            logits = self.net.classifier(feats)
            # calculate loss
            loss = self.net.criterion_cls(logits, labels)
            # zero gradients, backpropagate, optimizer step
            self.opt_net.zero_grad()
            loss.backward()
            self.opt_net.step()
            # periodic logging of batch accuracy and loss
            if batch_idx % self.log_interval == 0:
                preds = logits.argmax(dim=1)
                acc = (preds == labels).float().mean()
                info_str += 'Acc: {:0.1f} Xent: {:.3f}'.format(acc.item() * 100, loss.item())
                self.logger.info(info_str)
        # step the LR schedule once per epoch
        self.scheduler.step()

    def evaluate_epoch(self, loader):
        """Forward the whole loader without gradients.

        Returns (preds, labels, logits, file_ids) as concatenated numpy arrays.
        """
        self.net.eval()
        total_preds = []
        total_labels = []
        total_logits = []
        total_file_ids = []
        # Forward and record predictions for each batch
        with torch.set_grad_enabled(False):
            for data, labels, file_ids in tqdm(loader, total=len(loader)):
                # setup data
                data, labels = data.cuda(), labels.cuda()
                data.requires_grad = False
                labels.requires_grad = False
                # forward
                feats = self.net.feature(data)
                logits = self.net.classifier(feats)
                preds = logits.argmax(dim=1)
                total_preds.append(preds.detach().cpu().numpy())
                total_labels.append(labels.detach().cpu().numpy())
                total_logits.append(logits.detach().cpu().numpy())
                total_file_ids.append(np.array(file_ids))
        total_preds = np.concatenate(total_preds, axis=0)
        total_labels = np.concatenate(total_labels, axis=0)
        total_logits = np.concatenate(total_logits, axis=0)
        total_file_ids = np.concatenate(total_file_ids, axis=0)
        return (total_preds, total_labels, total_logits, total_file_ids)

    def evaluate_metric(self, total_preds, total_labels):
        """Compute per-class / macro / micro accuracy; return (report_str, mac_acc, mic_acc)."""
        class_acc, mac_acc, mic_acc, unique_eval_classes = acc(total_preds, total_labels, self.args.num_classes)
        eval_info = '{} Per-class evaluation results: \n'.format(datetime.now().strftime("%Y-%m-%d_%H:%M:%S"))
        for i in range(len(class_acc)):
            label = unique_eval_classes[i]
            eval_info += 'Class {} (tr counts {:>5}):'.format(label, self.train_class_counts[label])
            eval_info += 'Acc {:.3f} \n'.format(class_acc[i] * 100)
        eval_info += 'Macro Acc: {:.3f}; Micro Acc: {:.3f}\n'.format(mac_acc * 100, mic_acc * 100)
        return eval_info, mac_acc, mic_acc

    def save_model(self):
        """Persist the current (best) network weights to self.weights_path."""
        os.makedirs(self.weights_path.rsplit('/', 1)[0], exist_ok=True)
        self.logger.info('Saving to {}'.format(self.weights_path))
        self.net.save(self.weights_path)
import math
import numpy as np
from .utilities import array2str
from .BaseTokenizer import BaseTokenizer
class VectorTokenizer(BaseTokenizer):
    """Map dense vectors to integer ids via random-hyperplane (LSH-style) hashing.

    ceil(log2(num_embeddings)) random normal vectors define hyperplanes; the
    sign pattern of a vector's projections forms a binary code interpreted as
    an integer id modulo num_embeddings.
    """

    def __init__(self,
                 num_embeddings: int,
                 vector_size: int,
                 padding_idx: int=0,
                 random_seed: int=0):
        super().__init__(num_embeddings, padding_idx)
        self.vector_size = vector_size
        # NOTE(review): seeding numpy's *global* RNG affects callers too — confirm intended
        np.random.seed(random_seed)
        self.random_vecs = np.random.normal(size=[math.ceil(math.log(num_embeddings, 2)), vector_size])

    def __call__(self, vectors: np.ndarray):
        """
        vectors: (batch_size, vector_size)
        return: (batch_size, )
        """
        return self.numerize(vectors)

    def numerize(self, vectors):
        # sign of projection onto each random hyperplane -> binary code per vector
        binary_vecs = (vectors @ self.random_vecs.T > 0).astype(int)
        # presumably array2str renders the 0/1 array as a bit string — confirm;
        # the code is parsed base-2 and folded into the embedding table size
        numbers = [int(array2str(vector), 2) % self.num_embeddings for vector in binary_vecs]
        # keep ids clear of the padding index (ids <= padding_idx are bumped up)
        numbers = [max(number, self.padding_idx + 1) for number in numbers]
        return np.array(numbers)

    def tokenize(self):
        # not meaningful for vector inputs; present to satisfy the BaseTokenizer interface
        pass
import os
import albumentations as A
import cv2
import numpy as np
import pandas as pd
import torch
from pandas import DataFrame
from torch.utils.data import Dataset, DataLoader
from config import Config
from constants import DEEPER_FORENSICS
from training.datasets.transform import create_train_transform, create_val_test_transform
# Global project configuration; ORI_ROOT is the root of the pristine
# (original-sequences) data tree — presumably a FaceForensics++-style layout, confirm
CONFIG = Config()
ORI_ROOT = CONFIG['ORI_ROOT']
class DeeperForensicsDataset(Dataset):
    """Face-forgery dataset yielding dicts of image, binary label, and manipulation mask.

    Rows with label == 1 are manipulated crops paired with a stored diff mask;
    rows with label == 0 are original crops paired with an all-zero mask.
    """

    def __init__(self, data_root, df: DataFrame, mode, transform: A.Compose):
        # pristine crops live under the shared ORI_ROOT tree (c23 compression)
        self.original_path = os.path.join(ORI_ROOT, 'original_sequences', 'youtube', 'c23')
        self.manipulated_path = os.path.join(data_root, 'manipulated_videos')
        self.df = df
        self.mode = mode  # 'train' / 'validation' / 'test'; informational, not used below
        self.transform = transform

    def __getitem__(self, index):
        # row layout: video, image file, label, originating video, frame number
        video, img_file, label, ori_video, frame = self.df.iloc[index].values
        if label == 1:
            img_path = os.path.join(self.manipulated_path, 'end_to_end_crops', video, img_file)
            image = cv2.imread(img_path, cv2.IMREAD_COLOR)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            # per-pixel manipulation mask saved alongside the crop
            mask_path = os.path.join(self.manipulated_path, 'end_to_end_diffs', video,
                                     '{}_diff.png'.format(img_file[:-4]))
            mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
        else:
            img_path = os.path.join(self.original_path, 'crops', video, img_file)
            image = cv2.imread(img_path, cv2.IMREAD_COLOR)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            # pristine frame: all-zero mask of the same spatial size
            mask = np.zeros((image.shape[0], image.shape[1]), dtype=np.uint8)
        # joint data augmentation on image and mask
        transformed = self.transform(image=image, mask=mask)
        image = transformed["image"]
        mask = transformed["mask"]
        # add channel dim and scale mask to [0, 1]
        # (assumes the transform returns tensors, e.g. via ToTensorV2 — TODO confirm)
        mask = mask.unsqueeze(0) / 255.
        return {'images': image, 'labels': label, 'masks': mask}

    def __len__(self):
        # one sample per dataframe row
        r = self.df.shape[0]
        return r
def get_deeper_forensics_dataloader(model_cfg, args):
    """Build train and validation dataloaders (plus an optional distributed sampler).

    Returns (train_sampler, train_loader, val_loader); train_sampler is None
    when args.distributed is False.
    """
    train_df = pd.read_csv(f'data/{DEEPER_FORENSICS}/data_{DEEPER_FORENSICS}_end_to_end_train.csv')
    train_transform = create_train_transform(model_cfg)
    train_data = DeeperForensicsDataset(data_root=args.data_dir, df=train_df, mode='train', transform=train_transform)
    if args.distributed:
        # each process sees a disjoint shard; the sampler replaces shuffle=True
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_data)
    else:
        train_sampler = None
    train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=(train_sampler is None),
                              sampler=train_sampler, num_workers=args.workers, pin_memory=True, drop_last=True)
    val_df = pd.read_csv(f'data/{DEEPER_FORENSICS}/data_{DEEPER_FORENSICS}_end_to_end_val.csv')
    val_transform = create_val_test_transform(model_cfg)
    val_data = DeeperForensicsDataset(data_root=args.data_dir, df=val_df, mode='validation', transform=val_transform)
    val_loader = DataLoader(val_data, batch_size=args.batch_size, shuffle=False, num_workers=args.workers,
                            pin_memory=True, drop_last=False)
    return train_sampler, train_loader, val_loader
def get_deeper_forensics_test_dataloader(model_cfg, args):
    """Build the test-split dataloader (no shuffling)."""
    test_df = pd.read_csv(f'data/{DEEPER_FORENSICS}/data_{DEEPER_FORENSICS}_end_to_end_test.csv')
    # test_df = test_df.iloc[:57265]
    test_transform = create_val_test_transform(model_cfg)
    test_data = DeeperForensicsDataset(data_root=args.data_dir, df=test_df, mode='test', transform=test_transform)
    # NOTE(review): drop_last=True silently discards up to batch_size-1 test
    # samples — confirm this is intentional for evaluation
    test_loader = DataLoader(test_data, batch_size=args.batch_size, shuffle=False, num_workers=args.workers,
                             pin_memory=True, drop_last=True)
    return test_loader
import numpy as np
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import copy
import time
import logging
from torch.autograd import Variable
import pdb
from src.components.utils import *
from src.components.encoder import *
from src.components.decoder import *
from src.components.self_attention import *
class EncoderDecoder(nn.Module):
    """
    A standard Encoder-Decoder architecture. Base for this and many
    other models: embed the source, encode it, then decode the target
    conditioned on the encoder memory.
    """

    def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):
        super(EncoderDecoder, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.src_embed = src_embed
        self.tgt_embed = tgt_embed
        self.generator = generator

    def forward(self, src, tgt, src_mask, tgt_mask):
        """Take in and process masked source and target sequences."""
        memory = self.encode(src, src_mask)
        return self.decode(memory, src_mask, tgt, tgt_mask)

    def encode(self, src, src_mask):
        """Embed the source sequence and run the encoder stack over it."""
        embedded = self.src_embed(src)
        return self.encoder(embedded, src_mask)

    def decode(self, memory, src_mask, tgt, tgt_mask):
        """Embed the target sequence and run the decoder against the memory."""
        embedded = self.tgt_embed(tgt)
        return self.decoder(embedded, memory, src_mask, tgt_mask)
class Generator(nn.Module):
    """Standard generation step: linear projection to vocab size, then log-softmax."""

    def __init__(self, d_model, vocab):
        super(Generator, self).__init__()
        self.proj = nn.Linear(d_model, vocab)

    def forward(self, x):
        # project to vocabulary logits, then normalize in log space
        logits = self.proj(x)
        return F.log_softmax(logits, dim=-1)
def make_model(args, src_vocab, tgt_vocab, N=6, d_model=512, d_ff=2048, h=8, dropout=0.1):
    """Construct a full transformer encoder-decoder from hyperparameters.

    N: encoder/decoder layer count; d_model: model width; d_ff: feed-forward
    width; h: attention heads; dropout: dropout rate.
    """
    c = copy.deepcopy
    # shared sublayer prototypes, deep-copied into each layer below
    attn = MultiHeadedAttention(h, d_model)
    ff = PositionwiseFeedForward(d_model, d_ff, dropout)
    position = PositionalEncoding(d_model, dropout)
    # NOTE(review): no_position is created but never used below — confirm it is dead
    no_position = NoPositionalEncoding(d_model, dropout)
    model = EncoderDecoder(
        Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),
        Decoder(DecoderLayer(args, d_model, c(attn), c(attn), c(ff), dropout), N),
        nn.Sequential(Embeddings(d_model, src_vocab), c(position)),
        nn.Sequential(Embeddings(d_model, tgt_vocab), c(position)),
        Generator(d_model, tgt_vocab))
    # This was important from their code.
    # Initialize parameters with Glorot / fan_avg.
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)
    return model
def run_epoch(data_iter, model, loss_compute):
    """Run one pass over data_iter; log throughput periodically.

    Returns the loss total divided by the token total (per-token average).
    """
    window_start = time.time()
    total_loss = 0
    total_tokens = 0
    window_tokens = 0
    for step, batch in enumerate(data_iter):
        output = model.forward(batch.src, batch.trg,
                               batch.src_mask, batch.trg_mask)
        ntoks = batch.ntokens.float()
        loss = loss_compute(output, batch.trg_y, ntoks)
        total_loss += loss
        total_tokens += ntoks
        window_tokens += ntoks
        # periodic progress report (every 200 steps, offset by one)
        if step % 200 == 1:
            elapsed = time.time() - window_start + 1e-8
            print("Epoch Step: %d Loss: %f Tokens per Sec: %f" %
                  (step, loss / ntoks, window_tokens / elapsed))
            window_start = time.time()
            window_tokens = 0
    return total_loss / total_tokens
"""
Evaluate the model.
"""
import numpy as np
import json
import pickle
import os
from model import trainer
from itertools import product
from model.displacement import TARGETS, LAGS
from model.displacement.model import Trainer
from model.displacement.features import Generator
# One evaluation period per (prediction year, lag) combination:
# train on 1995..(Y - lag) and predict year Y, for Y in 2010..2015.
PERIODS = [{'train_years': (1995, Y - lg),
            'predict_year': Y,
            'lag': lg} for Y in np.arange(2010, 2016, 1) for lg in LAGS]
# Path to the data-source configuration file
CONFIGURATION = "configuration.json"
# Configuration of data sources
with open(CONFIGURATION, 'rt') as infile:
    config = json.load(infile)
# Countries with displacement-forecast support, as declared in the config
COUNTRIES = config['supported-countries']['displacement']
def run():
    """Run the evaluation over every (country, period) combination and persist results.

    NOTE(review): the loop body contained only comments, which is a
    SyntaxError in Python — an explicit placeholder is added so the module
    imports; the actual train/score/error steps still need to be filled in.
    """
    results = []
    for c, p in product(COUNTRIES, PERIODS):
        # TODO: generate the problem (training) instance, generate the
        # scoring instance, and measure error for country `c`, period `p`;
        # append the outcome to `results`.
        pass
    # persist whatever was accumulated (currently an empty list)
    with open("result.pkl", 'wb') as outfile:
        pickle.dump(results, outfile)
import tensorflow as tf
print('Using Tensorflow '+tf.__version__)
import matplotlib.pyplot as plt
import sys
# sys.path.append('../')
import os
import csv
import numpy as np
from PIL import Image
import time
import cv2
import src.siamese as siam
from src.visualization import show_frame, show_crops, show_scores
# Expected frame geometry (pixels) of the incoming video stream
width = 640
height = 480
# gpu_device = 2
# os.environ['CUDA_VISIBLE_DEVICES'] = '{}'.format(gpu_device)
# read default parameters and override with custom ones
def tracker(hp, run, design, video, pos_x, pos_y, target_w, target_h, final_score_sz, image, templates_z, scores, process1, queue):
    """SiamFC-style single-object tracking loop over frames pulled from `queue`.

    hp/run/design are parameter namespaces; (pos_x, pos_y, target_w, target_h)
    is the initial target box; `image`, `templates_z`, `scores` are TF graph
    tensors built elsewhere (see src.siamese); `video` is the first frame.

    NOTE(review): `process1` is unused here, and the `while True` loop below
    never breaks, so the coordinator shutdown after it is unreachable —
    confirm the intended termination path.
    """
    bboxes = []  # kept for API parity; only the latest current_boxes is used
    # scale pyramid: hp.scale_num geometric steps of hp.scale_step around 1.0
    scale_factors = hp.scale_step**np.linspace(-np.ceil(hp.scale_num/2), np.ceil(hp.scale_num/2), hp.scale_num)
    # cosine window to penalize large displacements
    hann_1d = np.expand_dims(np.hanning(final_score_sz), axis=0)
    penalty = np.transpose(hann_1d) * hann_1d
    penalty = penalty / np.sum(penalty)
    # exemplar (z) and search (x) crop sizes from target size plus context margin
    context = design.context*(target_w+target_h)
    z_sz = np.sqrt(np.prod((target_w+context)*(target_h+context)))
    x_sz = float(design.search_sz) / design.exemplar_sz * z_sz
    # thresholds to saturate patches shrinking/growing
    # NOTE(review): min_z/max_z/min_x/max_x are computed but never applied below
    min_z = hp.scale_min * z_sz
    max_z = hp.scale_max * z_sz
    min_x = hp.scale_min * x_sz
    max_x = hp.scale_max * x_sz
    run_opts = {}
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        # Coordinate the loading of image files.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        frame_idx = 1
        print (image, type(image), '*'*10)
        # embed the exemplar template once from the initial frame
        image_, templates_z_ = sess.run(
            [image, templates_z],
            feed_dict={
                siam.pos_x_ph: pos_x,
                siam.pos_y_ph: pos_y,
                siam.z_sz_ph: z_sz,
                image: video})
        new_templates_z_ = templates_z_
        while True :
            frame_idx += 1
            # blocking read of the next frame
            video = queue.get()
            scaled_exemplar = z_sz * scale_factors
            scaled_search_area = x_sz * scale_factors
            scaled_target_w = target_w * scale_factors
            scaled_target_h = target_h * scale_factors
            # correlate the stored template against the multi-scale search crops
            image_, scores_ = sess.run(
                [image, scores],
                feed_dict={
                    siam.pos_x_ph: pos_x,
                    siam.pos_y_ph: pos_y,
                    siam.x_sz0_ph: scaled_search_area[0],
                    siam.x_sz1_ph: scaled_search_area[1],
                    siam.x_sz2_ph: scaled_search_area[2],
                    templates_z: np.squeeze(templates_z_),
                    image: video,
                }, **run_opts)
            scores_ = np.squeeze(scores_)
            # penalize change of scale (assumes scale_num == 3: indices 0 and 2
            # are the non-unit scales — TODO confirm)
            scores_[0,:,:] = hp.scale_penalty*scores_[0,:,:]
            scores_[2,:,:] = hp.scale_penalty*scores_[2,:,:]
            # find scale with highest peak (after penalty)
            new_scale_id = np.argmax(np.amax(scores_, axis=(1,2)))
            # smooth the crop/target sizes toward the winning scale (rate hp.scale_lr)
            x_sz = (1-hp.scale_lr)*x_sz + hp.scale_lr*scaled_search_area[new_scale_id]
            target_w = (1-hp.scale_lr)*target_w + hp.scale_lr*scaled_target_w[new_scale_id]
            target_h = (1-hp.scale_lr)*target_h + hp.scale_lr*scaled_target_h[new_scale_id]
            # select response of the winning scale and normalize it
            score_ = scores_[new_scale_id,:,:]
            score_ = score_ - np.min(score_)
            score_ = score_/np.sum(score_)
            # apply displacement (cosine-window) penalty
            score_ = (1-hp.window_influence)*score_ + hp.window_influence*penalty
            pos_x, pos_y = _update_target_position(pos_x, pos_y, score_, final_score_sz, design.tot_stride, design.search_sz, hp.response_up, x_sz)
            # convert <cx,cy,w,h> to <x,y,w,h>
            current_boxes = [pos_x-target_w/2, pos_y-target_h/2, target_w, target_h]
            print (time.time())
            # update the target template with a rolling average (rate hp.z_lr)
            if hp.z_lr>0:
                new_templates_z_ = sess.run([templates_z], feed_dict={
                    siam.pos_x_ph: pos_x,
                    siam.pos_y_ph: pos_y,
                    siam.z_sz_ph: z_sz,
                    image: image_
                })
                templates_z_=(1-hp.z_lr)*np.asarray(templates_z_) + hp.z_lr*np.asarray(new_templates_z_)
            print (time.time())
            # update template patch size
            z_sz = (1-hp.scale_lr)*z_sz + hp.scale_lr*scaled_exemplar[new_scale_id]
            if run.visualization:
                show_frame(video, current_boxes, 1)
        # unreachable: the loop above never breaks (see NOTE in the docstring)
        coord.request_stop()
        coord.join(threads)
    plt.close('all')
    return
def _update_target_position(pos_x, pos_y, score, final_score_sz, tot_stride, search_sz, response_up, x_sz):
# find location of score maximizer
p = np.asarray(np.unravel_index(np.argmax(score), np.shape(score)))
# displacement from the center in search area final representation ...
center = float(final_score_sz - 1) / 2
disp_in_area = p - center
# displacement from the center in instance crop
disp_in_xcrop = disp_in_area * float(tot_stride) / response_up
# displacement from the center in instance crop (in frame coordinates)
disp_in_frame = disp_in_xcrop * x_sz / search_sz
# *position* within frame in frame coordinates
pos_y, pos_x = pos_y + disp_in_frame[0], pos_x + disp_in_frame[1]
return pos_x, pos_y | |
from striped.client import CouchBaseBackend
import numpy as np
from numpy.lib.recfunctions import rec_append_fields
import fitsio, healpy as hp
from astropy.io.fits import Header
from striped.common import Tracer
T = Tracer()
def dict_to_recarray(dct, keys=None):
    """Convert a dict of equal-length numpy arrays into a numpy record array.

    keys optionally fixes the column order; by default all dict keys are used.
    Raises AssertionError if columns differ in length.

    Fix: the original indexed `dct.keys()` and passed a raw `zip(...)` to
    np.array — both fail under Python 3 (dict views are not indexable, zip is
    lazy); wrapping in list() is correct on both Python 2 and 3.
    """
    with T["dict_to_recarray"]:
        keys = list(keys or dct.keys())
        types = [(k, dct[k].dtype, dct[k].shape[1:]) for k in keys]
        l = len(dct[keys[0]])
        # every column must match the first column's length
        for k in keys:
            assert len(dct[k]) == l, "Column %s length %d != first column length %d" % (k, len(dct[k]), l)
        data = list(zip(*[dct[k] for k in keys]))
        return np.array(data, types)
def recarray_to_dict(rec):
    """Convert a numpy record array into {column name: contiguous array}."""
    with T["recarray_to_dict"]:
        # ascontiguousarray detaches each column view into a compact buffer
        return {name: np.ascontiguousarray(rec[name]) for name in rec.dtype.names}
def add_observations(backend, in_observations_data, dataset_name):
    """Merge new per-object observations into an existing striped dataset.

    in_observations_data is a record array carrying at least `rgid` and
    `OBJECT_ID` columns plus the Observation branch columns. For each row
    group (rgid) touched, the existing observations are fetched, the new rows
    are spliced in after each object's existing segment, and the result is
    written back. (Python 2 code: uses xrange and byte-encoded column names.)
    """
    schema = backend.schema(dataset_name)
    # Observation branch column names, plain and with the "Observation." prefix
    obs_columns = [cn.encode("ascii", "ignore") for cn in schema["branches"]["Observation"].keys()]
    prefixed_obs_columns = ["Observation.%s" % (cn,) for cn in obs_columns]
    # dtype (type, shape) per column, taken from the schema
    obs_attr_dtypes = {}
    for cn, desc in schema["branches"]["Observation"].items():
        cn= cn.encode("ascii", "ignore")
        dt = (desc["dtype"].encode("ascii","ignore"), tuple(desc["shape"]))
        obs_attr_dtypes[cn] = dt
    obs_attr_shapes = {cn:desc["shape"] for cn, desc in schema["branches"]["Observation"].items()}
    prefixed_to_cn = dict(zip(prefixed_obs_columns, obs_columns))
    cn_to_prefixed = dict(zip(obs_columns, prefixed_obs_columns))
    # sort so rows group by rgid, then object id
    in_observations_data = np.sort(in_observations_data, order=["rgid","OBJECT_ID"])
    in_rgids = in_observations_data["rgid"].copy()
    # apply dtypes according to the schema; rgid is not in the schema, so it is dropped here
    in_observations_data = np.asarray(in_observations_data, list(obs_attr_dtypes.items()))
    in_oids = in_observations_data["OBJECT_ID"].copy()
    # rename input columns to add the Observation. prefix
    in_observations_data.dtype.names = [cn_to_prefixed.get(cn,cn) for cn in in_observations_data.dtype.names]
    with T["input_by_rgid"]:
        # index input rows as {rgid: {object_id: row index}}; views only, no copying
        input_by_rgid = {}
        for irow in xrange(len(in_observations_data)):
            rgid, oid = in_rgids[irow], in_oids[irow]
            by_oid = input_by_rgid.setdefault(rgid, {})
            # at most one new observation per object per exposure
            assert not oid in by_oid, "Warning: Found more than 1 observation for a single object %d in the exposire" % (oid,)
            by_oid[oid] = irow
    with T["rg_loop"]:
        # loop over all rg's found in the input data and update them
        for rgid, in_rg in input_by_rgid.items():
            with T["rg_loop/get_db_data"]:
                # get observations and objects from the DB
                db_observations_data = backend.get_arrays(dataset_name, rgid, prefixed_obs_columns)
                db_objects_data = backend.get_arrays(dataset_name, rgid, ["Observation.@size","OBJECT_ID"])
                nobs_column = db_objects_data["Observation.@size"]
                oid_column = db_objects_data["OBJECT_ID"]
            with T["rg_loop/reshape"]:
                # apply correct per-column shapes to the flat arrays from the DB
                for pn in db_observations_data.keys():
                    if pn in prefixed_to_cn:
                        cn = prefixed_to_cn[pn]
                        shape = tuple(obs_attr_shapes[cn])
                        if len(shape):
                            db_observations_data[pn] = db_observations_data[pn].reshape((-1,)+shape)
            with T["rg_loop/convert"]:
                # convert db data into a record array with matching column order
                db_observations_data = dict_to_recarray(db_observations_data, keys=in_observations_data.dtype.names)
                assert db_observations_data.dtype.names == in_observations_data.dtype.names
            with T["rg_loop/break"]:
                # walk objects in order, copying each existing segment and
                # appending the new observation (if any) right after it
                object_segments = {}
                merged = []
                new_size = nobs_column.copy()
                j = 0
                for i, (oid, old_size) in enumerate(zip(oid_column, nobs_column)):
                    if old_size > 0:
                        merged.append(db_observations_data[j:j+old_size])
                    if oid in in_rg:
                        irow = in_rg[oid]
                        merged.append(in_observations_data[irow:irow+1])
                        new_size[i] += 1
                    j += old_size
                out_observations_data = np.concatenate(merged)
                out_observations_data = recarray_to_dict(out_observations_data)
                out_observations_data["Observation.@size"] = new_size
            with T["rg_loop/put_arrays"]:
                # write the merged columns back for this row group
                backend.put_arrays(dataset_name, [(rgid, key, array) for key, array in
                        out_observations_data.items()])
if __name__ == "__main__":
    import sys, time, getopt
    # Command-line driver: merge a FITS file of object-observation matches
    # into the given striped dataset. (Python 2 script.)
    Usage = """
python add_observations.py [options] <bucket name> <dataset name> <matches_file.fits>
options:
-c <couchbase config> - default environment COUCHBASE_BACKEND_CFG
"""
    opts, args = getopt.getopt(sys.argv[1:], "h?c:")
    opts = dict(opts)
    if '-h' in opts or '-?' in opts or len(args) != 3:
        print Usage
        sys.exit(1)
    # NOTE(review): the -c value is parsed but never passed to the backend — confirm
    config = opts.get("-c")
    bucket, dataset, path = args
    data = fitsio.read(path)
    print "%d object-observation pairs in the input file %s" % (len(data), path)
    backend = CouchBaseBackend(bucket)
    add_observations(backend, data, dataset)
    # dump timing stats collected by the Tracer
    T.printStats()
import os
import numpy as np
import chainer
from chainer import Chain, Variable
from mnist_cnn import CnnModel
def predict(img):
    """Classify a single image with the pretrained CNN and return the class index.

    img: 2-D array-like pixel data; it is wrapped to shape (1, 1, H, W)
    before the forward pass — presumably MNIST-sized, TODO confirm.

    Fix: the original computed the prediction but only printed it; the value
    is now also returned (backward compatible — existing callers that ignore
    the return value are unaffected).
    """
    print("lets predict")
    model=CnnModel()
    # load the trained weights saved by the training script
    chainer.serializers.load_npz(os.path.join('result','cnn_10.npz'),model)
    model.to_cpu()
    # wrap as a (batch, channel, H, W) Variable
    x=Variable(np.array([[img]]))
    result = np.argmax(model.fwd(x).data)
    print('result',result)
    return result
import numpy as np
from astropy import units as u
import pytest
from ctapipe.image.cleaning import tailcuts_clean
from ctapipe.image.hillas import hillas_parameters, HillasParameterizationError
from ctapipe.io import event_source
from ctapipe.reco.HillasReconstructor import HillasReconstructor, HillasPlane
from ctapipe.reco.reco_algorithms import TooFewTelescopesException, InvalidWidthException
from ctapipe.utils import get_dataset_path
from astropy.coordinates import SkyCoord, AltAz
def test_estimator_results():
    """
    creating some planes pointing in different directions (two
    north-south, two east-west) and that have a slight position errors (+-
    0.1 m in one of the four cardinal directions

    NOTE(review): this test only prints the fitted direction and asserts
    nothing — consider adding an assertion on the fit result.
    """
    horizon_frame = AltAz()
    # north-south pair: planes crossing near alt=45 deg at az=45
    p1 = SkyCoord(alt=43 * u.deg, az=45 * u.deg, frame=horizon_frame)
    p2 = SkyCoord(alt=47 * u.deg, az=45 * u.deg, frame=horizon_frame)
    circle1 = HillasPlane(p1=p1, p2=p2, telescope_position=[0, 1, 0] * u.m)
    # east-west pair: planes crossing near alt=45 deg at az=90
    p1 = SkyCoord(alt=44 * u.deg, az=90 * u.deg, frame=horizon_frame)
    p2 = SkyCoord(alt=46 * u.deg, az=90 * u.deg, frame=horizon_frame)
    circle2 = HillasPlane(p1=p1, p2=p2, telescope_position=[1, 0, 0] * u.m)
    p1 = SkyCoord(alt=44.5 * u.deg, az=45 * u.deg, frame=horizon_frame)
    p2 = SkyCoord(alt=46.5 * u.deg, az=45 * u.deg, frame=horizon_frame)
    circle3 = HillasPlane(p1=p1, p2=p2, telescope_position=[0, -1, 0] * u.m)
    p1 = SkyCoord(alt=43.5 * u.deg, az=90 * u.deg, frame=horizon_frame)
    p2 = SkyCoord(alt=45.5 * u.deg, az=90 * u.deg, frame=horizon_frame)
    circle4 = HillasPlane(p1=p1, p2=p2, telescope_position=[-1, 0, 0] * u.m)
    # creating the fit class and setting the the great circle member
    fit = HillasReconstructor()
    fit.hillas_planes = {1: circle1, 2: circle2, 3: circle3, 4: circle4}
    # performing the direction fit with the minimisation algorithm
    # and a seed that is perpendicular to the up direction
    dir_fit_minimise, _ = fit.estimate_direction()
    print("direction fit test minimise:", dir_fit_minimise)
    print()
def test_h_max_results():
    """
    creating some planes pointing in different directions (two
    north-south, two east-west) and that have a slight position errors (+-
    0.1 m in one of the four cardinal directions

    All planes are degenerate (p1 == p2 at alt=0), so the reconstructed
    max height should be ~0.
    """
    horizon_frame = AltAz()
    p1 = SkyCoord(alt=0 * u.deg, az=45 * u.deg, frame=horizon_frame)
    p2 = SkyCoord(alt=0 * u.deg, az=45 * u.deg, frame=horizon_frame)
    circle1 = HillasPlane(p1=p1, p2=p2, telescope_position=[0, 1, 0] * u.m)
    p1 = SkyCoord(alt=0 * u.deg, az=90 * u.deg, frame=horizon_frame)
    p2 = SkyCoord(alt=0 * u.deg, az=90 * u.deg, frame=horizon_frame)
    circle2 = HillasPlane(p1=p1, p2=p2, telescope_position=[1, 0, 0] * u.m)
    p1 = SkyCoord(alt=0 * u.deg, az=45 * u.deg, frame=horizon_frame)
    p2 = SkyCoord(alt=0 * u.deg, az=45 * u.deg, frame=horizon_frame)
    circle3 = HillasPlane(p1=p1, p2=p2, telescope_position=[0, -1, 0] * u.m)
    p1 = SkyCoord(alt=0 * u.deg, az=90 * u.deg, frame=horizon_frame)
    p2 = SkyCoord(alt=0 * u.deg, az=90 * u.deg, frame=horizon_frame)
    circle4 = HillasPlane(p1=p1, p2=p2, telescope_position=[-1, 0, 0] * u.m)
    # creating the fit class and setting the the great circle member
    fit = HillasReconstructor()
    fit.hillas_planes = {1: circle1, 2: circle2, 3: circle3, 4: circle4}
    # performing the direction fit with the minimisation algorithm
    # and a seed that is perpendicular to the up direction
    h_max_reco = fit.estimate_h_max()
    print("h max fit test minimise:", h_max_reco)
    # the results should be close to the direction straight up
    np.testing.assert_allclose(h_max_reco.value, 0, atol=1e-8)
    # np.testing.assert_allclose(fitted_core_position.value, [0, 0], atol=1e-3)
def test_reconstruction():
    """
    a test of the complete fit procedure on one event including:
    • tailcut cleaning
    • hillas parametrisation
    • HillasPlane creation
    • direction fit
    • position fit
    in the end, proper units in the output are asserted """
    filename = get_dataset_path("gamma_test_large.simtel.gz")
    source = event_source(filename, max_events=10)
    horizon_frame = AltAz()
    # count how many events had enough telescopes to be reconstructed
    reconstructed_events = 0
    for event in source:
        # use the simulated (MC) direction as the common array pointing
        array_pointing = SkyCoord(
            az=event.mc.az,
            alt=event.mc.alt,
            frame=horizon_frame
        )
        hillas_dict = {}
        telescope_pointings = {}
        for tel_id in event.dl0.tels_with_data:
            geom = source.subarray.tel[tel_id].camera.geometry
            telescope_pointings[tel_id] = SkyCoord(
                alt=event.pointing.tel[tel_id].altitude,
                az=event.pointing.tel[tel_id].azimuth,
                frame=horizon_frame,
            )
            # integrate the raw waveform over time to get a per-pixel charge
            pmt_signal = event.r0.tel[tel_id].waveform[0].sum(axis=1)
            # two-level tailcut cleaning; zero out pixels outside the mask
            mask = tailcuts_clean(geom, pmt_signal,
                                  picture_thresh=10., boundary_thresh=5.)
            pmt_signal[mask == 0] = 0
            try:
                moments = hillas_parameters(geom, pmt_signal)
                hillas_dict[tel_id] = moments
            except HillasParameterizationError as e:
                # image too faint/degenerate for this telescope; skip it
                print(e)
                continue
        # at least two telescope images are required for stereo reconstruction
        if len(hillas_dict) < 2:
            continue
        else:
            reconstructed_events += 1
        # The three reconstructions below gives the same results
        # (with and without explicit per-telescope pointings)
        fit = HillasReconstructor()
        fit_result_parall = fit.predict(hillas_dict, source.subarray, array_pointing)
        fit = HillasReconstructor()
        fit_result_tel_point = fit.predict(hillas_dict, source.subarray, array_pointing, telescope_pointings)
        for key in fit_result_parall.keys():
            print(key, fit_result_parall[key], fit_result_tel_point[key])
        # unit conversions double as a check that the outputs carry proper units
        fit_result_parall.alt.to(u.deg)
        fit_result_parall.az.to(u.deg)
        fit_result_parall.core_x.to(u.m)
        assert fit_result_parall.is_valid
        # NOTE(review): this assert sits inside the event loop, so it runs
        # after the first reconstructed event rather than once at the end —
        # presumably intended to be dedented; confirm before changing.
        assert reconstructed_events > 0
def test_invalid_events():
    """
    The HillasReconstructor is supposed to fail
    in these cases:
    - less than two telescopes
    - any width is NaN
    - any width is 0

    This test uses the same sample simtel file as
    test_reconstruction(). As there are no invalid events in this
    file, multiple hillas_dicts are constructed to make sure
    Exceptions get thrown in the mentioned edge cases.

    Test will fail if no Exception or another Exception gets thrown."""
    filename = get_dataset_path("gamma_test_large.simtel.gz")
    fit = HillasReconstructor()
    tel_azimuth = {}
    tel_altitude = {}
    source = event_source(filename, max_events=10)
    subarray = source.subarray
    for event in source:
        hillas_dict = {}
        for tel_id in event.dl0.tels_with_data:
            geom = source.subarray.tel[tel_id].camera.geometry
            tel_azimuth[tel_id] = event.pointing.tel[tel_id].azimuth
            tel_altitude[tel_id] = event.pointing.tel[tel_id].altitude
            # per-pixel charge from the raw waveform, then tailcut cleaning
            pmt_signal = event.r0.tel[tel_id].waveform[0].sum(axis=1)
            mask = tailcuts_clean(geom, pmt_signal,
                                  picture_thresh=10., boundary_thresh=5.)
            pmt_signal[mask == 0] = 0
            try:
                moments = hillas_parameters(geom, pmt_signal)
                hillas_dict[tel_id] = moments
            except HillasParameterizationError:
                continue
        # construct a dict only containing the last telescope events
        # (#telescopes < 2)
        hillas_dict_only_one_tel = dict()
        hillas_dict_only_one_tel[tel_id] = hillas_dict[tel_id]
        with pytest.raises(TooFewTelescopesException):
            fit.predict(hillas_dict_only_one_tel, subarray, tel_azimuth, tel_altitude)
        # construct a hillas dict with the width of the last event set to 0
        # (any width == 0)
        # NOTE(review): dict.copy() is shallow, so the moments objects are
        # shared with hillas_dict — the width mutations below leak into every
        # copy; harmless for these raise-checks, but confirm before reuse.
        hillas_dict_zero_width = hillas_dict.copy()
        hillas_dict_zero_width[tel_id]['width'] = 0 * u.m
        with pytest.raises(InvalidWidthException):
            fit.predict(hillas_dict_zero_width, subarray, tel_azimuth, tel_altitude)
        # construct a hillas dict with the width of the last event set to np.nan
        # (any width == nan)
        # BUG FIX: the original assigned the NaN to hillas_dict_zero_width,
        # so hillas_dict_nan_width never actually contained a NaN width.
        hillas_dict_nan_width = hillas_dict.copy()
        hillas_dict_nan_width[tel_id]['width'] = np.nan * u.m
        with pytest.raises(InvalidWidthException):
            fit.predict(hillas_dict_nan_width, subarray, tel_azimuth, tel_altitude)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 9/15/20 11:16 PM
# @Author : anonymous
# @File : pyquil-topo.py
import networkx as nx
from pyquil.api._quantum_computer import _get_qvm_with_topology
from pyquil.device import NxDevice, gates_in_isa
from pyquil import Program, get_qc
from pyquil.gates import *
def make_circuit() -> Program:
    """Build the demo program: flip qubits 1 and 2, then apply a
    Toffoli (CCNOT) with them as controls onto qubit 0."""
    gate_sequence = [X(1), X(2), CCNOT(1, 2, 0)]  # number=8
    circuit = Program()
    for gate in gate_sequence:
        circuit += gate
    # circuit end
    return circuit
if __name__ == '__main__':
    #qubits = [0, 1, 2, 3, 4, 5, 6] # qubits are numbered by octagon
    # Build an undirected topology graph from explicit edge pairs.
    edges = [(0, 1), (1, 0), (2, 3), (3, 2)] # second octagon
    topo = nx.from_edgelist(edges)
    # NOTE(review): `device` is created but never used below — presumably
    # left over from an earlier approach; confirm before removing.
    device = NxDevice(topology=topo)
    # NOTE(review): _get_qvm_with_topology is a private pyquil API and may
    # change between versions.
    qc = _get_qvm_with_topology(name="line",topology=topo)
    # Run the circuit 10 times and print the measured bitstrings.
    print(qc.run_and_measure(make_circuit(),10))
    #pyquil seems should use this way to do the topology
"""
Copyright (c) 2019-2022, Zihao Ding/Carnegie Mellon University
All rights reserved.
********************************************************************
Project: eu2qu.py
MODULE: util
Author: Zihao Ding, Carnegie Mellon University
Brief:
-------------
Refer to source code in https://github.com/marcdegraef/3Drotations
convert Euler angles to quaternions
Date:
-------------
2022/03/17 ZD 1.0 public version
"""
from math import sin, cos
import numpy as np
def eu2qu(eu):
    '''
    Convert Euler angles to a quaternion.

    input: euler angles, array-like (3,), in radians
    output: quaternion, array-like (4,), scalar-first with a
            non-negative scalar part

    Uses the sign convention eps = +1.
    '''
    eps = 1
    phi1, big_phi, phi2 = eu[0], eu[1], eu[2]
    sigma = 0.5 * (phi1 + phi2)
    delta = 0.5 * (phi1 - phi2)
    c = cos(big_phi / 2)
    s = sin(big_phi / 2)
    components = [c * cos(sigma),
                  -eps * s * cos(delta),
                  -eps * s * sin(delta),
                  -eps * c * sin(sigma)]
    # q and -q represent the same rotation; pick the representative
    # with a non-negative scalar part.
    if components[0] < 0:
        components = [-value for value in components]
    # set values very close to 0 as 0
    # thr = 10**(-10)
    # q[np.where(np.abs(q)<thr)] = 0.
    return np.array(components, dtype=float)
from typing import Any
import jax
from jax import numpy as jnp
import numpy as np
from flax import struct
@struct.dataclass
class TargetState:
    # Immutable per-step environment state. Declared as a flax struct so it
    # is a registered JAX pytree and can flow through jit/vmap transforms.
    done: jnp.ndarray    # scalar bool-like: whether the episode has ended
    reward: jnp.ndarray  # scalar: reward for the most recent transition
    obs: jnp.ndarray     # observation vector
class OneStepEnvironment(object):
    """Trivial environment that terminates after a single step.

    reset() yields a zero observation; the first step() returns reward 1
    and marks the episode done, and any step taken after `done` returns
    reward 0.
    """

    def __init__(self):
        self.action_size = 1

    def reset(self, seed=None):
        # `seed` is accepted for interface compatibility but unused.
        # FIX: `np.bool` (a plain alias for builtin bool) was removed in
        # NumPy 1.24; use the builtin instead — identical dtype.
        return TargetState(done=jnp.zeros([], dtype=bool),
                           reward=jnp.zeros([]),
                           obs=jnp.zeros([1]))

    def step(self, state, action):
        del action  # Unused.
        # Reward only the first transition: once `done`, reward is 0.
        return TargetState(obs=jnp.ones_like(state.obs),
                           done=jnp.ones([], dtype=bool),
                           reward=jnp.where(state.done, 0., 1.))
class DiscreteTargetEnvironment(object):
    """Grid-walk environment: each action moves +-1 along one of `dim`
    axes; the agent is rewarded for reaching +size on the first axis."""

    def __init__(self, size=1, dim=1):
        self._size = size
        self._dim = dim
        self.action_size = dim * 2

    def reset(self, seed=None):
        return TargetState(
            done=False,
            reward=jnp.zeros([]),
            obs=jnp.zeros([self._dim], dtype=jnp.int32))

    def step(self, state, action):
        # Decode the integer action (in [0, dim*2)) into an axis index
        # and a +1/-1 direction along that axis.
        axis = action % self._dim
        direction = (action // self._dim) * 2 - 1
        move = direction * jax.nn.one_hot(
            axis, num_classes=self._dim, dtype=state.obs.dtype)
        # Apply the move, clamping every coordinate to [-size, size].
        proposed = state.obs + move
        lower_bound = -self._size * jnp.ones_like(proposed)
        upper_bound = self._size * jnp.ones_like(proposed)
        proposed = jnp.minimum(jnp.maximum(proposed, lower_bound), upper_bound)
        # Goal reached when the position equals `size` along the first axis
        # (and zero elsewhere).
        reached = jnp.all(proposed == self._size *
                          jax.nn.one_hot(0, num_classes=self._dim))
        # Once done, freeze the state and emit zero reward.
        return TargetState(
            obs=jnp.where(state.done, state.obs, proposed),
            done=jnp.where(state.done, state.done, reached),
            reward=jnp.where(state.done, 0.,
                             jnp.where(reached, 1., 0.)))
class ContinuousEnvironmentStateless(object):
    """One-shot continuous environment: the reward is the negative squared
    distance between the action and (part of) a random Gaussian target."""

    def __init__(self, dim=1, size=100.):
        self._dim = dim
        self._size = size

    def reset(self, seed):
        # Target observation drawn from N(0, size^2) per coordinate.
        return TargetState(done=False,
                           reward=jnp.zeros([]),
                           obs=self._size *
                           jax.random.normal(seed, shape=[self._dim]))

    def step(self, state, action):
        # action is a vector of shape [dim]
        # NOTE(review): the loss compares against state.obs[:-1], i.e. it
        # drops the last observation entry — presumably intentional (action
        # of size dim-1?); confirm against callers.
        loss = jnp.linalg.norm(action - state.obs[:-1])**2
        # FIX: `np.bool` (a plain alias for builtin bool) was removed in
        # NumPy 1.24; use the builtin instead — identical dtype.
        return TargetState(obs=state.obs,
                           done=jnp.ones([], dtype=bool),
                           reward=-loss)
class ContinuousEnvironmentInvertMatrix(object):
    """Continuous environment where the action is first transformed by a
    fixed random matrix before moving the state; reaching the origin
    (within goal_tolerance) ends the episode."""

    def __init__(self,
                 size=2,
                 dim=1,
                 goal_tolerance=0.5,
                 cost_of_living=0.1,
                 shape_reward=True):
        self._size = size
        self._dim = dim
        self._goal_tolerance = goal_tolerance
        self._shape_reward = shape_reward
        self._cost_of_living = cost_of_living
        # Fixed random mixing matrix; PRNGKey(0) makes it identical across
        # instances of the same dim.
        self._matrix = jax.random.normal(jax.random.PRNGKey(0),
                                         shape=[self._dim, self._dim])

    def reset(self, seed):
        # Start position drawn from N(0, size^2) per coordinate.
        return TargetState(done=False,
                           reward=jnp.zeros([]),
                           obs=self._size *
                           jax.random.normal(seed, shape=[self._dim]))

    def step(self, state, action):
        # action is a vector of shape [dim]
        # Move by the matrix-transformed action, then clamp each coordinate
        # to [-size*10, size*10].
        new_state_pos = state.obs + jnp.dot(self._matrix, action)
        new_state_pos = jnp.minimum(
            jnp.maximum(new_state_pos,
                        -self._size * 10 * jnp.ones_like(new_state_pos)),
            self._size * 10 * jnp.ones_like(new_state_pos))
        distance_to_goal = jnp.linalg.norm(new_state_pos)
        new_state_done = distance_to_goal < self._goal_tolerance
        # Inside the tolerance: reward grows toward 1 as the distance
        # shrinks; outside: a flat cost of living is charged.
        reward = jnp.where(new_state_done,
                           1. - distance_to_goal / self._goal_tolerance,
                           -self._cost_of_living)
        if self._shape_reward:
            # Potential-based shaping: bonus equal to the progress made
            # toward the goal this step.
            reward += jnp.linalg.norm(state.obs) - distance_to_goal
        # Once done, freeze the state and emit zero reward.
        return TargetState(obs=jnp.where(state.done, state.obs, new_state_pos),
                           done=jnp.where(state.done, state.done,
                                          new_state_done),
                           reward=jnp.where(state.done, 0., reward))
# Ger Hanlon, 13.04.2018
# The describe function for the 2018 Iris Data-Set project
import pandas as pd # Import the pandas library as pd - used for data manipulation and analysis
import numpy as np # Import the numpy library as np - adds support for large, multi-dimensional
# arrays and matrices, along with a large collection of high-level mathematical functions on them
# NOTE(review): numpy, matplotlib and pyplot are imported but unused below —
# presumably kept for later plotting steps of the project; confirm before removing.
import matplotlib as pl #Import the matplotlib library as pl
import matplotlib.pyplot as mpl #Import the plotting framework from matplotlib as mpl
df = pd.read_csv('Iris head.csv') #read the csv file 'Iris head.csv' into the dataframe (df)
print(df.describe()) #Print summary statistics (count, mean, std, quartiles) for each numeric column
# Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create TF graphs for calculating log-mel-spectral features.
NOTE: This code is very experimental and will likely change, both in interface
and what it outputs.
The single published method is build_mel_calculation_graph, which
will assemble a TF graph from a provided waveform input vector
through to a (num_frames, frame_width, num_mel_bins) tensor of log-
transformed mel spectrogram patches, suitable for feeding the input
to a typical classifier. All the mel calculation parameters
are available as options, but default to their standard values
(e.g. frame_width=96, frame_hop=10). The input waveform can have
size (None,), meaning it will be specified at run-time.
with tflite_compatible=True, the returned graph is constructed only
from tflite-compatible ops (i.e., it uses matmul for the DFT, and
explicitly unrolled framing). In this case, the input waveform tensor
must have an explicit size at graph-building time.
"""
import fractions
import math
from magenta.music import mfcc_mel
import numpy as np
import tensorflow.compat.v1 as tf
def _stft_magnitude_full_tf(waveform_input, window_length_samples,
                            hop_length_samples, fft_length):
    """Calculate STFT magnitude (spectrogram) using tf.signal ops."""
    # Complex STFT first, then take the magnitude under the expected name.
    complex_stft = tf.signal.stft(
        waveform_input,
        frame_length=window_length_samples,
        frame_step=hop_length_samples,
        fft_length=fft_length)
    return tf.abs(complex_stft, name='magnitude_spectrogram')
def _dft_matrix(dft_length):
"""Calculate the full DFT matrix in numpy."""
omega = (0 + 1j) * 2.0 * np.pi / float(dft_length)
# Don't include 1/sqrt(N) scaling, tf.signal.rfft doesn't apply it.
return np.exp(omega * np.outer(np.arange(dft_length), np.arange(dft_length)))
def _naive_rdft(signal_tensor, fft_length):
    """Implement real-input Fourier Transform by matmul.

    Returns two tensors: the real and imaginary parts of the DFT of the
    last axis of `signal_tensor`, keeping only the first fft_length//2 + 1
    ("positive frequency") bins.
    """
    # We are right-multiplying by the DFT matrix, and we are keeping
    # only the first half ("positive frequencies").
    # So discard the second half of rows, but transpose the array for
    # right-multiplication.
    # The DFT matrix is symmetric, so we could have done it more
    # directly, but this reflects our intention better.
    complex_dft_matrix_kept_values = _dft_matrix(fft_length)[:(
        fft_length // 2 + 1), :].transpose()
    # Bake the real/imaginary parts in as float32 constants so the matmuls
    # stay tflite-compatible (no complex dtypes).
    real_dft_tensor = tf.constant(
        np.real(complex_dft_matrix_kept_values).astype(np.float32),
        name='real_dft_matrix')
    imag_dft_tensor = tf.constant(
        np.imag(complex_dft_matrix_kept_values).astype(np.float32),
        name='imaginary_dft_matrix')
    # NOTE(review): `.shape[-1].value` is TF1-style Dimension access; under
    # TF2 this would be `.shape[-1]` — confirm the targeted TF version.
    signal_frame_length = signal_tensor.shape[-1].value
    # Center-pad the frame to fft_length along the last axis only.
    half_pad = (fft_length - signal_frame_length) // 2
    pad_values = tf.concat([
        tf.zeros([tf.rank(signal_tensor) - 1, 2], tf.int32),
        [[half_pad, fft_length - signal_frame_length - half_pad]]
    ],
                           axis=0)
    padded_signal = tf.pad(signal_tensor, pad_values)
    result_real_part = tf.matmul(padded_signal, real_dft_tensor)
    result_imag_part = tf.matmul(padded_signal, imag_dft_tensor)
    return result_real_part, result_imag_part
def _fixed_frame(signal, frame_length, frame_step, first_axis=False):
    """tflite-compatible tf.signal.frame for fixed-size input.

    Args:
      signal: Tensor containing signal(s).
      frame_length: Number of samples to put in each frame.
      frame_step: Sample advance between successive frames.
      first_axis: If true, framing is applied to first axis of tensor; otherwise,
        it is applied to last axis.

    Returns:
      A new tensor where the last axis (or first, if first_axis) of input
      signal has been replaced by a (num_frames, frame_length) array of individual
      frames where each frame is drawn frame_step samples after the previous one.

    Raises:
      ValueError: if signal has an undefined axis length. This routine only
        supports framing of signals whose shape is fixed at graph-build time.
    """
    signal_shape = signal.shape.as_list()
    if first_axis:
        length_samples = signal_shape[0]
    else:
        length_samples = signal_shape[-1]
    # NOTE(review): an undefined dimension comes back as None from as_list(),
    # so this comparison raises TypeError rather than the documented
    # ValueError — confirm whether callers rely on the exception type.
    if length_samples <= 0:
        raise ValueError('fixed framing requires predefined constant signal length')
    num_frames = max(0, 1 + (length_samples - frame_length) // frame_step)
    if first_axis:
        inner_dimensions = signal_shape[1:]
        result_shape = [num_frames, frame_length] + inner_dimensions
        gather_axis = 0
    else:
        outer_dimensions = signal_shape[:-1]
        result_shape = outer_dimensions + [num_frames, frame_length]
        # Currently tflite's gather only supports axis==0, but that may still
        # work if we want the last of 1 axes.
        gather_axis = len(outer_dimensions)
    # FIX: fractions.gcd was deprecated in Python 3.5 and removed in 3.9;
    # math.gcd is the drop-in replacement for non-negative ints (math is
    # already imported at module level).
    subframe_length = math.gcd(frame_length, frame_step)
    subframes_per_frame = frame_length // subframe_length
    subframes_per_hop = frame_step // subframe_length
    num_subframes = length_samples // subframe_length
    # Reshape the (trimmed) signal into whole subframes so that frames can
    # later be assembled by gathering subframes.
    if first_axis:
        trimmed_input_size = [num_subframes * subframe_length] + inner_dimensions
        subframe_shape = [num_subframes, subframe_length] + inner_dimensions
    else:
        trimmed_input_size = outer_dimensions + [num_subframes * subframe_length]
        subframe_shape = outer_dimensions + [num_subframes, subframe_length]
    subframes = tf.reshape(
        tf.slice(
            signal,
            begin=np.zeros(len(signal_shape), np.int32),
            size=trimmed_input_size), subframe_shape)
    # frame_selector is a [num_frames, subframes_per_frame] tensor
    # that indexes into the appropriate frame in subframes. For example:
    # [[0, 0, 0, 0], [2, 2, 2, 2], [4, 4, 4, 4]]
    frame_selector = np.reshape(
        np.arange(num_frames) * subframes_per_hop, [num_frames, 1])
    # subframe_selector is a [num_frames, subframes_per_frame] tensor
    # that indexes into the appropriate subframe within a frame. For example:
    # [[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]
    subframe_selector = np.reshape(
        np.arange(subframes_per_frame), [1, subframes_per_frame])
    # Adding the 2 selector tensors together produces a [num_frames,
    # subframes_per_frame] tensor of indices to use with tf.gather to select
    # subframes from subframes. We then reshape the inner-most subframes_per_frame
    # dimension to stitch the subframes together into frames. For example:
    # [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7]].
    selector = frame_selector + subframe_selector
    frames = tf.reshape(
        tf.gather(subframes, selector.astype(np.int32), axis=gather_axis),
        result_shape)
    return frames
def _stft_tflite(signal, frame_length, frame_step, fft_length):
    """tflite-compatible implementation of tf.signal.stft.

    Compute the short-time Fourier transform of a 1D input while avoiding tf ops
    that are not currently supported in tflite (Rfft, Range, SplitV).
    fft_length must be fixed. A Hann window of frame_length is always applied.

    Since fixed (precomputed) framing must be used, signal.shape[-1] must be a
    specific value (so "?"/None is not supported).

    Args:
      signal: 1D tensor containing the time-domain waveform to be transformed.
      frame_length: int, the number of points in each Fourier frame.
      frame_step: int, the number of samples to advance between successive frames.
      fft_length: int, the size of the Fourier transform to apply.

    Returns:
      Two (num_frames, fft_length) tensors containing the real and imaginary parts
      of the short-time Fourier transform of the input signal.
    """
    # Precompute the Hann window in numpy, shaped (1, frame_length) rather
    # than (frame_length,) to help the tflite broadcast logic.
    hann = 0.5 - 0.5 * np.cos(
        2 * np.pi * np.arange(0, 1.0, 1.0 / frame_length))
    window = tf.reshape(
        tf.constant(hann.astype(np.float32), name='window'),
        [1, frame_length])
    # Frame, window, then take the matmul-based real DFT.
    frames = _fixed_frame(signal, frame_length, frame_step, first_axis=False)
    windowed_frames = frames * window
    return _naive_rdft(windowed_frames, fft_length)
def _stft_magnitude_tflite(waveform_input, window_length_samples,
                           hop_length_samples, fft_length):
    """Calculate spectrogram avoiding tflite incompatible ops."""
    real_part, imag_part = _stft_tflite(
        waveform_input,
        frame_length=window_length_samples,
        frame_step=hop_length_samples,
        fft_length=fft_length)
    # |X| = sqrt(Re^2 + Im^2), computed with tflite-friendly elementwise ops.
    power = real_part * real_part + imag_part * imag_part
    return tf.sqrt(power, name='magnitude_spectrogram')
def build_mel_calculation_graph(waveform_input,
                                sample_rate=16000,
                                window_length_seconds=0.025,
                                hop_length_seconds=0.010,
                                num_mel_bins=64,
                                lower_edge_hz=125.0,
                                upper_edge_hz=7500.0,
                                frame_width=96,
                                frame_hop=10,
                                tflite_compatible=False):
    """Build a TF graph to go from waveform to mel spectrum patches.

    Args:
      waveform_input: 1D Tensor which will be filled with 16 kHz waveform as
        tf.float32.
      sample_rate: Scalar giving the sampling rate of the waveform. Only 16 kHz
        is acceptable at present.
      window_length_seconds: Duration of window used for each Fourier transform.
      hop_length_seconds: Time shift between successive analysis time frames.
      num_mel_bins: The number of mel frequency bins to calculate.
      lower_edge_hz: Frequency boundary at bottom edge of mel mapping.
      upper_edge_hz: Frequency boundary at top edge of mel mapping.
      frame_width: The number of successive time frames to include in each patch.
      frame_hop: The frame advance between successive patches.
      tflite_compatible: Avoid ops not currently supported in tflite.

    Returns:
      Tensor holding [num_patches, frame_width, num_mel_bins] log-mel-spectrogram
      patches.
    """
    # `waveform_input` is a [?] vector as a tensor.
    # `magnitude_spectrogram` is a [?, fft_length/2 + 1] tensor of spectrograms.
    # Derive the dependent parameters.
    window_length_samples = int(round(window_length_seconds * sample_rate))
    hop_length_samples = int(round(hop_length_seconds * sample_rate))
    # fft_length is the next power of two >= the window length.
    fft_length = 2**int(
        math.ceil(math.log(window_length_samples) / math.log(2.0)))
    if tflite_compatible:
        magnitude_spectrogram = _stft_magnitude_tflite(
            waveform_input, window_length_samples, hop_length_samples, fft_length)
    else:
        magnitude_spectrogram = _stft_magnitude_full_tf(
            waveform_input, window_length_samples, hop_length_samples, fft_length)
    # Warp the linear-scale, magnitude spectrograms into the mel-scale.
    # NOTE(review): `.shape[-1].value` and `tf.log` below are TF1-style APIs
    # (the module imports tensorflow.compat.v1) — confirm before migrating.
    num_spectrogram_bins = magnitude_spectrogram.shape[-1].value
    if tflite_compatible:
        # Precompute the mel matrix as a constant so only tflite-supported
        # ops remain in the graph.
        linear_to_mel_weight_matrix = tf.constant(
            mfcc_mel.SpectrogramToMelMatrix(num_mel_bins, num_spectrogram_bins,
                                            sample_rate, lower_edge_hz,
                                            upper_edge_hz).astype(np.float32),
            name='linear_to_mel_matrix')
    else:
        # In full tf, the mel weight matrix is calculated at run time within the
        # TF graph. This avoids including a matrix of 64 x 256 float values (i.e.,
        # 100 kB or more, depending on the representation) in the exported graph.
        linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
            num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hz,
            upper_edge_hz)
    mel_spectrogram = tf.matmul(
        magnitude_spectrogram,
        linear_to_mel_weight_matrix,
        name='mel_spectrogram')
    # Small offset keeps the log finite for silent (zero-energy) bins.
    log_offset = 0.001
    log_mel_spectrogram = tf.log(
        mel_spectrogram + log_offset, name='log_mel_spectrogram')
    # log_mel_spectrogram is a [?, num_mel_bins] gram.
    # Slice the time axis into overlapping patches of frame_width frames.
    if tflite_compatible:
        features = _fixed_frame(
            log_mel_spectrogram,
            frame_length=frame_width,
            frame_step=frame_hop,
            first_axis=True)
    else:
        features = tf.signal.frame(
            log_mel_spectrogram,
            frame_length=frame_width,
            frame_step=frame_hop,
            axis=0)
    # features is [num_patches, frame_width, num_mel_bins].
    return features
#!/usr/bin/python
# Author: GMFTBY
# Time: 2019.11.8
'''
Chat script, show the demo
'''
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import numpy as np
import math
import argparse
from utils import *
from data_loader import *
from model.seq2seq_attention import Seq2Seq
from model.HRED import HRED
from model.HRED_cf import HRED_cf
from model.when2talk_GCN import When2Talk_GCN
from model.when2talk_GAT import When2Talk_GAT
from model.GCNRNN import GCNRNN
from model.GatedGCN import GatedGCN
from model.W2T_RNN_First import W2T_RNN_First
from model.W2T_GCNRNN import W2T_GCNRNN
from model.GatedGCN_nobi import GatedGCN_nobi
from model.GATRNN import GATRNN
def create_model(kwargs, src_w2idx, tgt_w2idx):
    """Instantiate the network selected by kwargs['model'].

    Args:
      kwargs: argparse.Namespace holding the hyperparameters (converted to a
        dict below).
      src_w2idx: source word->index vocabulary (its size fixes the encoder
        input vocabulary).
      tgt_w2idx: target word->index vocabulary (its size fixes the decoder
        output vocabulary; supplies <pad>/<sos> ids).

    Returns:
      The constructed (untrained) model instance.

    Raises:
      Exception: if kwargs['model'] names an unknown model.
    """
    # load model
    # load net
    kwargs = vars(kwargs)
    if kwargs['model'] == 'seq2seq':
        net = Seq2Seq(len(src_w2idx), kwargs['embed_size'],
                      len(tgt_w2idx), kwargs['utter_hidden'],
                      kwargs['decoder_hidden'], pad=tgt_w2idx['<pad>'],
                      sos=tgt_w2idx['<sos>'], utter_n_layer=kwargs['utter_n_layer'])
    elif kwargs['model'] == 'hred':
        net = HRED(kwargs['embed_size'], len(src_w2idx), len(tgt_w2idx),
                   kwargs['utter_hidden'], kwargs['context_hidden'],
                   kwargs['decoder_hidden'], pad=tgt_w2idx['<pad>'],
                   sos=tgt_w2idx['<sos>'], utter_n_layer=kwargs['utter_n_layer'])
    elif kwargs['model'] == 'hred-cf':
        # hred-cf additionally conditions on the user/speaker embedding
        net = HRED_cf(kwargs['embed_size'], len(src_w2idx), len(tgt_w2idx),
                      kwargs['utter_hidden'], kwargs['context_hidden'],
                      kwargs['decoder_hidden'], pad=tgt_w2idx['<pad>'],
                      sos=tgt_w2idx['<sos>'], utter_n_layer=kwargs['utter_n_layer'],
                      user_embed_size=kwargs['user_embed_size'])
    elif kwargs['model'] == 'when2talk_GCN':
        net = When2Talk_GCN(len(src_w2idx), len(tgt_w2idx), kwargs['embed_size'],
                            kwargs['utter_hidden'], kwargs['context_hidden'],
                            kwargs['decoder_hidden'], kwargs['position_embed_size'],
                            user_embed_size=kwargs['user_embed_size'],
                            sos=tgt_w2idx["<sos>"], pad=tgt_w2idx['<pad>'],
                            utter_n_layer=kwargs['utter_n_layer'],
                            contextrnn=kwargs['contextrnn'])
    elif kwargs['model'] == 'when2talk_GAT':
        net = When2Talk_GAT(len(src_w2idx), len(tgt_w2idx), kwargs['embed_size'],
                            kwargs['utter_hidden'], kwargs['context_hidden'],
                            kwargs['decoder_hidden'], kwargs['position_embed_size'],
                            user_embed_size=kwargs['user_embed_size'],
                            sos=tgt_w2idx["<sos>"], pad=tgt_w2idx['<pad>'],
                            utter_n_layer=kwargs['utter_n_layer'],
                            contextrnn=kwargs['contextrnn'])
    elif kwargs['model'] == 'GATRNN':
        net = GATRNN(len(src_w2idx), len(tgt_w2idx), kwargs['embed_size'],
                     kwargs['utter_hidden'], kwargs['context_hidden'],
                     kwargs['decoder_hidden'], kwargs['position_embed_size'],
                     user_embed_size=kwargs['user_embed_size'],
                     sos=tgt_w2idx["<sos>"], pad=tgt_w2idx['<pad>'],
                     utter_n_layer=kwargs['utter_n_layer'],
                     context_threshold=kwargs['context_threshold'])
    elif kwargs['model'] == 'GCNRNN':
        net = GCNRNN(len(src_w2idx), len(tgt_w2idx), kwargs['embed_size'],
                     kwargs['utter_hidden'], kwargs['context_hidden'],
                     kwargs['decoder_hidden'], kwargs['position_embed_size'],
                     user_embed_size=kwargs['user_embed_size'],
                     sos=tgt_w2idx["<sos>"], pad=tgt_w2idx['<pad>'],
                     utter_n_layer=kwargs['utter_n_layer'],
                     context_threshold=kwargs['context_threshold'])
    elif kwargs['model'] == 'W2T_GCNRNN':
        net = W2T_GCNRNN(len(src_w2idx), len(tgt_w2idx),
                         kwargs['embed_size'],
                         kwargs['utter_hidden'],
                         kwargs['context_hidden'],
                         kwargs['decoder_hidden'],
                         kwargs['position_embed_size'],
                         user_embed_size=kwargs['user_embed_size'],
                         sos=tgt_w2idx["<sos>"],
                         pad=tgt_w2idx['<pad>'],
                         utter_n_layer=kwargs['utter_n_layer'])
    elif kwargs['model'] == 'GatedGCN':
        net = GatedGCN(len(src_w2idx), len(tgt_w2idx), kwargs['embed_size'],
                       kwargs['utter_hidden'], kwargs['context_hidden'],
                       kwargs['decoder_hidden'], kwargs['position_embed_size'],
                       user_embed_size=kwargs['user_embed_size'],
                       sos=tgt_w2idx["<sos>"], pad=tgt_w2idx['<pad>'],
                       utter_n_layer=kwargs['utter_n_layer'],
                       context_threshold=kwargs['context_threshold'])
    elif kwargs['model'] == 'GatedGCN_nobi':
        net = GatedGCN_nobi(len(src_w2idx), len(tgt_w2idx), kwargs['embed_size'],
                            kwargs['utter_hidden'], kwargs['context_hidden'],
                            kwargs['decoder_hidden'], kwargs['position_embed_size'],
                            user_embed_size=kwargs['user_embed_size'],
                            sos=tgt_w2idx["<sos>"], pad=tgt_w2idx['<pad>'],
                            utter_n_layer=kwargs['utter_n_layer'],
                            context_threshold=kwargs['context_threshold'])
    elif kwargs['model'] == 'W2T_RNN_First':
        net = W2T_RNN_First(len(src_w2idx), len(tgt_w2idx), kwargs['embed_size'],
                            kwargs['utter_hidden'], kwargs['context_hidden'],
                            kwargs['decoder_hidden'], kwargs['position_embed_size'],
                            user_embed_size=kwargs['user_embed_size'],
                            sos=tgt_w2idx["<sos>"], pad=tgt_w2idx['<pad>'],
                            utter_n_layer=kwargs['utter_n_layer'])
    else:
        # NOTE(review): this message only lists three of the supported model
        # names — it is stale relative to the branches above.
        raise Exception('[!] wrong model (seq2seq, hred, hred-cf)')
    return net
class Bot:
    """Interactive dialogue agent wrapping the models built by create_model().

    The running conversation is kept in two parallel structures:
      * self.history:   list of (role token, raw utterance) for display
      * self.container: list of (role index, token-id list) fed to the model
    Graph-based models additionally receive the dialogue graph produced by
    create_graph().
    """

    def __init__(self, kwargs, maxlen=50, role='<1>'):
        # load vocab: each pickle holds a (word->idx, idx->word) pair
        tgt_vocab = load_pickle(kwargs.tgt_vocab)
        src_vocab = load_pickle(kwargs.src_vocab)
        self.src_w2idx, self.src_idx2w = src_vocab
        self.tgt_w2idx, self.tgt_idx2w = tgt_vocab
        # whether the model has the ability to decide the talk timing.
        # FIX: the original read the module-global `args` here and below,
        # which made the class work only when run from this script's
        # __main__; use the `kwargs` parameter that was actually passed in.
        if kwargs.model in ['hred', 'seq2seq']:
            self.decision = False
        else:
            self.decision = True
        # load the model
        self.net = create_model(kwargs, self.src_w2idx, self.tgt_w2idx)
        if torch.cuda.is_available():
            self.net.cuda()
        self.net.eval()
        print('Net:')
        print(self.net)
        # load checkpoint
        load_best_model(kwargs.dataset, kwargs.model, self.net,
                        kwargs.min_threshold, kwargs.max_threshold)
        # reset flag
        self.reset = True
        self.container = []
        self.history = []
        self.roles = ['<0>', '<1>']  # <0>: human, <1>: chatbot
        self.role = role
        # print configure
        self.maxlen = max(50, maxlen)  # decode length, floored at 50
        self.src_maxlen = 100          # max token ids kept per utterance
        print('[!] Init the model over')

    def str2tensor(self, utterance, role):
        """Tokenize one utterance into ids: [<sos>, <role>, tokens..., <eos>]."""
        line = [self.src_w2idx['<sos>'], self.src_w2idx[role]] + [self.src_w2idx.get(w, self.src_w2idx['<unk>']) for w in nltk.word_tokenize(utterance)] + [self.src_w2idx['<eos>']]
        if len(line) > self.src_maxlen:
            # FIX: the original sliced with an undefined name `maxlen`
            # (NameError for over-long inputs). Keep <sos> and the role
            # token, then the most recent tokens up to src_maxlen in total.
            line = [self.src_w2idx['<sos>'], line[1]] + line[-(self.src_maxlen - 2):]
        return line

    def get_role(self, role):
        """Map a role token ('<0>' / '<1>') to its integer index."""
        try:
            role = self.roles.index(role)
        except ValueError:
            raise Exception(f'[!] Unknown role {role}')
        return role

    def add_sentence(self, utterance, role):
        """Append one utterance (raw and tokenized) to the conversation."""
        self.history.append((role, utterance))
        nrole = self.get_role(role)
        self.container.append((nrole, self.str2tensor(utterance, role)))

    def create_graph(self):
        """Build the dialogue graph over self.container.

        Edges carry role information and temporal information; returns
        (edge index lists [src, tgt], edge weights).
        """
        edges = {}
        turn_len = len(self.container)
        # temporal information: connect each turn to its successor
        for i in range(turn_len - 1):
            edges[(i, i + 1)] = [1]
        # role information: connect later turns spoken by the same user
        for i in range(turn_len):
            for j in range(turn_len):
                if j > i:
                    useri, _ = self.container[i]
                    userj, _ = self.container[j]
                    if useri == userj:
                        if edges.get((i, j), None):
                            edges[(i, j)].append(1)
                        else:
                            edges[(i, j)] = [1]
        # flatten to edge-index lists plus one weight per edge
        e, w = [[], []], []
        for src, tgt in edges.keys():
            e[0].append(src)
            e[1].append(tgt)
            w.append(max(edges[(src, tgt)]))
        return (e, w)

    def process_input(self):
        """Pack the conversation into the batch format expected by predict().

        Returns (sbatch, gbatch, subatch, tubatch, maxlen, turn_lengths)
        for a batch of size 1.
        """
        # generate the graph over the current container
        gbatch = [self.create_graph()]
        # src utterances (each [len, 1]) and their speaker indices
        sbatch, subatch = [], []
        for i in self.container:
            sbatch.append(self.load2GPU(torch.tensor(i[1], dtype=torch.long).unsqueeze(1)))
            subatch.append(i[0])
        subatch = self.load2GPU(torch.tensor(subatch, dtype=torch.long).unsqueeze(1))
        # target speaker (the bot's role) for the next utterance
        tubatch = self.load2GPU(torch.tensor([self.get_role(self.role)], dtype=torch.long))
        # per-turn utterance lengths, shape [turns, 1]
        turn_lengths = [[len(i[1])] for i in self.container]
        turn_lengths = self.load2GPU(torch.tensor(turn_lengths, dtype=torch.long))
        return sbatch, gbatch, subatch, tubatch, self.maxlen, turn_lengths

    def load2GPU(self, t):
        """Move a tensor to CUDA when available; no-op otherwise."""
        if torch.cuda.is_available():
            t = t.cuda()
        return t

    def tensor2str(self, t):
        """Decode a list of target token ids (skipping the leading <sos>)."""
        rest = []
        for i in t[1:]:
            w = self.tgt_idx2w[i]
            if w in ['<pad>', '<eos>']:
                break
            rest.append(w)
        return ' '.join(rest)

    def generate(self):
        """Run the model once; return the reply or '<silence>'.

        The decision head output `de` (> 0.5) determines whether the bot
        chooses to speak at all.
        """
        sbatch, gbatch, subatch, tubatch, maxlen, turn_lengths = self.process_input()
        # de: [1], outputs: [maxlen, 1]
        de, output = self.net.predict(sbatch, gbatch, subatch, tubatch, maxlen, turn_lengths)
        output = list(map(int, output.squeeze(1).cpu().tolist()))  # [maxlen]
        de = de.cpu().item() > 0.5
        if de:
            # Talk
            return self.tensor2str(output)
        else:
            return '<silence>'

    def show_history(self):
        """Print the whole conversation as 'role: utterance' lines."""
        for i in self.history:
            print(f'{i[0]}: {i[1]}')

    def set_reset(self):
        """Clear the conversation state for a fresh dialogue."""
        self.container = []
        self.history = []
        self.reset = True
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Translate script')
parser.add_argument('--src_test', type=str, default=None, help='src test file')
parser.add_argument('--tgt_test', type=str, default=None, help='tgt test file')
parser.add_argument('--min_threshold', type=int, default=0,
help='epoch threshold for loading best model')
parser.add_argument('--max_threshold', type=int, default=30,
help='epoch threshold for loading best model')
parser.add_argument('--batch_size', type=int, default=16, help='batch size')
parser.add_argument('--model', type=str, default='HRED', help='model to be trained')
parser.add_argument('--utter_n_layer', type=int, default=1, help='layer of encoder')
parser.add_argument('--utter_hidden', type=int, default=150,
help='utterance encoder hidden size')
parser.add_argument('--context_hidden', type=int, default=150,
help='context encoder hidden size')
parser.add_argument('--decoder_hidden', type=int, default=150,
help='decoder hidden size')
parser.add_argument('--seed', type=int, default=30,
help='random seed')
parser.add_argument('--embed_size', type=int, default=200,
help='embedding layer size')
parser.add_argument('--src_vocab', type=str, default=None, help='src vocabulary')
parser.add_argument('--tgt_vocab', type=str, default=None, help='tgt vocabulary')
parser.add_argument('--maxlen', type=int, default=50, help='the maxlen of the utterance')
parser.add_argument('--pred', type=str, default=None,
help='the csv file save the output')
parser.add_argument('--hierarchical', type=int, default=1, help='whether hierarchical architecture')
parser.add_argument('--tgt_maxlen', type=int, default=50, help='target sequence maxlen')
parser.add_argument('--user_embed_size', type=int, default=10, help='user embed size')
parser.add_argument('--cf', type=int, default=0, help='whether have the classification')
parser.add_argument('--dataset', type=str, default='ubuntu')
parser.add_argument('--position_embed_size', type=int, default=30)
parser.add_argument('--graph', type=int, default=0)
parser.add_argument('--test_graph', type=str, default=None)
parser.add_argument('--plus', type=int, default=0, help='the same as the one in train.py')
parser.add_argument('--contextrnn', dest='contextrnn', action='store_true')
parser.add_argument('--no-contextrnn', dest='contextrnn', action='store_false')
parser.add_argument('--context_threshold', type=int, default=2)
args = parser.parse_args()
# show the parameters
print('Parameters:')
print(args)
chatbot = Bot(args, maxlen=args.maxlen, role='<1>')
# begin to chat with human
for i in range(100):
print(f'===== Dialogue {i} begin =====')
while True:
utterance = input(f'<0>: ')
utterance = utterance.strip()
if 'exit' in utterance:
break
chatbot.add_sentence(utterance, '<0>')
while True:
response = chatbot.generate()
if 'silence' in response:
break
else:
response = response.replace('<1>', '').replace('<0>', '').strip()
chatbot.add_sentence(response, '<1>')
print(f'<1> {response}')
print(f'===== Dialogue {i} finish =====')
chatbot.show_history()
chatbot.set_reset() | |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 5 14:27:35 2018
@author: ensur
"""
import os
import numpy as np
import pickle
# generate all characters dict
# Parse the CJKV ideograph decomposition database (cjkvi-ids/ids.txt).
# The first two lines of the file are header comments, hence the [2:].
lines = open('cjkvi-ids/ids.txt',encoding='UTF-8').readlines()[2:]
# char_seq maps a character to its decomposition sequence.  The Ideographic
# Description Characters (⿰, ⿱, ...) and the placeholder letters A/H/U/X
# used by the IDS database decompose to themselves (they are atomic).
char_seq = {}
char_seq['⿰'] = '⿰'
char_seq['⿱'] = '⿱'
char_seq['⿵'] = '⿵'
char_seq['⿻'] = '⿻'
char_seq['⿺'] = '⿺'
char_seq['⿹'] = '⿹'
char_seq['⿶'] = '⿶'
char_seq['⿳'] = '⿳'
char_seq['⿴'] = '⿴'
char_seq['⿸'] = '⿸'
char_seq['⿷'] = '⿷'
char_seq['⿲'] = '⿲'
char_seq['A'] = 'A'
char_seq['H'] = 'H'
char_seq['U'] = 'U'
char_seq['X'] = 'X'
# First pass: record each character's raw decomposition.
# Each line is tab-separated: codepoint, character, IDS sequence.  The
# replace() chain strips spaces, bracketed source tags and the G/T/J/K/V
# source-region letters.
for i in range(len(lines)):
    a = lines[i].split(' ')[0].replace('\n','').split('\t')
    seq = a[2].replace(' ','').replace('[','').replace(']','')\
        .replace('G','').replace('T','').replace('J','')\
        .replace('K','').replace('V','')
    char_seq[a[1]] = seq
# Second pass: validation -- the bare lookup raises KeyError if any
# component character is missing from char_seq.
# NOTE(review): on success the expression has no effect; if components were
# meant to be registered here, this should probably be `char_seq[k] = k` --
# confirm intent.
for i in range(len(lines)):
    a = lines[i].split(' ')[0].replace('\n','').split('\t')
    seq = a[2].replace(' ','').replace('[','').replace(']','')\
        .replace('G','').replace('T','').replace('J','')\
        .replace('K','').replace('V','')
    for k in seq:
        char_seq[k]
def is_all(seq):
    """Return True if every character in *seq* is atomic.

    A character is atomic when its stored decomposition in the module-level
    ``char_seq`` dict is a single character (i.e. it decomposes to itself).

    Improvements over the previous version: uses ``all()`` with a generator
    instead of building a full list and calling ``max()``, and is vacuously
    True for an empty sequence (the old ``max([])`` raised ValueError).
    """
    return all(len(char_seq[c]) == 1 for c in seq)
# Recursively expand every character's decomposition until it consists only
# of atomic components (see is_all).
char_seq_all = {}
for i in range(len(lines)):
    print(i)
    a = lines[i].split(' ')[0].replace('\n','').split('\t')
    char = a[1]
    seq_tmp = char_seq[a[1]]
    while not is_all(seq_tmp):
        for k in range(len(seq_tmp)):
            if len(char_seq[seq_tmp[k]]) > 1:
                # str.replace substitutes every occurrence of the component
                # with its own decomposition.
                seq_tmp = seq_tmp.replace(seq_tmp[k],char_seq[seq_tmp[k]])
    print(seq_tmp)
    char_seq_all[char] = seq_tmp
# Build the alphabet of all atomic components (radicals).
# NOTE(review): set() iteration order is not deterministic across runs, so
# the alphabet (and hence the saved indices) differs between executions --
# the three output files below are only mutually consistent per run.
alphabet = ''
for value in char_seq_all.values():
    alphabet += value
alphabet = list(set(alphabet))
alphabet = ''.join(alphabet)
print(len(alphabet))
# Map every character to the list of alphabet indices of its radicals.
char_seq_index = {}
for keys in char_seq_all.keys():
    char_seq_index[keys] = [alphabet.index(c) for c in list(char_seq_all[keys])]
# Save the character -> radical-sequence dict.  (comment translated)
save_file = open('char2seq_dict_real.pkl', 'wb')
pickle.dump(char_seq_all, save_file)
save_file.close()
# Save the character -> radical-index-sequence dict.  (comment translated)
save_file = open('char2seq_dict.pkl', 'wb')
pickle.dump(char_seq_index, save_file)
save_file.close()
# Save the radical alphabet.  (comment translated)
f = open('radical_alphabet.txt','w',encoding='utf-8')
f.write(alphabet)
f.close()
from __future__ import print_function, division
import unittest
from hotspots.grid_extension import Grid, _GridEnsemble
import numpy as np
from glob import glob
import os
from os.path import join, exists
import shutil
class TestEnsembleSyntheticData(unittest.TestCase):
    """Tests for _GridEnsemble using synthetic, randomly generated grids.

    Ten grids share the same values at the same absolute coordinates but
    have origins offset by up to 5 grid points in each direction, so the
    per-point max/mean/range statistics are exactly predictable (max ==
    mean == original value, range == 0).
    """

    @staticmethod
    def make_test_data():
        """Create 10 offset grids holding identical values and write them to a tmp dir."""

        def make_grid(offset, vals, idxs, nsteps):
            # Build a grid of `nsteps` points at 0.5 spacing starting at `offset`.
            grid_origin = offset
            grid_far_corner = (
                offset[0] + (nsteps[0] - 1) * 0.5, offset[1] + (nsteps[1] - 1) * 0.5, offset[2] + (nsteps[2] - 1) * 0.5)
            out_grid = Grid(origin=grid_origin,
                            far_corner=grid_far_corner,
                            spacing=0.5,
                            _grid=None,
                            default=0)
            for (nx, ny, nz), v in zip(idxs, vals):
                # Shift indices by the origin offset (2 grid points per unit at
                # 0.5 spacing) so every grid holds the same value at the same
                # absolute coordinate.
                out_grid.set_value(int(nx - offset[0] * 2), int(ny - offset[1] * 2), int(nz - offset[2] * 2), v)
            return out_grid

        # Set up the numpy array:
        nsteps = (40, 50, 45)
        # Fill with a thousand random floats between 0 and 40 (roughly hotspots range)
        np.random.seed(3)
        values = np.random.uniform(1, 40, 1000)
        ind_x = np.random.randint(5, nsteps[0] - 5, size=1000)
        ind_y = np.random.randint(5, nsteps[1] - 5, size=1000)
        ind_z = np.random.randint(5, nsteps[2] - 5, size=1000)
        # Create 10 grids, with an origin offset of up to 10 grid points in each direction
        grid_spacing = 0.5
        offset_x = np.random.randint(-5, 5, 10) * grid_spacing
        offset_y = np.random.randint(-5, 5, 10) * grid_spacing
        offset_z = np.random.randint(-5, 5, 10) * grid_spacing
        # Fix: materialise the zips. On Python 3 `zip` returns a one-shot
        # iterator, so len()/indexing on `offsets` raised TypeError and
        # `indices` was exhausted after building the first grid (grids 2-10
        # would have received no values at all).
        offsets = list(zip(offset_x, offset_y, offset_z))
        print(offsets)
        indices = list(zip(ind_x, ind_y, ind_z))
        tmp_dir = join(os.getcwd(), "tmp_gridensemble_test")
        if not exists(tmp_dir):
            os.mkdir(tmp_dir)
        grid_list = []
        for i, off in enumerate(offsets):
            g = make_grid(off, values, indices, nsteps)
            print(g.nsteps)
            grid_list.append(g)
            g.write(join(tmp_dir, "test_apolar_{}.ccp4".format(str(i))))
        return grid_list

    def setUp(self):
        """Build the synthetic grids and a _GridEnsemble over them."""
        self.grid_list = self.make_test_data()
        self.tmp_dir = join(os.getcwd(), "tmp_gridensemble_test")
        self.grid_paths = glob(join(self.tmp_dir, "test_apolar_*.ccp4"))
        print(self.grid_paths)
        self.grid_ensemble = _GridEnsemble()
        self.max_grid = self.grid_ensemble.from_grid_list(self.grid_list, os.getcwd(), "test", "apolar")
        # Rows: nonzero grid points; columns: one value per input grid.
        self.values = self.grid_ensemble.results_array[self.grid_ensemble.nonzeros]

    def test_from_hotspot_maps(self):
        """Ensemble built from the .ccp4 files on disk has the expected shape."""
        # hard coded because 988 values in test set - perhaps there's a better way to test this?
        grid_ensemble = _GridEnsemble()
        max_grid = grid_ensemble.from_hotspot_maps(self.grid_paths, os.getcwd(), "test", "apolar")
        values = grid_ensemble.results_array[grid_ensemble.nonzeros]
        self.assertEqual(values.shape,
                         (988, 10),
                         msg="Check number of values")
        self.assertEqual(grid_ensemble.tup_max_length,
                         len(self.grid_paths),
                         msg="Check tup_max_length assigned")

    def test_from_grid_list(self):
        """Ensemble built from in-memory grids has the expected shape."""
        grid_ensemble = _GridEnsemble()
        max_grid = grid_ensemble.from_grid_list(self.grid_list, os.getcwd(), "test", "apolar")
        values = grid_ensemble.results_array[grid_ensemble.nonzeros]
        self.assertEqual(values.shape,
                         (988, 10),
                         msg="Check number of values: expected: {}, observed {}".format((988, 10), values.shape))
        self.assertEqual(grid_ensemble.tup_max_length,
                         len(self.grid_list),
                         msg="Check tup_max_length assigned")

    def test_get_gridpoint_max(self):
        """All grids are identical, so the per-point max equals any column."""
        maxes = self.grid_ensemble.get_gridpoint_max()
        self.assertEqual(len(maxes), self.values.shape[0], msg="Check get_max has correct number of values")
        arr_max = np.array(maxes)
        arr_vals = np.array([self.values[i][0] for i in range(self.values.shape[0])])
        self.assertTrue(np.array_equal(arr_max, arr_vals), msg="Check get_max")

    def test_get_gridpoint_means(self):
        """All grids are identical, so the per-point mean equals any column."""
        means = self.grid_ensemble.get_gridpoint_means()
        self.assertEqual(len(means),
                         self.values.shape[0],
                         msg="Check get_gridpoint_means has correct number of values: expected {} observed {}".format(self.values.shape[0], len(means)))
        arr_mean = np.array(means)
        arr_vals = np.array([self.values[i][0] for i in range(len(self.values))])
        self.assertTrue(np.allclose(arr_mean, arr_vals), msg="Check get_means")

    def test_get_gridpoint_means_spread(self):
        """Spread variant returns one entry per (point, grid) pair."""
        means = self.grid_ensemble.get_gridpoint_means_spread()
        self.assertIsInstance(means, list)
        self.assertEqual(len(means), len(self.values.flatten()))

    def test_get_gridpoint_ranges(self):
        """All grids are identical, so every per-point range is the same (0)."""
        ranges = self.grid_ensemble.get_gridpoint_ranges()
        self.assertIsInstance(ranges, list)
        self.assertEqual(len(set(ranges)), 1)

    def test_output_grid(self):
        """The max/mean/ranges output grids agree with the synthetic input."""
        #g_0 = Grid.from_file(self.grid_paths[0])
        g_0 = self.grid_list[0]
        print(g_0.count_grid())
        print(self.max_grid.count_grid())
        max_g, ref_g = Grid.common_grid([self.max_grid, g_0])
        diff_grid = (max_g - ref_g)
        nx, ny, nz = diff_grid.nsteps
        for x in range(nx):
            for y in range(ny):
                for z in range(nz):
                    if diff_grid.value(x, y, z) != 0:
                        print(diff_grid.value(x, y, z))
        self.assertEqual((max_g - ref_g).count_grid(), 0, msg="Testing the max_grid")
        means_grid = self.grid_ensemble.output_grid(mode="mean", save=False)
        mean_g, ref_g = Grid.common_grid([means_grid, g_0])
        # Fix: compare the means grid (mean_g), not max_g again -- mean_g was
        # computed but never used.  All input grids are identical, so the mean
        # equals the original values and the difference must be empty.
        self.assertEqual((mean_g - ref_g).count_grid(), 0, msg="Testing the means_grid")
        ranges_grid = self.grid_ensemble.output_grid(mode="ranges", save=False)
        self.assertEqual(ranges_grid.count_grid(), 0, msg="Testing the ranges grid")
        # An unknown mode returns None.
        other_g = self.grid_ensemble.output_grid(mode="bla", save=False)
        self.assertIsNone(other_g)

    def tearDown(self):
        """Remove the temporary grid files created by make_test_data."""
        if exists(self.tmp_dir):
            shutil.rmtree(self.tmp_dir)
        #os.remove("test_max_apolar.ccp4")
#os.remove("test_max_apolar.ccp4")
if __name__ == "__main__":
    # Run the test suite when this module is executed directly.
    unittest.main()
import numpy.random as rd
import numpy as np
import tensorflow as tf

from toolbox.einsum_re_writter.einsum_re_written import einsum_bi_bij_to_bj

# Sanity check: the hand-written einsum replacement must match tf.einsum for
# the 'bi,bij->bj' contraction on random inputs.
a = rd.rand(2, 3)
b = rd.rand(2, 3, 4)
tf_a = tf.constant(a)
tf_b = tf.constant(b)
prod1 = tf.einsum('bi,bij->bj', tf_a, tf_b)
prod2 = einsum_bi_bij_to_bj(tf_a, tf_b)

sess = tf.Session()
np_prod_1 = sess.run(prod1)
np_prod_2 = sess.run(prod2)

# Fix: use allclose rather than exact float equality -- the two
# implementations may sum in a different order, so bit-identical results are
# not guaranteed.  Also fixes the 'Mistmatch' typo in the failure message.
assert np.allclose(np_prod_1, np_prod_2), 'Mismatch'
print('Prod 1')
print(np_prod_1)
print('Prod 2')
print(np_prod_2)
#!/usr/bin/env python2
# -*- coding: utf8 -*-
import sys
sys.path.append("../imposm-parser")
import time
import math
import yaml
import pyproj
import networkx as nx
import premap_pb2 as pb
import types_pb2 as pbtypes
from utils import angle,int2deg,deg2int,distance, nodeWays, deleteAloneNodes
from Map import Map
from Raster import Raster
# NOTE(review): `scale` is not referenced anywhere in this chunk -- confirm
# whether it is still used elsewhere before relying on it.
scale = 10
# Way types that pedestrians can traverse; divideLongEdges only splits edges
# of ways whose type is in this list.
walkTypes = [pbtypes.PAVED,pbtypes.UNPAVED,pbtypes.STEPS,pbtypes.HIGHWAY]
def onBorder(amap, neighs, nodeid, lastnodeid):
    """ Walk the border of an object: among the neighbours of `nodeid`
    (excluding `lastnodeid`, the node we came from) pick the one whose edge
    makes the largest angle with the incoming edge, and return its id."""
    current = amap.nodes[amap.nodesIdx[nodeid]]
    previous = amap.nodes[amap.nodesIdx[lastnodeid]]
    # Reference vector: from the current node back to the previous one.
    ref_x = previous.lon - current.lon
    ref_y = previous.lat - current.lat
    candidates = []
    for neigh_id in neighs[current.id]:
        if neigh_id == lastnodeid:
            continue
        neigh = amap.nodes[amap.nodesIdx[neigh_id]]
        turn = angle(neigh.lon - current.lon, neigh.lat - current.lat, ref_x, ref_y)
        candidates.append((turn, neigh_id))
    # The neighbour with the largest turn angle stays on the border.
    candidates.sort(key=lambda pair: pair[0])
    return candidates[-1][1]
def firstBorder(amap, neighs, node):
    """ Find the first border edge: among `node`'s neighbours pick the one
    whose edge makes the largest angle with a fixed downward reference
    vector, and return its id."""
    # Fixed reference vector pointing straight "down".
    ref_x = 0
    ref_y = -1000
    candidates = []
    #print "Node ",node.id,"neighs:",neighs[node.id]
    for neigh_id in neighs[node.id]:
        neigh = amap.nodes[amap.nodesIdx[neigh_id]]
        turn = angle(neigh.lon - node.lon, neigh.lat - node.lat, ref_x, ref_y)
        candidates.append((turn, neigh_id))
    candidates.sort(key=lambda pair: pair[0])
    return candidates[-1][1]
def mergeWays(amap,wayids):
    """ Merge given ways into one.

    Builds a node-adjacency map over all the given (closed) ways, then walks
    the outer border starting from the bottom-left-most node, always turning
    to the outermost neighbour (onBorder).  Returns the merged way, or None
    when the geometry cannot form a single cycle.
    """
    newway = pb.Way()
    way1 = amap.ways[amap.waysIdx[wayids[0]]]
    neighs = {}
    nodes = []
    for wayid in wayids:
        way = amap.ways[amap.waysIdx[wayid]]
        waynodes = [amap.nodes[amap.nodesIdx[ref]] for ref in way.refs]
        # skip degenerate ways (fewer than 3 distinct points)
        if len(waynodes)<3:
            continue
        if len(waynodes)==3 and waynodes[-1]==waynodes[0]:
            continue
        # close the ring if it is not closed yet
        if waynodes[-1] != waynodes[0]:
            waynodes.append(waynodes[0])
        for i in range(len(waynodes)):
            wniid = waynodes[i].id
            if wniid not in neighs:
                neighs[wniid] = []
                nodes.append(waynodes[i])
            # link each node with its predecessor and successor on the ring
            if i!=0:
                if waynodes[i-1].id not in neighs[wniid]:
                    neighs[wniid].append(waynodes[i-1].id)
            if i!=len(waynodes)-1:
                if waynodes[i+1].id not in neighs[wniid]:
                    neighs[wniid].append(waynodes[i+1].id)
    if len(nodes)<3:
        print "merging",wayids
        print "Merge failed -- too few nodes"
        return None
    # Start from the node with the smallest (lat, lon); it is guaranteed to
    # lie on the outer border.
    bylon = sorted(nodes,key=lambda n:n.lon)
    bylatlon = sorted(bylon,key=lambda n:n.lat)
    first = bylatlon[0]
    #print neighs[first.id]
    second = amap.nodes[amap.nodesIdx[firstBorder(amap,neighs,first)]]
    #print "edge:",first.id," ",second.id
    newway.refs.append(first.id)
    newway.refs.append(second.id)
    nextid = onBorder(amap,neighs,second.id,first.id)
    while(nextid != first.id):
        nextid = onBorder(amap,neighs,newway.refs[-1],newway.refs[-2])
        # Revisiting a node before closing the ring means the walk failed.
        if nextid in newway.refs and nextid != first.id:
            print "merging",wayids
            print "Merge failed -- wrong cycle"
            print newway.refs,", repeated:",nextid
            return None
        newway.refs.append(nextid)
    # print "F",first.id,"n",nextid
    # print newway.refs
    newway.refs.append(first.id)
    newway.type = way1.type
    newway.id = amap.newWayid()
    newway.area = way1.area
    newway.render = True
    return newway
def makeNeighGraph(amap,nodeways):
    """ Make neighbourgh graph of buildings.

    Two building ways become graph-adjacent when they share a node.  A way is
    marked "broken" when one of its nodes is also used by a non-building way
    (anything that is not an area of type BARRIER); broken ways are excluded
    from merging later.  Returns (G, broken).
    """
    G = nx.Graph()
    G.add_nodes_from([way.id for way in amap.ways])
    broken = {way.id : False for way in amap.ways }
    bcnt = 0
    for n in amap.nodes:
        nidx = amap.nodesIdx[n.id]
        ways = nodeways[nidx]
        isbroken = False
        buildings = []
        for wayid in ways:
            wayidx = amap.waysIdx[wayid]
            way = amap.ways[wayidx]
            if way.type != pbtypes.BARRIER or way.area!=True:
                # the node is shared with something that is not a building
                isbroken = True
            else:
                bcnt+=1
                buildings.append(wayidx)
        if isbroken:
            for wayidx in buildings:
                broken[amap.ways[wayidx].id]=True
        elif len(buildings) > 0:
            # connect all buildings sharing this node through the first one
            firstwayid = amap.ways[buildings[0]].id
            for wayidx in buildings[1:]:
                G.add_edge(firstwayid,amap.ways[wayidx].id)
    print bcnt
    return (G,broken)
def mergeComponents(amap,G,broken):
    """ Merge blocks of buildings into one.

    For every connected component of the building-adjacency graph G, merge
    all non-broken member ways into a single new way (appended to
    amap.ways).  Returns the list of way ids that were merged and should be
    removed from the map.
    """
    remove = []
    for comp in nx.connected_components(G):
        # keep only ways not flagged as broken by makeNeighGraph
        nbc = [c for c in comp if not broken[c]]
        if len(nbc) <= 1:
            continue
        way = mergeWays(amap,nbc)
        # mergeWays returns None on failure; idiom fix: `is not None`
        # instead of `!= None`.
        if way is not None:
            remove += nbc
            amap.ways.append(way)
    return remove
def removeMerged(amap,remove):
    """ Remove original ways, which have been merged together.

    Compacts amap.ways in place: kept entries are shifted to the front, then
    the tail is deleted.  `remove` is a list of way ids to drop.
    """
    removeidxs = [amap.waysIdx[r] for r in remove]
    removeidxs.sort()
    toidx = 0
    ridx = 0
    for fromidx in range(len(amap.ways)):
        if ridx<len(removeidxs) and fromidx == removeidxs[ridx]:
            ridx+=1
            continue
        amap.ways[toidx] = amap.ways[fromidx]
        toidx += 1
    # Fix (off-by-one): valid entries occupy [0, toidx), so index `toidx`
    # itself must be deleted too.  The previous stop bound of `toidx` left
    # one stale duplicate way at the end whenever `remove` was non-empty.
    for i in range(len(amap.ways)-1,toidx-1,-1):
        del amap.ways[i]
def getbbox(amap, wayid):
    """ Get bounding box of a way as (minlon, minlat, maxlon, maxlat)."""
    way = amap.ways[amap.waysIdx[wayid]]
    # Fold over the way's nodes, widening the box one node at a time.
    minlon = minlat = 10**10
    maxlon = maxlat = 0
    for ref in way.refs:
        current = amap.nodes[amap.nodesIdx[ref]]
        minlon = min(minlon, current.lon)
        minlat = min(minlat, current.lat)
        maxlon = max(maxlon, current.lon)
        maxlat = max(maxlat, current.lat)
    return (minlon, minlat, maxlon, maxlat)
def isIn(amap,node,way):
    """ Is node in way?

    Point-in-polygon test by ray casting: shoot a horizontal ray to the
    right of `node` and count crossings with the way's edges; an odd count
    means the node is inside.  Nodes lying on the way itself count as
    inside.
    """
    if node.id in way.refs:
        return True
    elif way.area == False:
        return False
    # cheap bounding-box rejection first
    bbox = getbbox(amap,way.id)
    if node.lon < bbox[0] or node.lon > bbox[2] or node.lat < bbox[1] or node.lat > bbox[3]:
        return False
    intcnt = 0
    up = False
    lat = node.lat
    lon = node.lon
    memnode = amap.nodes[amap.nodesIdx[way.refs[0]]]
    if memnode.lat == lat and memnode.lon >= lon: #FIXME
        return True
    # NOTE: `node` is rebound inside the loop; the query point survives in
    # the cached lat/lon locals above.
    for nid in way.refs[1:]:
        node = amap.nodes[amap.nodesIdx[nid]]
        if node.lat == lat and node.lon == lon:
            return True
        # edge entirely to the left of the ray start: ignore
        if (memnode.lon < lon) and node.lon < lon:
            memnode = node
            continue
        # edge entirely above or entirely below the ray: ignore
        if (memnode.lat < lat and node.lat < lat) or (memnode.lat > lat and node.lat > lat):
            memnode = node
            continue
        # edge entirely to the right: a straddling edge is a crossing
        if (memnode.lon >= lon) and node.lon >=lon:
            if (memnode.lat < lat and node.lat > lat) or (memnode.lat>lat and node.lat<lat):
                intcnt+=1
            memnode = node
            continue
        # edge ends exactly on the ray: remember which side it came from
        if (node.lat==lat):
            if memnode.lat > lat:
                up = True
            elif memnode.lat < lat:
                up = False
            memnode = node
            continue
        # edge starts exactly on the ray: count only if the polygon actually
        # passes through the ray (came from the opposite side)
        if memnode.lat==lat:
            if node.lat > lat:
                if not up:
                    intcnt+=1
            if node.lat < lat:
                if up:
                    intcnt+=1
            memnode = node
            continue
        # general straddling edge: compute the intersection longitude
        if (memnode.lat < lat and node.lat > lat) or (memnode.lat>lat and node.lat<lat):
            dlon = node.lon - memnode.lon
            dlat = node.lat - memnode.lat
            ndlat = lat - memnode.lat
            if (memnode.lon+dlon*(ndlat*1.0/dlat) >= lon):
                intcnt +=1
            memnode = node
            continue
        # NOTE(review): this branch looks unreachable -- memnode.lat == lat
        # is already handled (and `continue`d) above; confirm before any
        # cleanup.
        if memnode.lat == lat:
            return True
        if memnode.lat < lat:
            up = False
        elif memnode.lat > lat:
            up = True
        memnode = node
        continue
    if intcnt%2 == 0:
        return False
    return True
def markInside(amap,raster):
    """ Mark nodes inside barriers.

    For every barrier area, fetch the candidate nodes from the raster
    buckets covered by the barrier's bounding box, run the exact isIn test
    on each and set node.inside = True for hits.
    """
    incnt = 0
    for way in amap.ways:
        if not (way.area and way.type==pbtypes.BARRIER) :
            continue
        bbox = getbbox(amap,way.id)
        # raster bucket coordinates of the two bounding-box corners
        minbox = raster.getBox(bbox[0],bbox[1])
        maxbox = raster.getBox(bbox[2],bbox[3])
        nodes = []
        for i in range(minbox[0],maxbox[0]+1):
            for j in range(minbox[1],maxbox[1]+1):
                nodes += raster.raster[i][j]
        for node in nodes:
            if isIn(amap,amap.nodes[amap.nodesIdx[node]],way):
                amap.nodes[amap.nodesIdx[node]].inside = True
                incnt+=1
        print "Way ",way.id," should collide with ",len(nodes)," nodes."
    print "Nodes:",len(amap.nodes),"inside",incnt
def unmarkBorderNodes(amap):
    """ Unmark nodes on the perimeter of a barrier.

    Walks every non-area, non-barrier way and clears the `inside` flag on
    nodes where the way crosses a barrier boundary, so paths are not cut at
    the exact border.
    """
    waycnt = 0
    for way in amap.ways:
        if way.area or way.type==pbtypes.BARRIER or way.type == pbtypes.IGNORE:
            continue
        memnode = amap.nodes[amap.nodesIdx[way.refs[0]]]
        border = False
        for nodeid in way.refs[1:]:
            node = amap.nodes[amap.nodesIdx[nodeid]]
            # no transition: keep walking
            if memnode.inside==node.inside:
                memnode = node
                continue
            # leaving the barrier: unmark the last inside node
            if memnode.inside and not node.inside:
                memnode.inside = False
                memnode = node
                continue
            # entering the barrier right after a border was handled
            if border:
                memnode = node
                border = False
                continue
            # entering the barrier: unmark the first inside node
            node.inside = False
            border = True
            # NOTE(review): this branch does not advance memnode -- possibly
            # intentional (node.inside was just cleared, so the next
            # iteration sees no transition), but confirm.
        waycnt+=1
    print "Non-barrier ways:",waycnt
def mergeMultipolygon(amap,wayids):
    """ Merge multipolygon into ways.

    Chains the way fragments in `wayids` into one closed ring by following
    node adjacency (every node must have exactly two neighbours).  Returns
    (newway, remaining_wayids); the ways consumed by the ring are removed
    from `wayids`.  Returns (None, None) on failure.
    """
    newway = pb.Way()
    way1 = amap.ways[amap.waysIdx[wayids[0]]]
    neighs = {}
    nodeways = {}
    for wayid in wayids:
        way = amap.ways[amap.waysIdx[wayid]]
        waynodes = [amap.nodes[amap.nodesIdx[ref]] for ref in way.refs]
        for i in range(len(waynodes)):
            wniid = waynodes[i].id
            # remember which ways touch this node (for consumption tracking)
            if wniid not in nodeways:
                nodeways[wniid] = []
            nodeways[wniid].append(wayid)
            if wniid not in neighs:
                neighs[wniid] = []
            # link with predecessor and successor along the fragment
            if i!=0:
                if waynodes[i-1].id not in neighs[wniid]:
                    neighs[wniid].append(waynodes[i-1].id)
            if i!=len(waynodes)-1:
                if waynodes[i+1].id not in neighs[wniid]:
                    neighs[wniid].append(waynodes[i+1].id)
    # a clean ring requires exactly two neighbours per node
    for k,v in neighs.iteritems():
        if len(v)!=2:
            print "Error in node",k
            print neighs
            return (None,None)
    first = amap.nodes[amap.nodesIdx[way1.refs[0]]]
    second = amap.nodes[amap.nodesIdx[neighs[first.id][0]]]
    #print "edge:",first.id," ",second.id
    newway.refs.append(first.id)
    newway.refs.append(second.id)
    # walk the ring, always taking the neighbour we did not come from
    nextid = neighs[newway.refs[-1]][0]
    if nextid == newway.refs[-2]:
        nextid = neighs[newway.refs[-1]][1]
    while(nextid != first.id):
        nextid = neighs[newway.refs[-1]][0]
        if nextid == newway.refs[-2]:
            nextid = neighs[newway.refs[-1]][1]
        # revisiting a node before closing the ring means the walk failed
        if nextid in newway.refs and nextid != first.id:
            print "merging",wayids
            print "Merge failed -- wrong cycle"
            print newway.refs,", repeated:",nextid
            return (None,None)
        newway.refs.append(nextid)
    # print "F",first.id,"n",nextid
    # print newway.refs
    # drop every source way that contributed a node to the new ring
    for nodeid in newway.refs:
        for wayid in nodeways[nodeid]:
            if wayid in wayids:
                wayids.remove(wayid)
    newway.refs.append(first.id)
    newway.type = way1.type
    newway.id = amap.newWayid()
    newway.render = True
    return (newway,wayids)
def mergeMultipolygons(amap):
    """ Merge all multipolygons into ways.

    For each multipolygon relation, collects its OUTER member ways and
    repeatedly stitches them into closed rings via mergeMultipolygon.
    Inner rings (holes) are counted but not processed here.
    """
    print "Multipolygons: ",len(amap.multipols)
    winner = 0
    woinner = 0
    for multi in amap.multipols:
        outer = []
        hasInner = False
        for i in range(len(multi.roles)):
            if multi.roles[i] == pb.Multipolygon.INNER:
                hasInner = True
            else:
                # keep only outer members that still exist in the map
                if multi.refs[i] in amap.waysIdx:
                    outer.append(multi.refs[i])
        if hasInner:
            winner +=1
        else:
            woinner +=1
        if len(outer)<=1:
            continue
        print "Merging",len(outer),"in multipolygon",multi.id
        # one multipolygon may yield several rings; merge until exhausted
        while (len(outer)>0):
            (way,outer) = mergeMultipolygon(amap,outer)
            if way == None:
                print "Merging Error"
                break
            print "Remains",len(outer),"ways"
            # a typed multipolygon overrides the type of its outer ways
            if multi.type != pbtypes.WAY:
                way.type = multi.type
            way.area = True
            print "Way created"
            amap.ways.append(way)
    print "With Inner: ",winner
    print "Without Inner: ",woinner
def divideEdge(slon, slat, shgt, elon, elat, ehgt, cnt):
    """ Return the cnt-1 evenly spaced intermediate (lon, lat, height)
    points that split the edge from (slon, slat, shgt) to
    (elon, elat, ehgt) into cnt segments.  The endpoints themselves are
    not included."""
    step_lon = (elon - slon) / cnt
    step_lat = (elat - slat) / cnt
    step_hgt = (ehgt - shgt) / cnt
    points = []
    for i in range(1, cnt):
        points.append((slon + i * step_lon,
                       slat + i * step_lat,
                       shgt + i * step_hgt))
    return points
def divideLongEdges(amap):
    """ Divide too long edges.

    For every walkable way (type in walkTypes), edges longer than 30
    distance units are split into roughly 20-unit segments by inserting new
    interpolated nodes.  New nodes inherit inside/onBridge/inTunnel only
    when both edge endpoints have the flag set.
    """
    # NOTE(review): longcnt/edgecnt are never updated or read -- leftovers.
    longcnt = 0
    edgecnt = 0
    for wayidx in range(len(amap.ways)):
        way = amap.ways[wayidx]
        if not (way.type in walkTypes):
            continue
        newway = pb.Way()
        replace = False
        for i in range(len(way.refs)-1):
            ref1 = amap.nodes[amap.nodesIdx[way.refs[i]]]
            ref2 = amap.nodes[amap.nodesIdx[way.refs[i+1]]]
            newway.refs.append(ref1.id)
            # distance() comes from utils; units are whatever it returns --
            # presumably the same integer-scaled coordinate units.
            dist = distance(ref1,ref2)
            if dist<30:
                continue
            replace=True
            lonlats = divideEdge(ref1.lon,ref1.lat,ref1.height,ref2.lon,ref2.lat,ref2.height,int(dist/20))
            for lon,lat,hgt in lonlats:
                newnode = pb.Node()
                newnode.id = amap.newNodeid()
                newnode.lon = lon
                newnode.lat = lat
                newnode.height = hgt
                newnode.inside = ref1.inside and ref2.inside
                newnode.onBridge = ref1.onBridge and ref2.onBridge
                newnode.inTunnel = ref1.inTunnel and ref2.inTunnel
                newway.refs.append(newnode.id)
                amap.nodes.append(newnode)
                # NOTE(review): amap.nodesIdx is not updated here; the
                # caller keeps updateNodesIdx() commented out -- confirm the
                # consumers rebuild the index themselves.
        if not replace:
            continue
        # replace the way with the refined copy, keeping its identity
        newway.type = way.type
        newway.id = way.id
        newway.area = way.area
        newway.barrier = way.barrier
        newway.bordertype = way.bordertype
        newway.refs.append(way.refs[-1])
        amap.ways[wayidx] = newway
# Pipeline driver: load the pre-processed map, merge building blocks and
# multipolygons, annotate nodes, split long edges and save the result.
# Each stage is timed and reported.
datadir="../../data/"
start = time.time()
amap = Map()
amap.loadFromPB(datadir+"praha-pre.pbf")
end = time.time()
print "Loading took "+str(end-start)
start = time.time()
# node id -> ways using it (needed by the neighbour graph below)
nodeways=nodeWays(amap)
end = time.time()
print "Nodeways took "+str(end-start)
start = time.time()
mergeMultipolygons(amap)
amap.updateWaysIdx()
end = time.time()
print "Multipolygons took "+str(end-start)
start = time.time()
(G,broken) = makeNeighGraph(amap,nodeways)
#print "Components",len(nx.connected_components(G))
end = time.time()
print "Neighs took "+str(end-start)
start = time.time()
remove = mergeComponents(amap,G,broken)
print "To remove:",len(remove)
removeMerged(amap,remove)
amap.updateWaysIdx()
end = time.time()
print "Merge took "+str(end-start)
start = time.time()
# recompute: merging changed the way list, then drop orphaned nodes
nodeways=nodeWays(amap)
deleteAloneNodes(amap,nodeways)
amap.updateNodesIdx()
end = time.time()
print "Deleting alone nodes took "+str(end-start)
start = time.time()
# spatial bucketing of nodes, used by markInside for candidate lookup
raster = Raster(amap)
end = time.time()
print "Making raster took "+str(end-start)
start = time.time()
markInside(amap,raster)
unmarkBorderNodes(amap)
end = time.time()
print "Marking inside nodes took "+str(end-start)
start = time.time()
divideLongEdges(amap)
#amap.updateNodesIdx()
end = time.time()
print "Long edges took "+str(end-start)
start = time.time()
# free the big intermediates before serialising
del raster
del nodeways
del G
print len(amap.nodes)," nodes, ",len(amap.ways)," ways"
outfile = open(datadir+"praha-union.pbf","w")
outfile.write(amap.toPB().SerializeToString())
outfile.close()
end = time.time()
print "Saving took "+str(end-start)
from __future__ import division, print_function
import numpy as np
from .lightcurve import LightCurve
__all__ = ['cdpp']
def cdpp(flux, **kwargs):
    """A convenience function which wraps LightCurve.cdpp().

    For details on the algorithm used to compute the Combined Differential
    Photometric Precision (CDPP) noise metric, please see the docstring of
    the `LightCurve.cdpp()` method.

    Parameters
    ----------
    flux : array-like
        Flux values.
    **kwargs : dict
        Dictionary of arguments to be passed to `LightCurve.cdpp()`.

    Returns
    -------
    cdpp : float
        Savitzky-Golay CDPP noise metric in units parts-per-million (ppm).
    """
    # A dummy integer time axis is enough: cdpp only uses the flux samples.
    dummy_time = np.arange(len(flux))
    lightcurve = LightCurve(time=dummy_time, flux=flux)
    return lightcurve.cdpp(**kwargs)
#!/usr/bin/env python3
#Compute entropy over AnnData objects
import argparse
import numpy as np
import pandas as pd
import scanpy as sc
import anndata
import scipy
from math import log
def shannon_entropy(x, b_vec, N_b):
    """Normalised Shannon entropy of labels among entries where x > 0.

    x : numeric pandas Series (e.g. one column of kNN connectivities).
    b_vec : pandas Series of labels aligned with x.
    N_b : number of distinct labels overall; dividing by log(N_b)
          normalises the entropy to [0, 1].
    Returns the (positive) normalised entropy.
    """
    # label proportions restricted to positive-weight entries
    selected = b_vec[x > 0]
    proportions = selected.value_counts() / len(selected)
    # accumulate sum(p * ln p) over the non-zero proportions
    raw = 0.0
    for p in proportions.tolist():
        if p != 0:
            raw += p * log(p)
    return -(raw / log(N_b))
def save_file_to_csv(results):
    # Persist the entropy table to the path given on the command line.
    # NOTE(review): depends on the module-level `args` created under
    # __main__ -- this function only works when the script runs as a
    # program.
    results.to_csv(args.output_entropy, header = True, index = False)
def compute_entropy(df, **kwargs):
    """Compute per-cell batch and cell-type entropies and write them to csv.

    df : DataFrame of kNN connectivity weights; entropy is computed
         column-wise (one value per cell).
    kwargs : must provide batch_vector/N_batches and
             cell_type_vector/N_cell_types (see distribute_datasets).
    """
    batch_entropy = df.apply(
        shannon_entropy, axis=0,
        args=(kwargs['batch_vector'], kwargs['N_batches']))
    cell_type_entropy = df.apply(
        shannon_entropy, axis=0,
        args=(kwargs['cell_type_vector'], kwargs['N_cell_types']))
    print("Entropy calculated!")
    combined = pd.concat(
        {'Batch_entropy': batch_entropy, 'Cell_type_entropy': cell_type_entropy},
        axis = 1, keys = ['Batch_entropy', 'Cell_type_entropy'])
    save_file_to_csv(combined)
def distribute_datasets(dataset):
    """Prepare the kNN graph for `dataset` and hand it to compute_entropy.

    Uses a precomputed (e.g. BBKNN) graph when `.uns['neighbors']` exists;
    otherwise runs PCA + neighbour computation first.
    """
    kwargs = {}
    # batch vector (batch id of each cell)
    kwargs['batch_vector'] = dataset.obs[args.batch_key]
    # re-index so it lines up with the integer-indexed connectivity matrix
    kwargs['batch_vector'].index = range(0, len(kwargs['batch_vector']))
    # number of batches
    # fix: use the configured key instead of the hard-coded 'Batch'
    kwargs['N_batches'] = len(dataset.obs[args.batch_key].astype('category').cat.categories)
    # cell-type vector (cell type of each cell)
    kwargs['cell_type_vector'] = dataset.obs[args.celltype_key]
    # re-index so it lines up with the connectivity matrix
    kwargs['cell_type_vector'].index = range(0, len(kwargs['cell_type_vector']))
    # number of cell types
    # fix: use the configured key instead of the hard-coded 'cell_type1'
    kwargs['N_cell_types'] = len(dataset.obs[args.celltype_key].astype('category').cat.categories)
    try:
        # fix: bind the connectivity matrix, not the whole neighbors dict --
        # the old code later called .toarray() on a dict and crashed for
        # BBKNN-corrected objects.
        knn_graph = dataset.uns['neighbors']['connectivities']
        print('BBKNN corrected object!')
    except KeyError:
        # Both pre-corrected logcounts and Scanorama counts enter this way:
        # compute PCA, then the kNN graph.
        sc.tl.pca(dataset, n_comps=args.n_pcs)
        # fix: the argparse dest is 'n_neighbours' (British spelling);
        # args.n_neighbors raised AttributeError.
        sc.pp.neighbors(dataset, n_neighbors=args.n_neighbours, knn=True)
        # knn graph
        knn_graph = dataset.uns['neighbors']['connectivities']
    # transform the csr_matrix into a DataFrame (cells as columns)
    df = pd.DataFrame(knn_graph.toarray())
    compute_entropy(df, **kwargs)
def read_h5ad(dataset):
    """Read an input .h5ad file path and return the AnnData object."""
    adata = sc.read(dataset)
    # fix: the print statement used to sit after `return` and was dead code
    print("File read!")
    return adata
# args
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Input/Output files')
    parser.add_argument("--input_object",
                        dest='input_object',
                        type=str,
                        help='Input h5ad object')
    parser.add_argument("--batch_key",
                        dest='batch_key',
                        type=str,
                        default='Batch',
                        help='Cell key defining Batch')
    parser.add_argument("--celltype_key",
                        dest='celltype_key',
                        type=str,
                        default='cell_type1',
                        help='Cell key defining cell type')
    parser.add_argument("--n_neighbours",
                        dest='n_neighbours',
                        type=int,
                        default=30,
                        help='Number of nearest neighbours per batch to perform graph correction.')
    parser.add_argument("--n_pcs",
                        dest='n_pcs',
                        type=int,
                        default=25,
                        help='Number of PCs for PCA prior to graph correction')
    parser.add_argument('--output_entropy',
                        dest='output_entropy',
                        type=str,
                        help='Csv with entropy values')
    args = parser.parse_args()
    # fix: read_h5ad(args) used to pass the whole Namespace to sc.read(),
    # and distribute_datasets was never called, so no entropy was computed.
    dataset = read_h5ad(args.input_object)
    distribute_datasets(dataset)
import ctypes
import glob
import os
import numpy as np
from multiprocessing import cpu_count
# Build the extension function (this should be negligible performance-wise)
# Path to the compiled C extension shared object ("ctransforms*") that sits
# next to this module.
# NOTE(review): glob(...)[0] raises IndexError when the extension has not
# been built -- a clearer error message might be worth adding upstream.
fl = glob.glob(os.path.join(os.path.dirname(__file__), "ctransforms*"))[0]
def morlet_transform_c(data, nu, convergence_extent=10.0, fourier_b=1,
                       vol_norm=False, nthreads=None):
    """
    Perform a Morlet Transform using underlying C code.

    Parameters
    ----------
    data : array_like, SHAPE=[N_NU, ...]
        The visibility data on which to perform the Morlet Transform. The
        transform itself occurs over the first axis.
    nu : array_like, SHAPE=[N_NU]
        The frequencies
    convergence_extent : float, optional
        How many sigma to integrate the Morlet kernel
    fourier_b : float, optional
        Defines the Fourier convention.
    vol_norm : bool, optional
        Whether to apply a volume normalisation so that different eta
        have the same expected power.
    nthreads : int, optional
        Number of threads to use in transform. Default is all of them.

    Returns
    -------
    complex array, SHAPE=[N_ETA, N_NU, ...]
        The output transformed visibilities.
    eta : array
        The transform-domain coordinates.
    nu : array
        The input frequencies, passed through.
    """
    morlet = ctypes.CDLL(fl).cmorlet
    morlet.argtypes = [
        ctypes.c_uint, ctypes.c_uint, ctypes.c_uint, ctypes.c_double, ctypes.c_double,
        np.ctypeslib.ndpointer("complex128", flags="C_CONTIGUOUS"),
        np.ctypeslib.ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"),
        np.ctypeslib.ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"),
        ctypes.c_int,
        np.ctypeslib.ndpointer("complex128", flags="C_CONTIGUOUS"),
    ]
    if nthreads is None:
        nthreads = cpu_count()
    assert nthreads <= cpu_count()
    # Get data into shape (everything_else, len(nu))
    orig_shape = data.shape
    n_nu = orig_shape[0]
    # fix: np.product is deprecated in favour of np.prod
    n_data = int(np.prod(orig_shape[1:]))
    assert n_nu == len(nu)
    data = np.ascontiguousarray(data.flatten())
    dnu = (nu.max() - nu.min()) / (n_nu - 1)
    L = n_nu * dnu
    eta = np.arange(1, n_nu / 2) / L
    n_eta = len(eta)
    out = np.zeros(n_data * n_nu * n_eta, dtype=np.complex128)
    # fourier_b is coerced to float so an integer default cleanly maps to
    # the declared c_double argument.
    morlet(n_data, n_nu, n_eta, float(convergence_extent), float(fourier_b),
           data, nu, eta, nthreads, out)
    if vol_norm:
        norm = np.sqrt(np.abs(eta)) * dnu * np.pi ** (-1. / 4)
    else:
        norm = dnu
    # (leftover debug print of out[350] removed)
    out = norm * out.reshape((len(eta),) + orig_shape)  # Make the array.
    return out, eta, nu
def morlet_transform(data, t, fourier_b=1):
    """
    Pure-python reference implementation of the Morlet transform.

    `data` may be N-dimensional; the transform is applied over the *last*
    axis.  `t` corresponds to 'nu' in terms of the sky.  Returns
    (transform, f, t) where the transform carries one extra trailing axis
    of length len(f).
    """
    orig_shape = data.shape
    n = orig_shape[-1]
    flat = data.reshape((-1, n))  # collapse all leading axes into one

    dt = (t.max() - t.min()) / (n - 1)
    L = n * dt
    f = np.arange(1, n / 2) / L

    out = np.zeros(flat.shape + (len(f),), dtype=np.complex128)
    for row, d in enumerate(flat):
        # (len(f), n, n) array of (t_i - t_j) * f_k products
        reduced = np.outer(np.add.outer(t, -t), f).reshape((len(t), len(t), len(f))).T
        # Gaussian envelope times the complex oscillation
        kernel = np.exp(-reduced ** 2 / 2) * np.exp(fourier_b * reduced * 1j)
        out[row] = np.sum(kernel * d.T, axis=-1).T

    norm = np.sqrt(np.abs(f)) * dt * np.pi ** (-1. / 4)
    return (norm * out).reshape(orig_shape + (len(f),)), f, t
import numpy as np
import matplotlib.pyplot as plt

# Quick tour of matplotlib figure/axes basics.
data = np.arange(10)
plt.plot(data)

fig = plt.figure()  # create figure object
ax1 = fig.add_subplot(2, 2, 1)  # add 2 x 2 subplots to fig, initialize at pos 1
ax2 = fig.add_subplot(2, 2, 2)
ax3 = fig.add_subplot(2, 2, 3)
plt.plot([1.5, 3.5, -2, 1.6])  # draw on last active plot
plt.plot(np.random.randn(50).cumsum(), 'k--')  # note how axis are changed automatically
# k ... style option, --: use dashed line

# plot to a subplot by calling its axis
_ = ax1.hist(np.random.randn(100), bins=20, color='k', alpha=.3)
ax2.scatter(np.arange(30), np.arange(30) + 3 * np.random.randn(30))

fig, axes = plt.subplots(2, 3)  # convenience function to create subplots
type(axes)  # note: is numpy ndarray and can be indexed [, ] style
axes.shape  # and has shape (2, 3)
# fix: `axes[1, 0] scatter(...)` was a SyntaxError -- missing attribute dot
axes[1, 0].scatter(np.arange(20), np.arange(20) + 2 * np.random.randn(20))

# plot parameters
x = np.random.randn(30)
y = 5 + 0.8 * x + np.random.randn(30)
plt.plot(x, y, linestyle="--", color="g")
plt.close()
plt.scatter(x, y, marker="o", color="#b2df8a")  # accepts hex
plt.scatter(x, y, label="Random Data")
plt.close()

data = np.random.randn(50).cumsum()
plt.plot(data)  # line charts are linearly interpolated by default
plt.plot(data, "k--", label="Default")
plt.plot(data, "k-", drawstyle="steps-post", label="steps-post")
plt.legend(loc="best")  # required to create legend, will work even if no labels set
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.html
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import argparse
import timeit
import numpy as np
import pyspark.sql.functions as fn
import pyspark.sql.types as t
from pyspark import SparkContext
from pyspark.sql import HiveContext
from pyspark.sql.functions import concat_ws, col
from datetime import datetime, timedelta
from din_model.pipeline.util import load_config, load_df, load_batch_config
from din_model.pipeline.util import write_to_table, drop_table, print_batching_info
from pyspark.sql.window import Window
def fit_distribution(df):
    """Assign a synthetic region ID (new column ``index``) to every row of *df*.

    Region IDs are drawn so that the share of rows per region matches the
    historical impression share in ``raw_impressions``: each row gets a
    quasi-uniform score in (0, 1] (its ``cume_dist`` rank within its uckey
    partition) that is reverse-looked-up in the cumulative distribution of
    the region shares.
    """
    raw_impressions = [
        547788492, 113155690, 357229507, 353837519, 231243807,
        343536283, 204197454, 298400019, 154702872, 101595992,
        173078606, 245617500, 210661722, 94400758, 100694511,
        104621562, 47302450, 254680986, 38653660, 118198547,
        167100705, 484483594, 69681730, 230778513, 109614764,
        86169134, 125825905, 478045002, 476281884, 155086936,
        100034338, 140623283, 132801777, 150906980, 108772611,
        2165682, 41589670, 327990489, 85909446, 349940682,
        8776893, 33502930, 282401283, 82297276, 385473488,
        219411532, 154739307, 51048940, 192605283, 114587775,
        230422182, 41743162, 103709711, 171397519, 158854098,
        105911195, 118981954, 78965914, 91906274, 158792685,
        63487656, 54706539, 111072455, 92442258, 150615474,
        79697857, 108585993, 112360549, 262424361, 494715712,
        1152693549, 1035303850, 324325907, 921851042, 390727201,
        1257338071, 392629713, 778974819, 129782245, 1683290505,
        811910155, 1997598872]
    total_impressions = sum(raw_impressions)
    # region ID (1-based) -> fraction of all impressions in that region
    region_percentage = {i + 1: float(x) / float(total_impressions)
                         for i, x in enumerate(raw_impressions)}
    # Cumulative distribution of the region shares.
    # BUG FIX (Py3): np.cumsum() over a dict view fails, and zip() returns a
    # one-shot iterator while cum_pairs must be iterated many times (once
    # building cum_map, then once per lookup_ipl call) -- materialize both.
    cum_percentage = list(np.cumsum(list(region_percentage.values())))
    # list of [lower_end, upper_end) interval tuples
    cum_pairs = list(zip([0] + cum_percentage[:-1], cum_percentage))
    # reverse lookup: [lower_end, upper_end) interval -> region ID
    cum_map = {x: i + 1 for i, x in enumerate(cum_pairs)}
    # uid: globally unique, monotonically increasing (not consecutive) 64-bit
    # id per row; see pyspark.sql.functions.monotonically_increasing_id()
    df1 = df.withColumn('uid', fn.monotonically_increasing_id())
    # rank rows within each uckey partition by uid
    w = Window.partitionBy('uckey').orderBy('uid')
    # cume_dist(): fraction of partition rows at or below the current row.
    # With a unique ordering key this is i/n for the i-th of n rows, i.e. a
    # uniform grid on (0, 1] usable as a probability draw.
    df2 = df1.withColumn('cum_id', fn.cume_dist().over(w))

    def lookup_ipl(value):
        # BUG FIX: the original tested `value == 1.0` inside the loop, which
        # short-circuits on the FIRST interval and mapped every partition's
        # last row (cume_dist == 1.0) to region 1.  A value of exactly 1.0
        # belongs to the LAST interval (whose upper bound may fall just short
        # of 1.0 due to floating-point rounding).
        if value == 1.0:
            return cum_map[cum_pairs[-1]]
        # naive linear scan, O(#regions)
        for pair in cum_pairs:
            low, high = pair
            if low <= value < high:
                return cum_map[pair]
        return -1

    _udf_1 = fn.udf(lookup_ipl, t.IntegerType())
    df3 = df2.withColumn('index', _udf_1(fn.col('cum_id')))
    df3 = df3.drop(df3.uid).drop(df3.cum_id)
    return df3
def add_region_to_logs(hive_context, batch_config, logs_table):
    """Rewrite *logs_table* so every row carries a fitted ``region_id``.

    Processes the table in ``load_minutes``-sized time slices between
    ``start_date`` and ``end_date``, writes the tagged slices to a temporary
    table, then swaps the temporary table in place of the original.
    """
    start_date, end_date, load_minutes = batch_config
    timer_start = timeit.default_timer()
    batched = 1
    starting_time = datetime.strptime(start_date, "%Y-%m-%d")
    ending_time = datetime.strptime(end_date, "%Y-%m-%d")
    logs_table_temp_name = logs_table+'_temp'
    while starting_time < ending_time:
        # data clean for showlog table.
        time_start_str = starting_time.strftime("%Y-%m-%d %H:%M:%S")
        batched_time_end = starting_time + timedelta(minutes=load_minutes)
        time_end_str = batched_time_end.strftime("%Y-%m-%d %H:%M:%S")
        print_batching_info("Main regions", batched, time_start_str, time_end_str)
        command = """select * from {} where action_time >= '{}' and action_time < '{}'"""
        logs = hive_context.sql(command.format(logs_table, time_start_str, time_end_str))
        # replace any pre-existing region_id with the freshly fitted one
        logs = logs.drop(col('region_id'))
        logs = fit_distribution(logs)
        logs = logs.withColumnRenamed('index', 'region_id')
        # rebuild uckey so it embeds the new region_id as its last component
        logs = logs.withColumn('uckey', concat_ws(",", col('media'),
                                                  col('media_category'), col('net_type'),
                                                  col('gender'), col('age'), col('region_id')))
        # first slice creates the temp table, later slices append to it
        mode = 'overwrite' if batched == 1 else 'append'
        write_to_table(logs, logs_table_temp_name, mode=mode)
        batched += 1
        starting_time = batched_time_end
    # use the temp table to save all the batched logs with region id inside it.
    # drop the logs table and alter the temp table name to the logs table.
    drop_table(hive_context, logs_table)
    command = """alter table {} rename to {}""".format(
        logs_table_temp_name, logs_table)
    hive_context.sql(command)
    timer_end = timeit.default_timer()
    print('Total batching seconds: ' + str(timer_end - timer_start))
def run(hive_context, cfg):
    """Entry point: tag the configured main-logs table with region IDs."""
    cfg_logs = cfg['pipeline']['main_logs']
    add_region_to_logs(hive_context=hive_context,
                       batch_config=load_batch_config(cfg),
                       logs_table=cfg_logs['logs_output_table_name'])
if __name__ == "__main__":
    """
    This is an optional step only for the logs data without regions.
    If original logs have the geo info or region(ipl or r), ignore this.
    """
    # Build the Spark/Hive context and config from the CLI, tag the logs
    # with region IDs, then release cluster resources.
    sc, hive_context, cfg = load_config(description="main logs with regions")
    run(hive_context=hive_context, cfg=cfg)
    sc.stop()
# author: GROUP 12
# date: 2021-11-19
'''This script downloads a data file in csv format.
This script takes an unquoted data file path to a csv file,
the name of the file type to write the file to (ex. csv),
and the name of a file path to write locally (including the name of the file).
Usage: data_download.py --file_path=<file_path> --out_type=<out_type> --out_file=<out_file>
Options:
--file_path=<file_path> Path to the data file (must be in standard csv format)
--out_type=<out_type> Type of file to write locally (script supports either feather or csv)
--out_file=<out_file> Path (including filename) of where to locally write the file
'''
import pandas as pd
import numpy as np
from docopt import docopt
import os
import requests
# Parse the command-line arguments declared in the module docstring above.
opt = docopt(__doc__)
def main(file_path, out_type, out_file):
    """Download the csv at *file_path*, validate it, and write it locally.

    Parameters
    ----------
    file_path : str
        URL or path of the raw csv file.
    out_type : str
        Output format; only "csv" is implemented.
        NOTE(review): the usage text also advertises "feather", but no
        branch handles it -- confirm whether that is still wanted.
    out_file : str
        Local path (including filename) to write to.
    """
    try:
        response = requests.get(file_path)
        # BUG FIX: the original evaluated `request.status_code == 200` and
        # discarded the result; actually raise on a non-success status so
        # the message below is printed.
        response.raise_for_status()
    except Exception as req:
        print("Website at the provided url does not exist.")
        print(req)
    # read in data and test it
    data = pd.read_csv(file_path)
    # Tests for raw data
    test_path(data)
    test_columns(data)
    # Write the file, creating the parent directory on demand
    if out_type == "csv":
        try:
            data.to_csv(out_file, index=False)
        except OSError:
            # narrow the original bare `except:`; only a missing parent
            # directory is recoverable here
            os.makedirs(os.path.dirname(out_file), exist_ok=True)
            data.to_csv(out_file, index=False)
def test_path(data):
assert data.empty == False, "Data file path/URL is incorrect"
def test_columns(data):
assert data.columns[0] == 'customerID', "Data columns are incorrect"
assert data.columns[1] == 'gender', "Data columns are incorrect"
assert data.columns[2] == 'SeniorCitizen', "Data columns are incorrect"
assert data.columns[3] == 'Partner', "Data columns are incorrect"
assert data.columns[20] == 'Churn', "Target column is incorrect"
if __name__ == "__main__":
    # Call main method, and have the user input file path, out type, and out
    # path (all parsed by docopt from the module docstring).
    main(opt["--file_path"], opt["--out_type"], opt["--out_file"])
from sympy import symbols, cos, sin
from sympy.external import import_module
from sympy.utilities.matchpy_connector import WildDot, WildPlus, WildStar
matchpy = import_module("matchpy")
x, y, z = symbols("x y z")
def _get_first_match(expr, pattern):
    """Return the first (Pattern, Substitution) pair matchpy finds for *expr*."""
    from matchpy import ManyToOneMatcher, Pattern
    m2o = ManyToOneMatcher()
    m2o.add(Pattern(pattern))
    return next(iter(m2o.match(expr)))
def test_matchpy_connector():
    """Check WildDot/WildPlus/WildStar matching against sympy expressions."""
    # skip silently when the optional matchpy dependency is absent
    if matchpy is None:
        return
    from multiset import Multiset
    from matchpy import Pattern, Substitution
    w_ = WildDot("w_")
    w__ = WildPlus("w__")
    w___ = WildStar("w___")
    # WildDot: matches exactly one argument
    expr = x + y
    pattern = x + w_
    p, subst = _get_first_match(expr, pattern)
    assert p == Pattern(pattern)
    assert subst == Substitution({'w_': y})
    # WildPlus: matches one or more arguments (as a Multiset)
    expr = x + y + z
    pattern = x + w__
    p, subst = _get_first_match(expr, pattern)
    assert p == Pattern(pattern)
    assert subst == Substitution({'w__': Multiset([y, z])})
    # WildStar: matches zero or more arguments (empty Multiset here)
    expr = x + y + z
    pattern = x + y + z + w___
    p, subst = _get_first_match(expr, pattern)
    assert p == Pattern(pattern)
    assert subst == Substitution({'w___': Multiset()})
def test_matchpy_optional():
    """Check WildDot optional values and ManyToOneReplacer substitution."""
    # skip silently when the optional matchpy dependency is absent
    if matchpy is None:
        return
    from matchpy import Pattern, Substitution
    from matchpy import ManyToOneReplacer, ReplacementRule
    # p defaults to 1 (multiplicative identity), q to 0 (additive identity)
    p = WildDot("p", optional=1)
    q = WildDot("q", optional=0)
    pattern = p*x + q
    expr1 = 2*x
    pa, subst = _get_first_match(expr1, pattern)
    assert pa == Pattern(pattern)
    assert subst == Substitution({'p': 2, 'q': 0})
    expr2 = x + 3
    pa, subst = _get_first_match(expr2, pattern)
    assert pa == Pattern(pattern)
    assert subst == Substitution({'p': 1, 'q': 3})
    expr3 = x
    pa, subst = _get_first_match(expr3, pattern)
    assert pa == Pattern(pattern)
    assert subst == Substitution({'p': 1, 'q': 0})
    expr4 = x*y + z
    pa, subst = _get_first_match(expr4, pattern)
    assert pa == Pattern(pattern)
    assert subst == Substitution({'p': y, 'q': z})
    # the optional defaults also flow into replacement rules
    replacer = ManyToOneReplacer()
    replacer.add(ReplacementRule(Pattern(pattern), lambda p, q: sin(p)*cos(q)))
    assert replacer.replace(expr1) == sin(2)*cos(0)
    assert replacer.replace(expr2) == sin(1)*cos(3)
    assert replacer.replace(expr3) == sin(1)*cos(0)
    assert replacer.replace(expr4) == sin(y)*cos(z)
'''
does a few things. First, it counts the total number of the 4 orientations of read pairs (left-most first, ++, +-, -+, --).
This is Erez's in-in, in-out, out-in, out-out. Second, it counts the same for only reads with distances less than 2000 bp,
and prints out a file with the distances for the four types.
'''
from optparse import OptionParser
import sys
import re
import gzip
import random
from random import randint
from random import shuffle
from numpy import percentile
def parse_options():
    """Parse the -f/--infile and -o/--outfile command-line options."""
    parser = OptionParser()
    parser.add_option("-f", "--infile", dest="filename",
                      help="input file: paired mapped", metavar="INFILE")
    parser.add_option("-o", "--outfile", dest="outfile",
                      help="outfile stem", metavar="OUTFILE")
    opts, _args = parser.parse_args()
    return opts
# ---------------------------------------------------------------------------
# Main script: tally read-pair orientations and dump per-orientation
# distance tables (see module docstring).
# ---------------------------------------------------------------------------
options = parse_options()

# Orientation counters over ALL intra-chromosomal pairs, left-most read
# first: ++, +-, -+, --.
pp_count = 0
pm_count = 0
mp_count = 0
mm_count = 0
# Pair distances, kept only when below 2000 bp, per orientation.
plus_plus = []
plus_minus = []
minus_plus = []
minus_minus = []

f = options.filename
# transparently support gzip-compressed input
infile = gzip.open(f, 'rt') if f[-2:] == 'gz' else open(f, 'r')
for line in infile:
    items = line.split()
    Lmost_strand = ''
    Rmost_strand = ''
    chr1 = items[2]
    chr2 = items[5]
    pos1 = int(items[3])
    pos2 = int(items[6])
    # Only intra-chromosomal pairs are classified; pairs at identical
    # positions leave the strands empty and are not counted (as before).
    if chr1 == chr2:
        size = abs(pos1 - pos2)
        if pos1 < pos2:
            Lmost_strand = items[1]
            Rmost_strand = items[4]
        if pos1 > pos2:
            Lmost_strand = items[4]
            Rmost_strand = items[1]
    if Lmost_strand == '+' and Rmost_strand == '+':
        if size < 2000:
            plus_plus.append(size)
        pp_count += 1
    if Lmost_strand == '+' and Rmost_strand == '-':
        if size < 2000:
            plus_minus.append(size)
        pm_count += 1
    if Lmost_strand == '-' and Rmost_strand == '+':
        if size < 2000:
            minus_plus.append(size)
        mp_count += 1
    if Lmost_strand == '-' and Rmost_strand == '-':
        if size < 2000:
            minus_minus.append(size)
        mm_count += 1
infile.close()

max_pp = len(plus_plus)
max_pm = len(plus_minus)
max_mp = len(minus_plus)
max_mm = len(minus_minus)
max_entry = max(max_pp, max_pm, max_mp, max_mm)

# BUG FIX: the original commented out every `outfile.write(...)` inside the
# `if` branches while keeping the `else:` clauses, leaving empty if-bodies
# (a SyntaxError) that referenced a file handle that was never opened.
# Restore the distance table promised by the module docstring, padding the
# shorter columns with 'NA'.
with open(options.outfile, 'w') as outfile:
    outfile.write('++' + '\t' + '+-' + '\t' + '-+' + '\t' + '--' + '\n')
    columns = ((plus_plus, '\t'), (plus_minus, '\t'),
               (minus_plus, '\t'), (minus_minus, '\n'))
    for i in range(0, max_entry):
        for dists, sep in columns:
            outfile.write((str(dists[i]) if i < len(dists) else 'NA') + sep)

print('2000 counts: ' + str(max_pp) + '\t' + str(max_pm) + '\t' + str(max_mp) + '\t' + str(max_mm) + '\n')
print('Total counts: ' + str(pp_count) + '\t' + str(pm_count) + '\t' + str(mp_count) + '\t' + str(mm_count) + '\n')
# -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as st
import scipy.signal as sig
from collections import namedtuple
from scipy.constants import c, physical_constants
# Lightweight spectrum container: wavelengths, delay times, 2-D data matrix.
tup = namedtuple('tup','wl t data')
def add_to_cls(cls):
    """Decorator factory: attach the decorated function to *cls* as a staticmethod."""
    def _attach(fn):
        setattr(cls, fn.__name__, staticmethod(fn))
        return fn
    return _attach
def add_tup(str_lst):
    """Decorator factory: register the decorated function's name in
    ``str_lst[0]`` and the function itself in ``str_lst[1]``."""
    def _register(fn):
        str_lst[0].append(fn.__name__)
        str_lst[1].append(fn)
        return fn
    return _register
def fs2cm(t):
    """Convert an optical period in femtoseconds to a wavenumber in cm^-1."""
    period_cm = t * 3e-5
    return 1 / period_cm
def cm2fs(cm):
    """Convert a wavenumber in cm^-1 to an optical period in femtoseconds."""
    scaled = cm * 3e-5
    return 1 / scaled
def nm2cm(nm):
    """Convert a wavelength in nm to a wavenumber in cm^-1."""
    return 1e7 / nm
def cm2nm(cm):
    """Convert a wavenumber in cm^-1 to a wavelength in nm."""
    return 1e7 / cm
def cm2eV(cm):
    """Convert a wavenumber in cm^-1 to a photon energy in eV."""
    # CODATA factor: 1 eV expressed in inverse metres, rescaled to cm^-1
    eV_per_inv_cm = physical_constants['electron volt-inverse meter relationship'][0] / 100
    return cm / eV_per_inv_cm
def eV2cm(eV):
    """Convert a photon energy in eV to a wavenumber in cm^-1."""
    # CODATA factor: 1 eV expressed in inverse metres, rescaled to cm^-1
    eV_per_inv_cm = physical_constants['electron volt-inverse meter relationship'][0] / 100
    return eV * eV_per_inv_cm
def cm2THz(cm):
    """Convert a wavenumber in cm^-1 to a frequency in THz.

    BUG FIX: the original returned ``c / (cm * 100)``, which is the
    wavelength in metres (dimensionally not a frequency).  The frequency is
    nu[Hz] = c * sigma[m^-1] with sigma[m^-1] = 100 * sigma[cm^-1], divided
    by 1e12 for THz.
    """
    return c * cm * 100 / 1e12
def trimmed_mean(arr, axis=-1, ratio=2., use_sem=True):
    """Outlier-robust mean along *axis*.

    Samples farther than ``ratio`` trimmed standard deviations from the
    10%-trimmed mean are discarded, then the remaining samples are averaged.
    Returns ``(mean, std)`` where ``std`` is the standard error of the mean
    when *use_sem* is true, otherwise the standard deviation of the kept
    samples.

    NOTE(review): the first ``std`` assignment is dead code -- it is
    immediately overwritten by the trimmed estimate on the next line.
    """
    arr = np.sort(arr, axis=axis)
    std = np.std(arr, axis, keepdims=1)
    # 10%-trimmed moment estimates used to flag outliers
    std = np.std(st.trimboth(arr, 0.1, axis), keepdims=1)
    mean = np.mean(st.trimboth(arr, 0.1, axis), keepdims=1)
    #std = np.std(st.trimboth(arr, 0.1, axis), keepdims=1)
    #mean = np.mean(st.trimboth(arr, 0.1, axis), keepdims=1)
    # boolean mask of outliers; n = sqrt(#kept samples) per slice (for SEM)
    idx = np.abs(arr - mean) > ratio * std
    n = np.sqrt(np.sum(~idx, axis))
    if not use_sem:
        n = 1
    # blank out outliers (on the sorted copy, not the caller's array) and
    # recompute the moments on the survivors
    arr[idx] = np.nan
    mean = np.nanmean(arr, axis)
    std = np.nanstd(arr, axis, ddof=1)/n
    return mean, std
from scipy.interpolate import UnivariateSpline
def smooth_spline(x, y, s):
    """Evaluate a smoothing spline of y(x) (smoothing factor *s*) back at *x*."""
    spline = UnivariateSpline(x, y, s=s)
    return spline(x)
def svd_filter(d, n=6):
    """Low-rank denoising: reconstruct *d* keeping only the *n* largest
    singular values."""
    u, s, v = np.linalg.svd(d, full_matrices=0)
    s[n:] = 0
    return np.dot(u, np.dot(np.diag(s), v))
def apply_spline(t, d, s=None):
    """Apply :func:`smooth_spline` to every column of the 2-D array *d*."""
    out = np.zeros_like(d)
    for col in range(d.shape[1]):
        out[:, col] = smooth_spline(t, d[:, col], s)
    return out
def normalize(x):
    """Scale *x* so the largest absolute value becomes 1."""
    peak = abs(x).max()
    return x / peak
def weighted_binner(n, wl, dat, std):
    """
    Given wavelengths and data it bins the data into n wavelengths, weighting
    each sample by its inverse variance (1/std**2).
    Returns binned data, binned wavelengths and binned stds.
    """
    # sort channels by wavelength so the bins are contiguous
    i = np.argsort(wl)
    wl = wl[i]
    dat = dat[:, i]
    # n+1 equally spaced bin edges translated to channel indices
    idx = np.searchsorted(wl,np.linspace(wl.min(),wl.max(),n+1))
    binned = np.empty((dat.shape[0], n))
    binned_std = np.empty_like(binned)
    binned_wl = np.empty(n)
    for i in range(n):
        data = dat[:,idx[i]:idx[i+1]]
        # inverse-variance weights for the channels in this bin
        weights = 1/std[:,idx[i]:idx[i+1]]**2
        binned[:,i] = np.average(data, 1, weights)
        # NOTE(review): this is the weighted mean of the stds, not the
        # propagated error 1/sqrt(sum(weights)) -- confirm intent
        binned_std[:, i] = np.average(std[:,idx[i]:idx[i+1]], 1, weights)
        binned_wl[i] = np.mean(wl[idx[i]:idx[i+1]])
    return binned, binned_wl, binned_std
def binner(n, wl, dat, func=np.mean):
    """
    Bin *dat* (time x wavelength) into *n* wavelength bins, reducing each
    bin with *func*.  Returns the binned data and the binned wavelengths.
    """
    order = np.argsort(wl)
    wl = wl[order]
    dat = dat[:, order]
    # n+1 equally spaced bin edges translated to channel indices
    edges = np.searchsorted(wl, np.linspace(wl.min(), wl.max(), n + 1))
    binned = np.empty((dat.shape[0], n))
    binned_wl = np.empty(n)
    for k in range(n):
        lo, hi = edges[k], edges[k + 1]
        binned[:, k] = func(dat[:, lo:hi], 1)
        binned_wl[k] = np.mean(wl[lo:hi])
    return binned, binned_wl
def fi(w, x):
    """
    Find the index of the nearest value in an array.

    Parameters
    ----------
    w : np.ndarray
        Array where to look.
    x : float or list of floats
        Value or values to look for.

    Returns
    -------
    int or list of ints
        Indices of the nearest values; a bare int for a scalar query.
    """
    try:
        len(x)
    except TypeError:
        x = [x]
    nearest = [np.argmin(np.abs(w - xi)) for xi in x]
    return nearest[0] if len(nearest) == 1 else nearest
def subtract_background(dat, t, tn, offset=0.3):
    """Per column i, subtract the mean signal measured earlier than
    ``tn[i] - offset`` (the pre-time-zero baseline)."""
    out = np.zeros_like(dat)
    for col in range(dat.shape[1]):
        pre_zero = (t - tn[col]) < -offset
        baseline = dat[pre_zero, col].mean()
        out[:, col] = dat[:, col] - baseline
    return out
def polydetrend(x, t=None, deg=3):
    """Subtract a degree-*deg* polynomial fit (over *t*, default index) from *x*."""
    if t is None:
        t = np.arange(x.shape[0])
    trend = np.poly1d(np.polyfit(t, x, deg))(t)
    return x - trend
def exp_detrend(y, t, start_taus=[1], use_constant=True):
    """Subtract a (multi-)exponential fit from *y*; see :func:`exp_fit`.

    NOTE(review): the mutable default ``start_taus=[1]`` is shared across
    calls but never mutated here.
    """
    _, fitted = exp_fit(t, y, start_taus, use_constant=use_constant, verbose=0)
    return y - fitted
def arr_polydetrend(x, t=None, deg=3):
    """Apply :func:`polydetrend` to every column of the 2-D array *x*."""
    out = np.zeros_like(x)
    for col in range(x.shape[1]):
        out[:, col] = polydetrend(x[:, col], t, deg)
    return out
from scipy.stats import trim_mean
def meaner(dat, t, llim, ulim, proportiontocut=0.0):
    """Trimmed time-average of *dat* between t = llim and t = ulim."""
    lo, hi = fi(t, llim), fi(t, ulim)
    return trim_mean(dat[lo:hi], axis=0, proportiontocut=proportiontocut)
def legend_format(l):
    """Turn a list of times in femtoseconds into picosecond legend labels."""
    labels = []
    for v in l:
        labels.append(str(v / 1000.) + ' ps')
    return labels
def apply_sg(y, window_size, order, deriv=0):
    """Apply a Savitzky-Golay dot-product filter column-wise to *y*.

    BUG FIX: the original hard-coded ``deriv=0`` in the ``savgol_coeffs``
    call, silently ignoring the *deriv* argument.

    NOTE(review): ``coeffs.dot(y[:, i])`` yields one scalar per column, so
    *window_size* must equal ``y.shape[0]``; the scalar is broadcast over
    the whole output column -- confirm this is the intended use.
    """
    out = np.zeros_like(y)
    coeffs = sig.savgol_coeffs(window_size, order, deriv=deriv, use='dot')
    for i in range(y.shape[1]):
        out[:, i] = coeffs.dot(y[:, i])
    return out
import scipy.ndimage as nd
def apply_sg_scan(y, window_size, order, deriv=0):
    """Savitzky-Golay filter along axis 0 of *y* via 1-D convolution.

    BUG FIX: the original hard-coded ``deriv=0`` in the ``savgol_coeffs``
    call, silently ignoring the *deriv* argument.  The dead
    ``out = np.zeros_like(y)`` pre-allocation (immediately overwritten) was
    dropped.
    """
    coeffs = sig.savgol_coeffs(window_size, order, deriv=deriv)
    # 'nearest' edge mode replicates the boundary samples
    out = nd.convolve1d(y, coeffs, 0, mode='nearest')
    return out
def calc_error(args):
    """
    Calculates the parameter errors from a scipy.optimize.leastsq result
    tuple ``(p, cov, infodict, mesg, success)``.

    Returns the parameters and their one-sigma uncertainties (covariance
    diagonal scaled by the reduced chi-square).
    """
    p, cov, info, mesg, success = args
    residuals = info["fvec"]
    chisq = sum(residuals * residuals)
    dof = len(residuals) - len(p)
    scale = np.sqrt(chisq / dof)
    sigma = np.array([np.sqrt(cov[k, k]) * scale for k in range(len(p))])
    return p, sigma
def min_pulse_length(width_in_cm, shape='gauss'):
    """Transform-limited pulse duration (fs) for a spectral FWHM in cm^-1.

    Uses the Gaussian time-bandwidth product 0.44; returns None for any
    other *shape* (as before).
    """
    width_hz = width_in_cm * 3e10
    if shape != 'gauss':
        return None
    return (0.44 / width_hz) / 1e-15
def wavelength2rgb(w):
    """
    Converts a wavelength (nm) to an RGB color.

    Piecewise-linear visible-spectrum mapping with intensity fall-off
    toward the UV/IR edges and gamma 0.8 (Dan Bruton's algorithm).

    BUG FIX: the violet branch divided by ``(440. - 350.)``; the interval
    is 380-440 nm, so the denominator must be ``(440. - 380.)`` -- the
    original under-saturated the red channel near the UV edge.
    """
    if w >= 380 and w < 440:
        R = -(w - 440.) / (440. - 380.)
        G = 0.0
        B = 1.0
    elif w >= 440 and w < 490:
        R = 0.0
        G = (w - 440.) / (490. - 440.)
        B = 1.0
    elif w >= 490 and w < 510:
        R = 0.0
        G = 1.0
        B = -(w - 510.) / (510. - 490.)
    elif w >= 510 and w < 580:
        R = (w - 510.) / (580. - 510.)
        G = 1.0
        B = 0.0
    elif w >= 580 and w < 645:
        R = 1.0
        G = -(w - 645.) / (645. - 580.)
        B = 0.0
    elif w >= 645 and w <= 780:
        R = 1.0
        G = 0.0
        B = 0.0
    else:
        # outside the visible range: black
        R = 0.0
        G = 0.0
        B = 0.0
    # attenuate intensity near the edges of the visible range
    if (w > 700.):
        s = .3 + .7 * (780. - w) / (780. - 700.)
    elif (w < 420.):
        s = .3 + .7 * (w - 380.) / (420. - 380.)
    else:
        s = 1.
    # gamma correction
    R, G, B = (np.array((R, G, B)) * s) ** 0.8
    return R, G, B
def equal_color(plots1, plots2):
    """Copy line colors from *plots1* onto *plots2* (lists of equal length).

    Raises ValueError when the lengths differ.
    """
    if len(plots1) != len(plots2):
        raise ValueError
    for src, dst in zip(plots1, plots2):
        dst.set_color(src.get_color())
def find_linear_part(t):
    """
    Index of the first step in the 1-D array *t* that differs from the
    initial step size (returns 0 when all steps are equal).
    """
    steps = np.diff(t)
    uniform = np.abs(steps - steps[0]) < 0.00001
    return np.argmin(uniform)
def rebin(a, new_shape):
    """
    Resizes a 2d array by averaging or repeating elements,
    new dimensions must be integral factors of original dimensions

    Parameters
    ----------
    a : array_like
        Input array.
    new_shape : tuple of int
        Shape of the output array

    Returns
    -------
    rebinned_array : ndarray
        If the new shape is smaller of the input array, the data are averaged,
        if the new shape is bigger array elements are repeated

    See Also
    --------
    resize : Return a new array with the specified shape.

    Examples
    --------
    >>> a = np.array([[0, 1], [2, 3]])
    >>> b = rebin(a, (4, 6)) #upsize
    >>> b
    array([[0, 0, 0, 1, 1, 1],
           [0, 0, 0, 1, 1, 1],
           [2, 2, 2, 3, 3, 3],
           [2, 2, 2, 3, 3, 3]])
    >>> c = rebin(b, (2, 3)) #downsize
    >>> c
    array([[ 0. ,  0.5,  1. ],
           [ 2. ,  2.5,  3. ]])
    """
    M, N = a.shape
    m, n = new_shape
    # BUG FIX: Python 3 `/` yields floats, which reshape()/repeat() reject;
    # the factors are integral by contract, so use floor division.
    if m < M:
        return a.reshape((m, M // m, n, N // n)).mean(3).mean(1)
    else:
        return np.repeat(np.repeat(a, m // M, axis=0), n // N, axis=1)
from scipy.sparse.linalg import svds
def efa(dat, n, reverse=False):
    """
    Doing evolving factor analyis.

    Grows a sub-matrix row by row (from the top, or from the bottom when
    *reverse* is set) and records its leading singular values.
    Rows 0-5 of the output stay zero because the loop starts at 6.

    NOTE(review): ``out[i, :] = sv`` assumes ``min(i, n) == n`` values are
    returned, i.e. n <= 6; larger n would raise a broadcast error on the
    first iterations -- confirm intended range of n.
    """
    if reverse:
        data=dat[::-1, :]
    else:
        data=dat
    out=np.zeros((data.shape[0], n))
    for i in range(6, data.shape[0]):
        # sparse truncated SVD of the first i rows; [1] is the singular values
        sv = svds(data[:i, :], min(i,n))[1]
        out[i, :] = sv
    return out
def moving_efa(dat, n, ncols, method='svd'):
    """Moving-window factor analysis: leading *n* singular values (or PCA
    explained-variance ratios) of a sliding block of *ncols* rows.

    NOTE(review): ``PCA`` is not defined or imported anywhere in this module
    as shown; calling with ``method='pca'`` (or even ``p = PCA()`` below)
    would raise NameError unless PCA -- presumably
    sklearn.decomposition.PCA -- is imported elsewhere. Verify.
    """
    out=np.zeros((dat.shape[0], n))
    p = PCA()
    for i in range(0, dat.shape[0]-n):
        if method=='svd':
            # [1] of the SVD triple is the singular-value vector
            sv = np.linalg.svd(dat[i:i+ncols,:])[1][:n]
        elif method=='pca':
            p = p.fit(dat[i:i+ncols,:])
            sv = p.explained_variance_ratio_[:n]
        out[i,:] = sv
    return out
from scipy.optimize import nnls
def pfid_tau_to_w(tau):
    """
    Given the rise time of the perturbed free induction decay, calculate
    the corresponding spectral width in cm^-1.
    """
    denom = np.pi * 3e7 * tau * 1e-9
    return 1 / denom
def als(dat, n=5):
    """Crude alternating least squares factorization of *dat* into *n*
    non-negative spectral components.

    Alternates between a non-negative solve for v0 (even iterations) and an
    unconstrained least-squares solve for u0 (odd iterations) until the
    residual change drops below 0.001.  Starts from random (not SVD)
    factors; results are therefore not deterministic.
    """
    u, s, v = np.linalg.svd(dat)
    # NOTE(review): the SVD-based starting guesses are immediately
    # overwritten by random factors two lines below.
    u0=u[:n]
    v0=v.T[:n]
    u0 = np.random.random(u0.shape)+0.1
    v0 = np.random.random(v0.shape)+0.1
    # first res value is also dead code -- overwritten with 10000. next line
    res = np.linalg.lstsq(u0.T,dat)[1].sum()
    res = 10000.
    for i in range(15000):
        if i % 2 == 0:
            # non-negative update of the component spectra, row-normalized
            v0 = do_nnls(u0.T, dat)
            v0 /= np.max(v0, 1)[:, None]
            res_n = ((u0.T.dot(v0) - dat)**2).sum()
        else:
            # unconstrained update of the concentration profiles
            u0, res_n, t , t = np.linalg.lstsq(v0.T, dat.T)
            ##r.fit(dat.T, v0.T)
            #u0 = r.coef_[:]
            res_n = ((u0.T.dot(v0) - dat)**2).sum()
        # stop when the residual has converged
        if abs(res-res_n) < 0.001:
            break
        else:
            print(i, res_n)
        res = res_n
    return u0.T, v0.T
def spec_int(tup, r, is_wavelength=True):
    """Average spectral intensity of *tup* over the range *r*.

    *r* is a (low, high) pair given in nm when *is_wavelength* is true
    (converted to wavenumbers internally), otherwise directly in cm^-1.
    Returns the trapezoidal integral over the range, normalized by the
    range width, for every time point.
    """
    wl, t, d = tup.wl, tup.t, tup.data
    if is_wavelength:
        # convert both the axis and the requested range nm -> cm^-1
        wl = 1e7/wl
        wl1, wl2 = 1e7 / r[0], 1e7 / r[1]
    else:
        wl1, wl2 = r
    # integration requires an ascending axis
    ix = np.argsort(wl)
    wl = wl[ix]
    d = d[:, ix]
    idx1, idx2 = sorted([fi(wl, wl1), fi(wl, wl2)])
    # mean intensity: integral divided by the covered axis span
    dat = np.trapz(d[:, idx1:idx2], wl[idx1:idx2]) / np.ptp(wl[idx1:idx2])
    return dat
#import mls
def do_nnls(A, b):
    """Column-wise non-negative least squares: solve A x = b[:, i], x >= 0,
    for every column of *b* and stack the solutions."""
    ncols = b.shape[1]
    out = np.zeros((A.shape[1], ncols))
    for i in range(ncols):
        out[:, i] = nnls(A, b[:, i])[0]
    return out
import lmfit
def exp_fit(x, y, start_taus = [1], use_constant=True, amp_max=None, amp_min=None, weights=None, amp_penalty=0,
            verbose=True, start_amps=None):
    """Fit a sum of exponential decays (plus optional constant) to y(x).

    One tau/amp parameter pair is created per entry of *start_taus*; the
    amplitudes start from *start_amps* or, by default, from the signal value
    at t = tau (after subtracting the final value).  With *weights* unset an
    L2 amplitude penalty of strength *amp_penalty* is appended to the
    residual vector.  Returns the lmfit minimizer result and the fitted
    curve.
    """
    num_exp = len(start_taus)
    para = lmfit.Parameters()
    if use_constant:
        # constant offset initialized at the last data point
        para.add('const', y[-1] )
    for i in range(num_exp):
        para.add('tau' + str(i), start_taus[i], min=0)
        y_c = y - y[-1]
        if start_amps is None:
            # amplitude guess: offset-corrected signal at t = tau_i
            a = y_c[fi(x, start_taus[i])]
        else:
            a = start_amps[i]
        para.add('amp' + str(i), a)
        if amp_max is not None:
            para['amp' + str(i)].max = amp_max
        if amp_min is not None:
            para['amp' + str(i)].min = amp_min
    def fit(p):
        # model evaluation: const + sum_i amp_i * exp(-x / tau_i)
        y_fit = np.zeros_like(y)
        if use_constant:
            y_fit += p['const'].value
        for i in range(num_exp):
            amp = p['amp'+str(i)].value
            tau = p['tau'+str(i)].value
            y_fit += amp * np.exp(-x/tau)
        return y_fit
    def res(p):
        # residual vector handed to lmfit; weighted OR amplitude-penalized
        if weights is None:
            pen = 0
            for i in range(num_exp):
                pen += p['amp'+str(i)].value**2
            return np.hstack(((y - fit(p)), amp_penalty*pen))
        else:
            return (y - fit(p)) / weights
    mini = lmfit.minimize(res, para)
    if verbose:
        lmfit.report_fit(mini)
    y_fit = fit(mini.params)
    return mini, y_fit
def calc_ratios(fitter, tmin=0.35, tmax=200):
    """Time-resolved positive/negative signal balance of a fitter's data.

    Interpolates the fitter's data onto its time-zero grid, restricts to
    the [tmin, tmax] time window, and integrates positive and negative
    signal parts separately over the spectral axis.
    Returns ``(t, pos, neg, pos/neg, total)``.
    """
    from skultrafast import zero_finding
    tup = zero_finding.interpol(fitter, fitter.tn)
    w, t, d = tup
    i = fi(t, tmin)
    i_max = fi(t, tmax)
    t = t[i:i_max]
    d = d[i:i_max, :]
    # split the signal into its positive and negative parts
    pos = np.where(d > 0, d, 0)
    neg = np.where(d < 0, d, 0)
    pos = np.trapz(pos, w)
    neg = np.trapz(neg, w)
    return t, pos, neg, pos/neg, d.sum(1)
def make_fi(data_to_search):
    """Return a closure performing nearest-index lookup in *data_to_search*
    via :func:`fi`."""
    def _lookup(x):
        return fi(data_to_search, x)
    return _lookup
#!/usr/bin/python
# This script parses the log file generated by the connected protocol FileLog logger
# How to use :
# python plot.py [file] [sampling] [first] [end]
# [file] the file log
# [sampling] frequency of logging in seconds
# [first] offset of the first point to be displayed in graph
# [end] offset of the last point to be displayed in graph
import sys
import numpy as np
import matplotlib.pyplot as plt
# ---- CLI arguments (see usage in the header comment) ----------------------
# NOTE(review): `file` shadows the Python builtin of the same name.
file = sys.argv[1]
sampling = float(sys.argv[2])
first = int(sys.argv[3])
if (len(sys.argv) > 4):
    end = int(sys.argv[4])
else:
    end = -1  # default: drop the last line (usually empty after a final \n)
# parse log file
with open(file) as f:
    data = f.read()
data = data.split('\n')
# x axis: sample index scaled to seconds by the logging period
x_range = range(first, len(data[0:end]))
x = [index * sampling for index in x_range]
# Column extraction: split each sampled row ONCE instead of once per field
# (the original re-split every row 18 times).  Values stay strings, exactly
# as before; consumers convert them with map(float, ...).
_rows = [row.split(' ') for row in data[first:end]]
def _column(k):
    # k-th space-separated field of every sampled row (list of str)
    return [r[k] for r in _rows]
sending_period = _column(0)
cc_window_flow_size = _column(1)
remote_arrival_speed = _column(2)
remote_link_capacity = _column(3)
rtt = _column(4)
rtt_var = _column(5)
ack_period = _column(6)
nack_count = _column(7)
ack_count = _column(8)
ack_sent_count = _column(9)
ack2_count = _column(10)
ack2_sent_count = _column(11)
multiplexer_sent_count = _column(12)
flow_sent_count = _column(13)
received_count = _column(14)
local_arrival_speed = _column(15)
local_link_capacity = _column(16)
remote_window_flow_size = _column(17)
# ---- summary statistics ---------------------------------------------------
# Each series is a list of strings; convert to float before averaging.
mean_sending_period = np.mean(list(map(float, sending_period)))
print("Means :")
print(" * rtt : %d us" % (np.mean(list(map(float, rtt)))))
# remaining means only make sense when packets were actually sent
if (mean_sending_period > 0):
    print(" * sending period : %d us" % mean_sending_period)
    print(" * bandwidth : %d packets/sec ( %dMbits / sec)" % (1000000 / mean_sending_period , 1 / mean_sending_period * 1500 * 8))
    print(" * congestion window flow size : %d packets" % (np.mean(list(map(float, cc_window_flow_size)))))
    print(" * local arrival speed : %d packets/sec" % (np.mean(list(map(float, local_arrival_speed)))))
    print(" * local link capacity : %d packets/sec" % (np.mean(list(map(float, local_link_capacity)))))
    print(" * ack_sent_count : %d" % (np.mean(list(map(float, ack_sent_count)))))
    print(" * ack2_sent_count : %d" % (np.mean(list(map(float, ack2_sent_count)))))
    print("\n")
    print(" * remote window flow size : %d packets" % (np.mean(list(map(float, remote_window_flow_size)))))
    print(" * remote arrival speed : %d packets/sec" % (np.mean(list(map(float, remote_arrival_speed)))))
    print(" * remote link capacity : %d packets/sec" % (np.mean(list(map(float, remote_link_capacity)))))
    print(" * ack count : %d" % (np.mean(list(map(float, ack_count)))))
    print(" * ack2 count : %d" % (np.mean(list(map(float, ack2_count)))))
# Series plotted on the client-side figure, with matching axis labels below.
y_client = [
    sending_period,
    cc_window_flow_size,
    remote_arrival_speed,
    remote_link_capacity,
    rtt,
    nack_count,
    ack_count,
    multiplexer_sent_count,
    remote_window_flow_size
]
label_client = [
    "sending period (us)",
    "cc window flow size (pkt)",
    "remote arrival speed (pkt/s)",
    "remote link capacity (pkt/s)",
    "rtt (us)",
    "nack count",
    "ack count",
    "multiplexer sent",
    "remote window flow"
]
# Series plotted on the server-side figure, with matching axis labels below.
y_server = [
    local_arrival_speed,
    local_link_capacity,
    rtt,
    rtt_var,
    ack_period,
    ack_sent_count,
    ack2_count,
    received_count
]
label_server = [
    "local arrival speed (pkt/s)",
    "local link capacity (pkt/s)",
    "rtt (us)",
    "rtt_var (us)",
    "ack period (us)",
    "ack sent",
    "ack2 count",
    "received_count"
]
# display sending statistics as graph
# NOTE(review): the y-series are lists of *strings*; matplotlib >= 2.1
# treats string data as categorical -- confirm whether float conversion is
# intended here.  Also, ax.legend() after each loop only labels the LAST
# subplot of the figure.
fig_client = plt.figure(1)
i = 1
# one stacked subplot per series (subplot code: rows*100 + cols*10 + index)
for y_arr_client, label_arr_client in zip(y_client, label_client):
    ax = fig_client.add_subplot(len(label_client)*100 + 10 + i)
    ax.plot(x, y_arr_client, label=label_arr_client)
    ax.set_ylim(bottom=0)
    i = i + 1
fig_client.suptitle("stat client side")
ax.legend()
# display receiving statistics as graph
fig_server = plt.figure(2)
i = 1
for y_arr_server, label_arr_server in zip(y_server, label_server):
    ax = fig_server.add_subplot(len(label_server)*100 + 10 + i)
    ax.plot(x, y_arr_server, label=label_arr_server)
    ax.set_ylim(bottom=0)
    i = i + 1
fig_server.suptitle("stat server side")
ax.legend()
plt.show()
from .star import BlackbodyStar
import numpy as np
import os
from taurex.constants import MSOL
import math
class PhoenixStar(BlackbodyStar):
"""
A star that uses the `PHOENIX <https://www.aanda.org/articles/aa/abs/2013/05/aa19058-12/aa19058-12.html>`_
synthetic stellar atmosphere spectrums.
These spectrums are read from ``.gits.gz`` files in a directory given by
``phoenix_path``
Each file must contain the spectrum for one temperature
Parameters
----------
phoenix_path: str, **required**
Path to folder containing phoenix ``fits.gz`` files
temperature: float, optional
Stellar temperature in Kelvin
radius: float, optional
Stellar radius in Solar radius
metallicity: float, optional
Metallicity in solar values
mass: float, optional
Stellar mass in solar mass
distance: float, optional
Distance from Earth in pc
magnitudeK: float, optional
Maginitude in K band
Raises
------
Exception
Raised when no phoenix path is defined
"""
    def __init__(self, temperature=5000, radius=1.0, metallicity=1.0, mass=1.0,
                 distance=1, magnitudeK=10.0, phoenix_path=None):
        # Initialize the blackbody baseline first; a PHOENIX spectrum
        # replaces it whenever the requested temperature is inside the
        # range covered by the library (see recompute_spectra).
        super().__init__(temperature=temperature, radius=radius,
                         distance=distance,
                         magnitudeK=magnitudeK, mass=mass,
                         metallicity=metallicity)
        # a PHOENIX star without a spectra folder is unusable -- fail fast
        if phoenix_path is None:
            self.error('No file path to phoenix files defined')
            raise Exception('No file path to phoenix files defined')
        self.info('Star is PHOENIX type')
        self._phoenix_path = phoenix_path
        # index the available .spec.fits.gz files by (T, log g, Z)
        self.get_avail_phoenix()
        self.use_blackbody = False
        self.recompute_spectra()
        # self.preload_phoenix_spectra()
def compute_logg(self):
"""
Computes log(surface_G)
"""
import astropy.units as u
from astropy.constants import G
mass = self._mass * u.kg
radius = self._radius * u.m
small_g = (G * mass) / (radius**2)
small_g = small_g.to(u.cm/u.s**2)
return math.log10(small_g.value)
def recompute_spectra(self):
if self.temperature > self._T_list.max() or \
self.temperature < self._T_list.min():
self.use_blackbody = True
else:
self.use_blackbody = False
self._logg = self.compute_logg()
f = self.find_nearest_file()
self.read_spectra(f)
    def read_spectra(self, p_file):
        """Load wavelength/flux columns from a PHOENIX FITS file into
        ``self.wngrid`` and ``self._base_sed``.

        The wavenumber grid is sorted ascending and the SED is converted
        to W m^-2 micron^-1.
        """
        from astropy.io import fits
        import astropy.units as u
        with fits.open(p_file) as hdu:
            # column units are declared in the FITS header
            strUnit = hdu[1].header['TUNIT1']
            wl = hdu[1].data.field('Wavelength') * u.Unit(strUnit)
            strUnit = hdu[1].header['TUNIT2']
            sed = hdu[1].data.field('Flux') * u.Unit(strUnit)
        # 10000/wl assumes the wavelength values are in microns -- TODO confirm
        self.wngrid = 10000/(wl.value)
        argidx = np.argsort(self.wngrid)
        self._base_sed = sed.to(u.W/u.m**2/u.micron)
        self.wngrid = self.wngrid[argidx]
        self._base_sed = self._base_sed[argidx]
    @property
    def temperature(self):
        """
        Effective Temperature in Kelvin

        Returns
        -------
        T: float
        """
        return self._temperature

    @temperature.setter
    def temperature(self, value):
        # changing T invalidates the cached spectrum; reload immediately
        self._temperature = value
        self.recompute_spectra()
    @property
    def mass(self):
        """
        Mass of star in solar mass

        Returns
        -------
        M: float
        """
        # NOTE(review): the setter stores kilograms (value * MSOL) while this
        # getter returns the stored value unchanged, so set-then-get yields
        # kg, not solar masses -- verify against how the base class stores
        # _mass.
        return self._mass

    @mass.setter
    def mass(self, value):
        # accepts solar masses, stores kg; a new mass changes log g
        self._mass = value * MSOL
        self.recompute_spectra()
def find_nearest_file(self):
idx = self._index_finder(
[self._temperature, self._logg, self._metallicity])
return self._files[int(idx)]
def get_avail_phoenix(self):
from scipy.interpolate import NearestNDInterpolator
import glob
files = glob.glob(os.path.join(self._phoenix_path, '*.spec.fits.gz'))
self._files = files
self._T_list = np.array(
[np.float(os.path.basename(k)[3:8]) for k in files])*100
self._Logg_list = np.array(
[np.float(os.path.basename(k)[9:12]) for k in files])
self._Z_list = np.array(
[np.float(os.path.basename(k)[13:16]) for k in files])
self._index_finder = NearestNDInterpolator(
(self._T_list, self._Logg_list, self._Z_list),
np.arange(0, self._T_list.shape[0]))
# def preload_phoenix_spectra(self):
# T_list = self.detect_all_T(self._phoenix_path)
# self._temperature_grid = np.array([x[0] for x in T_list])
# self.debug('Detected temepratures = %s',self._temperature_grid)
# self._avail_max_temp = max(self._temperature_grid)
# self._avail_min_temp = min(self._temperature_grid)
# self.debug('Temperature range = [%s-%s] ',self._avail_min_temp,self._avail_max_temp)
# self._max_index = self._temperature_grid.shape[0]
# self.spectra_grid = []
# #Load in all arrays
# for temp,f in T_list:
# self.debug('Loading %s %s',temp,f)
# arr = np.loadtxt(f)
# grid = 10000/np.copy(arr[:,0])
# sorted_idx = np.argsort(grid)
# self.wngrid = 10000/np.copy(arr[sorted_idx,0])
# self.spectra_grid.append(arr[sorted_idx,1]*10.0) #To SI
# def find_closest_index(self,T):
# """
# Finds the two closest indices in our temeprature grid to our desired temperature
# Parameters
# ----------
# T: float
# Temperature in Kelvin
# Returns
# -------
# t_min: int
# Index to the left of ``T``
# t_max: int
# Index to the right of ``T``
# """
# t_min=self._temperature_grid.searchsorted(T,side='right')-1
# t_max = t_min+1
# return t_min,t_max
# def interpolate_linear_temp(self,T):
# """
# Linearly interpolates the spectral emission density grid to the
# temperature given by ``T``
# Parameters
# ----------
# T: float
# Temeprature to interpolate to
# Returns
# -------
# out: :obj:`array`
# Spectral emission density interpolated to desired temperature
# """
# t_min,t_max = self.find_closest_index(T)
# if self._temperature_grid[t_min] == T:
# return self.spectra_grid[t_min]
# Tmax = self._temperature_grid[t_max]
# Tmin = self._temperature_grid[t_min]
# fx0=self.spectra_grid[t_min]
# fx1 = self.spectra_grid[t_max]
# return interp_lin_only(fx0,fx1,T,Tmin,Tmax)
# def detect_all_T(self,path):
# """
# Finds files and detects all temperatures available in path
# Parameters
# ----------
# path: str
# Path to directory containing PHOENIX data
# """
# files = glob.glob(os.path.join(self._phoenix_path,'*.fits.gz'))
# files.sort()
# temp_list = []
# for f in files:
# #Gewt just the name of the file
# clean_name = pathlib.Path(f).stem
# #Split it by numbers
# split = re.split('(\d+)',clean_name)
# try:
# _T = float(split[1])
# except Exception:
# self.warning('Problem when reading filename %s',f)
# continue
# temp_list.append( (_T,f) )
# #Now sort the numbers
# temp_list.sort(key=lambda x: x[0])
# return temp_list
def initialize(self, wngrid):
"""
Initializes and interpolates the spectral emission density to the current
stellar temperature and given wavenumber grid
Parameters
----------
wngrid: :obj:`array`
Wavenumber grid to interpolate the SED to
"""
# If temperature outside of range, use blavkbody
if self.use_blackbody:
self.warning('Using black body as temperature is outside of Star temeprature range {}'.format(
self.temperature))
super().initialize(wngrid)
else:
sed = self._base_sed
self.sed = np.interp(wngrid, self.wngrid, sed)
@property
def spectralEmissionDensity(self):
"""
Spectral emmision density
Returns
-------
sed: :obj:`array`
"""
return self.sed
def write(self, output):
star = super().write(output)
star.write_string('phoenix_path', self._phoenix_path)
return star | |
from collections import Counter
from copy import copy
import json
import numpy as np
import re
import logging
from stanza.models.common.utils import ud_scores, harmonic_mean
from stanza.utils.conll import CoNLL
from stanza.models.common.doc import *
logger = logging.getLogger('stanza')
def load_mwt_dict(filename):
    """
    Load a multi-word-token expansion dictionary from a JSON file.

    The file holds ``[[key, expansion], count]`` records; for each key only
    the expansion with the highest count is kept.  Returns ``None`` when no
    filename is given.
    """
    if filename is None:
        return None
    with open(filename, 'r') as f:
        records = json.load(f)
    mwt_dict = {}
    for (key, expansion), count in records:
        current = mwt_dict.get(key)
        if current is None or current[1] < count:
            mwt_dict[key] = (expansion, count)
    return mwt_dict
def process_sentence(sentence, mwt_dict=None):
    """
    Convert one tokenized sentence into a list of CoNLL-U style word dicts.

    ``sentence`` is a list of ``(token, label, additional_info)`` triples,
    where labels 3/4 mark multi-word tokens (MWTs).  When an MWT has a known
    expansion in ``mwt_dict`` the surface token gets a range ID followed by
    one entry per expanded word; otherwise the token is emitted as-is and
    tagged with ``MWT=Yes`` in MISC.
    """
    def misc_string(info):
        # Collapse the auxiliary info dict into a MISC field value, if any.
        if not info:
            return None
        return '|'.join(f"{k}={info[k]}" for k in info)

    words = []
    idx = 0
    for tok, label, additional_info in sentence:
        expansion = None
        if (label == 3 or label == 4) and mwt_dict is not None:
            # MWT found, (attempt to) expand it!
            if tok in mwt_dict:
                expansion = mwt_dict[tok][0]
            elif tok.lower() in mwt_dict:
                expansion = mwt_dict[tok.lower()][0]
        if expansion is not None:
            # Range entry for the surface form, then one row per sub-word.
            entry = {ID: f'{idx+1}-{idx+len(expansion)}', TEXT: tok}
            misc = misc_string(additional_info)
            if misc is not None:
                entry[MISC] = misc
            words.append(entry)
            for sub in expansion:
                words.append({ID: f'{idx+1}', TEXT: sub})
                idx += 1
        else:
            if not tok:
                continue
            if label in (3, 4):
                additional_info['MWT'] = 'Yes'
            entry = {ID: f'{idx+1}', TEXT: tok}
            misc = misc_string(additional_info)
            if misc is not None:
                entry[MISC] = misc
            words.append(entry)
            idx += 1
    return words
def find_token(token, text):
    """
    Robustly finds the first occurrence of token in the text, and returns its
    offset and its underlying original string.
    Ignores whitespace mismatches between the text and the token.

    Returns
    -------
    (start, matched): tuple of (int, str)
        Offset of the match in ``text`` and the exact substring matched
        (which may contain extra whitespace).
    """
    # Allow arbitrary whitespace between the token's characters.  Raw
    # strings fix the invalid '\s' escape sequences (SyntaxWarning on 3.12+).
    pattern = r'\s*'.join(
        [r'\s' if re.match(r'\s', x) else re.escape(x) for x in token])
    m = re.search(pattern, text)
    return m.start(), m.group()
def output_predictions(output_file, trainer, data_generator, vocab, mwt_dict, max_seqlen=1000, orig_text=None, no_ssplit=False, prob=False):
    """
    Run the tokenizer over ``data_generator`` and write CoNLL-U predictions.

    Paragraphs are batched longest-first; paragraphs longer than the eval
    limit are decoded in chunks, advancing each paragraph to its last
    predicted sentence break per step.  Returns
    ``(oov_count, n_tokens, all_preds, doc)`` and, when ``prob`` is True,
    additionally the list of raw per-batch probability arrays.
    """
    paragraphs = []
    for i, p in enumerate(data_generator.sentences):
        start = 0 if i == 0 else paragraphs[-1][2]
        length = sum([len(x) for x in p])
        paragraphs += [(i, start, start+length, length+1)] # para idx, start idx, end idx, length
    # Sort longest-first so similarly sized paragraphs are batched together.
    paragraphs = list(sorted(paragraphs, key=lambda x: x[3], reverse=True))
    all_preds = [None] * len(paragraphs)
    all_raw = [None] * len(paragraphs)
    eval_limit = max(3000, max_seqlen)
    batch_size = trainer.args['batch_size']
    batches = int((len(paragraphs) + batch_size - 1) / batch_size)
    t = 0
    list_prob = []
    for i in range(batches):
        batchparas = paragraphs[i * batch_size : (i + 1) * batch_size]
        offsets = [x[1] for x in batchparas]
        t += sum([x[3] for x in batchparas])
        batch = data_generator.next(eval_offsets=offsets)
        raw = batch[3]
        N = len(batch[3][0])
        if N <= eval_limit:
            # Short enough to decode in one pass.  (Leftover debug prints
            # removed here.)
            a = trainer.predict(batch)
            pred = np.argmax(a, axis=2)
            list_prob.append(a)
        else:
            # Chunked decoding for very long paragraphs.
            idx = [0] * len(batchparas)
            Ns = [p[3] for p in batchparas]
            pred = [[] for _ in batchparas]
            while True:
                ens = [min(N - idx1, eval_limit) for idx1, N in zip(idx, Ns)]
                en = max(ens)
                batch1 = batch[0][:, :en], batch[1][:, :en], batch[2][:, :en], [x[:en] for x in batch[3]]
                pred1 = np.argmax(trainer.predict(batch1), axis=2)
                for j in range(len(batchparas)):
                    # Advance to the last predicted sentence break (labels
                    # 2/4), or by the full window if none was found.
                    sentbreaks = np.where((pred1[j] == 2) + (pred1[j] == 4))[0]
                    if len(sentbreaks) <= 0 or idx[j] >= Ns[j] - eval_limit:
                        advance = ens[j]
                    else:
                        advance = np.max(sentbreaks) + 1
                    pred[j] += [pred1[j, :advance]]
                    idx[j] += advance
                if all([idx1 >= N for idx1, N in zip(idx, Ns)]):
                    break
                batch = data_generator.next(eval_offsets=[x+y for x, y in zip(idx, offsets)])
            pred = [np.concatenate(p, 0) for p in pred]
        for j, p in enumerate(batchparas):
            len1 = len([1 for x in raw[j] if x != '<PAD>'])
            # Force a sentence break at the end of every paragraph.
            if pred[j][len1-1] < 2:
                pred[j][len1-1] = 2
            elif pred[j][len1-1] > 2:
                pred[j][len1-1] = 4
            all_preds[p[0]] = pred[j][:len1]
            all_raw[p[0]] = raw[j]
    offset = 0
    oov_count = 0
    doc = []
    text = orig_text
    char_offset = 0
    for j in range(len(paragraphs)):
        raw = all_raw[j]
        pred = all_preds[j]
        current_tok = ''
        current_sent = []
        for t, p in zip(raw, pred):
            if t == '<PAD>':
                break
            # hack la_ittb
            if trainer.args['shorthand'] == 'la_ittb' and t in [":", ";"]:
                p = 2
            offset += 1
            if vocab.unit2id(t) == vocab.unit2id('<UNK>'):
                oov_count += 1
            current_tok += t
            if p >= 1:
                tok = vocab.normalize_token(current_tok)
                assert '\t' not in tok, tok
                if len(tok) <= 0:
                    current_tok = ''
                    continue
                if orig_text is not None:
                    # Track character offsets into the original text.
                    st0, tok0 = find_token(tok, text)
                    st = char_offset + st0
                    text = text[st0 + len(tok0):]
                    char_offset += st0 + len(tok0)
                    additional_info = {START_CHAR: st, END_CHAR: st + len(tok0)}
                else:
                    additional_info = dict()
                current_sent += [(tok, p, additional_info)]
                current_tok = ''
                if (p == 2 or p == 4) and not no_ssplit:
                    doc.append(process_sentence(current_sent, mwt_dict))
                    current_sent = []
        if len(current_tok):
            # Flush a trailing partial token as an end-of-sentence token.
            tok = vocab.normalize_token(current_tok)
            assert '\t' not in tok, tok
            if len(tok) > 0:
                if orig_text is not None:
                    st0, tok0 = find_token(tok, text)
                    st = char_offset + st0
                    text = text[st0 + len(tok0):]
                    char_offset += st0 + len(tok0)
                    # Bug fix: this dict previously used END_CHAR for both
                    # keys (duplicate key dropped the start offset); use
                    # START_CHAR as in the branch above.
                    additional_info = {START_CHAR: st, END_CHAR: st + len(tok0)}
                else:
                    additional_info = dict()
                current_sent += [(tok, 2, additional_info)]
        if len(current_sent):
            doc.append(process_sentence(current_sent, mwt_dict))
    if output_file: CoNLL.dict2conll(doc, output_file)
    if prob:
        return oov_count, offset, all_preds, doc, list(list_prob)
    return oov_count, offset, all_preds, doc
def eval_model(args, trainer, batches, vocab, mwt_dict):
    """
    Evaluate a tokenizer model and return a combined F1 score.

    Scores token, sentence and MWT F1 against the gold labels stored in
    ``batches.data`` and combines them with a harmonic mean weighted
    heavily toward token/sentence F1.
    """
    oov_count, N, all_preds, doc = output_predictions(args['conll_file'], trainer, batches, vocab, mwt_dict, args['max_seqlen'])
    all_preds = np.concatenate(all_preds, 0)
    labels = [y[1] for x in batches.data for y in x]
    # NOTE(review): ``counter`` is computed but never used below.
    counter = Counter(zip(all_preds, labels))
    def f1(pred, gold, mapping):
        # Map the 5-way labels onto coarser classes (0 = not a boundary),
        # then score span-level F1: ``lastp``/``lastg`` remember the previous
        # predicted/gold boundary so a span only counts as a true positive
        # when both of its ends line up.
        pred = [mapping[p] for p in pred]
        gold = [mapping[g] for g in gold]
        lastp = -1; lastg = -1
        tp = 0; fp = 0; fn = 0
        for i, (p, g) in enumerate(zip(pred, gold)):
            if p == g > 0 and lastp == lastg:
                lastp = i
                lastg = i
                tp += 1
            elif p > 0 and g > 0:
                lastp = i
                lastg = i
                fp += 1
                fn += 1
            elif p > 0:
                # and g == 0
                lastp = i
                fp += 1
            elif g > 0:
                lastg = i
                fn += 1
        if tp == 0:
            return 0
        else:
            return 2 * tp / (2 * tp + fp + fn)
    # Coarse mappings: any boundary for tokens, sentence breaks only for
    # sentences, and a 3-way mapping separating the MWT labels.
    f1tok = f1(all_preds, labels, {0:0, 1:1, 2:1, 3:1, 4:1})
    f1sent = f1(all_preds, labels, {0:0, 1:0, 2:1, 3:0, 4:1})
    f1mwt = f1(all_preds, labels, {0:0, 1:1, 2:1, 3:2, 4:2})
    logger.info(f"{args['shorthand']}: token F1 = {f1tok*100:.2f}, sentence F1 = {f1sent*100:.2f}, mwt F1 = {f1mwt*100:.2f}")
    return harmonic_mean([f1tok, f1sent, f1mwt], [1, 1, .01])
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
def matrix_scatter_plot(matrix, path, filename, dtype):
    """
    3-D scatter plot of the nonzero cells of a (lon, lat, month) 0/1 grid.

    Parameters
    ----------
    matrix : ndarray
        3-D binary occupancy grid; cells equal to 1 are plotted.
    path : str
        Output directory.
    filename : str
        Output file name, without extension.
    dtype : str
        Data description used in the plot title.
    """
    x, y, t = np.where(matrix == 1)
    sns.set_style("whitegrid", {"axes.grid": False})
    fig = plt.figure(figsize=(10, 8))
    ax = fig.add_subplot(111, projection="3d")
    ax.grid(False)
    ax.scatter(x, y, t, facecolor=(0, 0, 0, 0), edgecolor="crimson")
    ax.set_ylim(0, 90)
    ax.set_xlabel("Longitude")
    ax.set_ylabel("Latitude")
    ax.set_zlabel("Month")
    ax.set_title(
        f"{dtype} Data Mapped to Grid Cells (1987 - 2008)", fontsize=15, y=1.02
    )
    # Fix: actually use the ``filename`` argument (it was previously
    # ignored in favour of a hard-coded name).
    plt.savefig(
        f"{path}/{filename}.pdf",
        format="pdf",
        dpi=1200,
        bbox_inches="tight",
    )
    plt.close(fig)
def matrix_histogram(matrix, path, filename, dtype):
    """
    Histogram of measurements per month from a (lon, lat, month) 0/1 grid.

    Parameters mirror :func:`matrix_scatter_plot`; 264 bins cover the
    monthly range 1987-2008 (22 years x 12 months).
    """
    coordinates = np.where(matrix == 1)
    fig = plt.figure(figsize=(8, 5))
    plt.hist(coordinates[2], 264)
    plt.xlabel("Month", fontsize=11)
    plt.ylabel("Number of Measurements", fontsize=11)
    plt.title(f"{dtype} per Month (1987-2008)", fontsize=14)
    # Fix: use the ``filename`` argument instead of a hard-coded name.
    plt.savefig(
        f"{path}/{filename}.pdf",
        format="pdf",
        dpi=1200,
        bbox_inches="tight",
    )
    # Close the figure so repeated calls don't accumulate open figures
    # (matrix_scatter_plot already does this).
    plt.close(fig)
import argparse
import inspect
import json
import logging
import os
import pickle
import shutil
import sys
import time
import numpy as np
from nasbench_analysis.search_spaces.search_space_1 import SearchSpace1
from nasbench_analysis.search_spaces.search_space_2 import SearchSpace2
from nasbench_analysis.search_spaces.search_space_3 import SearchSpace3
# Reset logging: drop any handlers installed by imported modules so the
# basicConfig call in main() takes effect.
for handler in logging.root.handlers[:]:
    logging.root.removeHandler(handler)
class Rung:
    """A successive-halving rung: the nodes sitting at one promotion level."""
    def __init__(self, rung, nodes):
        self.parents = set()
        self.children = set()
        self.rung = rung
        # Collect parent ids and node ids of every node on this rung.
        for node_obj in nodes.values():
            if node_obj.rung == self.rung:
                self.parents.add(node_obj.parent)
                self.children.add(node_obj.node_id)
class Node:
    """A sampled architecture in the search tree."""
    def __init__(self, parent, arch, node_id, rung):
        self.parent = parent
        self.arch = arch
        self.node_id = node_id
        self.rung = rung
    def to_dict(self):
        """Serialize to a plain dict; includes objective_val once evaluated."""
        payload = {
            'parent': self.parent,
            'arch': self.arch,
            'node_id': self.node_id,
            'rung': self.rung,
        }
        if hasattr(self, 'objective_val'):
            payload['objective_val'] = self.objective_val
        return payload
class Random_NAS:
    """
    Random search with weight sharing over a NAS search space.

    Samples architectures uniformly, trains the shared model one batch per
    sample, and periodically evaluates batches of randomly sampled
    architectures with the shared weights to pick the best one.
    """
    def __init__(self, B, model, seed, save_dir):
        # B: total number of training batches (the search budget).
        self.save_dir = save_dir
        self.B = B
        self.model = model
        self.seed = seed
        self.iters = 0
        # node_id -> Node for every architecture sampled so far.
        self.arms = {}
        self.node_id = 0
    def print_summary(self):
        # NOTE(review): ``self.parents`` is never assigned anywhere in this
        # class, so this line raises AttributeError if print_summary is
        # called; probably meant to log the sampled arms.
        logging.info(self.parents)
        objective_vals = [(n, self.arms[n].objective_val) for n in self.arms if hasattr(self.arms[n], 'objective_val')]
        objective_vals = sorted(objective_vals, key=lambda x: x[1])
        best_arm = self.arms[objective_vals[0][0]]
        val_ppl = self.model.evaluate(best_arm.arch, split='valid')
        logging.info(objective_vals)
        logging.info('best valid ppl: %.2f' % val_ppl)
    def get_arch(self):
        # Sample a new architecture and record it as a rung-0 node.
        arch = self.model.sample_arch()
        self.arms[self.node_id] = Node(self.node_id, arch, self.node_id, 0)
        self.node_id += 1
        return arch
    def save(self):
        to_save = {a: self.arms[a].to_dict() for a in self.arms}
        # Only replace file if save successful so don't lose results of last pickle save
        with open(os.path.join(self.save_dir, 'results_tmp.pkl'), 'wb') as f:
            pickle.dump(to_save, f)
        shutil.copyfile(os.path.join(self.save_dir, 'results_tmp.pkl'), os.path.join(self.save_dir, 'results.pkl'))
        self.model.save(epoch=self.model.epochs)
    def run(self):
        """Train for B batches, evaluating after each epoch and checkpointing every 500 iters."""
        epochs = 0
        # self.get_eval_arch(1)
        while self.iters < self.B:
            arch = self.get_arch()
            self.model.train_batch(arch)
            self.iters += 1
            # If epoch has changed then evaluate the network.
            if epochs < self.model.epochs:
                epochs = self.model.epochs
                self.get_eval_arch(1)
            if self.iters % 500 == 0:
                self.save()
        self.save()
    def get_eval_arch(self, rounds=None):
        """
        Sample 1000 architectures per round, evaluate them with the shared
        weights, and return the best (arch, objective) pair per round.
        """
        # n_rounds = int(self.B / 7 / 1000)
        if rounds is None:
            n_rounds = max(1, int(self.B / 10000))
        else:
            n_rounds = rounds
        best_rounds = []
        for r in range(n_rounds):
            sample_vals = []
            for _ in range(1000):
                arch = self.model.sample_arch()
                try:
                    ppl = self.model.evaluate(arch)
                except Exception as e:
                    # Failed evaluations get a sentinel-worst objective.
                    ppl = 1000000
                logging.info(arch)
                logging.info('objective_val: %.3f' % ppl)
                sample_vals.append((arch, ppl))
            # Save sample validations
            with open(os.path.join(self.save_dir,
                                   'sample_val_architecture_epoch_{}.obj'.format(self.model.epochs)), 'wb') as f:
                pickle.dump(sample_vals, f)
            sample_vals = sorted(sample_vals, key=lambda x: x[1])
            full_vals = []
            # If the model supports a validation split, re-evaluate the top 5
            # candidates on it and pick the best of those.
            if 'split' in inspect.getfullargspec(self.model.evaluate).args:
                for i in range(5):
                    arch = sample_vals[i][0]
                    try:
                        ppl = self.model.evaluate(arch, split='valid')
                    except Exception as e:
                        ppl = 1000000
                    full_vals.append((arch, ppl))
                full_vals = sorted(full_vals, key=lambda x: x[1])
                logging.info('best arch: %s, best arch valid performance: %.3f' % (
                    ' '.join([str(i) for i in full_vals[0][0]]), full_vals[0][1]))
                best_rounds.append(full_vals[0])
            else:
                best_rounds.append(sample_vals[0])
            # Save the fully evaluated architectures
            with open(os.path.join(self.save_dir,
                                   'full_val_architecture_epoch_{}.obj'.format(self.model.epochs)), 'wb') as f:
                pickle.dump(full_vals, f)
        return best_rounds
def main(args):
    """
    Entry point: build the search space and shared model, run random search
    with weight sharing, and write the best architecture to /tmp/arch.
    """
    # Fill in with root output path
    root_dir = os.getcwd()
    print('root_dir', root_dir)
    if args.save_dir is None:
        save_dir = os.path.join(root_dir, 'experiments/random_ws/ss_{}_{}_{}'.format(time.strftime("%Y%m%d-%H%M%S"),
                                                                                     args.search_space, args.seed))
    else:
        save_dir = args.save_dir
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    if args.eval_only:
        # Evaluation-only runs must point at an existing experiment dir.
        assert args.save_dir is not None
    # Dump the config of the run folder
    with open(os.path.join(save_dir, 'config.json'), 'w') as fp:
        json.dump(args.__dict__, fp)
    # Log both to stdout and to <save_dir>/log.txt.
    log_format = '%(asctime)s %(message)s'
    logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                        format=log_format, datefmt='%m/%d %I:%M:%S %p')
    fh = logging.FileHandler(os.path.join(save_dir, 'log.txt'))
    fh.setFormatter(logging.Formatter(log_format))
    logging.getLogger().addHandler(fh)
    logging.info(args)
    if args.search_space == '1':
        search_space = SearchSpace1()
    elif args.search_space == '2':
        search_space = SearchSpace2()
    elif args.search_space == '3':
        search_space = SearchSpace3()
    else:
        raise ValueError('Unknown search space')
    if args.benchmark == 'ptb':
        raise ValueError('PTB not supported.')
    else:
        # Budget B = total number of training batches over all epochs;
        # data_size is the CIFAR training-set size used for the search.
        data_size = 25000
        time_steps = 1
    B = int(args.epochs * data_size / args.batch_size / time_steps)
    if args.benchmark == 'cnn':
        from optimizers.random_search_with_weight_sharing.darts_wrapper_discrete import DartsWrapper
        model = DartsWrapper(save_dir, args.seed, args.batch_size, args.grad_clip, args.epochs,
                             num_intermediate_nodes=search_space.num_intermediate_nodes, search_space=search_space,
                             init_channels=args.init_channels, cutout=args.cutout)
    else:
        raise ValueError('Benchmarks other cnn on cifar are not available')
    searcher = Random_NAS(B, model, args.seed, save_dir)
    logging.info('budget: %d' % (searcher.B))
    if not args.eval_only:
        searcher.run()
        archs = searcher.get_eval_arch()
    else:
        # Different seed for evaluation-only runs to get fresh samples.
        np.random.seed(args.seed + 1)
        archs = searcher.get_eval_arch(2)
    logging.info(archs)
    arch = ' '.join([str(a) for a in archs[0][0]])
    with open('/tmp/arch', 'w') as f:
        f.write(arch)
    return arch
if __name__ == "__main__":
    # CLI for random search / SHA with weight sharing on the NAS-Bench spaces.
    parser = argparse.ArgumentParser(description='Args for SHA with weight sharing')
    parser.add_argument('--benchmark', dest='benchmark', type=str, default='cnn')
    parser.add_argument('--seed', dest='seed', type=int, default=100)
    parser.add_argument('--epochs', dest='epochs', type=int, default=50)
    parser.add_argument('--batch_size', dest='batch_size', type=int, default=64)
    parser.add_argument('--grad_clip', dest='grad_clip', type=float, default=0.25)
    parser.add_argument('--save_dir', dest='save_dir', type=str, default=None)
    parser.add_argument('--eval_only', dest='eval_only', type=int, default=0)
    # CIFAR-10 only argument. Use either 16 or 24 for the settings for random_ws search
    # with weight-sharing used in our experiments.
    parser.add_argument('--init_channels', dest='init_channels', type=int, default=16)
    parser.add_argument('--search_space', choices=['1', '2', '3'], default='1')
    parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
    args = parser.parse_args()
    main(args)
# -*- coding: UTF-8 -*-
import tkinter as tk
from tkinter import *
from PIL import ImageTk, Image
from tkinter import filedialog
import glob
import os
import csv
import pandas as pd
import subprocess
from pygame import mixer
from tkinter import messagebox
import matplotlib.pyplot as plt
from scipy import signal
from scipy.io import wavfile
import glob
import matplotlib
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk)
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
import pickle
import numpy as np
import matplotlib.animation as animation
# original annotation imports was here
import arabic_reshaper
import pandas as pd
import os
from tkinter import messagebox
from bidi.algorithm import get_display
###############################################################
# Constants #
###############################################################
HEADER_FONT_STYLE = ("Tahoma", 10, "bold")
FONT_STYLE_BUTTON = ("Arial Bold", 7, "bold")
FONT_STYLE_ANNOTATION = ("Arial", 20)
# On increasing these values window size shrinks
INITIAL_HEIGHT_ADJUST = 1600
INITIAL_WIDTH_ADJUST = 1200
# On increasing these values window size enlarges
FINAL_HEIGHT_ADJUST = 1600
FINAL_WIDTH_ADJUST = 1200
#Height and width of buttons
BUTTONS_HEIGHT = 2
BUTTONS_WIDTH = 15
# Output CSV for new annotations; created on first submit.
CSV_FILENAME = "ANNOTATIONS_FILE.csv"
# Path of the CSV holding the original transcripts (set via file dialog).
CSV_ORIGINAL_ANNOTATIONS_NAME = ''
# # ==================================================== ADDED ===============================
def _onKeyRelease(event):
ctrl = (event.state & 0x4) != 0
if event.keycode==88 and ctrl and event.keysym.lower() != "x":
event.widget.event_generate("<<Cut>>")
if event.keycode==86 and ctrl and event.keysym.lower() != "v":
event.widget.event_generate("<<Paste>>")
if event.keycode==67 and ctrl and event.keysym.lower() != "c":
event.widget.event_generate("<<Copy>>")
###############################################################
# Initializing Tkinter Window #
###############################################################
root = tk.Tk()
# Query the screen resolution via xrandr (Linux-only; result is currently
# unused since the geometry lines below are commented out).
proc = subprocess.Popen(["xrandr | grep \* | cut -d' ' -f4"], stdout=subprocess.PIPE, shell=True)
(OUT, ERR) = proc.communicate()
OUT = str(OUT).split("x")
# HEIGHT_SIZE = str(int(int(OUT[0])/2)-INITIAL_HEIGHT_ADJUST)
# WIDTH_SIZE = str(int(int(OUT[1])/2)-INITIAL_WIDTH_ADJUST)
root.geometry(str(INITIAL_HEIGHT_ADJUST)+"x"+str(INITIAL_WIDTH_ADJUST))
# Global key binding for the Ctrl+X/C/V layout workaround.
root.bind_all("<Key>", _onKeyRelease, "+")
root.title("Annotation Tool")
root.resizable(1,1)
HEADER = Label(root,text="نرمافزار ساخت زیر نویس برای فایل های صوتی کوتاه ده ثانیه ای",
               underline=0, font=HEADER_FONT_STYLE).grid(row=0, column=10, pady=10)
# Playback state: index of the current file, absolute position in ms.
CURRENT_INDEX = 0
CURRENT_SECOND = 0
LONGEST_AUDIO_MS = 10000
ANNOTATION_ENTRY_VAR = StringVar(root)
mixer.init(16000)
###############################################################
#Audio Files Folder#
###############################################################
def browse_wav_files():
    """
    Ask the user for a directory and collect its .mp3 files into the global
    FOLDER_WAV_FILES, dropping files already annotated in the CSV.
    """
    filename = filedialog.askdirectory()
    global FOLDER_WAV_FILES
    # NOTE : I changed next line to just detect one
    # (despite the name, this globs .mp3 files, not .wav)
    FOLDER_WAV_FILES = glob.glob(filename+"/*.mp3")
    if os.path.exists(CSV_FILENAME):
        # ['E:/SUBTITLE TOOLS/cicada-audio-annotation-tool/annotator_wav_to_mp3', 'radio-goftego-98_07_06-08_30.mp3chunk10.mp3']
        # NOTE(review): indexing [2] assumes at least three files were found;
        # an IndexError here escapes uncaught.
        starter_string_for_folder_wav_files = FOLDER_WAV_FILES[2].split('\\')[0]
        # NOTE(review): error_bad_lines was removed in pandas 2.0; newer
        # pandas needs on_bad_lines='skip'.
        annotated_files = pd.read_csv(CSV_FILENAME, error_bad_lines=False)
        annotated_files = annotated_files['Filename'].values.tolist()
        print(FOLDER_WAV_FILES[0:2])
        # for i in FOLDER_WAV_FILES:
        #     if i.split("/")[-1].split('\\')[-1] in annotated_files:
        #         FOLDER_WAV_FILES.remove(i)
        # Remove every already-annotated file from the worklist.
        for i in list(set(annotated_files)):
            file_to_remove = starter_string_for_folder_wav_files + '\\' + i
            print("==================================")
            print(f'i am searching for {i} in folderWavFile')
            if file_to_remove in FOLDER_WAV_FILES:
                print(file_to_remove)
                FOLDER_WAV_FILES.remove(file_to_remove)
                print("==================================")
            else:
                pass
    if len(FOLDER_WAV_FILES) == 0:
        messagebox.showerror("Error", "No Wav Files in Given path")
    else:
        Label(root, text=FOLDER_WAV_FILES[0].split("/")[-1], font=FONT_STYLE_BUTTON).grid(row=4, column=10, sticky=(N, S, W, E), pady=10)
ASK_FOR_WAVFILE_DIR = Button(root, text="Audio Files Folder", fg="green", bd=3, relief="raised",
                             command=browse_wav_files, height=BUTTONS_HEIGHT, width=BUTTONS_WIDTH,
                             font=FONT_STYLE_BUTTON)
ASK_FOR_WAVFILE_DIR.grid(row=3, column=12, pady=2)
###############################################################
# Where Is Annotation of Audios #
###############################################################
def browse_folder_to_save_annotations():
    """Ask the user for the CSV file holding the original transcripts."""
    global CSV_ORIGINAL_ANNOTATIONS_NAME
    filename = filedialog.askopenfilename()
    CSV_ORIGINAL_ANNOTATIONS_NAME = filename
ASK_FOR_ANNOTATION_DIR = Button(root, text="Annotatios File", bd=3, relief="raised", fg="green",
                                command=browse_folder_to_save_annotations, height=BUTTONS_HEIGHT, width=BUTTONS_WIDTH,
                                font=FONT_STYLE_BUTTON)
ASK_FOR_ANNOTATION_DIR.grid(row=2, column=12, pady=10)
###############################################################
# QUIT #
###############################################################
def _quit():
    # NOTE(review): despite the name this does not quit -- it replays the
    # current file starting 27 s in (15 + 12, hard-coded test offsets).
    # Its button is commented out below; looks like leftover experiment code.
    offset = 15.0
    popooz = 12.0
    mixer.music.load(FOLDER_WAV_FILES[CURRENT_INDEX])
    mixer.music.play(loops=0, start=offset + popooz) # seconds from beginning
# quit_button = Button(root, text="Quit", bd=3, fg="green", relief="raised", command= _quit,
#                      font=FONT_STYLE_BUTTON, height=BUTTONS_HEIGHT, width=BUTTONS_WIDTH)
# quit_button.grid(row=2, column=12, pady=10)
###############################################################
#Details Button#
###############################################################
def get_details():
    """Show a popup with how many files are annotated vs. remaining."""
    try:
        # print(FOLDER_WAV_FILES[CURRENT_INDEX].split('/')[-1])
        # total_annotations = len(glob.glob(FOLDER_TO_SAVE_ANNOTATIONS+"/*.pkl"))
        # NOTE(review): error_bad_lines was removed in pandas 2.0.
        total_annotations = pd.read_csv(CSV_FILENAME, error_bad_lines=False)
        total_annotations = len(list(set(total_annotations['Filename'].values)))
        remaining_files = len(FOLDER_WAV_FILES) - (total_annotations)
        messagebox.showinfo("Details", "Total Annotations : " +str(total_annotations)+
                            "\n Total Remaining wav files: "+str(remaining_files))
    except (NameError, FileNotFoundError):
        # NameError: folder not chosen yet; FileNotFoundError: no CSV yet.
        messagebox.showerror("Path not specified", "Give path for saving annotations")
DETAILS_BUTTON = Button(root, text="Details", bd=3, relief="raised", fg="green", command=get_details,
                        font=FONT_STYLE_BUTTON, height=BUTTONS_HEIGHT, width=BUTTONS_WIDTH)
DETAILS_BUTTON.grid(row=4, column=12, pady=10)
###############################################################
#FIND original annotation#
###############################################################
def show_original_annotation():
    """
    Look up the current file's transcript in the original-annotations CSV,
    copy it to the clipboard, and return it reshaped for RTL display.
    """
    ab = os.getcwd()  # NOTE(review): unused
    CSV_ORIGINAL_ANNOTATIONS_DATAFRAME = pd.read_csv(CSV_ORIGINAL_ANNOTATIONS_NAME)
    text_to_be_reshaped = CSV_ORIGINAL_ANNOTATIONS_DATAFRAME[
        CSV_ORIGINAL_ANNOTATIONS_DATAFRAME['wav_filename'] == FOLDER_WAV_FILES[CURRENT_INDEX].split('\\')[-1] ]['transcript']
    # Arabic/Persian text must be reshaped + bidi-reordered for Tk display.
    reshaped_text = arabic_reshaper.reshape(text_to_be_reshaped.values[0])
    # NOTE(review): creating a second Tk root per call just for clipboard
    # access leaks hidden windows; root.clipboard_* would suffice.
    r = Tk()
    r.withdraw()
    r.clipboard_clear()
    r.clipboard_append(text_to_be_reshaped.values[0])
    Label(root, text="",
          font=("Arial", 20)).grid(row=10, column=10,
                                   sticky=(N, S, W, E), pady=10)
    return get_display(reshaped_text)
###############################################################
#Previous Audio Button#
###############################################################
def previous_audio_update_index():
    """Step back to the previous audio file, refresh the labels and play it."""
    try:
        # Touch FOLDER_WAV_FILES first so a missing folder raises NameError.
        check_folder = len(FOLDER_WAV_FILES)
        global CURRENT_INDEX
        if CURRENT_INDEX == 0:
            return CURRENT_INDEX
        else:
            CURRENT_INDEX = CURRENT_INDEX - 1
        ANNOTATION_ENTRY_VAR.set("")
        Label(root, text=FOLDER_WAV_FILES[CURRENT_INDEX].split("/")[-1],
              font=FONT_STYLE_BUTTON).grid(row=4, column=10,
                                           sticky=(N, S, W, E), pady=10)
        Label(root, text=show_original_annotation(),
              font=FONT_STYLE_BUTTON).grid(row=3, column=10,
                                           sticky=(N, S, W, E), pady=10)
        if mixer.music.get_busy():
            mixer.music.stop()
        play_audio(CURRENT_INDEX)
    except NameError:
        messagebox.showinfo("File Path", "No Wav Files Path Given")
previous_audio_button = Button(root, text="<< Previous",bd=3,relief="raised",fg="green",
                               command=previous_audio_update_index, font=FONT_STYLE_BUTTON,
                               height=BUTTONS_HEIGHT, width=BUTTONS_WIDTH)
previous_audio_button.grid(row=7, column=0, pady=10)
###############################################################
#Next Audio Button
###############################################################
def next_audio_update_index():
    """Advance to the next audio file (if any), refresh the labels and play it."""
    try:
        global CURRENT_INDEX
        print(len(FOLDER_WAV_FILES))
        if CURRENT_INDEX == len(FOLDER_WAV_FILES)-1:
            return CURRENT_INDEX
        else:
            CURRENT_INDEX = CURRENT_INDEX + 1
        ANNOTATION_ENTRY_VAR.set("")
        Label(root, text=FOLDER_WAV_FILES[CURRENT_INDEX].split("/")[-1],
              font=FONT_STYLE_BUTTON).grid(row=4, column=10,
                                           sticky=(N, S, W, E), pady=10)
        Label(root, text=show_original_annotation(),
              font=FONT_STYLE_BUTTON).grid(row=3, column=10,
                                           sticky=(N, S, W, E), pady=10)
        if mixer.music.get_busy():
            mixer.music.stop()
        play_audio(CURRENT_INDEX)
    except NameError:
        messagebox.showinfo("File Path", "No Wav Files Path Given")
NEXT_AUDIO_BUTTON = Button(root, text="Next >>", bd=3, relief="raised", fg="green",
                           command=next_audio_update_index, font=FONT_STYLE_BUTTON,
                           height=BUTTONS_HEIGHT, width=BUTTONS_WIDTH)
NEXT_AUDIO_BUTTON.grid(row=6, column=0, pady=10)
###############################################################
# Pause Audio
###############################################################
def pause():
    """
    Pause playback if something is playing.

    TODO : write a function to pause unpause with one function.
    Initialize A VAR and Zero it when next and prev.
    """
    try:
        if mixer.music.get_busy():
            mixer.music.pause()
        # print("====================== dasht ye chizi pakhsh mishod ======================")
    except NameError:
        messagebox.showinfo("File Path", "No Wav Files Path Given")
# NOTE(review): the NEXT_AUDIO_BUTTON name is reused for the Pause, Resume
# and 2-sec seek buttons below; only the last assignment survives, although
# the widgets themselves all stay gridded.
NEXT_AUDIO_BUTTON = Button(root, text="Pause", bd=3, relief="raised", fg="green",
                           command=pause, font=FONT_STYLE_BUTTON,
                           height=BUTTONS_HEIGHT, width=BUTTONS_WIDTH)
NEXT_AUDIO_BUTTON.grid(row=2, column=0, pady=10)
###############################################################
# Resume Audio
###############################################################
def resume():
    """
    Resume playback after a pause.

    TODO : write a function to pause unpause with one function.
    Initialize A VAR and Zero it when next and prev.
    """
    try:
        global CURRENT_SECOND  # NOTE(review): declared but never used here
        if mixer.music.get_busy():
            mixer.music.unpause()
    except NameError:
        messagebox.showinfo("File Path", "No Wav Files Path Given")
NEXT_AUDIO_BUTTON = Button(root, text="Resume", bd=3, relief="raised", fg="green",
                           command=resume, font=FONT_STYLE_BUTTON,
                           height=BUTTONS_HEIGHT, width=BUTTONS_WIDTH)
NEXT_AUDIO_BUTTON.grid(row=1, column=0, pady=10)
###############################################################
# 2sec forward Audio
###############################################################
def secForward():
    """Skip 2 seconds forward, bounded by LONGEST_AUDIO_MS."""
    try:
        global CURRENT_SECOND
        global LONGEST_AUDIO_MS
        if mixer.music.get_busy():
            # get_pos() counts from the last play() call, so fold it into
            # the absolute position before seeking.
            if CURRENT_SECOND + mixer.music.get_pos() + 2000 < LONGEST_AUDIO_MS:
                CURRENT_SECOND = CURRENT_SECOND + mixer.music.get_pos()
                mixer.music.rewind()
                CURRENT_SECOND = CURRENT_SECOND + 2000
                mixer.music.play(loops=0, start = CURRENT_SECOND/1000)
                # print(CURRENT_SECOND)
    except NameError:
        messagebox.showinfo("File Path", "No Wav Files Path Given")
NEXT_AUDIO_BUTTON = Button(root, text="2 SEC Forward", bd=3, relief="raised", fg="green",
                           command=secForward, font=FONT_STYLE_BUTTON,
                           height=BUTTONS_HEIGHT, width=BUTTONS_WIDTH)
NEXT_AUDIO_BUTTON.grid(row=3, column=0, pady=10)
###############################################################
# 2sec backward Audio
###############################################################
def secBack():
    """Skip 2 seconds backward, never before the start of the file."""
    try:
        global CURRENT_SECOND
        global LONGEST_AUDIO_MS
        if mixer.music.get_busy():
            if CURRENT_SECOND + mixer.music.get_pos() - 2000 > 0:
                CURRENT_SECOND = CURRENT_SECOND + mixer.music.get_pos()
                mixer.music.rewind()
                CURRENT_SECOND = CURRENT_SECOND - 2000
                mixer.music.play(loops=0, start = CURRENT_SECOND/1000)
                # print(CURRENT_SECOND)
    except NameError:
        messagebox.showinfo("File Path", "No Wav Files Path Given")
NEXT_AUDIO_BUTTON = Button(root, text="2 SEC Backward", bd=3, relief="raised", fg="green",
                           command=secBack, font=FONT_STYLE_BUTTON,
                           height=BUTTONS_HEIGHT, width=BUTTONS_WIDTH)
NEXT_AUDIO_BUTTON.grid(row=4, column=0, pady=10)
###############################################################
#Play Audio Button#
###############################################################
def play_audio(index_value):
    """
    Load the file at ``index_value`` and play it from the beginning.
    """
    try:
        mixer.music.load(FOLDER_WAV_FILES[index_value])
        mixer.music.play()
    except NameError:
        # FOLDER_WAV_FILES is not defined until a folder has been chosen.
        messagebox.showerror("No Wav file", "No audio file to Play")
PLAY_BUTTON = Button(root, text="Start Audio", bd=3, fg="green",
                     command=lambda: play_audio(CURRENT_INDEX), font=FONT_STYLE_BUTTON,
                     height=BUTTONS_HEIGHT, width=BUTTONS_WIDTH)
PLAY_BUTTON.grid(row=5, column=0, pady=10)
###############################################################
#Annotation Entry Field#
###############################################################
# NOTE(review): .grid() returns None, so ANNOTATIONS_ENTRY holds None; keep a
# separate widget reference if the Entry ever needs to be accessed directly.
ANNOTATIONS_ENTRY = Entry(root, width = 100,textvariable=ANNOTATION_ENTRY_VAR, bd=5, relief="raised",
                          font=("Arial", 15)).grid(row=9, column=10, ipady = 10, ipadx = 10)
# Label(root, text="Spectrogram Type: ", fg="green",
#       font=FONT_STYLE_BUTTON).grid(row=7, column=12, sticky=(N, S, W, E), pady=10)
###############################################################
#Save Annotations#
###############################################################
def save_annotations(index_value):
    """
    Append the current annotation entry as one CSV row for the wav file at
    ``index_value``.

    Writes a header row first when the CSV file does not exist yet, then a
    row of [filename, label1, label2, ...] built from the comma-separated
    entry text.  Shows an error dialog when the path globals are missing.

    Fix: the csv module requires files opened with ``newline=''``;
    without it every row is followed by a blank line on Windows.  The
    duplicated writer code of the two branches is also merged.
    """
    try:
        file_exists = os.path.exists(CSV_FILENAME)
        mode = "a" if file_exists else "w"
        with open(CSV_FILENAME, mode, encoding='utf-8', newline='') as file_object:
            wavfile_information_object = csv.writer(file_object)
            if not file_exists:
                wavfile_information_object.writerow(["Filename","Label1","Label2","Label3","Label4"])
            # Basename of the wav path (paths use Windows separators here)
            wavfile_information_object.writerow(
                [FOLDER_WAV_FILES[index_value].split("\\")[-1]]
                + ANNOTATION_ENTRY_VAR.get().split(","))
        Label(root, text="SUBMITTED",
              font=FONT_STYLE_BUTTON).grid(row=11, column=10,
                                           sticky=(N, S, W, E), pady=10)
    except NameError:
        messagebox.showerror("No Path", "Specify path to save annotations!")
def save_and_next_audio(event):
    """
    Handler bound to the <Return> key: save the current annotation,
    advance the index to the next audio file, and start playing it.

    ``event`` is the tkinter event object (unused, required by bind()).
    """
    save_annotations(CURRENT_INDEX)
    next_audio_update_index()
    play_audio(CURRENT_INDEX)
# Clicking Submit only saves the row; pressing <Return> (bound below) also
# advances to the next file and plays it.
SUBMIT_BUTTON = Button(root, text="Submit", bd=3, relief="raised", fg="green",
                       command=lambda: save_annotations(CURRENT_INDEX),
                       font=FONT_STYLE_BUTTON, height=BUTTONS_HEIGHT, width=BUTTONS_WIDTH)
SUBMIT_BUTTON.grid(row=5, column=12, pady=10)
root.bind('<Return>', save_and_next_audio)
# Enter the tkinter event loop (blocks until the window is closed)
root.mainloop()
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
import numpy as np

# Monte-Carlo estimate of pi distributed over MPI ranks.
N = 10000
n_procs = comm.Get_size()
print("This is process", rank)

# Distribute the N samples over the processes.  Handing the remainder to the
# first N % n_procs ranks guarantees that exactly N points are drawn in
# total.  The original `int(N / n_procs)` per rank silently dropped samples
# whenever N was not divisible by the process count, while the final result
# still divided by N — biasing the estimate low.
n_local = N // n_procs + (1 if rank < N % n_procs else 0)

# Draw points uniformly in the square [-1, 1] x [-1, 1]
x_part = np.random.uniform(-1, 1, n_local)
y_part = np.random.uniform(-1, 1, n_local)
# Count how many land inside the unit circle
hits_part = x_part**2 + y_part**2 < 1
hits_count = hits_part.sum()
print("partial counts", hits_count)
# Sum the per-rank counts on rank 0
total_counts = comm.reduce(hits_count, root=0)
if rank == 0:
    print("Total hits:", total_counts)
    # Area ratio circle/square = pi/4, hence the factor 4
    print("Final result:", 4 * total_counts/N)
import cdutil, pickle
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import statsmodels.api as sm
from Function_regressions import get_regression_prediction as GRP
def fig_contributions(instance_110):
    """Bar-plot predictor contributions for the projected change at one grid point.

    Fits an OLS model of the SSP5-8.5 change (grid index 64) on the
    standardized historical-mean and EOF predictors, then plots how much
    each predictor shifts the two reanalysis-based predictions away from
    the multi-model-mean prediction.

    Parameters
    ----------
    instance_110 : project object assumed to expose vORG, HISm_mean,
        HISm_spat, CHG_Mean_{126,245,370,585} and class_cal_eofs() —
        TODO confirm against the defining class.
    """
    vORG_110 = instance_110.vORG
    HISm_mean_110 = instance_110.HISm_mean
    HISm_spat_110 = instance_110.HISm_spat
    # EOF decomposition of the historical fields (idx=[-1], reanalyses included)
    EOFreturn1 = instance_110.class_cal_eofs(vORG_110, HISm_mean_110, HISm_spat_110, idx=[-1], REA_idx=True)
    CHG_Mean_126 = instance_110.CHG_Mean_126
    CHG_Mean_245 = instance_110.CHG_Mean_245
    CHG_Mean_370 = instance_110.CHG_Mean_370
    CHG_Mean_585 = instance_110.CHG_Mean_585
    # Predictors: standardized historical mean + interior EOF loadings
    predictor1 = np.c_[EOFreturn1['std_HISm'], EOFreturn1['std_EOFsOnHISm'][:,1:-1] ]
    # Matching predictor values for the two reanalyses (ERA-20C, 20CRv3)
    input_predictand_reanalysis1 = np.r_[EOFreturn1['std_era20c'], EOFreturn1['std_EOFs_era20c'][1:-1] ]
    input_predictand_reanalysis2 = np.r_[EOFreturn1['std_20crv3'], EOFreturn1['std_EOFs_20crv3'][1:-1] ]
    def plot_bar_contribution(y_array, x_array, num):
        # OLS design matrix with an explicit intercept column; num is the
        # number of rows (models) — presumably 17, confirm with caller.
        new_x_arrays = np.c_[np.ones(num), x_array]
        term1 = np.mean(EOFreturn1['std_HISm'])
        term2 = np.mean(EOFreturn1['std_EOFsOnHISm'][:,1:-1], axis=0)
        # Prediction inputs: multi-model mean and the two reanalyses
        new_predictm = np.r_[1, term1, term2]
        new_predict1 = np.r_[1, input_predictand_reanalysis1]
        new_predict2 = np.r_[1, input_predictand_reanalysis2]
        regress = sm.OLS(y_array, new_x_arrays).fit()
        # Per-term contributions: coefficient * predictor value
        predicm = new_predictm * regress.params
        predic1 = new_predict1 * regress.params
        predic2 = new_predict2 * regress.params
        predicm_sum = np.sum(predicm)
        predic1_sum = np.sum(predic1)
        predic2_sum = np.sum(predic2)
        # First bar: total shift vs. model mean; remaining bars: per-predictor shifts
        To_plot1 = np.r_[predic1_sum-predicm_sum, predic1[1:]-predicm[1:]]
        To_plot2 = np.r_[predic2_sum-predicm_sum, predic2[1:]-predicm[1:]]
        plt.plot(np.r_[-0.5, 4.5], np.r_[0,0], color='black', linewidth=0.5)
        plt.bar(np.arange(5)-0.1, np.array(To_plot1), 0.2)
        plt.bar(np.arange(5)+0.1, np.array(To_plot2), 0.2)
        plt.xlim(-0.5, 4.5)
        plt.show()
        plt.clf()
        # Second figure: the regression coefficients themselves (intercept excluded)
        plt.plot(np.r_[-1, 4], np.r_[0,0], color='black', linewidth=0.5)
        plt.bar(np.arange(4), regress.params[1:], 0.6)
        plt.xlim(-1, 4)
        plt.show()
        plt.clf()
    # Grid index to analyse
    idx = 64
    plot_bar_contribution(CHG_Mean_585[:,idx], predictor1, 17)
"""
Cartpole Agent with Tensorflow 2
Reference: https://github.com/awjuliani/DeepRL-Agents/blob/master/Vanilla-Policy.ipynb
"""
import tensorflow as tf
import numpy as np
import gym
# Load cartpole environment
env = gym.make("CartPole-v0")
# Discount factor applied to future rewards
GAMMA = 0.99
# Adam step size
learning_rate = 0.01
# CartPole observation is a 4-vector; two discrete actions (left/right)
state_size = 4
num_actions = 2
hidden_size = 8
# Number of training episodes; each episode is capped at max_ep steps
T_episodes = 5000
max_ep = 999
# Apply a gradient update every `update_frequency` episodes
update_frequency = 5
# Set True to render the environment while training
is_visualize = False
def discount_rewards(r):
    """Return the discounted cumulative returns for a reward sequence.

    Element i becomes r[i] + GAMMA*r[i+1] + GAMMA^2*r[i+2] + ...
    The output array has the same dtype as ``r``.
    """
    discounted = np.zeros_like(r)
    acc = 0
    # Walk backwards so each step's return builds on its successor's.
    for idx in range(r.size - 1, -1, -1):
        acc = acc * GAMMA + r[idx]
        discounted[idx] = acc
    return discounted
class PolicyNetworks(tf.keras.Model):
    """Two-layer policy network mapping a state to action probabilities."""
    def __init__(self):
        super(PolicyNetworks, self).__init__()
        # hidden_size / num_actions are module-level hyperparameters
        self.hidden_layer_1 = tf.keras.layers.Dense(hidden_size, activation='relu')
        self.output_layer = tf.keras.layers.Dense(num_actions, activation='softmax')
    def call(self, x):
        # x: batch of states, presumably shape (batch, state_size) — the
        # training loop feeds a single expanded observation at a time.
        H1_output = self.hidden_layer_1(x)
        outputs = self.output_layer(H1_output)
        return outputs
def pg_loss(outputs, actions, rewards):
    """REINFORCE loss: -mean(log pi(action|state) * discounted return).

    outputs: (batch, num_actions) action probabilities from the policy net.
    actions: integer action index taken at each step.
    rewards: discounted returns for those steps.
    """
    # Flatten the probability matrix and index the probability of the
    # action that was actually taken at each step.
    indexes = tf.range(0, tf.shape(outputs)[0]) * tf.shape(outputs)[1] + actions
    responsible_outputs = tf.gather(tf.reshape(outputs, [-1]), indexes)
    loss = -tf.reduce_mean(tf.math.log(responsible_outputs) * rewards)
    return loss
optimizer = tf.optimizers.Adam(learning_rate)
def train_step(model, states, actions, rewards):
    """Run one policy-gradient update over a batch of episode transitions."""
    with tf.GradientTape() as tape:
        outputs = model(states)
        loss = pg_loss(outputs, actions, rewards)
    # Differentiate the REINFORCE loss w.r.t. the policy parameters and apply
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
PG_model = PolicyNetworks()
i = 0
total_reward = []
total_length = []
# Main REINFORCE loop: roll out one episode at a time; every
# `update_frequency` episodes apply a gradient step on that episode's data.
while i < T_episodes:
    s = env.reset()
    running_reward = 0
    ep_history = []
    for j in range(max_ep):
        if is_visualize == True:
            env.render()
        # Add a batch dimension before feeding the state to the network
        s = np.expand_dims(s, 0)
        # Sample an action from the policy's output distribution, then
        # recover its index via argmax on the sampled probability value.
        a_dist = PG_model(s).numpy()
        a = np.random.choice(a_dist[0], p=a_dist[0])
        a = np.argmax(a_dist == a)
        s1, r, d, _ = env.step(a)  # Get reward and next state
        ep_history.append([s, a, r, s1])
        s = s1
        running_reward += r
        if d == True:
            # NOTE(review): this creates a dtype=object array mixing state
            # arrays, ints and floats; it works but is fragile — confirm.
            ep_history = np.array(ep_history)
            # Replace raw rewards by discounted returns, column 2
            ep_history[:, 2] = discount_rewards(ep_history[:, 2])
            # Stack the per-step (1, state_size) rows into one batch
            np_states = np.array(ep_history[0, 0])
            for idx in range(1, ep_history[:, 0].size):
                np_states = np.append(np_states, ep_history[idx, 0], axis=0)
            if i % update_frequency == 0 and i != 0:
                train_step(PG_model, np_states, ep_history[:, 1], ep_history[:, 2])
            total_reward.append(running_reward)
            total_length.append(j)
            break
    # Progress report: mean reward over the last 100 episodes
    if i % 100 == 0:
        print(np.mean(total_reward[-100:]))
    i += 1
import pandas as pd
import numpy as np

# Load and inspect the pickled adjacency matrix.
# Fix: the result variable was named `object`, shadowing the builtin.
adj_matrix = pd.read_pickle('adj_matrix.p')
print(adj_matrix)
print(type(adj_matrix))
print(adj_matrix.shape[0])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 20 17:47:40 2018
@author: JSen
"""
import numpy as np
import matplotlib.pyplot as plt
from numpy import loadtxt, load
import os
from scipy import optimize
from scipy.optimize import minimize
from sklearn import linear_model
import scipy.io as spio
import random
# Work relative to the script's own directory
os.chdir(os.path.realpath('.'))
#load weights
theta = spio.loadmat('ex4weights.mat')
theta1 = theta['Theta1'] #25x401
theta2 = theta['Theta2'] #10x26
#load training data
data = spio.loadmat('ex4data1.mat')
#X 5000x400 y 5000x1
X = data['X']
y = data['y']
# Display 100 digits in one figure (adapted from another student's code)
def display_data(imgData):
    """Render 100 digit images (rows of ``imgData``) as a single 10x10 grid.

    Each row is a flattened 20x20 grayscale image stored column-major
    (MATLAB convention), hence ``order="F"`` when reshaping.  Drawing the
    digits one by one would be very slow, so they are first assembled into
    one padded array and shown with a single imshow call.

    Fix: the running image counter was named ``sum``, shadowing the builtin.
    """
    pad = 1
    display_array = -np.ones((pad + 10 * (20 + pad), pad + 10 * (20 + pad)))
    img_idx = 0  # index of the next row of imgData to place
    for i in range(10):
        for j in range(10):
            display_array[pad + i * (20 + pad):pad + i * (20 + pad) + 20,
                          pad + j * (20 + pad):pad + j * (20 + pad) + 20] = (
                imgData[img_idx, :].reshape(20, 20, order="F"))
            img_idx += 1
    plt.imshow(display_array, cmap='gray')  # grayscale rendering
    plt.axis('off')
    plt.show()
# Randomly pick 100 digits to visualize
# NOTE(review): random.choices samples WITH replacement, so duplicates are possible
rand_indices = random.choices(range(X.shape[0]), k=100)
display_data(X[rand_indices,:]) # show the 100 digits
def sigmoid(z):
    """Logistic function 1 / (1 + e^{-z}); operates elementwise on arrays."""
    return 1.0 / (1.0 + np.exp(-z))
input_layer_size = 400; # 20x20 Input Images of Digits
hidden_layer_size = 25; # 25 hidden units
# 10 output classes; label 10 stands for the digit 0
num_labels = 10;
def compress_theta_in_one_column(theta1, theta2):
    """Flatten both weight matrices and stack them into one (n, 1) column."""
    parts = (theta1.reshape(-1, 1), theta2.reshape(-1, 1))
    return np.vstack(parts)
def decompress_theta_from_cloumn(one_column_theta, input_layer_size, hidden_layer_size, num_labels):
    """Inverse of compress_theta_in_one_column: rebuild (theta1, theta2).

    theta1 has shape (hidden, input+1), theta2 (labels, hidden+1).
    """
    flat = one_column_theta.reshape(-1, 1)  # ensure an (n, 1) column vector
    split = hidden_layer_size * (input_layer_size + 1)
    t1 = flat[:split, :].reshape(hidden_layer_size, input_layer_size + 1)
    t2 = flat[split:, :].reshape(num_labels, hidden_layer_size + 1)
    return t1, t2
def decompress_theta_from_row(one_row_theta, input_layer_size, hidden_layer_size, num_labels):
    """Rebuild (theta1, theta2) from a single 1 x n row vector."""
    flat = one_row_theta.reshape(1, -1)  # ensure a (1, n) row vector
    split = hidden_layer_size * (input_layer_size + 1)
    t1 = flat[:, :split].reshape(hidden_layer_size, input_layer_size + 1)
    t2 = flat[:, split:].reshape(num_labels, hidden_layer_size + 1)
    return t1, t2
def nnCostFunction(nn_parms, input_layer_size, hidden_layer_size, num_labels, X, y):
    '''Unregularized cross-entropy cost of the two-layer network.'''
    theta_t1, theta_t2 = decompress_theta_from_cloumn(
        nn_parms, input_layer_size, hidden_layer_size, num_labels)
    m = X.shape[0]

    # Forward pass with a bias column prepended at each layer.
    a1 = np.hstack((np.ones((m, 1)), X))
    a2 = sigmoid(a1 @ theta_t1.T)          # hidden activations, m x hidden
    a2 = np.hstack((np.ones((m, 1)), a2))  # add the bias unit -> m x (hidden+1)
    h = sigmoid(a2 @ theta_t2.T)           # output probabilities, m x labels

    # One-hot encode labels; label 10 represents digit 0, hence y - 1.
    y_mat = np.zeros((m, num_labels), dtype=int)
    for row in range(m):
        y_mat[row, y[row] - 1] = 1

    # Cross-entropy: the trace picks out the matching (label, label) products.
    A = y_mat.T @ np.log(h) + (1 - y_mat).T @ np.log(1 - h)
    return -1 / m * np.trace(A)
# Flatten the pre-trained weights and sanity-check the unregularized cost
theta_in_one_col = compress_theta_in_one_column(theta1, theta2)
loss = nnCostFunction(theta_in_one_col, input_layer_size, hidden_layer_size, num_labels, X, y)
print('loss:', loss)
def nnCostFunction_with_regularization(nn_parms, input_layer_size, hidden_layer_size, num_labels, X, y, lambda_coe):
    '''Regularized cross-entropy cost of the two-layer network.'''
    theta_t1, theta_t2 = decompress_theta_from_cloumn(
        nn_parms, input_layer_size, hidden_layer_size, num_labels)
    m = X.shape[0]

    # Forward pass with a bias column prepended at each layer.
    a1 = np.hstack((np.ones((m, 1)), X))
    a2 = sigmoid(a1 @ theta_t1.T)          # hidden activations
    a2 = np.hstack((np.ones((m, 1)), a2))  # add the bias unit
    h = sigmoid(a2 @ theta_t2.T)           # output probabilities

    # One-hot encode labels; label 10 represents digit 0, hence y - 1.
    y_mat = np.zeros((m, num_labels), dtype=int)
    for row in range(m):
        y_mat[row, y[row] - 1] = 1

    # Cross-entropy via the trace of the label/output product matrices.
    A = y_mat.T @ np.log(h) + (1 - y_mat).T @ np.log(1 - h)
    J = -1 / m * np.trace(A)

    # Regularization over every weight except the bias (first) columns;
    # trace(W W^T) equals the sum of squared entries of W.
    w1 = theta_t1[:, 1:]
    w2 = theta_t2[:, 1:]
    B = w1 @ w1.T + w2.T @ w2
    return J + lambda_coe / (2 * m) * np.trace(B)
# Regularized loss with lambda = 1 on the pre-trained weights
loss = nnCostFunction_with_regularization(theta_in_one_col, input_layer_size, hidden_layer_size, num_labels, X, y, 1)
print('loss with regularization:', loss)
def sigmoid_gradient(z):
    '''Derivative of the sigmoid, given ``z`` that is ALREADY a sigmoid
    output (i.e. g'(x) = g(x)(1 - g(x)) with z = g(x)).'''
    return z * (1 - z)
def randInitializeWeights(input_layer_size, hidden_layer_size):
    '''Uniform random weights in (-0.12, 0.12), shape (hidden, input+1).

    Breaks symmetry before training; the extra +1 column is the bias unit.
    '''
    epsilon_init = 0.12
    rand_matrix = np.random.rand(hidden_layer_size, input_layer_size+1)  # uniform [0, 1)
    # Rescale from [0, 1) into (-epsilon_init, epsilon_init)
    return rand_matrix * 2 * epsilon_init - epsilon_init
def compress_theta_in_one_row(theta1, theta2):
    '''Flatten both weight matrices and join them into a single 1 x n row.

    Fix: the original called the unbound method ``np.matrix.flatten`` on
    plain ndarrays; ``np.matrix`` is deprecated and the unbound call is
    fragile across numpy versions.  ``ravel()`` produces the same flat
    ordering (row-major) directly.
    '''
    joined = np.hstack((theta1.ravel(), theta2.ravel()))
    return joined.reshape(1, -1)
def compute_gradient(nn_parms: np.ndarray, input_layer_size, hidden_layer_size, num_labels, X, y, lambda_coe=0):
    '''Backpropagation: gradient of the regularized cost w.r.t. all thetas.

    Returns the gradient flattened to shape (n,) so scipy.optimize.fmin_cg
    can consume it.
    '''
    theta_t1, theta_t2 = decompress_theta_from_cloumn(nn_parms, input_layer_size, hidden_layer_size, num_labels)
    m = X.shape[0]
    X = np.hstack((np.ones((m,1)), X))
    h1 = np.dot(X, theta_t1.T) # hidden layer pre-activation, 5000x25
    h1 = sigmoid(h1) # hidden layer output
    # add the bias unit to the hidden layer
    ones = np.ones((m, 1))
    h1 = np.hstack((ones, h1)) #5000x26
    h2 = np.dot(h1, theta_t2.T) #5000x10
    h = sigmoid(h2) # map outputs into (0, 1) so the loss/deltas are defined
    # one-hot encode y into a 5000x10 matrix; label 10 encodes digit 0,
    # and Python indexing is 0-based, hence the -1
    y_mat = np.zeros((m, num_labels),dtype=int)
    for i in range(m):
        y_mat[i, y[i]-1] = 1
    # backpropagation: accumulate per-example deltas into big_delta1/2
    '''BP算法'''
    big_delta1 = np.zeros((theta_t1.shape)) #25x401
    big_delta2 = np.zeros((theta_t2.shape)) #10x26
    for i in range(m):
        out = h[i, :]
        y_current = y_mat[i, :]
        delta3 = (out - y_current).reshape(1, -1) #1x10 output-layer error
        # sigmoid_gradient takes the already-activated h1 values
        delta2 = np.dot(delta3, theta_t2) * sigmoid_gradient(h1[i, :]) #1x26
        delta2 = delta2.reshape(1, -1)
        big_delta2 = big_delta2 + np.dot(delta3.T, h1[i, :].reshape(1, -1)) #10x26
        # tricky step, done in two parts: drop the bias component of delta2
        # before accumulating the input-layer gradient
        t1 = delta2[:, 1:].T
        t2 = X[i,:].reshape(1,-1)
        big_delta1 = big_delta1 + np.dot(t1, t2) #25x401
    # regularize all but the bias (first) column of each theta
    B1 = np.hstack((np.zeros((theta_t1.shape[0], 1)), theta_t1[:, 1:])) #25x401
    theta1_grad = 1/m * big_delta1 + lambda_coe/m * B1
    B2 = np.hstack((np.zeros((theta_t2.shape[0], 1)), theta_t2[:, 1:])) #10x26
    theta2_grad = 1/m * big_delta2 + lambda_coe/m * B2
    # return the flattened parameters
    r = compress_theta_in_one_column(theta1_grad, theta2_grad)
    return r.ravel() # must be shape (n,), not (n,1): fmin_cg fails with "deltak = numpy.dot(gfk, gfk)" otherwise
def debugInitializeWeights(fan_out, fan_in):
    '''Deterministic pseudo-random weights via sin(), for gradient checking.

    Returns a (fan_out, fan_in + 1) matrix of sin(1), sin(2), ... values.
    '''
    count = fan_out * (fan_in + 1)
    return np.sin(np.arange(1, count + 1)).reshape(fan_out, fan_in + 1)
# Quick smoke check of the deterministic initializer (3x4 sin-based matrix)
test_pram = debugInitializeWeights(3,3)
def computeNumericalGradient(nn_param, input_layer_size, hidden_layer_size, num_labels, X, y, lambda_coe):
    '''Numerical gradient of the regularized cost via central differences.

    ``nn_param`` is expected as an (n, 1) column vector — the
    ``perturb[p, :]`` indexing relies on that — and the returned ``numgrad``
    has the same (n, 1) shape.
    '''
    numgrad = np.zeros((nn_param.shape))
    perturb = np.zeros((nn_param.shape))
    e = 1e-4
    for p in range(np.size(nn_param)):
        # Perturb one parameter at a time and take the central difference
        perturb[p, :] = e
        loss1 = nnCostFunction_with_regularization(nn_param - perturb, input_layer_size, hidden_layer_size, num_labels, X, y, lambda_coe)
        loss2 = nnCostFunction_with_regularization(nn_param + perturb, input_layer_size, hidden_layer_size, num_labels, X, y, lambda_coe)
        numgrad[p, :] = (loss2 - loss1) / (2 * e)
        perturb[p, :] = 0
    return numgrad
def checkNNGradients(lambda_coe = 0):
    '''Build a tiny network and compare analytic vs. numerical gradients.

    NOTE(review): compute_gradient returns shape (n,) while numgrad is
    (n, 1); ``numgrad-grad`` therefore broadcasts to (n, n), so the printed
    relative difference may not be the intended vector norm — confirm.
    '''
    input_layer_size = 3;
    hidden_layer_size = 5;
    num_labels = 3;
    m = 5;
    # We generate some 'random' test data
    Theta1 = debugInitializeWeights(hidden_layer_size, input_layer_size);
    Theta2 = debugInitializeWeights(num_labels, hidden_layer_size);
    # Reusing debugInitializeWeights to generate X
    X = debugInitializeWeights(m, input_layer_size - 1);
    # generate matching labels in 1..num_labels
    y = 1 + np.mod(np.arange(1, m+1), num_labels).reshape(-1,1)
    param = compress_theta_in_one_column(Theta1, Theta2)
    cost = nnCostFunction_with_regularization(param, input_layer_size, hidden_layer_size, num_labels, X, y, lambda_coe)
    grad = compute_gradient(param, input_layer_size, hidden_layer_size, num_labels, X, y, lambda_coe)
    numgrad = computeNumericalGradient(param, input_layer_size, hidden_layer_size, num_labels, X, y, lambda_coe)
    # relative error should be tiny (< 1e-9); numgrad and grad must have
    # identical shapes before subtracting
    diff = np.linalg.norm(numgrad-grad) / np.linalg.norm(numgrad+grad)
    print(f'Relative Difference:{diff}')
# Gradient check on a small network before training
checkNNGradients()
lambda_coe = 0.6
initial_theta1 = randInitializeWeights(input_layer_size, hidden_layer_size);
initial_theta2 = randInitializeWeights(hidden_layer_size, num_labels);
initial_theta_in_one_col = compress_theta_in_one_column(initial_theta1, initial_theta2)
t1, t2 = decompress_theta_from_cloumn(initial_theta_in_one_col, input_layer_size, hidden_layer_size, num_labels)
nnCostFunction_with_regularization(initial_theta_in_one_col, input_layer_size, hidden_layer_size, num_labels, X, y, lambda_coe)
g = compute_gradient(initial_theta_in_one_col, input_layer_size, hidden_layer_size, num_labels, X, y, lambda_coe)
# minimize the loss with conjugate gradient (100 iterations)
'''最小化损失'''
result = optimize.fmin_cg(nnCostFunction_with_regularization, initial_theta_in_one_col, fprime=compute_gradient, args=(input_layer_size, hidden_layer_size, num_labels, X, y, lambda_coe), maxiter=100)
# final trained parameters
res_theta1, res_theta2 = decompress_theta_from_cloumn(result, input_layer_size, hidden_layer_size, num_labels)
def predict(theta1, theta2, X):
    '''Forward-propagate X through the trained network and return the
    predicted class label (1..10, where 10 encodes the digit 0) per row.'''
    m = X.shape[0]
    # forward propagation with bias columns added at each layer
    a1 = np.hstack((np.ones((m, 1)), X))
    a2 = sigmoid(a1 @ theta1.T)            # hidden layer output
    a2 = np.hstack((np.ones((m, 1)), a2))  # add the bias unit
    scores = sigmoid(a2 @ theta2.T)        # per-class scores in (0, 1)
    # argmax is 0-based while labels start at 1, hence the +1
    return np.argmax(scores, axis=1) + 1
pre = predict(res_theta1, res_theta2, X)
# Training-set accuracy: compare predictions against the (m, 1) label column
print('accuracy:', np.mean(np.float32(pre.reshape(-1,1)==y)))
from pathlib import Path
from pandas import read_csv, to_datetime, DataFrame
from pylab import arange, arcsin, array, cos, pi, sin, sqrt
from scipy.interpolate import interp1d
from warnings import warn
# Last inn gpxpy kun dersom det er installert
try:
import gpxpy
HAR_GPXPY = True
except ImportError:
HAR_GPXPY = False
def sjekk_at_gpxpy_er_installert():
    """Raise a helpful error (in Norwegian) if gpxpy is not installed.

    The message explains how to install gpxpy from Spyder or Jupyter.
    """
    if not HAR_GPXPY:
        raise ValueError(
            "Du må installere gpxpy for å bruke gpx-filer. "
            "Den letteste måten å installere gpxpy er med Spyder eller "
            "Jupyter Notebooks. Med Spyder skriver du `!pip install gpxpy` "
            "i terminalvinduet, og trykker <Enter>. Med Jupyter Notebooks "
            "skriver du `!pip install gpxpy` i en celle du kjører."
        )
def haversinus(vinkel):
    """Haversine of an angle in radians: sin^2(angle / 2).

    Used to compute the angle between two points on a spherical shell; see
    https://en.wikipedia.org/wiki/Versine
    """
    halv_vinkel = vinkel / 2
    return sin(halv_vinkel) ** 2
def arc_haversinus(forhold):
    """Inverse haversine: the angle (radians) whose haversine is ``forhold``.

    See https://en.wikipedia.org/wiki/Versine
    """
    return 2 * arcsin(sqrt(forhold))
def sentralvinkel(
    lengdegrad1, breddegrad1, lengdegrad2, breddegrad2,
):
    """Central angle between two points on a sphere (haversine formula).

    All angles are in radians; lengdegrad = longitude, breddegrad = latitude.
    See https://en.wikipedia.org/wiki/Haversine_formula

    Fix: the haversine formula is
        hav(theta) = hav(dlat) + cos(lat1)*cos(lat2)*hav(dlon)
    i.e. the cosine factors must be of the LATITUDES.  The original took
    hav(dlon) + cos(lon1)*cos(lon2)*hav(dlat), which gives wrong central
    angles away from the prime meridian.
    """
    lengdegrad_diff = lengdegrad2 - lengdegrad1
    breddegrad_diff = breddegrad2 - breddegrad1
    ledd1 = haversinus(breddegrad_diff)
    ledd2 = cos(breddegrad1)*cos(breddegrad2)*haversinus(lengdegrad_diff)
    return arc_haversinus(ledd1 + ledd2)
def avstand(
    lengdegrad1, breddegrad1, lengdegrad2, breddegrad2, radius
):
    """Distance between two points on a spherical shell of the given radius.

    Longitude/latitude arguments are in radians; uses the haversine formula,
    see https://en.wikipedia.org/wiki/Haversine_formula
    """
    vinkel = sentralvinkel(lengdegrad1, breddegrad1, lengdegrad2, breddegrad2)
    return radius * vinkel
def jordavstand(
    lengdegrad1, breddegrad1, lengdegrad2, breddegrad2
):
    """Great-circle distance in km between two points on Earth's surface.

    Earth is approximated as a sphere of radius 6371 km; since the Earth
    is slightly elliptical the result is a little off over very large
    separations (e.g. between distant countries).
    """
    jord_radius_km = 6371
    return avstand(
        lengdegrad1, breddegrad1, lengdegrad2, breddegrad2, jord_radius_km
    )
def grad_til_radian(grad):
    """Convert an angle in degrees to radians."""
    radianer = grad*pi/180
    return radianer
def finn_tidsendring_i_sekunder(tidspunkt1, tidspunkt2):
    """Time difference ``tidspunkt1 - tidspunkt2`` in seconds.

    Fix: uses timedelta.total_seconds().  The original
    ``.seconds + .microseconds/1_000_000`` silently dropped the ``days``
    component (wrong for gaps of a day or more) and misbehaved for
    negative differences, where timedelta normalisation makes ``.seconds``
    non-negative.
    """
    tidsendring = tidspunkt1 - tidspunkt2
    return tidsendring.total_seconds()
def hent_forflyttningsdata(data):
    """Build an array of cumulative distance travelled (km) per GPS fix.

    ``data`` is a DataFrame indexed by time with 'lat'/'lon' columns in
    degrees; distances between consecutive fixes are accumulated with the
    haversine formula.
    """
    forflytning_siden_start = [0]
    # We start without any previous position
    nåværende_lengdegrad = None
    nåværende_breddegrad = None
    for tidspunkt, rad in data.iterrows():
        forrige_lengdegrad = nåværende_lengdegrad
        forrige_breddegrad = nåværende_breddegrad
        nåværende_lengdegrad = grad_til_radian(rad['lon'])
        nåværende_breddegrad = grad_til_radian(rad['lat'])
        # If we have no previous position yet (first row),
        # continue to the next iteration
        if forrige_lengdegrad is None:
            continue
        # Compute the distance moved since the previous fix
        posisjonsendring = jordavstand(
            forrige_lengdegrad,
            forrige_breddegrad,
            nåværende_lengdegrad,
            nåværende_breddegrad
        )
        # Add that distance to our running total
        nåværende_forflytning = forflytning_siden_start[-1] + posisjonsendring
        # Append the running total to the end of the displacement list
        forflytning_siden_start.append(nåværende_forflytning)
    return array(forflytning_siden_start)
def hent_tidsdata(data):
    """Build an array of elapsed time in seconds per GPS fix.

    The DataFrame's index must hold the measurement timestamps.
    """
    sekunder_siden_start = [0]
    nåværende_tidspunkt = None
    for indeks, rad in data.iterrows():
        forrige_tidspunkt = nåværende_tidspunkt
        nåværende_tidspunkt = indeks
        # Skip the first row — there is no previous timestamp to diff against
        if forrige_tidspunkt is None:
            continue
        tidsendring = finn_tidsendring_i_sekunder(
            nåværende_tidspunkt, forrige_tidspunkt
        )
        tid_siden_start = sekunder_siden_start[-1] + tidsendring
        sekunder_siden_start.append(tid_siden_start)
    return array(sekunder_siden_start)
def hent_uniform_data(data, tid_mellom_målinger_s=5):
    """Resample the track so all measurements are equally spaced in time.

    Draws a straight line between consecutive data points (as when
    plotting) and samples the cumulative distance every
    ``tid_mellom_målinger_s`` seconds.

    Returns (uniform_times_s, distances_km_at_those_times).
    """
    tidspunkt_s = hent_tidsdata(data)
    avstander_km = hent_forflyttningsdata(data)
    # Piecewise-linear interpolant of distance as a function of time
    interpolant = interp1d(tidspunkt_s, avstander_km)
    tidspunkt_uniform = arange(0, tidspunkt_s[-1], tid_mellom_målinger_s)
    return tidspunkt_uniform, interpolant(tidspunkt_uniform)
def last_rådata_gpslogger(datafil):
    """Read raw (times_s, distances_km) arrays from a GPSLogger csv file."""
    data = read_csv(datafil).set_index('time')
    data.index = to_datetime(data.index)
    data = data[data['provider'] == 'gps'] # keep only rows whose position comes from the GPS
    tidspunkt_s = hent_tidsdata(data)
    avstander_km = hent_forflyttningsdata(data)
    return tidspunkt_s, avstander_km
def last_uniform_data_gpslogger(datafil, tid_mellom_målinger_s):
    """Read a GPSLogger csv file and resample it to uniform time steps."""
    data = read_csv(datafil).set_index('time')
    data.index = to_datetime(data.index)
    data = data[data['provider'] == 'gps'] # keep only rows whose position comes from the GPS
    return hent_uniform_data(data, tid_mellom_målinger_s)
def last_rådata_gpx(datafil, track=None):
    """Read raw (times_s, distances_km) arrays from a gpx file.

    ``track`` selects which gpx track to read (0-based); when None the
    first track is used.

    Fix: the multi-track warning fired whenever the file had at least ONE
    track (``> 0``); it now fires only when there genuinely is more than
    one track, while ``track`` still defaults to 0.
    """
    with open(datafil, 'r') as gpxfile:
        gpx = gpxpy.parse(gpxfile)
    if track is None:
        if len(gpx.tracks) > 1:
            warn(
                "Det er mer en ett track i gpx filen, henter det første "
                "om du ønsker track nummer `n`, må du spesifisere `track=n-1` når "
                "du laster inn dataen (f.eks. `last_rådata(datafil, track=2)` "
                "å hente inn track nummer 3).\n\n"
                "For å fjerne denne advarselen kan du kalle på denne funksjonen med "
                "`track=0`."
            )
        track = 0
    # Collect every point of the selected track as one row per fix
    data = []
    for segment in gpx.tracks[track].segments:
        for point in segment.points:
            rad = {
                'lat': point.latitude,
                'lon': point.longitude,
                'time': point.time
            }
            data.append(rad)
    data = DataFrame(data).set_index('time')
    tidspunkt_s = hent_tidsdata(data)
    avstander_km = hent_forflyttningsdata(data)
    return tidspunkt_s, avstander_km
def last_uniform_data_gpx(datafil, tid_mellom_målinger_s, track=None):
    """Read a gpx file and resample it to uniform time steps.

    Fix: the multi-track warning fired whenever the file had at least ONE
    track (``> 0``); it now fires only when there genuinely is more than
    one track, while ``track`` still defaults to 0.
    """
    with open(datafil, 'r') as gpxfile:
        gpx = gpxpy.parse(gpxfile)
    if track is None:
        if len(gpx.tracks) > 1:
            warn(
                "Det er mer en ett track i gpx filen, henter det første "
                "om du ønsker track nummer `n`, må du spesifisere `track=n-1` når "
                "du laster inn dataen (f.eks. `last_uniform_data(datafil, dt, track=2)` "
                "å hente inn track nummer 3).\n\n"
                "For å fjerne denne advarselen kan du kalle på denne funksjonen med "
                "`track=0`."
            )
        track = 0
    # Collect every point of the selected track as one row per fix
    data = []
    for segment in gpx.tracks[track].segments:
        for point in segment.points:
            rad = {
                'lat': point.latitude,
                'lon': point.longitude,
                'time': point.time
            }
            data.append(rad)
    data = DataFrame(data).set_index('time')
    return hent_uniform_data(data, tid_mellom_målinger_s)
def last_rådata(datafil, track=None):
    """Load raw track data from a gpx file or a GPSLogger csv file.

    Dispatches on the file extension; ``track`` is only meaningful for
    gpx files.
    """
    datafil = Path(datafil)
    suffix = datafil.suffix
    if suffix == '.gpx':
        sjekk_at_gpxpy_er_installert()
        return last_rådata_gpx(datafil, track=track)
    if suffix == '.csv':
        if track is not None:
            warn("Du kan kun spesifisere track dersom du bruker gpx-filer.")
        return last_rådata_gpslogger(datafil)
    raise ValueError("Filtypen må enten være csv (for GPSLogger csv filer) eller gpx.")
def last_uniform_data(datafil, tid_mellom_målinger_s, track=None):
    """Load a track resampled to uniformly spaced measurements.

    Draws a straight line between consecutive data points (as when
    plotting) and samples the displacement every
    ``tid_mellom_målinger_s`` seconds.  Returns (times, distances).
    Dispatches on the file extension; ``track`` is gpx-only.
    """
    datafil = Path(datafil)
    suffix = datafil.suffix
    if suffix == '.gpx':
        sjekk_at_gpxpy_er_installert()
        return last_uniform_data_gpx(datafil, tid_mellom_målinger_s, track=track)
    if suffix == '.csv':
        if track is not None:
            warn("Du kan kun spesifisere track dersom du bruker gpx-filer.")
        return last_uniform_data_gpslogger(datafil, tid_mellom_målinger_s)
    raise ValueError("Filtypen må enten være csv (for GPS tracker csv filer) eller gpx.")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 23 16:08:04 2020
@author: sariyanidi
@description: This script takes a trained keras model, and converts it
into a format that's recognizable by OpenCV
"""
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
import tensorflow as tf
import numpy as np
import os
from FAN import FAN
# The weights file is created by running train.py. The code below if the
# weights file exists. If you don't want to run train.py (takes ~10hrs),
# you can simply download the file from the link below
weights_file = './models/FAN_3d_nadam_rmsprop68.h5'
url_weights_file = "http://www.sariyanidi.com/media/%s" % os.path.basename(weights_file)
if not os.path.exists(weights_file):
    print("""Make sure that there is a trained model file at %s""" % weights_file)
    print("""You can either download the trained model from %s,
    or train it yourself by running train.py.""" % url_weights_file)
    exit()
# Create the model and load the weights
model = FAN(4)
model.build(input_shape=(None, 256, 256, 3))
model.compile(loss='mse')
model.load_weights(weights_file)
# One dummy forward pass so all variables are built before freezing
model.predict(np.random.rand(1, 256, 256, 3))
model.trainable = False
# The following lines convert the keras model to a tf model
model.save('model_FAN_final')
loaded = tf.saved_model.load('model_FAN_final')
infer = loaded.signatures['serving_default']
f = tf.function(infer).get_concrete_function(input_1=tf.TensorSpec(shape=[1, 256, 256, 3], dtype=tf.float32))
f2 = convert_variables_to_constants_v2(f)
graph_def = f2.graph.as_graph_def()
# Export frozen graph, the file models/model_FAN_frozen.pb is the file
# that will be used by OpenCV
# NOTE(review): the `with ... as f` below rebinds the name `f` that held the
# concrete function above — harmless here, but confusing to read.
with tf.io.gfile.GFile('models/model_FAN_frozen.pb', 'wb') as f:
    f.write(graph_def.SerializeToString())
from checkers.game import Game
import numpy as np
import copy
import operator
import random
def main():
    """Run a checkers match: human as player 1 vs. alpha-beta bot (depth 5)."""
    game = Game()
    # Limit on consecutive non-capture moves before the game ends
    game.consecutive_noncapture_move_limit = 100
    while (not game.is_over()):
        if game.whose_turn() == 1:
            human_move(
                game)  # if you want the bot to play for the human replace this line with bot_move(game,desired_depth)
            # bot_move(game, 1)
        else:
            bot_move(game, 5)
        # Show the board after every move
        print_game_to_console(game)
def human_move(game):
    """Prompt the human player for a move index on stdin and apply it.

    Fix: the original re-prompted only ONCE on an out-of-range number (a
    second bad entry raised IndexError) and accepted negative numbers,
    which silently wrapped around via Python's negative indexing.  This
    version re-prompts until the index is within range.
    """
    moves = game.get_possible_moves()
    print("Possible moves for human ", moves)
    prompt = "insert move number from list 0 - " + str(len(moves) - 1)
    move_number = int(input(prompt))
    while move_number < 0 or move_number >= len(moves):
        print("number out of bounds please provide a number within 0 - ", len(moves) - 1)
        move_number = int(input(prompt))
    print("player moved to ", moves[move_number])
    game.move(moves[move_number])
def bot_move(game, depth):
    """Choose and play the bot's move via alpha-beta search.

    Evaluates every legal move to ``depth`` plies, then plays one chosen
    uniformly at random among the highest-scoring moves.

    Fix: the original printed a RANDOM best move but then played the FIRST
    best move (``index`` from max()), so the printed move could disagree
    with the move actually made.  The random tie-break choice is now made
    once and used for both the log line and the move.
    """
    moves = game.get_possible_moves()
    print("Possible moves for bot ", moves)
    scores = []
    for move in moves:
        new_game = copy.deepcopy(game)
        new_game.move(move)
        val, _ = alphabeta(new_game, depth, float("-inf"), float("inf"), True, moves[0])
        scores.append(val)
    best_value = max(scores)
    # Pick randomly between the moves sharing the highest value
    best_indices = [i for i, s in enumerate(scores) if s == best_value]
    chosen = random.choice(best_indices)
    print("bot moved to ", moves[chosen])
    game.move(moves[chosen])
def alphabeta(node, depth, alpha, beta, maximizingPlayer,
              best_move):  # used the psudocode provided at: https://en.wikipedia.org/wiki/Alpha%E2%80%93beta_pruning
    """Alpha-beta minimax over deep-copied game states.

    Returns (value, best_move).  NOTE(review): ``best_move`` is threaded
    through the recursion and may be overwritten by deeper calls before the
    ``get_game_score(new_game)`` comparison runs, and that comparison uses
    the immediate child's heuristic rather than the backed-up value —
    confirm this matches the intended move-selection semantics.
    """
    # possible_scores
    if depth == 0 or node.is_over():
        # Leaf: evaluate the position heuristically
        return get_game_score(node), best_move
    if maximizingPlayer:
        value = float("-inf")
        for i in range(0, len(node.get_possible_moves())):
            new_game = copy.deepcopy(node)
            new_game.move(node.get_possible_moves()[i])
            new_value, best_move = alphabeta(new_game, depth - 1, alpha, beta, False, best_move)
            value = max(value, new_value)
            if get_game_score(new_game) > value:
                best_move = node.get_possible_moves()[i]
            # print(best_move, value)
            alpha = max(alpha, value)
            # Beta cutoff: the minimizing parent will never allow this line
            if alpha >= beta:
                break
        return value, best_move
    else:
        value = float("inf")
        for i in range(0, len(node.get_possible_moves())):
            new_game = copy.deepcopy(node)
            new_game.move(node.get_possible_moves()[i])
            new_value, best_move = alphabeta(new_game, depth - 1, alpha, beta, True, best_move)
            value = min(value, new_value)  # need to do the same as above here
            beta = min(beta, value)
            if get_game_score(new_game) < value:
                best_move = node.get_possible_moves()[i]
            # Alpha cutoff: the maximizing parent will never allow this line
            if alpha >= beta:
                break
        return value, best_move
def get_game_score(game):
    """Heuristic evaluation of ``game`` from the current player's view.

    +500/-500 for a won/lost finished game, +20 for any piece with more
    than one capture move available, +/-20 per king and +/-5 per man
    depending on whether the piece belongs to the player to move.
    """
    player_num = game.whose_turn()
    score = 0
    if game.get_winner() == player_num:
        score += 500
    elif game.is_over():
        score -= 500
    for piece in game.board.pieces:
        if piece.captured:
            continue
        # Reward multi-capture opportunities regardless of owner
        if len(piece.get_possible_capture_moves()) > 1:
            score += 20
        mine = piece.player == player_num
        if piece.king:
            score += 20 if mine else -20
        else:
            score += 5 if mine else -5
    return score
def print_game_to_console(game):
    """Render the 8x8 board to stdout using unicode draughts symbols.

    NOTE(review): np.chararray is deprecated in modern numpy; a plain
    ndarray with dtype='<U1' would do the same job.
    """
    game_state = np.chararray((8, 8), unicode=True)
    game_state[:] = '_'
    for piece in game.board.pieces:
        # Player 1 gets the light symbols, player 2 the dark ones
        if piece.player == 1:
            checker_symbol = '⛀'
            king_symbol = '⛁'
        else:
            checker_symbol = '⛂'
            king_symbol = '⛃'
        if not piece.captured:
            if piece.king == 1:
                # Only dark squares are playable, so the column index is
                # doubled and offset by one on even rows.
                if piece.get_row() % 2 == 0:
                    game_state[piece.get_row()][piece.get_column() * 2 + 1] = king_symbol
                else:
                    game_state[piece.get_row()][piece.get_column() * 2] = king_symbol
            else:
                # print(piece.get_adjacent_positions(), "", piece.get_row(), piece.get_column())
                if piece.get_row() % 2 == 0:
                    game_state[piece.get_row()][piece.get_column() * 2 + 1] = checker_symbol
                else:
                    game_state[piece.get_row()][piece.get_column() * 2] = checker_symbol
    print(game_state)
if __name__ == "__main__":
    # Entry point: start the interactive game loop
    main()
import re
import torch
from batchgenerators.dataloading import MultiThreadedAugmenter
import numpy as np
import os
from batchgenerators.dataloading.data_loader import DataLoaderFromDataset
from batchgenerators.datasets.cifar import HighPerformanceCIFARLoader, CifarDataset
from batchgenerators.transforms.spatial_transforms import SpatialTransform
from batchgenerators.transforms import NumpyToTensor, Compose
from torch._six import int_classes, string_classes, container_abcs
from torch.utils.data.dataloader import numpy_type_map
_use_shared_memory = False
def default_collate(batch):
    r"""Puts each data field into a tensor with outer dimension batch size.

    Vendored (pre-1.0) torch.utils.data.dataloader collate function.
    Dispatch is on the type of the first element: tensors are stacked,
    numeric numpy arrays converted and stacked, numpy scalars promoted to
    the matching tensor type, ints/floats become Long/DoubleTensor,
    strings pass through unchanged, and mappings/sequences are collated
    recursively.  Raises TypeError for any other element type.
    """
    error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
    elem_type = type(batch[0])
    if isinstance(batch[0], torch.Tensor):
        out = None
        if _use_shared_memory:
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy
            numel = sum([x.numel() for x in batch])
            storage = batch[0].storage()._new_shared(numel)
            out = batch[0].new(storage)
        return torch.stack(batch, 0, out=out)
    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
            and elem_type.__name__ != 'string_':
        elem = batch[0]
        if elem_type.__name__ == 'ndarray':
            # arrays of string classes and object dtypes cannot be collated
            if re.search('[SaUO]', elem.dtype.str) is not None:
                raise TypeError(error_msg.format(elem.dtype))
            return torch.stack([torch.from_numpy(b) for b in batch], 0)
        if elem.shape == ():  # scalars
            # Promote through the matching python type so the constructor
            # from numpy_type_map receives plain ints/floats.
            py_type = float if elem.dtype.name.startswith('float') else int
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], int_classes):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.DoubleTensor(batch)
    elif isinstance(batch[0], string_classes):
        return batch
    elif isinstance(batch[0], container_abcs.Mapping):
        # Collate each key across the batch recursively.
        return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], container_abcs.Sequence):
        # Transpose [[a1, b1], [a2, b2]] -> collate([a1, a2]), collate([b1, b2]).
        transposed = zip(*batch)
        return [default_collate(samples) for samples in transposed]
    raise TypeError((error_msg.format(type(batch[0]))))
if __name__ == '__main__':
    # Benchmark: iterate CIFAR-10 through three pipelines (batchgenerators
    # DataLoaderFromDataset, a high-performance loader that bypasses
    # collation, and torch's DataLoader) and time a few epochs of each.
    ### current implementation of batchgenerators stuff for this script does not use _use_shared_memory!
    from time import time
    batch_size = 50
    num_workers = 8
    pin_memory = False
    num_epochs = 3
    dataset_dir = '/media/fabian/data/data/cifar10'
    numpy_to_tensor = NumpyToTensor(['data', 'labels'], cast_to=None)
    fname = os.path.join(dataset_dir, 'cifar10_training_data.npz')
    dataset = np.load(fname)
    cifar_dataset_as_arrays = (dataset['data'], dataset['labels'], dataset['filenames'])
    print('batch_size', batch_size)
    print('num_workers', num_workers)
    print('pin_memory', pin_memory)
    print('num_epochs', num_epochs)
    tr_transforms = [SpatialTransform((32, 32))] * 1 # SpatialTransform is computationally expensive and we need some
    # load on CPU so we just stack 5 of them on top of each other
    tr_transforms.append(numpy_to_tensor)
    tr_transforms = Compose(tr_transforms)
    cifar_dataset = CifarDataset(dataset_dir, train=True, transform=tr_transforms)
    dl = DataLoaderFromDataset(cifar_dataset, batch_size, num_workers, 1)
    mt = MultiThreadedAugmenter(dl, None, num_workers, 1, None, pin_memory)
    # Warm-up pass doubling as a sanity check on batch shape and count.
    batches = 0
    for _ in mt:
        batches += 1
        assert len(_['data'].shape) == 4
    assert batches == len(cifar_dataset) / batch_size # this assertion only holds if len(datset) is divisible by
    # batch size
    start = time()
    for _ in range(num_epochs):
        batches = 0
        for _ in mt:
            batches += 1
    stop = time()
    print('batchgenerators took %03.4f seconds' % (stop - start))
    # The best I can do:
    dl = HighPerformanceCIFARLoader(cifar_dataset_as_arrays, batch_size, num_workers, 1) # this circumvents the
    # default_collate function, just to see if that is slowing things down
    mt = MultiThreadedAugmenter(dl, tr_transforms, num_workers, 1, None, pin_memory)
    batches = 0
    for _ in mt:
        batches += 1
        assert len(_['data'].shape) == 4
    assert batches == len(cifar_dataset_as_arrays[0]) / batch_size # this assertion only holds if len(datset) is
    # divisible by batch size
    start = time()
    for _ in range(num_epochs):
        batches = 0
        for _ in mt:
            batches += 1
    stop = time()
    print('high performance batchgenerators %03.4f seconds' % (stop - start))
    from torch.utils.data import DataLoader as TorchDataLoader
    trainloader = TorchDataLoader(cifar_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers,
                                  pin_memory=pin_memory, collate_fn=default_collate)
    batches = 0
    for _ in iter(trainloader):
        batches += 1
        assert len(_['data'].shape) == 4
    start = time()
    for _ in range(num_epochs):
        batches = 0
        for _ in trainloader:
            batches += 1
    stop = time()
    print('pytorch took %03.4f seconds' % (stop - start))
import numpy as np
from math import sqrt
class PondingLoadCell2d:
    """Two-node (beam segment) ponding load cell.

    Converts a water surface elevation ``z`` into equivalent nodal forces
    at nodes I and J and the retained water volume, with the water depth
    varying linearly along the deflected segment.  Downward load is
    returned as a negative force.  Optional snow (``gammas``, ``hs``)
    superimposes the snow weight plus a water/snow overlap adjustment.
    """

    # Class-level defaults; instances are expected to overwrite these.
    id = ''       # Load cell ID
    xI = 0.0      # X coordinate of Node I
    yI = 0.0      # Y coordinate of Node I
    xJ = 0.0      # X coordinate of Node J
    yJ = 0.0      # Y coordinate of Node J
    dyI = 0.0     # Y deflection of Node I
    dyJ = 0.0     # Y deflection of Node J
    gamma = 0     # Fluid density
    tw = 0        # Tributary width
    gammas = 0    # Snow density
    hs = 0        # Snow height

    def __init__(self):
        pass

    def get_load_vector(self, z):
        """Return the 2x1 nodal force matrix ``[[FI], [FJ]]`` for level ``z``.

        The resultant ``F`` of the triangular/trapezoidal pressure block
        and its centroid ``x`` (measured from the I end) are computed and
        split to the two nodes by the lever rule.

        Bug fixes vs. the original: a cell with ``hJ == 0`` but ``hI > 0``
        previously returned zero force (inconsistent with ``get_volume``);
        and the snow crossing lengths divided by zero when ``hI == hJ``.
        """
        L = abs(self.xJ - self.xI)
        hI = z - (self.yI + self.dyI)  # water depth over node I
        hJ = z - (self.yJ + self.dyJ)  # water depth over node J
        if hI >= 0:
            if hJ >= 0:
                if hI + hJ == 0:
                    # Dry at both ends; avoid 0/0 in the centroid formula.
                    F = 0
                    x = 0.5*L
                else:
                    # Fully wetted: trapezoidal pressure block.
                    F = -self.gamma*self.tw*(hI+hJ)*L/2
                    x = L*(2*hJ+hI)/(3*(hI+hJ))
            else:
                # Wet at I only: triangle over Lo measured from the I end.
                Lo = (hI)/(hI-hJ)*L
                F = -self.gamma*self.tw*hI*Lo/2
                x = Lo/3
            
        else:
            if hJ >= 0:
                # Wet at J only: triangle over Lo measured from the J end.
                Lo = (hJ)/(hJ-hI)*L
                F = -self.gamma*self.tw*hJ*Lo/2
                x = L-Lo/3
            else:
                # Completely dry cell.
                F = 0
                x = 0.5*L
        if self.gammas > 0 and self.hs > 0:
            # Snow Force
            Fs = -self.gammas*self.tw*self.hs*L
            xs = L/2
            # Overlap Adjustment Force
            gammaoa = min(self.gamma, self.gammas)
            if hJ != hI:
                # Length from the I end where the water level crosses the
                # snow surface (Lxs) and the beam (Lxb), respectively.
                Lxs = (self.hs-hI)*L/(hJ-hI)
                Lxb = -hI*L/(hJ-hI)
            else:
                # Uniform depth: crossings are undefined, and the uniform
                # branches below never use them.
                Lxs = 0.0
                Lxb = 0.0
            if hI >= self.hs:
                if hJ >= self.hs:
                    Foa = gammaoa*self.tw*self.hs*L
                    xoa = L/2
                elif hJ >= 0:
                    F1 = gammaoa*self.tw*self.hs*Lxs
                    x1 = Lxs/2
                    F2 = gammaoa*self.tw*(self.hs+hJ)*(L-Lxs)/2
                    x2 = Lxs + (L-Lxs)*(2*hJ+self.hs)/(3*(hJ+self.hs))
                    Foa = F1 + F2
                    xoa = (x1*F1 + x2*F2)/Foa
                else:
                    F1 = gammaoa*self.tw*self.hs*Lxs
                    x1 = Lxs/2
                    F2 = gammaoa*self.tw*(self.hs)*(Lxb-Lxs)/2
                    x2 = Lxs + (Lxb-Lxs)/3
                    Foa = F1 + F2
                    xoa = (x1*F1 + x2*F2)/Foa
            elif hI >= 0:
                if hJ >= self.hs:
                    F1 = gammaoa*self.tw*(hI+self.hs)*(Lxs)/2
                    x1 = Lxs*(2*self.hs+hI)/(3*(self.hs+hI))
                    F2 = gammaoa*self.tw*self.hs*(L-Lxs)
                    x2 = Lxs + (L-Lxs)/2
                    Foa = F1 + F2
                    xoa = (x1*F1 + x2*F2)/Foa
                elif hJ >= 0:
                    if hI + hJ == 0:
                        # Water just touches both nodes: no overlap volume.
                        Foa = 0
                        xoa = 0
                    else:
                        Foa = gammaoa*self.tw*(hI+hJ)*L/2
                        xoa = L*(2*hJ+hI)/(3*(hJ+hI))
                else:
                    Foa = gammaoa*self.tw*hI*Lxb/2
                    xoa = Lxb/3
            else:
                if hJ >= self.hs:
                    F1 = gammaoa*self.tw*self.hs*(Lxs-Lxb)/2
                    x1 = Lxs - (Lxs-Lxb)/3
                    F2 = gammaoa*self.tw*self.hs*(L-Lxs)
                    x2 = L - (L-Lxs)/2
                    Foa = F1 + F2
                    xoa = (x1*F1 + x2*F2)/Foa
                elif hJ >= 0:
                    Foa = gammaoa*self.tw*hJ*(L-Lxb)/2
                    xoa = L - (L-Lxb)/3
                else:
                    Foa = 0
                    xoa = 0
            # Total force; resultant position by moment balance.
            x = (x*F + xs*Fs + xoa*Foa)/(F+Fs+Foa)
            F = F + Fs + Foa
        f = np.mat([[(1-x/L)*F],
                    [  (x/L)*F]])
        return f

    def get_volume(self, z):
        """Return ``(V, dV/dz)``: retained water volume and its derivative.

        Raises
        ------
        Exception
            If snow parameters are set (not implemented with snow).
        """
        L = abs(self.xJ - self.xI)
        hI = z - (self.yI + self.dyI)
        hJ = z - (self.yJ + self.dyJ)
        if hI >= 0:
            if hJ >= 0:
                V = self.tw*(hI+hJ)*L/2
                dVdz = self.tw*L
            else:
                Lo = (hI)/(hI-hJ)*L
                V = self.tw*hI*Lo/2
                dVdz = self.tw*Lo
        else:
            if hJ >= 0:
                Lo = (hJ)/(hJ-hI)*L
                V = self.tw*hJ*Lo/2
                dVdz = self.tw*Lo
            else:
                V = 0
                dVdz = 0
        if self.gammas > 0 and self.hs > 0:
            raise Exception('get_volume not yet implemented for cases with snow')
        return (V, dVdz)
class PondingLoadCell3d:
    """Four-node quadrilateral ponding load cell.

    Converts a water level ``z`` into consistent nodal forces and retained
    volume via 2x2 Gauss quadrature over bilinear shape functions; the
    cell can be split into ``na`` x ``nb`` sub-cells for better resolution
    of partially wetted / partially snow-covered regions.
    """
    id = ''  # Load cell ID
    # Define nodes (vertices) in counterclockwise (CCW) direction
    xI = 0.0  # X coordinate of Node I
    yI = 0.0  # Y coordinate of Node I
    zI = 0.0  # Z coordinate of Node I
    xJ = 0.0  # X coordinate of Node J
    yJ = 0.0  # Y coordinate of Node J
    zJ = 0.0  # Z coordinate of Node J
    xK = 0.0  # X coordinate of Node K
    yK = 0.0  # Y coordinate of Node K
    zK = 0.0  # Z coordinate of Node K
    xL = 0.0  # X coordinate of Node L
    yL = 0.0  # Y coordinate of Node L
    zL = 0.0  # Z coordinate of Node L
    dzI = 0.0  # Z deflection of Node I
    dzJ = 0.0  # Z deflection of Node J
    dzK = 0.0  # Z deflection of Node K
    dzL = 0.0  # Z deflection of Node L
    gamma = 0  # Fluid density
    na = 1  # Number of sub-cells along IJ
    nb = 1  # Number of sub-cells along JK
    gammas = 0  # Snow density
    hs = 0  # Snow height
    # When True and snow is present, the uniform snow pressure is
    # subtracted so only the water contribution is returned.
    return_water_load_only = False
    def __init__(self):
        pass
    def get_load_vector(self,z):
        """Return the 4x1 consistent nodal force vector for water level ``z``.

        Forces are negative (downward).  With snow parameters set, the
        corner pressures combine water and snow piecewise in the depth;
        otherwise pressure is simply ``gamma * max(0, depth)`` per corner.
        """
        coords = np.mat([[self.xI,self.yI],
                         [self.xJ,self.yJ],
                         [self.xK,self.yK],
                         [self.xL,self.yL]])
        # Water depths at the deflected corners.
        hI = z - (self.zI + self.dzI)
        hJ = z - (self.zJ + self.dzJ)
        hK = z - (self.zK + self.dzK)
        hL = z - (self.zL + self.dzL)
        # Define numerical integration points and weights
        n_ip = 4
        xi_ip = [-1/sqrt(3), 1/sqrt(3), 1/sqrt(3),-1/sqrt(3)]
        eta_ip = [-1/sqrt(3),-1/sqrt(3), 1/sqrt(3), 1/sqrt(3)]
        w_ip = [ 1, 1, 1, 1]
        # Calculate load
        f = np.zeros((4,1))
        if self.na == 1 and self.nb == 1:
            # Compute pressure due to water and snow at each corner of the cell
            if self.gammas > 0 and self.hs > 0:
                # Piecewise pressure in depth: snow only (h <= 0), mixed
                # (0 < h <= hs), water above submerged snow (h > hs).
                if hI <= 0:
                    wpI = self.gammas*self.hs
                elif hI <= self.hs:
                    wpI = max(self.gamma,self.gammas)*hI + self.gammas*(self.hs-hI)
                else:
                    wpI = max(self.gamma,self.gammas)*self.hs + self.gamma*(hI-self.hs)
                if hJ <= 0:
                    wpJ = self.gammas*self.hs
                elif hJ <= self.hs:
                    wpJ = max(self.gamma,self.gammas)*hJ + self.gammas*(self.hs-hJ)
                else:
                    wpJ = max(self.gamma,self.gammas)*self.hs + self.gamma*(hJ-self.hs)
                if hK <= 0:
                    wpK = self.gammas*self.hs
                elif hK <= self.hs:
                    wpK = max(self.gamma,self.gammas)*hK + self.gammas*(self.hs-hK)
                else:
                    wpK = max(self.gamma,self.gammas)*self.hs + self.gamma*(hK-self.hs)
                if hL <= 0:
                    wpL = self.gammas*self.hs
                elif hL <= self.hs:
                    wpL = max(self.gamma,self.gammas)*hL + self.gammas*(self.hs-hL)
                else:
                    wpL = max(self.gamma,self.gammas)*self.hs + self.gamma*(hL-self.hs)
                if self.return_water_load_only:
                    wpI = wpI - self.gammas*self.hs
                    wpJ = wpJ - self.gammas*self.hs
                    wpK = wpK - self.gammas*self.hs
                    wpL = wpL - self.gammas*self.hs
                wp = np.array([[wpI],[wpJ],[wpK],[wpL]])
            else:
                wp = self.gamma*np.array([[max(0,hI)],[max(0,hJ)],[max(0,hK)],[max(0,hL)]])
            # Compute the force vector
            for iip in range(n_ip):
                j = self.Jacobian(xi_ip[iip],eta_ip[iip],coords)
                N = self.ShapeFunction(xi_ip[iip],eta_ip[iip])
                f += j*N.dot(np.transpose(N).dot(-wp))
        else:
            h = np.array([[hI],[hJ],[hK],[hL]])
            # Loop over each sub-cell
            for ia in range(self.na):
                for ib in range(self.nb):
                    # Define coordinates (in local coordinates) of the corners of the sub-cell
                    xi_sub = [-1+2*ia/self.na,-1+2*(ia+1)/self.na,-1+2*(ia+1)/self.na,-1+2*ia/self.na]
                    eta_sub = [-1+2*ib/self.nb,-1+2*ib/self.nb,-1+2*(ib+1)/self.nb,-1+2*(ib+1)/self.nb]
                    # Compute for each corner of the sub-cell...
                    coords_sub = np.zeros((4,2))
                    wp_sub = np.zeros((4,1))
                    for i in range(4):
                        N = self.ShapeFunction(xi_sub[i],eta_sub[i])
                        # Coordinates (in global coordinates)
                        coords_sub[i,:] = np.transpose(N).dot(coords)
                        # Height of water at corner of sub-cell
                        h_sub = np.transpose(N).dot(h)
                        # Pressure due to water and snow
                        if self.gammas > 0 and self.hs > 0:
                            if h_sub <= 0:
                                wp_sub[i] = self.gammas*self.hs
                            elif h_sub <= self.hs:
                                wp_sub[i] = max(self.gamma,self.gammas)*h_sub + self.gammas*(self.hs-h_sub)
                            else:
                                wp_sub[i] = max(self.gamma,self.gammas)*self.hs + self.gamma*(h_sub-self.hs)
                            if self.return_water_load_only:
                                wp_sub[i] = wp_sub[i] - self.gammas*self.hs
                        else:
                            wp_sub[i] = self.gamma*max(0,h_sub)
                    # Compute sub-cell force vector
                    f_sub = np.zeros((4,1))
                    for iip in range(n_ip):
                        j = self.Jacobian(xi_ip[iip],eta_ip[iip],coords_sub)
                        N = self.ShapeFunction(xi_ip[iip],eta_ip[iip])
                        f_sub += j*N.dot(np.transpose(N).dot(-wp_sub))
                    # Convert sub-cell force vector to cell force vector
                    for i in range(4):
                        N = self.ShapeFunction(xi_sub[i],eta_sub[i])
                        f = f + N*f_sub[i]
        return f
    def get_volume(self,z):
        """Return ``(V, dVdz)`` for water level ``z``.

        ``V`` is recovered from the load vector as total force / gamma.
        NOTE(review): with snow parameters set and ``return_water_load_only``
        False that total includes the snow load -- confirm that is intended
        (the 2d cell raises in this case).  ``dVdz`` is the wetted plan
        area, assembled per sub-cell from the polygon where depth > 0.
        """
        coords = np.mat([[self.xI,self.yI],
                         [self.xJ,self.yJ],
                         [self.xK,self.yK],
                         [self.xL,self.yL]])
        hI = z - (self.zI + self.dzI)
        hJ = z - (self.zJ + self.dzJ)
        hK = z - (self.zK + self.dzK)
        hL = z - (self.zL + self.dzL)
        h = np.array([[hI],[hJ],[hK],[hL]])
        V = -self.get_load_vector(z).sum()/self.gamma
        dVdz = 0
        for ia in range(self.na):
            for ib in range(self.nb):
                xi_sub = [-1+2*ia/self.na,-1+2*(ia+1)/self.na,-1+2*(ia+1)/self.na,-1+2*ia/self.na]
                eta_sub = [-1+2*ib/self.nb,-1+2*ib/self.nb,-1+2*(ib+1)/self.nb,-1+2*(ib+1)/self.nb]
                # Compute coordinates and height of ponded water in the sub-cell
                coords_sub = np.zeros((4,2))
                h_sub = np.zeros((4,1))
                for i in range(4):
                    N = self.ShapeFunction(xi_sub[i],eta_sub[i])
                    coords_sub[i,:] = np.transpose(N).dot(coords)
                    h_sub[i] = np.transpose(N).dot(h)
                # Determine pologon where h > 0: walk the four edges, keeping
                # wet corners and inserting the interpolated zero-depth
                # crossing point on edges that change sign.
                x_coord = np.empty(0)
                y_coord = np.empty(0)
                if h_sub[0] >= 0:
                    x_coord = np.append(x_coord,coords_sub[0,0])
                    y_coord = np.append(y_coord,coords_sub[0,1])
                if (h_sub[0] > 0 and h_sub[1] < 0) or (h_sub[0] < 0 and h_sub[1] > 0):
                    a = h_sub[0]/(h_sub[0]-h_sub[1])
                    x_coord = np.append(x_coord,coords_sub[0,0]+a*(coords_sub[1,0]-coords_sub[0,0]))
                    y_coord = np.append(y_coord,coords_sub[0,1]+a*(coords_sub[1,1]-coords_sub[0,1]))
                if h_sub[1] >= 0:
                    x_coord = np.append(x_coord,coords_sub[1,0])
                    y_coord = np.append(y_coord,coords_sub[1,1])
                if (h_sub[1] > 0 and h_sub[2] < 0) or (h_sub[1] < 0 and h_sub[2] > 0):
                    a = h_sub[1]/(h_sub[1]-h_sub[2])
                    x_coord = np.append(x_coord,coords_sub[1,0]+a*(coords_sub[2,0]-coords_sub[1,0]))
                    y_coord = np.append(y_coord,coords_sub[1,1]+a*(coords_sub[2,1]-coords_sub[1,1]))
                if h_sub[2] >= 0:
                    x_coord = np.append(x_coord,coords_sub[2,0])
                    y_coord = np.append(y_coord,coords_sub[2,1])
                if (h_sub[2] > 0 and h_sub[3] < 0) or (h_sub[2] < 0 and h_sub[3] > 0):
                    a = h_sub[2]/(h_sub[2]-h_sub[3])
                    x_coord = np.append(x_coord,coords_sub[2,0]+a*(coords_sub[3,0]-coords_sub[2,0]))
                    y_coord = np.append(y_coord,coords_sub[2,1]+a*(coords_sub[3,1]-coords_sub[2,1]))
                if h_sub[3] >= 0:
                    x_coord = np.append(x_coord,coords_sub[3,0])
                    y_coord = np.append(y_coord,coords_sub[3,1])
                if (h_sub[3] > 0 and h_sub[0] < 0) or (h_sub[3] < 0 and h_sub[0] > 0):
                    a = h_sub[3]/(h_sub[3]-h_sub[0])
                    x_coord = np.append(x_coord,coords_sub[3,0]+a*(coords_sub[0,0]-coords_sub[3,0]))
                    y_coord = np.append(y_coord,coords_sub[3,1]+a*(coords_sub[0,1]-coords_sub[3,1]))
                # Compute area of polygon (shoelace formula) and add it to dVdz
                if x_coord.size > 0:
                    dVdz += 0.5*np.abs(np.dot(x_coord,np.roll(y_coord,1))-np.dot(y_coord,np.roll(x_coord,1)))
        return (V,dVdz)
    @staticmethod
    def ShapeFunction(xi,eta):
        """Bilinear shape functions of the quad at (xi, eta), 4x1 (I,J,K,L)."""
        N = np.array([[(1-xi)*(1-eta)],
                      [(1+xi)*(1-eta)],
                      [(1+xi)*(1+eta)],
                      [(1-xi)*(1+eta)]])/4
        return N
    @staticmethod
    def Jacobian(xi,eta,coords):
        """Determinant of the isoparametric mapping at (xi, eta)."""
        dNd_ = np.array([[-(1-eta), (1-eta), (1+eta),-(1+eta)],
                         [ -(1-xi), -(1+xi), (1+xi), (1-xi)]])/4
        jac = np.dot(dNd_,coords)
        j = np.linalg.det(jac)
        return j
################################################################################
# Copyright (C) 2011-2012,2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Module for the categorical distribution node.
"""
import numpy as np
from .node import ensureparents
from .expfamily import (ExponentialFamily,
useconstructor)
from .multinomial import (MultinomialMoments,
MultinomialDistribution,
Multinomial)
from .dirichlet import DirichletMoments
from bayespy.utils import random
from bayespy.utils import misc
class CategoricalMoments(MultinomialMoments):
    """
    Class for the moments of categorical variables.
    """

    def compute_fixed_moments(self, x):
        """
        Compute the moments for a fixed value.

        Values are integer category indices in ``[0, categories)``; the
        moment is the one-hot indicator array with a trailing axis of
        length ``categories``.
        """
        # Check that x is valid
        x = np.asanyarray(x)
        if not misc.isinteger(x):
            raise ValueError("Values must be integers")
        if np.any(x < 0) or np.any(x >= self.categories):
            raise ValueError("Invalid category index")
        # One non-zero entry (1) per observation along the last axis.
        # Tuple indexing: list-of-arrays fancy indexing is deprecated in NumPy.
        u0 = np.zeros((np.size(x), self.categories))
        u0[np.arange(np.size(x)), np.ravel(x)] = 1
        u0 = np.reshape(u0, np.shape(x) + (self.categories,))
        return [u0]

    @classmethod
    def from_values(cls, x, categories):
        """
        Construct a moments object for the given number of categories.

        The observations are scalar.
        """
        # Unreachable legacy code that followed this return was removed.
        return cls(categories)

    def get_instance_conversion_kwargs(self):
        # Keyword arguments needed to rebuild an equivalent instance.
        return dict(categories=self.categories)

    def get_instance_converter(self, categories):
        # No conversion needed (None) when the category counts agree.
        if categories is not None and categories != self.categories:
            raise ValueError(
                "No automatic conversion from CategoricalMoments to "
                "CategoricalMoments with different number of categories"
            )
        return None
class CategoricalDistribution(MultinomialDistribution):
    """
    Class for the VMP formulas of categorical variables.

    A categorical variable is a multinomial with a single trial.
    """

    def __init__(self, categories):
        """
        Create VMP formula node for a categorical variable

        `categories` is the total number of categories.
        """
        if not isinstance(categories, int):
            raise ValueError("Number of categories must be integer")
        if categories < 0:
            raise ValueError("Number of categories must be non-negative")
        self.D = categories
        # One trial turns the multinomial into a categorical distribution.
        super().__init__(1)

    def compute_fixed_moments_and_f(self, x, mask=True):
        """
        Compute the moments and :math:`f(x)` for a fixed value.

        The moment is the one-hot indicator of the observed category and
        :math:`f(x)` is identically zero.
        """
        # Check the validity of x
        x = np.asanyarray(x)
        if not misc.isinteger(x):
            raise ValueError("Values must be integers")
        if np.any(x < 0) or np.any(x >= self.D):
            raise ValueError("Invalid category index")
        # Form a binary matrix with only one non-zero (1) in the last axis.
        # Tuple indexing: list-of-arrays fancy indexing is deprecated in NumPy.
        u0 = np.zeros((np.size(x), self.D))
        u0[np.arange(np.size(x)), np.ravel(x)] = 1
        u0 = np.reshape(u0, np.shape(x) + (self.D,))
        u = [u0]
        # f(x) is zero
        f = 0
        return (u, f)

    def random(self, *phi, plates=None):
        """
        Draw a random sample from the distribution.
        """
        # Shift log-probabilities for numerical stability.  Subtract into a
        # new array: the original `logp -= ...` mutated the caller's phi[0]
        # (the node's natural parameters) in place.
        logp = phi[0] - np.amax(phi[0], axis=-1, keepdims=True)
        p = np.exp(logp)
        return random.categorical(p, size=plates)
class Categorical(ExponentialFamily):
    r"""
    Node for categorical random variables.

    The node models a categorical random variable :math:`x \in \{0,\ldots,K-1\}`
    with prior probabilities :math:`\{p_0, \ldots, p_{K-1}\}` for each category:

    .. math::

        p(x=k) = p_k \quad \text{for } k\in \{0,\ldots,K-1\}.

    Parameters
    ----------
    p : Dirichlet-like node or (...,K)-array
        Probabilities for each category

    See also
    --------
    Bernoulli, Multinomial, Dirichlet
    """

    def __init__(self, p, **kwargs):
        """
        Create Categorical node.
        """
        super().__init__(p, **kwargs)

    @classmethod
    def _constructor(cls, p, **kwargs):
        """
        Construct the distribution and moments objects for this node.

        Invoked through the useconstructor decorator on __init__: the
        number of categories is read off the (Dirichlet) parent, so these
        objects cannot be created before the parent is known.
        """
        # The parent provides Dirichlet moments; its first dimension is the
        # number of categories.
        p = cls._ensure_moments(p, DirichletMoments)
        n_categories = p.dims[0][0]
        distribution = CategoricalDistribution(n_categories)
        moments = CategoricalMoments(n_categories)
        total_plates = cls._total_plates(kwargs.get('plates'),
                                         distribution.plates_from_parent(0, p.plates))
        return ([p],
                kwargs,
                moments.dims,
                total_plates,
                distribution,
                moments,
                (p._moments,))

    def __str__(self):
        """
        Print the distribution using standard parameterization.
        """
        p = self.u[0]
        return ("%s ~ Categorical(p)\n"
                "  p = \n"
                "%s\n"
                % (self.name, p))
import numpy as np
from ceRNA.Calculations import estimate_parameters, rmse_vector, percent_error_vector
class Estimator:
    """Wraps one parameter-estimation run plus its accuracy statistics."""

    def __init__(self, real_vector: np.ndarray, tests: np.ndarray):
        self.real_vector = real_vector
        self.estimate_size = len(real_vector)
        self.tests = tests
        self.number_of_tests = len(tests)
        self.matrix = self.create_matrix()
        # Filled in by calculate_accuracy().
        self.estimate = None
        self.rmses = None
        self.average_rmse = 0
        self.relative_errors = None
        self.average_relative_error = 0

    def create_matrix(self) -> np.ndarray:
        """Matrix handed to the estimator; the base class uses tests as-is."""
        return self.tests

    def calculate_accuracy(self):
        """Run the estimator and populate the RMSE / percent-error fields."""
        full_estimate = estimate_parameters(self.matrix)
        # Keep only the trailing entries matching the true parameter vector.
        self.estimate = full_estimate[-len(self.real_vector):]
        self.rmses = rmse_vector(self.real_vector, self.estimate)
        self.average_rmse = self.rmses.mean()
        self.relative_errors = percent_error_vector(self.real_vector, self.estimate)
        self.average_relative_error = self.relative_errors.mean()

    def print_results(self):
        """Pretty-print the averaged accuracy statistics."""
        print("Results:")
        print("  Average RMSE: {0}".format(self.average_rmse))
        print("  Average PE: {0}".format(self.average_relative_error))
class DecayEstimator(Estimator):
    """Estimator variant that keeps only the decay columns of the tests."""

    def __init__(self, real_vector: np.ndarray, tests: np.ndarray):
        super().__init__(real_vector, tests)

    def create_matrix(self) -> np.ndarray:
        """Return the last ``estimate_size`` columns (right half) of tests."""
        return self.tests[:, -self.estimate_size:]
from Model import BNInception_gsm
import pandas as pd
import numpy as np
import torch
from torch import nn
import os
import random
import cv2
from Dataset import DataGenerator
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
from torch import optim
from torch.backends import cudnn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from CosineAnnealingLR import WarmupCosineLR
import pickle
import datetime
# Number of UCF-101 action classes used throughout this script.
num_classes = 101
# Metadata for the Caffe-ported ImageNet BN-Inception checkpoint: BGR
# channel order, 0-255 input range, per-channel mean subtraction only.
pretrained_settings = {
    'bninception': {
        'imagenet': {
            # Was ported using python2 (may trigger warning)
            'url': 'http://data.lip6.fr/cadene/pretrainedmodels/bn_inception-52deb4733.pth',
            # 'url': 'http://yjxiong.me/others/bn_inception-9f5701afb96c8044.pth',
            'input_space': 'BGR',
            'input_size': [3, 224, 224],
            'input_range': [0, 255],
            'mean': [104, 117, 128],
            'std': [1, 1, 1],
            'num_classes': 1000
        }
    }
}
def bninception(num_classes=1000, pretrained='imagenet'):
    r"""BNInception model architecture from <https://arxiv.org/pdf/1502.03167.pdf>`_ paper.

    Builds the GSM variant of BN-Inception and optionally loads ImageNet
    weights; the final classifier weights are removed from the checkpoint
    and loading uses strict=False, so the 101-way head stays randomly
    initialised.
    """
    # NOTE(review): the backbone is hard-coded to 101 classes and ignores
    # the num_classes argument, while the assert below compares num_classes
    # against the checkpoint's 1000 -- confirm this asymmetry is intended.
    model = BNInception_gsm(num_classes=101)
    if pretrained is not None:
        settings = pretrained_settings['bninception'][pretrained]
        assert num_classes == settings['num_classes'], \
            "num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
        states = model_zoo.load_url(settings['url'])
        # Drop the 1000-way classifier weights; their shape would not match.
        del states['last_linear.weight']
        del states['last_linear.bias']
        model.load_state_dict(states, strict=False)
        # Record the preprocessing contract the weights were trained with.
        model.input_space = settings['input_space']
        model.input_size = settings['input_size']
        model.input_range = settings['input_range']
        model.mean = settings['mean']
        model.std = settings['std']
    return model
def evaluate(preds, labels):
    """Return (accuracy, per-class correct counts) for paired preds/labels."""
    per_class = {cls: 0 for cls in range(101)}
    n_correct = 0
    for idx, pred in enumerate(preds):
        if pred == labels[idx]:
            n_correct += 1
            per_class[pred] += 1
            # Debug trace kept from the original implementation.
            print(idx)
            print(labels[idx])
    return n_correct / len(preds), per_class
def predict(model, num_frames, class_dict):
    """Run the model on one hard-coded clip and print the predicted class."""
    video_file = 'Archery/v_Archery_g07_c03'
    path = './Dataset/UCF-101/frames/'
    frame_dir = path + video_file + '/'
    frames = []
    for name in sorted(os.listdir(frame_dir))[:num_frames]:
        frames.append(cv2.resize(cv2.imread(frame_dir + name), (224, 224)))
    frames = np.array(frames)
    model.eval()
    # NOTE(review): reshape (not transpose) is used to go from HWC frames to
    # NCHW -- this matches the training code but scrambles channels; confirm.
    clip = torch.from_numpy(frames.reshape((num_frames, 3, 224, 224))).float().cuda()
    output = torch.mean(model(clip), dim=[0]).view(1, -1)
    prediction = np.argmax(output.cpu().data.numpy())
    print('Video file name: ', video_file)
    print('Model prediction: ', class_dict[prediction])
def main():
    """Fine-tune BN-Inception(GSM) on a balanced UCF-101 subset, then evaluate.

    Draws 20 training and 5 validation clips per class (seeded), trains for
    100 epochs with SGD + warmup-cosine schedule, saves the weights to
    ./Model.pt and prints validation accuracy and per-class counts.
    """
    classes_dict = {}
    path = './Dataset/UCF101TrainTestSplits-RecognitionTask/ucfTrainTestlist/'
    # classInd.txt lines look like "1 ApplyEyeMakeup"; store 0-based ids.
    with open(path + 'classInd.txt', 'r') as f:
        classes_dict = dict([(int(x.split()[0]) - 1, x.split()[1]) for x in f.read().strip().split('\n')])
    partition = {'training': [], 'validation': [], 'testing': []}
    with open(path + 'trainlist03.txt', 'r') as f:
        data = [x.split() for x in f.read().strip().split('\n')]
    labels = {'training': [], 'validation': [], 'testing': []}
    random.seed(1)
    # Bucket clip indices per class, then sample disjoint 20/5 splits.
    classes = dict([(x, []) for x in range(num_classes)])
    for i in range(len(data)):
        classes[int(data[i][1]) - 1].append(i)
    training_sample = []
    validation_sample = []
    for i in classes.keys():
        picked = random.sample(classes[i], 20)
        training_sample.extend(picked)
        # Set membership avoids the original O(n^2) scan of a growing list;
        # indices are unique per class so filtering by `picked` is enough.
        picked_set = set(picked)
        classes[i] = [x for x in classes[i] if x not in picked_set]
        validation_sample.extend(random.sample(classes[i], 5))
    training_sample = set(training_sample)
    validation_sample = set(validation_sample)
    # Strip the ".avi" extension; labels become 0-based class indices.
    X_train = [data[x][0][:-4] for x in range(len(data)) if x in training_sample]
    y_train = [int(data[x][1]) - 1 for x in range(len(data)) if x in training_sample]
    X_val = [data[x][0][:-4] for x in range(len(data)) if x in validation_sample]
    y_val = [int(data[x][1]) - 1 for x in range(len(data)) if x in validation_sample]
    for i, j in enumerate(X_train):
        partition['training'].append(j)
        labels['training'].append(y_train[i])
    for i, j in enumerate(X_val):
        partition['validation'].append(j)
        labels['validation'].append(y_val[i])
    num_frames = 30
    batch_size = 3
    training_set = DataGenerator(partition['training'], labels['training'], batch_size=batch_size,
                                 num_frames=num_frames)
    validation_set = DataGenerator(partition['validation'], labels['validation'], batch_size=batch_size,
                                   num_frames=num_frames)
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")
    torch.cuda.empty_cache()
    cudnn.benchmark = True
    print('Device: ', device)
    model = bninception().cuda()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    num_epochs = 100
    scheduler = WarmupCosineLR(optimizer=optimizer, milestones=[10, num_epochs], warmup_iters=10, min_ratio=1e-7)
    model.train(True)
    print('Started training')
    losses = []
    for epoch in range(num_epochs):
        starttime = datetime.datetime.now()
        idx = 0
        running_loss = 0
        num_batches = 0
        while (idx < len(training_set)):
            curr_batch, curr_labels = training_set[idx]
            if len(curr_batch) == 0:
                break
            # Average per-frame logits over time -> one clip-level logit
            # vector per video, concatenated into a batch.
            outputs = torch.mean(
                model(torch.from_numpy(curr_batch[0].reshape((curr_batch.shape[1], 3, 224, 224))).float().cuda()),
                dim=[0]).view(1, -1)
            for i in range(1, len(curr_batch)):
                output = model(
                    torch.from_numpy(curr_batch[i].reshape((curr_batch.shape[1], 3, 224, 224))).float().cuda())
                outputs = torch.cat((outputs, torch.mean(output, dim=[0]).view(1, -1)), dim=0)
            loss = criterion(outputs, torch.from_numpy(curr_labels).cuda())
            losses.append(loss.item())
            running_loss += loss.item()
            # Bug fix: gradients must be reset each step; the original
            # skipped zero_grad() so gradients accumulated across batches.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # NOTE(review): stepped per batch although milestones/warmup are
            # expressed in epochs -- confirm the schedule is intended.
            scheduler.step()
            idx += len(curr_batch)
            num_batches += 1
        endtime = datetime.datetime.now()
        print('Average loss for epoch {} is: {}'.format(epoch, running_loss / num_batches))
        print('Execution time: ', endtime - starttime)
    torch.save(model.state_dict(), './Model.pt')
    model.eval()
    idx = 0
    predictions = []
    with torch.no_grad():  # inference only: skip autograd bookkeeping
        while (idx < len(labels['validation'])):
            curr_batch, curr_labels = validation_set[idx]
            if len(curr_labels) == 0:
                break
            outputs = torch.mean(
                model(torch.from_numpy(curr_batch[0].reshape((curr_batch.shape[1], 3, 224, 224))).float().cuda()),
                dim=[0]).view(1, -1)
            for i in range(1, len(curr_batch)):
                output = model(torch.from_numpy(curr_batch[i].reshape((curr_batch.shape[1], 3, 224, 224))).float().cuda())
                outputs = torch.cat((outputs, torch.mean(output, dim=[0]).view(1, -1)), dim=0)
            predictions.extend([np.argmax(outputs[i].cpu().data.numpy()) for i in range(outputs.shape[0])])
            idx += len(curr_batch)
    accuracy, classes = evaluate(predictions, labels['validation'])
    print(accuracy)
    print(classes)
if __name__ == '__main__':
    # Script entry point: run training + validation.
    main()
__author__ = "Tomasz Rybotycki"
import abc
from typing import List
from numpy import ndarray
class SimulationStrategyInterface(abc.ABC):
    """Structural (duck-typed) interface for boson-sampling simulation strategies."""

    @classmethod
    def __subclasshook__(cls, subclass):
        # Any class exposing a callable ``simulate`` counts as an implementation.
        candidate = getattr(subclass, "simulate", None)
        return callable(candidate)

    @abc.abstractmethod
    def simulate(self, input_state: ndarray, samples_number: int = 1) -> List[ndarray]:
        """
        Simulate the lossy boson sampling experiment.

        :param input_state: Input state of the simulation.
        :param samples_number: Number of samples one wants to simulate.
        :return: A list of sampled output states.
        """
        raise NotImplementedError
import tensorflow as tf
import numpy as np
from network_models.loss import l2_loss, l2_loss_masked
class Policy_net:
    """TF1 actor-critic network: softmax policy head plus scalar value head."""
    def __init__(self, name: str, env):
        """
        :param name: string, variable-scope name for this network's parameters
        :param env: gym env providing observation_space / action_space
        """
        ob_space = env.observation_space
        act_space = env.action_space
        with tf.variable_scope(name):
            self.obs = tf.placeholder(dtype=tf.float32, shape=[None] + list(ob_space.shape), name='obs')
            with tf.variable_scope('policy_net'):
                layer_1 = tf.layers.dense(inputs=self.obs, units=20, activation=tf.tanh)
                layer_2 = tf.layers.dense(inputs=layer_1, units=20, activation=tf.tanh)
                # NOTE(review): an extra tanh layer of width act_space.n feeds
                # the softmax layer -- confirm this depth is intentional.
                layer_3 = tf.layers.dense(inputs=layer_2, units=act_space.n, activation=tf.tanh)
                self.act_probs = tf.layers.dense(inputs=layer_3, units=act_space.n, activation=tf.nn.softmax)
            with tf.variable_scope('value_net'):
                layer_1 = tf.layers.dense(inputs=self.obs, units=20, activation=tf.tanh)
                layer_2 = tf.layers.dense(inputs=layer_1, units=20, activation=tf.tanh)
                self.v_preds = tf.layers.dense(inputs=layer_2, units=1, activation=None)
            # Sample from the categorical policy (stochastic) or argmax
            # (deterministic); both flattened to a 1-D batch of actions.
            self.act_stochastic = tf.multinomial(tf.log(self.act_probs), num_samples=1)
            self.act_stochastic = tf.reshape(self.act_stochastic, shape=[-1])
            self.act_deterministic = tf.argmax(self.act_probs, axis=1)
            self.scope = tf.get_variable_scope().name
    def act(self, obs, stochastic=True):
        """Return (actions, value predictions) for a batch of observations."""
        if stochastic:
            return tf.get_default_session().run([self.act_stochastic, self.v_preds], feed_dict={self.obs: obs})
        else:
            return tf.get_default_session().run([self.act_deterministic, self.v_preds], feed_dict={self.obs: obs})
    def get_action_prob(self, obs):
        """Return the policy's action probabilities for ``obs``."""
        return tf.get_default_session().run(self.act_probs, feed_dict={self.obs: obs})
    def get_variables(self):
        """All graph variables under this network's scope."""
        return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.scope)
    def get_trainable_variables(self):
        """Trainable graph variables under this network's scope."""
        return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
class Model_Policy_net:
def __init__(self, name: str, env, obs_dim, act_dim, num_hidden=50, depth=4, lr=1e-4):
"""
:param name: string
:param env: gym env
"""
ob_space = env.observation_space
act_space = env.action_space
regularizer = tf.contrib.layers.l2_regularizer(scale=0.)
init = tf.random_normal_initializer(stddev=0.1)
#init = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32)
with tf.variable_scope(name):
self.obs = tf.placeholder(dtype=tf.float32, shape=[None] + [obs_dim], name='obs')
self.tv = tf.placeholder(dtype=tf.float32, shape=[None] + [act_dim+1], name='tv')
with tf.variable_scope('policy_net'):
#layer_1 = tf.layers.dense(inputs=self.obs, units=64, activation=tf.tanh)
#layer_2 = tf.layers.dense(inputs=layer_1, units=64, activation=tf.tanh)
l = tf.layers.dense(inputs=self.obs,
units=num_hidden,
kernel_regularizer=regularizer,
bias_regularizer=regularizer,
kernel_initializer=init,
bias_initializer=init,
activation=tf.nn.relu)
for i in range(depth-1):
l = tf.layers.dense(inputs=l,
units=num_hidden,
kernel_regularizer=regularizer,
bias_regularizer=regularizer,
kernel_initializer=init,
bias_initializer=init,
activation=tf.nn.relu)
self.means = tf.layers.dense(inputs=l, units=act_dim, activation=None,
kernel_initializer=init,
bias_initializer=init,)
log_vars = tf.get_variable(name='logvars',
shape=(100, act_dim),
dtype=tf.float32,
initializer=tf.constant_initializer(0.),
trainable=True)
log_std = tf.get_variable(name='logstd',
shape=(200, act_dim),
dtype=tf.float32,
initializer=tf.constant_initializer(0.),
trainable=True)
self.log_vars = tf.reduce_sum(log_vars, axis=0, keep_dims=True) + [-2., -2., -2., -2.]
self.log_std = tf.reduce_sum(log_std, axis=0, keep_dims=True)
self.diff = self.means - self.obs[:, :4]
with tf.variable_scope('value_net'):
layer_1 = tf.layers.dense(inputs=self.obs, units=64, activation=tf.nn.relu)
layer_2 = tf.layers.dense(inputs=layer_1, units=64, activation=tf.nn.relu)
#layer_3 = tf.layers.dense(inputs=layer_2, units=64, activation=tf.nn.relu)
self.v_preds = tf.layers.dense(inputs=layer_2, units=1, activation=None)
# Get action samples
batch = tf.shape(self.obs)[0]
use_var = True
if use_var:
batch_log_vars = tf.tile(self.log_vars, [batch, 1])
std = tf.exp(batch_log_vars / 2.0)
else:
batch_log_std = tf.tile(self.log_std, [batch, 1])
std = tf.exp(batch_log_std)
eps = tf.random_normal(shape=(batch, act_dim))
assert std.get_shape().as_list() == eps.get_shape().as_list() == self.means.get_shape().as_list()
self.act_stochastic = self.means + std * eps # Reparametrization trick
self.act_deterministic = self.means
self.scope = tf.get_variable_scope().name
# Direct supervised learning
self.loss = l2_loss_masked(self.act_stochastic, self.tv)
self.train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(self.loss)
def step(self, obs, stochastic=True):
if stochastic:
return tf.get_default_session().run([self.act_stochastic, self.v_preds], feed_dict={self.obs: obs})
else:
return tf.get_default_session().run([self.act_deterministic, self.v_preds], feed_dict={self.obs: obs})
def train_sl(self, given, tv):
return tf.get_default_session().run([self.train_op, self.loss], feed_dict={self.obs: given, self.tv: tv})
def get_sl_loss(self, given, tv):
return tf.get_default_session().run(self.loss, feed_dict={self.obs: given, self.tv: tv})
def get_action_prob(self, obs):
return tf.get_default_session().run(self.act_probs, feed_dict={self.obs: obs})
    def get_variables(self):
        """Return all global variables created under this network's variable scope."""
        return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.scope)
    def get_trainable_variables(self):
        """Return only the trainable variables created under this network's variable scope."""
        return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
class Model_net:
    """
    Supervised forward-model (dynamics) network.

    Maps a stacked (state, action) input window to a predicted 4-D next
    state, trained with a masked L2 loss (see l2_loss_masked).
    """
    def __init__(self, name, env, num_obs_per_state, lr, depth, num_hidden):
        """
        :param name: string, tf variable-scope name for the network
        :param env: gym env
        :param num_obs_per_state: number of stacked observations per input window
        :param lr: Adam learning rate
        :param depth: total number of hidden layers (must be >= 1)
        :param num_hidden: units per hidden layer
        """
        ob_space = env.observation_space  # NOTE(review): read but unused below — confirm
        act_space = env.action_space  # NOTE(review): read but unused below — confirm
        assert type(name) == str
        # scale=0. makes the regularizer a no-op; kept as a tuning hook.
        regularizer = tf.contrib.layers.l2_regularizer(scale=0.)
        with tf.variable_scope(name):
            # Observation is state + action
            # Each of the num_obs_per_state frames contributes 5 values
            # (presumably 4 state dims + 1 action dim — TODO confirm).
            self.given = tf.placeholder(dtype=tf.float32, shape=[None] + [5*num_obs_per_state], name='given')
            # Target: 4 next-state dims plus one extra slot consumed by
            # l2_loss_masked (presumably a mask/weight — TODO confirm).
            self.tv = tf.placeholder(dtype=tf.float32, shape=[None] + [4+1], name='tv')
            with tf.variable_scope('model_net'):
                # First hidden layer ...
                l = tf.layers.dense(inputs=self.given,
                                    units=num_hidden,
                                    kernel_regularizer=regularizer,
                                    bias_regularizer=regularizer,
                                    activation=tf.nn.relu)
                # ... followed by depth-1 further hidden layers.
                for i in range(depth-1):
                    l = tf.layers.dense(inputs=l,
                                        units=num_hidden,
                                        kernel_regularizer=regularizer,
                                        bias_regularizer=regularizer,
                                        activation=tf.nn.relu)
                # Linear output head: the predicted 4-D next state.
                self.pred = tf.layers.dense(inputs=l, units=4, activation=None)
                #self.state_mean = tf.layers.dense(inputs=layer_3, units=4, activation=tf.nn.softmax)
                #self.state_sigma = tf.layers.dense(inputs=layer_3, units=4, activation=tf.nn.softmax)
                #self.pred = tf.layers.dense(inputs=layer_3, units=4, activation=tf.nn.softmax)
            self.loss = l2_loss_masked(self.pred, self.tv)
            #self.loss = l2_loss(self.pred, self.tv)
            self.train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(self.loss)
            # Remember the fully-qualified scope name for variable collection lookups.
            self.scope = tf.get_variable_scope().name
    def step(self, given, stochastic=True):
        """
        Predict the next state for a batch of inputs.

        :param stochastic: accepted for interface parity but unused — the
            model is deterministic.
        """
        return tf.get_default_session().run(self.pred, feed_dict={self.given: given})
    def train_sl(self, given, tv):
        """Run one supervised training step; returns [train_op result (None), loss]."""
        return tf.get_default_session().run([self.train_op, self.loss], feed_dict={self.given: given, self.tv: tv})
    def get_variables(self):
        """Return all global variables created under this network's scope."""
        return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.scope)
    def get_trainable_variables(self):
        """Return the trainable variables created under this network's scope."""
        return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
"""
Classes that handle array indexing.
"""
import sys
import numpy as np
from numbers import Integral
from itertools import zip_longest
from openmdao.utils.general_utils import shape2tuple
from openmdao.utils.om_warnings import issue_warning, OMDeprecationWarning
def array2slice(arr):
    """
    Try to convert an array to slice.

    Conversion is only attempted for a 1D array of non-negative integers
    with constant spacing.

    Parameters
    ----------
    arr : ndarray
        The index array to be represented as a slice.

    Returns
    -------
    slice or None
        If slice conversion is possible, return the slice, else return None.
    """
    if arr.ndim == 1 and arr.dtype.kind in ('i', 'u'):
        if arr.size > 1:  # see if 1D array will convert to slice
            if arr[0] >= 0 and arr[1] >= 0:
                span = arr[1] - arr[0]
            else:
                return None
            if np.all((arr[1:] - arr[:-1]) == span):
                if span > 0:
                    # array is increasing with constant span
                    return slice(arr[0], arr[-1] + 1, span)
                elif span < 0:
                    # array is decreasing with constant span
                    if arr[-1] < 0:
                        # The sequence descends into negative values; a slice
                        # built from them would mean "count from the end" and
                        # not match the array, so don't convert.
                        return None
                    stop = arr[-1] - 1
                    # A decreasing sequence that ends at 0 needs stop=None:
                    # stop=-1 would be interpreted as "the last element" and
                    # produce an empty (wrong) selection.
                    return slice(arr[0], None if stop < 0 else stop, span)
                # span == 0 (repeated value): not representable as a slice
        elif arr.size == 1:
            if arr[0] >= 0:
                return slice(arr[0], arr[0] + 1)
            else:
                return slice(0, 0)
    # empty arrays, multi-dim arrays and non-integer dtypes are not convertible
def _truncate(s):
if len(s) > 40:
return s[:20] + ' ... ' + s[-20:]
return s
class Indexer(object):
    """
    Abstract indexing class.

    Parameters
    ----------
    flat_src : bool
        True if we're treating the source as flat.

    Attributes
    ----------
    _src_shape : tuple or None
        Shape of the 'source'. Used to determine actual index or slice values when indices are
        negative or slice contains negative start or stop values or ':' or '...'.
    _shaped_inst : Indexer or None
        Cached shaped_instance if we've computed it before.
    _flat_src : bool
        If True, index is into a flat source array.
    _dist_shape : tuple
        Distributed shape of the source.
    """

    def __init__(self, flat_src=None):
        """
        Initialize attributes.
        """
        self._src_shape = None
        self._dist_shape = None
        self._shaped_inst = None
        self._flat_src = flat_src

    def __call__(self):
        """
        Return the indices in their most efficient form.

        For example, if the original indices were an index array that is convertable to a slice,
        then a slice would be returned.

        This could be either an int, a slice, an index array, or a multidimensional 'fancy' index.
        """
        raise NotImplementedError("No implementation of '__call__' found.")

    def __repr__(self):
        """
        Return simple string representation.

        Returns
        -------
        str
            String representation.
        """
        return f"{self.__class__.__name__}: {str(self)}"

    def copy(self, *args):
        """
        Copy this Indexer.

        Parameters
        ----------
        *args : position args
            Args that are specific to initialization of a derived Indexer.

        Returns
        -------
        Indexer
            A copy of this Indexer.
        """
        inst = self.__class__(*args)
        # carry over all cached/computed state to the copy
        inst.__dict__.update(self.__dict__)
        return inst

    def _set_attrs(self, parent):
        """
        Copy certain attributes from the parent to self.

        Parameters
        ----------
        parent : Indexer
            Parent of this indexer.

        Returns
        -------
        Indexer
            This indexer.
        """
        self._src_shape = parent._src_shape
        self._flat_src = parent._flat_src
        self._dist_shape = parent._dist_shape
        return self

    @property
    def indexed_src_shape(self):
        """
        Return the shape of the result if the indices were applied to a source array.

        Returns
        -------
        tuple
            The shape of the result.
        """
        s = self.shaped_instance()
        if s is None:
            raise RuntimeError(f"Can't get indexed_src_shape of {self} because source shape "
                               "is unknown.")
        # resolve_shape (defined below) simulates numpy indexing shape rules.
        if self._flat_src:
            return resolve_shape(np.product(self._src_shape))[self.flat()]
        else:
            return resolve_shape(self._src_shape)[self()]

    @property
    def indexed_src_size(self):
        """
        Return the size of the result if the index were applied to the source.

        Returns
        -------
        int
            Size of flattened indices.
        """
        return np.product(self.indexed_src_shape, dtype=int)

    def _check_ind_type(self, ind, types):
        # Raise a uniform TypeError for indices of an unsupported type.
        if not isinstance(ind, types):
            raise TypeError(f"Can't create {type(self).__name__} using this "
                            f"kind of index: {ind}.")

    def flat(self, copy=False):
        """
        Return index array or slice into a flat array.

        Parameters
        ----------
        copy : bool
            If True, make sure the array returned is a copy.

        Returns
        -------
        ndarray or slice
            The index into a flat source array.
        """
        raise NotImplementedError("No implementation of 'flat' found.")

    def shaped_instance(self):
        """
        Return a 'shaped' version of this Indexer type.

        This should be overridden for all non-shaped derived classes.

        Returns
        -------
        Indexer
            The 'shaped' Indexer type.  'shaped' Indexers know the extent of the array that
            they are indexing into, or they don't care what the extent is because they don't
            contain negative indices, negative start or stop, ':', or '...'.
        """
        return self

    def shaped_array(self, copy=False, flat=True):
        """
        Return an index array version of the indices that index into a flattened array.

        Parameters
        ----------
        copy : bool
            If True, make sure the array returned is a copy.
        flat : bool
            If True, return a flat array.

        Returns
        -------
        ndarray
            Version of these indices that index into a flattened array.
        """
        s = self.shaped_instance()
        if s is None:
            raise ValueError(f"Can't get shaped array of {self} because it has "
                             "no source shape.")
        return s.as_array(copy=copy, flat=flat)

    def apply(self, subidxer):
        """
        Apply a sub-Indexer to this Indexer and return the resulting indices.

        Parameters
        ----------
        subidxer : Indexer
            The Indexer to be applied to this one.

        Returns
        -------
        ndarray
            The resulting indices (always flat).
        """
        arr = self.shaped_array().ravel()
        # Index our flattened indices with the *sub* indexer.  The previous
        # code used self.flat() here, which ignored `subidxer` entirely and
        # simply re-applied this indexer to itself.
        return arr[subidxer.flat()]

    def set_src_shape(self, shape, dist_shape=None):
        """
        Set the shape of the 'source' array .

        Parameters
        ----------
        shape : tuple or int
            The shape of the 'source' array.
        dist_shape : tuple or None
            If not None, the full distributed shape of the source.

        Returns
        -------
        Indexer
            Self is returned to allow chaining.
        """
        # If the caller never specified flatness, infer it: 0-D/1-D sources are flat.
        if self._flat_src is None and shape is not None:
            self._flat_src = len(shape2tuple(shape)) <= 1
        self._src_shape, self._dist_shape = self._get_shapes(shape, dist_shape)
        if shape is not None:
            self._check_bounds()
        # The cached shaped instance depends on the source shape; invalidate it.
        self._shaped_inst = None
        return self

    def to_json(self):
        """
        Return a JSON serializable version of self.
        """
        raise NotImplementedError("No implementation of 'to_json' found.")

    def _get_shapes(self, shape, dist_shape):
        # Normalize shape/dist_shape into tuples, collapsing them to 1-D when
        # the source is treated as flat.  dist_shape defaults to shape.
        if shape is None:
            return None, None
        shape = shape2tuple(shape)
        if self._flat_src:
            shape = (np.product(shape),)
        if dist_shape is None:
            return shape, shape
        dist_shape = shape2tuple(dist_shape)
        if self._flat_src:
            dist_shape = (np.product(dist_shape),)
        return shape, dist_shape
class ShapedIntIndexer(Indexer):
    """
    Indexer wrapping a single integer index.

    Parameters
    ----------
    idx : int
        The index.
    flat_src : bool
        If True, source is treated as flat.

    Attributes
    ----------
    _idx : int
        The integer index.
    """

    def __init__(self, idx, flat_src=None):
        """
        Initialize attributes.
        """
        super().__init__(flat_src)
        self._check_ind_type(idx, Integral)
        self._idx = idx

    def __call__(self):
        """
        Return the wrapped integer index.

        Returns
        -------
        int
            This index.
        """
        return self._idx

    def __str__(self):
        """
        Return string representation.

        Returns
        -------
        str
            String representation.
        """
        return str(self._idx)

    def copy(self):
        """
        Copy this Indexer.

        Returns
        -------
        Indexer
            A copy of this Indexer.
        """
        return super().copy(self._idx)

    @property
    def min_src_dim(self):
        """
        Return the number of source dimensions.

        Returns
        -------
        int
            The number of dimensions expected in the source array.
        """
        return 1

    @property
    def indexed_src_shape(self):
        """
        Return the shape produced by applying this index to the source.

        Returns
        -------
        tuple
            The shape of the index.
        """
        # A single int into a flat source always selects exactly one element.
        return (1,) if self._flat_src else super().indexed_src_shape

    def as_array(self, copy=False, flat=True):
        """
        Return an index array into a flat array.

        Parameters
        ----------
        copy : bool
            If True, make sure the array returned is a copy.
        flat : bool
            If True, return a flat array.

        Returns
        -------
        ndarray
            The index array.
        """
        # a fresh one-element array is built on every call, so `copy`/`flat`
        # need no special handling here
        return np.atleast_1d(self._idx)

    def flat(self, copy=False):
        """
        Return index array into a flat array.

        Parameters
        ----------
        copy : bool
            If True, make sure the array returned is a copy.

        Returns
        -------
        ndarray
            The index into a flat array.
        """
        return np.atleast_1d(self._idx)

    def _check_bounds(self):
        """
        Check that indices are within the bounds of the source shape.
        """
        if self._src_shape is None:
            return
        extent = self._dist_shape[0]
        if not (-extent <= self._idx < extent):
            raise IndexError(f"index {self._idx} is out of bounds of the source shape "
                             f"{self._dist_shape}.")

    def to_json(self):
        """
        Return a JSON serializable version of self.

        Returns
        -------
        int
            Int version of self.
        """
        return self._idx
class IntIndexer(ShapedIntIndexer):
    """
    Int indexing class that may or may not be 'shaped'.

    Parameters
    ----------
    idx : int
        The index.
    flat_src : bool or None
        If True, treat source as flat.
    """

    def shaped_instance(self):
        """
        Return a 'shaped' version of this Indexer type.

        Returns
        -------
        ShapedIntIndexer or None
            Will return a ShapedIntIndexer if possible, else None.
        """
        if self._shaped_inst is not None:
            return self._shaped_inst
        if self._src_shape is None:
            # can't resolve a (possibly negative) index without a shape
            return None
        # normalize a negative index against the source extent
        offset = self._src_shape[0] if self._idx < 0 else 0
        self._shaped_inst = ShapedIntIndexer(self._idx + offset)._set_attrs(self)
        return self._shaped_inst
class ShapedSliceIndexer(Indexer):
    """
    Abstract slice class that is 'shaped'.

    Parameters
    ----------
    slc : slice
        The slice.
    flat_src : bool
        If True, source is treated as flat.

    Attributes
    ----------
    _slice : slice
        The wrapped slice object.
    """

    def __init__(self, slc, flat_src=None):
        """
        Initialize attributes.
        """
        super().__init__(flat_src)
        self._check_ind_type(slc, slice)
        self._slice = slc

    def __call__(self):
        """
        Return this slice.

        Returns
        -------
        slice
            This slice.
        """
        return self._slice

    def __str__(self):
        """
        Return string representation.

        Returns
        -------
        str
            String representation.
        """
        return f"{self._slice}"

    def copy(self):
        """
        Copy this Indexer.

        Returns
        -------
        Indexer
            A copy of this Indexer.
        """
        return super().copy(self._slice)

    def as_array(self, copy=False, flat=True):
        """
        Return an index array into a flat array.

        Parameters
        ----------
        copy : bool
            If True, make sure the array returned is a copy.
        flat : bool
            If True, return a flat array.

        Returns
        -------
        ndarray
            The index array.
        """
        # use maxsize here since a shaped slice always has positive int start and stop
        arr = np.arange(*self._slice.indices(sys.maxsize), dtype=int)
        if flat:
            return arr
        # Fix: `_orig_shape` is only ever set externally (it is not assigned in
        # this class), so a plain attribute access raised AttributeError for
        # flat=False.  Fall back to the flat array when it is absent.
        orig_shape = getattr(self, '_orig_shape', None)
        if orig_shape is None:
            return arr
        return arr.reshape(orig_shape)

    def flat(self, copy=False):
        """
        Return a slice into a flat array.

        Parameters
        ----------
        copy : bool
            If True, make sure the array returned is a copy.

        Returns
        -------
        slice
            The slice into a flat array.
        """
        # slices are immutable, so ignore copy arg
        return self._slice

    @property
    def min_src_dim(self):
        """
        Return the number of source dimensions.

        Returns
        -------
        int
            The number of dimensions expected in the source array.
        """
        return 1

    def _check_bounds(self):
        """
        Check that indices are within the bounds of the source shape.
        """
        # a slice with start or stop outside of the source range is allowed in numpy arrays
        # and just results in an empty array, but in OpenMDAO that behavior would probably be
        # unintended, so for now make it an error.
        if self._src_shape is not None:
            start = self._slice.start
            stop = self._slice.stop
            sz = np.product(self._dist_shape)
            if (start is not None and (start >= sz or start < -sz)
                    or (stop is not None and (stop > sz or stop < -sz))):
                raise IndexError(f"{self._slice} is out of bounds of the source shape "
                                 f"{self._dist_shape}.")

    def to_json(self):
        """
        Return a JSON serializable version of self.

        Returns
        -------
        list of int or int
            List or int version of self.
        """
        return self.as_array().tolist()
class SliceIndexer(ShapedSliceIndexer):
    """
    Abstract slice class that may or may not be 'shaped'.

    Parameters
    ----------
    slc : slice
        The slice.
    flat_src : bool or None
        If True, treat source as flat.
    """

    def shaped_instance(self):
        """
        Return a 'shaped' version of this Indexer type.

        Returns
        -------
        ShapedSliceIndexer or None
            Will return a ShapedSliceIndexer if possible, else None.
        """
        if self._shaped_inst is not None:
            return self._shaped_inst
        if self._src_shape is None:
            return None
        slc = self._slice
        # Resolve the slice against the source extent when it has a negative
        # start, or a missing/negative stop.
        # NOTE(review): a slice with start=None (e.g. slice(None, 5, -1)) takes
        # the else-branch unresolved; ShapedSliceIndexer.as_array later expands
        # it against sys.maxsize — confirm negative-step slices with a None
        # start are intended to be supported here.
        if (slc.start is not None and slc.start < 0) or slc.stop is None or slc.stop < 0:
            self._shaped_inst = \
                ShapedSliceIndexer(slice(*self._slice.indices(self._src_shape[0])))
        else:
            self._shaped_inst = ShapedSliceIndexer(slc)
        return self._shaped_inst._set_attrs(self)

    def as_array(self, copy=False, flat=True):
        """
        Return an index array into a flat array.

        Parameters
        ----------
        copy : bool
            If True, make sure the array returned is a copy.
        flat : bool
            If True, return a flat array.

        Returns
        -------
        ndarray
            The index array.
        """
        # Delegates to the shaped instance, which knows the source extent.
        return self.shaped_array(copy=copy, flat=flat)

    @property
    def indexed_src_shape(self):
        """
        Return the shape of the result of indexing into the source.

        Returns
        -------
        tuple
            The shape of the index.
        """
        slc = self._slice
        # Fast path: fully-specified bounds on a flat source can be sized
        # without building a shaped instance.
        if self._flat_src and slc.start is not None and slc.stop is not None:
            step = 1 if slc.step is None else slc.step
            return (len(range(slc.start, slc.stop, step)),)
        return super().indexed_src_shape
class ShapedArrayIndexer(Indexer):
    """
    Abstract index array class that knows its source shape.

    Parameters
    ----------
    arr : ndarray
        The index array.
    flat_src : bool
        If True, source is treated as flat.

    Attributes
    ----------
    _arr : ndarray
        The wrapped index array object.
    """

    def __init__(self, arr, flat_src=None):
        """
        Initialize attributes.
        """
        super().__init__(flat_src)
        ndarr = np.asarray(arr)
        # check type
        if ndarr.dtype.kind not in ('i', 'u'):
            raise TypeError(f"Can't create an index array using indices of "
                            f"non-integral type '{ndarr.dtype.type.__name__}'.")
        self._arr = ndarr

    def __call__(self):
        """
        Return this index array.

        Returns
        -------
        int
            This index array.
        """
        return self._arr

    def __str__(self):
        """
        Return string representation.

        Returns
        -------
        str
            String representation.
        """
        return _truncate(f"{self._arr}".replace('\n', ''))

    def copy(self):
        """
        Copy this Indexer.

        Returns
        -------
        Indexer
            A copy of this Indexer.
        """
        return super().copy(self._arr)

    @property
    def min_src_dim(self):
        """
        Return the number of source dimensions.

        Returns
        -------
        int
            The number of dimensions expected in the source array.
        """
        return 1

    def as_array(self, copy=False, flat=True):
        """
        Return an index array into a flat array.

        Parameters
        ----------
        copy : bool
            If True, make sure the array returned is a copy.
        flat : bool
            If True, return a flat array.

        Returns
        -------
        ndarray
            The index array.
        """
        if flat:
            # .flat[:] yields a flattened copy-on-slice view of the indices
            arr = self._arr.flat[:]
        else:
            arr = self._arr
        if copy:
            return arr.copy()
        return arr

    def flat(self, copy=False):
        """
        Return an index array into a flat array.

        Parameters
        ----------
        copy : bool
            If True, make sure the array returned is a copy.

        Returns
        -------
        ndarray
            The index into a flat array.
        """
        if copy:
            return self._arr.flat[:].copy()
        return self._arr.flat[:]

    def _check_bounds(self):
        """
        Check that indices are within the bounds of the source shape.
        """
        if self._src_shape is not None and self._arr.size > 0:
            src_size = np.product(self._dist_shape)
            amax = np.max(self._arr)
            ob = None
            # (the former extra clause `-amax < -src_size` was implied by this
            # test and has been dropped)
            if amax >= src_size:
                ob = amax
            if ob is None:
                amin = np.min(self._arr)
                # negative indices are valid down to -src_size
                if amin < 0 and -amin > src_size:
                    ob = amin
            if ob is not None:
                raise IndexError(f"index {ob} is out of bounds for source dimension of size "
                                 f"{src_size}.")

    def to_json(self):
        """
        Return a JSON serializable version of self.

        Returns
        -------
        list of int or int
            List or int version of self.
        """
        return self().tolist()
class ArrayIndexer(ShapedArrayIndexer):
    """
    Abstract index array class that may or may not be 'shaped'.

    Parameters
    ----------
    arr : ndarray
        The index array.
    flat_src : bool or None
        If True, treat source as flat.
    """

    def shaped_instance(self):
        """
        Return a 'shaped' version of this Indexer type.

        Returns
        -------
        ShapedArrayIndexer or None
            Will return a ShapedArrayIndexer if possible, else None.
        """
        if self._shaped_inst is not None:
            return self._shaped_inst
        if self._src_shape is None:
            return None
        arr = self._arr
        neg_mask = arr < 0
        if np.any(neg_mask):
            # shift negative entries by the source extent; work on a copy so
            # the wrapped array is left untouched
            arr = arr.copy()
            arr[neg_mask] += self._src_shape[0]
        self._shaped_inst = ShapedArrayIndexer(arr)
        return self._shaped_inst._set_attrs(self)

    @property
    def indexed_src_shape(self):
        """
        Return the shape of the result of indexing into the source.

        Returns
        -------
        tuple
            The shape of the index.
        """
        return self._arr.shape
class ShapedMultiIndexer(Indexer):
    """
    Abstract multi indexer class that is 'shaped'.

    Parameters
    ----------
    tup : tuple
        Tuple of indices/slices.
    flat_src : bool
        If True, treat source array as flat.

    Attributes
    ----------
    _tup : tuple
        The wrapped tuple of indices/slices.
    _idx_list : list
        List of Indexers.
    """

    def __init__(self, tup, flat_src=False):
        """
        Initialize attributes.
        """
        # A multi-dimensional index cannot be applied to a flat (1-D) source.
        if flat_src and len(tup) > 1:
            raise RuntimeError(f"Can't index into a flat array with an indexer expecting {len(tup)}"
                               " dimensions.")
        super().__init__(flat_src)
        self._tup = tup
        self._set_idx_list()

    def _set_idx_list(self):
        # Wrap each member of the tuple in its own sub-Indexer.
        self._idx_list = []
        for i in self._tup:
            if isinstance(i, (np.ndarray, list)):  # need special handling here for ndim > 1 arrays
                self._idx_list.append(ArrayIndexer(i, flat_src=self._flat_src))
            else:
                # `indexer` is the module-level IndexMaker factory defined below.
                self._idx_list.append(indexer(i, flat_src=self._flat_src))

    def __call__(self):
        """
        Return this multidimensional index.

        Returns
        -------
        int
            This multidimensional index.
        """
        return tuple(i() for i in self._idx_list)

    def __str__(self):
        """
        Return string representation.

        Returns
        -------
        str
            String representation.
        """
        return str(self._tup)

    def copy(self):
        """
        Copy this Indexer.

        Returns
        -------
        Indexer
            A copy of this Indexer.
        """
        return super().copy(self._tup)

    @property
    def min_src_dim(self):
        """
        Return the number of source dimensions.

        Returns
        -------
        int
            The number of dimensions expected in the source array.
        """
        return len(self._idx_list)

    def as_array(self, copy=False, flat=True):
        """
        Return an index array into a flat array.

        Parameters
        ----------
        copy : bool
            If True, make sure the array returned is a copy.
        flat : bool
            If True, return a flat array.

        Returns
        -------
        ndarray
            The index array into a flat array.
        """
        if self._src_shape is None:
            raise ValueError(f"Can't determine extent of array because source shape is not known.")
        # Apply this multi-index to an array of flat positions; the selected
        # values are then the flat indices this indexer represents.
        idxs = np.arange(np.product(self._src_shape), dtype=np.int32).reshape(self._src_shape)
        if flat:
            return idxs[self()].ravel()
        else:
            return idxs[self()]

    def flat(self, copy=False):
        """
        Return an index array into a flat array.

        Parameters
        ----------
        copy : bool
            If True, make sure the array returned is a copy.

        Returns
        -------
        ndarray
            An index array into a flat array.
        """
        return self.shaped_array(copy=copy, flat=True)

    def set_src_shape(self, shape, dist_shape=None):
        """
        Set the shape of the 'source' array .

        Parameters
        ----------
        shape : tuple or int
            The shape of the 'source' array.
        dist_shape : tuple or None
            If not None, the full distributed shape of the source.

        Returns
        -------
        Indexer
            Self is returned to allow chaining.
        """
        self._check_src_shape(shape2tuple(shape))
        super().set_src_shape(shape, dist_shape)
        if shape is None:
            return self
        # Propagate the per-dimension extents down to the sub-indexers.
        if self._flat_src:
            for i in self._idx_list:
                i.set_src_shape(self._src_shape, self._dist_shape)
        else:
            for i, s, ds in zip(self._idx_list, self._src_shape, self._dist_shape):
                i.set_src_shape(s, ds)
        return self

    def _check_src_shape(self, shape):
        # The source must have at least as many dimensions as there are sub-indices.
        if shape is not None and not self._flat_src and len(shape) < len(self._idx_list):
            raise ValueError(f"Can't set source shape to {shape} because indexer {self} expects "
                             f"{len(self._idx_list)} dimensions.")

    def _check_bounds(self):
        """
        Check that indices are within the bounds of the source shape.
        """
        if self._src_shape is not None:
            for i in self._idx_list:
                i._check_bounds()

    def to_json(self):
        """
        Return a JSON serializable version of self.

        Returns
        -------
        list of int or int
            List or int version of self.
        """
        return self.as_array().tolist()
class MultiIndexer(ShapedMultiIndexer):
    """
    Abstract multi indexer class that may or may not be 'shaped'.

    Parameters
    ----------
    tup : tuple
        Tuple of indices/slices.
    flat_src : bool
        If True, treat source array as flat.
    """

    def shaped_instance(self):
        """
        Return a 'shaped' version of this Indexer type.

        Returns
        -------
        ShapedMultiIndexer or None
            Will return a ShapedMultiIndexer if possible, else None.
        """
        if self._shaped_inst is not None:
            return self._shaped_inst
        if self._src_shape is None:
            return None
        try:
            # Each sub-indexer contributes its shaped (resolved) indices; a
            # sub-indexer without a shape returns None and calling None()
            # raises, landing us in the except branch.
            parts = tuple(sub.shaped_instance()() for sub in self._idx_list)
            self._shaped_inst = ShapedMultiIndexer(parts, flat_src=self._flat_src)
        except Exception:
            # best effort: no shaped equivalent is available
            self._shaped_inst = None
        else:
            self._shaped_inst.set_src_shape(self._src_shape)
            self._shaped_inst._set_attrs(self)
        return self._shaped_inst
class EllipsisIndexer(Indexer):
    """
    Abstract multi indexer class that is 'shaped'.

    Parameters
    ----------
    tup : tuple
        Tuple of indices/slices.
    flat_src : bool
        If True, treat source array as flat.

    Attributes
    ----------
    _tup : tuple
        The wrapped tuple of indices/slices (it contains an ellipsis).
    """

    def __init__(self, tup, flat_src=None):
        """
        Initialize attributes.
        """
        super().__init__(flat_src)
        tlist = []
        # convert any internal lists/tuples to arrays
        for i, v in enumerate(tup):
            if isinstance(v, (list, tuple)):
                v = np.atleast_1d(v)
            tlist.append(v)
        self._tup = tuple(tlist)

    def __call__(self):
        """
        Return the 'default' form of the indices.

        Returns
        -------
        tuple
            Tuple of indices and/or slices.
        """
        return self._tup

    def __str__(self):
        """
        Return string representation.

        Returns
        -------
        str
            String representation.
        """
        return f"{self._tup}"

    def copy(self):
        """
        Copy this Indexer.

        Returns
        -------
        EllipsisIndexer
            A copy of this Indexer.
        """
        return super().copy(self._tup)

    @property
    def min_src_dim(self):
        """
        Return the number of source dimensions.

        Returns
        -------
        int
            The number of dimensions expected in the source array.
        """
        # the '...' entry itself doesn't require a source dimension
        mn = len(self._tup) - 1
        return mn if mn > 1 else 1

    def shaped_instance(self):
        """
        Return a 'shaped' version of this Indexer type.

        Returns
        -------
        A shaped Indexer or None
            Will return some kind of shaped Indexer if possible, else None.
        """
        if self._shaped_inst is not None:
            return self._shaped_inst
        if self._src_shape is None:
            return None
        lst = [None] * len(self._src_shape)
        # number of full slice dimensions
        nfull = len(self._src_shape) - len(self._tup) + 1
        i = 0
        # expand the '...' into `nfull` full slices, keeping other entries as-is
        for ind in self._tup:
            if ind is ...:
                for j in range(nfull):
                    lst[i] = slice(None)
                    i += 1
            else:
                lst[i] = ind
                i += 1
        # re-dispatch through the factory so the result gets the right Indexer type
        if len(lst) == 1:
            idxer = indexer(lst[0])
        else:
            idxer = indexer(tuple(lst))
        idxer.set_src_shape(self._src_shape)
        self._shaped_inst = idxer.shaped_instance()
        return self._shaped_inst._set_attrs(self)

    def as_array(self, copy=False, flat=True):
        """
        Return an index array into a flat array.

        Parameters
        ----------
        copy : bool
            If True, make sure the array returned is a copy.
        flat : bool
            If True, return a flat array.

        Returns
        -------
        ndarray
            The index array.
        """
        return self.shaped_array(copy=copy, flat=flat)

    def flat(self, copy=False):
        """
        Return an index array into a flat array.

        Parameters
        ----------
        copy : bool
            If True, make sure the array returned is a copy.

        Returns
        -------
        ndarray
            An index array into a flat array.
        """
        return self.as_array(copy=copy)

    def _check_bounds(self):
        """
        Check that indices are within the bounds of the source shape.
        """
        # bounds can only be checked once the ellipsis has been resolved
        s = self.shaped_instance()
        if s is not None:
            s._check_bounds()

    def to_json(self):
        """
        Return a JSON serializable version of self.

        Returns
        -------
        list of int or int
            A list or int version of self.
        """
        return self.as_array().tolist()
class IndexMaker(object):
    """
    A Factory for Indexer objects.
    """

    def __call__(self, idx, src_shape=None, flat_src=False):
        """
        Return an Indexer instance based on the passed indices/slices.

        Parameters
        ----------
        idx : int, ndarray, slice, or tuple
            Some sort of index/indices/slice.
        src_shape : tuple or None
            Source shape if known.
        flat_src : bool
            If True, indices are into a flat source.

        Returns
        -------
        Indexer
            The Indexer instance we created based on the args.
        """
        if idx is ...:
            idxer = EllipsisIndexer((idx,), flat_src=flat_src)
        # NOTE(review): numpy integer scalars (np.int64 etc.) are not `int`
        # and fall through to the array branch below; also bool is a subclass
        # of int and lands here — confirm both are intended.
        elif isinstance(idx, int):
            idxer = IntIndexer(idx, flat_src=flat_src)
        elif isinstance(idx, slice):
            idxer = SliceIndexer(idx, flat_src=flat_src)
        elif isinstance(idx, tuple):
            multi = len(idx) > 1
            for i in idx:
                if i is ...:
                    multi = len(idx) > 2  # ... doesn't count toward limit of dimensions
                    idxer = EllipsisIndexer(idx, flat_src=flat_src)
                    break
            else:
                idxer = MultiIndexer(idx, flat_src=flat_src)
            if flat_src and multi:
                raise RuntimeError("Can't use a multdimensional index into a flat source.")
        else:
            # anything else (lists, arrays, scalars) is coerced to an array
            arr = np.atleast_1d(idx)
            if arr.ndim == 1:
                idxer = ArrayIndexer(arr, flat_src=flat_src)
            else:
                # mirror numpy's deprecation of non-tuple multidimensional sequences
                issue_warning("Using a non-tuple sequence for multidimensional indexing is "
                              "deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the "
                              "future this will be interpreted as an array index, "
                              "`arr[np.array(seq)]`, which will result either in an error or a "
                              "different result.")
                idxer = MultiIndexer(tuple(idx), flat_src=flat_src)
        if src_shape is not None:
            if flat_src:
                src_shape = (np.product(src_shape),)
            idxer.set_src_shape(src_shape)
        return idxer

    def __getitem__(self, idx):
        """
        Return an Indexer based on idx.

        This allows slice-literal syntax, e.g. ``indexer[1:5]``.

        Parameters
        ----------
        idx : int, ndarray, slice or tuple
            The passed indices/slices.

        Returns
        -------
        Indexer
            The Indexer instance we created based on the args.
        """
        return self(idx)
# Module-level singleton factory: `indexer(idx, ...)` or `indexer[...]`
# builds the appropriate Indexer subclass for the given index expression.
indexer = IndexMaker()
def _convert_ellipsis_idx(shape, idx):
lst = [None] * len(shape)
# number of full slice dimensions
nfull = len(shape) - len(idx) + 1
i = 0
for ind in idx:
if ind is ...:
for j in range(nfull):
lst[i] = slice(None)
i += 1
else:
lst[i] = ind
i += 1
return tuple(lst)
class resolve_shape(object):
    """
    Class that computes the result shape from a source shape and an index.

    Parameters
    ----------
    shape : tuple
        The shape of the source.

    Attributes
    ----------
    _shape : tuple
        The shape of the source.
    """

    def __init__(self, shape):
        """
        Initialize attributes.

        Parameters
        ----------
        shape : tuple or int
            Shape of the source.
        """
        self._shape = shape2tuple(shape)

    def __getitem__(self, idx):
        """
        Return the shape of the result of indexing into the source with index idx.

        Parameters
        ----------
        idx : int, slice, tuple, ndarray
            The index into the source.

        Returns
        -------
        tuple
            The shape after indexing.
        """
        if not isinstance(idx, tuple):
            idx = (idx,)
            is_tup = False
        else:
            is_tup = True
        # expand any '...' into explicit full slices first
        for i in idx:
            if i is ...:
                idx = _convert_ellipsis_idx(self._shape, idx)
                break
        if len(self._shape) < len(idx):
            raise ValueError(f"Index {idx} dimension too large to index into shape {self._shape}.")
        lens = []
        seen_arr = False
        arr_shape = None  # to handle multi-indexing where individual sub-arrays have a shape
        # Walk shape and index in lockstep; trailing unindexed dims pass through.
        for dim, ind in zip_longest(self._shape, idx):
            if ind is None:
                lens.append(dim)
            elif isinstance(ind, slice):
                lens.append(len(range(*ind.indices(dim))))
            elif isinstance(ind, np.ndarray):
                if not seen_arr:
                    seen_arr = True
                    if ind.ndim > 1:
                        # NOTE(review): this mismatch check can never fire —
                        # arr_shape is still None for the first array and later
                        # arrays are skipped entirely (seen_arr is True).
                        if arr_shape is not None and arr_shape != ind.shape:
                            raise ValueError("Multi-index has index sub-arrays of different shapes "
                                             f"({arr_shape} != {ind.shape}).")
                        arr_shape = ind.shape
                    else:
                        # only first array idx counts toward shape
                        lens.append(ind.size)
            # int indexers don't count toward shape (scalar array has shape ())
            elif not isinstance(ind, Integral):
                raise TypeError(f"Index {ind} of type '{type(ind).__name__}' is invalid.")
        if arr_shape is not None:
            return arr_shape
        # A tuple index returns exactly the collected dims (possibly ()).
        # (The old `elif is_tup: return ()` branch was unreachable: when
        # is_tup is True the branch above already returns tuple(lens).)
        if is_tup or lens:
            return tuple(lens)
        # non-tuple scalar index: treated as selecting a single element
        return (1,)
# Since this is already user facing we'll leave it as is, and just use the output of
# __getitem__ to initialize our Indexer object that will be used internally.
class Slicer(object):
    """
    Helper class that can be used when a slice is needed for indexing.

    Indexing an instance simply hands back whatever index expression was
    used, e.g. ``slicer[1:5, ::2]`` evaluates to ``(slice(1, 5), slice(None,
    None, 2))``, letting callers build slice objects with literal syntax.
    """

    def __getitem__(self, val):
        """
        Pass through indices or slice.

        Parameters
        ----------
        val : int or slice object or tuples of slice objects
            Indices or slice to return.

        Returns
        -------
        indices : int or slice object or tuples of slice objects
            Indices or slice to return.
        """
        return val
# Module-level instance of the Slicer class to be used by users, e.g.
# `slicer[0:5:2]` to build a slice with literal syntax.
slicer = Slicer()
"""Whittaker filter V-curve optimization os S."""
from math import log, sqrt
import numpy
from numba import guvectorize
from numba.core.types import float64, int16
from ._helper import lazycompile
from .ws2d import ws2d
@lazycompile(
    guvectorize(
        [(float64[:], float64, float64[:], int16[:], float64[:])],
        "(n),(),(m) -> (n),()",
        nopython=True,
    )
)
def ws2doptv(y, nodata, llas, out, lopt):
    """
    Whittaker filter V-curve optimization of S.

    Smooths ``y`` with the Whittaker smoother, selecting the smoothing
    parameter S by minimizing the V-curve over the grid of log10(S)
    candidates in ``llas``.

    Args:
        y (numpy.array): raw data array (1d, expected in float64)
        nodata (double, int): nodata value
        llas (numpy.array): 1d array of log10(s) values to use for optimization
        out (numpy.array): output array receiving the rounded smoothed signal
        lopt (numpy.array): 1-element output receiving the optimized S value
    """
    m = y.shape[0]
    w = numpy.zeros(y.shape)
    # Build weights: 0 for nodata samples, 1 otherwise; n counts valid samples.
    n = 0
    for ii in range(m):
        if y[ii] == nodata:
            w[ii] = 0
        else:
            n += 1
            w[ii] = 1
    if n > 1:
        m1 = m - 1
        m2 = m - 2
        nl = len(llas)
        nl1 = nl - 1
        i = 0
        k = 0
        # fits/pens: log fidelity and log roughness per candidate S
        fits = numpy.zeros(nl)
        pens = numpy.zeros(nl)
        z = numpy.zeros(m)
        diff1 = numpy.zeros(m1)
        lamids = numpy.zeros(nl1)
        v = numpy.zeros(nl1)
        # Compute v-curve
        for lix in range(nl):
            lmda = pow(10, llas[lix])
            z[:] = ws2d(y, lmda, w)
            # weighted sum of squared residuals (fidelity term)
            for i in range(m):
                w_tmp = w[i]
                y_tmp = y[i]
                z_tmp = z[i]
                fits[lix] += pow(w_tmp * (y_tmp - z_tmp), 2)
            fits[lix] = log(fits[lix])
            # sum of squared second differences (roughness/penalty term)
            for i in range(m1):
                z_tmp = z[i]
                z2 = z[i + 1]
                diff1[i] = z2 - z_tmp
            for i in range(m2):
                z_tmp = diff1[i]
                z2 = diff1[i + 1]
                pens[lix] += pow(z2 - z_tmp, 2)
            pens[lix] = log(pens[lix])
        # Construct v-curve
        llastep = llas[1] - llas[0]
        # V-curve value: distance travelled in (log fit, log penalty) space
        # per unit of log10(S), evaluated at grid midpoints.
        for i in range(nl1):
            l1 = llas[i]
            l2 = llas[i + 1]
            f1 = fits[i]
            f2 = fits[i + 1]
            p1 = pens[i]
            p2 = pens[i + 1]
            v[i] = sqrt(pow(f2 - f1, 2) + pow(p2 - p1, 2)) / (log(10) * llastep)
            lamids[i] = (l1 + l2) / 2
        # argmin of the v-curve selects the optimal midpoint
        vmin = v[k]
        for i in range(1, nl1):
            if v[i] < vmin:
                vmin = v[i]
                k = i
        lopt[0] = pow(10, lamids[k])
        # final smoothing with the optimized S, rounded into the int16 output
        z = ws2d(y, lopt[0], w)
        numpy.round_(z, 0, out)
    else:
        # not enough valid samples to smooth: pass input through unchanged
        out[:] = y[:]
        lopt[0] = 0.0
"""
data_io
- data_io.OutputReader is a class for reading the output simulated data.
- data_io.MFHandler is a class for reading and writing the input to the network simulation (mossy fiber information).
- repack_dict converts the spike time data in a dictionary form to a table in a pandas.DataFrame format.
"""
import os
import pandas as pd
import numpy as np
from . import geometry
from . import nrnvector
from pathlib import Path
def rpath(f):
    """Decorator: call *f* with *filename* resolved relative to ``self.root``."""
    def wrapped(self=None, filename=None):
        full_path = self.root.joinpath(filename)
        return f(self, full_path)
    return wrapped
def attach_coords(data, coords):
    """Append coordinate columns ('x', 'y'[, 'z']) to *data*.

    ``data['cell']`` holds 1-based cell ids indexing rows of *coords*.
    """
    cell_ids = data['cell'].values
    ref = coords.values
    n_dims = ref.shape[1]
    axis_names = ['x', 'y', 'z'][:n_dims]
    # Look up each cell's coordinates (ids are 1-based, rows 0-based).
    pos = pd.DataFrame({name: ref[cell_ids - 1, d]
                        for d, name in enumerate(axis_names)})
    return pd.concat([data, pos], axis=1)
class OutputReader(object):
    """Reader for simulated network output stored under a results directory.

    Exposes spike times, cell coordinates and connectivity as pandas
    DataFrames.  Methods decorated with ``@rpath`` take filenames relative
    to the resolved ``root`` directory.
    """

    def __init__(self, root):
        # Resolve the results directory, load the optional run description,
        # and identify the single 'set*' stimulus-set subdirectory.
        if not os.path.exists(root):
            raise Exception('No data found at ' + root)
        else:
            self.root = Path(root).resolve()
        desc_file = [f for f in self.root.glob('*DESCRIPTION.txt') if f.is_file()]
        if len(desc_file) > 0:
            desc_file, = desc_file
            with open(desc_file) as f:
                self.desc = f.read()
            print(self.desc)
        else:
            self.desc = ""
        # Exactly one 'set*' directory is expected; unpacking enforces that.
        caseset, = [d for d in self.root.glob(pattern='set*') if d.is_dir()]
        self.caseset = caseset.name
        desc_set = ('Used set = %s' % self.caseset)
        print(desc_set)
        self.desc += desc_set

    @rpath
    def read_neuron_vectors(self, filename):
        """Read a NEURON vector file into a DataFrame with 'time'/'cell' columns."""
        def _read_neuron_vectors(filename):
            dsize = nrnvector.get_all_data_size(filename)
            print('Found ', dsize[0], 'spikes from', dsize[1], 'cells.')
            spiketime, cell, _ = nrnvector.read_neuron_vectors(filename, dsize)
            return pd.DataFrame({'time': spiketime, 'cell': cell})
        return _read_neuron_vectors(filename)

    @rpath
    def read_sorted_coords(self, filename):
        """Read a space-separated coordinate table; index becomes 1-based cell id."""
        def _read_sorted_coords(filename):
            coords = pd.read_table(filename, names='xyz', sep=' ')
            coords.index = coords.index + 1
            return coords
        return _read_sorted_coords(filename)

    @rpath
    def read_MF_coords(self, filename):
        """Read mossy-fiber x/y coordinates from a tab-separated text file."""
        def _read_out_MFcoords(filename):
            coords = []
            with open(filename) as f:
                for l in f.readlines():
                    # Materialize the parsed pair: map() is lazy in Python 3,
                    # so calling len() on it would raise TypeError.
                    xy = [float(v) for v in l.strip('\n').split('\t')[:2]]
                    if len(xy) == 2:
                        coords.append(xy)
            return pd.DataFrame(np.array(coords), columns=['x', 'y'])
        return _read_out_MFcoords(filename)

    def read_coords(self, cell):
        """Read coordinates for a cell type: 'grc', 'goc', or 'mf'."""
        celldict = {'grc': 'GCcoordinates.sorted.dat',
                    'goc': 'GoCcoordinates.sorted.dat',
                    'mf': 'MFcoordinates.dat'}
        if cell not in celldict:
            raise IOError(cell + ' not found in ' + str(celldict.keys()))
        else:
            fcoord = celldict[cell]
        # Sorted tables and MF coordinates use different on-disk formats.
        if 'sorted' in fcoord:
            fread_coord = self.read_sorted_coords
        else:
            if cell == 'mf':
                fread_coord = self.read_MF_coords
            else:
                raise RuntimeError('Do not know how to read the coordinates for ' + cell)
        coords = fread_coord(fcoord)
        return coords

    def read_spike_data(self, cell, with_coords=True):
        """Read spike times for a cell type, optionally attaching coordinates."""
        celldict = {'grc': 'Gspiketime.bin',
                    'goc': 'GoCspiketime.bin',
                    'mf': 'MFspiketime.bin'}
        if cell not in celldict:
            raise Exception(cell + ' not found in ' + str(celldict.keys()))
        else:
            fspike = celldict[cell]
        spikedata = self.read_neuron_vectors(fspike)
        if with_coords:
            spikedata = attach_coords(spikedata, self.read_coords(cell))
        return spikedata

    def read_connectivity(self, pre, post, with_delay=False, with_coords=True):
        """Read a pre->post connectivity table.

        For granule-cell presynapses, the pathway ('aa' or 'pf') must be
        given instead of 'grc'.
        """
        if pre == 'grc':
            raise RuntimeError('You need to specify the pathway (aa or pf)')
        conndict = {'mf->grc': 'MFtoGC',
                    'mf->goc': 'MFtoGoC',
                    'aa->goc': 'AxontoGoC',
                    'pf->goc': 'PFtoGoC',
                    'goc->grc': 'GoCtoGC'}
        conn = pre + '->' + post

        def _read_neuron_vectors(filename):
            dsize = nrnvector.get_all_data_size(filename)
            print('Found ', dsize[0], 'connections to', dsize[1], 'cells.')
            precs, postcs, offset = nrnvector.read_neuron_vectors(filename, dsize)
            # Postsynaptic ids are stored with an offset; undo it here.
            postcs = postcs - offset
            return pd.DataFrame({'pre': precs.astype(int), 'cell': postcs})

        if conn not in conndict:
            raise Exception(conn + ' not found in ' + str(conndict.keys()))
        else:
            fconn = conndict[conn] + '.bin'
        conndata = _read_neuron_vectors(self.root.joinpath(fconn))
        if with_coords:
            conndata = attach_coords(conndata, self.read_coords(post))
        return conndata
class MFHandler(object):
    """Reads and writes mossy-fiber (MF) input data for the network simulation."""

    def __init__(self, root):
        """
        `mf = MFHandler(path)` creates an MFHandler object to read and write data at _path_.
        """
        if not os.path.exists(root):
            raise Exception('No data found at ' + root)
        else:
            # pathlib.Path has no abspath(); resolve() is the pathlib
            # equivalent (and matches OutputReader's constructor).
            self.root = Path(root).resolve()

    def read_spike_data(self, with_coords=True):
        "xy = MFHandler.read_spike_data() read MF data file based on the number type (int if it's length file; float if it's spiketrain file)"
        def _read_datasp(filename):
            # One line per active fiber: tab-separated spike times (floats).
            time = []
            with open(filename) as f:
                for l in f.readlines():
                    time.append([float(x) for x in l.strip('\n').split('\t') if len(x) > 0])
            return time
        datasp = _read_datasp(self.root.joinpath('datasp.dat'))

        def _read_l(filename):
            # One spike count (int) per non-empty line.
            ldata = []
            with open(filename) as f:
                for l in f.readlines():
                    temp = [x for x in l.strip('\n').split('\t') if len(x) > 0]
                    if len(temp) > 0:
                        if len(temp) == 1:
                            ldata.append(int(temp[0]))
                        else:
                            raise Exception('Something is wrong.')
            return ldata
        l = _read_l(self.root.joinpath('l.dat'))

        def _check_length(datasp, l):
            # Builtin sum: np.sum over a generator is deprecated and unreliable.
            checksum = sum(int(len(sp1) == l1) for sp1, l1 in zip(datasp, l))
            if checksum != len(l):
                raise Exception('l.dat and datasp.dat are inconsistent.')
            else:
                print('Successfully read MF spike time data.')
        _check_length(datasp, l)

        def _read_active_cells(filename):
            with open(filename) as f:
                x = [int(x) for x in f.read().strip('\n').split('\t') if len(x) > 0]
            return x
        active_cells = _read_active_cells(self.root.joinpath('activeMfibres1.dat'))

        # Flatten the ragged per-fiber spike lists into parallel arrays.
        nspikes = np.sum(l)
        time = np.empty((nspikes,), dtype=float)
        cells = np.empty((nspikes,), dtype=int)
        count = 0
        for i, data in enumerate(datasp):
            time[count:(count + len(data))] = data
            cells[count:(count + len(data))] = active_cells[i]
            count = count + len(data)
        df = pd.DataFrame({'time': time, 'cell': cells})
        if with_coords:
            df = attach_coords(df, self.read_coordinates())
        return df

    def read_coordinates(self):
        """xy = MFHandler.read_coordinates() read the coordinates of mossy fibers"""
        def _read_set_MFcoordinates(filename):
            import csv
            data = []
            results = []
            for row in csv.reader(open(filename), delimiter=' '):
                data.append(row)
            for d in data:
                d = [float(i) for i in d]
                results.append(d)
            xy = pd.DataFrame(np.array(results), columns=['x', 'y'])
            return xy
        return _read_set_MFcoordinates(self.root.joinpath('MFCr.dat'))

    def read_tstop(self):
        """Extract the simulation stop time from Parameters.hoc.

        Scans for the first line containing 'stoptime' and returns the first
        integer on it as a float.
        """
        import re
        re1 = '.*?(\\d+)'
        rg = re.compile(re1, re.IGNORECASE | re.DOTALL)
        with open(self.root.joinpath('Parameters.hoc'), 'r') as f:
            line = ''
            while 'stoptime' not in line:
                line = f.readline()
            m = rg.search(line)
            if m:
                int1 = m.group(1)
            else:
                raise RuntimeError("Cannot extract stoptime.")
        return float(int1)
def repack_dict(d, coords=None):
    """Convert a {cell_id: [spike times]} dict into a long-format DataFrame.

    Args:
        d: mapping from cell id to a sequence of spike times.
        coords: optional coordinate table (1-based cell ids); when given,
            x/y(/z) columns are attached via attach_coords.
    Returns:
        DataFrame with 'time' and 'cell' columns (plus coordinates if requested).
    """
    # Builtin sum: np.sum over a generator is deprecated and unreliable.
    nspikes = sum(len(d[i]) for i in d)
    count = 0
    time = np.empty((nspikes,), dtype=float)
    cells = np.empty((nspikes,), dtype=int)
    # Flatten the ragged per-cell lists into parallel time/cell arrays.
    for i in d:
        data = d[i]
        time[count:(count + len(data))] = data
        cells[count:(count + len(data))] = i
        count = count + len(data)
    df = pd.DataFrame({'time': time, 'cell': cells})
    if coords is not None:
        df = attach_coords(df, coords)
    return df
def save_spikes_mat(root, savepath, extra_info=''):
    """save_spikes_mat(output root, where to save, extra info str)

    Reads spike data for all cell types from *root* and writes a MATLAB
    .mat file under *savepath*.  Returns the path of the written file.
    """
    cells = ['grc', 'goc', 'mf']
    out = OutputReader(root)
    data = dict((c, out.read_spike_data(c)) for c in cells)

    def add_location_data(d, df):
        # Stack coordinate columns into a 3xN ('xyz') or 2xN ('xy') array.
        if 'z' in df.keys():
            d['xyz'] = np.vstack([df[x] for x in 'xyz'])
        else:
            d['xy'] = np.vstack([df[x] for x in 'xy'])
        return d

    def spikedf_to_dict(df):
        # Convert a spike DataFrame to plain arrays for savemat.
        d = {}
        d['cell'] = df.cell.values
        d['time'] = df.time.values
        d = add_location_data(d, df)
        return d

    def reformat_dict(data, fformat):
        outd = {}
        for k in data:
            outd[k] = fformat(data[k])
        return outd

    temp = reformat_dict(data, spikedf_to_dict)
    temp['description'] = out.desc
    # pathlib.Path has no splitext(); the data id is the root directory's
    # extension without the leading dot, which .suffix[1:] provides.
    dataid = out.root.suffix[1:]
    temp['stimset'] = str(out.caseset)
    locdata = dict((c + 'xy', out.read_coords(c)) for c in cells)
    for k in locdata:
        temp[k] = add_location_data({}, locdata[k])
    from scipy.io import savemat
    p = Path(savepath).joinpath('spiketime_' + dataid + '_' + out.caseset + '_' + extra_info + '.mat')
    savemat(p, temp, do_compression=True)
    return p
import os
import subprocess
import sys
import numpy
import argparse
import pysam
import vcf
import pybedtools
import logging
from collections import defaultdict, OrderedDict
from utils import makedirs
def uint(value):
    """argparse type: parse *value* as a non-negative, digit-only integer."""
    if not value.isdigit():
        raise argparse.ArgumentTypeError("%s is not digit-only" % value)
    parsed = int(value)
    if parsed < 0:
        raise argparse.ArgumentTypeError("%s is negative" % value)
    return parsed
def gen_restricted_reference(reference, regions_bed, out_reference, use_short_contigs_names=False):
    """Write a FASTA containing one contig per region of *regions_bed*.

    Contigs are named either by their 1-based region index (short names) or
    as chrom_start_end.  The output is faidx-indexed before returning.
    """
    logger = logging.getLogger(gen_restricted_reference.__name__)
    fasta = pysam.Fastafile(reference)
    regions = pybedtools.BedTool(regions_bed)
    with open(out_reference, "w") as out_handle:
        for idx, reg in enumerate(regions, start=1):
            seq = fasta.fetch(reference=str(reg.chrom), start=reg.start, end=reg.end)
            if use_short_contigs_names:
                contig = str(idx)
            else:
                contig = "%s_%d_%d" % (str(reg.chrom), reg.start, reg.end)
            # No leading newline before the very first record.
            prefix = "" if idx == 1 else "\n"
            out_handle.write("{}>{}\n{}".format(prefix, contig, seq))
    pysam.faidx(out_reference)
    logger.info("Lifted over the reference to {}".format(out_reference))
    fasta.close()
    return out_reference
def gen_restricted_vcf(in_vcf, regions_bed, out_vcf, restricted_reference, targeted_samples, flank=0, use_short_contig_names=False):
    """Lift variants from *in_vcf* onto the restricted reference's contigs.

    Only variants fully contained in a region (with *flank* margin) are kept;
    positions are rebased to the region-local contigs of
    *restricted_reference*.  Optionally restricts output to
    *targeted_samples*.  Returns the path of the bgzipped output VCF, or
    None when *in_vcf* is missing.

    NOTE(review): uses Python 2 constructs (``iteritems``, ``xrange``) —
    this module appears to target Python 2.
    """
    logger = logging.getLogger(gen_restricted_vcf.__name__)
    if not in_vcf:
        return None
    if not os.path.isfile(in_vcf):
        logger.error("%s not found" % in_vcf)
        return None
    # Contig names/lengths of the restricted reference become the new header.
    reference_handle = pysam.Fastafile(restricted_reference)
    contigs = list(zip(reference_handle.references, reference_handle.lengths))
    reference_handle.close()
    logger.warning("Setting CN to be String type (not standard VCF spec)...")
    # Override PyVCF's reserved FORMAT types so CN parses as a string.
    vcf.parser.RESERVED_FORMAT = {
        'GT': 'String', 'DP': 'Integer', 'FT': 'String', 'GL': 'Float',
        'GLE': 'String', 'PL': 'Integer', 'GP': 'Float', 'GQ': 'Integer',
        'HQ': 'Integer', 'PS': 'Integer', 'PQ': 'Integer', 'EC': 'Integer',
        'MQ': 'Integer',
        # Keys used for structural variants
        'CN': 'String', 'CNQ': 'Float', 'CNL': 'Float', 'NQ': 'Integer',
        'HAP': 'Integer', 'AHAP': 'Integer'
    }
    # get the base name and use it in the output
    vcf_template_reader = vcf.Reader(open(in_vcf, "r"))
    vcf_template_reader.metadata["reference"] = restricted_reference
    vcf_template_reader.contigs = OrderedDict([(contig_name, vcf.parser._Contig(contig_name, contig_length)) for (contig_name, contig_length) in contigs])
    # Restrict the sample list in the template header before creating the writer.
    new_samples = []
    if targeted_samples:
        for k, v in sorted(vcf_template_reader._sample_indexes.iteritems()):
            if k in targeted_samples:
                new_samples.append(k)
        vcf_template_reader.samples = new_samples
    vcf_writer = vcf.Writer(open(out_vcf, "w"), vcf_template_reader)
    # Re-open the reader: the header mutation above must not affect record parsing.
    if targeted_samples:
        vcf_template_reader = vcf.Reader(open(in_vcf, "r"))
    #tabix_vcf = pysam.TabixFile(invcf, parser=pysam.asVCF())
    info_warned = False
    regions_bedtool = pybedtools.BedTool(regions_bed)
    logger.warning("only process fully-contained variants")
    logger.warning("right now we only deal with SVLEN, which is agnostic of region start")
    logger.warning("ignore END in INFO field for now")
    for region_index, region in enumerate(regions_bedtool, start=1):
        records = None
        try: records = vcf_template_reader.fetch(chrom=str(region.chrom), start=region.start, end=region.end)
        except ValueError: logger.info("No records found in %s from %s" % (str(region).strip(), in_vcf))
        if records is None: continue
        for record in records:
            # Skip variants too close to the region edges (flank margin).
            if record.POS <= region.start + flank or record.POS + len(record.REF) + flank - 1 >= region.end: continue
            # Skip structural variants whose span would extend past the region end.
            if 'SVTYPE' in record.INFO and record.INFO['SVTYPE'] in ['DEL','INV','DUP'] and record.POS + max(map(abs, record.INFO['SVLEN'])) >= region.end + flank: continue
            # Rename the contig and rebase the position to region-local coordinates.
            record.CHROM = str(region_index) if use_short_contig_names else ("%s_%d_%d" % (str(region.chrom), region.start, region.end))
            # record.POS seems to be zero-based, at least in the infinite wisdom of my version of pysam
            record.POS = record.POS - region.start
            if not new_samples:
                vcf_writer.write_record(record)
                continue
            else:
                # Rebuild the record with only the targeted samples' calls.
                snames = []
                sindexes = {}
                for s in new_samples:
                    for i in xrange(len(record.samples)):
                        if s == record.samples[i].sample:
                            sindexes[s] = i
                            snames.append(record.samples[i])
                vcfrecord = vcf.model._Record(record.CHROM, record.POS, record.ID, record.REF, record.ALT, record.QUAL, record.FILTER, record.INFO, record.FORMAT, sindexes, snames)
                vcf_writer.write_record(vcfrecord)
    vcf_writer.close()
    # bgzip + tabix-index the output; the final artifact is out_vcf + '.gz'.
    pysam.tabix_index(out_vcf, force=True, preset='vcf')
    logger.info("Lifted over the VCF %s to %s" % (in_vcf, out_vcf))
    return "{}.gz".format(out_vcf)
def gen_restricted_ref_and_vcfs(reference, invcfs, regions, samples, outdir, flank=0, short_contig_names=False):
    """Generate a region-restricted reference FASTA plus lifted-over VCFs.

    Returns (restricted_fasta_path, output_vcf_list); when *regions* is
    falsy the inputs are returned untouched.
    """
    restricted_fasta = reference
    outvcfs = invcfs
    if regions:
        makedirs([outdir])
        restricted_fasta = os.path.join(outdir, "ref.fa")
        gen_restricted_reference(reference, regions, restricted_fasta, short_contig_names)
        if outvcfs:
            # Each output VCF keeps its input's base name, placed under outdir.
            stems = [os.path.join(outdir, os.path.splitext(os.path.basename(v))[0]) if v else None
                     for v in invcfs]
            outvcfs = [gen_restricted_vcf(v, regions, stem, restricted_fasta, samples, flank, short_contig_names)
                       for v, stem in zip(invcfs, stems)]
    return (restricted_fasta, outvcfs)
def main():
    """CLI entry point: parse arguments and run the restriction pipeline."""
    logger = logging.getLogger(main.__name__)
    arg_parser = argparse.ArgumentParser(description="Generate restricted FASTAs and VCFs given a BED file. The contigs are the sequences for each genomic region in the BED file and the name of the contigs reflects that. The VCFs use the coordinates on the new contigs.", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument("--reference", help="Reference FASTA", required=True)
    arg_parser.add_argument("--regions", help="Regions BED", required=True)
    arg_parser.add_argument("--vcfs", nargs="+", required=True, default=[])
    arg_parser.add_argument("--outdir", required=True)
    arg_parser.add_argument("--flank", type=uint, default=0, help="Ignore variants this close to the edges of a region")
    arg_parser.add_argument("--short_contig_names", action="store_true", help="Generate short contig names instead of the chr_start_end naming")
    arg_parser.add_argument("--samples", nargs="+", default=[], help="Select specific samples. Select all samples if leave empty")
    opts = arg_parser.parse_args()
    gen_restricted_ref_and_vcfs(opts.reference, opts.vcfs, opts.regions, opts.samples, opts.outdir, opts.flank, opts.short_contig_names)
if __name__ == "__main__":
    # Log format: LEVEL, timestamp, logger name, message.
    FORMAT = '%(levelname)s %(asctime)-15s %(name)-20s %(message)s'
    logging.basicConfig(level=logging.INFO, format=FORMAT)
    main()
import math
import torch
import cv2
import torch.nn as nn
import numpy as np
'''
Camera-based pixel warping utilities (rigid flow, bilinear sampling).
This code is adapted from the released GeoNet implementation.
Author: Guo Shi
Time: 2019.09.18
'''
def pixel2cam(depth, pixel_coords, intrinsics, is_homogeneous=True):
    """Transforms coordinates in the pixel frame to camera frame.

    Args:
        depth: [batch, height, width]
        pixel_coords: homogeneous pixel coordinates [batch, 3, height, width]
        intrinsics: camera intrinsics [batch, 3, 3]
        is_homogeneous: return in homogeneous coordinates
    Returns:
        Coords in the camera frame [batch, 3 (4 if homogeneous), height, width]
    """
    batch, height, width = depth.shape
    depth = torch.reshape(depth, (batch, 1, -1))  # -> B, 1, H*W
    # Fix: the original called the nonexistent torch.released.
    pixel_coords = torch.reshape(pixel_coords, (batch, 3, -1))  # -> B, 3, H*W
    # Back-project: X_cam = K^-1 * x_pix * depth.
    cam_coords = torch.matmul(torch.inverse(intrinsics), pixel_coords) * depth  # B, 3, H*W
    if is_homogeneous:
        # Fix: the ones row must be flat (B, 1, H*W) to concatenate with
        # cam_coords; the original built it as (B, 1, H, W).
        ones = torch.ones(batch, 1, height * width)
        if depth.is_cuda:
            ones = ones.cuda()
        cam_coords = torch.cat((cam_coords, ones), 1)
    cam_coords = torch.reshape(cam_coords, (batch, -1, height, width))  # B, 3 (4), H, W
    return cam_coords
def cam2pixel(cam_coords, proj):
    """Transforms coordinates in a camera frame to the pixel frame.

    Args:
        cam_coords: [batch, 4, height, width]
        proj: [batch, 4, 4]
    Return:
        Pixel coordinates projected from the camera frame [batch, height, width, 2]
    """
    batch, _, height, width = cam_coords.shape
    flat_cam = torch.reshape(cam_coords, [batch, 4, -1])      # B, 4, H*W
    projected = torch.matmul(proj, flat_cam)                  # B, 4, H*W
    eps = 1e-10
    depth_row = projected[:, 2:3, :]                          # B, 1, H*W
    # Perspective division with a small epsilon guarding zero depth.
    xs = projected[:, 0:1, :] / (depth_row + eps)
    ys = projected[:, 1:2, :] / (depth_row + eps)
    pixels = torch.transpose(torch.cat((xs, ys), 1), 1, 2)    # B, H*W, 2
    return torch.reshape(pixels, (batch, height, width, 2))   # B, H, W, 2
def meshgrid(batch, height, width, is_homogeneous=True):
    """Construct a 2D meshgrid of pixel coordinates.

    Args:
        batch: batch size
        height: height of the grid
        width: width of the grid
        is_homogeneous: whether to return in homogeneous coordinates
    Returns:
        x,y grid coordinates [batch, 2 (3 if homogeneous), height, width]
    """
    # x coordinates: every row runs from -1 to 1 across the width.
    ones_h = torch.ones(height, 1)  # H, 1
    # Fix: torch.linspace's keyword is `steps`, not `step`.
    xs = torch.reshape(torch.linspace(-1, 1, steps=width), (1, width))  # 1, W
    x_t = torch.reshape(torch.matmul(ones_h, xs), (1, height, width))  # 1, H, W
    # y coordinates: every column runs from -1 to 1 down the height.
    ys = torch.reshape(torch.linspace(-1, 1, steps=height), (height, 1))  # H, 1
    y_t = torch.reshape(torch.matmul(ys, torch.ones(1, width)), (1, height, width))  # 1, H, W
    # Rescale from [-1, 1] to pixel coordinates [0, width-1] / [0, height-1].
    # Fix: torch.float is a dtype, not a callable; use Python float().
    x_t = (x_t + 1.0) * 0.5 * float(width - 1)
    y_t = (y_t + 1.0) * 0.5 * float(height - 1)
    if is_homogeneous:
        ones = torch.ones_like(x_t)
        coords = torch.cat((x_t, y_t, ones), 0)  # 3, H, W
    else:
        coords = torch.cat((x_t, y_t), 0)  # 2, H, W
    coords = torch.unsqueeze(coords, 0)  # 1, 2 (3), H, W
    return coords.repeat(batch, 1, 1, 1)  # B, 2 (3), H, W
def flow_warp(src_img, flow):
    """Inverse-warp a source image onto the target image plane using a flow field.

    Args:
        src_img: source image [batch, 3, height_s, width_s]
        flow: target-to-source flow [batch, 2, height_t, width_t]
    Return:
        Source image warped onto the target plane [batch, 3, height_t, width_t]
    """
    batch, _, height, width = src_img.shape
    # Target pixel grid shifted by the flow gives the source sample positions.
    base_coords = meshgrid(batch, height, width, False)  # B, 2, H, W
    sample_coords = base_coords + flow
    return bilinear_sampler(src_img, sample_coords)
def compute_rigid_flow(depth, pose, intrinsics, reverse_pose=False):
    """Compute the rigid flow from the target image plane to the source image.

    Args:
        depth: depth map of the target image [batch, height_t, width_t]
        pose: target to source (or source to target if reverse_pose=True)
            camera transformation vector [batch, 6], in the order of
            tx, ty, tz, rx, ry, rz
        intrinsics: camera intrinsics [batch, 3, 3]
    Returns:
        Rigid flow from target image to source image [batch, height_t, width_t, 2]
    """
    batch, height, width = depth.shape
    # Convert the 6-DoF pose vector to a 4x4 transformation matrix.
    pose = pose_vec2mat(pose)  # B, 4, 4
    if reverse_pose:
        pose = torch.inverse(pose)
    # Pixel grid in homogeneous coordinates.
    pixel_coords = meshgrid(batch, height, width)  # B, 3, H, W
    # Fix: put target coordinates in [batch, height, width, 2] layout so the
    # final subtraction matches cam2pixel's output; the original (B, 2, H, W)
    # slice could not broadcast against (B, H, W, 2).
    tgt_pixel_coords = pixel_coords[:, :2, :, :].permute(0, 2, 3, 1)  # B, H, W, 2
    # Back-project pixels into the target camera frame using the depth map.
    cam_coords = pixel2cam(depth, pixel_coords, intrinsics)
    # Promote the 3x3 intrinsics to a 4x4 projection matrix.
    filler = torch.tensor([0.0, 0.0, 0.0, 1.0])
    filler = torch.reshape(filler, (1, 1, 4)).repeat(batch, 1, 1)  # B, 1, 4
    intrinsics = torch.cat((intrinsics, torch.zeros(batch, 3, 1)), 2)  # B, 3, 4
    intrinsics = torch.cat((intrinsics, filler), 1)  # B, 4, 4
    # Transformation from the target camera frame to the source pixel frame.
    proj_tgt_cam_to_src_pixel = torch.matmul(intrinsics, pose)  # B, 4, 4
    src_pixel_coords = cam2pixel(cam_coords, proj_tgt_cam_to_src_pixel)  # B, H, W, 2
    rigid_flow = src_pixel_coords - tgt_pixel_coords
    return rigid_flow
def bilinear_sampler(img, coords):
    """Construct a new image by bilinear sampling from the input image.

    Points falling outside the source image boundary have value 0.

    Args:
        img: source image to be sampled from [batch, channels, height_s, width_s]
        coords: coordinates of source pixels to sample from
            [batch, 2, height_t, width_t]; height_t/width_t correspond to the
            output image size and the two channels are x and y respectively.
    Returns:
        A new sampled image [batch, channels, height_t, width_t]
    """
    batch, inp_ch, inp_h, inp_w = img.shape
    # Fix: torch.float is a dtype, not a callable — use .float().
    coords_x = coords[:, 0, :, :].float()  # B, out_H, out_W
    coords_y = coords[:, 1, :, :].float()
    # Fix: coords_x is 3-D; the original unpacked four dims.
    _, out_h, out_w = coords_x.shape
    # Integer corner coordinates surrounding each sampling point.
    x0 = torch.floor(coords_x)
    x1 = x0 + 1
    y0 = torch.floor(coords_y)
    y1 = y0 + 1
    x_max = float(inp_w - 1)
    y_max = float(inp_h - 1)
    # Fix: the lower clamp bound is 0 (the original set `zero` to 1.0,
    # contradicting the documented out-of-bounds behavior).
    x0_safe = torch.clamp(x0, 0.0, x_max)
    y0_safe = torch.clamp(y0, 0.0, y_max)
    x1_safe = torch.clamp(x1, 0.0, x_max)
    y1_safe = torch.clamp(y1, 0.0, y_max)
    # Bilinear interpolation weights; points outside the grid get weight 0.
    wt_x0 = x1_safe - coords_x  # B, out_H, out_W
    wt_x1 = coords_x - x0_safe
    wt_y0 = y1_safe - coords_y
    wt_y1 = coords_y - y0_safe
    # Indices into the flattened image. Fix: torch.range is removed; use arange.
    dim2 = float(inp_w)
    dim1 = float(inp_h * inp_w)
    base = torch.arange(batch).float() * dim1  # per-sample offset into the flat image
    base = base.reshape(batch, 1, 1).expand(batch, out_h, out_w)
    base_y0 = base + y0_safe * dim2
    base_y1 = base + y1_safe * dim2
    idx00 = (x0_safe + base_y0).reshape(-1).long()  # B*out_H*out_W
    idx01 = (x0_safe + base_y1).reshape(-1).long()
    idx10 = (x1_safe + base_y0).reshape(-1).long()
    idx11 = (x1_safe + base_y1).reshape(-1).long()
    # Flatten the image to B*H*W rows of C channels (fix: `imgs` typo).
    imgs_flat = img.reshape(batch, inp_ch, -1).transpose(2, 1).reshape(-1, inp_ch).float()
    im00 = imgs_flat[idx00].reshape(batch, out_h, out_w, inp_ch)
    im01 = imgs_flat[idx01].reshape(batch, out_h, out_w, inp_ch)
    im10 = imgs_flat[idx10].reshape(batch, out_h, out_w, inp_ch)
    im11 = imgs_flat[idx11].reshape(batch, out_h, out_w, inp_ch)
    # Fix the broadcast issue the original comment flagged: add a channel axis.
    w00 = (wt_x0 * wt_y0).unsqueeze(3)
    w01 = (wt_x0 * wt_y1).unsqueeze(3)
    w10 = (wt_x1 * wt_y0).unsqueeze(3)
    w11 = (wt_x1 * wt_y1).unsqueeze(3)
    output = w00 * im00 + w01 * im01 + w10 * im10 + w11 * im11  # B, out_H, out_W, C
    # Back to channels-first layout.
    return output.permute(0, 3, 1, 2).contiguous()
def pose_vec2mat(vec):
    """Converts 6DoF parameters to a transformation matrix.

    Args:
        vec: 6DoF parameters in the order of tx, ty, tz, rx, ry, rz -- [B, 6]
    Return:
        A transformation matrix -- [B, 4, 4]
    """
    batch, _ = vec.shape
    translation = vec[:, 0:3]  # B, 3
    rx = vec[:, 3:4]  # B, 1
    ry = vec[:, 4:5]  # B, 1
    rz = vec[:, 5:6]  # B, 1
    rot_mat = euler2mat(rz, ry, rx)  # B, 1, 3, 3
    rot_mat = rot_mat.squeeze(1)  # B, 3, 3
    # Bottom homogeneous row [0, 0, 0, 1]. Fix: repeat across the batch —
    # torch.cat does not broadcast, so a (1, 1, 4) filler fails for B > 1.
    filler = torch.tensor([0.0, 0.0, 0.0, 1.0])
    filler = torch.reshape(filler, (1, 1, 4)).repeat(batch, 1, 1)  # B, 1, 4
    transform_mat = torch.cat((rot_mat, translation.unsqueeze(2)), 2)  # B, 3, 4
    transform_mat = torch.cat((transform_mat, filler), 1)  # B, 4, 4
    # Fix: the original returned rot_mat (3x3), not the assembled 4x4 matrix,
    # breaking every caller that multiplies by a 4x4 projection.
    return transform_mat
def euler2mat(z, y, x):
    """Converts euler angles to a rotation matrix.

    Args:
        z: rotation angle along z axis (in radians) -- size = [B, N]
        y: rotation angle along y axis (in radians) -- size = [B, N]
        x: rotation angle along x axis (in radians) -- size = [B, N]
    Returns:
        Rotation matrix corresponding to the euler angles -- size = [B, N, 3, 3]
    """
    batch = z.shape[0]
    n = 1
    zeros = torch.zeros(batch, n, 1, 1)
    ones = torch.ones(batch, n, 1, 1)

    def _prep(angle):
        # Clamp to [-pi, pi] and expand to B, N, 1, 1 for row-wise concatenation.
        return torch.clamp(angle, -np.pi, np.pi).unsqueeze(2).unsqueeze(3)

    z, y, x = _prep(z), _prep(y), _prep(x)

    cz, sz = torch.cos(z), torch.sin(z)
    zmat = torch.cat((
        torch.cat((cz, -sz, zeros), 3),
        torch.cat((sz, cz, zeros), 3),
        torch.cat((zeros, zeros, ones), 3)), 2)  # B, N, 3, 3

    cy, sy = torch.cos(y), torch.sin(y)
    ymat = torch.cat((
        torch.cat((cy, zeros, sy), 3),
        torch.cat((zeros, ones, zeros), 3),
        torch.cat((-sy, zeros, cy), 3)), 2)  # B, N, 3, 3

    cx, sx = torch.cos(x), torch.sin(x)
    xmat = torch.cat((
        torch.cat((ones, zeros, zeros), 3),
        torch.cat((zeros, cx, -sx), 3),
        torch.cat((zeros, sx, cx), 3)), 2)  # B, N, 3, 3

    # Compose rotations in the same order as the original: Rx * Ry * Rz.
    return torch.matmul(torch.matmul(xmat, ymat), zmat)
#!/usr/bin/env python
import numpy as np
import cv2
# Create a window named 'image'; WINDOW_NORMAL makes it user-resizable.
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
# Block until any key is pressed (0 = wait indefinitely).
cv2.waitKey(0)
# Close all OpenCV windows before exiting.
cv2.destroyAllWindows()
#!/usr/bin/python3
"""Train a simple fully-connected MNIST classifier with Keras, save it, and plot results."""
import os
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
import numpy as np
#import pandas as pd
from random import randint
import matplotlib.pyplot as plt
keras = tf.keras
from keras.models import Sequential, load_model
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.utils import np_utils

# Load MNIST: 60k training and 10k test grayscale 28x28 digit images.
(train_data, train_label), (test_data, test_label) = keras.datasets.mnist.load_data()
#reshape images of mnist dataset train and set(building the input vector from 28x28)
train_data = train_data.reshape(60000, 28, 28)
test_data = test_data.reshape(10000, 28, 28)
train_data = train_data.astype('float32')
test_data = test_data.astype('float32')
# Scale pixel intensities of the images from [0, 255] down to [0, 1]
train_data /= 255.0
test_data /= 255.0

# Build a linear stack of layers with the Sequential model.
model = Sequential()
model.add(Flatten(input_shape=(28, 28)))  # input layer
model.add(Dense(512))  # hidden layer
model.add(Activation('relu'))
model.add(Dense(512))  # hidden layer
model.add(Activation('relu'))
model.add(Dense(10))  # output layer
model.add(Activation('softmax'))
# sparse_categorical_crossentropy: labels are integer class ids, not one-hot.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(train_data, train_label, epochs=10, validation_data=(test_data, test_label))

# Saving the model; create the output directory first so save() cannot fail.
save_dir = "results"
model_name = "trained_keras_mnist"
os.makedirs(save_dir, exist_ok=True)
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
# Fix: the original passed model_path as a second print() argument instead of
# interpolating it into the format string.
print('saved trained model at %s' % model_path)

# Plotting the training metrics (accuracy and loss per epoch).
fig = plt.figure()
plt.subplot(2, 1, 1)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='lower right')
plt.subplot(2, 1, 2)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.tight_layout()
plt.savefig('metrics')

loss_and_metrics = model.evaluate(test_data, test_label, verbose=2)
print("test loss", loss_and_metrics[0])
print("test accuracy", loss_and_metrics[1])

# Show a grid of random test digits with predicted vs. true labels.
labels = [i for i in range(10)]
prediction = model.predict(test_data)
row = 5
columns = 6
for i in range(30):
    plt.subplot(columns, row, i + 1)
    # Fix: randint is inclusive on both ends, so len(test_data) could index
    # one past the end of the array.
    index = randint(0, len(test_data) - 1)
    plt.imshow(test_data[index], cmap='gray_r')
    plt.title(f"pre={labels[np.argmax(prediction[index])]} real={labels[test_label[index]]}")
    plt.axis('off')
plt.show()
#fig.savefig('demo.png', bbox_inches='tight')
import numpy as np
import os
import matplotlib.pyplot as plt
import skimage.io
from mpl_toolkits.mplot3d import Axes3D
np.set_printoptions(suppress=True)
from matplotlib import cm
from sklearn.neighbors import LocalOutlierFactor
from imblearn.under_sampling import ClusterCentroids
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
from utils.reconstruction_utils import warp_points, get_position_covariance, get_points_from_masks, get_dynamic_transform, get_center_point
from visualization.visualize import visualize
np.random.seed(42)
def remove_outliers(object_points):
    """Drop spatial outliers from paired point observations.

    Args:
        object_points: array whose [:, 0] slice holds points at t0 and
            [:, 1] the matched points at t1 (per the call sites in this
            module — TODO confirm exact shape).
    Returns:
        The filtered array; the input is returned unchanged when it is
        small (<= 100 points) or filtering would leave too few (<= 10).
    """
    if len(object_points) > 100:
        points_t0 = object_points[:, 0]
        points_t1 = object_points[:, 1]
        # Fix: np.bool was removed in NumPy 1.24; the builtin bool dtype
        # is the drop-in replacement.
        mask = np.zeros(len(object_points), dtype=bool)
        # fit the model for outlier detection (default)
        for points in [points_t0, points_t1]:
            clf = LocalOutlierFactor(n_neighbors=20)
            clf.fit_predict(points)
            X_scores = clf.negative_outlier_factor_
            # Normalize scores to [0, 1]; larger means more outlier-like.
            X_scores = (X_scores.max() - X_scores) / (X_scores.max() - X_scores.min())
            median_score = np.median(X_scores)
            # Flag points scoring above the median in either frame.
            mask = np.logical_or(X_scores > median_score, mask)
        # Keep the filtered set only if enough inliers remain.
        if len(object_points[np.logical_not(mask)]) > 10:
            object_points = object_points[np.logical_not(mask)]
    return object_points
def sparsify(object_points):
    """Randomly subsample *object_points* to at most 200 rows (with replacement).

    Args:
        object_points: array indexed along axis 0.
    Returns:
        An array of at most 200 rows; the input object is returned unchanged
        when it already has 200 or fewer rows.
    """
    if len(object_points) > 200:
        # Fix: np.int was removed in NumPy 1.24; builtin int is equivalent.
        picks = np.random.uniform(0, len(object_points), 200).astype(int)
        object_points = np.take(object_points, picks, axis=0)
    return object_points
def create_dynamic_transforms(config, tracks, flow, point_imgs, raw_imgs, calibration_params):
    """Estimate per-track dynamic transforms and 3D positions between frames.

    For every track that is active at step and step+1, 3D points are gathered
    from both frames (via flow correspondence) and a rigid transform plus
    covariance is estimated; tracks with too few points, or tracks ending at
    `step`, fall back to a zero transform with identity covariance. Results
    are written back onto `tracks` as new per-step attributes (plus extra
    debug attributes when config 'debug' is set).

    :param config: config object; only `config.bool('debug')` is read here
    :param tracks: track container, mutated in place and returned
    :param flow: per-step flow fields used to match points across frames
    :param point_imgs: per-step 3D point images
    :param raw_imgs: per-step color images
    :param calibration_params: camera calibration passed to covariance helpers
    :return: the same `tracks` object with the new attributes populated
    """
    tracks.add_new_attribute('dynamic_transforms')
    tracks.add_new_attribute('transform_covariances')
    tracks.add_new_attribute('global_positions')
    tracks.add_new_attribute('position_covariances')
    tracks.add_new_attribute('global_points')
    if config.bool('debug'):
        # extra attributes only used for visualization/inspection
        tracks.add_new_attribute('global_pointclouds')
        tracks.add_new_attribute('global_colors')
        tracks.add_new_attribute('global_3D_bbox')
        tracks.add_new_attribute('global_points_unprocessed')
    for step in range(tracks.timesteps-1):
        for id in tracks.get_active_tracks(step):
            if id in tracks.get_active_tracks(step+1):
                # Track continues into the next frame: gather matched 3D
                # points from both frames via the flow field.
                mask_t0 = tracks.get_mask(step, id, postprocess=True)
                mask_t1 = tracks.get_mask(step+1, id, postprocess=True)
                points, colors = get_points_from_masks(mask_t0, mask_t1, point_imgs[step], point_imgs[step+1], flow[step+1], raw_imgs[step], raw_imgs[step+1], calibration_params)
                if len(points) > 1:
                    # Enough correspondences: estimate the actual transform.
                    points_processed = remove_outliers(sparsify(points))
                    # A track starting at `step` also needs its step-t0
                    # attributes initialized (continuing tracks already
                    # received them at the previous iteration).
                    if not tracks.is_active(step-1, id):
                        tracks.set_attribute(step, id, 'position_covariances', get_position_covariance(points_processed[:, 0], calibration_params))
                        tracks.set_attribute(step, id, 'global_positions', np.mean(points_processed[:, 0], axis=0))
                        tracks.set_attribute(step, id, 'global_points', points_processed[:, 0])
                        if config.bool('debug'):
                            tracks.set_attribute(step, id, 'global_3D_bbox', get_center_point(config, points_processed[:, 0], tracks.get_detection(step, id)['class']))
                            tracks.set_attribute(step, id, 'global_pointclouds', points[:, 0])
                            tracks.set_attribute(step, id, 'global_points_unprocessed', points[:, 0])
                            tracks.set_attribute(step, id, 'global_colors', colors[:, 0])
                    dynamic_transform, transform_covariance = get_dynamic_transform(points_processed)
                    tracks.set_attribute(step + 1, id, 'dynamic_transforms', dynamic_transform)
                    tracks.set_attribute(step + 1, id, 'transform_covariances', transform_covariance)
                    tracks.set_attribute(step + 1, id, 'global_positions', np.mean(points_processed[:, 1], axis=0))
                    tracks.set_attribute(step + 1, id, 'position_covariances', get_position_covariance(points_processed[:, 1], calibration_params))
                    tracks.set_attribute(step + 1, id, 'global_points', points_processed[:, 1])
                    if config.bool('debug'):
                        tracks.set_attribute(step + 1, id, 'global_3D_bbox', get_center_point(config, points_processed[:, 1], tracks.get_detection(step, id)['class'], points_processed[:, 0]))
                        tracks.set_attribute(step + 1, id, 'global_pointclouds', warp_points(tracks.get_track_attribute(id, 'dynamic_transforms'), id, points_processed[:, 1]))
                        tracks.set_attribute(step + 1, id, 'global_points_unprocessed', points[:, 1])
                        tracks.set_attribute(step + 1, id, 'global_colors', colors[:, 1])
                else:
                    # Too few correspondences: duplicate the t1 points into
                    # both slots and fall back to a zero transform with
                    # identity covariance.
                    points = np.concatenate((np.expand_dims(point_imgs[step+1][mask_t1.astype(np.bool)], axis=1), np.expand_dims(point_imgs[step+1][mask_t1.astype(np.bool)], axis=1)), axis=1)
                    colors = np.concatenate((np.expand_dims(raw_imgs[step+1][mask_t1.astype(np.bool)], axis=1), np.expand_dims(raw_imgs[step+1][mask_t1.astype(np.bool)], axis=1)), axis=1)
                    points_processed = remove_outliers(sparsify(points))
                    if not tracks.is_active(step - 1, id):
                        tracks.set_attribute(step, id, 'position_covariances', get_position_covariance(points_processed[:, 0], calibration_params))
                        tracks.set_attribute(step, id, 'global_positions', np.mean(points_processed[:, 0], axis=0))
                        tracks.set_attribute(step, id, 'global_points', points_processed[:, 0])
                        if config.bool('debug'):
                            tracks.set_attribute(step, id, 'global_3D_bbox', get_center_point(config, points_processed[:, 0], tracks.get_detection(step, id)['class']))
                            tracks.set_attribute(step, id, 'global_pointclouds', points[:, 0])
                            tracks.set_attribute(step, id, 'global_points_unprocessed', points[:, 0])
                            tracks.set_attribute(step, id, 'global_colors', colors[:, 0])
                    # zero translation, identity covariance (no motion info)
                    dynamic_transform = np.asarray([0, 0, 0])
                    transform_covariance = np.eye(3)
                    tracks.set_attribute(step + 1, id, 'dynamic_transforms', dynamic_transform)
                    tracks.set_attribute(step + 1, id, 'transform_covariances', transform_covariance)
                    tracks.set_attribute(step + 1, id, 'global_positions', np.mean(points_processed[:, 1], axis=0))
                    tracks.set_attribute(step + 1, id, 'position_covariances', get_position_covariance(points_processed[:, 1], calibration_params))
                    tracks.set_attribute(step + 1, id, 'global_points', points_processed[:, 1])
                    if config.bool('debug'):
                        tracks.set_attribute(step + 1, id, 'global_3D_bbox', get_center_point(config, points_processed[:, 1], tracks.get_detection(step, id)['class'], points_processed[:, 0]))
                        tracks.set_attribute(step + 1, id, 'global_pointclouds', points[:, 1])
                        tracks.set_attribute(step + 1, id, 'global_points_unprocessed', points[:, 1])
                        tracks.set_attribute(step + 1, id, 'global_colors', colors[:, 1])
            else:
                # Track is not active at step+1. Only single-frame tracks
                # (not active at step-1 either) get initialized here, from
                # the t0 frame alone, again with a zero transform.
                if not tracks.is_active(step - 1, id):
                    mask_t0 = tracks.get_mask(step, id, postprocess=True).astype(np.bool)
                    points = point_imgs[step][mask_t0]
                    colors = raw_imgs[step][mask_t0]
                    points_processed = remove_outliers(sparsify(np.concatenate((np.expand_dims(points, axis=1), np.expand_dims(points, axis=1)), axis=1)))
                    dynamic_transform = np.asarray([0, 0, 0])
                    transform_covariance = np.eye(3)
                    tracks.set_attribute(step + 1, id, 'dynamic_transforms', dynamic_transform)
                    tracks.set_attribute(step + 1, id, 'transform_covariances', transform_covariance)
                    tracks.set_attribute(step, id, 'position_covariances', get_position_covariance(points_processed[:, 0], calibration_params))
                    tracks.set_attribute(step, id, 'global_positions', np.mean(points_processed[:, 0], axis=0))
                    tracks.set_attribute(step, id, 'global_points', points_processed[:, 0])
                    if config.bool('debug'):
                        tracks.set_attribute(step, id, 'global_3D_bbox', get_center_point(config, points_processed[:, 0], tracks.get_detection(step, id)['class']))
                        tracks.set_attribute(step, id, 'global_pointclouds', points)
                        tracks.set_attribute(step, id, 'global_points_unprocessed', points)
                        tracks.set_attribute(step, id, 'global_colors', colors)
    return tracks
import ba
import numpy as np
import time
import matplotlib.pyplot as pl
#import pandas
# Grow a Barabasi-Albert graph with 10**power vertices and save its degree
# distribution to CSV.
power = 7
# Number of vertices - for each time t -> t + 1, add new vertex
N_vertex = 10**power
# Probability mode for adding edges
prob = 1  # Pure preferential attachment
# Number of edges added per vertex
m = 6
# Set up graph G0
BA = ba.bamodel(prob, m, N_vertex)
# time.clock() was removed in Python 3.8; time.perf_counter() is the
# documented replacement for interval timing.
start_time = time.perf_counter()
while BA.current < N_vertex:
    BA.add_vertex()
run_time = time.perf_counter() - start_time
# Store degree distribution k
np.savetxt('k_10^%s_%s.csv' % (power, m), BA.k, delimiter=',')
'''
'''
import os
import h5py
import numpy as np
# -- astropy --
from astropy import units as u
# -- desi --
from desispec.io import read_spectra
# -- feasibgs --
from feasibgs import util as UT
from feasibgs import catalogs as Cat
from feasibgs import forwardmodel as FM
# -- plotting --
import matplotlib as mpl
import matplotlib.pyplot as plt
# Global matplotlib style for every figure in this module: LaTeX text
# rendering, serif font, thicker axes/major ticks, frameless legends.
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.linewidth'] = 1.5
# NOTE(review): xmargin=1 pads the x-axis by 100% of the data range
# (matplotlib default is 0.05) — confirm this is intentional.
mpl.rcParams['axes.xmargin'] = 1
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['legend.frameon'] = False
def gleg_bgsSpec(nspec, iexp, nsub, expfile=None, method='spacefill', silent=True, validate=False):
    ''' Given noiseless source spectra, simulate DESI BGS noise for the
    observing conditions of exposure `iexp` drawn from the `nsub`-exposure
    subset of `expfile` sampled with `method`. Writes the simulated spectra
    to a FITS file (and an optional validation PNG).

    :param nspec:
        number of source (no noise) spectra in the input HDF5 file
    :param iexp:
        index of the sampled observing conditions to simulate
    :param nsub:
        number of observing conditions in the sampled subset
    :param expfile: (default: None)
        path of the exposure file the subset was drawn from; used to build
        the subset filename, so it must not be None
    :param method: (default: 'spacefill')
        method used for sampling the observing conditions
    :param silent: (default: True)
        if False, print the sampled observing conditions
    :param validate: (default: False)
        if True generate some plots
    '''
    # read in no noise spectra
    _fspec = os.path.join(UT.dat_dir(), 'bgs_zsuccess', 'GALeg.g15.sourceSpec.%i.hdf5' % nspec)
    fspec = h5py.File(_fspec, 'r')
    # NOTE(review): `.value` is the h5py<3 Dataset API (removed in h5py 3.x);
    # newer h5py requires `dset[...]` — confirm the pinned h5py version.
    wave = fspec['wave'].value
    flux = fspec['flux'].value
    # read in sampled exposures
    expfile_subset = os.path.join(UT.dat_dir(), 'bgs_zsuccess',
                                  '%s.subset.%i%s.hdf5' % (os.path.splitext(os.path.basename(expfile))[0], nsub, method))
    fexps = h5py.File(expfile_subset, 'r')
    texp = fexps['texp'].value
    airmass = fexps['airmass'].value
    wave_sky= fexps['wave'].value
    # surface-brightness unit for the sky model
    u_sb = 1e-17 * u.erg / u.angstrom / u.arcsec**2 / u.cm**2 / u.second
    sky = fexps['sky'].value
    if not silent:
        print('t_exp=%f' % texp[iexp])
        print('airmass=%f' % airmass[iexp])
        print('moon ill=%.2f alt=%.f, sep=%.f' %
              (fexps['moon_ill'].value[iexp], fexps['moon_alt'].value[iexp], fexps['moon_sep'].value[iexp]))
        print('sun alt=%.f, sep=%.f' % (fexps['sun_alt'].value[iexp], fexps['sun_sep'].value[iexp]))
    # simulate the exposures
    fdesi = FM.fakeDESIspec()
    if not silent: print('simulate exposures with sky model')
    f_bgs = os.path.join(UT.dat_dir(), 'bgs_zsuccess',
                         'GALeg.g15.sourceSpec.%s.%i.fits' % (os.path.splitext(os.path.basename(expfile_subset))[0], iexp))
    # negative sky values are clipped to zero before applying the unit
    bgs = fdesi.simExposure(wave, flux,
                            exptime=texp[iexp],
                            airmass=airmass[iexp],
                            skycondition={'name': 'input', 'sky': np.clip(sky[iexp,:], 0, None) * u_sb, 'wave': wave_sky},
                            filename=f_bgs)
    if validate:
        # panel 1: sky brightness; panels 2-4: simulated vs. noiseless spectra
        fig = plt.figure(figsize=(10,20))
        sub = fig.add_subplot(411)
        sub.plot(wave_sky, sky[iexp], c='C1')
        sub.text(0.05, 0.95,
                 'texp=%.0f, airmass=%.2f\nmoon ill=%.2f, alt=%.0f, sep=%.0f\nsun alt=%.0f, sep=%.f' %
                 (texp[iexp], airmass[iexp], fexps['moon_ill'][iexp], fexps['moon_alt'][iexp],
                  fexps['moon_sep'][iexp], fexps['sun_alt'][iexp], fexps['sun_sep'][iexp]),
                 ha='left', va='top', transform=sub.transAxes, fontsize=15)
        sub.legend(loc='upper right', frameon=True, fontsize=20)
        sub.set_xlim([3e3, 1e4])
        sub.set_ylim([0., 20.])
        for i in range(3):
            sub = fig.add_subplot(4,1,i+2)
            for band in ['b', 'r', 'z']:
                sub.plot(bgs.wave[band], bgs.flux[band][i], c='C1')
            sub.plot(wave, flux[i], c='k', ls=':', lw=1, label='no noise')
            if i == 0: sub.legend(loc='upper right', fontsize=20)
            sub.set_xlim([3e3, 1e4])
            sub.set_ylim([0., 15.])
        # invisible frame used only to carry the shared axis labels
        bkgd = fig.add_subplot(111, frameon=False)
        bkgd.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
        bkgd.set_xlabel('rest-frame wavelength [Angstrom]', labelpad=10, fontsize=25)
        bkgd.set_ylabel('flux [$10^{-17} erg/s/cm^2/A$]', labelpad=10, fontsize=25)
        fig.savefig(f_bgs.replace('.fits', '.png'), bbox_inches='tight')
    return None
def gleg_sourceSpec_hackday(nsub, validate=False):
    '''generate noiseless simulated spectra for a subset of GAMAlegacy
    galaxies, writing a metadata HDF5 file and a source-spectra HDF5 file
    that also contains all the galaxy properties.

    :param nsub:
        number of galaxies to randomly select from the GAMALegacy
        joint catalog
    :param validate: (default: False)
        if True make some plots that validate the chosen spectra
    '''
    # read in GAMA-Legacy catalog
    cata = Cat.GamaLegacy()
    gleg = cata.Read('g15', dr_gama=3, dr_legacy=7, silent=False) # these values shouldn't change
    redshift = gleg['gama-spec']['z']
    absmag_ugriz = cata.AbsMag(gleg, kcorr=0.1, H0=70, Om0=0.3, galext=False) # ABSMAG k-correct to z=0.1
    r_mag_apflux = UT.flux2mag(gleg['legacy-photo']['apflux_r'][:,1])
    r_mag_gama = gleg['gama-photo']['r_model'] # r-band magnitude from GAMA (SDSS) photometry
    ha_gama = gleg['gama-spec']['ha_flux'] # halpha line flux
    ngal = len(redshift) # number of galaxies
    vdisp = np.repeat(100.0, ngal) # velocity dispersions [km/s]
    # match GAMA galaxies to templates
    bgs3 = FM.BGStree()
    match = bgs3._GamaLegacy(gleg)
    hasmatch = (match != -999)
    criterion = hasmatch
    # randomly pick a few more than nsub galaxies from the catalog
    subsamp = np.random.choice(np.arange(ngal)[criterion], int(1.1 * nsub), replace=False)
    # generate noiseless spectra for these galaxies
    s_bgs = FM.BGSsourceSpectra(wavemin=1500.0, wavemax=15000)
    emline_flux = s_bgs.EmissionLineFlux(gleg, index=subsamp, dr_gama=3, silent=True) # emission lines from GAMA
    flux, wave, _, magnorm_flag = s_bgs.Spectra(r_mag_apflux[subsamp], redshift[subsamp],
                                                vdisp[subsamp], seed=1, templateid=match[subsamp],
                                                emflux=emline_flux, mag_em=r_mag_gama[subsamp],
                                                silent=True)
    # some of the galaxies will have issues where the emission line is brighter
    # than the photometric magnitude. Lets make sure we take nsub galaxies that
    # do not include these.
    isubsamp = np.random.choice(np.arange(len(subsamp))[magnorm_flag], nsub, replace=False)
    subsamp = subsamp[isubsamp]
    # write the metadata file
    fspec = os.path.join(UT.dat_dir(), 'bgs_zsuccess', 'GALeg.g15.metadata.%i.hdf5' % nsub)
    fmeta = h5py.File(fspec, 'w')
    fmeta.create_dataset('zred', data=redshift[subsamp])
    fmeta.create_dataset('absmag_ugriz', data=absmag_ugriz[:,subsamp])
    fmeta.create_dataset('r_mag_apflux', data=r_mag_apflux[subsamp])
    fmeta.create_dataset('r_mag_gama', data=r_mag_gama[subsamp])
    for grp in gleg.keys():
        # BUG FIX: the original called fsub.create_group() here, but `fsub`
        # is only defined further below — this raised NameError. The groups
        # belong in the metadata file being written here.
        group = fmeta.create_group(grp)
        for key in gleg[grp].keys():
            group.create_dataset(key, data=gleg[grp][key][subsamp])
    fmeta.close()
    # write the source-spectra file (galaxy properties + flux/wave)
    fspec = os.path.join(UT.dat_dir(), 'bgs_zsuccess', 'GALeg.g15.sourceSpec.%i.hdf5' % nsub)
    fsub = h5py.File(fspec, 'w')
    fsub.create_dataset('zred', data=redshift[subsamp])
    fsub.create_dataset('absmag_ugriz', data=absmag_ugriz[:,subsamp])
    fsub.create_dataset('r_mag_apflux', data=r_mag_apflux[subsamp])
    fsub.create_dataset('r_mag_gama', data=r_mag_gama[subsamp])
    for grp in gleg.keys():
        group = fsub.create_group(grp)
        for key in gleg[grp].keys():
            group.create_dataset(key, data=gleg[grp][key][subsamp])
    fsub.create_dataset('flux', data=flux[isubsamp, :])
    fsub.create_dataset('wave', data=wave)
    fsub.close()
    if validate:
        fig = plt.figure(figsize=(10,8))
        sub = fig.add_subplot(111)
        for i in range(10): #np.random.choice(isubsamp, 10, replace=False):
            wave_rest = wave / (1.+redshift[subsamp][i])
            sub.plot(wave_rest, flux[isubsamp[i],:])
        # mark the main optical emission lines (rest-frame wavelengths)
        emline_keys = ['oiib', 'oiir', 'hb', 'oiiib', 'oiiir', 'ha', 'siib', 'siir']
        emline_lambda = [3727.092, 3729.874, 4862.683, 4960.295, 5008.239, 6564.613, 6718.294, 6732.673]
        for k, l in zip(emline_keys, emline_lambda):
            if k == 'ha':
                sub.vlines(l, 0., 20, color='k', linestyle='--', linewidth=1)
            else:
                sub.vlines(l, 0., 20, color='k', linestyle=':', linewidth=0.5)
        sub.set_xlabel('rest-frame wavelength [Angstrom]', fontsize=25)
        sub.set_xlim([3e3, 1e4])
        sub.set_ylabel('flux [$10^{-17} erg/s/cm^2/A$]', fontsize=25)
        sub.set_ylim([0., 20.])
        fig.savefig(fspec.replace('.hdf5', '.png'), bbox_inches='tight')
    return None
def gleg_simSpec(nsub, spec_flag='', validate=False):
    '''generate noiseless simulated spectra for a subset of GAMAlegacy
    galaxies. The output hdf5 file will also contain all the galaxy
    properties
    :param nsub:
        number of galaxies to randomly select from the GAMALegacy
        joint catalog
    :param spec_flag: (default: '')
        string that specifies what type of spectra options are
        '', '.lowHalpha' (only galaxies with Halpha flux < 10)
    :param validate: (default: False)
        if True make some plots that validate the chosen spectra
    :raises ValueError: for any other `spec_flag` value
    '''
    # read in GAMA-Legacy catalog
    cata = Cat.GamaLegacy()
    gleg = cata.Read('g15', dr_gama=3, dr_legacy=7, silent=False) # these values shouldn't change
    redshift = gleg['gama-spec']['z']
    absmag_ugriz = cata.AbsMag(gleg, kcorr=0.1, H0=70, Om0=0.3, galext=False) # ABSMAG k-correct to z=0.1
    r_mag_apflux = UT.flux2mag(gleg['legacy-photo']['apflux_r'][:,1])
    r_mag_gama = gleg['gama-photo']['r_model'] # r-band magnitude from GAMA (SDSS) photometry
    ha_gama = gleg['gama-spec']['ha_flux'] # halpha line flux
    ngal = len(redshift) # number of galaxies
    vdisp = np.repeat(100.0, ngal) # velocity dispersions [km/s]
    # match GAMA galaxies to templates
    bgs3 = FM.BGStree()
    match = bgs3._GamaLegacy(gleg)
    hasmatch = (match != -999)
    # selection criterion: template match, optionally restricted by spec_flag
    if spec_flag == '':
        criterion = hasmatch
    elif spec_flag == '.lowHalpha':
        low_halpha = (ha_gama < 10.)
        criterion = hasmatch & low_halpha
    else:
        raise ValueError
    # randomly pick a few more than nsub galaxies from the catalog
    subsamp = np.random.choice(np.arange(ngal)[criterion], int(1.1 * nsub), replace=False)
    # generate noiseless spectra for these galaxies
    s_bgs = FM.BGSsourceSpectra(wavemin=1500.0, wavemax=15000)
    emline_flux = s_bgs.EmissionLineFlux(gleg, index=subsamp, dr_gama=3, silent=True) # emission lines from GAMA
    flux, wave, _, magnorm_flag = s_bgs.Spectra(r_mag_apflux[subsamp], redshift[subsamp],
                                                vdisp[subsamp], seed=1, templateid=match[subsamp],
                                                emflux=emline_flux, mag_em=r_mag_gama[subsamp],
                                                silent=True)
    # some of the galaxies will have issues where the emission line is brighter
    # than the photometric magnitude. Lets make sure we take nsub galaxies that
    # do not include these.
    isubsamp = np.random.choice(np.arange(len(subsamp))[magnorm_flag], nsub, replace=False)
    subsamp = subsamp[isubsamp]
    # write out spectra plus all the galaxy properties
    fspec = os.path.join(UT.dat_dir(), 'bgs_zsuccess', 'g15.simSpectra.%i%s.v2.hdf5' % (nsub, spec_flag))
    fsub = h5py.File(fspec, 'w')
    fsub.create_dataset('zred', data=redshift[subsamp])
    fsub.create_dataset('absmag_ugriz', data=absmag_ugriz[:,subsamp])
    fsub.create_dataset('r_mag_apflux', data=r_mag_apflux[subsamp])
    fsub.create_dataset('r_mag_gama', data=r_mag_gama[subsamp])
    fsub.create_dataset('flux', data=flux[isubsamp, :])
    fsub.create_dataset('wave', data=wave)
    for grp in gleg.keys():
        group = fsub.create_group(grp)
        for key in gleg[grp].keys():
            group.create_dataset(key, data=gleg[grp][key][subsamp])
    fsub.close()
    if validate:
        fig = plt.figure(figsize=(10,8))
        sub = fig.add_subplot(111)
        for i in range(10): #np.random.choice(isubsamp, 10, replace=False):
            wave_rest = wave / (1.+redshift[subsamp][i])
            sub.plot(wave_rest, flux[isubsamp[i],:])
        # mark the main optical emission lines (rest-frame wavelengths)
        emline_keys = ['oiib', 'oiir', 'hb', 'oiiib', 'oiiir', 'ha', 'siib', 'siir']
        emline_lambda = [3727.092, 3729.874, 4862.683, 4960.295, 5008.239, 6564.613, 6718.294, 6732.673]
        for k, l in zip(emline_keys, emline_lambda):
            if k == 'ha':
                sub.vlines(l, 0., 20, color='k', linestyle='--', linewidth=1)
            else:
                sub.vlines(l, 0., 20, color='k', linestyle=':', linewidth=0.5)
        sub.set_xlabel('rest-frame wavelength [Angstrom]', fontsize=25)
        sub.set_xlim([3e3, 1e4])
        sub.set_ylabel('flux [$10^{-17} erg/s/cm^2/A$]', fontsize=25)
        sub.set_ylim([0., 20.])
        fig.savefig(fspec.replace('.hdf5', '.png'), bbox_inches='tight')
    return None
def gleg_simSpec_mockexp(nsub, iexp, nexp=15, method='spacefill', spec_flag='', silent=True, validate=False):
    ''' Given noiseless spectra, simulate DESI BGS noise based on observing
    conditions provided by iexp of nexp sampled observing conditions.
    Simulates once with the old sky model and once with the new one, and
    writes each to a FITS file.
    :param nsub:
        number of no noise spectra
    :param iexp:
        index of nexp observing conditions sampled using `method`
    :param nexp: (default: 15)
        number of observing conditions sampled from `surveysim`
    :param method: (default: 'spacefill')
        method used for sampling `nexp` observing conditions
    :param spec_flag: (default: '')
        string that specifies what type of spectra options are
        '', '.lowHalpha', '.noEmline'
    :param silent: (default: True)
    :param validate: (default: False)
        if True generate some plots
    '''
    # read in no noise spectra
    _fspec = os.path.join(UT.dat_dir(), 'bgs_zsuccess', 'g15.simSpectra.%i%s.v2.hdf5' % (nsub, spec_flag))
    fspec = h5py.File(_fspec, 'r')
    # NOTE(review): `.value` is the h5py<3 Dataset API (removed in h5py 3.x);
    # confirm the pinned h5py version.
    wave = fspec['wave'].value
    flux = fspec['flux'].value
    # read in sampled exposures
    fexps = h5py.File(os.path.join(UT.dat_dir(), 'bgs_zsuccess',
                                   'bgs_survey_exposures.subset.%i%s.hdf5' % (nexp, method)), 'r')
    texp = fexps['exptime'].value
    airmass = fexps['airmass'].value
    wave_old = fexps['wave_old'].value
    wave_new = fexps['wave_new'].value
    # surface-brightness unit for the sky models
    u_sb = 1e-17 * u.erg / u.angstrom / u.arcsec**2 / u.cm**2 / u.second
    sky_old = fexps['sky_old'].value
    sky_new = fexps['sky_new'].value
    if not silent:
        print('t_exp=%f' % texp[iexp])
        print('airmass=%f' % airmass[iexp])
        print('moon ill=%.2f alt=%.f, sep=%.f' %
              (fexps['moon_ill'].value[iexp], fexps['moon_alt'].value[iexp], fexps['moon_sep'].value[iexp]))
        print('sun alt=%.f, sep=%.f' % (fexps['sun_alt'].value[iexp], fexps['sun_sep'].value[iexp]))
    # simulate the exposures
    fdesi = FM.fakeDESIspec()
    if not silent: print('simulate exposures with old sky model')
    f_bgs_old = ''.join([UT.dat_dir(), 'bgs_zsuccess/',
                         'g15.simSpectra.', str(nsub), spec_flag, '.texp_default.iexp', str(iexp), 'of', str(nexp), method,
                         '.old_sky.v2.fits'])
    bgs_old = fdesi.simExposure(wave, flux,
                                exptime=texp[iexp],
                                airmass=airmass[iexp],
                                skycondition={'name': 'input', 'sky': np.clip(sky_old[iexp,:], 0, None) * u_sb, 'wave': wave_old},
                                filename=f_bgs_old)
    if not silent: print('simulate exposures with new sky model')
    f_bgs_new = ''.join([UT.dat_dir(), 'bgs_zsuccess/',
                         'g15.simSpectra.', str(nsub), spec_flag, '.texp_default.iexp', str(iexp), 'of', str(nexp), method,
                         '.new_sky.v2.fits'])
    bgs_new = fdesi.simExposure(wave, flux,
                                exptime=texp[iexp],
                                airmass=airmass[iexp],
                                skycondition={'name': 'input', 'sky': np.clip(sky_new[iexp,:], 0, None) * u_sb, 'wave': wave_new},
                                filename=f_bgs_new)
    if validate:
        # panel 1: old vs new sky brightness; panels 2-4: simulated spectra
        fig = plt.figure(figsize=(10,20))
        sub = fig.add_subplot(411)
        sub.plot(wave_new, sky_new[iexp], c='C1', label='new sky brightness')
        sub.plot(wave_old, sky_old[iexp], c='C0', label='old sky brightness')
        sub.text(0.05, 0.95, 'texp=%.0f, airmass=%.2f\nmoon ill=%.2f, alt=%.0f, sep=%.0f\nsun alt=%.0f, sep=%.f' %
                 (texp[iexp], airmass[iexp], fexps['moonfrac'][iexp], fexps['moonalt'][iexp], fexps['moonsep'][iexp],
                  fexps['sunalt'][iexp], fexps['sunsep'][iexp]),
                 ha='left', va='top', transform=sub.transAxes, fontsize=15)
        sub.legend(loc='upper right', frameon=True, fontsize=20)
        sub.set_xlim([3e3, 1e4])
        sub.set_ylim([0., 20.])
        for i in range(3):
            sub = fig.add_subplot(4,1,i+2)
            for band in ['b', 'r', 'z']:
                lbl = None
                if band == 'b': lbl = 'new sky'
                sub.plot(bgs_new.wave[band], bgs_new.flux[band][i], c='C1', label=lbl)
            for band in ['b', 'r', 'z']:
                lbl = None
                if band == 'b': lbl = 'old sky'
                sub.plot(bgs_old.wave[band], bgs_old.flux[band][i], c='C0', label=lbl)
            sub.plot(wave, flux[i], c='k', ls=':', lw=1, label='no noise')
            if i == 0: sub.legend(loc='upper right', fontsize=20)
            sub.set_xlim([3e3, 1e4])
            sub.set_ylim([0., 15.])
        # invisible frame used only to carry the shared axis labels
        bkgd = fig.add_subplot(111, frameon=False)
        bkgd.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
        bkgd.set_xlabel('rest-frame wavelength [Angstrom]', labelpad=10, fontsize=25)
        bkgd.set_ylabel('flux [$10^{-17} erg/s/cm^2/A$]', labelpad=10, fontsize=25)
        fig.savefig(os.path.join(UT.dat_dir(), 'bgs_zsuccess',
                                 'g15.simSpectra.%i%s.texp_default.iexp%iof%i%s.v2.png' % (nsub, spec_flag, iexp, nexp, method)),
                    bbox_inches='tight')
    return None
if __name__=="__main__":
    # Entry point: simulate BGS exposures for the first 15 sampled observing
    # conditions from the surveysim-fork exposure file. The commented calls
    # below are alternative pipeline stages kept for reference.
    #gleg_sourceSpec_hackday(3000, validate=True)
    fexp = os.path.join(UT.dat_dir(), 'bright_exposure', 'exposures_surveysim_fork_150sv0p4.fits')
    for iexp in range(15):
        print('--- exposure #%i ---' % (iexp+1))
        gleg_bgsSpec(3000, iexp, 22, expfile=fexp, silent=False, validate=True)
    #gleg_simSpec(3000, validate=True)
    #for iexp in [0]: #range(0,15):
    #    gleg_simSpec_mockexp(3000, iexp, nexp=15, method='spacefill', validate=True)
    #gleg_simSpec(3000, spec_flag='.lowHalpha', validate=True)
    #gleg_simSpec_mockexp(3000, 0, spec_flag='.lowHalpha', nexp=15, method='spacefill', validate=True)
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from decoder.mlp import mlp_layer
def deconv_layer(output_shape, filter_shape, activation, strides, name):
    """Create a transposed-convolution layer and return two apply functions.

    Variables W and b are created once; `apply_train` uses them directly,
    while `apply_not_train` routes them through tf.stop_gradient so the
    forward pass is identical but no gradients flow into the weights.

    :param output_shape: (H, W, C) of the layer output (batch dim added later)
    :param filter_shape: (fh, fw, out_channels, in_channels) filter tensor shape
    :param activation: one of 'relu', 'softplus', 'sigmoid', 'linear'
    :param strides: NHWC stride tuple for conv2d_transpose
    :param name: variable-name prefix
    :return: (apply_train, apply_not_train) pair of x -> activation(deconv(x))
    """
    W = tf.get_variable(shape=filter_shape, initializer=tf.contrib.layers.xavier_initializer(), name=name + '_W')  # use output channel
    b = tf.get_variable(shape=(filter_shape[-2],), initializer=tf.zeros_initializer, name=name + '_b')  # use output channel

    def _activate(a):
        # Single dispatch point shared by both apply functions (the original
        # duplicated this if-chain in each closure); an unknown activation
        # now fails loudly instead of silently returning None.
        if activation == 'relu':
            return tf.nn.relu(a)
        if activation == 'softplus':
            return tf.nn.softplus(a)
        if activation == 'sigmoid':
            return tf.nn.sigmoid(a)
        if activation == 'linear':
            return a
        raise ValueError('unknown activation: %s' % activation)

    def apply_train(x):
        output_shape_x = (x.get_shape().as_list()[0],) + output_shape
        a = slim.nn.conv2d_transpose(x, W, output_shape_x, strides, 'SAME') + b
        return _activate(a)

    def apply_not_train(x):
        # Same forward computation, but gradients are blocked at W and b.
        output_shape_x = (x.get_shape().as_list()[0],) + output_shape
        a = slim.nn.conv2d_transpose(x, tf.stop_gradient(W), output_shape_x, strides, 'SAME') + tf.stop_gradient(b)
        return _activate(a)

    return apply_train, apply_not_train
def generator_train(dimH=500, dimZ=32, name='generator'):
    """Build the trainable VAE decoder for 28x28x1 inputs and return apply(z).

    Architecture: MLP dimZ -> dimH -> prod((4,4,32)), reshape to (4,4,32),
    then three transposed convolutions up to (28,28,1) with a linear output
    layer. Variables are created under the "vae_decoder" scope so that
    `generator_not_train` can rebuild the same graph with reuse.

    :param dimH: hidden width of the MLP
    :param dimZ: latent dimensionality
    :param name: prefix for per-layer variable names
    :return: apply(z) mapping a latent batch to decoded image logits
    """
    with tf.variable_scope("vae_decoder"):
        # now construct a decoder
        input_shape = (28, 28, 1)
        filter_width = 5
        decoder_input_shape = [(4, 4, 32), (7, 7, 32), (14, 14, 16)]
        decoder_input_shape.append(input_shape)
        fc_layers = [dimZ, dimH, int(np.prod(decoder_input_shape[0]))]
        l = 0  # running layer counter shared by MLP and conv names
        # first include the MLP
        mlp_layers = []
        N_layers = len(fc_layers) - 1
        for i in range(N_layers):
            name_layer = name + '_mlp_l%d' % l
            # [0] selects the trainable apply fn from the (train, not_train) pair
            mlp_layers.append(mlp_layer(fc_layers[i], fc_layers[i + 1], 'relu', name_layer)[0])
            l += 1
        conv_layers = []
        N_layers = len(decoder_input_shape) - 1
        for i in range(N_layers):
            # last deconv produces the output and stays linear
            if i < N_layers - 1:
                activation = 'relu'
            else:
                activation = 'linear'
            name_layer = name + '_conv_l%d' % l
            output_shape = decoder_input_shape[i + 1]
            input_shape = decoder_input_shape[i]
            # stride = ceil of the upsampling factor between feature maps
            up_height = int(np.ceil(output_shape[0] / float(input_shape[0])))
            up_width = int(np.ceil(output_shape[1] / float(input_shape[1])))
            strides = (1, up_height, up_width, 1)
            filter_shape = (filter_width, filter_width, output_shape[-1], input_shape[-1])
            conv_layers.append(deconv_layer(output_shape, filter_shape, activation, \
                                            strides, name_layer)[0])
            l += 1
        print('decoder architecture', fc_layers, 'reshape', decoder_input_shape)

    def apply(z):
        # z -> MLP -> reshape to (4, 4, 32) -> deconv stack -> (28, 28, 1)
        x = z
        for layer in mlp_layers:
            x = layer(x)
        x = tf.reshape(x, (x.get_shape().as_list()[0],) + decoder_input_shape[0])
        for layer in conv_layers:
            x = layer(x)
        return x
    return apply
def generator_not_train(dimH=500, dimZ=32, name='generator'):
    """Rebuild the VAE decoder with reused, gradient-stopped variables.

    Mirrors `generator_train` exactly (same scope, same layer names, same
    architecture) but reuses the existing variables and selects the
    non-trainable apply functions (index [1] of each layer pair), so the
    forward pass is identical while gradients are blocked at the weights.
    Must be called after `generator_train` has created the variables.

    :param dimH: hidden width of the MLP (must match generator_train)
    :param dimZ: latent dimensionality (must match generator_train)
    :param name: prefix for per-layer variable names (must match)
    :return: apply(z) mapping a latent batch to decoded image logits
    """
    with tf.variable_scope("vae_decoder") as scope:
        # reuse the variables created by generator_train
        scope.reuse_variables()
        # now construct a decoder
        input_shape = (28, 28, 1)
        filter_width = 5
        decoder_input_shape = [(4, 4, 32), (7, 7, 32), (14, 14, 16)]
        decoder_input_shape.append(input_shape)
        fc_layers = [dimZ, dimH, int(np.prod(decoder_input_shape[0]))]
        l = 0  # running layer counter shared by MLP and conv names
        # first include the MLP
        mlp_layers = []
        N_layers = len(fc_layers) - 1
        for i in range(N_layers):
            name_layer = name + '_mlp_l%d' % l
            # [1] selects the stop-gradient apply fn from the layer pair
            mlp_layers.append(mlp_layer(fc_layers[i], fc_layers[i + 1], 'relu', name_layer)[1])
            l += 1
        conv_layers = []
        N_layers = len(decoder_input_shape) - 1
        for i in range(N_layers):
            # last deconv produces the output and stays linear
            if i < N_layers - 1:
                activation = 'relu'
            else:
                activation = 'linear'
            name_layer = name + '_conv_l%d' % l
            output_shape = decoder_input_shape[i + 1]
            input_shape = decoder_input_shape[i]
            # stride = ceil of the upsampling factor between feature maps
            up_height = int(np.ceil(output_shape[0] / float(input_shape[0])))
            up_width = int(np.ceil(output_shape[1] / float(input_shape[1])))
            strides = (1, up_height, up_width, 1)
            filter_shape = (filter_width, filter_width, output_shape[-1], input_shape[-1])
            conv_layers.append(deconv_layer(output_shape, filter_shape, activation, \
                                            strides, name_layer)[1])
            l += 1
        print('decoder architecture', fc_layers, 'reshape', decoder_input_shape)

    def apply(z):
        # z -> MLP -> reshape to (4, 4, 32) -> deconv stack -> (28, 28, 1)
        x = z
        for layer in mlp_layers:
            x = layer(x)
        x = tf.reshape(x, (x.get_shape().as_list()[0],) + decoder_input_shape[0])
        for layer in conv_layers:
            x = layer(x)
        return x
    return apply
def get_decoder_param():
    """Return every trainable variable under the "vae_decoder" scope as a tuple."""
    decoder_vars = tf.trainable_variables("vae_decoder")
    return tuple(decoder_vars)
if __name__ == '__main__':
    # Smoke test: build the decoder graph and report how many trainable
    # variables were registered under the "vae_decoder" scope.
    generator_train()
    a = get_decoder_param()
    print(len(a))
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2018-2020 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
import warnings
import numpy as np
from typing import Optional
from ..pipeline import Pipeline
from ..parameter import Uniform
from pyannote.core.utils.distance import cdist
from pyannote.core.utils.distance import dist_range
from pyannote.core.utils.distance import l2_normalize
class ClosestAssignment(Pipeline):
    """Assign each sample to the closest target

    Parameters
    ----------
    metric : `str`, optional
        Distance metric. Defaults to 'cosine'
    normalize : `bool`, optional
        L2 normalize vectors before clustering.

    Hyper-parameters
    ----------------
    threshold : `float`
        Do not assign if distance greater than `threshold`.
    """

    def __init__(
        self, metric: Optional[str] = "cosine", normalize: Optional[bool] = False
    ):
        super().__init__()
        self.metric = metric
        self.normalize = normalize

        # Admissible threshold range depends on the metric/normalization.
        min_dist, max_dist = dist_range(metric=self.metric, normalize=self.normalize)
        if not np.isfinite(max_dist):
            # this is arbitrary and might lead to suboptimal results
            max_dist = 1e6
            msg = (
                f"bounding distance threshold to {max_dist:g}: "
                f"this might lead to suboptimal results."
            )
            warnings.warn(msg)
        # hyper-parameter resolved by the Pipeline machinery at runtime
        self.threshold = Uniform(min_dist, max_dist)

    def __call__(self, X_target, X):
        """Assign each sample to its closest class (if close enough)

        Parameters
        ----------
        X_target : `np.ndarray`
            (n_targets, n_dimensions) target embeddings
        X : `np.ndarray`
            (n_samples, n_dimensions) sample embeddings

        Returns
        -------
        assignments : `np.ndarray`
            (n_samples, ) sample assignments; negated indices mark samples
            left unassigned because they are farther than `threshold`.
        """
        if self.normalize:
            X_target = l2_normalize(X_target)
            X = l2_normalize(X)

        # pairwise distances: distance[k, i] = d(target k, sample i)
        distance = cdist(X_target, X, metric=self.metric)
        targets = np.argmin(distance, axis=0)

        for i, k in enumerate(targets):
            if distance[k, i] > self.threshold:
                # do not assign
                # NOTE(review): for i == 0, -i is still 0, so the first
                # sample can never actually be marked as unassigned — this
                # looks like a bug (e.g. -(i + 1) would be unambiguous);
                # confirm the convention callers rely on before changing.
                targets[i] = -i

        return targets
import sqlite3
import numpy as np
from convlab2.policy.mdrg.multiwoz.utils.nlp import normalize
# loading databases
# One SQLite cursor per MultiWOZ domain, opened once at import time and
# shared by all queries below. NOTE(review): connections are never closed
# and module-global sqlite cursors are not thread-safe — confirm this
# module is only used single-threaded.
domains = ['restaurant', 'hotel', 'attraction', 'train', 'taxi', 'hospital']#, 'police']
dbs = {}
for domain in domains:
    db = 'db/{}-dbase.db'.format(domain)
    conn = sqlite3.connect(db)
    c = conn.cursor()
    dbs[domain] = c
def oneHotVector(num, domain, vector):
    """Write a 6-way one-hot encoding of the entity count for `domain` into
    the domain's 6-slot segment of `vector` and return `vector`.

    Non-train domains bucket exact counts 0..4 plus a shared "5 or more"
    bucket; the train domain uses coarser buckets (0, 1-2, 3-5, 6-10,
    11-40, >40) because train queries return many more results.

    :param num: number of matching database entities (non-negative)
    :param domain: one of the module-level `domains`
    :param vector: belief-state vector, mutated in place
    :return: the updated `vector`
    """
    # The original duplicated twelve near-identical assignment branches;
    # computing the bucket index once is equivalent.
    if domain != 'train':
        bucket = min(num, 5)  # counts >= 5 share the last bucket
    else:
        if num == 0:
            bucket = 0
        elif num <= 2:
            bucket = 1
        elif num <= 5:
            bucket = 2
        elif num <= 10:
            bucket = 3
        elif num <= 40:
            bucket = 4
        else:
            bucket = 5
    idx = domains.index(domain)
    one_hot = np.zeros(6)
    one_hot[bucket] = 1
    vector[idx * 6: idx * 6 + 6] = one_hot
    return vector
def queryResult(domain, turn):
    """Return the NUMBER of database entities for ``domain`` that satisfy
    the slot constraints annotated in ``turn['metadata'][domain]['semi']``.

    (The original docstring said "list of entities"; the function actually
    returns a count.)

    NOTE(review): the SQL is assembled by string concatenation; values are
    quote-escaped but not parameterized — only safe for trusted annotation
    data, never for user input.
    """
    # query the db
    sql_query = "select * from {}".format(domain)
    flag = True  # True until the first WHERE clause has been appended
    #print turn['metadata'][domain]['semi']
    for key, val in turn['metadata'][domain]['semi'].items():
        # Skip unconstrained slots and the many surface forms of "don't care".
        if val == "" or val == "dont care" or val == 'not mentioned' or val == "don't care" or val == "dontcare" or val == "do n't care":
            pass
        else:
            if flag:
                sql_query += " where "
                val2 = val.replace("'", "''")  # escape single quotes for SQL
                #val2 = normalize(val2)
                # change query for trains: departure/arrival times are range
                # comparisons rather than equality.
                if key == 'leaveAt':
                    sql_query += r" " + key + " > " + r"'" + val2 + r"'"
                elif key == 'arriveBy':
                    sql_query += r" " + key + " < " + r"'" + val2 + r"'"
                else:
                    sql_query += r" " + key + "=" + r"'" + val2 + r"'"
                flag = False
            else:
                val2 = val.replace("'", "''")
                #val2 = normalize(val2)
                if key == 'leaveAt':
                    sql_query += r" and " + key + " > " + r"'" + val2 + r"'"
                elif key == 'arriveBy':
                    sql_query += r" and " + key + " < " + r"'" + val2 + r"'"
                else:
                    sql_query += r" and " + key + "=" + r"'" + val2 + r"'"
    #try:  # "select * from attraction where name = 'queens college'"
    #print sql_query
    #print domain
    num_entities = len(dbs[domain].execute(sql_query).fetchall())
    return num_entities
def queryResultVenues(domain, turn, real_belief=False):
    """Return the database rows (venues) matching the turn's belief state.

    ``real_belief`` selects where the constraints come from:
      * True       -- ``turn`` is already a {slot: value} mapping
      * 'tracking' -- ``turn[domain]`` holds "domain-slot-value" strings
      * False      -- constraints are read from turn['metadata'][domain]['semi']

    NOTE(review): SQL is built by string concatenation with quote escaping
    only — acceptable for trusted annotation data, not for user input.
    """
    # query the db
    sql_query = "select * from {}".format(domain)
    flag = True  # True until the first WHERE clause is appended
    if real_belief == True:
        items = turn.items()
    elif real_belief=='tracking':
        for slot in turn[domain]:
            # Slots arrive as "domain-slot-value" strings — TODO confirm format.
            key = slot[0].split("-")[1]
            val = slot[0].split("-")[2]
            # Map tracker slot names onto the db column names.
            if key == "price range":
                key = "pricerange"
            elif key == "leave at":
                key = "leaveAt"
            elif key == "arrive by":
                key = "arriveBy"
            if val == "do n't care":
                pass
            else:
                if flag:
                    sql_query += " where "
                    val2 = val.replace("'", "''")  # escape single quotes for SQL
                    val2 = normalize(val2)
                    # leaveAt/arriveBy are range comparisons, not equality.
                    if key == 'leaveAt':
                        sql_query += key + " > " + r"'" + val2 + r"'"
                    elif key == 'arriveBy':
                        sql_query += key + " < " + r"'" + val2 + r"'"
                    else:
                        sql_query += r" " + key + "=" + r"'" + val2 + r"'"
                    flag = False
                else:
                    val2 = val.replace("'", "''")
                    val2 = normalize(val2)
                    if key == 'leaveAt':
                        sql_query += r" and " + key + " > " + r"'" + val2 + r"'"
                    elif key == 'arriveBy':
                        sql_query += r" and " + key + " < " + r"'" + val2 + r"'"
                    else:
                        sql_query += r" and " + key + "=" + r"'" + val2 + r"'"
            # NOTE(review): this try/return sits INSIDE the for loop, so in
            # 'tracking' mode the query runs (and the function returns) after
            # the FIRST slot only. Kept as-is to preserve behavior.
            # Also: with an empty slot list, control falls through to the
            # loop below where ``items`` is unbound -> NameError.
            try: # "select * from attraction where name = 'queens college'"
                return dbs[domain].execute(sql_query).fetchall()
            except:
                return [] # TODO test it
            pass
    else:
        items = turn['metadata'][domain]['semi'].items()
    flag = True
    for key, val in items:
        # Skip unconstrained slots and the many spellings of "don't care".
        if val == "" or val == "dontcare" or val == 'not mentioned' or val == "don't care" or val == "dont care" or val == "do n't care":
            pass
        else:
            if flag:
                sql_query += " where "
                val2 = val.replace("'", "''")
                val2 = normalize(val2)
                if key == 'leaveAt':
                    sql_query += r" " + key + " > " + r"'" + val2 + r"'"
                elif key == 'arriveBy':
                    sql_query += r" " +key + " < " + r"'" + val2 + r"'"
                else:
                    sql_query += r" " + key + "=" + r"'" + val2 + r"'"
                flag = False
            else:
                val2 = val.replace("'", "''")
                val2 = normalize(val2)
                if key == 'leaveAt':
                    sql_query += r" and " + key + " > " + r"'" + val2 + r"'"
                elif key == 'arriveBy':
                    sql_query += r" and " + key + " < " + r"'" + val2 + r"'"
                else:
                    sql_query += r" and " + key + "=" + r"'" + val2 + r"'"
    try: # "select * from attraction where name = 'queens college'"
        return dbs[domain].execute(sql_query).fetchall()
    except:
        raise
        # NOTE(review): unreachable after the bare raise above; kept as-is.
        return [] # TODO test it
def table_schema(domain):
    """Return the column names of the SQLite table backing *domain*."""
    pragma = "PRAGMA table_info({})".format(domain)
    columns = []
    for row in dbs[domain].execute(pragma).fetchall():
        # PRAGMA table_info rows are (cid, name, type, ...); field 1 is the name.
        columns.append(row[1])
    return columns
import pandas as pd
import numpy as np
import sklearn
import matplotlib.pyplot as plt
import json
import os
import time
import logging
# Use a CJK-capable font so Chinese labels render in matplotlib figures.
font={
    'family':'STSONG'
}
plt.rc("font",**font)
# plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False  # keep minus signs rendering with CJK fonts
logging.basicConfig(level=logging.DEBUG #minimum level that gets logged
                    ,filename="demo.log" #location and name of the log file
                    ,filemode="w" #"w" truncates the file on each run (default is append)
                    ,format="%(asctime)s - %(levelname)-9s: %(message)s" #log record layout
                    # the -9 pads the level name so records line up
                    ,datefmt="%Y-%m-%d %H:%M:%S" #timestamp format
                    )
logger = logging.getLogger(__name__)
class sklearn_regressor():
def __init__(self,filepath:str):
'''
初始化参数
---------------
filepath:数据文件路径
'''
self.filepath=filepath
def get_file_data(self):
'''
读取文件数据
----------------
return self.dataset:pandas.dataframe格式数据
'''
filepath_low=self.filepath.lower()#转换为小写,用于比较后缀名
last_name=filepath_low.rsplit(".",1)[1]
if last_name=='txt':
self.dataset=pd.read_csv(self.filepath,delimiter='\t')
elif last_name=='csv':
self.dataset=pd.read_csv(self.filepath)
elif last_name=='json':
self.dataset=pd.read_json(self.filepath)
return self.dataset
def test_train_split(self,test_rate:float,x_columns:list,y_columns:list):
'''
划分训练集,测试集
----------------------
test_rate:测试集比例
x_columns:自变量所在列
y_columns:目标变量所在列
-----------------------
'''
self.dataset=self.get_file_data()
x=self.dataset.iloc[:,x_columns]
y=self.dataset.iloc[:,y_columns]
from sklearn.preprocessing import StandardScaler
self.x_scaler=StandardScaler()
self.x_normalize=self.x_scaler.fit_transform(x)
self.y_scaler=StandardScaler()
self.y_normalize=self.y_scaler.fit_transform(y)
from sklearn.model_selection import train_test_split
self.x_train, self.x_test, self.y_train, self.y_test =train_test_split(self.x_normalize, self.y_normalize, test_size=test_rate)
def select_model(self,model_name:str,paramters:dict={}):
'''
选择模型,输入参数
-----------------
model_name:sklearn库模型的名称
paramters:对应的模型参数
---------------------------
'''
if model_name=="LinearRegression":
'''
#region
简单线性模型
------------------
paramters: fit_intercept:截距项 参数类型:bool
#endregion
'''
from sklearn.linear_model import LinearRegression
self.model=LinearRegression(**paramters)
elif model_name=="Ridge":
'''
#region
带L2正则化的线性模型
-------------------
paramters: alpha:L2正则化系数 参数类型:list
多目标回归可设置不同alpha,eg:alpha=[0.1,0.2]
---------
fit_intercept:截距项 参数类型:bool
----------------
tol:误差控制项 参数类型:float
#endregion
'''
from sklearn.linear_model import Ridge
self.model=Ridge(**paramters)
elif model_name=="Lasso":
'''
#region
带L1正则化的线性模型,产生一个稀疏的模型
------------------------------------
paramters: alpha:L1正则化系数 参数类型:float
Lasso多目标回归不可设置不同alpha
------------------
fit_intercept:截距项 参数类型:bool
------------------
tol:误差控制项 参数类型:float
#endregion
'''
from sklearn.linear_model import Lasso
self.model=Lasso(**paramters)
elif model_name=="ElasticNet":
'''
#region
同时带L1,L2正则化的线性模型
------------------------------------
paramters: alpha:正则化系数 参数类型:float
------------------
l1_ratio:L1/L2比值 参数类型:float
--------------------------------
fit_intercept:截距项 参数类型:bool
------------------
tol:误差控制项 参数类型:float
#endregion
'''
from sklearn.linear_model import ElasticNet
self.model=ElasticNet(**paramters)
elif model_name=="Lars":
'''
#region
Least-angle regression:逐步回归的改进模型,效率更高,具有特征选择的功能
------------------------------------
paramters: alpha:正则化系数 参数类型:float
------------------
l1_ratio:L1/L2比值 参数类型:float
--------------------------------
fit_intercept:截距项 参数类型:bool
------------------
tol:误差控制项 参数类型:float
#endregion
'''
from sklearn.linear_model import ElasticNet
self.model=ElasticNet(**paramters)
elif model_name=="BayesianRidge":
'''
#region
https://zhuanlan.zhihu.com/p/403618259
贝叶斯线性回归的最大对数后验 等价于 最小化平方损失+L2正则化
拟合的结果是最大化后验概率下的模型参数
-------------------------------------------------------
paramters: alpha_1:数据gamma分布的形状参数 参数类型:float
--------------------------------------
alpha_2:数据gamma分布的逆尺度参数 参数类型:float
--------------------------------------
lambda_1:模型系数gamma分布的形状参数 参数类型:float
---------------------------------------
lambda_2:模型系数gamma分布的形状参数 参数类型:float
-------------------------------------
fit_intercept:截距项 参数类型:bool
--------------------------------------
tol:误差控制项 参数类型:float
#endregion
'''
from sklearn.linear_model import BayesianRidge
self.model=BayesianRidge(**paramters)
elif model_name=="KernelRidge":
'''
#region
https://zhuanlan.zhihu.com/p/72517223
利用核方法的岭回归
-----------------------------------
paramters: alpha:正则化系数 参数类型:float
-----------------------------
kernel:核函数名称 可选["additive_chi2","chi2","linear","laplacian","polynomial","rbf","sigmoid"] 参数类型:str
------------------------------
gamma:RBF, laplacian, polynomial, exponential chi2 and sigmoid kernels核函数参数 参数类型:float
------------------------------
degree:多项式核参数 参数类型:float
#endregion
'''
from sklearn.kernel_ridge import KernelRidge
self.model=KernelRidge(**paramters)
elif model_name=="SVR":
'''
#region
https://zhuanlan.zhihu.com/p/72517223
利用核方法的支持向量机回归
-----------------------------------
paramters: alpha:正则化系数 参数类型:float
-----------------------------
kernel:核函数名称 可选["linear","poly","rbf","sigmoid"] 参数类型:str
------------------------------
gamma:RBF, laplacian, poly,sigmoid kernels核函数参数 可选["scale","auto"] 参数类型:str
参数取值定义:https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVR.html?highlight=svr#sklearn.svm.SVR
------------------------------
degree:多项式核参数 参数类型:float
--------------------------------------
tol:误差控制项 参数类型:float
--------------------------------------
C:支持向量机正则化系数 参数类型:float
-------------------------------------
epsilon:svr回归模型中对样本的误差小于epsilon时,不计算在损失函数中 参数类型:float
#endregion
'''
from sklearn.svm import SVR
self.model=SVR(**paramters)
elif model_name=="SGDRegressor":
'''
#region
batch GD,mini-batch GD,SGD:https://zhuanlan.zhihu.com/p/357963858
使用随机梯度下降法优化的线性模型,适用于大规模数据集
-----------------------------------
paramters: loss:损失函数类型 可选["squared_loss","huber","epsilon_insensitive"] 参数类型:str
-----------------------------------
penalty:正则化系数类型 可选["l1","l2","elasticnet"] 参数类型:sr
------------------------------------
alpha:正则化系数 参数类型:float
-----------------------------------
l1_ratio:elasticnet中l1与l2正则化系数的比值 取值范围[0,1] 参数类型:float
--------------------------------------------------------------------
fit_intercept:截距项 参数类型:bool
------------------------------
tol:误差控制项 参数类型:float
------------------------------
max_iter:最大迭代次数 参数类型:int
--------------------------------------
learning_rate:学习率类型 可选["constant","optimal","invscaling","adaptive"] 参数类型:str
--------------------------------------
eta0:初始学习率 参数类型:float
-------------------------------------
epsilon:svr回归模型中对样本的误差小于epsilon时,不计算在损失函数中,loss为epsilon_insensitive时生效 参数类型:float
#endregion
'''
from sklearn.linear_model import SGDRegressor
self.model=SGDRegressor(**paramters)
elif model_name=="KNeighborsRegressor":
'''
#region
最近邻回归,用最相近的点的值来拟合位置样本的值
-----------------------------------
paramters: n_neighbors:最近邻的样本数量 参数类型:int
----------------------------------------
weights:权重 可选["uniform","distance"] 参数类型:str
uniform:所有点在回归时同等权重
distance:距离更近的点具有更高的权重
----------------------------------------
algorithm:搜索方法 可选["auto","ball_tree","kd_tree","brute"] 参数类型:str
-----------------------------------------
leaf_size:构造树的节点数量 参数类型:int
-----------------------------------------
p:距离度量 参数类型:int
-----------------------------------------
#endregion
'''
from sklearn.neighbors import KNeighborsRegressor
self.model=KNeighborsRegressor(**paramters)
elif model_name=="GaussianProcessRegressor":
'''
#region
https://zhuanlan.zhihu.com/p/350389546
高斯过程回归:贝叶斯线性回归+加核函数,解决高维度问题
---------------------------------------------
paramters: kernel:高斯过程的协方差函数
-------------------------
alpha:观察过程中的噪声方差,可对应于L2正则化参数 参数类型:float
#endregion
'''
from sklearn.gaussian_process import GaussianProcessRegressor
self.model=GaussianProcessRegressor(**paramters)
elif model_name=="PLSRegression":
'''
#region
偏最小二乘回归:将高维度特征降低到低纬度空间,寻找X-Y最大相关性的降维回归方法
---------------------------------------------
paramters: n_components:降维后的主成分 参数类型:int
-------------------------
tol:误差控制项 参数类型:float
#endregion
'''
from sklearn.cross_decomposition import PLSRegression
self.model=PLSRegression(**paramters)
elif model_name=="DecisionTreeRegressor":
'''
#region
cart回归树
---------------------------------------------
paramters: criterion:用于评价树划分质量的指标,可选["mse","friedman_mse","mae","poisson"] 参数类型:str
-------------------------
splitter:每个节点的划分策略,可选["best","random"] 参数类型:str
--------------------------
max_depth:树的最大深度,如不指定,树会一直分割,导致过拟合 参数类型:int
--------------------------
min_samples_split:拆分内部节点所需的最小样本数量 参数类型:int or float
---------------------------
min_samples_leaf:叶节点所需的最小样本数量 参数类型:int or float
------------------------------
min_impurity_decrease:如果继续划分的误差不小于该值,则不会继续划分 参数类型:float
#endregion
'''
from sklearn.tree import DecisionTreeRegressor
self.model=DecisionTreeRegressor(**paramters)
elif model_name=="MLPRegressor":
'''
#region
神经网络回归
---------------------------------------------
paramters: hidden_layer_sizes:隐藏层神经元数量,如(100,) 参数类型:tuple
-------------------------
activation:激活函数,可选["identity","logistic","tanh","relu"] 参数类型:str
--------------------------
solver:优化器,可选["lbfgs","sgd","adam"] 参数类型:str
--------------------------
alpha:L2正则化系数 参数类型:float
---------------------------
batch_size:"sgd"优化时使用的最小样本数量 参数类型:int
------------------------------
learning_rate:学习率设定方法,可选["constant","invscaling","adaptive"] 参数类型:str
------------------------------
learning_rate_init:初始学习率 参数类型:double
------------------------------
max_iter:最大迭代次数,每个样本被使用多少次 参数类型:int
------------------------------
tol:误差控制项 参数类型:float
#endregion
'''
from sklearn.neural_network import MLPRegressor
self.model=MLPRegressor(**paramters)
elif model_name=="BaggingRegressor":
'''
#region
bagging回归
---------------------------------------------
paramters: base_estimator:基学习器 参数类型:object
-------------------------
n_estimators:学习器数量 参数类型:int
--------------------------
max_samples:基学习器训练的样本数量 参数类型:int or float
---------------------------
max_features:基学习器训练的特征数量 参数类型:int or float
----------------------------
bootstrap:是否有放回的采样 参数类型:bool
----------------------------
bootstrap_features:是否有放回的采样特征 参数类型:bool
#endregion
'''
from sklearn.ensemble import BaggingRegressor
self.model=BaggingRegressor(**paramters)
elif model_name=="RandomForestRegressor":
'''
#region
随机森林回归
---------------------------------------------
paramters: n_estimators:学习器数量 参数类型:int
-------------------------
criterion:评价树的划分指标,可选["mse','mae'] 参数类型:str
----------------------------
bootstrap:是否有放回的采样 参数类型:bool
--------------------------
max_depth:树的最大深度,如不指定,树会一直分割,导致过拟合 参数类型:int
--------------------------
min_samples_split:拆分内部节点所需的最小样本数量 参数类型:int or float
---------------------------
min_samples_leaf:叶节点所需的最小样本数量 参数类型:int or float
------------------------------
min_impurity_decrease:如果继续划分的误差不小于该值,则不会继续划分 参数类型:float
#endregion
'''
from sklearn.ensemble import RandomForestRegressor
self.model=RandomForestRegressor(**paramters)
elif model_name=="AdaBoostRegressor":
'''
#region
https://zhuanlan.zhihu.com/p/39972832
AdaBoost回归,对上一个基模型训练错误的样本给予更高的权重后加入到下一个基学习器,直到达到学习器数量
---------------------------------------------
paramters: base_estimator:基学习器 参数类型:object
-------------------------
n_estimators:学习器数量 参数类型:int
----------------------------
learning_rate:学习率,权重改变的快慢 参数类型:float
--------------------------
loss:更新权重时使用的误差函数,可选["linear","square","exponential"] 参数类型:str
#endregion
'''
from sklearn.ensemble import AdaBoostRegressor
self.model=AdaBoostRegressor(**paramters)
elif model_name=="GradientBoostingRegressor":
'''
#region
https://blog.csdn.net/zhsworld/article/details/102951061
GradientBoosting回归,基学习器为树模型,下一个基学习器学习上一个学习器预测结果的残差
---------------------------------------------
paramters: loss:要优化的是损失函数,可选["ls","lad","huber","quantile"] 参数类型:str
-------------------------
n_estimators:学习器数量 参数类型:int
----------------------------
learning_rate:学习率 参数类型:float
--------------------------
criterion:划分树时使用的评价函数,可选["linear","square","exponential"] 参数类型:str
--------------------------
max_depth:树的最大深度,如不指定,树会一直分割,导致过拟合 参数类型:int
--------------------------
min_samples_split:拆分内部节点所需的最小样本数量 参数类型:int or float
---------------------------
min_samples_leaf:叶节点所需的最小样本数量 参数类型:int or float
------------------------------
min_impurity_decrease:如果继续划分的误差不小于该值,则不会继续划分 参数类型:float
------------------------------
tol:误差控制项 参数类型:float
#endregion
'''
from sklearn.ensemble import GradientBoostingRegressor
self.model=GradientBoostingRegressor(**paramters)
elif model_name=="VotingRegressor":
'''
#region
VotingRegressor回归,多个学习器对同一组样本共同学习,返回它们预测的平均值
---------------------------------------------
paramters: estimators:学习器 如[("学习器名称1",LinearRegression()),("学习器名称2",RandomForestRegressor())] 参数类型:list
-------------------------
weights:学习器数量的权重 参数类型:list
#endregion
'''
from sklearn.ensemble import VotingRegressor
self.model=VotingRegressor(**paramters)
elif model_name=="StackingRegressor":
'''
#region
https://www.cnblogs.com/Christina-Notebook/p/10063146.html
StackingRegressor,将基模型在训练集和测试集的预测值做为元模型的特征,在元模型上得出结果
---------------------------------------------
paramters: estimators:基模型 如[("学习器名称1",LinearRegression()),("学习器名称2",RandomForestRegressor())] 参数类型:list
-------------------------
final_estimator:元模型 参数类型:object
--------------------------
cv:交叉验证数目 参数类型:int
#endregion
'''
from sklearn.ensemble import StackingRegressor
self.model=StackingRegressor(**paramters)
return self.model
def params_search(self,params:dict,model,methods:str="Gridcv",score_name:str="neg_mean_squared_error",cv_num:int=5):
'''
参数搜索:网格搜索和随机搜索
------------------------------
params: 字典类型 如{'C': [1, 10, 100, 1000], 'kernel': ['linear']} 其键名为模型的参数名,值为要搜索的点
如 methods为Randomcv, 字典的值可以是一个分布,如高斯分布,均匀分布等
methods:超参数搜索方法 ["Gridcv","Randomcv"]
model:要搜索的模型,sklearn模型,可以调用select_model()获得
score_name:搜索评价指标,可选["neg_mean_absolute_error","neg_mean_squared_error","r2","max_error"]
cv_num:交叉验证次数
-----------------------------
return:
search_result:搜索结果
cv_results_:搜索过程
best_estimator_:最好的模型
best_score_:最高的指标
best_params_:最好的参数
'''
if methods=="Gridcv":
from sklearn.model_selection import GridSearchCV
clf = GridSearchCV(estimator=model,param_grid=params,cv=cv_num,scoring=score_name)
clf.fit(self.x_train,self.y_train)
elif methods=="Randomcv":
from sklearn.model_selection import RandomizedSearchCV
clf = RandomizedSearchCV(estimator=model,param_grid=params,cv=cv_num,scoring=score_name)
clf.fit(self.x_train,self.y_train)
search_result={}
search_result['cv_results_']=clf.cv_results_
search_result['best_estimator_']=clf.best_estimator_
if score_name=="neg_mean_absolute_error" or score_name=="neg_mean_absolute_error":
search_result['best_score_']=-clf.best_score_
else:
search_result['best_score_']=-clf.best_score_
search_result['best_params_']=clf.best_params_
return search_result
def get_model_results(self):
'''
获取模型在训练集/测试集上的结果
-----------------------------------------
return: model_fit_time:模型拟合时间
model_coef_:模型参数
max_positive_error:最大正误差
min_negetive_error:最小负误差
MAE:平均绝对误差
MSE:均方误差
RMS:均方根误差
R2:决定系数
error_filename:误差分布直方图路径
r2_filename:R2图路径
learning_curve_filename:学习曲线图路径
--------------------------------------------
'''
start=time.time()
self.model.fit(self.x_train,self.y_train)
end=time.time()
model_fit_time=end-start
try:
model_coef_=self.model.coef_.tolist()
except AttributeError:
model_coef_="该模型无法输出参数"
y_test_pre=self.model.predict(self.x_test)
y_test_pre=self.y_scaler.inverse_transform(np.array(y_test_pre)).ravel()
self.y_test=self.y_scaler.inverse_transform(self.y_test).ravel()
#误差记录
ERROR=self.y_test-y_test_pre
max_positive_error=max(ERROR)#最大正误差
min_negetive_error=min(ERROR)#最小负误差
MAE=sklearn.metrics.mean_absolute_error(self.y_test,y_test_pre)#平均绝对误差
MSE=sklearn.metrics.mean_squared_error(self.y_test,y_test_pre)#均方误差
RMS=np.sqrt(MSE)#均方根误差
from sklearn.metrics import r2_score
R2=round(r2_score(self.y_test,y_test_pre),3)#r2_score
filepath_1=os.path.dirname(os.path.abspath(__file__))
#误差分布直方图
error_filename=os.path.join(filepath_1,'error.png')
error=[abs(x) for x in ERROR]
plt.hist(error)
plt.savefig(error_filename)
plt.close()
#R2图
r2_filename=os.path.join(filepath_1,'r2.png')
plt.plot(self.y_test,self.y_test,color='b')
plt.scatter(self.y_test,y_test_pre,color='r')
plt.legend(title=f"r2_score={R2}")
plt.savefig(r2_filename)
plt.close()
#学习曲线
learning_curve_filename=os.path.join(filepath_1,'learning_curve.png')
train_sizes, train_scores, valid_scores = sklearn.model_selection.learning_curve(
self.model,self.x_normalize, self.y_normalize,train_sizes=[0.1,0.3,0.5,0.7,0.9],scoring="neg_median_absolute_error")
train_scores_mean=np.mean(train_scores,axis=1)
valid_scores_mean=np.mean(valid_scores,axis=1)
plt.plot(train_sizes,train_scores_mean, color="r",label='训练曲线')
plt.plot(train_sizes,valid_scores_mean, color="g",label='验证曲线')
plt.legend()
plt.savefig(learning_curve_filename)
plt.close()
return {
"拟合时间":model_fit_time,
"模型参数":model_coef_,
"最大正误差":max_positive_error,
"最小负误差":min_negetive_error,
"平均绝对误差":MAE,
"均方误差":MSE,
"均方根误差":RMS,
"误差分布直方图":error_filename,
"R2 图":r2_filename,
"学习曲线":learning_curve_filename
}
if __name__=="__main__":
    filepath=r"C:\Users\pc\Desktop\test.json"
    # Build the helper around the data file
    s=sklearn_regressor(filepath)
    # Load the data
    s.get_file_data()
    # Split the data set: 30% test, feature columns 1,2,4,5, target column 3
    s.test_train_split(0.3,[1,2,4,5],[3])
    # Choose the model
    s.select_model(model_name='Ridge',paramters={})
    # Hyper-parameter search over a Ridge alpha grid
    dic_=s.params_search({"alpha":[0.001,0.01,0.1,0.5,0.8,1,1.5,2,3,5,10]},model=s.select_model(model_name='Ridge',paramters={}))
    # Evaluate and serialize the results (ensure_ascii=False keeps the Chinese keys readable)
    dic=s.get_model_results()
    dic=json.dumps(dic,ensure_ascii=False)
import numpy as np
from torch import nn
from torch.nn import functional as F
from .global_config import HyperParam, update_hyperparams
class SE_Block(nn.Module):
    """Squeeze-and-Excitation channel-attention block.

    credits: https://github.com/moskomule/senet.pytorch/blob/master/senet/se_module.py
    """

    def __init__(self, c, r=16):
        super().__init__()
        # Global average pool collapses each channel map to a single scalar.
        self.squeeze = nn.AdaptiveAvgPool2d(1)
        # Bottleneck MLP (c -> c//r -> c) ending in a per-channel sigmoid gate.
        self.excitation = nn.Sequential(
            nn.Linear(c, c // r, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(c // r, c, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, channels = x.shape[0], x.shape[1]
        gate = self.squeeze(x).view(batch, channels)
        gate = self.excitation(gate).view(batch, channels, 1, 1)
        # Rescale every channel of x by its learned attention weight.
        return x * gate.expand_as(x)
def conv_se_block(in_c, out_c, se_reduction=None, *args, **kwargs):
    """Conv2d -> BatchNorm (-> optional SE attention) -> ReLU.

    Extra positional/keyword args are forwarded to ``nn.Conv2d``; when
    ``se_reduction`` is None no SE block is inserted.
    """
    layers = [nn.Conv2d(in_c, out_c, *args, **kwargs), nn.BatchNorm2d(out_c)]
    if se_reduction is not None:
        layers.append(SE_Block(out_c, r=se_reduction))
    layers.append(nn.ReLU())
    return nn.Sequential(*layers)
def safelife_cnn_se(input_shape, se_reduction=8):
    """
    SafeLife CNN backbone with SE attention on the first two stages.
    This works best for inputs of size 25x25.

    Parameters
    ----------
    input_shape : tuple of ints
        Height, width, and number of channels for the board.
    se_reduction : int
        Reduction ratio of the SE blocks on the first two stages.

    Returns
    -------
    cnn : torch.nn.Sequential
    output_shape : tuple of ints
        Channels, width, and height of the trunk's output.
    """
    height, width, channels = input_shape
    network = nn.Sequential(
        conv_se_block(channels, 32, se_reduction=se_reduction, kernel_size=5, stride=2),
        conv_se_block(32, 64, se_reduction=se_reduction, kernel_size=3, stride=2),
        conv_se_block(64, 64, se_reduction=None, kernel_size=3, stride=1),
    )

    def out_size(n):
        # Conv arithmetic for (k=5, s=2) -> (k=3, s=2) -> (k=3, s=1), unpadded.
        n = (n - 4 + 1) // 2
        n = (n - 2 + 1) // 2
        return n - 2

    return network, (64, out_size(width), out_size(height))
def safelife_cnn(input_shape):
    """
    Plain three-stage CNN backbone with good defaults for SafeLife.
    This works best for inputs of size 25x25.

    Parameters
    ----------
    input_shape : tuple of ints
        Height, width, and number of channels for the board.

    Returns
    -------
    cnn : torch.nn.Sequential
    output_shape : tuple of ints
        Channels, width, and height of the trunk's output.
    """
    height, width, channels = input_shape
    network = nn.Sequential(
        nn.Conv2d(channels, 32, kernel_size=5, stride=2),
        nn.ReLU(),
        nn.Conv2d(32, 64, kernel_size=3, stride=2),
        nn.ReLU(),
        nn.Conv2d(64, 64, kernel_size=3, stride=1),
        nn.ReLU(),
    )

    def out_size(n):
        # Conv arithmetic for (k=5, s=2) -> (k=3, s=2) -> (k=3, s=1), unpadded.
        n = (n - 4 + 1) // 2
        n = (n - 2 + 1) // 2
        return n - 2

    return network, (64, out_size(width), out_size(height))
class SafeLifeQNetwork(nn.Module):
    """
    Dueling Q-value network for SafeLife observations.

    A shared CNN trunk feeds two heads: per-action advantages A(s, a) and a
    scalar state value V(s), combined as Q = V + A - mean(A).
    """

    def __init__(self, input_shape):
        super().__init__()
        self.cnn, cnn_out_shape = safelife_cnn(input_shape)
        # bugfix: np.product was removed in NumPy 2.0; np.prod is the
        # supported spelling.
        num_features = int(np.prod(cnn_out_shape))
        num_actions = 9
        self.advantages = nn.Sequential(
            nn.Linear(num_features, 256),
            nn.ReLU(),
            nn.Linear(256, num_actions)
        )
        self.value_func = nn.Sequential(
            nn.Linear(num_features, 256),
            nn.ReLU(),
            nn.Linear(256, 1)
        )

    def forward(self, obs):
        # Switch observation to (c, w, h) instead of (h, w, c)
        obs = obs.transpose(-1, -3)
        x = self.cnn(obs).flatten(start_dim=1)
        advantages = self.advantages(x)
        value = self.value_func(x)
        # NOTE(review): .mean() here averages over ALL elements (batch and
        # actions); textbook dueling nets use mean over the action dim only.
        # Kept as-is to preserve behavior.
        qval = value + advantages - advantages.mean()
        return qval
@update_hyperparams
class SafeLifePolicyNetwork(nn.Module):
    """Actor-critic network: shared SE-CNN trunk and dense stack feeding a
    scalar value head and a softmax policy head over 9 actions."""

    # Tunable through the project's hyperparameter registry.
    dense_depth: HyperParam = 1
    dense_width: HyperParam = 512

    def __init__(self, input_shape, r=16):
        super().__init__()
        # self.cnn, cnn_out_shape = safelife_cnn(input_shape)
        self.cnn, cnn_out_shape = safelife_cnn_se(input_shape, se_reduction=r)
        # bugfix: np.product was removed in NumPy 2.0; use np.prod.
        num_features = int(np.prod(cnn_out_shape))
        num_actions = 9
        # NOTE(review): dropout is defined but its use in forward() is
        # commented out; kept so existing checkpoints still load.
        self.dropout = nn.Dropout(0.25)
        dense = [nn.Sequential(nn.Linear(num_features, self.dense_width), nn.ReLU())]
        for _ in range(self.dense_depth - 1):
            dense.append(nn.Sequential(nn.Linear(self.dense_width, self.dense_width), nn.ReLU()))
        self.dense = nn.Sequential(*dense)
        self.logits = nn.Linear(self.dense_width, num_actions)
        self.value_func = nn.Linear(self.dense_width, 1)

    def forward(self, obs):
        # Switch observation to (c, w, h) instead of (h, w, c)
        obs = obs.transpose(-1, -3)
        x = self.cnn(obs)
        # x = self.se(x)  # try applying it before relu after bn
        x = x.flatten(start_dim=1)
        for layer in self.dense:
            x = layer(x)
        # x = self.dropout(x)
        value = self.value_func(x)[..., 0]
        policy = F.softmax(self.logits(x), dim=-1)
        return value, policy
# -*- coding: utf-8 -*-
"""
re-do streamlit with:
a. Toes, SVL, Traplists
b. Toes
"""
import pandas as pd
import numpy as np
import streamlit as st
import itertools
from itertools import chain
def app():
    """Streamlit page: list skinks whose recorded missing toes are a subset
    of the toes selected by the user."""
    st.write("""## Search by missing toes only""")

    #--- 1. Load data
    def load_file(filename):
        # Trap/Toes columns are stored as Python literals; eval turns them
        # back into lists. NOTE(review): eval on file contents is only safe
        # because this CSV is a trusted project asset.
        df = pd.read_csv(filename, converters={'Trap': eval, 'Toes': eval})
        df['Sex'] = df['Sex'].astype(str)
        return df

    df = load_file("data/source10Apr20_02.csv")

    #--- 2. Create filter options
    vals = np.unique([*itertools.chain.from_iterable(df.Toes)])  # all unique toes in the data
    toes_choice = st.multiselect("Select or type missing toes: ", vals)  # list

    #--- 3. Search: keep rows whose Toes set is a subset of the selection.
    # bugfix: .copy() makes an explicit copy so adding the helper column
    # below no longer triggers pandas' SettingWithCopyWarning.
    newdf = df.loc[df['Toes'].apply(lambda x: len(set(x) - set(toes_choice)) == 0)].copy()
    # Sort by number of missing toes, most first; the helper column is dropped after.
    newdf['toes_len'] = newdf.Toes.apply(len)
    newdf = newdf.sort_values(by='toes_len', ascending=False).drop(columns='toes_len')
    #-- optional: remove all rows with *intact* toes
    newdf = newdf[newdf['Toes'].map(len) != 0]
    # first 5 rows weighted, the remaining part of the df ordered by descending - requested by team
    res = pd.concat([newdf.iloc[:5], newdf.iloc[5:].sort_values(by='ID', ascending=False)])
    st.info("The first 5 results are weighted, the remaining results are ordered by descending skink ID number.")
    st.write(" ## Search Results: ##")
    st.table(res.style.set_properties(**{'background-color': '#cfdaaa'}, subset=['ID']))
# -*- coding: utf-8 -*-
# tomolab
# Michele Scipioni
# Harvard University, Martinos Center for Biomedical Imaging
# University of Pisa
# NOTE(review): load_motion_sensor_data is presumably defined further down
# this module (not visible in this chunk) — verify the exported name exists.
__all__ = ["load_motion_sensor_data"]
from ...Transformation.Transformations import Transform_Affine
from ...Transformation import transformations_operations as tr
import numpy as np
import matplotlib.pyplot as plt
import copy
# Default bounding box used as the plot_motion box method's limits.
BOX_MIN = [-50.0, -50.0, -50.0]
BOX_MAX = [50.0, 50.0, 50.0]
# Default motion-event threshold in millimeters.
THRESHOLD_MM = 5.0
LINE_COLOR = "#2f8dff"  # default trace color for motion plots
def quaternion_to_rotation(q, axes="sxyz"):
    """Convert quaternion *q* to Euler angles using the given axes convention."""
    euler = tr.euler_from_quaternion(q, axes)
    return euler
def angle_axis_to_quaternion(angle_rad, axis):
    """Quaternion [w, x, y, z] for a rotation of *angle_rad* about *axis*."""
    half_angle = 0.5 * angle_rad
    s = np.sin(half_angle)
    return np.asarray(
        [np.cos(half_angle), s * axis[0], s * axis[1], s * axis[2]]
    )
def angle_axis_to_rotation(angle_rad, axis):
    """Euler angles for a rotation of *angle_rad* about *axis*."""
    return quaternion_to_rotation(angle_axis_to_quaternion(angle_rad, axis))
def affine_from_quaternion(q, axes="sxyz"):
    # TODO: unimplemented stub — currently always returns None.
    pass
def quaternion_from_matrix(affine):
    """Extract the rotation quaternion from a 4x4 affine matrix."""
    quaternion = tr.quaternion_from_matrix(affine)
    return quaternion
def rad_to_deg(rad):
    """Convert radians (scalar or array-like) to degrees."""
    values = np.asarray(rad)
    return values * 180.0 / np.pi
def deg_to_rad(deg):
    """Convert degrees (scalar or array-like) to radians."""
    values = np.asarray(deg)
    return values / 180.0 * np.pi
class Motion_Sensor:
def __init__(self, filename=None, channels=["B"]):
self._reset()
if filename is not None:
self.load_from_file(filename, channels)
def get_motion_quaternion(self, index):
motion_affine = self.get_motion_affine(index)
return quaternion_from_matrix(motion_affine)
def get_motion_affine(self, index):
return self._motion[index]
def _reset(self):
self._motion = []
self._n_time_points = 0
self._tx = []
self._ty = []
self._tz = []
self._rx = []
self._ry = []
self._rz = []
self._q0 = []
self._q1 = []
self._q2 = []
self._q3 = []
def get_n_time_points(self):
return self._n_time_points
def load_from_file(self, filename, channels=["B"]):
f = open(filename)
self._reset()
while 1:
l = f.readline()
if l == "":
break
d = l.split()
channel = d[0][1]
do_process = False
if channels is None:
do_process = True
elif channels == []:
do_process = True
elif channels == "":
do_process = True
else:
if channel in channels:
do_process = True
else:
do_process = False
if do_process:
data = d[1]
x = np.float32(d[2])
y = np.float32(d[3])
z = np.float32(d[4])
q0 = np.float32(d[5])
q1 = np.float32(d[6])
q2 = np.float32(d[7])
q3 = np.float32(d[8])
q = [q0, q1, q2, q3]
# q = tr.quaternion_conjugate(q)
status = np.int32(d[11])
uncertainty = np.float32(d[12])
self._n_time_points += 1
tra_mat = tr.translation_matrix([x, y, z])
rot_mat = tr.quaternion_matrix(q)
self._motion.append(np.dot(tra_mat, rot_mat))
rotation = quaternion_to_rotation(q)
self._tx.append(x)
self._ty.append(y)
self._tz.append(z)
self._rx.append(rotation[0])
self._ry.append(rotation[1])
self._rz.append(rotation[2])
self._q0.append(q[0])
self._q1.append(q[1])
self._q2.append(q[2])
self._q3.append(q[3])
def _draw_rectangle(
self, axis, x, y, alpha=0.2, ec="gray", fc="gray"
): # "CornflowerBlue"
axis.add_patch(
plt.Rectangle(
(x[0], y[0]),
x[1] - x[0],
y[1] - y[0],
alpha=alpha,
ec=ec,
fc=fc,
visible=True,
)
)
# plt.draw()
def _draw_rectangles(self, axis, windows, range_y):
for ii in range(len(windows)):
tt = windows[ii]
yy = (range_y[0], range_y[1])
self._draw_rectangle(axis, tt, yy)
def _draw_line(
self, axis, x, range_y, color="#ff8d8d", linestyle="dashed", label=""
):
axis.vlines(
x,
range_y[0],
range_y[1],
colors=color,
linestyles=linestyle,
label=label,
visible=True,
)
def _draw_events(
self, axis, time, range_y, events, color="#ff8d8d", linestyle="dashed", label=""
):
for t_index in time:
if t_index:
if t_index < self.get_n_time_points():
if events[t_index - 1]:
# print "Drawing line: ", t_index, range_y, color, linestyle
self._draw_line(axis, t_index, range_y, color, linestyle, label)
def plot_motion(
self,
save_to_file=None,
extract_events_threshold=THRESHOLD_MM,
method="box",
box_min=BOX_MIN,
box_max=BOX_MAX,
min_duration=10,
line_color=LINE_COLOR,
):
t = list(range(len(self._tx)))
# make windows:
if extract_events_threshold is not None:
windows = []
events = self.extract_motion_events(
method, extract_events_threshold, box_min, box_max
)
t_index_start = 0
for t_index in t:
if t_index: # this excludes the possibility of a motion event at time 0
if t_index < self.get_n_time_points():
if events[t_index - 1]:
t_index_end = t_index - 1
if t_index_end - t_index_start > min_duration:
windows.append(
(t_index_start, t_index_end)
) # end window with frame before a motion event
t_index_start = (
t_index + 1
) # start window with frame after a motion event
windows.append((t_index_start, t[-1]))
if 1:
fig1 = plt.figure(1, figsize=(17.5, 4), dpi=200)
ax1 = fig1.add_subplot(321)
ax1.plot(t, self._tx, line_color)
ax1.grid(True)
pr0 = np.float32(self._tx).min()
pr1 = np.float32(self._tx).max()
d = pr1 - pr0
pr = [pr0 - d / 4.0, pr1 + d / 4.0]
ax1.set_ylim(pr)
ax1.set_ylabel("TX [mm]")
# for label in ax1.get_xticklabels():
# label.set_color('r')
if extract_events_threshold is not None:
self._draw_rectangles(ax1, windows, pr)
self._draw_events(ax1, t, pr, events)
ax1 = fig1.add_subplot(323)
ax1.plot(t, self._ty, line_color)
ax1.grid(True)
pr0 = np.float32(self._ty).min()
pr1 = np.float32(self._ty).max()
d = pr1 - pr0
pr = [pr0 - d / 4.0, pr1 + d / 4.0]
ax1.set_ylim(pr)
ax1.set_ylabel("TY [mm]")
# for label in ax1.get_xticklabels():
# label.set_color('r')
if extract_events_threshold is not None:
self._draw_rectangles(ax1, windows, pr)
self._draw_events(ax1, t, pr, events)
ax1 = fig1.add_subplot(325)
ax1.plot(t, self._tz, line_color)
ax1.grid(True)
pr0 = np.float32(self._tz).min()
pr1 = np.float32(self._tz).max()
d = pr1 - pr0
pr = [pr0 - d / 4.0, pr1 + d / 4.0]
ax1.set_ylim(pr)
ax1.set_ylabel("TZ [mm]")
# for label in ax1.get_xticklabels():
# label.set_color('r')
if extract_events_threshold is not None:
self._draw_rectangles(ax1, windows, pr)
self._draw_events(ax1, t, pr, events)
# if save_to_file is not None:
# plt.savefig(save_to_file)
fig2 = fig1 # plt.figure(2)
ax1 = fig2.add_subplot(322)
ax1.plot(t, rad_to_deg(self._rx), line_color)
ax1.grid(True)
pr0 = np.float32(rad_to_deg(self._rx)).min()
pr1 = np.float32(rad_to_deg(self._rx)).max()
d = pr1 - pr0
pr = [pr0 - d / 4.0, pr1 + d / 4.0]
ax1.set_ylim(pr)
ax1.set_ylabel("RX [deg]")
# for label in ax1.get_xticklabels():
# label.set_color('r')
if extract_events_threshold is not None:
self._draw_rectangles(ax1, windows, pr)
self._draw_events(ax1, t, pr, events)
ax1 = fig2.add_subplot(324)
ax1.plot(t, rad_to_deg(self._ry), line_color)
ax1.grid(True)
pr0 = np.float32(rad_to_deg(self._ry)).min()
pr1 = np.float32(rad_to_deg(self._ry)).max()
d = pr1 - pr0
pr = [pr0 - d / 4.0, pr1 + d / 4.0]
ax1.set_ylim(pr)
ax1.set_ylabel("RY [deg]")
# for label in ax1.get_xticklabels():
# label.set_color('r')
if extract_events_threshold is not None:
self._draw_rectangles(ax1, windows, pr)
self._draw_events(ax1, t, pr, events)
ax1 = fig2.add_subplot(326)
ax1.plot(t, rad_to_deg(self._rz), line_color)
ax1.grid(True)
pr0 = np.float32(rad_to_deg(self._rz)).min()
pr1 = np.float32(rad_to_deg(self._rz)).max()
d = pr1 - pr0
pr = [pr0 - d / 4.0, pr1 + d / 4.0]
ax1.set_ylim(pr)
ax1.set_ylabel("RZ [deg]")
# for label in ax1.get_xticklabels():
# label.set_color('r')
if extract_events_threshold is not None:
self._draw_rectangles(ax1, windows, pr)
self._draw_events(ax1, t, pr, events)
# if save_to_file is not None:
# plt.savefig(save_to_file)
plt.show()
# return fig1,fig2
    def get_mean_displacement(
        self, index, method="box", box_min=BOX_MIN, box_max=BOX_MAX
    ):
        """Mean displacement of a reference box under the motion at `index`.

        The motion affine is applied to the 8 corners of the box
        [box_min, box_max]; the per-corner differences are averaged.

        NOTE(review): the active code averages the *signed per-axis sum*
        ``(corners - corners_t).sum(0)``, while the commented-out line and
        the sibling ``get_mean_displacement_variation*`` methods use the
        Euclidean distance.  Confirm whether the signed sum is intentional
        before relying on the magnitude of the returned value.
        """
        # Motion estimate at this time point, wrapped so it can be applied
        # to a matrix of homogeneous points.
        mat = self.get_motion_affine(index)
        mat = Transform_Affine(mat)
        if method == "box":
            b = box_min
            B = box_max
            # 8 corners of the box in homogeneous coordinates, one corner
            # per column after the transpose (shape 4 x 8).
            corners = np.asarray(
                [
                    [b[0], b[1], b[2], 1],
                    [B[0], b[1], b[2], 1],
                    [b[0], B[1], b[2], 1],
                    [B[0], B[1], b[2], 1],
                    [b[0], b[1], B[2], 1],
                    [B[0], b[1], B[2], 1],
                    [b[0], B[1], B[2], 1],
                    [B[0], B[1], B[2], 1],
                ]
            ).transpose()
            corners_t = mat.left_multiply(corners)
            # Zero the homogeneous row so it does not enter the difference.
            corners_t[3, :] = 0
            corners[3, :] = 0
            # print "CORNERS: ", corners_t
            # dist = np.sqrt(((corners-corners_t)**2).sum(0))
            dist = (corners - corners_t).sum(0)
            mean_displ = np.mean(dist)
        else:
            raise ValueError("Method to compute mean displacement is unknown. ")
        return mean_displ
def get_mean_displacement_variation(
self, index, method="box", box_min=BOX_MIN, box_max=BOX_MAX
):
mat = self.get_motion_affine(index)
if index > 0:
mat0 = self.get_motion_affine(index - 1)
else:
mat0 = tr.identity_matrix()
mat = Transform_Affine(mat)
mat0 = Transform_Affine(mat0)
if method == "box":
b = box_min
B = box_max
corners = np.asarray(
[
[b[0], b[1], b[2], 1],
[B[0], b[1], b[2], 1],
[b[0], B[1], b[2], 1],
[B[0], B[1], b[2], 1],
[b[0], b[1], B[2], 1],
[B[0], b[1], B[2], 1],
[b[0], B[1], B[2], 1],
[B[0], B[1], B[2], 1],
]
).transpose()
corners_t = mat.left_multiply(corners)
corners_t[3, :] = 0
corners_t0 = mat0.left_multiply(corners)
corners_t0[3, :] = 0
dist = np.sqrt(((corners_t - corners_t0) ** 2).sum(0))
# dist = (corners-corners_t).sum(0)
mean_displ = np.mean(dist)
else:
raise ValueError("Method to compute mean displacement is unknown. ")
return mean_displ
def get_mean_displacement_variation_since_time(
self, index_new, index_old, method="box", box_min=BOX_MIN, box_max=BOX_MAX
):
mat = self.get_motion_affine(index_new)
mat0 = self.get_motion_affine(index_old)
mat = Transform_Affine(mat)
mat0 = Transform_Affine(mat0)
if method == "box":
b = box_min
B = box_max
corners = np.asarray(
[
[b[0], b[1], b[2], 1],
[B[0], b[1], b[2], 1],
[b[0], B[1], b[2], 1],
[B[0], B[1], b[2], 1],
[b[0], b[1], B[2], 1],
[B[0], b[1], B[2], 1],
[b[0], B[1], B[2], 1],
[B[0], B[1], B[2], 1],
]
).transpose()
corners_t = mat.left_multiply(corners)
corners_t[3, :] = 0
corners_t0 = mat0.left_multiply(corners)
corners_t0[3, :] = 0
dist = np.sqrt(((corners_t - corners_t0) ** 2).sum(0))
# dist = (corners-corners_t).sum(0)
mean_displ = np.mean(dist)
else:
raise ValueError("Method to compute mean displacement is unknown. ")
return mean_displ
def extract_motion_events(
self,
method="box",
threshold=THRESHOLD_MM,
box_min=BOX_MIN,
box_max=BOX_MAX,
prune_distance=50,
):
t = list(range(self.get_n_time_points()))
is_event = np.zeros(len(t) - 1)
t_index_old = 0
for t_index in t[1:]:
# mean_displ = self.get_mean_displacement_variation(t_index, method, box_min, box_max )
# if np.sqrt((mean_displ)**2) >= threshold:
mean_displ = self.get_mean_displacement_variation_since_time(
t_index, t_index_old, method, box_min, box_max
)
if np.sqrt((mean_displ) ** 2) >= threshold:
t_index_old = np.copy(t_index)
is_event[t_index - 1] = 1
else:
is_event[t_index - 1] = 0
if prune_distance >= 2:
last_event = 0
for i in range(len(is_event)):
if is_event[i]:
if (i - last_event) <= prune_distance:
is_event[i] = 0
else:
last_event = i
return is_event
def plot_mean_displacement(
self,
method="box",
box_min=BOX_MIN,
box_max=BOX_MAX,
save_to_file=None,
plot_zero=False,
extract_events_threshold=THRESHOLD_MM,
plot_range=[None, None],
line_color=LINE_COLOR,
min_duration=10,
):
t = list(range(self.get_n_time_points()))
mean_displ = np.zeros(len(t))
mean_displ_var = np.zeros(len(t))
mean_displ_var_since_event = np.zeros(len(t))
if extract_events_threshold is not None:
events = self.extract_motion_events(
method, extract_events_threshold, box_min, box_max
)
t_index_old = 0
for t_index in t:
mean_displ[t_index] = self.get_mean_displacement(
t_index, method, box_min, box_max
)
mean_displ_var[t_index] = self.get_mean_displacement_variation(
t_index, method, box_min, box_max
)
mean_displ_var_since_event[
t_index
] = self.get_mean_displacement_variation_since_time(
t_index, t_index_old, method, box_min, box_max
)
if t_index:
if events[t_index - 1] == 1:
t_index_old = t_index - 1
if not plot_zero:
t = t[1:]
mean_displ = mean_displ[1:]
mean_displ_var = mean_displ_var[1:]
mean_displ_var_since_event = mean_displ_var_since_event[1:]
# make windows:
if extract_events_threshold is not None:
windows = []
events = self.extract_motion_events(
method, extract_events_threshold, box_min, box_max
)
t_index_start = 0
for t_index in t:
if t_index: # this excludes the possibility of a motion event at time 0
if events[t_index - 1]:
t_index_end = t_index - 1
if t_index_end - t_index_start > min_duration:
windows.append(
(t_index_start, t_index_end)
) # end window with frame before a motion event
t_index_start = (
t_index + 1
) # start window with frame after a motion event
windows.append((t_index_start, t[-1]))
# mean_displ[np.where(mean_displ==0)]=-1000
# mean_displ_var[np.where(mean_displ_var==0)]=-1000
# mean_displ_var_since_event[np.where(mean_displ_var_since_event==0)]=-1000
fig = plt.figure(5, figsize=(8, 4), dpi=200)
ax1 = fig.add_subplot(311)
ax1.set_title("Times frames - vNAV")
ax1.plot(t, mean_displ, line_color)
ax1.grid(True)
if plot_range[0] is None:
pr0 = copy.copy(mean_displ.min())
else:
pr0 = copy.copy(plot_range[0])
if plot_range[1] is None:
pr1 = copy.copy(mean_displ.max())
else:
pr1 = copy.copy(plot_range[1])
ax1.set_ylim([pr0, pr1])
ax1.set_ylabel("disp [mm]")
if extract_events_threshold is not None:
ax1.hold(1)
E = events * mean_displ
E[np.where(E < 0.1)] = -1000
ax1.plot(t, E, "r.")
# ax1.plot(t,0.5*(events*(mean_displ.max()-mean_displ.min())+mean_displ.min()),'r.')
# for label in ax1.get_xticklabels():
# label.set_color('r')
self._draw_rectangles(ax1, windows, [pr0, pr1])
self._draw_events(ax1, t, [pr0, pr1], events)
ax1 = fig.add_subplot(312)
# ax1.set_title("Mean displacement delta ")
ax1.plot(t, mean_displ_var, line_color)
ax1.grid(True)
if plot_range[0] is None:
pr0 = copy.copy(mean_displ_var.min())
else:
pr0 = copy.copy(plot_range[0])
if plot_range[1] is None:
pr1 = copy.copy(mean_displ_var.max())
else:
pr1 = copy.copy(plot_range[1])
ax1.set_ylim([pr0, pr1])
ax1.set_ylabel("delta [mm]")
if extract_events_threshold is not None:
ax1.hold(1)
E = events * mean_displ_var
E[np.where(E < 0.1)] = -1000
ax1.plot(t, E, "r.")
# ax1.plot(t,0.5*(events*(mean_displ.max()-mean_displ.min())+mean_displ.min()),'r.')
# for label in ax1.get_xticklabels():
# label.set_color('r')
self._draw_rectangles(ax1, windows, [pr0, pr1])
self._draw_events(ax1, t, [pr0, pr1], events)
ax1 = fig.add_subplot(313)
# ax1.set_title("Mean displacement event")
ax1.plot(t, mean_displ_var_since_event, line_color)
ax1.grid(True)
if plot_range[0] is None:
pr0 = copy.copy(mean_displ_var_since_event.min())
else:
pr0 = copy.copy(plot_range[0])
if plot_range[1] is None:
pr1 = copy.copy(mean_displ_var_since_event.max())
else:
pr1 = copy.copy(plot_range[1])
ax1.set_ylim([pr0, pr1])
ax1.set_ylabel("event [mm]")
if extract_events_threshold is not None:
ax1.hold(1)
E = events * mean_displ_var_since_event
E[np.where(E < 0.1)] = -1000
ax1.plot(t, E, "r.")
# ax1.plot(t,0.5*(events*(mean_displ.max()-mean_displ.min())+mean_displ.min()),'r.')
# for label in ax1.get_xticklabels():
# label.set_color('r')
self._draw_rectangles(ax1, windows, [pr0, pr1])
self._draw_events(ax1, t, [pr0, pr1], events)
if save_to_file is not None:
plt.savefig(save_to_file)
plt.show()
# return fig
def plot_quaternion(self, save_to_file=None, line_color=LINE_COLOR):
t = list(range(self.get_n_time_points()))[1:]
s = rad_to_deg(np.asarray(self._q0))[1:]
fig = plt.figure(6, figsize=(8, 4), dpi=200)
ax1 = fig.add_subplot(211)
ax1.set_title("Rotation agnle [deg] vs. vNAV frame number")
ax1.plot(t, s, line_color)
ax1.grid(True)
pr0 = s.min()
pr1 = s.max()
d = pr1 - pr0
pr = [pr0 - d / 4.0, pr1 + d / 4.0]
ax1.set_ylim(pr)
ax1.set_ylabel("Rotation angle [deg]")
# for label in ax1.get_xticklabels():
# label.set_color('r')
arc = np.zeros(self.get_n_time_points())
v0 = np.asarray(self._q1[0], self._q2[0], self._q3[0])
for t in range(self.get_n_time_points()):
vt = np.asarray(self._q1[t], self._q2[t], self._q3[t])
arc[t] = np.dot(np.transpose(v0), vt)
ax1 = fig.add_subplot(212)
ax1.set_title("Arc vs. vNAV frame number")
t = list(range(self.get_n_time_points()))
ax1.plot(t, arc, line_color)
ax1.grid(True)
pr0 = arc.min()
pr1 = arc.max()
d = pr1 - pr0
pr = [pr0 - d / 4.0, pr1 + d / 4.0]
ax1.set_ylim(pr)
ax1.set_ylabel("Arc [steradians]")
# for label in ax1.get_xticklabels():
# label.set_color('r')
if save_to_file is not None:
plt.savefig(save_to_file)
plt.show()
# return fig
    def _repr_html_(self):
        # IPython/Jupyter rich-display hook: when the object is the last
        # expression in a notebook cell, render the displacement plot.
        # NOTE(review): returns None rather than an HTML string, so the
        # "display" is purely the side effect of plotting.
        self.plot_mean_displacement()
def load_motion_sensor_data(filename, channels=("B",)):
    """Load a motion-sensor log file and return a Motion_Sensor instance.

    Parameters
    ----------
    filename : str
        Path of the log file.
    channels : sequence of str or None
        Channel ids to keep; ``None`` keeps all channels.  The default is
        an immutable tuple instead of the original mutable list literal,
        which was shared between all calls (mutable default argument).
        Membership tests behave identically.
    """
    return Motion_Sensor(filename, channels)
"""
Script for observing something during the day.
- Open / close dome.
- Slew to target.
- Focus cameras.
- Take observations.
- Verify safety at each step (solar distance, weather etc).
NOTE: This script will be superseded by the scheduler when we can impose arbitrary horizon ranges
for a given target.
"""
import argparse
from astropy import units as u
from panoptes.utils.time import current_time
from huntsman.pocs.utils.huntsman import create_huntsman_pocs
if __name__ == "__main__":
    # Command-line options; defaults favour real-hardware operation.
    parser = argparse.ArgumentParser()
    parser.add_argument("--simulate_weather", action="store_true",
                        help="If provided, will run POCS with weather simulator.")
    parser.add_argument("--no_dome", action="store_true",
                        help="If provided, will run POCS with the dome closed e.g. for testing.")
    parser.add_argument("--autofocus_size", type=int, default=1000,
                        help="The autofocus cutout size.")
    parser.add_argument("--with_autoguider", action="store_true", help="Use autoguider?")
    # Parse command line input
    args = parser.parse_args()
    use_weather_simulator = args.simulate_weather
    with_dome = not args.no_dome
    autofocus_size = args.autofocus_size
    with_autoguider = args.with_autoguider
    # Note we use "night" simulator so we can observe in the day
    # Weather simulator is optional because weather reading currently unreliable
    simulators = ["night", "power"]
    if use_weather_simulator:
        simulators.append("weather")
    # Create HuntsmanPOCS instance
    huntsman = create_huntsman_pocs(simulators=simulators, with_dome=with_dome,
                                    with_autoguider=with_autoguider)
    # NOTE: Avoid coarse focusing state because it slews to a fixed position on-sky
    # This position may be too close to the Sun and it is unlikely there will be any stars
    huntsman.observatory.last_coarse_focus_time = current_time()
    huntsman.observatory.last_coarse_focus_temp = huntsman.observatory.temperature
    # Huge tolerance/interval values effectively prevent coarse focus from
    # re-triggering for the duration of this run.
    huntsman.observatory._coarse_focus_temptol = 100 * u.Celsius
    huntsman.observatory._coarse_focus_interval = 100 * u.hour
    # Select the observation and use it to configure focusing exposure times
    # TODO: Do this automatically
    obs_name = huntsman.observatory.scheduler.get_observation()[0]
    observation = huntsman.observatory.scheduler.observations[obs_name]
    # Override the fine focus settings to mimic coarse focus
    # TODO: Set this automatically based on time of day and alt / az?
    for camera in huntsman.observatory.cameras.values():
        # Widen the fine-focus search: set the lower bound of the range and
        # step to the (larger) coarse values stored in index 1.
        autofocus_range = list(camera._proxy.get("autofocus_range", "focuser"))
        autofocus_range[0] = autofocus_range[1]
        camera._proxy.set("autofocus_range", autofocus_range, "focuser")
        autofocus_step = list(camera._proxy.get("autofocus_step", "focuser"))
        autofocus_step[0] = autofocus_step[1]
        camera._proxy.set("autofocus_step", autofocus_step, "focuser")
        # Also override the focusing exposure time
        # TODO: Set this automatically based on time of day and alt / az
        camera._proxy.set("autofocus_seconds", observation.exptime, "focuser")
        # Override the default focusing window size
        # TODO: Remove when Jetsons are on
        camera._proxy.set("autofocus_size", autofocus_size, "focuser")
    # Run the state machine
    # NOTE: We don't have to bypass darks, flats etc because using night simulator
    # NOTE: Bypass initial coarse focus in favour of coarser fine focus
    huntsman.run(initial_focus=False)
# Copyright 2008-2018 pydicom authors. See LICENSE file for details.
"""Unit tests for the JPEG-LS Pixel Data handler."""
import os
import sys
import pytest
import pydicom
from pydicom.filereader import dcmread
from pydicom.data import get_testdata_file
jpeg_ls_missing_message = ("jpeg_ls is not available "
"in this test environment")
jpeg_ls_present_message = "jpeg_ls is being tested"
from pydicom.pixel_data_handlers import numpy_handler
have_numpy_handler = numpy_handler.is_available()
from pydicom.pixel_data_handlers import jpeg_ls_handler
have_jpeg_ls_handler = jpeg_ls_handler.is_available()
test_jpeg_ls_decoder = have_numpy_handler and have_jpeg_ls_handler
# Paths to the pydicom bundled test datasets used by the test classes
# below (get_testdata_file returns an absolute path, or None if the
# file is not available in this installation).
empty_number_tags_name = get_testdata_file(
    "reportsi_with_empty_number_tags.dcm")
rtplan_name = get_testdata_file("rtplan.dcm")
rtdose_name = get_testdata_file("rtdose.dcm")
ct_name = get_testdata_file("CT_small.dcm")
mr_name = get_testdata_file("MR_small.dcm")
truncated_mr_name = get_testdata_file("MR_truncated.dcm")
jpeg2000_name = get_testdata_file("JPEG2000.dcm")
jpeg2000_lossless_name = get_testdata_file(
    "MR_small_jp2klossless.dcm")
jpeg_ls_lossless_name = get_testdata_file(
    "MR_small_jpeg_ls_lossless.dcm")
jpeg_lossy_name = get_testdata_file("JPEG-lossy.dcm")
jpeg_lossless_name = get_testdata_file("JPEG-LL.dcm")
deflate_name = get_testdata_file("image_dfl.dcm")
rtstruct_name = get_testdata_file("rtstruct.dcm")
priv_SQ_name = get_testdata_file("priv_SQ.dcm")
nested_priv_SQ_name = get_testdata_file("nested_priv_SQ.dcm")
meta_missing_tsyntax_name = get_testdata_file(
    "meta_missing_tsyntax.dcm")
no_meta_group_length = get_testdata_file(
    "no_meta_group_length.dcm")
gzip_name = get_testdata_file("zipMR.gz")
color_px_name = get_testdata_file("color-px.dcm")
color_pl_name = get_testdata_file("color-pl.dcm")
explicit_vr_le_no_meta = get_testdata_file(
    "ExplVR_LitEndNoMeta.dcm")
explicit_vr_be_no_meta = get_testdata_file(
    "ExplVR_BigEndNoMeta.dcm")
emri_name = get_testdata_file("emri_small.dcm")
emri_big_endian_name = get_testdata_file(
    "emri_small_big_endian.dcm")
emri_jpeg_ls_lossless = get_testdata_file(
    "emri_small_jpeg_ls_lossless.dcm")
emri_jpeg_2k_lossless = get_testdata_file(
    "emri_small_jpeg_2k_lossless.dcm")
color_3d_jpeg_baseline = get_testdata_file(
    "color3d_jpeg_baseline.dcm")
dir_name = os.path.dirname(sys.argv[0])
save_dir = os.getcwd()
# Aliases accepted by Dataset.decompress(handler_name=...) for JPEG-LS.
SUPPORTED_HANDLER_NAMES = (
    'jpegls', 'jpeg_ls', 'JPEG_LS', 'jpegls_handler', 'JPEG_LS_Handler'
)
class TestJPEGLS_no_jpeg_ls:
    """JPEG-LS datasets must fail to decode when only the numpy handler
    is active (jpeg_ls plugin not in the handler list)."""

    def setup_method(self, method):
        # pytest >= 8 removed support for nose-style ``setup``/``teardown``
        # methods; the xunit-style ``setup_method``/``teardown_method``
        # hooks below are supported by all maintained pytest versions.
        self.jpeg_ls_lossless = dcmread(jpeg_ls_lossless_name)
        self.mr_small = dcmread(mr_name)
        self.emri_jpeg_ls_lossless = dcmread(emri_jpeg_ls_lossless)
        self.emri_small = dcmread(emri_name)
        self.original_handlers = pydicom.config.pixel_data_handlers
        pydicom.config.pixel_data_handlers = [numpy_handler]

    def teardown_method(self, method):
        # Restore the global handler list so later tests see the default.
        pydicom.config.pixel_data_handlers = self.original_handlers

    def test_JPEG_LS_PixelArray(self):
        # Without a JPEG-LS capable handler, accessing pixel_array must
        # raise rather than return garbage.
        with pytest.raises((RuntimeError, NotImplementedError)):
            self.jpeg_ls_lossless.pixel_array
class TestJPEGLS_JPEG2000_no_jpeg_ls:
    """JPEG2000 datasets must fail to decode with only the numpy handler."""

    def setup_method(self, method):
        # xunit-style hook (nose-style ``setup`` was removed in pytest 8).
        self.jpeg_2k = dcmread(jpeg2000_name)
        self.jpeg_2k_lossless = dcmread(jpeg2000_lossless_name)
        self.mr_small = dcmread(mr_name)
        self.emri_jpeg_2k_lossless = dcmread(emri_jpeg_2k_lossless)
        self.emri_small = dcmread(emri_name)
        self.original_handlers = pydicom.config.pixel_data_handlers
        pydicom.config.pixel_data_handlers = [numpy_handler]

    def teardown_method(self, method):
        pydicom.config.pixel_data_handlers = self.original_handlers

    def test_JPEG2000PixelArray(self):
        """JPEG2000: Now works"""
        with pytest.raises(NotImplementedError):
            self.jpeg_2k.pixel_array

    def test_emri_JPEG2000PixelArray(self):
        """JPEG2000: Now works"""
        with pytest.raises(NotImplementedError):
            self.emri_jpeg_2k_lossless.pixel_array
class TestJPEGLS_JPEGlossy_no_jpeg_ls:
    """Lossy JPEG datasets: metadata readable, pixel data must raise with
    only the numpy handler."""

    def setup_method(self, method):
        # xunit-style hook (nose-style ``setup`` was removed in pytest 8).
        self.jpeg_lossy = dcmread(jpeg_lossy_name)
        self.color_3d_jpeg = dcmread(color_3d_jpeg_baseline)
        self.original_handlers = pydicom.config.pixel_data_handlers
        pydicom.config.pixel_data_handlers = [numpy_handler]

    def teardown_method(self, method):
        pydicom.config.pixel_data_handlers = self.original_handlers

    def testJPEGlossy(self):
        """JPEG-lossy: Returns correct values for sample data elements"""
        got = self.jpeg_lossy.DerivationCodeSequence[0].CodeMeaning
        assert 'Lossy Compression' == got

    def testJPEGlossyPixelArray(self):
        """JPEG-lossy: Fails gracefully when uncompressed data is asked for"""
        with pytest.raises(NotImplementedError):
            self.jpeg_lossy.pixel_array

    def testJPEGBaselineColor3DPixelArray(self):
        with pytest.raises(NotImplementedError):
            self.color_3d_jpeg.pixel_array
class TestJPEGLS_JPEGlossless_no_jpeg_ls:
    """Lossless JPEG datasets: metadata readable, pixel data must raise
    with only the numpy handler."""

    def setup_method(self, method):
        # xunit-style hook (nose-style ``setup`` was removed in pytest 8).
        self.jpeg_lossless = dcmread(jpeg_lossless_name)
        self.original_handlers = pydicom.config.pixel_data_handlers
        pydicom.config.pixel_data_handlers = [numpy_handler]

    def teardown_method(self, method):
        pydicom.config.pixel_data_handlers = self.original_handlers

    def testJPEGlossless(self):
        """JPEGlossless: Returns correct values for sample data elements"""
        got = self.\
            jpeg_lossless.\
            SourceImageSequence[0].\
            PurposeOfReferenceCodeSequence[0].CodeMeaning
        assert 'Uncompressed predecessor' == got

    def testJPEGlosslessPixelArray(self):
        """JPEGlossless: Fails gracefully when uncompressed data asked for"""
        with pytest.raises(NotImplementedError):
            self.jpeg_lossless.pixel_array
@pytest.mark.skipif(not test_jpeg_ls_decoder, reason=jpeg_ls_missing_message)
class TestJPEGLS_JPEG_LS_with_jpeg_ls:
    """JPEG-LS lossless decoding round-trips against the uncompressed
    reference datasets when the jpeg_ls handler is available."""

    def setup_method(self, method):
        # xunit-style hook (nose-style ``setup`` was removed in pytest 8).
        self.jpeg_ls_lossless = dcmread(jpeg_ls_lossless_name)
        self.mr_small = dcmread(mr_name)
        self.emri_jpeg_ls_lossless = dcmread(emri_jpeg_ls_lossless)
        self.emri_small = dcmread(emri_name)
        self.original_handlers = pydicom.config.pixel_data_handlers
        pydicom.config.pixel_data_handlers = [jpeg_ls_handler, numpy_handler]

    def teardown_method(self, method):
        pydicom.config.pixel_data_handlers = self.original_handlers

    def test_JPEG_LS_PixelArray(self):
        a = self.jpeg_ls_lossless.pixel_array
        b = self.mr_small.pixel_array
        assert b.mean() == a.mean()
        assert a.flags.writeable

    def test_emri_JPEG_LS_PixelArray(self):
        a = self.emri_jpeg_ls_lossless.pixel_array
        b = self.emri_small.pixel_array
        assert b.mean() == a.mean()
        assert a.flags.writeable

    @pytest.mark.parametrize("handler_name", SUPPORTED_HANDLER_NAMES)
    def test_decompress_using_handler(self, handler_name):
        # Every accepted handler-name alias must decompress identically.
        self.emri_jpeg_ls_lossless.decompress(handler_name=handler_name)
        a = self.emri_jpeg_ls_lossless.pixel_array
        b = self.emri_small.pixel_array
        assert b.mean() == a.mean()
@pytest.mark.skipif(not test_jpeg_ls_decoder, reason=jpeg_ls_missing_message)
class TestJPEGLS_JPEG2000_with_jpeg_ls:
    """The jpeg_ls handler must not claim JPEG2000 data: decoding still
    raises NotImplementedError."""

    def setup_method(self, method):
        # xunit-style hook (nose-style ``setup`` was removed in pytest 8).
        self.jpeg_2k = dcmread(jpeg2000_name)
        self.jpeg_2k_lossless = dcmread(jpeg2000_lossless_name)
        self.mr_small = dcmread(mr_name)
        self.emri_jpeg_2k_lossless = dcmread(emri_jpeg_2k_lossless)
        self.emri_small = dcmread(emri_name)
        self.original_handlers = pydicom.config.pixel_data_handlers
        pydicom.config.pixel_data_handlers = [jpeg_ls_handler, numpy_handler]

    def teardown_method(self, method):
        pydicom.config.pixel_data_handlers = self.original_handlers

    def test_JPEG2000PixelArray(self):
        with pytest.raises(NotImplementedError):
            self.jpeg_2k.pixel_array

    def test_emri_JPEG2000PixelArray(self):
        with pytest.raises(NotImplementedError):
            self.emri_jpeg_2k_lossless.pixel_array
@pytest.mark.skipif(not test_jpeg_ls_decoder, reason=jpeg_ls_missing_message)
class TestJPEGLS_JPEGlossy_with_jpeg_ls:
    """The jpeg_ls handler must not claim lossy JPEG data."""

    def setup_method(self, method):
        # xunit-style hook (nose-style ``setup`` was removed in pytest 8).
        self.jpeg_lossy = dcmread(jpeg_lossy_name)
        self.color_3d_jpeg = dcmread(color_3d_jpeg_baseline)
        self.original_handlers = pydicom.config.pixel_data_handlers
        pydicom.config.pixel_data_handlers = [jpeg_ls_handler, numpy_handler]

    def teardown_method(self, method):
        pydicom.config.pixel_data_handlers = self.original_handlers

    def testJPEGlossy(self):
        """JPEG-lossy: Returns correct values for sample data elements"""
        got = self.jpeg_lossy.DerivationCodeSequence[0].CodeMeaning
        assert 'Lossy Compression' == got

    def testJPEGlossyPixelArray(self):
        with pytest.raises(NotImplementedError):
            self.jpeg_lossy.pixel_array

    def testJPEGBaselineColor3DPixelArray(self):
        with pytest.raises(NotImplementedError):
            self.color_3d_jpeg.pixel_array
@pytest.mark.skipif(not test_jpeg_ls_decoder, reason=jpeg_ls_missing_message)
class TestJPEGLS_JPEGlossless_with_jpeg_ls:
    """The jpeg_ls handler must not claim lossless (non-LS) JPEG data."""

    def setup_method(self, method):
        # xunit-style hook (nose-style ``setup`` was removed in pytest 8).
        self.jpeg_lossless = dcmread(jpeg_lossless_name)
        self.original_handlers = pydicom.config.pixel_data_handlers
        pydicom.config.pixel_data_handlers = [jpeg_ls_handler, numpy_handler]

    def teardown_method(self, method):
        pydicom.config.pixel_data_handlers = self.original_handlers

    def testJPEGlossless(self):
        """JPEGlossless: Returns correct values for sample data elements"""
        got = self.\
            jpeg_lossless.\
            SourceImageSequence[0].\
            PurposeOfReferenceCodeSequence[0].CodeMeaning
        assert 'Uncompressed predecessor' == got

    def testJPEGlosslessPixelArray(self):
        """JPEGlossless: Fails gracefully when uncompressed data asked for"""
        with pytest.raises(NotImplementedError):
            self.jpeg_lossless.pixel_array
# Test the exploration module
import os
import numpy as np
import tempdir
from activepapers.storage import ActivePaper
from activepapers import library
from activepapers.exploration import ActivePaper as ActivePaperExploration
def make_local_paper(filename):
    """Create a small ActivePaper at `filename` with two datasets
    ("frequency", "time") and an importable module ``my_math``.
    """
    paper = ActivePaper(filename, "w")
    try:
        paper.data.create_dataset("frequency", data=0.2)
        paper.data.create_dataset("time", data=0.1*np.arange(100))
        # The module source is stored inside the paper and becomes
        # importable later through the activepapers import hook.
        paper.add_module("my_math",
                         """
import numpy as np
def my_func(x):
    return np.sin(x)
""")
    finally:
        # Close even if dataset/module creation raises, so the underlying
        # HDF5 file is not left open (the original leaked it on error).
        paper.close()
def check_local_paper(filename):
    """Open `filename` via the exploration API and verify its contents
    against a locally recomputed sine wave."""
    ap = ActivePaperExploration(filename)
    # The import must happen *after* opening the paper: opening installs
    # the import hook that exposes modules stored inside the paper.
    from my_math import my_func
    # [...] reads the full dataset into a plain array/scalar.
    frequency = ap.data['frequency'][...]
    time = ap.data['time'][...]
    sine = my_func(2.*np.pi*frequency*time)
    # Exact equality is intentional: the paper's my_func is np.sin itself.
    assert (sine == np.sin(2.*np.pi*frequency*time)).all()
    ap.close()
def test_local_paper():
    """Round-trip: write a paper into a temp dir, then reopen and check it."""
    with tempdir.TempDir() as work_dir:
        paper_path = os.path.join(work_dir, "test.ap")
        make_local_paper(paper_path)
        check_local_paper(paper_path)
# Network-dependent test: only defined when network access is allowed, so
# pytest simply does not collect it in offline environments.
if "NO_NETWORK_ACCESS" not in os.environ:
    def test_published_paper():
        """Download a published paper from figshare and check one result."""
        with tempdir.TempDir() as t:
            # Point the activepapers download library at the temp dir so
            # nothing is cached outside the test sandbox.
            library.library = [t]
            ap = ActivePaperExploration("doi:10.6084/m9.figshare.808595")
            # time_series is a module stored inside the downloaded paper.
            import time_series
            ts = np.arange(10)
            assert time_series.integral(ts, 1)[-1] == 40.5
            ap.close()
#!/usr/bin/env python3
import numpy as np
import cv2
# utility function to display image
def imshow(filename, image):
    """Display `image` in a window titled `filename`.

    Blocks until any key is pressed (waitKey(0)), then destroys all
    OpenCV windows.
    """
    cv2.imshow(filename,image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# Build a 256x256 single-channel image whose i-th row has intensity i:
# a vertical greyscale ramp from black (top) to white (bottom).
# The original filled the image row-by-row in a Python loop while keeping
# a separate `intensity` counter that always equalled the row index; a
# single vectorised broadcast produces the identical uint8 image.
img = np.broadcast_to(
    np.arange(256, dtype=np.uint8).reshape(256, 1, 1), (256, 256, 1)
).copy()
imshow('greyscale range', img)
import numpy as np
import torch
# https://github.com/davisvideochallenge/davis/blob/master/python/lib/davis/measures/jaccard.py
def eval_iou(annotation, segmentation):
""" Compute region similarity as the Jaccard Index.
Arguments:
annotation (ndarray): binary annotation map.
segmentation (ndarray): binary segmentation map.
Return:
jaccard (float): region similarity
"""
annotation = annotation.astype(np.bool)
segmentation = segmentation.astype(np.bool)
if np.isclose(np.sum(annotation), 0) and np.isclose(np.sum(segmentation), 0):
return 1
else:
return np.sum((annotation & segmentation)) / \
np.sum((annotation | segmentation), dtype=np.float32)
# https://github.com/svip-lab/PlanarReconstruction/blob/master/utils/metric.py
# https://github.com/art-programmer/PlaneNet/blob/master/utils.py#L2115
def eval_plane_recall_depth(predSegmentations, gtSegmentations, predDepths, gtDepths, pred_plane_num, threshold=0.5):
    """Per-plane recall as a function of a depth-error tolerance.

    For every (gt plane, predicted plane) pair whose IoU exceeds
    `threshold`, the mean absolute depth difference over the overlap is
    compared against tolerances 0, 0.05, ..., 0.6; recall curves are
    accumulated at both pixel and plane level.

    NOTE(review): assumes predDepths/gtDepths are (h, w) maps and the
    segmentations are either (h, w) label maps or one-hot (h, w, K)
    stacks -- confirm against the caller.  Label 20 in the GT map marks
    non-planar pixels (see inline comment below).

    Returns:
        pixelRecalls  (list of 13 floats): pixel-level recall per tolerance.
        planeStatistics (list of 13 tuples): (recalled gt planes,
            gtNumPlanes, numPredictions) per tolerance.
    """
    predNumPlanes = pred_plane_num  # actually, it is the maximum number of the predicted planes
    if 20 in np.unique(gtSegmentations):  # in GT plane Seg., number '20' indicates non-plane
        gtNumPlanes = len(np.unique(gtSegmentations)) - 1
    else:
        gtNumPlanes = len(np.unique(gtSegmentations))

    # Expand label maps to one-hot (h, w, K) stacks when needed.
    if len(gtSegmentations.shape) == 2:
        gtSegmentations = (np.expand_dims(gtSegmentations, -1) == np.arange(gtNumPlanes)).astype(np.float32)  # h, w, gtNumPlanes
    if len(predSegmentations.shape) == 2:
        predSegmentations = (np.expand_dims(predSegmentations, -1) == np.arange(predNumPlanes)).astype(np.float32)  # h, w, predNumPlanes

    planeAreas = gtSegmentations.sum(axis=(0, 1))  # gt plane pixel number

    # Pairwise overlap of every gt plane with every predicted plane.
    intersectionMask = np.expand_dims(gtSegmentations, -1) * np.expand_dims(predSegmentations, 2) > 0.5  # h, w, gtNumPlanes, predNumPlanes

    depthDiffs = gtDepths - predDepths  # h, w
    depthDiffs = depthDiffs[:, :, np.newaxis, np.newaxis]  # h, w, 1, 1

    intersection = np.sum((intersectionMask).astype(np.float32), axis=(0, 1))  # gtNumPlanes, predNumPlanes

    # Mean |depth error| over each overlap; 1e-4 guards the division, and
    # empty overlaps are forced to a large error (1) below.
    planeDiffs = np.abs(depthDiffs * intersectionMask).sum(axis=(0, 1)) / np.maximum(intersection, 1e-4)  # gtNumPlanes, predNumPlanes
    planeDiffs[intersection < 1e-4] = 1

    union = np.sum(
        ((np.expand_dims(gtSegmentations, -1) + np.expand_dims(predSegmentations, 2)) > 0.5).astype(np.float32),
        axis=(0, 1))  # gtNumPlanes, predNumPlanes
    planeIOUs = intersection / np.maximum(union, 1e-4)  # gtNumPlanes, predNumPlanes

    numPredictions = int(predSegmentations.max(axis=(0, 1)).sum())

    numPixels = planeAreas.sum()

    # Only pairs above the IoU threshold count; others get a huge penalty
    # so they never win the per-gt-plane minimum below.
    IOUMask = (planeIOUs > threshold).astype(np.float32)

    minDiff = np.min(planeDiffs * IOUMask + 1000000 * (1 - IOUMask), axis=1)

    stride = 0.05
    pixelRecalls = []
    planeStatistics = []
    for step in range(int(0.61 / stride + 1)):
        diff = step * stride
        pixelRecalls.append(np.minimum((intersection * (planeDiffs <= diff).astype(np.float32) * IOUMask).sum(1),
                                       planeAreas).sum() / numPixels)
        planeStatistics.append(((minDiff <= diff).sum(), gtNumPlanes, numPredictions))
    return pixelRecalls, planeStatistics
# https://github.com/svip-lab/PlanarReconstruction/blob/master/utils/metric.py
def eval_plane_recall_normal(segmentation, gt_segmentation, param, gt_param, pred_non_plane_idx, threshold=0.5):
    """Per-plane recall as a function of a surface-normal angle tolerance.

    :param segmentation: label map for plane segmentation [h, w] where 20 indicate non-planar
    :param gt_segmentation: ground truth label for plane segmentation where 20 indicate non-planar
    :param param: per-plane normal parameters for the prediction (indexable by plane id, 3-vectors)
    :param gt_param: ground-truth plane parameters; reshaped to (20, 3) below
    :param pred_non_plane_idx: label used for non-planar pixels in `segmentation`
    :param threshold: value for iou
    :return: percentage of correctly predicted ground truth planes correct plane
    """
    # Angle tolerances in DEGREES: 13 values, 0 to 30 in steps of 2.5.
    # NOTE(review): variables below named depth_* actually carry angle
    # differences, not depths.
    depth_threshold_list = np.linspace(0.0, 30, 13)

    # both prediction and ground truth segmentation contains non-planar region which indicated by label 20
    # so we minus one
    pred_plane_idxs = np.unique(segmentation)
    if pred_non_plane_idx in pred_plane_idxs:
        pred_plane_idx_max = pred_plane_idxs[-2]
    else:
        pred_plane_idx_max = pred_plane_idxs[-1]
    plane_num = pred_plane_idx_max + 1

    if 20 in np.unique(gt_segmentation):  # in GT plane Seg., number '20' indicates non-plane
        gt_plane_num = len(np.unique(gt_segmentation)) - 1
    else:
        gt_plane_num = len(np.unique(gt_segmentation))

    # One row per gt plane, one column per angle tolerance.
    plane_recall = np.zeros((gt_plane_num, len(depth_threshold_list)))
    pixel_recall = np.zeros((gt_plane_num, len(depth_threshold_list)))
    plane_area = 0.0

    gt_param = gt_param.reshape(20, 3)

    # check if plane is correctly predict
    for i in range(gt_plane_num):
        gt_plane = gt_segmentation == i
        plane_area += np.sum(gt_plane)
        for j in range(plane_num):
            pred_plane = segmentation == j
            iou = eval_iou(gt_plane, pred_plane)
            if iou > threshold:
                # mean degree difference over overlap region:
                gt_p = gt_param[i]
                pred_p = param[j]
                # Angle between the two unit normals, in degrees
                # (clip guards arccos against rounding outside [-1, 1]).
                n_gt_p = gt_p / np.linalg.norm(gt_p)
                n_pred_p = pred_p / np.linalg.norm(pred_p)
                angle = np.arccos(np.clip(np.dot(n_gt_p, n_pred_p), -1.0, 1.0))
                degree = np.degrees(angle)
                depth_diff = degree

                # compare with threshold difference
                plane_recall[i] = (depth_diff < depth_threshold_list).astype(np.float32)
                pixel_recall[i] = (depth_diff < depth_threshold_list).astype(np.float32) * \
                                  (np.sum(gt_plane * pred_plane))
                # Only the first matching predicted plane is credited.
                break

    pixel_recall = np.sum(pixel_recall, axis=0).reshape(-1) / plane_area

    # Pack per-tolerance plane recall with the gt/pred plane counts.
    plane_recall_new = np.zeros((len(depth_threshold_list), 3))
    plane_recall = np.sum(plane_recall, axis=0).reshape(-1, 1)
    plane_recall_new[:, 0:1] = plane_recall
    plane_recall_new[:, 1] = gt_plane_num
    plane_recall_new[:, 2] = plane_num

    return plane_recall_new, pixel_recall
# https://github.com/svip-lab/PlanarReconstruction/blob/master/utils/metric.py
# https://github.com/yi-ming-qian/interplane/blob/master/utils/metric.py
def evaluateMasks(predSegmentations, gtSegmentations, device, pred_non_plane_idx, gt_non_plane_idx=20, printInfo=False):
    """
    Cluster-comparison metrics between predicted and ground-truth plane
    segmentation maps, computed with torch on `device`.

    :param predSegmentations: (H, W) integer numpy array of predicted plane labels.
    :param gtSegmentations: (H, W) integer numpy array of ground-truth plane labels.
    :param device: torch device used for the computation.
    :param pred_non_plane_idx: if > 0, predicted plane labels are assumed to be
        0..pred_non_plane_idx-1; if -1 or 0, plane labels are assumed to lie
        above gt_non_plane_idx.
    :param gt_non_plane_idx: same convention for the ground-truth labels
        (default 20, matching the dataset's non-plane label).
    :param printInfo: print the metric triple when True.
    :return: [RI, VOI, SC] as plain Python floats (Rand index, variation of
        information, segmentation covering).
    """
    predSegmentations = torch.from_numpy(predSegmentations).to(device)
    gtSegmentations = torch.from_numpy(gtSegmentations).to(device)
    pred_masks = []
    if pred_non_plane_idx > 0:
        for i in range(pred_non_plane_idx):
            mask_i = predSegmentations == i
            mask_i = mask_i.float()
            if mask_i.sum() > 0:
                pred_masks.append(mask_i)
    else:
        assert pred_non_plane_idx == -1 or pred_non_plane_idx == 0
        for i in range(gt_non_plane_idx + 1, 100):
            mask_i = predSegmentations == i
            mask_i = mask_i.float()
            if mask_i.sum() > 0:
                pred_masks.append(mask_i)
    predMasks = torch.stack(pred_masks, dim=0)
    gt_masks = []
    if gt_non_plane_idx > 0:
        for i in range(gt_non_plane_idx):
            mask_i = gtSegmentations == i
            mask_i = mask_i.float()
            if mask_i.sum() > 0:
                gt_masks.append(mask_i)
    else:
        # BUG FIX: this branch guards the ground-truth labelling convention,
        # so it must check gt_non_plane_idx (the original asserted
        # pred_non_plane_idx, copy-pasted from the block above).
        assert gt_non_plane_idx == -1 or gt_non_plane_idx == 0
        for i in range(gt_non_plane_idx+1, 100):
            mask_i = gtSegmentations == i
            mask_i = mask_i.float()
            if mask_i.sum() > 0:
                gt_masks.append(mask_i)
    gtMasks = torch.stack(gt_masks, dim=0)
    # Only pixels covered by at least one GT plane mask enter the metrics.
    valid_mask = (gtMasks.max(0)[0]).unsqueeze(0)
    # Append a residual "everything else" mask so each stack is a partition.
    gtMasks = torch.cat([gtMasks, torch.clamp(1 - gtMasks.sum(0, keepdim=True), min=0)], dim=0)  # M+1, H, W
    predMasks = torch.cat([predMasks, torch.clamp(1 - predMasks.sum(0, keepdim=True), min=0)], dim=0)  # N+1, H, W
    # intersection[i, j] = #pixels in gt mask i AND pred mask j (valid only).
    intersection = (gtMasks.unsqueeze(1) * predMasks * valid_mask).sum(-1).sum(-1).float()
    union = (torch.max(gtMasks.unsqueeze(1), predMasks) * valid_mask).sum(-1).sum(-1).float()
    N = intersection.sum()
    # Rand index over all pixel pairs.
    RI = 1 - ((intersection.sum(0).pow(2).sum() + intersection.sum(1).pow(2).sum()) / 2 - intersection.pow(2).sum()) / (
            N * (N - 1) / 2)
    # Variation of information: H(gt) + H(pred) - 2 * I(gt; pred).
    joint = intersection / N
    marginal_2 = joint.sum(0)
    marginal_1 = joint.sum(1)
    H_1 = (-marginal_1 * torch.log2(marginal_1 + (marginal_1 == 0).float())).sum()
    H_2 = (-marginal_2 * torch.log2(marginal_2 + (marginal_2 == 0).float())).sum()
    B = (marginal_1.unsqueeze(-1) * marginal_2)
    log2_quotient = torch.log2(torch.clamp(joint, 1e-8) / torch.clamp(B, 1e-8)) * (torch.min(joint, B) > 1e-8).float()
    MI = (joint * log2_quotient).sum()
    voi = H_1 + H_2 - 2 * MI
    # Segmentation covering, symmetrised between gt->pred and pred->gt.
    IOU = intersection / torch.clamp(union, min=1)
    SC = ((IOU.max(-1)[0] * torch.clamp((gtMasks * valid_mask).sum(-1).sum(-1), min=1e-4)).sum() / N + (
            IOU.max(0)[0] * torch.clamp((predMasks * valid_mask).sum(-1).sum(-1), min=1e-4)).sum() / N) / 2
    info = [RI.item(), voi.item(), SC.item()]
    if printInfo:
        print('mask statistics', info)
    return info
import torch
import random
import numpy as np
from time import sleep
from copy import deepcopy
from core.common import ParamDict
from core.utilities import decide_device
from torch.multiprocessing import Process, Pipe, Value, Lock
# import interface class instead of its implementation
from core.agent.agent import Agent
from core.model.policy import Policy
from core.filter.filter import Filter
from core.environment.environment import Environment
class Agent_async(Agent):
    """
    An agent class will maintain multiple policy net and multiple environments, each works asynchronously
    useful for most of single agent RL/IL settings

    Architecture (as built in __init__):
      * `threads` environment worker processes, each owning a deep copy of the
        environment and filter, driven by a shared duplex command pipe.
      * `gpu threads` (default 2) policy worker processes, each owning a deep
        copy of the policy, serving actions over per-environment step pipes.
      * `_sync_signal` is a shared counter used as a barrier: -1 terminates
        the workers, 0 means normal running, >0 means a restart/parameter
        sync is in progress (each worker decrements it once it has resynced).
    """
    def __init__(self, config: ParamDict, environment: Environment, policy: Policy, filter_op: Filter):
        """
        Spawn the environment and policy worker sub-processes together with
        the pipes and locks used to communicate with them.

        :param config: must provide "threads" and "gpu"; may provide
            "gpu threads" (number of policy workers, default 2).
        :param environment: prototype environment, deep-copied per env worker.
        :param policy: prototype policy, deep-copied per policy worker.
        :param filter_op: prototype filter, deep-copied per env worker.
        """
        threads, gpu = config.require("threads", "gpu")
        threads_gpu = config["gpu threads"] if "gpu threads" in config else 2
        super(Agent_async, self).__init__(config, environment, policy, filter_op)
        # sync signal, -1: terminate, 0: normal running, >0 restart and waiting for parameter update
        self._sync_signal = Value('i', 0)
        # environment sub-process list
        self._environment_proc = []
        # policy sub-process list
        self._policy_proc = []
        # used for synchronize policy parameters
        self._param_pipe = None
        self._policy_lock = Lock()
        # used for synchronize roll-out commands
        self._control_pipe = None
        self._environment_lock = Lock()
        step_pipe = []
        cmd_pipe_child, cmd_pipe_parent = Pipe(duplex=True)
        param_pipe_child, param_pipe_parent = Pipe(duplex=False)
        self._control_pipe = cmd_pipe_parent
        self._param_pipe = param_pipe_parent
        # One environment worker per rollout thread; each gets its own step
        # pipe + lock pair that the policy workers poll.
        for i_envs in range(threads):
            child_name = f"environment_{i_envs}"
            step_pipe_pi, step_pipe_env = Pipe(duplex=True)
            step_lock = Lock()
            worker_cfg = ParamDict({"seed": self.seed + 1024 + i_envs, "gpu": gpu})
            child = Process(target=Agent_async._environment_worker, name=child_name,
                            args=(worker_cfg, cmd_pipe_child, step_pipe_env, self._environment_lock, step_lock,
                                  self._sync_signal, deepcopy(environment), deepcopy(filter_op)))
            self._environment_proc.append(child)
            step_pipe.append((step_pipe_pi, step_lock))
            child.start()
        # Policy workers share the full list of step pipes and batch requests.
        for i_policies in range(threads_gpu):
            child_name = f"policy_{i_policies}"
            worker_cfg = ParamDict({"seed": self.seed + 2048 + i_policies, "gpu": gpu})
            child = Process(target=Agent_async._policy_worker, name=child_name,
                            args=(worker_cfg, param_pipe_child, step_pipe,
                                  self._policy_lock, self._sync_signal, deepcopy(policy)))
            self._policy_proc.append(child)
            child.start()
        # NOTE(review): fixed grace period for the workers to come up —
        # there is no explicit readiness handshake here.
        sleep(5)
    def __del__(self):
        """
        We should terminate all child-process here
        """
        # Signal all workers to exit their main loops, then join with a
        # timeout and hard-terminate any stragglers.
        self._sync_signal.value = -1
        sleep(1)
        for _pi in self._policy_proc:
            _pi.join(2)
            if _pi.is_alive():
                _pi.terminate()
        for _env in self._environment_proc:
            _env.join(2)
            if _env.is_alive():
                _env.terminate()
        self._control_pipe.close()
        self._param_pipe.close()
    def broadcast(self, config: ParamDict):
        """
        Push new policy/filter state to every worker and enqueue one rollout
        command per requested trajectory ("batch size").

        Blocks until every worker has acknowledged the sync by decrementing
        `_sync_signal` back to zero.
        """
        policy_state, filter_state, max_step, self._batch_size, fixed_env, fixed_policy, fixed_filter = \
            config.require("policy state dict", "filter state dict", "trajectory max step", "batch size",
                           "fixed environment", "fixed policy", "fixed filter")
        self._replay_buffer = []
        policy_state["fixed policy"] = fixed_policy
        filter_state["fixed filter"] = fixed_filter
        cmd = ParamDict({"trajectory max step": max_step,
                         "fixed environment": fixed_env,
                         "filter state dict": filter_state})
        assert self._sync_signal.value < 1, "Last sync event not finished due to some error, some sub-proc maybe died, abort"
        # tell sub-process to reset
        self._sync_signal.value = len(self._policy_proc) + len(self._environment_proc)
        # sync net parameters
        with self._policy_lock:
            for _ in range(len(self._policy_proc)):
                self._param_pipe.send(policy_state)
        # wait for all agents' ready feedback
        while self._sync_signal.value > 0:
            sleep(0.01)
        # sending commands
        with self._environment_lock:
            for _ in range(self._batch_size):
                self._control_pipe.send(cmd)
    def collect(self):
        """
        Non-blocking harvest of finished trajectories.

        :return: None until `_batch_size` trajectories have been received,
            then the filtered batch produced by `operate_trajectoryList`.
        """
        if self._control_pipe.poll(0.1):
            self._replay_buffer.append(self._control_pipe.recv())
        if len(self._replay_buffer) < self._batch_size:
            return None
        else:
            batch = self._filter.operate_trajectoryList(self._replay_buffer)
            return batch
    @staticmethod
    def _environment_worker(setups: ParamDict, pipe_cmd, pipe_step, read_lock, step_lock, sync_signal, environment, filter_op):
        """
        Environment sub-process main loop: waits for rollout commands on
        `pipe_cmd`, exchanges observations/actions with the policy workers
        over `pipe_step`, and sends the completed trajectory back on
        `pipe_cmd`. Exits when `sync_signal` goes negative.
        """
        gpu, seed = setups.require("gpu", "seed")
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        environment.init(display=False)
        filter_op.to_device(torch.device("cpu"))
        filter_op.init()
        # -1: syncing, 0: waiting for command, 1: waiting for action
        local_state = 0
        step_buffer = []
        cmd = None
        def _get_piped_data(pipe, lock):
            # Locked, non-blocking receive; None when nothing is pending.
            with lock:
                if pipe.poll(0.001):
                    return pipe.recv()
                else:
                    return None
        while sync_signal.value >= 0:
            # check sync counter for sync event
            if sync_signal.value > 0 and local_state >= 0:
                # receive sync signal, reset all workspace settings, decrease sync counter,
                # and set state machine to -1 for not init again
                while _get_piped_data(pipe_cmd, read_lock) is not None:
                    pass
                while _get_piped_data(pipe_step, step_lock) is not None:
                    pass
                step_buffer.clear()
                with sync_signal.get_lock():
                    sync_signal.value -= 1
                local_state = -1
            # if sync ends, tell state machine to recover from syncing state, and reset environment
            elif sync_signal.value == 0 and local_state == -1:
                local_state = 0
            # idle and waiting for new command
            elif sync_signal.value == 0 and local_state == 0:
                cmd = _get_piped_data(pipe_cmd, read_lock)
                if cmd is not None:
                    step_buffer.clear()
                    cmd.require("fixed environment", "trajectory max step")
                    current_step = environment.reset(random=not cmd["fixed environment"])
                    filter_op.reset(cmd["filter state dict"])
                    policy_step = filter_op.operate_currentStep(current_step)
                    with step_lock:
                        pipe_step.send(policy_step)
                    local_state = 1
            # waiting for action
            elif sync_signal.value == 0 and local_state == 1:
                last_step = _get_piped_data(pipe_step, step_lock)
                if last_step is not None:
                    last_step, current_step, done = environment.step(last_step)
                    record_step = filter_op.operate_recordStep(last_step)
                    step_buffer.append(record_step)
                    if len(step_buffer) >= cmd["trajectory max step"] or done:
                        traj = filter_op.operate_stepList(step_buffer, done=done)
                        with read_lock:
                            pipe_cmd.send(traj)
                        local_state = 0
                    else:
                        policy_step = filter_op.operate_currentStep(current_step)
                        with step_lock:
                            pipe_step.send(policy_step)
        # finalization
        environment.finalize()
        filter_op.finalize()
        pipe_cmd.close()
        pipe_step.close()
        print("Environment sub-process exited")
    @staticmethod
    def _policy_worker(setups: ParamDict, pipe_param, pipe_steps, read_lock, sync_signal, policy):
        """
        Policy sub-process main loop: polls all environment step pipes,
        batches up to `max_batchsz` pending observations, runs the policy
        without gradients, and sends each action back on its originating
        pipe. Receives parameter updates on `pipe_param` during sync events;
        exits when `sync_signal` goes negative.
        """
        gpu, seed = setups.require("gpu", "seed")
        device = decide_device(gpu)
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        policy.to_device(device)
        policy.init()
        # -1: syncing, 0: waiting for state
        local_state = 0
        max_batchsz = 8
        def _get_piped_data(pipe, lock):
            # Locked, non-blocking receive; None when nothing is pending.
            with lock:
                if pipe.poll():
                    return pipe.recv()
                else:
                    return None
        while sync_signal.value >= 0:
            # check sync counter for sync event, and waiting for new parameters
            if sync_signal.value > 0:
                # receive sync signal, reset all workspace settings, decrease sync counter,
                # and set state machine to -1 for not init again
                for _pipe, _lock in pipe_steps:
                    while _get_piped_data(_pipe, _lock) is not None:
                        pass
                if local_state >= 0:
                    _policy_state = _get_piped_data(pipe_param, read_lock)
                    if _policy_state is not None:
                        # set new parameters
                        policy.reset(_policy_state)
                        with sync_signal.get_lock():
                            sync_signal.value -= 1
                        local_state = -1
                else:
                    sleep(0.01)
            # if sync ends, tell state machine to recover from syncing state, and reset environment
            elif sync_signal.value == 0 and local_state == -1:
                local_state = 0
            # waiting for states (states are list of dicts)
            elif sync_signal.value == 0 and local_state == 0:
                idx = []
                data = []
                for i, (_pipe, _lock) in enumerate(pipe_steps):
                    if len(idx) >= max_batchsz:
                        break
                    _steps = _get_piped_data(_pipe, _lock)
                    if _steps is not None:
                        data.append(_steps)
                        idx.append(i)
                if len(idx) > 0:
                    # prepare for data batch
                    with torch.no_grad():
                        data = policy.step(data)
                    # send back actions
                    for i, d in zip(idx, data):
                        with pipe_steps[i][1]:
                            pipe_steps[i][0].send(d)
                else:
                    sleep(0.00001)
        # finalization
        policy.finalize()
        pipe_param.close()
        for _pipe, _lock in pipe_steps:
            _pipe.close()
        print("Policy sub-process exited")
import logging
import numpy as np
from pylops.basicoperators import Diagonal, BlockDiag, Restriction, \
HStack
from pylops.utils.tapers import taper3d
from pylops.signalprocessing.Sliding2D import _slidingsteps
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.WARNING)
def Sliding3D(Op, dims, dimsd, nwin, nover, nop,
              tapertype='hanning', design=False, nproc=1):
    """3D Sliding transform operator.
    Apply a transform operator ``Op`` repeatedly to patches of the model
    vector in forward mode and patches of the data vector in adjoint mode.
    More specifically, in forward mode the model vector is divided into patches
    each patch is transformed, and patches are then recombined in a sliding
    window fashion. Both model and data should be 3-dimensional
    arrays in nature as they are internally reshaped and interpreted as
    3-dimensional arrays. Each patch contains in fact a portion of the
    array in the first and second dimensions (and the entire third dimension).
    This operator can be used to perform local, overlapping transforms (e.g.,
    :obj:`pylops.signalprocessing.FFTND`
    or :obj:`pylops.signalprocessing.Radon3D`) of 3-dimensional arrays.
    .. note:: The shape of the model has to be consistent with
       the number of windows for this operator not to return an error. As the
       number of windows depends directly on the choice of ``nwin`` and
       ``nover``, it is recommended to use ``design=True`` if unsure about the
       choice ``dims`` and use the number of windows printed on screen to
       define such input parameter.
    .. warning:: Depending on the choice of `nwin` and `nover` as well as the
       size of the data, sliding windows may not cover the entire first and/or
       second dimensions. The start and end indeces of each window can be
       displayed using ``design=True`` while defining the best sliding window
       approach.
    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Transform operator
    dims : :obj:`tuple`
        Shape of 3-dimensional model. Note that ``dims[0]`` and ``dims[1]``
        should be multiple of the model sizes of the transform in the
        first and second dimensions
    dimsd : :obj:`tuple`
        Shape of 3-dimensional data
    nwin : :obj:`tuple`
        Number of samples of window
    nover : :obj:`tuple`
        Number of samples of overlapping part of window
    nop : :obj:`tuple`
        Number of samples in axes of transformed domain associated
        to spatial axes in the data
    tapertype : :obj:`str`, optional
        Type of taper (``hanning``, ``cosine``, ``cosinesquare`` or ``None``)
    design : :obj:`bool`, optional
        Print number sliding window (``True``) or not (``False``)
    nproc : :obj:`int`, optional
        Number of processes used by the internal
        :obj:`pylops.basicoperators.BlockDiag` operators
    Returns
    -------
    Sop : :obj:`pylops.LinearOperator`
        Sliding operator
    Raises
    ------
    ValueError
        Identified number of windows is not consistent with provided model
        shape (``dims``).
    """
    # model windows
    # NOTE(review): nop[1] is used for the first model axis and nop[0] for the
    # second — confirm this axis pairing is intended.
    mwin0_ins, mwin0_ends = _slidingsteps(dims[0],
                                          Op.shape[1]//(nop[1]*dims[2]), 0)
    mwin1_ins, mwin1_ends = _slidingsteps(dims[1],
                                          Op.shape[1]//(nop[0]*dims[2]), 0)
    # data windows
    dwin0_ins, dwin0_ends = _slidingsteps(dimsd[0], nwin[0], nover[0])
    dwin1_ins, dwin1_ends = _slidingsteps(dimsd[1], nwin[1], nover[1])
    nwins0 = len(dwin0_ins)
    nwins1 = len(dwin1_ins)
    nwins = nwins0*nwins1
    # create tapers
    if tapertype is not None:
        tap = taper3d(dimsd[2], nwin, nover, tapertype=tapertype)
    # check that identified number of windows agrees with mode size
    if design:
        logging.warning('(%d,%d) windows required...', nwins0, nwins1)
        logging.warning('model wins - start0:%s, end0:%s, start1:%s, end1:%s',
                        str(mwin0_ins), str(mwin0_ends),
                        str(mwin1_ins), str(mwin1_ends))
        logging.warning('data wins - start0:%s, end0:%s, start1:%s, end1:%s',
                        str(dwin0_ins), str(dwin0_ends),
                        str(dwin1_ins), str(dwin1_ends))
    if nwins*Op.shape[1]//dims[2] != dims[0]*dims[1]:
        raise ValueError('Model shape (dims=%s) is not consistent with chosen '
                         'number of windows. Choose dims[0]=%d and '
                         'dims[1]=%d for the operator to work with '
                         'estimated number of windows, or create '
                         'the operator with design=True to find out the'
                         'optimal number of windows for the current '
                         'model size...'
                         % (str(dims), nwins0*Op.shape[1]//(nop[1]*dims[2]),
                            nwins1 * Op.shape[1]//(nop[0]*dims[2])))
    # transform to apply
    # One copy of Op per window, optionally pre-multiplied by the taper.
    if tapertype is None:
        OOp = BlockDiag([Op for _ in range(nwins)], nproc=nproc)
    else:
        OOp = BlockDiag([Diagonal(tap.flatten()) * Op
                         for _ in range(nwins)], nproc=nproc)
    # Adjoint restrictions scatter each window back to its position along
    # the second data axis (hstack) and then the first (combining0).
    hstack = HStack([Restriction(dimsd[1] * dimsd[2] * nwin[0],
                                 range(win_in, win_end),
                                 dims=(nwin[0], dimsd[1], dimsd[2]),
                                 dir=1).H
                     for win_in, win_end in zip(dwin1_ins,
                                                dwin1_ends)])
    combining1 = BlockDiag([hstack]*nwins0)
    combining0 = HStack([Restriction(np.prod(dimsd),
                                     range(win_in, win_end),
                                     dims=dimsd, dir=0).H
                         for win_in, win_end in zip(dwin0_ins, dwin0_ends)])
    Sop = combining0 * combining1 * OOp
    return Sop
import tensorflow as tf
import numpy as np
'''
Very simple feature extractor. Basically copying anything from MNIST classifier tutorial on tensorflow website, except that we only choose first several layers since we only need features generated in the middle of neural network and the final outputs are not needed.
Architecture as follow:
conv layer 1, ReLU
pool layer 1, max pool
conv layer 2, ReLU
pool layer 2, max pool
'''
def cnn_model(feature):
    """
    Extract mid-level convolutional features from a batch of 28x28
    single-channel images (conv-pool-conv-pool, per the TF MNIST tutorial
    referenced in the module docstring).

    :param feature: tensor reshapable to [batch, 28, 28, 1].
    :return: [batch, 7, 7, 64] feature map (28 -> 14 -> 7 after two 2x2
        pools), matching the 7*7*64 flatten performed by the caller.
    """
    input_layer = tf.reshape(feature,[-1,28,28,1])
    # Convolutional layer 1
    # BUG FIX: the input is 4-D (NHWC), so the 2-D layer variants are
    # required; tf.layers.conv3d / max_pooling3d expect 5-D input and would
    # fail on this reshape.
    conv1 = tf.layers.conv2d(inputs=input_layer,
            filters = 32,
            kernel_size = [5,5],
            padding = "same",
            activation=tf.nn.relu)
    # Pooling layer 1
    # BUG FIX: `strides` is a required argument of max_pooling2d; 2 halves
    # the spatial dimensions as the architecture comment describes.
    pool1 = tf.layers.max_pooling2d(inputs = conv1,
            pool_size=[2,2],
            strides=2)
    # Convolutional layer2
    conv2 = tf.layers.conv2d(inputs = pool1,
            filters = 64,
            kernel_size = [5,5],
            padding = "same",
            activation = tf.nn.relu)
    # Pooling layer 2
    pool2 = tf.layers.max_pooling2d(inputs=conv2,
            pool_size = [2,2],
            strides=2)
    return pool2
def model_fn(features,labels,mode):
    '''Estimator model function.

    A shared CNN feature extractor (per HSV channel) feeds two classifier
    heads: a domain discriminator and a performer discriminator.

    :param features: dict with "hue", "saturation" and "illuminance" tensors.
    :param labels: integer label matrix; columns 1 and 2 are used as the
        domain and performer labels respectively (assumption carried over
        from the original `labels[,1]` / `labels[,2]` — TODO confirm against
        the input pipeline).
    :param mode: a tf.estimator.ModeKeys value.
    '''
    # Feature extraction part, both spatial and temporal features will be extracted
    cnn_hue = cnn_model(features["hue"])
    cnn_saturation = cnn_model(features["saturation"])
    cnn_illuminance = cnn_model(features["illuminance"])
    # Flatten model
    cnn_hue_flat = tf.reshape(cnn_hue,[-1,7*7*64])
    cnn_saturation_flat = tf.reshape(cnn_saturation,[-1,7*7*64])
    cnn_illuminance_flat = tf.reshape(cnn_illuminance,[-1,7*7*64])
    # Combine features from 3 channels
    # BUG FIX: tf.concat requires an axis; concatenate per-sample feature
    # vectors along the feature axis.
    cnn_features = tf.concat([cnn_hue_flat,
                              cnn_saturation_flat,
                              cnn_illuminance_flat], axis=1)
    # BUG FIX: `mode==tf.estimator.Model` is not a valid attribute; dropout
    # must be active only in TRAIN mode.
    is_training = mode == tf.estimator.ModeKeys.TRAIN
    # Domain discriminator
    # Dense layer, dropout regularization
    dense1 = tf.layers.dense(inputs=cnn_features,
                             units=1024,
                             activation = tf.nn.relu)
    dropout1 = tf.layers.dropout(inputs=dense1,
                                 rate = 0.4,
                                 training = is_training)
    # Logits Layer
    logits1 = tf.layers.dense(inputs=dropout1,
                              units=10)
    # Performer discriminator
    # Dense layer, dropout regularization
    dense2 = tf.layers.dense(inputs=cnn_features,
                             units=1024,
                             activation = tf.nn.relu)
    dropout2 = tf.layers.dropout(inputs=dense2,
                                 rate = 0.4,
                                 training = is_training)
    # Logits Layer
    logits2 = tf.layers.dense(inputs=dropout2,
                              units=10)
    predictions = {
        "domain":tf.argmax(input = logits1,axis=1),
        "domain_prob":tf.nn.softmax(logits1,name="softmax_tensor"),
        "performer":tf.argmax(input=logits2,axis=1),
        "performer_prob":tf.nn.softmax(logits2,name="softmax_tensor")
    }
    if mode ==tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode,predictions=predictions)
    # Domain discrimination loss
    # BUG FIX: `labels[,1]` is a syntax error; slice the label columns.
    loss1 = tf.losses.sparse_softmax_cross_entropy(labels=labels[:, 1],logits=logits1)
    # Performer discrimination loss
    loss2 = tf.losses.sparse_softmax_cross_entropy(labels=labels[:, 2],logits=logits2)
    total_loss = loss1 + loss2
    if mode==tf.estimator.ModeKeys.TRAIN:
        adam_op = tf.train.AdamOptimizer()
        # Train Df, Dr
        # BUG FIX: the original minimized loss2 twice; each head trains on
        # its own loss.
        train_op1 = adam_op.minimize(loss=loss1,
                                     global_step = tf.train.get_global_step())
        train_op2 = adam_op.minimize(loss=loss2,
                                     global_step=tf.train.get_global_step())
        # NOTE(review): the original source was truncated here at a bare
        # `optimizer.minimize()` (GradientDescentOptimizer, lr=0.001) with no
        # loss and no return; grouping the two discriminator updates is the
        # most conservative completion — confirm the intended third update.
        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=total_loss,
                                          train_op=tf.group(train_op1, train_op2))
    # EVAL fallback so the estimator contract is satisfied in every mode.
    return tf.estimator.EstimatorSpec(mode=mode, loss=total_loss)
import sys, string, re, os, commands, time
from scipy import stats
import scipy as sp
import numpy as np
################## Class Env_var ######################
class Env_var:
    """One environmental variable: a name plus its numeric values, with
    z-score standardization. (Python 2 module — note the print statements.)"""
    def __init__(self, v):
        # v is one tab-separated line: the variable name followed by values.
        self.std_env_list = []
        self.env_list = []
        v = v.replace('\r\n', '')
        # Accept decimal commas (e.g. European-formatted exports).
        v = v.replace(',', '.')
        data = v.split('\t')
        self.name = data[0]
        variables = data[1::]
        for var in variables:
            self.env_list.append(float(var))
        self.env_data = np.array(self.env_list)
        mean = np.mean(self.env_data)
        std = np.std(self.env_data)
        # (n, 1) column vector of z-scores: (value - mean) / std.
        self.std_env_data = np.ones((len(self.env_data), 1))
        for i in range(0, len(self.env_data)):
            std_var = (self.env_data[i] - mean)/std
            self.std_env_data[i] = std_var
    def print_env_data(self):
        # Debug helper.
        print self.name
        print self.env_data
    def get_std_env(self):
        # Returns the standardized values as a list of one-element lists
        # (the tolist() of an (n, 1) array) — callers strip the brackets.
        self.std_env_list = self.std_env_data.tolist()
        return self.std_env_list
#self.pop = [[0 for x in xrange(2)] for x in xrange(pops)]
####################### Main #############################
def standardize_env(in_file):
    """Standardize every environmental variable in *in_file*.

    Each input line is "name<TAB>value<TAB>value...". The z-scored values are
    written, tab-separated (one variable per line), to "std_" + in_file.

    Returns (output file name, number of variables processed).
    """
    out_file = "std_" + in_file
    raw_lines = open(in_file, 'r').readlines()
    parsed = [Env_var(line) for line in raw_lines]
    # Assemble the whole output table in memory, one row per variable.
    pieces = []
    for env in parsed:
        for value in env.get_std_env():
            # value is a one-element list; the brackets are stripped below.
            pieces.append(str(value) + '\t')
        pieces.append('\n')
    table = ''.join(pieces).replace('[', '').replace(']', '')
    out_handle = open(out_file, 'w')
    out_handle.write(table)
    out_handle.close()
    return out_file, len(parsed)
if __name__ == '__main__':
# Terminate if too few arguments
if len(sys.argv) < 2:
print 'usage: %s <infile>' % sys.argv[0]
sys.exit(-1)
main(sys.argv[1]) | |
import os,sys
import numpy as np
import yaml
import scipy.integrate as integrate
import matplotlib.pyplot as plt
import math
"""
------------
Parameters
"""
with open('configure.yml','r') as conf_para:
    # Parse the YAML configuration. FullLoader avoids arbitrary Python
    # object construction; note the handle name is rebound to the parsed dict.
    conf_para = yaml.load(conf_para,Loader=yaml.FullLoader)
"""
------------
wavefront_initialize
>> input
pixelsize_x = 55e-06,pixelsize_y=55e-06,
fs_size = 2000,ss_size = 2000,
focus_x = 1.2e-3,focus_y = 1.0e-3,
defocus = 400e-6,
det_dist = 14e-03,
ap_x = 40e-06, ap_y= 40e-6,
wl = 7.29e-11,
amplitude_value=0.0
>> output
x_arr,y_arr,xx_arr,yy_arr,wf_dec
"""
def wavefront_initialize(pixelsize_x = 55e-06,pixelsize_y = 55e-06,fs_size = 2000,ss_size = 2000,focus_x = 1.2e-3,focus_y = 1.0e-3,defocus = 400e-6, det_dist = 14e-03, ap_x = 40e-06, ap_y= 40e-6,wl = 7.29e-11,amplitude_value=0.0):
    """Build the object- and detector-plane coordinate grids.

    Returns (x_arr, y_arr, xx_arr, yy_arr, wf_obj): 1-D object-plane
    coordinates, 1-D detector-plane coordinates, and a zero-initialised
    complex object-plane wavefront of shape (len(x_arr), len(y_arr)).
    """
    # Detector-plane array, filled with the requested constant amplitude.
    # NOTE: it is not returned — the object-plane array below is.
    detector_wf = np.zeros((ss_size, fs_size), dtype='complex')
    detector_wf += amplitude_value
    # Physical extent of the detector plane (x, y).
    span_det_x = fs_size * pixelsize_x
    span_det_y = ss_size * pixelsize_y
    # Physical extent of the object plane (x, y).
    span_obj_x = 1.6 * ap_x / focus_x * defocus
    span_obj_y = 1.6 * ap_y / focus_y * defocus
    # Object-plane sample counts dictated by the propagation geometry.
    samples_x = int(span_obj_x * span_det_x / wl / det_dist)
    samples_y = int(span_obj_y * span_det_y / wl / det_dist)
    # Symmetric coordinate grids about the optical axis.
    obj_x = np.linspace(-span_obj_x / 2, span_obj_x / 2, samples_x)
    obj_y = np.linspace(-span_obj_y / 2, span_obj_y / 2, samples_y)
    obj_wf = np.zeros((samples_x, samples_y), dtype='complex')
    det_x = np.linspace(-span_det_x / 2, span_det_x / 2, fs_size, endpoint=False)
    det_y = np.linspace(-span_det_y / 2, span_det_y / 2, ss_size, endpoint=False)
    return obj_x, obj_y, det_x, det_y, obj_wf
"""
lens wavefront
Parameters:
------------
r : coordinates
f : focus of lens
df: defocus of the object
a : alpha, third-order aberration coefficient [rad/mrad^3]
cen_ab : center point of the lens' aberrations
output
------
wavefront_lens,err
"""
def lens_wf(x_arr, y_arr, wf_obj, ap_x = 40e-06,ap_y = 40e-06, focus_x = 1.2e-3, focus_y=1.0e-3, x_abcen = 0.5, y_abcen = 0.5, alpha_x = -0.05, alpha_y = -0.05, wl = 7.29e-11,defocus =400e-06):
    """Compute the lens-plane wavefront by aperture integration.

    NOTE(review): as written this function cannot run — see the inline
    notes on `func`, `ph_y` and `err` below.
    """
    xx = x_arr.copy()
    yy = y_arr.copy()
    # 2-D coordinate grids for the lens plane and the object plane.
    wf_lens = np.array(np.meshgrid(y_arr,x_arr))
    wf_obj_cor = np.array(np.meshgrid(yy,xx))
    wavefront_lens = np.zeros_like(wf_obj,dtype='complex')
    wavenumber = 2*np.pi / wl
    z_dis = focus_y + defocus
    # Magnification factors per axis.
    M_x = (focus_x+defocus)/focus_x
    M_y = (focus_y+defocus)/focus_y
    A = wavenumber/1.j/2/np.pi/z_dis
    ph_0 = wavenumber* 1.j / 2 / z_dis * (wf_obj_cor[0,:,:]**2 + wf_obj_cor[1,:,:]**2) + 1.j*wavenumber*z_dis
    # Aberration center offset relative to the aperture midpoint.
    x_cen = (x_abcen - 0.5)*ap_x
    y_cen = (y_abcen - 0.5)*ap_y
    ph_x = -wavenumber / 2 / M_x / focus_x * wf_lens[0,:,:]**2
    ph_ab_x = alpha_x * 1e9 * ((wf_lens[0,:,:] - x_cen) / focus_x) **3
    # NOTE(review): ph_y divides by the coordinate grid wf_lens[1] instead of
    # focus_y (compare ph_x) — almost certainly a typo; confirm the formula.
    ph_y = -wavenumber / 2 / M_y / wf_lens[1,:,:] * y_arr**2
    ph_ab_y= alpha_y * 1e9 * ((wf_lens[1,:,:] - y_cen) / focus_y) **3
    ph_mix = wavenumber / defocus * (wf_obj_cor[0,:,:]*wf_lens[0,:,:] + wf_obj_cor[1,:,:]*wf_lens[1,:,:])
    # NOTE(review): `func` is a precomputed ndarray, but dblquad below
    # requires a callable f(y, x); this call raises TypeError as written.
    func = np.exp(1.j * (ph_x + ph_ab_x + ph_y + ph_ab_y + ph_mix))
    for i in range(y_arr.size):
        for j in range(x_arr.size):
            wavefront_lens[i][j], err = integrate.dblquad(func,
                                                          -ap_x / 2,
                                                          ap_x / 2,
                                                          -ap_y / 2,
                                                          ap_y / 2,
                                                          args=(),
                                                          epsabs=1e-07,
                                                          epsrel=1e-09)
    wavefront_lens *= A*np.exp(ph_0)
    # NOTE(review): only the error estimate of the LAST loop iteration is
    # returned.
    return wavefront_lens,err
def propagator2d_integrate(x_arr,y_arr,xx_arr,yy_arr,wf_dec,wavefront_lens, det_dist = 14e-03, wl = 7.29e-11 ):
    """Propagate the lens-plane wavefront to the detector plane by a direct
    double integral over the object-plane coordinates.

    :param x_arr, y_arr: 1-D object-plane coordinate arrays.
    :param xx_arr, yy_arr: 1-D detector-plane coordinate arrays.
    :param wf_dec: (yy_arr.size, xx_arr.size) complex array; only its
        shape/dtype are used to size the output.
    :param wavefront_lens: (x_arr.size, y_arr.size) complex field behind
        the lens.
    :param det_dist: lens-to-detector distance [m].
    :param wl: wavelength [m].
    :return: (yy_arr.size, xx_arr.size) complex propagated wavefront.
    """
    # SciPy >= 1.14 removed `simps`; fall back to its new name `simpson`.
    simps = getattr(integrate, "simps", None) or integrate.simpson
    p_xy = np.array(np.meshgrid(y_arr,x_arr))
    det_xy = np.array(np.meshgrid(yy_arr,xx_arr))
    # BUG FIX: was misspelled `wf_progagated`, so the accumulation line below
    # raised NameError.
    wf_propagated = np.zeros_like(wf_dec,dtype='complex')
    wavenumber = 2 * np.pi / wl
    ph = wavenumber / 2 / det_dist
    for i in range(yy_arr.size):
        for j in range(xx_arr.size):
            ph_x = wavenumber/ det_dist * p_xy[0,:,:] * det_xy[0,j,i]
            # BUG FIX: the second phase term must use the second detector
            # coordinate (det_xy[1]); the original reused det_xy[0].
            ph_y = wavenumber/ det_dist * p_xy[1,:,:] * det_xy[1,j,i]
            # NOTE(review): added the missing imaginary unit in the Fourier
            # kernel (exp of a large real phase would overflow) — confirm the
            # sign convention against the derivation used in this file.
            value = wavefront_lens * np.exp(-1.j*(ph_x+ph_y))
            # BUG FIX: assign with '=' ('*=' on a zero-initialised array can
            # only produce zeros) and integrate over the 1-D object-plane
            # sample arrays (the original passed 2-D phase arrays as
            # abscissae, which simps rejects).
            wf_propagated[i][j] = np.exp(1.j*ph) * simps(simps(value, y_arr), x_arr)
    return wf_propagated
# Demo driver: small 20x20 grids so the direct integrals stay tractable.
# NOTE(review): wl=7.29e-4 here differs from the 7.29e-11 default used by the
# functions above — confirm which wavelength is intended.
x_arr,y_arr,xx_arr,yy_arr,wf_dec = wavefront_initialize(pixelsize_x = 55e-06,pixelsize_y = 55e-06,fs_size = 20,ss_size = 20,focus_x = 1.2e-3,focus_y = 1.0e-3,defocus = 400e-6, det_dist = 14e-03, ap_x = 40e-06, ap_y= 40e-6,wl = 7.29e-4,amplitude_value=0.0)
# NOTE(review): lens_wf's signature is (x_arr, y_arr, wf_obj, ap_x=..., ...),
# so xx_arr is passed as wf_obj and yy_arr as ap_x here — likely unintended.
wavefront_lens,err = lens_wf(x_arr, y_arr, xx_arr, yy_arr, wf_dec)
wf_progagated = propagator2d_integrate(x_arr,y_arr,xx_arr,yy_arr,wf_dec,wavefront_lens, det_dist = 14e-03, wl = 7.29e-11 )
fig,(ax1, ax2) = plt.subplots(1,2)
ax1.set_title('amplitude')
im1 = ax1.imshow(np.real(wf_progagated))
ax2.set_title('phase')
# NOTE(review): np.unwrap expects a real phase array; applying it to a
# complex wavefront (and then np.imag) is questionable — confirm intent.
im2 = ax2.imshow(np.imag(np.unwrap(wf_progagated)))
plt.tight_layout()
# Make space for title
plt.subplots_adjust(top=0.85)
plt.show()
from tkinter import *
root = Tk()
root.withdraw()
from sklearn import linear_model
import numpy as np
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk)
# Implement the default Matplotlib key bindings.
import matplotlib.pyplot as plt
import numpy as np
class Custombox:
    """Tkinter dialog that asks for a stock ticker, downloads its Yahoo
    Finance price history, fits Low ~ High with a linear regression and
    reports/plots the prediction.

    NOTE(review): the Enter-button callback `store` stashes its results as
    attributes on the function object itself (store.data, store.High, ...)
    and references the module-level instance `a` for display.
    """
    def __init__(self, title, text):
        self.title = title
        self.text = text
        def store():
            self.new = self.entry.get() #storing data from entry box onto variable
            self.new = self.new.upper()
            import datetime
            import pandas as pd
            import matplotlib.pyplot as plt
            from sklearn import linear_model
            import numpy as np
            name_list = []
            ticker_any = self.new
            ticker_any = ticker_any.upper()
            og_link = "https://finance.yahoo.com/quote/AAPL?p=AAPL&.tsrc=fin-srch"
            stock_link = "https://finance.yahoo.com/quote/" + ticker_any + "?p=" + ticker_any + "&.tsrc=fin-srch"
            csv_link = "https://query1.finance.yahoo.com/v7/finance/download/" + ticker_any + "?period1=-252374400&period2=11635348709&interval=1d&events=history&includeAdjustedClose=true"
            import urllib.request
            user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
            url = "http://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers"
            headers={'User-Agent':user_agent,}
            request=urllib.request.Request(csv_link,None,headers) #The assembled request
            response = urllib.request.urlopen(request)
            store.data = response.read()
            # NOTE(review): values.csv is written but never closed explicitly.
            csv_file = open('values.csv', 'wb')
            csv_file.write(store.data)
            # NOTE(review): the CSV is downloaded a second time here (and a
            # third time further below) via pandas.
            df = pd.read_csv(csv_link)
            #data = df
            store.data = df.dropna()
            bruh = pd.DataFrame(store.data)
            #print(data)
            #print(data)
            print(bruh)
            print(bruh.iloc[[-1]])
            new_high = bruh["High"].iloc[-1]
            new_low = bruh["Low"].iloc[-1]
            #new_high = input('Latest High: ')
            # new_low = input('Latest Low: ')
            store.High=pd.DataFrame(store.data['High'])
            store.Low=pd.DataFrame(store.data['Low'])
            # Fit Low as a linear function of High.
            lm = linear_model.LinearRegression()
            model = lm.fit(store.High, store.Low)
            import numpy as np
            High_new=np.array([float(new_high)])
            Low_new=np.array([float(new_low)])
            High_new = High_new.reshape(-1,1)
            Low_new = Low_new.reshape(-1,1)
            High_predict=model.predict(High_new)
            Low_predict=model.predict(Low_new)
            print("Predicted High: ")
            print(High_predict)
            print("Predicted Low: ")
            print(Low_predict)
            print("Model Score: ")
            print(model.score(store.High, store.Low))
            print("Dollar Change($)")
            print((High_predict - Low_predict).astype(float))
            # NOTE(review): stores the bound method itself, not its value.
            store.modelscore = model.score
            store.dollarchange = ((High_predict - Low_predict).astype(float))
            df = pd.read_csv(csv_link)
            #data = df
            data = df.dropna()
            bruh = pd.DataFrame(data)
            new_high = bruh["High"].iloc[-1]
            new_low = bruh["Low"].iloc[-1]
            # NOTE(review): this refit duplicates the block above verbatim.
            lm = linear_model.LinearRegression()
            model = lm.fit(store.High, store.Low)
            import numpy as np
            High_new=np.array([float(new_high)])
            Low_new=np.array([float(new_low)])
            High_new = High_new.reshape(-1,1)
            Low_new = Low_new.reshape(-1,1)
            store.High_predict=model.predict(High_new)
            store.Low_predict=model.predict(Low_new)
            (store.data).plot(kind='scatter', x='High', y='Low')
            plt.scatter(store.High,store.Low)
            plt.plot(store.High, store.Low, '.r-')
            x1 = store.High.iloc[0,:]
            y1 = store.Low.iloc[0,:]
            m, b = np.polyfit(x1, y1, 1)
            plt.plot(x1, y1, 'b')
            plt.plot(x1, m*x1 + b)
            store.High_predict = np.squeeze(store.High_predict)[()]
            store.Low_predict = np.squeeze(store.Low_predict)[()]
            # Display the result in the dialog via the module-level instance.
            a.change(f"High predict: {store.High_predict} Low predict: {store.Low_predict}")
        def meow():
            # NOTE(review): plt is imported inside store(), so clicking
            # "Graph" before "Enter" raises NameError here.
            plt.show()
        # Build the dialog widgets.
        self.win = Toplevel()
        self.win.title(self.title)
        # self.win.geometry('400x150')
        self.win.wm_attributes('-topmost', True)
        self.label = Label(self.win, text=self.text)
        self.label.grid(row=0, column=0, pady=(20, 10),columnspan=3,sticky='w',padx=10)
        self.l = Label(self.win)
        self.entry = Entry(self.win, width=50)
        self.entry.grid(row=1, column=1,columnspan=2,padx=10)
        self.graph = Button(self.win, text='Graph', width=10,command=meow)
        self.graph.grid(row=3, column=2,pady=10)
        self.b2 = Button(self.win, text='Cancel', width=10,command=self.win.destroy)
        self.b2.grid(row=3, column=3,pady=10)
        # NOTE(review): self.b2 is rebound here, dropping the Cancel button
        # reference (the widget itself stays alive in the grid).
        self.b2 = Button(self.win, text='Enter', width=10,command=store)
        self.b2.grid(row=3, column=1,pady=10)
    def __str__(self):
        # Only valid after store() has set self.new.
        return str(self.new)
    def change(self,ran_text):
        # Update the result label shown between the entry and the buttons.
        self.l.config(text=ran_text,font=(0,12))
        self.l.grid(row=2,column=1,columnspan=3,sticky='nsew',pady=5)
# Build the dialog (referenced as module-level `a` by its own callback) and
# hand control to Tk's event loop.
a = Custombox('Linear Regression Stock Calculator', 'Enter a stock ticker')
root.mainloop()
import numpy as np
import os
import geopandas as gpd
import pandas as pd
# Seattle traffic-flow-count GeoJSON exports, one per year:
# index 0 -> 2007 ... index 11 -> 2018 (get_gdf selects by `year - 7`).
url_list = \
['https://opendata.arcgis.com/datasets/7015d5d46a284f94ac05c2ea4358bcd7_0.geojson', # noqa: E501
 'https://opendata.arcgis.com/datasets/5fc63b2a48474100b560a7d98b5097d7_1.geojson', # noqa: E501
 'https://opendata.arcgis.com/datasets/27af9a2485c5442bb061fa7e881d7022_2.geojson', # noqa: E501
 'https://opendata.arcgis.com/datasets/4f62515558174f53979b3be0335004d3_3.geojson', # noqa: E501
 'https://opendata.arcgis.com/datasets/29f801d03c9b4b608bca6a8e497278c3_4.geojson', # noqa: E501
 'https://opendata.arcgis.com/datasets/a0019dd0d6464747a88921f5e103d509_5.geojson', # noqa: E501
 'https://opendata.arcgis.com/datasets/40bcfbc4054549ebba8b5777bbdd40ff_6.geojson', # noqa: E501
 'https://opendata.arcgis.com/datasets/16cedd233d914118a275c6510115d466_7.geojson', # noqa: E501
 'https://opendata.arcgis.com/datasets/902fd604ecf54adf8579894508cacc68_8.geojson', # noqa: E501
 'https://opendata.arcgis.com/datasets/170b764c52f34c9497720c0463f3b58b_9.geojson', # noqa: E501
 'https://opendata.arcgis.com/datasets/2c37babc94d64bbb938a9b520bc5538c_10.geojson', # noqa: E501
 'https://opendata.arcgis.com/datasets/a35aa9249110472ba2c69cc574eff984_11.geojson'] # noqa: E501
def get_tracts():
    """Return a cleaned GeoDataFrame of Seattle census-tract boundaries.

    Keeps only the tract identifier (cast to float and renamed 'Tract'),
    the shape area and the geometry.
    """
    tracts_url = \
        'https://opendata.arcgis.com/datasets/de58dc3e1efc49b782ab357e044ea20c_9.geojson' # noqa: E501
    raw = gpd.read_file(tracts_url)
    cleaned = raw.loc[:, ['NAME10', 'SHAPE_Area', 'geometry']]
    cleaned['NAME10'] = cleaned['NAME10'].astype(float)
    cleaned.rename(columns={'NAME10': 'Tract'}, inplace=True)
    return cleaned
def get_tractcenters():
    """Return census-tract centroids as a GeoDataFrame (same columns as
    get_tracts(), with each geometry replaced by its centroid)."""
    centers = get_tracts().copy()
    centers['geometry'] = centers['geometry'].centroid
    return centers
def get_zips():
    """Return zip-code boundaries restricted to tract coverage.

    Zip polygons are spatially joined to the census tracts, then dissolved
    back to one polygon per zip code with columns
    ['ZIPCODE', 'SHAPE_Area', 'geometry'].
    """
    zips_url = \
        'https://opendata.arcgis.com/datasets/83fc2e72903343aabff6de8cb445b81c_2.geojson' # noqa: E501
    raw = gpd.read_file(zips_url)
    cleaned = raw.loc[:, ['ZIPCODE', 'SHAPE_Area', 'geometry']]
    cleaned['ZIPCODE'] = cleaned['ZIPCODE'].astype(int)
    cleaned.head()
    joined = gpd.sjoin(cleaned, get_tracts(), op='intersects')
    joined = joined[['ZIPCODE', 'SHAPE_Area_left', 'geometry']]
    dissolved = joined.dissolve(by='ZIPCODE')
    dissolved.rename(columns={'SHAPE_Area_left': 'SHAPE_Area'}, inplace=True)
    dissolved.reset_index(inplace=True)
    return dissolved[['ZIPCODE', 'SHAPE_Area', 'geometry']]
def get_gdf(year):
    '''
    Return the raw traffic flow count GeoDataFrame for one year.

    Enter the desired year
    to download the traffic flow count data for that year.
    Example: enter '7' for the year 2007.
    '''
    # url_list starts at 2007, so year 7 maps to index 0.
    num = year-7
    gdf_year = gpd.read_file(url_list[num])
    # Each yearly release uses different column names; normalize them to
    # YEAR / AAWDT / GEOBASID / STNAME below.
    if year == 11:
        gdf_year = gdf_year.rename(columns={"YEAR_": 'YEAR'})
        # NOTE(review): this street is excluded only for 2011 -- presumably
        # a known bad record in that release; confirm.
        gdf_year = gdf_year[gdf_year.STNAME != '16TH AVE S']
    if year == 12:
        gdf_year = gdf_year.rename(columns={'STDY_YEAR': 'YEAR'})
    if year == 15 or year == 16:
        gdf_year = gdf_year.rename(columns={"COUNTAAWDT": 'AAWDT',
                                            "FLOWSEGID": "GEOBASID",
                                            'FIRST_STNAME_ORD': 'STNAME'})
        gdf_year = gdf_year[['AAWDT', 'GEOBASID', 'STNAME',
                             'SHAPE_Length', 'geometry']]
        # 2015/2016 files carry no year column; add it explicitly.
        if year == 15:
            year_list = [2015]*len(gdf_year)
            gdf_year['YEAR'] = year_list
        elif year == 16:
            year_list = [2016]*len(gdf_year)
            gdf_year['YEAR'] = year_list
    elif year == 17 or year == 18:
        gdf_year = gdf_year.rename(columns={"AWDT": 'AAWDT',
                                            "FLOWSEGID": "GEOBASID",
                                            'STNAME_ORD': 'STNAME'})
        gdf_year = gdf_year[['AAWDT', 'GEOBASID',
                             'STNAME', 'SHAPE_Length',
                             'geometry']]
        # 2017/2018 files also carry no year column.
        if year == 17:
            year_list = [2017]*len(gdf_year)
            gdf_year['YEAR'] = year_list
        elif year == 18:
            year_list = [2018]*len(gdf_year)
            gdf_year['YEAR'] = year_list
    # Standardize column order and drop rows with missing or zero year.
    gdf_year = gdf_year[['YEAR', 'AAWDT',
                         'GEOBASID', 'STNAME',
                         'SHAPE_Length', 'geometry']]
    gdf_year = gdf_year[gdf_year.YEAR != 0]
    gdf_year = gdf_year[gdf_year.YEAR.notnull()]
    return gdf_year
def get_traffic(year):
    '''
    Return a DataFrame of total traffic volume (AAWDT)
    by zip code for a given year (e.g. 7 for 2007).
    '''
    gdf_test = get_gdf(year)
    # Represent each street segment by its midpoint so that a segment is
    # counted in exactly one zip code.
    midpoints = gdf_test.copy()
    midpoints['MIDPOINT'] = \
        gdf_test['geometry'].interpolate(0.5, normalized=True)
    midpoint_columns = ['YEAR', 'AAWDT', 'MIDPOINT']
    midpoint_cleaned = midpoints.loc[:, midpoint_columns]
    midpoint_cleaned['geometry'] = midpoint_cleaned['MIDPOINT']
    zip_mids = gpd.sjoin(get_zips(), midpoint_cleaned, op='contains')
    # `drop` already returns a new frame, so no intermediate copies needed.
    # (Removed a no-op `drop(columns=['geometry'])` whose result was
    # discarded -- the geometry column is dropped for real below.)
    zip_mids_clean = zip_mids.drop(columns=['SHAPE_Area',
                                            'index_right',
                                            'MIDPOINT'])
    zip_mids_clean.drop_duplicates(inplace=True)
    # Sum AAWDT over all segment midpoints inside each zip code.
    zip_traffic = zip_mids_clean.dissolve(by=['ZIPCODE'], aggfunc=sum)
    zip_traffic.drop(columns=['geometry'], inplace=True)
    # The dissolve also summed YEAR; overwrite it with the real year.
    zip_traffic['YEAR'] = year + 2000
    zip_traffic.reset_index(inplace=True)
    zip_traffic = zip_traffic[['ZIPCODE', 'YEAR', 'AAWDT']]
    return zip_traffic
def get_alldata():
    """
    This function returns a GeoDataFrame of population,
    bike rack capacities, bike lane lengths, and traffic volume
    by zip code and year.

    Each nested helper downloads one raw dataset from Seattle Open GIS and
    aggregates it by zip code (and year, where applicable); the four
    results are then merged on ['Year', 'ZIPCODE'].

    NOTE(review): DataFrame.append (used in get_pop/get_alltraffic) was
    removed in pandas 2.0 -- this module presumably targets an older
    pandas; confirm before upgrading.
    """
    def get_racks():
        """
        This function returns a GeoDataFrame of bike rack capacities
        by zip code and year.
        """
        # This data is downloaded from Seattle Open GIS
        racks_url = \
            'https://opendata.arcgis.com/datasets/f86c29ce743e47819e588c3d643ceb63_0.geojson'  # noqa: E501
        r = gpd.read_file(racks_url)
        # Selects wanted columns of dataframe, drops null values,
        # and puts install date into terms of years to match other data
        racks = r[['INSTALL_DATE', 'RACK_CAPACITY', 'geometry']]
        racks = racks[racks.INSTALL_DATE.notnull()]
        racks['Year'] = pd.DatetimeIndex(racks['INSTALL_DATE']).year
        racks.drop(columns='INSTALL_DATE', inplace=True)
        # Filters dataframe to include only years 2007 - 2018
        filter1 = racks['Year'] >= 2007
        filter2 = racks['Year'] <= 2018
        racks_filtered = racks[filter1 & filter2]
        # Spatially joins bike racks dataframe
        # with zip code boundaries dataframe
        racks_zips = gpd.sjoin(get_zips(), racks_filtered, op='contains')
        racks_zips.reset_index(inplace=True)
        # Dissolves data by zip code and year
        zips_racks = racks_zips.dissolve(by=['ZIPCODE', 'Year'], aggfunc=sum)
        zips_racks.reset_index(inplace=True)
        # Selects relevant columns
        zips_racks_cleaned = zips_racks[['ZIPCODE', 'Year', 'RACK_CAPACITY']]
        # Inserts cumulative bike rack capacities over time
        # NOTE(review): zips_racks_cleaned is a slice of zips_racks; the
        # .loc assignment below may raise SettingWithCopyWarning -- confirm
        # the cumulative values land as intended.
        zip_list = zips_racks_cleaned.ZIPCODE.unique()
        for zipcode in zip_list:
            indices = \
                zips_racks_cleaned[zips_racks_cleaned.ZIPCODE == zipcode].index
            zips_racks_cleaned.loc[indices, 'RACK_CAPACITY'] = \
                zips_racks_cleaned.loc[indices, 'RACK_CAPACITY'].cumsum()
        return zips_racks_cleaned
    def get_lanes():
        """
        This function returns a GeoDataFrame of existing bike lane lengths
        by zip code and year.
        """
        # Downloads data from Seattle Open GIS
        bike_lanes_url = \
            'https://gisdata.seattle.gov/server/rest/services/SDOT/SDOT_Bikes/MapServer/1/query?where=1%3D1&outFields=OBJECTID,STREET_NAME,LENGTH_MILES,SHAPE,DATE_COMPLETED,SHAPE_Length&outSR=4326&f=json'  # noqa: E501
        input_lane = gpd.read_file(bike_lanes_url)
        # Initial selection of relevant columns
        lane_columns = ['LENGTH_MILES', 'DATE_COMPLETED', 'geometry']
        bike_lanes = input_lane[lane_columns]
        # Converts the date completed column to year
        # (DATE_COMPLETED arrives as epoch milliseconds, hence unit='ms')
        bike_lanes['DATE_COMPLETED'] = \
            pd.to_datetime(bike_lanes['DATE_COMPLETED'], unit='ms')
        bike_lanes['Year'] = \
            pd.DatetimeIndex(bike_lanes['DATE_COMPLETED']).year
        bike_lanes.drop(columns='DATE_COMPLETED', inplace=True)
        bike_lanes['Year'] = bike_lanes['Year'].fillna(0)
        # Builds a baseline of bike lanes before 2007
        # to add cumulatively to the 2007-2018 data
        bike_lanes_baseline = bike_lanes[bike_lanes['Year'] < 2007]
        zips_lanes_base = \
            gpd.sjoin(get_zips(), bike_lanes_baseline, op='intersects')
        zips_lanes_base.reset_index(inplace=True)
        zips_lanes_base = zips_lanes_base.dissolve(by='ZIPCODE', aggfunc='sum')
        zips_lanes_base.reset_index(inplace=True)
        # Filters dataframe to include only years 2007 - 2018
        filter1 = bike_lanes['Year'] >= 2007
        filter2 = bike_lanes['Year'] <= 2018
        bike_lanes_filtered = bike_lanes[filter1 & filter2]
        # Spatially joins bike lanes dataframe
        # with zip code boundaries dataframe
        zips_lanes = gpd.sjoin(get_zips(),
                               bike_lanes_filtered, op='intersects')
        zips_lanes.reset_index(inplace=True)
        # Dissolves data by zip code and year
        zips_lanes = zips_lanes.dissolve(by=['ZIPCODE', 'Year'], aggfunc=sum)
        zips_lanes.reset_index(inplace=True)
        # Selects relevant columns and renames quantity variable for clarity
        zips_lanes_cleaned = zips_lanes[['ZIPCODE', 'Year', "LENGTH_MILES"]]
        zips_lanes_cleaned.rename(columns={'LENGTH_MILES': 'Miles_Bike_Lanes'},
                                  inplace=True)
        # Inserts cumulative bike lane lengths over time
        # NOTE(review): zips_lanes_base computed above is never merged in --
        # the pre-2007 baseline appears unused; confirm whether that is
        # intentional.
        zip_list = zips_lanes_cleaned.ZIPCODE.unique()
        for zipcode in zip_list:
            indices = \
                zips_lanes_cleaned[zips_lanes_cleaned.ZIPCODE == zipcode].index
            zips_lanes_cleaned.loc[indices, 'Miles_Bike_Lanes'] = \
                zips_lanes_cleaned.loc[indices, 'Miles_Bike_Lanes'].cumsum()
        return zips_lanes_cleaned
    def get_pop():
        """
        This function returns
        a GeoDataFrame of population by zip code and year.
        """
        # This data is downloaded from Seattle Open GIS
        pop_url_2010 = \
            'https://gisrevprxy.seattle.gov/arcgis/rest/services/CENSUS_EXT/CENSUS_2010_BASICS/MapServer/15/query?where=1%3D1&outFields=SHAPE,GEOID10,NAME10,ACRES_TOTAL,Total_Population,OBJECTID&outSR=4326&f=json'  # noqa: E501
        pop_2010 = gpd.read_file(pop_url_2010)
        # Redefines census tract geometries
        # by their centroid points to avoid double counting,
        # when spatial join happens
        census_cent = get_tractcenters()
        # census_bounds_cleaned = get_census_bounds()
        # census_cent = census_bounds_cleaned.copy()
        # census_cent['geometry'] = census_cent['geometry'].centroid
        # NOTE(review): the assignment below aligns on row position/index,
        # assuming pop_2010 and the tract data list tracts in the same
        # order -- confirm.
        pop_2010['geometry'] = census_cent['geometry']
        # Spatial join to put populations with associated zipcode
        pop_zips = gpd.sjoin(get_zips(), pop_2010, op='contains')
        pop_zips.reset_index(inplace=True)
        pop_zips = pop_zips[['ZIPCODE', 'geometry', 'Total_Population']]
        # Dissolve into single zipcode geometry
        # and aggregate within that geometry
        pop_zips_diss = pop_zips.dissolve(by='ZIPCODE', aggfunc='sum')
        pop_zips_diss.reset_index(inplace=True)
        pop_zips_diss_clean = pop_zips_diss[['ZIPCODE', 'Total_Population']]
        # Create estimates for zip code populations
        # in years besides 2010 based on the population fraction
        # and total population
        total_pop = pop_zips_diss_clean['Total_Population'].sum()
        pop_zips_diss_clean['Pop_fraction'] = \
            pop_zips_diss_clean['Total_Population']/total_pop
        years = list(range(2007, 2019))
        # Citywide Seattle population totals for 2007-2018.
        populations = [585436, 591870, 598539,
                       608660, 622694, 635928,
                       653588, 670109, 687386,
                       709631, 728661, 742235]
        pop_by_year = dict(zip(years, populations))
        def est_zip_pop(year, pop_zips_diss_clean, pop_by_year):
            # Scale each zip's 2010 population fraction by that year's
            # citywide total.
            pop_frac = pop_zips_diss_clean['Pop_fraction'].values
            year_pop = pop_by_year.get(year)
            pop_zip_year = pop_zips_diss_clean.copy()
            pop_zip_year['Total_Population'] = pop_frac*year_pop
            return pop_zip_year
        pop_zips_years = gpd.GeoDataFrame()
        for year in years:
            pop_zip_year = est_zip_pop(year, pop_zips_diss_clean, pop_by_year)
            pop_zip_year['Year'] = year
            pop_zips_years = pop_zips_years.append(pop_zip_year)
        pop_zips_years.sort_values(by=['ZIPCODE', 'Year'], inplace=True)
        pop_zips_years = pop_zips_years[['ZIPCODE', 'Year',
                                         'Total_Population',
                                         'Pop_fraction']]
        return pop_zips_years
    def get_alltraffic():
        '''
        This function returns a DataFrame of traffic volume
        by zip code and year (all years 2007-2018 stacked).
        '''
        all_traffic = pd.DataFrame()
        years = list(np.arange(7, 19))
        for year in years:
            traffic = get_traffic(year)
            all_traffic = all_traffic.append(traffic)
        all_traffic.groupby(by='ZIPCODE')
        all_traffic.sort_values(['ZIPCODE', 'YEAR'], inplace=True)
        all_traffic.rename(columns={'YEAR': 'Year'}, inplace=True)
        return all_traffic
    # Creates dataframes for bike rack capacities,
    # bike lane lengths, population and traffic volumes
    rack_data = get_racks()
    lane_data = get_lanes()
    pop_data = get_pop()
    traffic_data = get_alltraffic()
    # Merges dataframes (left joins onto traffic, the densest dataset)
    a = pd.merge(traffic_data, pop_data, how='left', on=['Year', 'ZIPCODE'])
    b = pd.merge(a, rack_data, how='left', on=['Year', 'ZIPCODE'])
    all_data = pd.merge(b, lane_data, how='left', on=['Year', 'ZIPCODE'])
    # Removes zip codes with lots of missing data
    all_data.fillna(0, inplace=True)
    zip_list = list(all_data.ZIPCODE.unique())
    # NOTE(review): these positional indices into the unique-zip list are
    # hard-coded -- they depend on the download's row order; confirm they
    # still target the intended zip codes.
    removed_zips_index = [21, 23, 24, 25, 26, 27, 28]
    for i in removed_zips_index:
        all_data.drop(all_data[all_data.ZIPCODE == zip_list[i]].index,
                      inplace=True)
    return all_data
# Creates dataframe containing all features and targets
# (runs at import time and performs several network downloads -- slow)
all_data = get_alldata()
def get_zipdata(zipcode):
    '''
    Return the rows of the module-level ``all_data`` frame
    for a single zip code.
    '''
    return all_data[all_data.ZIPCODE == zipcode]
def get_csv(df, file_name):
    """
    Write DataFrame ``df`` to ``<cwd>/<file_name>.csv``.

    Arguments:
        df: the DataFrame to write.
        file_name: base name of the output file (without extension).

    Returns:
        None (``DataFrame.to_csv`` returns None when given a path);
        the return is kept for backward compatibility.
    """
    # os.path.join is portable, unlike manual '/'-concatenation.
    path = os.path.join(os.getcwd(), file_name + '.csv')
    return df.to_csv(path)
def get_csv_alldata():
    """
    Write the DataFrame with all attributes by zip code and year
    to a csv file titled 'all_data.csv'.
    """
    get_csv(all_data, 'all_data')
# -*- coding: utf-8 -*-
"""HDF5 Dataset Generators
The generator class is responsible for yielding batches of images and labels from our HDF5 database.
Attributes:
dataset_path (str):
Path to the HDF5 database that stores our images and corresponding class labels.
batch_size (int):
Size of mini-batches to yield when training our network.
preprocessors (list):
List of image preprocessors we are going to apply (default: None)
    augmentation (bool):
If True, then a Keras ImageDataGenerator will be supplied to augment the data directly
inside our HDF5DatasetGenerator (default: None).
    binarize (bool):
If True, then the labels will be binarized as one-hot encoded vector (default: True)
classes (int):
Number of unique class labels in our database (default: 2).
"""
from keras.utils import np_utils
import numpy as np
import h5py
class HDF5DatasetGenerator:
    """Yield mini-batches of images and labels from an HDF5 database.

    Intended for Keras ``.fit_generator`` training when the dataset is too
    large to fit in memory.
    """

    def __init__(self, dataset_path, batch_size, preprocessors=None, augmentation=None, binarize=True, classes=2):
        """Initialize the dataset generator.

        Arguments:
            dataset_path {str} -- path to the HDF5 database
            batch_size {int} -- size of mini-batches when training the network

        Keyword Arguments:
            preprocessors {list} -- list of image preprocessors (default: {None})
            augmentation -- Keras ImageDataGenerator used to augment batches
                inside the generator (default: {None})
            binarize {bool} -- labels will be encoded as one-hot vector (default: {True})
            classes {int} -- number of unique class labels in our database (default: {2})
        """
        # store the batch size, preprocessors, and data augmentor, whether or
        # not the labels should be binarized, along with the total number of classes
        self.batch_size = batch_size
        self.preprocessors = preprocessors
        self.augmentation = augmentation
        self.binarize = binarize
        self.classes = classes
        # Open the HDF5 database read-only: this class never writes, and
        # calling h5py.File without an explicit mode is deprecated.
        self.database = h5py.File(dataset_path, "r")
        self.num_images = self.database["labels"].shape[0]

    def generator(self, passes=np.inf):
        """Yield (images, labels) batches to Keras .fit_generator.

        Keyword Arguments:
            passes {int} -- total number of epochs to loop over the data
                (default: {np.inf}, i.e. loop forever)
        """
        # initialize the epoch count
        epochs = 0
        # keep looping infinitely -- the model will stop once we have
        # reached the desired number of epochs
        while epochs < passes:
            # loop over the HDF5 database in batch-size steps
            for i in np.arange(0, self.num_images, self.batch_size):
                # extract the images and labels from the HDF database
                images = self.database["images"][i : i + self.batch_size]
                labels = self.database["labels"][i : i + self.batch_size]
                # check to see if the labels should be binarized
                if self.binarize:
                    labels = np_utils.to_categorical(labels, self.classes)
                # apply each preprocessor, in order, to every image
                if self.preprocessors is not None:
                    processed_images = []
                    for image in images:
                        for preprocessor in self.preprocessors:
                            image = preprocessor.preprocess(image)
                        processed_images.append(image)
                    images = np.array(processed_images)
                # if the data augmentor exists, apply it
                if self.augmentation is not None:
                    (images, labels) = next(self.augmentation.flow(images, labels, batch_size=self.batch_size))
                # yield a tuple of images and labels
                yield (images, labels)
            # increment the total number of epochs
            epochs += 1

    def close(self):
        """Close the underlying HDF5 file handle."""
        self.database.close()
#exec(open('templates\\algs_compare_regression.py').read())
# testing different regression algorithms
import subprocess as sp
import pandas as pd
import sklearn.model_selection as ms
import sklearn.linear_model as sl
import sklearn.metrics as sm
import sklearn.discriminant_analysis as da
import sklearn.neighbors as neighbors
import sklearn.naive_bayes as nb
import sklearn.tree as tree
import sklearn.svm as svm
import numpy as np
import pickle as pk
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # Clear the console (Windows 'cls') and close stale figures.
    sp.call('cls', shell = True)
    plt.close('all')

    # load some data
    with open('.\\data\\bostonHousing.pkl', 'rb') as fl:
        df = pk.load(fl)

    # specify the x and y matrices
    ycols = ['PRICE']
    xcols = list(set(df.columns) - set(ycols))
    X = df.loc[:, xcols].values
    y = np.ravel(df.loc[:, ycols].values)

    # specify cross-validation
    k = 10 # number of folds
    cvsplitter = ms.KFold(n_splits = k, shuffle = True, random_state = 0) # cross-validation splitter

    # specify all models
    models = list()
    models.append(('LR', sl.LinearRegression()))
    models.append(('RIDGE', sl.Ridge()))
    models.append(('LASSO', sl.Lasso()))
    models.append(('EN', sl.ElasticNet()))
    models.append(('KNN', neighbors.KNeighborsRegressor()))
    models.append(('CART', tree.DecisionTreeRegressor()))
    models.append(('SVM', svm.SVR()))

    # fit and compute scores; negate neg_mean_squared_error to get MSE
    scoring = 'neg_mean_squared_error'
    algs = list()
    scores = list()
    for entry in models:
        score = -1 * ms.cross_val_score(entry[1], X, y, cv = cvsplitter, scoring = scoring)
        scores.append(score)
        algs.append(entry[0])
        #print('{0} - {1:.4f} - {2:.4f}'.format(entry[0], np.mean(score), np.std(score, ddof = 1)))

    # boxplot of results
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.boxplot(scores)
    ax.set_xticklabels(algs)
    ax.set_xlabel('Algorithm')
    ax.set_ylabel('Mean Squared Error')
    # these are regressors, not classifiers -- title fixed accordingly
    ax.set_title('Mean Squared Error of Different Regressors')

    # table of results
    scores = np.array(scores)
    dfScores = pd.DataFrame(index = algs)
    dfScores['mean'] = np.mean(scores, axis = 1)
    dfScores['std'] = np.std(scores, ddof = 1, axis = 1)
    print('Mean and standard deviation of MSE for different algorithms:')
    print(dfScores)
    plt.show()
    plt.close('all')
#!/usr/bin/env python
import rospy
import numpy as np
from std_msgs.msg import Float64
import math
def talker():
    """Publish a scripted joint-angle trajectory that unscrews a fuel
    tank lid with a 6-DOF arm plus a two-finger gripper.

    The original function was a 26-branch if/elif ladder with two
    duplicate, unreachable branches (second ``i <= 145`` and second
    ``i <= 150``); it is rewritten as a schedule table that publishes the
    exact same angles at the same ticks.  Runs until the schedule is
    exhausted or ROS shuts down.
    """
    pub_theta1 = rospy.Publisher('/robot_arm/theta1_controller/command', Float64, queue_size=10)
    pub_theta2 = rospy.Publisher('/robot_arm/theta2_controller/command', Float64, queue_size=10)
    pub_theta3 = rospy.Publisher('/robot_arm/theta3_controller/command', Float64, queue_size=10)
    pub_theta4 = rospy.Publisher('/robot_arm/theta4_controller/command', Float64, queue_size=10)
    pub_theta5 = rospy.Publisher('/robot_arm/theta5_controller/command', Float64, queue_size=10)
    pub_theta6 = rospy.Publisher('/robot_arm/theta6_controller/command', Float64, queue_size=10)
    pub_grasp_angle1 = rospy.Publisher('/robot_arm/grasp_angle1_controller/command', Float64, queue_size=10)
    pub_grasp_angle2 = rospy.Publisher('/robot_arm/grasp_angle2_controller/command', Float64, queue_size=10)
    # Publication order matches the original branch bodies.
    publishers = [pub_theta1, pub_theta2, pub_theta3, pub_theta4,
                  pub_theta5, pub_theta6, pub_grasp_angle1, pub_grasp_angle2]
    T = 160 #Time to perform the task
    delT = 0.1
    n = int(T/delT)  # kept from original (unused)
    i = 0
    rospy.init_node('talker', anonymous=True)
    rate = rospy.Rate(1/delT) # 10hz
    # Each entry: (tick limit, (theta1..theta6, grasp1, grasp2), message
    # printed before publishing).  The first entry whose limit >= i is
    # used, exactly like the original if/elif ladder.  The two duplicate
    # unreachable branches (i<=145 and i<=150 repeated) are dropped.
    schedule = [
        (50,  (0,    3.14,      0, 3.14,      0, 0,   0,   0),   None),  # stabilize in default pose
        (55,  (0,    3.14-0.1,  0, 3.14-0.1,  0, 0,   0,   0),   None),
        (60,  (0,    3.14-0.2,  0, 3.14-0.2,  0, 0,   0,   0),   None),
        (65,  (0,    3.14-0.3,  0, 3.14-0.3,  0, 0,   0,   0),   None),
        (70,  (0,    3.14-0.4,  0, 3.14-0.5,  0, 0,   0,   0),   None),
        (75,  (0,    3.14-0.4,  0, 3.14-0.7,  0, 0,   0,   0),   None),
        (80,  (0,    3.14-0.4,  0, 3.14-0.9,  0, 0,   0,   0),   None),
        (85,  (0.2,  3.14-0.4,  0, 3.14-1.05, 0, 0,   0,   0),   None),
        (90,  (0.27, 3.14-0.4,  0, 3.14-1.05, 0, 0,   0.2, 0.2), None),  # close gripper
        (120, (0.27, 3.14-0.4,  0, 3.14-1.05, 0, 0.1, 0.2, 0.2), "Unscrewing Fuel Tank Lid"),
        (122, (0.35, 3.14-0.4,  0, 3.14-1.05, 0, 0,   0.2, 0.2), None),
        (125, (0.4,  3.14-0.4,  0, 3.14-1.1,  0, 0,   0.2, 0.2), None),
        (128, (0.5,  3.14-0.35, 0, 3.14-1.1,  0, 0,   0.2, 0.2), None),
        (130, (0.6,  3.14-0.3,  0, 3.14-1.2,  0, 0,   0.2, 0.2), None),
        (133, (0.65, 3.14-0.25, 0, 3.14-1.2,  0, 0,   0.2, 0.2), None),
        (135, (0.7,  3.14-0.2,  0, 3.14-1.4,  0, 0,   0.2, 0.2), None),
        (138, (0.7,  3.14-0.25, 0, 3.14-1.2,  0, 0,   0.2, 0.2), None),
        (140, (0.7,  3.14-0.2,  0, 3.14-1.4,  0, 0,   0,   0),   None),  # release lid
        (143, (0.7,  3.14-0.2,  0, 3.14-1.4,  0, 0,   0,   0),   None),
        (145, (0.7,  3.14-0.2,  0, 3.14-1.4,  0, 0,   0,   0),   None),
        (150, (0.6,  3.14-0.3,  0, 3.14-1.2,  0, 0,   0,   0),   None),
        (155, (0.4,  3.14-0.4,  0, 3.14-1.1,  0, 0,   0,   0),   None),
        (158, (0.35, 3.14-0.4,  0, 3.14-1.1,  0, 0,   0,   0),   None),
        (160, (0.27, 3.14-0.4,  0, 3.14-1.05, 0, 0,   0,   0),   None),
    ]
    while not rospy.is_shutdown():
        # Past the final tick limit the original ladder hit `else: break`.
        if i > schedule[-1][0]:
            break
        for limit, angles, message in schedule:
            if i <= limit:
                if message is not None:
                    print(message)
                for pub, angle in zip(publishers, angles):
                    pub.publish(angle)
                break
        rate.sleep()
        print("Executing Task")
        i = i+1
if __name__ == '__main__':
    try:
        # Run the publisher; node shutdown (Ctrl-C) raises
        # ROSInterruptException, which is treated as a clean exit.
        talker()
    except rospy.ROSInterruptException:
        pass
import sys, os, math
from ortools.constraint_solver import pywrapcp
from ortools.constraint_solver import routing_enums_pb2
import matplotlib.pyplot as plt
import numpy as np
from six.moves import xrange
#from urllib3.connectionpool import xrange
def getopts(argv):
    """Parse '-name value' pairs from an argument vector into a dict.

    Every token beginning with '-' is mapped to the token that follows it,
    scanning left to right (so a '-flag' given as the last token raises
    IndexError, as in a sliding-window parse).
    """
    options = {}
    for pos, token in enumerate(argv):
        if token[0] == '-':          # found a "-name value" pair
            options[token] = argv[pos + 1]
    return options
def main():
    """Build and solve a capacitated pickup-and-delivery problem with time
    windows using the OR-Tools routing solver.

    Command-line flags (parsed by getopts): --vehicles, --capacity,
    --speed, --search_time_limit, --max_dur_mult, --glob_span_cost_coef,
    --plot.
    """
    cmd_opts = getopts(sys.argv)
    # configuration, problem description
    depot = 0
    num_vehicles = '--vehicles' in cmd_opts and int(cmd_opts['--vehicles']) or 10
    vehicle_capacity = '--capacity' in cmd_opts and int(cmd_opts['--capacity']) or 10
    speed = '--speed' in cmd_opts and int(cmd_opts['--speed']) or 40
    search_time_limit = '--search_time_limit' in cmd_opts and int(
        cmd_opts['--search_time_limit']) or 10 * 1000 # milliseconds
    # NOTE(review): search_time_limit is assigned to time_limit.seconds
    # below, so the default labeled "milliseconds" actually yields 10000
    # seconds -- confirm the intended unit.
    trip_service_duration_max = 0
    max_dur_mult = '--max_dur_mult' in cmd_opts and float(cmd_opts['--max_dur_mult']) or 1.3
    glob_span_cost_coef = '--glob_span_cost_coef' in cmd_opts and int(cmd_opts['--glob_span_cost_coef']) or None
    plot = '--plot' in cmd_opts
    print(
        {
            'vehicles': num_vehicles,
            'capacity': vehicle_capacity,
            'speed': speed,
            'max_dur_mult': max_dur_mult,
            'glob_span_cost_coef': glob_span_cost_coef,
        })
    customers = []
    locations = []
    demands = []
    start_times = []
    end_times = []
    pickups = []
    dropoffs = []
    # Node 0 is the depot; each customer contributes a pickup row
    # (demand +1) and a dropoff row (demand -1) that reference each other
    # via the pickup_index/dropoff_index columns.
    data = [
        # customer lat lng demand start end pickup_index dropoff_index
        [-1, 37.477749, -122.148499, 0, -1, -1, 0, 0],
        [1, 37.467112, -122.253060, 1, 487, 2287, 0, 2],
        [1, 37.477995, -122.148442, -1, 2623, 4423, 1, 0],
        [2, 37.444040, -122.214423, 1, 678, 2478, 0, 4],
        [2, 37.478331, -122.149008, -1, 2623, 4423, 3, 0],
        [3, 37.455956, -122.285887, 1, 23, 1823, 0, 6],
        [3, 37.478002, -122.148850, -1, 2623, 4423, 5, 0],
        [4, 37.452259, -122.240702, 1, 537, 2337, 0, 8],
        [4, 37.481572, -122.152584, -1, 2623, 4423, 7, 0],
        [5, 37.447776, -122.257816, 1, 0, 1800, 0, 10],
        [5, 37.485104, -122.147462, -1, 2623, 4423, 9, 0],
        [6, 37.473287, -122.271279, 1, 704, 2504, 0, 12],
        [6, 37.480284, -122.167614, -1, 2623, 4423, 11, 0],
        [7, 37.558294, -122.263208, 1, 823, 2610, 0, 14],
        [7, 37.481087, -122.166956, -1, 2640, 4423, 13, 0],
        [8, 37.558294, -122.263208, 1, 0, 1800, 0, 16],
        [8, 37.481087, -122.166956, -1, 2623, 4423, 15, 0],
    ]
    # Split the row-oriented table into parallel column lists.
    for i in range(0, len(data)):
        row = data[i]
        customers.append(row[0])
        locations.append([row[1], row[2]])
        demands.append(row[3])
        start_times.append(row[4])
        end_times.append(row[5])
        pickups.append(row[6])
        dropoffs.append(row[7])
    # build model
    num_locations = len(locations)
    #model_parameters = pywrapcp.RoutingModel.DefaultModelParameters()
    model_parameters = pywrapcp.DefaultRoutingModelParameters()
    # print model_parameters
    manager = pywrapcp.RoutingIndexManager(num_locations,num_vehicles,depot)
    routing = pywrapcp.RoutingModel(manager,model_parameters)
    #routing = pywrapcp.RoutingModel(num_locations, num_vehicles, depot, model_parameters)
    #search_parameters = pywrapcp.RoutingModel.DefaultSearchParameters()
    search_parameters = pywrapcp.DefaultRoutingSearchParameters()
    search_parameters.time_limit.seconds = search_time_limit
    search_parameters.log_search = True
    search_parameters.first_solution_strategy = (
        routing_enums_pb2.FirstSolutionStrategy.PARALLEL_CHEAPEST_INSERTION)
    # print search_parameters
    # Arc costs: precomputed pairwise travel durations at the given speed.
    #time_between_locations = CreateCustomTimeCallback(locations, speed)
    time_between_locations = CreateCustomTimeCallback(manager, locations,speed)
    arc_cost_callback = time_between_locations.Duration
    arc_cost_callback1 = routing.RegisterTransitCallback(arc_cost_callback)
    #arc_cost_callback = routing.RegisterTransitCallback(time_between_locations.Duration)
    routing.SetArcCostEvaluatorOfAllVehicles(arc_cost_callback1)
    # Capacity dimension: each pickup adds +1, each dropoff -1.
    demands_at_locations = CreateDemandCallback(manager, demands)
    demands_callback = demands_at_locations.Demand
    demands_callback1 = routing.RegisterUnaryTransitCallback(demands_callback)
    routing.AddDimension(demands_callback1, 0, vehicle_capacity, True, "Capacity")
    # time taken to load/unload at each location
    service_times = CreateServiceTimeCallback(manager, demands, trip_service_duration_max)
    service_time_callback = service_times.ServiceTime
    # time taken to travel between locations
    travel_time_callback = time_between_locations.Duration
    total_times = CreateTotalTimeCallback(service_time_callback, travel_time_callback)
    total_time_callback = total_times.TotalTime
    total_time_callback1 = routing.RegisterTransitCallback(total_time_callback)
    horizon = max(end_times) + 7600 # buffer beyond latest dropoff
    routing.AddDimension(total_time_callback1, horizon, horizon, False, "Time")
    # build pickup and delivery model
    time_dimension = routing.GetDimensionOrDie("Time")
    if glob_span_cost_coef:
        time_dimension.SetGlobalSpanCostCoefficient(glob_span_cost_coef)
    solver = routing.solver()
    for i in range(1, num_locations):
        #index = routing.IndexToNode(i)
        index = manager.NodeToIndex(i)
        # NOTE(review): CumulVar/SetRange below is called with node i while
        # the pickup/delivery constraints use manager indices -- nodes and
        # solver indices may not coincide for every node; confirm.
        time_dimension.CumulVar(i).SetRange(start_times[i], end_times[i])
        if demands[i] != depot and pickups[i] == 0 and dropoffs[i] != 0: # don't setup precedence for depots
            #delivery_index = routing.IndexToNode(dropoffs[i])
            delivery_index = manager.NodeToIndex(dropoffs[i])
            if delivery_index > 0:
                # Same vehicle must do pickup before dropoff, and the ride
                # duration may exceed the direct trip by at most max_dur_mult.
                solver.Add(routing.VehicleVar(index) == routing.VehicleVar(delivery_index))
                solver.Add(time_dimension.CumulVar(index) <= time_dimension.CumulVar(delivery_index))
                min_dur = int(travel_time_callback(index, delivery_index))
                max_dur = int(max_dur_mult * min_dur)
                dur_expr = time_dimension.CumulVar(delivery_index) - time_dimension.CumulVar(index)
                solver.Add(dur_expr <= max_dur)
                routing.AddPickupAndDelivery(i, dropoffs[i])
    if plot:
        # Gantt-style view of each customer's pickup/dropoff time windows.
        plt.barh(customers, np.array(end_times) - np.array(start_times), left=start_times)
        plt.yticks(customers)
        plt.xlabel('pickup start,end .. dropoff start,end')
        plt.ylabel('customers')
        plt.show()
    print('begin solving')
    assignment = routing.SolveWithParameters(search_parameters)
    if assignment:
        print('solution exists')
        printer = ConsolePrinter(num_vehicles, customers, demands, start_times,
                                 end_times, pickups, dropoffs, travel_time_callback,
                                 max_dur_mult, routing, assignment)
        printer.print_solution()
    else:
        print('solution not found')
class ConsolePrinter():
    """Print the solved routes and per-customer ride statistics.

    NOTE(review): ``print_solution`` reads ``self.manager`` (for
    IndexToNode), but ``__init__`` never assigns it -- calling
    ``print_solution`` will raise AttributeError unless the
    RoutingIndexManager is attached to the instance externally.  The
    manager should be passed into the constructor; left unfixed here
    because the caller would also have to change.
    """

    def __init__(self, num_vehicles, customers, demands, start_times, end_times,
                 pickups, dropoffs, calc_travel_time, max_dur_mult, routing, assignment):
        """Store the problem data, the solved model, and its assignment."""
        self.num_vehicles = num_vehicles
        self.customers = customers
        self.demands = demands
        self.start_times = start_times
        self.end_times = end_times
        self.pickups = pickups
        self.dropoffs = dropoffs
        self.calc_travel_time = calc_travel_time
        self.max_dur_mult = max_dur_mult
        self.routing = routing
        self.assignment = assignment

    def print_solution(self):
        """Print each non-empty vehicle route, then a per-ride summary table."""
        print("Total duration of all routes: " + str(self.assignment.ObjectiveValue()) + "\n")
        capacity_dimension = self.routing.GetDimensionOrDie("Capacity")
        time_dimension = self.routing.GetDimensionOrDie("Time")
        errors = None
        plan_output = ''
        rides = {}
        for vehicle_nbr in range(self.num_vehicles):
            veh_output = ''
            index = self.routing.Start(vehicle_nbr)
            empty = True
            # Walk the route, recording one Visit per stop and pairing
            # pickup/dropoff visits into Ride objects keyed by customer.
            while not self.routing.IsEnd(index):
                node_index = self.manager.IndexToNode(index)
                customer = self.customers[node_index]
                demand = self.demands[node_index]
                load_var = capacity_dimension.CumulVar(index)
                time_var = time_dimension.CumulVar(index)
                visit = Visit(vehicle_nbr, node_index, customer, demand,
                              self.assignment.Value(load_var),
                              self.assignment.Min(time_var),
                              self.assignment.Max(time_var),
                              self.assignment.Value(time_var))
                ride = rides.get(customer)
                if not ride:
                    ride = rides[customer] = Ride(customer, vehicle_nbr)
                if visit.is_pickup():
                    ride.pickup_visit = visit
                else:
                    ride.dropoff_visit = visit
                veh_output += \
                    "{route_id} {node_index} {customer} {demand} {load} {tmin} {tmax} {tval}".format(
                        route_id=vehicle_nbr,
                        node_index=node_index,
                        customer=customer,
                        demand=demand,
                        load=self.assignment.Value(load_var),
                        tmin=self.assignment.Min(time_var),
                        tmax=self.assignment.Max(time_var),
                        tval=self.assignment.Value(time_var))
                if self.assignment.Value(load_var) > 0:
                    empty = False
                veh_output += "\n"
                index = self.assignment.Value(self.routing.NextVar(index))
            # Also print the final (end-of-route) node.
            node_index = self.manager.IndexToNode(index)
            customer = self.customers[node_index]
            demand = self.demands[node_index]
            load_var = capacity_dimension.CumulVar(index)
            time_var = time_dimension.CumulVar(index)
            visit = Visit(vehicle_nbr, node_index, customer, demand,
                          self.assignment.Value(load_var),
                          self.assignment.Min(time_var),
                          self.assignment.Max(time_var),
                          self.assignment.Value(time_var))
            veh_output += \
                "{route_id} {node_index} {customer} {demand} {load} {tmin} {tmax} {tval}".format(
                    route_id=vehicle_nbr,
                    node_index=node_index,
                    customer=customer,
                    demand=demand,
                    load=self.assignment.Value(load_var),
                    tmin=self.assignment.Min(time_var),
                    tmax=self.assignment.Max(time_var),
                    tval=self.assignment.Value(time_var))
            veh_output += "\n"
            if not empty:
                plan_output += veh_output
        print("route_id node_index customer demand load tmin tmax tval")
        print(plan_output)
        # FIX: dict views are not indexable in Python 3; materialize a list
        # before the positional loop below.
        ride_list = list(rides.values())
        cols = ['cust (pnode..dnode)', 'route',
                'pickup_at..dropoff_at',
                'cnstr_pickup', 'cnstr_dropoff',
                'plan_dur',
                'cnstr_dur',
                'plan_pickup_range',
                'plan_dropoff_range',
                'plan_min_poss_dur']
        row_format = "".join(map(lambda c: "{:>" + str(len(c) + 4) + "}", cols))
        print(row_format.format(*cols))
        for i in range(0, len(ride_list)):
            ride = ride_list[i]
            if not ride.pickup_visit:
                continue
            min_dur = self.calc_travel_time(ride.pickup_visit.node_index, ride.dropoff_visit.node_index)
            vals = ["{} {}..{}".format(ride.customer, ride.pickup_visit.node_index, ride.dropoff_visit.node_index),
                    ride.route,
                    "{}..{}".format(ride.pickup_visit.tval, ride.dropoff_visit.tval),
                    "{}..{}".format(time_dimension.CumulVar(ride.pickup_visit.node_index).Min(),
                                    time_dimension.CumulVar(ride.pickup_visit.node_index).Max()),
                    "{}..{}".format(time_dimension.CumulVar(ride.dropoff_visit.node_index).Min(),
                                    time_dimension.CumulVar(ride.dropoff_visit.node_index).Max()),
                    ride.dropoff_visit.tval - ride.pickup_visit.tval,
                    "{}..{}".format(int(min_dur), int(self.max_dur_mult * min_dur)),
                    "{}..{}".format(ride.pickup_visit.tmin, ride.pickup_visit.tmax),
                    "{}..{}".format(ride.dropoff_visit.tmin, ride.dropoff_visit.tmax),
                    ride.dropoff_visit.tmin - ride.pickup_visit.tmax
                    ]
            print(row_format.format(*vals))
class Ride(object):
    """One customer's ride on a route; links its pickup and dropoff visits."""

    def __init__(self, customer, route):
        self.customer = customer
        self.route = route
        # Resolved later, once the solver's assignment has been walked.
        self.pickup_visit = None
        self.dropoff_visit = None
class Visit(object):
    """A single stop on a vehicle's route, with load and time-window data."""

    def __init__(self, route_id, node_index, customer, demand, load, tmin, tmax, tval):
        # Where and who.
        self.route_id = route_id
        self.node_index = node_index
        self.customer = customer
        # Load change at this stop and the resulting vehicle load.
        self.demand = demand
        self.load = load
        # Feasible arrival window (tmin..tmax) and the scheduled arrival time.
        self.tmin = tmin
        self.tmax = tmax
        self.tval = tval

    def is_pickup(self):
        """True when this visit adds load (i.e. it picks a customer up)."""
        return 0 < self.demand
# Custom travel time callback
class CreateCustomTimeCallback(object):
    """Travel-time callback for the routing solver.

    Precomputes the pairwise travel time (in seconds) between all
    locations from great-circle distances, so ``Duration`` is an O(1)
    table lookup during search.
    """

    def __init__(self, manager, locations, speed):
        """
        manager: routing index manager (maps solver indices to node ids).
        locations: sequence of (lat, long) pairs in degrees.
        speed: average travel speed in miles per hour.
        """
        self.manager = manager
        self.locations = locations
        self.speed = speed
        self._durations = {}
        num_locations = len(self.locations)
        # precompute distance between locations to have a duration callback in O(1)
        # BUG FIX: the original used Python 2's ``xrange``, which does not
        # exist in Python 3 (the rest of this file uses Python 3 ``print()``).
        for from_node in range(num_locations):
            self._durations[from_node] = {}
            for to_node in range(num_locations):
                if from_node == to_node:
                    self._durations[from_node][to_node] = 0
                else:
                    loc1 = self.locations[from_node]
                    loc2 = self.locations[to_node]
                    dist = self.distance(loc1[0], loc1[1], loc2[0], loc2[1])
                    # seconds = 3600 * miles / mph
                    self._durations[from_node][to_node] = (3600 * dist) / self.speed

    def Duration(self, from_index, to_index):
        """Return the precomputed travel time between two solver indices."""
        from_node = self.manager.IndexToNode(from_index)
        to_node = self.manager.IndexToNode(to_index)
        return self._durations[from_node][to_node]

    def distance(self, lat1, long1, lat2, long2):
        """Great-circle distance in miles between two lat/long points.

        Note: The formula used in this function is not exact, as it assumes
        the Earth is a perfect sphere.
        """
        # Mean radius of Earth in miles
        radius_earth = 3959
        # Convert latitude and longitude to
        # spherical coordinates in radians.
        degrees_to_radians = math.pi / 180.0
        phi1 = lat1 * degrees_to_radians
        phi2 = lat2 * degrees_to_radians
        lambda1 = long1 * degrees_to_radians
        lambda2 = long2 * degrees_to_radians
        dphi = phi2 - phi1
        dlambda = lambda2 - lambda1
        # Haversine formula.
        a = self.haversine(dphi) + math.cos(phi1) * math.cos(phi2) * self.haversine(dlambda)
        c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
        d = radius_earth * c
        return d

    def haversine(self, angle):
        """Return the haversine of ``angle`` (radians): sin(angle / 2) ** 2."""
        h = math.sin(angle / 2) ** 2
        return h
class CreateDemandCallback(object):
    """Demand callback: looks up a node's demand from a solver index."""

    def __init__(self, manager, demands):
        self.manager = manager
        self.matrix = demands

    def Demand(self, from_index):
        """Return the demand of the node behind ``from_index``."""
        return self.matrix[self.manager.IndexToNode(from_index)]
class CreateServiceTimeCallback(object):
    """Service-time callback.

    With a per-node ``demands`` table the service time is looked up per
    node; without one, a flat ``max_service_time`` applies everywhere.
    """

    def __init__(self, manager, demands=None, max_service_time=0):
        self.manager = manager
        self.matrix = demands
        self.max_service_time = max_service_time

    def ServiceTime(self, from_index):
        """Return the service time for the node behind ``from_index``."""
        node = self.manager.IndexToNode(from_index)
        return self.max_service_time if self.matrix is None else self.matrix[node]
class CreateTotalTimeCallback(object):
    """Combines service time at a node with travel time to the next node."""

    def __init__(self, service_time_callback, travel_time_callback):
        self.service_time_callback = service_time_callback
        self.travel_time_callback = travel_time_callback

    def TotalTime(self, from_index, to_index):
        """Return service time at ``from_index`` plus travel to ``to_index``."""
        return (self.service_time_callback(from_index)
                + self.travel_time_callback(from_index, to_index))
if __name__ == '__main__':
    # Entry point: run the routing demo defined earlier in this file.
    main()
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from gibson2.envs.igibson_env import iGibsonEnv
import gibson2
import gym
import numpy as np
import os
import sys
def set_path(path: str):
    """Prepend ``path`` to ``sys.path`` unless it is already present."""
    if path not in sys.path:
        sys.path.insert(0, path)
# path to custom tf_agents
# NOTE(review): hard-coded, machine-specific checkout path — presumably needs
# adjusting (or making configurable) per development machine; the commented
# line below is an alternative machine's path.
set_path('/media/suresh/research/awesome-robotics/active-slam/catkin_ws/src/sim-environment/src/tensorflow/stanford/agents')
# set_path('/home/guttikon/awesome_robotics/sim-environment/src/tensorflow/stanford/agents')
from tf_agents.environments import gym_wrapper
from tf_agents.environments import tf_py_environment
from tf_agents.environments import wrappers
from tf_agents.policies import random_tf_policy
from utils.navigate_env import NavigateGibsonEnv
def load(config_file,
         model_id=None,
         env_mode='headless',
         action_timestep=1.0 / 10.0,
         physics_timestep=1.0 / 40.0,
         device_idx=0,
         gym_env_wrappers=(),
         env_wrappers=(),
         spec_dtype_map=None):
    """Build a NavigateGibsonEnv and wrap it for TF-Agents.

    Discount and episode length are read from the environment's config
    ('discount_factor', default 0.99; 'max_step', default 500).
    """
    gibson_env = NavigateGibsonEnv(config_file=config_file,
                                   scene_id=model_id,
                                   mode=env_mode,
                                   action_timestep=action_timestep,
                                   physics_timestep=physics_timestep,
                                   device_idx=device_idx)
    return wrap_env(
        gibson_env,
        discount=gibson_env.config.get('discount_factor', 0.99),
        max_episode_steps=gibson_env.config.get('max_step', 500),
        gym_env_wrappers=gym_env_wrappers,
        time_limit_wrapper=wrappers.TimeLimit,
        env_wrappers=env_wrappers,
        spec_dtype_map=spec_dtype_map,
        auto_reset=True
    )
def wrap_env(env,
             discount=1.0,
             max_episode_steps=0,
             gym_env_wrappers=(),
             time_limit_wrapper=wrappers.TimeLimit,
             env_wrappers=(),
             spec_dtype_map=None,
             auto_reset=True):
    """Wrap a gym env for TF-Agents.

    Applies gym-level wrappers, converts to a TF-Agents PyEnvironment,
    optionally adds a time limit, then applies environment-level wrappers.
    """
    for gym_wrap in gym_env_wrappers:
        env = gym_wrap(env)
    env = gym_wrapper.GymWrapper(
        env,
        discount=discount,
        spec_dtype_map=spec_dtype_map,
        match_obs_space_dtype=True,
        auto_reset=auto_reset,
        simplify_box_bounds=True
    )
    # A non-positive max_episode_steps means "no time limit".
    if max_episode_steps > 0:
        env = time_limit_wrapper(env, max_episode_steps)
    for env_wrap in env_wrappers:
        env = env_wrap(env)
    return env
if __name__ == '__main__':
    # Smoke test: load the turtlebot navigation environment in GUI mode and
    # drive it with a uniformly random policy for 5 short episodes.
    eval_py_env = load(
        config_file=os.path.join('./configs/', 'turtlebot_navigate.yaml'),
        env_mode='gui',
        device_idx=0,
    )
    eval_tf_env = tf_py_environment.TFPyEnvironment(eval_py_env)
    rnd_policy = random_tf_policy.RandomTFPolicy(
        time_step_spec=eval_tf_env.time_step_spec(),
        action_spec=eval_tf_env.action_spec())
    for _ in range(5):
        time_step = eval_tf_env.reset()
        # 100 random actions per episode.
        for _ in range(100):
            action_step = rnd_policy.action(time_step)
            time_step = eval_tf_env.step(action_step.action)
"""Object for storing experimental results.
Matthew Alger
The Australian National University
2016
"""
import json
import h5py
import numpy
class Results(object):
    """Stores experimental results in an HDF5 file.

    Layout of the backing file:

    - ``attrs['methods']``: JSON-encoded list of method names.
    - ``attrs['model']``: string identifying the model function/version.
    - ``'results'``:  (n_methods, n_splits, n_examples) float array.
    - ``'models'``:   (n_methods, n_splits, n_params) float array of
      serialised classifiers.
    - ``'run_flag'``: (n_methods, n_splits, n_examples) array; 1 marks
      examples whose trial has been run.

    Reopening an existing file with additional methods extends the
    datasets in place.

    BUG FIX: all dataset reads previously used the ``Dataset.value``
    property, which was deprecated in h5py 2.1 and removed in h5py 3.0;
    they now use the ``dataset[()]`` indexing form instead.
    """

    def __init__(self, path, methods, n_splits, n_examples, n_params, model):
        """
        path: Path to the results file (h5). File will be created if it does not
            already exist.
        methods: List of methods being tested.
        n_splits: Number of data splits.
        n_examples: Number of examples.
        n_params: Number of parameters in the model.
        model: String representing the model function and version.
        """
        if not path.endswith('.h5'):
            path += '.h5'
        self.h5_path = path
        self.methods = methods
        self.n_methods = None  # Initialised later (in _create).
        self.n_splits = n_splits
        self.n_examples = n_examples
        self.n_params = n_params
        self.model = model
        try:
            # 'r+' raises OSError when the file does not exist yet.
            with h5py.File(path, 'r+') as f:
                self._create(f)
        except OSError:
            with h5py.File(path, 'w') as f:
                self._create(f)
        # We could store a reference to the actual file, but then there's no
        # guarantee we'll close it safely later.

    def set_model(self, model):
        """Sets the model."""
        self.model = model
        with h5py.File(self.h5_path, 'r+') as f:
            f.attrs['model'] = model

    @classmethod
    def from_path(cls, path):
        """Loads a Results object from a path."""
        if not path.endswith('.h5'):
            path += '.h5'
        with h5py.File(path, 'r') as f:
            methods = json.loads(f.attrs['methods'])
            n_splits = f['results'].shape[1]
            n_examples = f['results'].shape[2]
            assert len(methods) == f['results'].shape[0]
            n_params = f['models'].shape[2]
            model = f.attrs['model']
        return cls(path, methods, n_splits, n_examples, n_params, model)

    def _create(self, f):
        """Creates (or extends) the results datasets inside the open file f."""
        if 'methods' in f.attrs:
            # What methods are we adding and what methods do we already have?
            existing_methods = json.loads(f.attrs['methods'])
            new_methods = [m for m in self.methods
                           if m not in existing_methods]
            f.attrs['methods'] = json.dumps(existing_methods + new_methods)
            self.methods = existing_methods + new_methods
            n_existing_methods = len(existing_methods)
        else:
            f.attrs['methods'] = json.dumps(self.methods)
            n_existing_methods = 0
        self.method_idx = {j: i for i, j in enumerate(self.methods)}
        self.n_methods = len(self.method_idx)

        results_shape = (self.n_methods, self.n_splits, self.n_examples)
        if 'results' in f and f['results'].shape[0] != self.n_methods:
            # Extend the HDF5 file to hold new methods: stash a copy,
            # recreate at the larger shape, then copy the old rows back.
            f.create_dataset('results_', data=f['results'][()])
            del f['results']
            f.create_dataset('results', shape=results_shape, dtype=float)
            f['results'][:n_existing_methods] = f['results_'][()]
            del f['results_']
        elif 'results' not in f:
            f.create_dataset('results', shape=results_shape, dtype=float)
        else:
            assert f['results'].shape == results_shape, \
                'results: Expected shape {}, found {}.'.format(
                    results_shape, f['results'].shape)

        models_shape = (self.n_methods, self.n_splits, self.n_params)
        if 'models' in f and f['models'].shape[0] != self.n_methods:
            # Extend the HDF5 file to hold new methods.
            f.create_dataset('models_', data=f['models'][()])
            del f['models']
            f.create_dataset('models', shape=models_shape, dtype=float)
            f['models'][:n_existing_methods] = f['models_'][()]
            del f['models_']
        elif 'models' not in f:
            f.create_dataset('models', shape=models_shape, dtype=float)
            f.attrs['model'] = self.model
            assert f.attrs['model'] == self.model
        else:
            assert f['models'].shape == models_shape, \
                'models: Expected shape {}, found {}.'.format(
                    models_shape, f['models'].shape)

        run_flag_shape = (self.n_methods, self.n_splits, self.n_examples)
        if 'run_flag' in f and f['run_flag'].shape[0] != self.n_methods:
            # Extend the HDF5 file to hold new methods.
            f.create_dataset('run_flag_', data=f['run_flag'][()])
            del f['run_flag']
            f.create_dataset('run_flag', data=numpy.zeros(run_flag_shape))
            f['run_flag'][:n_existing_methods] = f['run_flag_'][()]
            del f['run_flag_']
        elif 'run_flag' not in f:
            f.create_dataset('run_flag', shape=run_flag_shape,
                             data=numpy.zeros(run_flag_shape))
        else:
            assert f['run_flag'].shape == run_flag_shape, \
                'run_flag: Expected shape {}, found {}.'.format(
                    run_flag_shape, f['run_flag'].shape)

    def store_trial(self, method, split, results, params, indices=None):
        """Stores results from one trial.

        method: Method. str
        split: Split ID. int
        results: Results for each example. (n_examples,) array
        params: (n_params,) array representing the classifier.
        indices: Indices of examples in this trial. [int]. Default all indices.
        """
        with h5py.File(self.h5_path, 'r+') as f:
            if indices is not None:
                f['results'][self.method_idx[method], split, indices] = results
                f['run_flag'][self.method_idx[method], split, indices] = 1
            else:
                f['results'][self.method_idx[method], split] = results
                f['run_flag'][self.method_idx[method], split] = 1
            f['models'][self.method_idx[method],
                        split, :params.shape[0]] = params

    def has_run(self, method, split):
        """Returns whether a trial has run successfully.

        If *any* example in the trial has been run successfully, then the trial
        has run successfully.

        method: Method. str.
        split: Split ID. int
        -> bool
        """
        with h5py.File(self.h5_path, 'r') as f:
            return any(
                f['run_flag'][self.method_idx[method], split].astype(bool))

    @property
    def models(self):
        # Full (n_methods, n_splits, n_params) array of stored models.
        # BUG FIX: Dataset.value was removed in h5py 3.0; use [()] instead.
        with h5py.File(self.h5_path, 'r') as f:
            return f['models'][()]

    def get_mask(self, method, split):
        """Get a mask for trials that have been run successfully.

        Mask will be 1 for trials that have run, and 0 otherwise.

        method: Method. str
        split: Split ID. int
        -> (n_examples,) array
        """
        with h5py.File(self.h5_path, 'r') as f:
            return f['run_flag'][self.method_idx[method], split].astype(bool)

    def __getitem__(self, item, *args, **kwargs):
        # Index the raw results; a leading method name in a tuple key is
        # translated into its row index first.
        with h5py.File(self.h5_path, 'r') as f:
            try:
                item = (self.method_idx[item[0]],) + item[1:]
            except TypeError as e:
                # item was not a tuple keyed by a method name; use it as-is.
                print(item)
                print(e)
                pass
            return f['results'].__getitem__(item, *args, **kwargs)

    def __repr__(self):
        return 'Results({}, methods={}, n_splits={}, n_examples={}, ' \
               'n_params={})'.format(
                   repr(self.h5_path),
                   sorted(self.method_idx, key=self.method_idx.get),
                   self.n_splits, self.n_examples, self.n_params)

    def get_model(self, method, split):
        """Get the serialised model associated with a method and split.

        method: Method. str
        split: Split ID. int
        -> (n_params,) array
        """
        with h5py.File(self.h5_path, 'r') as f:
            return f['models'][self.method_idx[method], split]
# Copyright (c) Facebook, Inc. and its affiliates.
import itertools
import json
import logging
import numpy as np
import os
from collections import OrderedDict
import PIL.Image as Image
import pycocotools.mask as mask_util
import torch
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.utils.comm import all_gather, is_main_process, synchronize
from detectron2.utils.file_io import PathManager
from .evaluator import DatasetEvaluator
class SemSegEvaluator(DatasetEvaluator):
    """
    Evaluate semantic segmentation metrics (mIoU, fwIoU, mACC, pACC) and
    optionally dump per-image prediction PNGs.
    """

    def __init__(self, dataset_name, distributed=True, output_dir=None, *, num_classes=None, ignore_label=None, write_outputs=False):
        """
        Args:
            dataset_name (str): name of the dataset to be evaluated.
            distributed (bool): if True, will collect results from all ranks for evaluation.
                Otherwise, will evaluate the results in the current process.
            output_dir (str): an output directory to dump results.
            num_classes, ignore_label: deprecated argument
            write_outputs (bool): if True, save raw and colourised prediction
                images under ``output_dir``.
        """
        self._logger = logging.getLogger(__name__)
        if num_classes is not None:
            # BUG FIX: Logger.warn is a deprecated alias of Logger.warning.
            self._logger.warning(
                "SemSegEvaluator(num_classes) is deprecated! It should be obtained from metadata."
            )
        if ignore_label is not None:
            self._logger.warning(
                "SemSegEvaluator(ignore_label) is deprecated! It should be obtained from metadata."
            )
        self._dataset_name = dataset_name
        self._distributed = distributed
        self._output_dir = output_dir
        self._write_outputs = write_outputs
        self._cpu_device = torch.device("cpu")
        # Map each input image to its ground-truth segmentation file.
        self.input_file_to_gt_file = {
            dataset_record["file_name"]: dataset_record["sem_seg_file_name"]
            for dataset_record in DatasetCatalog.get(dataset_name)
        }
        meta = MetadataCatalog.get(dataset_name)
        # Dict that maps contiguous training ids to COCO category ids
        try:
            c2d = meta.stuff_dataset_id_to_contiguous_id
            self._contiguous_id_to_dataset_id = {v: k for k, v in c2d.items()}
        except AttributeError:
            self._contiguous_id_to_dataset_id = None
        self._class_names = meta.stuff_classes
        self._num_classes = len(meta.stuff_classes)
        if num_classes is not None:
            assert self._num_classes == num_classes, f"{self._num_classes} != {num_classes}"
        self._ignore_label = ignore_label if ignore_label is not None else meta.ignore_label

    def reset(self):
        # One extra row/column holds pixels labelled with the ignore label.
        self._conf_matrix = np.zeros((self._num_classes + 1, self._num_classes + 1), dtype=np.int64)
        self._predictions = []

    def process(self, inputs, outputs):
        """
        Args:
            inputs: the inputs to a model.
                It is a list of dicts. Each dict corresponds to an image and
                contains keys like "height", "width", "file_name".
            outputs: the outputs of a model. It is either list of semantic segmentation predictions
                (Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic
                segmentation prediction in the same format.
        """
        for input, output in zip(inputs, outputs):
            output = output["sem_seg"].argmax(dim=0).to(self._cpu_device)
            pred = np.array(output, dtype=np.uint8)
            pred64 = np.array(output, dtype=np.int64)  # to use it on bincount for conf matrix
            with PathManager.open(self.input_file_to_gt_file[input["file_name"]], "rb") as f:
                gt = np.array(Image.open(f), dtype=np.int64)
            # Fold ignored pixels into the extra confusion-matrix bin.
            gt[gt == self._ignore_label] = self._num_classes
            self._conf_matrix += np.bincount(
                (self._num_classes + 1) * pred64.reshape(-1) + gt.reshape(-1),
                minlength=self._conf_matrix.size,
            ).reshape(self._conf_matrix.shape)
            if self._write_outputs:
                self._save_prediction_images(input["file_name"], pred, output)
            # self._predictions.extend(self.encode_json_sem_seg(pred, input["file_name"]))

    def _save_prediction_images(self, file_name, pred, output):
        """Save the raw prediction and a colourised version as PNGs.

        ROBUSTNESS FIX: output directories used to be created (and
        cityscapesscripts imported) on every process() call even when
        write_outputs was False; both now happen only when actually writing.
        """
        from cityscapesscripts.helpers.labels import trainId2label

        pred_output = os.path.join(self._output_dir, 'predictions')
        os.makedirs(pred_output, exist_ok=True)
        pred_colour_output = os.path.join(self._output_dir, 'colour_predictions')
        os.makedirs(pred_colour_output, exist_ok=True)

        basename = os.path.splitext(os.path.basename(file_name))[0]
        Image.fromarray(pred).save(os.path.join(pred_output, basename + '.png'))

        # Colour prediction: train ids without a colour stay white (255, 255, 255).
        output = output.numpy()
        pred_colour = 255 * np.ones([output.shape[0], output.shape[1], 3], dtype=np.uint8)
        for train_id, label in trainId2label.items():
            pred_colour[(output == train_id), 0] = label.color[0]
            pred_colour[(output == train_id), 1] = label.color[1]
            pred_colour[(output == train_id), 2] = label.color[2]
        Image.fromarray(pred_colour).save(os.path.join(pred_colour_output, basename + '.png'))

    def evaluate(self):
        """
        Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):

        * Mean intersection-over-union averaged across classes (mIoU)
        * Frequency Weighted IoU (fwIoU)
        * Mean pixel accuracy averaged across classes (mACC)
        * Pixel Accuracy (pACC)
        """
        if self._distributed:
            # Sum the per-rank confusion matrices on the main process only.
            synchronize()
            conf_matrix_list = all_gather(self._conf_matrix)
            self._predictions = all_gather(self._predictions)
            self._predictions = list(itertools.chain(*self._predictions))
            if not is_main_process():
                return
            self._conf_matrix = np.zeros_like(self._conf_matrix)
            for conf_matrix in conf_matrix_list:
                self._conf_matrix += conf_matrix
        '''if self._output_dir:
            PathManager.mkdirs(self._output_dir)
            file_path = os.path.join(self._output_dir, "sem_seg_predictions.json")
            with PathManager.open(file_path, "w") as f:
                f.write(json.dumps(self._predictions))'''
        # NOTE(review): stray debug print of the full confusion matrix —
        # kept for parity, consider removing or demoting to logger.debug.
        print(self._conf_matrix)
        # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented replacement.
        acc = np.full(self._num_classes, np.nan, dtype=float)
        iou = np.full(self._num_classes, np.nan, dtype=float)
        tp = self._conf_matrix.diagonal()[:-1].astype(float)
        pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(float)
        class_weights = pos_gt / np.sum(pos_gt)
        pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(float)
        acc_valid = pos_gt > 0
        acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid]
        iou_valid = (pos_gt + pos_pred) > 0
        union = pos_gt + pos_pred - tp
        # acc_valid implies iou_valid (pos_gt > 0 => pos_gt + pos_pred > 0),
        # so this division is safe; classes absent from GT keep NaN IoU.
        iou[acc_valid] = tp[acc_valid] / union[acc_valid]
        macc = np.sum(acc[acc_valid]) / np.sum(acc_valid)
        miou = np.sum(iou[acc_valid]) / np.sum(iou_valid)
        fiou = np.sum(iou[acc_valid] * class_weights[acc_valid])
        pacc = np.sum(tp) / np.sum(pos_gt)

        res = {}
        res["mIoU"] = 100 * miou
        res["fwIoU"] = 100 * fiou
        for i, name in enumerate(self._class_names):
            res["IoU-{}".format(name)] = 100 * iou[i]
        res["mACC"] = 100 * macc
        res["pACC"] = 100 * pacc
        for i, name in enumerate(self._class_names):
            res["ACC-{}".format(name)] = 100 * acc[i]
        '''if self._output_dir:
            file_path = os.path.join(self._output_dir, "sem_seg_evaluation.pth")
            with PathManager.open(file_path, "wb") as f:
                torch.save(res, f)'''
        results = OrderedDict({"sem_seg": res})
        self._logger.info(results)
        return results

    def encode_json_sem_seg(self, sem_seg, input_file_name):
        """
        Convert semantic segmentation to COCO stuff format with segments encoded as RLEs.
        See http://cocodataset.org/#format-results
        """
        json_list = []
        for label in np.unique(sem_seg):
            if self._contiguous_id_to_dataset_id is not None:
                assert (
                    label in self._contiguous_id_to_dataset_id
                ), "Label {} is not in the metadata info for {}".format(label, self._dataset_name)
                dataset_id = self._contiguous_id_to_dataset_id[label]
            else:
                dataset_id = int(label)
            mask = (sem_seg == label).astype(np.uint8)
            mask_rle = mask_util.encode(np.array(mask[:, :, None], order="F"))[0]
            mask_rle["counts"] = mask_rle["counts"].decode("utf-8")
            json_list.append(
                {"file_name": input_file_name, "category_id": dataset_id, "segmentation": mask_rle}
            )
        return json_list
# coding=utf-8
"""
.. moduleauthor:: Torbjörn Klatt <t.klatt@fz-juelich.de>
"""
from copy import deepcopy
import numpy as np
from pypint.utilities import assert_is_instance, assert_condition, class_name
class IDiagnosisValue(object):
    """Storage and handler of diagnosis values of iterative time solvers.

    Comparability
        Instances support ``==`` and ``!=`` only. The ordering operators
        (``<``, ``<=``, ``>``, ``>=``) are deliberately left unimplemented, as
        an ordering makes no sense for this type of container.
        Two instances are equal when their :py:attr:`.numeric_type` match and
        their :py:attr:`.value` arrays are equal with respect to
        :py:meth:`numpy.array_equal`.

    Hashable
        Not hashable, due to its wrapping around :py:class:`numpy.ndarray`.

    .. todo::
        Extend this interface to emulate a numeric type.
        This includes :py:meth:`.__add__`, :py:meth:`.__sub__`, etc.
    """

    def __init__(self, value):
        """
        Parameters
        ----------
        value : :py:class:`numpy.ndarray`

        Raises
        ------
        ValueError:
            If ``value`` is not a :py:class:`numpy.ndarray`.
        """
        assert_is_instance(value, np.ndarray, descriptor="Diagnosis Values", checking_obj=self)
        self._data = value
        self._numeric_type = self.value.dtype

    @property
    def value(self):
        """Read-only accessor for the value.

        Returns
        -------
        value : :py:class:`numpy.ndarray`
        """
        return self._data

    @property
    def numeric_type(self):
        """Read-only accessor for the numerical type of the value.

        The type is derived from the given values.

        Returns
        -------
        numeric_type : :py:class:`numpy.dtype`
        """
        return self._numeric_type

    def __copy__(self):
        # Shallow copy: the wrapped array is shared with the original.
        duplicate = self.__class__.__new__(self.__class__)
        duplicate.__dict__.update(self.__dict__)
        return duplicate

    def __deepcopy__(self, memo):
        duplicate = self.__class__.__new__(self.__class__)
        memo[id(self)] = duplicate  # register early to break reference cycles
        for attr_name, attr_value in self.__dict__.items():
            setattr(duplicate, attr_name, deepcopy(attr_value, memo))
        return duplicate

    def __eq__(self, other):
        assert_condition(isinstance(other, self.__class__), TypeError,
                         message="Can not compare {} with {}".format(self.__class__, class_name(other)),
                         checking_obj=self)
        if self.numeric_type != other.numeric_type:
            return False
        return np.array_equal(self.value, other.value)

    def __ge__(self, other):
        return NotImplemented

    def __gt__(self, other):
        return NotImplemented

    def __le__(self, other):
        return NotImplemented

    def __lt__(self, other):
        return NotImplemented

    def __ne__(self, other):
        assert_condition(isinstance(other, self.__class__), TypeError,
                         message="Can not compare {} with {}".format(self.__class__, class_name(other)),
                         checking_obj=self)
        return not self.__eq__(other)

    # Wrapping a mutable ndarray: explicitly unhashable.
    __hash__ = None


__all__ = ['IDiagnosisValue']
"""
=======================
Transform Concatenation
=======================
In this example, we have a point p that is defined in a frame C, we know
the transform C2B and B2A. We can construct a transform C2A to extract the
position of p in frame A.
"""
print(__doc__)


import numpy as np
import matplotlib.pyplot as plt
import pytransform3d.rotations as pyrot
import pytransform3d.transformations as pytr


# Frame B in frame A: translate along -z, rotate pi about the z axis.
# Axis-angle vectors are (x, y, z, angle).
p = np.array([0.0, 0.0, -0.5])
a = np.array([0.0, 0.0, 1.0, np.pi])
B2A = pytr.transform_from(pyrot.matrix_from_axis_angle(a), p)

# Frame C in frame B.
p = np.array([0.3, 0.4, 0.5])
a = np.array([0.0, 0.0, 1.0, -np.pi / 2.0])
C2B = pytr.transform_from(pyrot.matrix_from_axis_angle(a), p)

# Chain the transforms to go straight from frame C to frame A.
C2A = pytr.concat(C2B, B2A)

# Express the homogeneous point (1, 1, 1, 1), defined in frame C, in frame A.
p = pytr.transform(C2A, np.ones(4))

# Draw both frames and the transformed point.
ax = pytr.plot_transform(A2B=B2A)
pytr.plot_transform(ax, A2B=C2A)
ax.scatter(p[0], p[1], p[2])
plt.show()
import os
import jsonlines
import numpy as np
import torch
from torch.utils.data import Dataset
class DatasetEL(Dataset):
    """Entity-linking dataset backed by a jsonlines file.

    Each record provides an ``input`` text and ``anchors`` (mention spans);
    training records additionally provide per-anchor ``candidates``.

    NOTE(review): an anchor appears to be ``(start, end, entity)`` in token
    offsets, with ``candidates`` a parallel list of entity lists — confirm
    against the data producer.
    """

    def __init__(
        self,
        tokenizer,
        data_path,
        max_length=32,
        max_length_span=15,
        test=False,
    ):
        # tokenizer: tokenizer with the HuggingFace __call__ interface,
        #   used by collate_fn.
        # data_path: jsonlines file with one record per line (loaded eagerly).
        # max_length: maximum tokenized sequence length; anchors ending at or
        #   beyond it are dropped in collate_fn.
        # max_length_span: anchors spanning >= this many tokens are dropped.
        # test: when True, collate_fn emits no target/negative encodings.
        super().__init__()
        self.tokenizer = tokenizer
        with jsonlines.open(data_path) as f:
            self.data = list(f)
        self.max_length = max_length
        self.max_length_span = max_length_span
        self.test = test

    def __len__(self):
        # Number of records in the jsonlines file.
        return len(self.data)

    def __getitem__(self, item):
        # Records are returned raw; all tokenization happens in collate_fn.
        return self.data[item]

    def collate_fn(self, batch):
        # An anchor a is kept only when it ends inside the tokenized window
        # (a[1] < max_length) and is shorter than max_length_span.
        batch = {
            # Tokenize the raw inputs; keys become src_input_ids,
            # src_attention_mask, src_offset_mapping, ...
            **{
                f"src_{k}": v
                for k, v in self.tokenizer(
                    [b["input"] for b in batch],
                    return_tensors="pt",
                    padding=True,
                    max_length=self.max_length,
                    truncation=True,
                    return_offsets_mapping=True,
                ).items()
            },
            # Parallel (batch_index, start_token) lists for every kept anchor.
            "offsets_start": (
                [
                    i
                    for i, b in enumerate(batch)
                    for a in b["anchors"]
                    if a[1] < self.max_length and a[1] - a[0] < self.max_length_span
                ],
                [
                    a[0]
                    for i, b in enumerate(batch)
                    for a in b["anchors"]
                    if a[1] < self.max_length and a[1] - a[0] < self.max_length_span
                ],
            ),
            # Parallel (batch_index, end_token) lists for every kept anchor.
            "offsets_end": (
                [
                    i
                    for i, b in enumerate(batch)
                    for a in b["anchors"]
                    if a[1] < self.max_length and a[1] - a[0] < self.max_length_span
                ],
                [
                    a[1]
                    for i, b in enumerate(batch)
                    for a in b["anchors"]
                    if a[1] < self.max_length and a[1] - a[0] < self.max_length_span
                ],
            ),
            # Parallel (batch_index, token) lists covering every token strictly
            # after the span start, up to and including the span end.
            "offsets_inside": (
                [
                    i
                    for i, b in enumerate(batch)
                    for a in b["anchors"]
                    if a[1] < self.max_length and a[1] - a[0] < self.max_length_span
                    for j in range(a[0] + 1, a[1] + 1)
                ],
                [
                    j
                    for i, b in enumerate(batch)
                    for a in b["anchors"]
                    if a[1] < self.max_length and a[1] - a[0] < self.max_length_span
                    for j in range(a[0] + 1, a[1] + 1)
                ],
            ),
            # Keep the raw records for downstream consumers.
            "raw": batch,
        }

        if not self.test:
            # For each kept anchor, sample one negative candidate entity
            # different from the gold one (None when no alternative exists).
            negatives = [
                np.random.choice([e for e in cands if e != a[2]])
                if len([e for e in cands if e != a[2]]) > 0
                else None
                for b in batch["raw"]
                for a, cands in zip(b["anchors"], b["candidates"])
                if a[1] < self.max_length and a[1] - a[0] < self.max_length_span
            ]
            # Gold entities, aligned element-wise with negatives.
            targets = [
                a[2]
                for b in batch["raw"]
                for a in b["anchors"]
                if a[1] < self.max_length and a[1] - a[0] < self.max_length_span
            ]
            assert len(targets) == len(negatives)
            # NOTE(review): the inner `if not self.test else {}` guards below
            # look redundant given the enclosing branch — presumably leftovers.
            batch_upd = {
                **(
                    {
                        f"trg_{k}": v
                        for k, v in self.tokenizer(
                            targets,
                            return_tensors="pt",
                            padding=True,
                            max_length=self.max_length,
                            truncation=True,
                        ).items()
                    }
                    if not self.test
                    else {}
                ),
                **(
                    {
                        f"neg_{k}": v
                        for k, v in self.tokenizer(
                            [e for e in negatives if e],
                            return_tensors="pt",
                            padding=True,
                            max_length=self.max_length,
                            truncation=True,
                        ).items()
                    }
                    if not self.test
                    else {}
                ),
                # Marks which anchors actually have a sampled negative.
                "neg_mask": torch.tensor([e is not None for e in negatives]),
            }
            batch = {**batch, **batch_upd}

        return batch
import glob
import io
import os
import sys
from pathlib import Path
import pytest
import numpy as np
import torch
from PIL import Image, __version__ as PILLOW_VERSION
import torchvision.transforms.functional as F
from common_utils import get_tmp_dir, needs_cuda, assert_equal
from torchvision.io.image import (
decode_png, decode_jpeg, encode_jpeg, write_jpeg, decode_image, read_file,
encode_png, write_png, write_file, ImageReadMode, read_image)
# Test-asset locations, all relative to this test file.
IMAGE_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets")
FAKEDATA_DIR = os.path.join(IMAGE_ROOT, "fakedata")
IMAGE_DIR = os.path.join(FAKEDATA_DIR, "imagefolder")
DAMAGED_JPEG = os.path.join(IMAGE_ROOT, 'damaged_jpeg')
ENCODE_JPEG = os.path.join(IMAGE_ROOT, "encode_jpeg")
INTERLACED_PNG = os.path.join(IMAGE_ROOT, "interlaced_png")
IS_WINDOWS = sys.platform in ('win32', 'cygwin')
# Parse the Pillow version string "x.y.z" into a comparable int tuple.
PILLOW_VERSION = tuple(int(x) for x in PILLOW_VERSION.split('.'))
def _get_safe_image_name(name):
# Used when we need to change the pytest "id" for an "image path" parameter.
# If we don't, the test id (i.e. its name) will contain the whole path to the image, which is machine-specific,
# and this creates issues when the test is running in a different machine than where it was collected
# (typically, in fb internal infra)
return name.split(os.path.sep)[-1]
def get_images(directory, img_ext):
    """Yield image paths with extension ``img_ext`` under ``directory``,
    skipping files directly inside 'damaged_jpeg' and 'jpeg_write' folders."""
    assert os.path.isdir(directory)
    skipped_parents = ('damaged_jpeg', 'jpeg_write')
    for path in glob.glob(f'{directory}/**/*{img_ext}', recursive=True):
        if path.split(os.sep)[-2] not in skipped_parents:
            yield path
def pil_read_image(img_path):
    """Read an image file with PIL and return it as a torch tensor (HWC)."""
    with Image.open(img_path) as pil_img:
        arr = np.array(pil_img)
    return torch.from_numpy(arr)
def normalize_dimensions(img_pil):
    """Bring an image tensor to CHW layout: HWC tensors are permuted,
    2-D (HW) tensors get a singleton channel dimension."""
    if img_pil.ndim == 3:
        return img_pil.permute(2, 0, 1)
    return img_pil.unsqueeze(0)
@pytest.mark.parametrize('img_path', [
    pytest.param(jpeg_path, id=_get_safe_image_name(jpeg_path))
    for jpeg_path in get_images(IMAGE_ROOT, ".jpg")
])
@pytest.mark.parametrize('pil_mode, mode', [
    (None, ImageReadMode.UNCHANGED),
    ("L", ImageReadMode.GRAY),
    ("RGB", ImageReadMode.RGB),
])
def test_decode_jpeg(img_path, pil_mode, mode):
    """Compare decode_image's JPEG output against Pillow for each read mode."""
    with Image.open(img_path) as img:
        is_cmyk = img.mode == "CMYK"
        if pil_mode is not None:
            if is_cmyk:
                # libjpeg does not support the conversion
                pytest.xfail("Decoding a CMYK jpeg isn't supported")
            img = img.convert(pil_mode)
        img_pil = torch.from_numpy(np.array(img))
        if is_cmyk:
            # flip the colors to match libjpeg
            img_pil = 255 - img_pil

    img_pil = normalize_dimensions(img_pil)
    data = read_file(img_path)
    img_ljpeg = decode_image(data, mode=mode)

    # Permit a small variation on pixel values to account for implementation
    # differences between Pillow and LibJPEG.
    abs_mean_diff = (img_ljpeg.type(torch.float32) - img_pil).abs().mean().item()
    assert abs_mean_diff < 2
def test_decode_jpeg_errors():
    """decode_jpeg must reject malformed tensor inputs with clear errors."""
    # Wrong shape: input must be a non-empty 1-D tensor.
    with pytest.raises(RuntimeError, match="Expected a non empty 1-dimensional tensor"):
        decode_jpeg(torch.empty((100, 1), dtype=torch.uint8))

    # Wrong dtype.
    with pytest.raises(RuntimeError, match="Expected a torch.uint8 tensor"):
        decode_jpeg(torch.empty((100,), dtype=torch.float16))

    # Right shape and dtype, but not JPEG content.
    with pytest.raises(RuntimeError, match="Not a JPEG file"):
        decode_jpeg(torch.empty((100), dtype=torch.uint8))
def test_decode_bad_huffman_images():
    """decode_jpeg should still decode a JPEG with a bad Huffman encoding."""
    # sanity check: make sure we can decode the bad Huffman encoding
    bad_huff = read_file(os.path.join(DAMAGED_JPEG, 'bad_huffman.jpg'))
    decode_jpeg(bad_huff)
@pytest.mark.parametrize('img_path', [
    pytest.param(truncated_image, id=_get_safe_image_name(truncated_image))
    for truncated_image in glob.glob(os.path.join(DAMAGED_JPEG, 'corrupt*.jpg'))
])
def test_damaged_corrupt_images(img_path):
    """Corrupt JPEG assets must raise instead of decoding garbage."""
    # Truncated images should raise an exception
    data = read_file(img_path)
    if 'corrupt34' in img_path:
        # The corrupt34* assets are cut short mid-stream.
        match_message = "Image is incomplete or truncated"
    else:
        match_message = "Unsupported marker type"
    with pytest.raises(RuntimeError, match=match_message):
        decode_jpeg(data)
@pytest.mark.parametrize('img_path', [
    pytest.param(png_path, id=_get_safe_image_name(png_path))
    for png_path in get_images(FAKEDATA_DIR, ".png")
])
@pytest.mark.parametrize('pil_mode, mode', [
    (None, ImageReadMode.UNCHANGED),
    ("L", ImageReadMode.GRAY),
    ("LA", ImageReadMode.GRAY_ALPHA),
    ("RGB", ImageReadMode.RGB),
    ("RGBA", ImageReadMode.RGB_ALPHA),
])
def test_decode_png(img_path, pil_mode, mode):
    """Compare decode_image's PNG output against Pillow for each read mode."""
    with Image.open(img_path) as img:
        if pil_mode is not None:
            img = img.convert(pil_mode)
        img_pil = torch.from_numpy(np.array(img))

    img_pil = normalize_dimensions(img_pil)
    data = read_file(img_path)
    img_lpng = decode_image(data, mode=mode)

    # Allow a 1-unit pixel difference when a mode conversion was applied.
    tol = 0 if pil_mode is None else 1

    if PILLOW_VERSION >= (8, 3) and pil_mode == "LA":
        # Avoid checking the transparency channel until
        # https://github.com/python-pillow/Pillow/issues/5593#issuecomment-878244910
        # is fixed.
        # TODO: remove once fix is released in PIL. Should be > 8.3.1.
        img_lpng, img_pil = img_lpng[0], img_pil[0]

    torch.testing.assert_close(img_lpng, img_pil, atol=tol, rtol=0)
def test_decode_png_errors():
    """decode_png must reject empty tensors and non-PNG payloads."""
    with pytest.raises(RuntimeError, match="Expected a non empty 1-dimensional tensor"):
        decode_png(torch.empty((), dtype=torch.uint8))
    with pytest.raises(RuntimeError, match="Content is not png"):
        decode_png(torch.randint(3, 5, (300,), dtype=torch.uint8))
@pytest.mark.parametrize('img_path', [
    pytest.param(png_path, id=_get_safe_image_name(png_path))
    for png_path in get_images(IMAGE_DIR, ".png")
])
def test_encode_png(img_path):
    """Round-trip: encode_png output must decode (via PIL) to the input."""
    pil_image = Image.open(img_path)
    img_pil = torch.from_numpy(np.array(pil_image))
    img_pil = img_pil.permute(2, 0, 1)  # HWC -> CHW, as encode_png expects
    png_buf = encode_png(img_pil, compression_level=6)

    rec_img = Image.open(io.BytesIO(bytes(png_buf.tolist())))
    rec_img = torch.from_numpy(np.array(rec_img))
    rec_img = rec_img.permute(2, 0, 1)

    assert_equal(img_pil, rec_img)
def test_encode_png_errors():
    """encode_png must validate dtype, compression level, and channel count."""
    with pytest.raises(RuntimeError, match="Input tensor dtype should be uint8"):
        encode_png(torch.empty((3, 100, 100), dtype=torch.float32))
    uint8_img = torch.empty((3, 100, 100), dtype=torch.uint8)
    for bad_level in (-1, 10):
        with pytest.raises(RuntimeError, match="Compression level should be between 0 and 9"):
            encode_png(uint8_img, compression_level=bad_level)
    with pytest.raises(RuntimeError, match="The number of channels should be 1 or 3, got: 5"):
        encode_png(torch.empty((5, 100, 100), dtype=torch.uint8))
@pytest.mark.parametrize('img_path', [
    pytest.param(png_path, id=_get_safe_image_name(png_path))
    for png_path in get_images(IMAGE_DIR, ".png")
])
def test_write_png(img_path):
    """write_png must produce a file whose pixels match the source image."""
    with get_tmp_dir() as d:
        # Context managers release the PIL file handles; the original left
        # both images open (potential ResourceWarning).
        with Image.open(img_path) as pil_image:
            img_pil = torch.from_numpy(np.array(pil_image))
        img_pil = img_pil.permute(2, 0, 1)
        filename, _ = os.path.splitext(os.path.basename(img_path))
        torch_png = os.path.join(d, '{0}_torch.png'.format(filename))
        write_png(img_pil, torch_png, compression_level=6)
        with Image.open(torch_png) as saved:
            saved_image = torch.from_numpy(np.array(saved))
        saved_image = saved_image.permute(2, 0, 1)
        assert_equal(img_pil, saved_image)
def test_read_file():
    """read_file must return the raw file bytes as a uint8 tensor."""
    with get_tmp_dir() as d:
        content = b'TorchVision\211\n'
        fpath = os.path.join(d, 'test1.bin')
        with open(fpath, 'wb') as f:
            f.write(content)
        data = read_file(fpath)
        os.unlink(fpath)
        assert_equal(data, torch.tensor(list(content), dtype=torch.uint8))
    with pytest.raises(RuntimeError, match="No such file or directory: 'tst'"):
        read_file('tst')
def test_read_file_non_ascii():
    """read_file must handle paths containing non-ASCII characters."""
    with get_tmp_dir() as d:
        content = b'TorchVision\211\n'
        fpath = os.path.join(d, '日本語(Japanese).bin')
        with open(fpath, 'wb') as f:
            f.write(content)
        data = read_file(fpath)
        os.unlink(fpath)
        assert_equal(data, torch.tensor(list(content), dtype=torch.uint8))
def test_write_file():
    """write_file must write tensor bytes to disk verbatim."""
    with get_tmp_dir() as d:
        content = b'TorchVision\211\n'
        fpath = os.path.join(d, 'test1.bin')
        write_file(fpath, torch.tensor(list(content), dtype=torch.uint8))
        with open(fpath, 'rb') as f:
            saved_content = f.read()
        os.unlink(fpath)
        assert saved_content == content
def test_write_file_non_ascii():
    """write_file must handle paths containing non-ASCII characters."""
    with get_tmp_dir() as d:
        content = b'TorchVision\211\n'
        fpath = os.path.join(d, '日本語(Japanese).bin')
        write_file(fpath, torch.tensor(list(content), dtype=torch.uint8))
        with open(fpath, 'rb') as f:
            saved_content = f.read()
        os.unlink(fpath)
        assert saved_content == content
@pytest.mark.parametrize('shape', [
    (27, 27),
    (60, 60),
    (105, 105),
])
def test_read_1_bit_png(shape):
    """1-bit PNGs must decode to 0/255 uint8 pixels."""
    np_rng = np.random.RandomState(0)
    with get_tmp_dir() as root:
        image_path = os.path.join(root, f'test_{shape}.png')
        pixels = np_rng.rand(*shape) > 0.5
        Image.fromarray(pixels).save(image_path)
        decoded = read_image(image_path)
        expected = normalize_dimensions(torch.as_tensor(pixels * 255, dtype=torch.uint8))
        assert_equal(decoded, expected)
@pytest.mark.parametrize('shape', [
    (27, 27),
    (60, 60),
    (105, 105),
])
@pytest.mark.parametrize('mode', [
    ImageReadMode.UNCHANGED,
    ImageReadMode.GRAY,
])
def test_read_1_bit_png_consistency(shape, mode):
    """Decoding the same 1-bit PNG twice must give identical results."""
    np_rng = np.random.RandomState(0)
    with get_tmp_dir() as root:
        image_path = os.path.join(root, f'test_{shape}.png')
        pixels = np_rng.rand(*shape) > 0.5
        Image.fromarray(pixels).save(image_path)
        first = read_image(image_path, mode)
        second = read_image(image_path, mode)
        assert_equal(first, second)
def test_read_interlaced_png():
    """Interlaced and non-interlaced encodings must decode identically."""
    imgs = list(get_images(INTERLACED_PNG, ".png"))
    with Image.open(imgs[0]) as im1, Image.open(imgs[1]) as im2:
        # Sanity check that the two fixtures differ in their interlace flag.
        # Compare with `!=` rather than `is`: identity of small ints / None
        # is a CPython implementation detail, not a value comparison.
        assert im1.info.get("interlace") != im2.info.get("interlace")
    img1 = read_image(imgs[0])
    img2 = read_image(imgs[1])
    assert_equal(img1, img2)
@needs_cuda
@pytest.mark.parametrize('img_path', [
    pytest.param(jpeg_path, id=_get_safe_image_name(jpeg_path))
    for jpeg_path in get_images(IMAGE_ROOT, ".jpg")
])
@pytest.mark.parametrize('mode', [ImageReadMode.UNCHANGED, ImageReadMode.GRAY, ImageReadMode.RGB])
@pytest.mark.parametrize('scripted', (False, True))
def test_decode_jpeg_cuda(mode, img_path, scripted):
    """CUDA (nvjpeg) decoding must stay close to the CPU decoder output."""
    if 'cmyk' in img_path:
        pytest.xfail("Decoding a CMYK jpeg isn't supported")
    data = read_file(img_path)
    cpu_img = decode_image(data, mode=mode)
    decoder = torch.jit.script(decode_jpeg) if scripted else decode_jpeg
    cuda_img = decoder(data, mode=mode, device='cuda')
    # Different jpeg implementations may legitimately differ by a few levels.
    mean_abs_diff = (cpu_img.float() - cuda_img.cpu().float()).abs().mean()
    assert mean_abs_diff < 2
@needs_cuda
@pytest.mark.parametrize('cuda_device', ('cuda', 'cuda:0', torch.device('cuda')))
def test_decode_jpeg_cuda_device_param(cuda_device):
    """decode_jpeg must accept the device as either a string or a torch.device."""
    path = next(p for p in get_images(IMAGE_ROOT, ".jpg") if 'cmyk' not in p)
    decode_jpeg(read_file(path), device=cuda_device)
@needs_cuda
def test_decode_jpeg_cuda_errors():
    """CUDA decoding must validate tensor shape, placement, dtype, and device."""
    data = read_file(next(get_images(IMAGE_ROOT, ".jpg")))
    bad_inputs = [
        (data.reshape(-1, 1), "Expected a non empty 1-dimensional tensor"),
        (data.to('cuda'), "input tensor must be on CPU"),
        (data.to(torch.float), "Expected a torch.uint8 tensor"),
    ]
    for bad_data, message in bad_inputs:
        with pytest.raises(RuntimeError, match=message):
            decode_jpeg(bad_data, device='cuda')
    with pytest.raises(RuntimeError, match="Expected a cuda device"):
        torch.ops.image.decode_jpeg_cuda(data, ImageReadMode.UNCHANGED.value, 'cpu')
def test_encode_jpeg_errors():
    """encode_jpeg must validate dtype, quality range, channel count, and rank."""
    with pytest.raises(RuntimeError, match="Input tensor dtype should be uint8"):
        encode_jpeg(torch.empty((3, 100, 100), dtype=torch.float32))
    quality_msg = ("Image quality should be a positive number "
                   "between 1 and 100")
    for bad_quality in (-1, 101):
        with pytest.raises(ValueError, match=quality_msg):
            encode_jpeg(torch.empty((3, 100, 100), dtype=torch.uint8), quality=bad_quality)
    with pytest.raises(RuntimeError, match="The number of channels should be 1 or 3, got: 5"):
        encode_jpeg(torch.empty((5, 100, 100), dtype=torch.uint8))
    for bad_shape in ((1, 3, 100, 100), (100, 100)):
        with pytest.raises(RuntimeError, match="Input data should be a 3-dimensional tensor"):
            encode_jpeg(torch.empty(bad_shape, dtype=torch.uint8))
def _collect_if(cond):
    """Decorator factory: collect the decorated test only when *cond* is true.

    TODO: remove together with test_encode_jpeg_reference and
    test_write_jpeg_reference.
    """
    def _inner(test_func):
        return test_func if cond else pytest.mark.dont_collect(test_func)
    return _inner
@_collect_if(cond=IS_WINDOWS)
@pytest.mark.parametrize('img_path', [
    pytest.param(jpeg_path, id=_get_safe_image_name(jpeg_path))
    for jpeg_path in get_images(ENCODE_JPEG, ".jpg")
])
def test_encode_jpeg_reference(img_path):
    # This test is known to be *wrong*: it compares a torchvision-encoded
    # jpeg against a PIL-encoded reference, but the torchvision side starts
    # from decode_jpeg output, which can already differ from PIL's decode
    # (see the decode tests, which use a high tolerance). A valid comparison
    # would encode the exact same decoded image -- that is what
    # test_encode_jpeg does, but those stricter tests fail on Windows
    # (probably a libjpeg difference between torchvision and PIL).
    # FIXME: make the correct tests pass on Windows and delete this one.
    dirname = os.path.dirname(img_path)
    filename, _ = os.path.splitext(os.path.basename(img_path))
    expected_file = os.path.join(
        dirname, 'jpeg_write', '{0}_pil.jpg'.format(filename))
    img = decode_jpeg(read_file(img_path))
    with open(expected_file, 'rb') as f:
        pil_bytes = torch.as_tensor(list(f.read()), dtype=torch.uint8)
    for src_img in (img, img.contiguous()):
        # PIL encodes with quality 75 by default
        assert_equal(encode_jpeg(src_img, quality=75), pil_bytes)
@_collect_if(cond=IS_WINDOWS)
@pytest.mark.parametrize('img_path', [
    pytest.param(jpeg_path, id=_get_safe_image_name(jpeg_path))
    for jpeg_path in get_images(ENCODE_JPEG, ".jpg")
])
def test_write_jpeg_reference(img_path):
    # FIXME: remove eventually -- see test_encode_jpeg_reference.
    with get_tmp_dir() as d:
        img = decode_jpeg(read_file(img_path))
        filename, _ = os.path.splitext(os.path.basename(img_path))
        torch_jpeg = os.path.join(d, '{0}_torch.jpg'.format(filename))
        pil_jpeg = os.path.join(os.path.dirname(img_path), 'jpeg_write',
                                '{0}_pil.jpg'.format(filename))
        write_jpeg(img, torch_jpeg, quality=75)
        with open(torch_jpeg, 'rb') as f:
            torch_bytes = f.read()
        with open(pil_jpeg, 'rb') as f:
            pil_bytes = f.read()
        assert_equal(torch_bytes, pil_bytes)
@pytest.mark.skipif(IS_WINDOWS, reason=(
    'this test fails on windows because PIL uses libjpeg-turbo on windows'
))
@pytest.mark.parametrize('img_path', [
    pytest.param(jpeg_path, id=_get_safe_image_name(jpeg_path))
    for jpeg_path in get_images(ENCODE_JPEG, ".jpg")
])
def test_encode_jpeg(img_path):
    """encode_jpeg must match PIL's encoder byte-for-byte at equal quality."""
    img = read_image(img_path)
    buf = io.BytesIO()
    F.to_pil_image(img).save(buf, format='JPEG', quality=75)
    # torch can't ingest raw bytes directly, so round through numpy
    encoded_jpeg_pil = torch.as_tensor(np.frombuffer(buf.getvalue(), dtype=np.uint8))
    for src_img in (img, img.contiguous()):
        assert_equal(encode_jpeg(src_img, quality=75), encoded_jpeg_pil)
@pytest.mark.skipif(IS_WINDOWS, reason=(
    'this test fails on windows because PIL uses libjpeg-turbo on windows'
))
@pytest.mark.parametrize('img_path', [
    pytest.param(jpeg_path, id=_get_safe_image_name(jpeg_path))
    for jpeg_path in get_images(ENCODE_JPEG, ".jpg")
])
def test_write_jpeg(img_path):
    """write_jpeg must produce the same file bytes as PIL at equal quality."""
    with get_tmp_dir() as tmp:
        tmp = Path(tmp)
        img = read_image(img_path)
        torch_jpeg = str(tmp / 'torch.jpg')
        pil_jpeg = str(tmp / 'pil.jpg')
        write_jpeg(img, torch_jpeg, quality=75)
        F.to_pil_image(img).save(pil_jpeg, quality=75)
        torch_bytes = Path(torch_jpeg).read_bytes()
        pil_bytes = Path(pil_jpeg).read_bytes()
        assert_equal(torch_bytes, pil_bytes)
if __name__ == "__main__":
    # Allow running this test module directly: `python <this file>`.
    pytest.main([__file__])
from fastFM import als
from scipy import sparse
class FactorizationMachine():
    """
    A thin wrapper around fastFM's ALS-based Factorization Machine regressor.
    """

    def __init__(self):
        # Hyper-parameters mirror the original configuration.
        self.model = als.FMRegression(n_iter=1000, init_stdev=0.1, rank=2, l2_reg_w=0.1, l2_reg_V=0.5)

    def fit(self, features, target):
        """Fit the regressor; features are converted to CSR sparse format."""
        design_matrix = sparse.csr_matrix(features)
        self.model.fit(design_matrix, target)

    def predict(self, features):
        """Predict targets for the given feature matrix."""
        design_matrix = sparse.csr_matrix(features)
        return self.model.predict(design_matrix)
"""Compute depth maps for images in the input folder.
"""
# import os
# import glob
import torch
# from monodepth_net import MonoDepthNet
# import utils
# import matplotlib.pyplot as plt
import numpy as np
import cv2
# import imageio
def run_depth(img, model_path, Net, utils, target_w=None, f=False):
    """Run a monocular depth network on a single image.

    Args:
        img (np.ndarray): input RGB image, shape (H, W, 3), float values
            (presumably in [0, 1] -- TODO confirm against callers).
        model_path (str): path to the saved model weights.
        Net: network class; ``Net(model_path)`` must build the model.
        utils: helper module providing ``resize_image`` and ``resize_depth``.
        target_w: unused; kept only for backward compatibility.
        f (bool): if True, force CPU execution even when CUDA is available.

    Returns:
        np.ndarray: depth map normalized to uint8 (0..255), shape
        (target_height, target_width), where the longer input side is
        scaled to 640 px.
    """
    model = Net(model_path)
    use_cuda = torch.cuda.is_available() and not f
    if use_cuda:
        model.cuda()
    model.eval()

    # Output resolution: scale the longer image side to 640 px.
    scale = 640. / max(img.shape[0], img.shape[1])
    target_height = int(round(img.shape[0] * scale))
    target_width = int(round(img.shape[1] * scale))

    img_input = utils.resize_image(img)
    if use_cuda:
        img_input = img_input.cuda()

    # Inference without gradient tracking.
    with torch.no_grad():
        out = model.forward(img_input)

    depth = utils.resize_depth(out, target_width, target_height)

    # Normalize the depth map to the full uint8 range.
    depth_min = depth.min()
    depth_max = depth.max()
    bits = 1
    max_val = (2 ** (8 * bits)) - 1
    if depth_max - depth_min > np.finfo("float").eps:
        scaled = max_val * (depth - depth_min) / (depth_max - depth_min)
        return scaled.astype("uint8")
    # Bug fix: the original assigned the scalar 0 here and then called
    # .astype() on it, which raised AttributeError for constant depth maps.
    return np.zeros(depth.shape, dtype="uint8")
# -*- coding: utf-8 -*-
import cv2
from DQLBrain import Brain
import numpy as np
from collections import deque
import sqlite3
import pygame
import time
import game_setting
import importlib
# Game window geometry (pixels) and target frame rate.
SCREEN_X = 288
SCREEN_Y = 512
FPS = 60
class AI:
    """Glue between a pygame-based game and the DQL agent (Brain).

    Drives the game loop, feeds preprocessed frames to the agent, and logs
    per-episode scores into a temporary SQLite database (temp.db).
    """

    def __init__(self, title, model_path, replay_memory, current_timestep, explore,
                 initial_epsilon, final_epsilon, gamma, replay_size, batch_size):
        # Per-episode score history (unbounded).
        self.scores = deque()
        self.games_info = game_setting.getSetting()
        # Connect to the scratch database; allow use from other threads.
        self.data_base = sqlite3.connect('temp.db', check_same_thread=False)
        self.c = self.data_base.cursor()
        try:
            self.c.execute('create table scores (time integer, score integer) ')
        except sqlite3.OperationalError:
            # Table already exists from a previous run -- narrow except
            # instead of the original bare `except:`.
            pass
        # Create the deep reinforcement learning agent.
        self.brain = Brain(self.games_info[title]["action"], model_path, replay_memory,
                           current_timestep, explore, initial_epsilon, final_epsilon,
                           gamma, replay_size, batch_size)
        # Create the game window.
        self.startGame(title, SCREEN_X, SCREEN_Y)
        # Dynamically load the module implementing the selected game.
        game = importlib.import_module(self.games_info[title]['class'])
        self.game = game.Game(self.screen)

    def startGame(self, title, SCREEN_X, SCREEN_Y):
        """Initialize pygame, the window surface, and the frame clock."""
        pygame.init()
        screen_size = (SCREEN_X, SCREEN_Y)
        pygame.display.set_caption(title)
        self.screen = pygame.display.set_mode(screen_size)
        self.clock = pygame.time.Clock()

    def preProcess(self, observation):
        """Downscale a 512x288 frame to 80x80 grayscale and binarize it.

        Returns an array of shape (80, 80, 1); the trailing axis keeps the
        frame a rank-3 tensor for the TensorFlow input pipeline.
        """
        observation = cv2.cvtColor(cv2.resize(observation, (80, 80)), cv2.COLOR_BGR2GRAY)
        # Any non-black pixel becomes white.
        _, observation = cv2.threshold(observation, 1, 255, cv2.THRESH_BINARY)
        return np.reshape(observation, (80, 80, 1))

    def playGame(self):
        """Main loop: act, observe, train, and record episode scores."""
        # Kick off the game with an arbitrary first action.
        observation0, reward0, terminal, score = self.game.frameStep(np.array([1, 0, 0]))
        observation0 = self.preProcess(observation0)
        self.brain.setInitState(observation0[:, :, 0])
        while True:
            action = self.brain.getAction()
            next_observation, reward, terminal, score = self.game.frameStep(action)
            if terminal == -1:
                # Window destroyed: shut everything down.
                self.closeGame()
                return
            next_observation = self.preProcess(next_observation)
            self.brain.setPerception(next_observation, action, reward, terminal)
            # Persist the score at the end of each episode.
            if terminal:
                t = int(time.time())
                # Parameterized query instead of %-string interpolation
                # (safe against injection and quoting mistakes), and commit
                # so the row survives closeGame().
                self.c.execute("insert into scores values (?,?)", (t, score))
                self.data_base.commit()

    def closeGame(self):
        """Release the window, the agent, and the database connection."""
        pygame.quit()
        self.brain.close()
        self.data_base.close()

    def getState(self):
        """Return the agent's internal state."""
        return self.brain.getState()

    def getReplay(self):
        """Return the agent's replay memory."""
        return self.brain.replayMemory
import math
import fire
import jax
import jax.numpy as jnp
import numpy as np
import opax
import pax
import tensorflow as tf
from PIL import Image
from tqdm.auto import tqdm
from data_loader import load_celeb_a
from model import GaussianDiffusion, UNet
def make_image_grid(images, padding=2):
    """Arrange n images (n must be a perfect square) into one padded grid.

    Args:
        images: array of shape (n, h, w, c).
        padding: number of zero-valued pixels between and around cells.

    Returns:
        A single array of shape (size*h + (size+1)*padding,
        size*w + (size+1)*padding, c), with size = sqrt(n).
    """
    count = images.shape[0]
    size = int(math.sqrt(count))
    assert size * size == count, "expecting a square grid"
    cell_h, cell_w = images.shape[1], images.shape[2]
    grid_h = cell_h * size + padding * (size + 1)
    grid_w = cell_w * size + padding * (size + 1)
    grid = np.zeros((grid_h, grid_w, images.shape[-1]), dtype=images.dtype)
    for idx in range(count):
        # cells fill down each column first (row index varies fastest)
        row, col = idx % size, idx // size
        top = row * (cell_h + padding) + padding
        left = col * (cell_w + padding) + padding
        grid[top:top + cell_h, left:left + cell_w, :] = images[idx]
    return grid
def train(
    batch_size: int = 32,
    learning_rate: float = 1e-4,
    num_training_steps: int = 10_000,
    log_freq: int = 1000,
    image_size: int = 64,
    random_seed: int = 42,
):
    """Train a denoising diffusion model (UNet backbone) on CelebA.

    Every ``log_freq`` steps, logs the average training loss and writes a
    4x4 grid of generated samples to ``sample_<step>.png``.
    """
    pax.seed_rng_key(random_seed)
    model = UNet(dim=64, dim_mults=(1, 2, 4, 8))
    diffusion = GaussianDiffusion(
        model,
        image_size=image_size,
        timesteps=1000,
        loss_type="l1",  # L1 or L2
    )
    dataset = load_celeb_a()
    # Infinite shuffled batch pipeline, truncated to the step budget.
    dataloader = (
        dataset.repeat()
        .shuffle(batch_size * 100)
        .batch(batch_size)
        .take(num_training_steps)
        .prefetch(tf.data.AUTOTUNE)
    )
    def loss_fn(model, inputs):
        # purecall returns the (possibly updated) module and its output loss.
        model, loss = pax.purecall(model, inputs)
        return loss, (loss, model)
    update_fn = pax.utils.build_update_fn(loss_fn)
    fast_update_fn = jax.jit(update_fn)
    optimizer = opax.adam(learning_rate)(diffusion.parameters())
    total_loss = 0.0
    tr = tqdm(dataloader)
    for step, batch in enumerate(tr, 1):
        # Convert TF tensors to host numpy arrays for JAX.
        batch = jax.tree_map(lambda x: x.numpy(), batch)
        diffusion, optimizer, loss = fast_update_fn(diffusion, optimizer, batch)
        total_loss = total_loss + loss
        if step % log_freq == 0:
            loss = total_loss / log_freq
            total_loss = 0.0
            tr.write(f"[step {step:05d}] train loss {loss:.3f}")
            # Sample 16 images, map from [-1, 1] to [0, 255], save as a grid.
            imgs = jax.device_get(diffusion.eval().sample(16))
            imgs = ((imgs * 0.5 + 0.5) * 255).astype(jnp.uint8)
            imgs = make_image_grid(imgs)
            im = Image.fromarray(imgs)
            im.save(f"sample_{step:05d}.png")
if __name__ == "__main__":
    # Expose train()'s keyword arguments as a command-line interface.
    fire.Fire(train)
"""
Mapping indices for complexes / multi-domain sequences to
internal model numbering.
Authors:
Thomas A. Hopf
Anna G. Green (MultiSegmentCouplingsModel)
"""
# NOTE: "Iterable" must come from collections.abc -- the top-level
# collections alias was deprecated and removed in Python 3.10.
from collections.abc import Iterable
from copy import deepcopy

import numpy as np
import pandas as pd

from evcouplings.couplings.model import CouplingsModel
class Segment:
    """A contiguous stretch of sequence inside an alignment used to infer
    evolutionary couplings (e.g. a single domain, or one monomer of a
    concatenated complex alignment).
    """

    def __init__(self, segment_type, sequence_id,
                 region_start, region_end, positions=None,
                 segment_id="A"):
        """Create a new sequence segment.

        Parameters
        ----------
        segment_type : {"aa", "dna", "rna"}
            Type of sequence
        sequence_id : str
            Identifier of sequence
        region_start : int
            Start index of sequence segment
        region_end : int
            End index of sequence segment (inclusive)
        positions : list(int), optional (default: None)
            Alignment positions used for EC calculation (all positions
            corresponding to uppercase residues). Compulsory when using
            non-focus mode.
        segment_id : str
            Identifier of segment (must be unique)
        """
        self.segment_type = segment_type
        self.sequence_id = sequence_id
        self.region_start = region_start
        self.region_end = region_end
        # normalize positions to plain ints; keep None to mean "not given"
        self.positions = None if positions is None else [int(p) for p in positions]
        self.segment_id = segment_id

    @classmethod
    def from_list(cls, segment):
        """Create a Segment from its list representation (see to_list).

        Parameters
        ----------
        segment : list
            [segment_id, segment_type, sequence_id,
             region_start, region_end, positions]

        Returns
        -------
        Segment
            New Segment instance built from the list
        """
        segment_id, segment_type, sequence_id, region_start, region_end, positions = segment
        return cls(segment_type, sequence_id, region_start,
                   region_end, positions, segment_id)

    def to_list(self):
        """Return the list representation used in configs:
        [segment_id, segment_type, sequence_id, region_start,
        region_end, positions].
        """
        return [
            self.segment_id,
            self.segment_type,
            self.sequence_id,
            self.region_start,
            self.region_end,
            self.positions,
        ]

    def default_chain_name(self):
        """Default PDB chain identifier this segment maps to in 3D
        structures. Segment ids follow the A_1, A_2, ..., B_1 convention,
        so the chain is everything before the first underscore.
        """
        return self.segment_id.split("_")[0]
class SegmentIndexMapper:
    """
    Map indices of one or more sequence segments into CouplingsModel
    internal numbering space. Can also be used to (trivially) remap
    indices for a single sequence.
    """

    def __init__(self, focus_mode, first_index, *segments):
        """
        Create index mapping from individual segments.

        Parameters
        ----------
        focus_mode : bool
            Set to True if model was inferred in focus mode,
            False otherwise.
        first_index : int
            Index of first position in model/sequence.
            For non-focus mode, should always be one. For focus
            mode, corresponds to index given in sequence header
            (1 if not in alignment).
        *segments : evcouplings.couplings.mapping.Segment
            Segments containing numberings for each individual segment.
        """
        # store copies so full segment information is retained
        self.segments = deepcopy(segments)

        # build up (segment_id, position) target tuples for all segments
        self.target_pos = []
        for s in segments:
            if focus_mode:
                # focus mode: model numbering is continuous, so the
                # continuous index range of each region applies directly
                cur_target = range(s.region_start, s.region_end + 1)
            else:
                # non-focus mode: model numbering may be discontinuous,
                # so the explicitly listed positions must be used
                cur_target = s.positions
            self.target_pos += [(s.segment_id, p) for p in cur_target]

        # corresponding list of model positions; in focus mode, some of
        # these may not actually be in the model (lowercase columns)
        self.model_pos = list(range(
            first_index, first_index + len(self.target_pos)
        ))

        # target (segment) numbering -> continuous model numbering
        self.target_to_model = dict(zip(self.target_pos, self.model_pos))
        # inverse: model numbering -> target numbering
        self.model_to_target = dict(zip(self.model_pos, self.target_pos))

    def patch_model(self, model, inplace=True):
        """
        Change numbering of a CouplingsModel object so that it uses
        segment-based numbering.

        Parameters
        ----------
        model : CouplingsModel
            Model that will be updated to segment-based numbering
        inplace : bool, optional (default: True)
            If True, change passed model; otherwise return a new object

        Returns
        -------
        CouplingsModel
            Model with updated numbering (points to the original model
            if inplace is True)

        Raises
        ------
        ValueError
            If segment mapping does not match internal model numbering
        """
        if not inplace:
            model = deepcopy(model)
        try:
            mapped = [
                self.model_to_target[pos]
                for pos in model.index_list
            ]
        except KeyError as err:
            # chain the original KeyError so the offending position is
            # visible in the traceback (the original raise hid it)
            raise ValueError(
                "Mapping from target to model positions does "
                "not contain all positions of internal model numbering"
            ) from err
        # update model numbering
        model.index_list = mapped
        return model

    def __map(self, indices, mapping_dict):
        """
        Apply an index mapping to a single index or a list of indices.

        Parameters
        ----------
        indices : int, or (str, int), or lists thereof
            Indices in input numbering space; tuples are treated as a
            single key, any other iterable is mapped element-wise.
        mapping_dict : dict(int->(str, int)) or dict((str, int)->int)
            Mapping from one indexing space into the other

        Returns
        -------
        list of int, or list of (str, int)
            Mapped indices
        """
        if isinstance(indices, Iterable) and not isinstance(indices, tuple):
            return [mapping_dict[x] for x in indices]
        return mapping_dict[indices]

    def __call__(self, segment_id, pos):
        """
        Function-style syntax for mapping a single position
        (equivalent to to_model((segment_id, pos))).

        Parameters
        ----------
        segment_id : str
            Identifier of segment
        pos : int
            Position in segment numbering

        Returns
        -------
        int
            Index in couplings object numbering space
        """
        return self.to_model((segment_id, pos))

    def to_target(self, x):
        """
        Map model index to target index.

        Parameters
        ----------
        x : int, or list of ints
            Indices in model numbering

        Returns
        -------
        (str, int), or list of (str, int)
            Indices mapped into target numbering, as
            (segment_id, index_in_segment) tuples
        """
        return self.__map(x, self.model_to_target)

    def to_model(self, x):
        """
        Map target index to model index.

        Parameters
        ----------
        x : (str, int), or list of (str, int)
            Indices in target numbering (segment_id, index_in_segment)

        Returns
        -------
        int, or list of int
            Indices mapped into couplings object numbering
        """
        return self.__map(x, self.target_to_model)
def segment_map_ecs(ecs, mapper):
    """
    Translate an EC table from model numbering into segment numbering.

    Parameters
    ----------
    ecs : pandas.DataFrame
        EC table (with position columns "i" and "j")
    mapper : SegmentIndexMapper
        Mapper whose to_target() translates model indices into
        (segment_id, position) tuples

    Returns
    -------
    pandas.DataFrame
        Copy of the table with columns "i" and "j" mapped, plus
        additional columns "segment_i" and "segment_j"
    """
    mapped_ecs = deepcopy(ecs)

    def _remap(col):
        seg_col = "segment_" + col
        # to_target yields (segment_id, position) pairs; split them into
        # a two-column frame so both parts can be assigned separately
        pairs = pd.DataFrame(
            mapper.to_target(mapped_ecs.loc[:, col]),
            columns=[seg_col, col]
        )
        # assign raw values (not Series) to avoid index-alignment surprises
        mapped_ecs.loc[:, col] = pairs.loc[:, col].values
        mapped_ecs.loc[:, seg_col] = pairs.loc[:, seg_col].values

    # map both position columns (adding the segment id columns)
    _remap("i")
    _remap("j")
    return mapped_ecs
class MultiSegmentCouplingsModel(CouplingsModel):
    """
    Complex-specific couplings model that is aware of segments and can
    be reduced to its inter-segment parameters only.
    """

    def __init__(self, filename, *segments,
                 precision="float32", file_format="plmc_v2", **kwargs):
        """
        Load model parameters and renumber them by segment.

        Parameters
        ----------
        filename : str
            Binary Jij file containing model parameters from plmc software
        *segments : evcouplings.couplings.Segment
            Segments of the model; at least one is required
        precision : {"float32", "float64"}, default: "float32"
            Whether the input file has single or double precision
        file_format : {"plmc_v2", "plmc_v1"}, default: "plmc_v2"
            File format of parameter file

        Raises
        ------
        ValueError
            If no segment is given

        TODO: add an alternate constructor/classmethod that can take an
        existing CouplingsModel instance and turn it into a
        MultiSegmentCouplingsModel.
        """
        super().__init__(filename, precision, file_format, **kwargs)

        if len(segments) == 0:
            # Bug fix: the original used `raise(ValueError, "...")`, which
            # raises "TypeError: exceptions must derive from BaseException"
            # instead of the intended ValueError.
            raise ValueError(
                "Must provide at least one segment for MultiSegmentCouplingsModel"
            )

        first_segment = segments[0]
        index_start = first_segment.region_start
        r = SegmentIndexMapper(
            True,          # use focus mode
            index_start,   # first index of first segment
            *segments
        )
        # renumber the model in place to segment-based numbering
        r.patch_model(model=self)

    def to_inter_segment_model(self):
        """
        Return a copy of the model keeping only inter-segment couplings.

        All h_i fields are zeroed, and every J_ij whose two positions lie
        in the same segment is set to 0; only inter-protein / inter-domain
        residue pairs keep their parameters.

        Returns
        -------
        CouplingsModel
            Copy of the object turned into an inter-only epistatic model
        """
        h_i = np.zeros((self.L, self.num_symbols))
        J_ij = np.zeros(self.J_ij.shape)
        for idx_i, i in enumerate(self.index_list):
            for idx_j, j in enumerate(self.index_list):
                # after renumbering, index entries are (segment_id, pos)
                if i[0] != j[0]:
                    J_ij[idx_i, idx_j] = self.J_ij[idx_i, idx_j]
        ci = deepcopy(self)
        ci.h_i = h_i
        ci.J_ij = J_ij
        ci._reset_precomputed()
        return ci
#!/usr/bin/env python
""" \example xep_sample_direct_path.py
Latest examples is located at https://github.com/xethru/XeThru_ModuleConnector_Examples or https://dev.azure.com/xethru/XeThruApps/_git/XeThru_ModuleConnector_Examples.
# Target module:
# X4M200
# X4M300
# X4M03(XEP)
# Introduction: This is an example showing how to sample the direct path pulse and generates a similar pulse from a sine and a Gaussian envelope.
Original thread:
https://www.xethru.com/community/threads/radar-pulse-shape.329/#post-1604
# prerequisite:
# ModuleConnector python lib is installed, check XeThruSensorsIntroduction application note to get detail
# xt_modules_print_info.py should be in the same folder
"""
from __future__ import print_function, division
import matplotlib.pyplot as plt
from matplotlib import mlab
import numpy as np
import pymoduleconnector
from pymoduleconnector.extras.auto import auto
from scipy import interpolate
# Auto-detect the first connected XeThru module.
device_name = auto()[0]
# print_module_info(device_name)
mc = pymoduleconnector.ModuleConnector(device_name)
# Assume an X4M300/X4M200 module and try to enter XEP mode
app = mc.get_x4m300()
# Stop running application and set module in manual mode.
try:
    app.set_sensor_mode(0x13, 0)  # Make sure no profile is running.
except RuntimeError:
    # Profile not running, OK
    pass
try:
    app.set_sensor_mode(0x12, 0)  # Manual mode.
except RuntimeError:
    # Maybe running XEP firmware only?
    pass
xep = mc.get_xep()
# Set full DAC range
xep.x4driver_set_dac_min(0)
xep.x4driver_set_dac_max(2047)
# Set integration
xep.x4driver_set_iterations(16)
xep.x4driver_set_pulses_per_step(26)
# Frame area in meters (start, end) -- includes the direct path pulse.
xep.x4driver_set_frame_area(-1, 2)
# Sample a single frame, then stop streaming (fps back to 0).
xep.x4driver_set_fps(1)
d = xep.read_message_data_float()
frame = np.array(d.data)
xep.x4driver_set_fps(0)
fig = plt.figure(figsize=(16, 8))
# X4 sampling rate (Hz) and the time axis for the sampled frame.
fs = 23.328e9
nbins = len(frame)
x = np.linspace(0, (nbins-1)/fs, nbins)
# Calculate center frequency as the mean of -10 dB fl and fh
pxx, freqs = mlab.psd(frame, Fs=fs)
pxxdb = 10*np.log10(pxx)
# Band edges: first/last frequency bins within 10 dB of the PSD peak.
arg10db = np.argwhere(pxxdb > (pxxdb.max()-10))
fl, fh = freqs[arg10db[0][0]], freqs[arg10db[-1][0]]
fc = (fl+fh)/2
# Pulse generator
# Pulse duration
bw = 1.4e9
#tau = 1/(pi*bw*sqrt(log10(e)))
tau = 340e-12
# Sampler
# Sampling rate (2x the X4 rate for the synthesized pulse)
fs2 = fs*2
# delay to pulse
t0 = 3.64e-9
# Time array
t = np.linspace(0, (nbins-1)/fs2, nbins)
# Synthesize frames: Gaussian envelope times a carrier at the measured
# center frequency fc.
frame_gen = np.exp(-((t-t0)**2)/(2*tau**2)) * np.cos(2 * np.pi * fc * (t - t0))
# Interpolate X4 frame onto the denser time grid via cubic splines.
tck_1 = interpolate.splrep(x, frame)
frame_interp = interpolate.splev(t, tck_1, der=0)
# Scale the generated pulse to the measured pulse amplitude.
frame_gen *= frame_interp.max()
# Plot frames
ax = fig.add_subplot(311)
ax.plot(x*1e9, frame, '-x', label='X4 pulse')
ax.plot(t*1e9, frame_interp, '-r', label='X4 pulse, interpolated')
ax.grid()
ax.set_xlim(ax.get_xlim()[0], t[-1]*1e9)
ax.set_xlabel("Time (ns)")
ax.set_ylabel("Normalized amplitude")
ax.legend()
ax.set_title("X4 sampled data")
ax = fig.add_subplot(312)
ax.plot(t*1e9, frame_gen, '-x', label='Generated pulse')
ax.plot(t*1e9, frame_interp, 'r', label='X4 pulse, interpolated')
ax.grid()
ax.set_xlabel("Time (ns)")
ax.set_ylabel("Normalized amplitude")
ax.set_xlim(ax.get_xlim()[0], t[-1]*1e9)
ax.legend()
ax.set_title("Generated and interpolated X4 pulse")
ax = fig.add_subplot(313)
ax.psd(frame_gen, Fs=fs2/1e9, label="Generated pulse")
ax.psd(frame_interp, Fs=fs2/1e9, label="X4 pulse, interpolated", color='r')
ax.set_xlim(0, 12)
ax.set_ylim(-84, -20)
ax.set_ylabel("PSD (Normalized)")
ax.set_xlabel("Frequency (GHz)")
ax.legend()
ax.set_title("PSD of sampled and generated pulse")
fig.suptitle("Sampled and generated X4 pulse in time and frequency domain", y=1)
fig.tight_layout()
fig.savefig("xep_sample_direct_path.png")
plt.show()
import datetime as dt
from functools import lru_cache
from pathlib import Path
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from loguru import logger
class CompoMapper(object):
"""
Class to map and plot the previously downloaded ETF composition.
"""
    def __init__(self):
        # Use the module-wide loguru logger for all instance logging.
        self.logger = logger
@logger.catch(reraise=True)
def load_etf_compo(self, file_path: Path) -> pd.DataFrame:
"""
Loads the composition file of an ETF.
Args:
file_path (Path): The compositions file path.
Returns:
pd.DataFrame: The ETF composition, including ISO 3166-1 codes for each constituent.
"""
if not file_path.exists():
error_msg = f'Non-existent file path {file_path}! Data must be downloaded first!'
self.logger.error(error_msg)
raise ValueError(error_msg)
compo_frame = pd.read_csv(file_path, index_col=0)
compo_frame = compo_frame[compo_frame['Asset Class'] == 'Equity'].copy()
compo_frame['Weight (%)'] = compo_frame['Weight (%)'].apply(lambda x: float(x))
compo_frame['iso2_code'] = compo_frame['ISIN'].apply(lambda x: x[:2])
compo_frame['iso3_code'] = compo_frame['iso2_code'].apply(lambda x: self.get_iso3_from_iso2(x))
compo_frame.dropna(inplace=True)
return compo_frame
def get_file_path_by_ticker(self, plot_date: dt.date, ticker: str) -> Path:
"""
Mapping the ticker to the respective ETF composition file path.
Args:
plot_date (dt.date): The download date for the composition
ticker (str): The ETF ticker.
Returns:
Path: The path to the composition file.
"""
self.logger.debug(f'Sampling file path for {ticker}.')
downloads_folder = Path('downloads') / 'compositions' / str(plot_date)
file_name = f'{ticker}_holdings_{plot_date}.csv'
full_file_path = downloads_folder / file_name
return full_file_path
def get_country_weights(self, file_path: Path) -> pd.DataFrame:
"""
Calculates the constituents weights and logarithmic weights for an ETF composition.
Args:
file_path (Path): The ETF composition file path.
Returns:
pd.DataFrame: The extended composition, now including calculated weightings.
"""
self.logger.debug(f'Loading the country weights for {file_path.name}.')
compo_frame = self.load_etf_compo(file_path)
compo_frame_grouped = compo_frame.groupby('iso3_code').sum()
iso_frame = self.load_iso_mapping()
country_weights = pd.DataFrame(data=iso_frame['Alpha-3 code'].values,
index=iso_frame['Alpha-3 code'],
columns=['iso3_code'])
country_weights['weight'] = 0
for iso3 in compo_frame_grouped.index:
country_weights.loc[iso3, 'weight'] = compo_frame_grouped.loc[iso3, 'Weight (%)']
country_weights.reset_index(inplace=True, drop=True)
country_weights['country'] = country_weights['iso3_code'].apply(lambda x: self.get_country_name_from_iso3(x))
country_weights.loc[:, 'weight log'] = country_weights['weight'].apply(lambda x: np.log(x))
return country_weights
@lru_cache()
def load_iso_mapping(self) -> pd.DataFrame:
"""
Load the mapping for ISO 3166-1 alpha-2 and alpha-3 codes of countries.
Returns:
pd.DataFrame: A frame that contains all country information.
"""
self.logger.debug(f'Loading ISO mapping.')
file_path = Path('data') / 'iso_country_mapping.csv'
iso_frame = pd.read_csv(file_path)
iso_frame['Alpha-2 code'] = iso_frame['Alpha-2 code'].apply(lambda x: x.replace(' ', '')).copy()
iso_frame['Alpha-3 code'] = iso_frame['Alpha-3 code'].apply(lambda x: x.replace(' ', '')).copy()
return iso_frame
def get_country_name_from_iso3(self, iso3: str) -> str:
"""
Gets the country name based on the ISO 3166-1 alpha-3 code.
Args:
iso3 (str): The alpha-3 code to map.
Returns:
str: The respective country.
"""
iso_frame = self.load_iso_mapping()
if iso3 in iso_frame['Alpha-3 code'].values:
target_idx = list(iso_frame['Alpha-3 code']).index(iso3)
country = iso_frame['Name'][target_idx]
else:
warning_msg = f'No valid ISO mapping found for {iso3}!'
self.logger.warning(warning_msg)
country = None
return country
def get_iso3_from_iso2(self, iso2: str) -> str:
"""
Returns the alpha-3 code from a given alpha-2 code.
Args:
iso2: The alpha-2 code.
Returns:
str: The alpha-3 code.
"""
iso_frame = self.load_iso_mapping()
if iso2 in iso_frame['Alpha-2 code'].values:
target_idx = list(iso_frame['Alpha-2 code']).index(iso2)
iso3_str = iso_frame['Alpha-3 code'][target_idx]
else:
warning_msg = f'No valid ISO mapping found for {iso2}!'
self.logger.warning(warning_msg)
iso3_str = None
return iso3_str
@logger.catch()
def plot(self, plot_date: dt.date, ticker: str) -> None:
"""
Plots the composition of an ETF as specified by the ETF ticker.
Args:
plot_date (dt.date): The download date for the composition
ticker: The ETF ticker.
"""
compo_path = self.get_file_path_by_ticker(plot_date, ticker)
weights = self.get_country_weights(compo_path)
self.show_weightings_plot(weights)
@staticmethod
def show_weightings_plot(weights: pd.DataFrame) -> None:
"""
Plots the weights on a per-country basis.
Args:
weights: The constituent weights of a composition.
"""
fig = go.Figure(data=go.Choropleth(
locations=weights['iso3_code'],
z=weights['weight'],
colorscale='PuBu',
autocolorscale=False,
marker_line_color='lightgray',
colorbar_title="Country Weight"
))
fig.show() | |
import math as m
import numpy as np
import matplotlib.pyplot as plt
from coswindow import coswin

# ---------------------------------------------------------------------------
# Read the user parameters: taper ratio of the cosine window, the sampling
# interval, and the frequency of the sine test signal.
# ---------------------------------------------------------------------------
print("---------------")
print("INPUT PARAMETER")
print("---------------")
a = float(input("Taper Ratio \t\t:"))
dt = float(input("Sampling Time \t\t:"))
f = float(input("Signal Frequancey \t:"))
# The sampling interval must be smaller than the taper ratio. The previous
# check only caught exact equality and then carried on regardless; reject
# every invalid input (dt >= a) and stop instead of producing a bogus window.
if dt >= a:
    print("Sampling rate have to smaller than Taper ratio!")
    raise SystemExit(1)
# One second worth of samples at the requested sampling interval.
t = np.arange(0, 1, dt)
n = len(t)
# Generate signal (vectorized sine; replaces the element-wise while loop).
signal = np.sin(2 * m.pi * f * t)
# using coswindow: cosine-taper window of the same length as the signal.
res = coswin(a, n)
# Add cosine tapper to signal: element-wise product of signal and window.
up = signal * np.asarray(res)
# Plot the raw signal, the taper window, and the windowed signal, stacked.
fig = plt.figure()
fig1 = fig.add_subplot(311)
plt.plot(t, signal, color='black', label='signal')
plt.ylabel('Magnitude')
plt.legend(loc='upper left')
fig2 = fig.add_subplot(312)
plt.plot(t, res, color='red', label='cosine taper')
plt.ylabel('Magnitude')
plt.legend(loc='upper left')
fig3 = fig.add_subplot(313)
plt.plot(t, up, color='blue', label='windowed')
plt.xlabel("Time (s)")
plt.ylabel('Magnitude')
plt.legend(loc='upper left')
plt.show()
import os
import json
import numpy as np
import dgl
import torch as th
from ogb.nodeproppred import DglNodePropPredDataset
# Location and name of the partitioned-graph outputs to verify.
partitions_folder = 'outputs'
graph_name = 'mag'
# The partition metadata (num_parts, type maps, per-partition ID ranges) is
# stored as JSON next to the partition folders.
with open('{}/{}.json'.format(partitions_folder, graph_name)) as json_file:
    metadata = json.load(json_file)
num_parts = metadata['num_parts']
# Load OGB-MAG.
dataset = DglNodePropPredDataset(name='ogbn-mag')
hg_orig, labels = dataset[0]
# Rebuild the heterograph with an explicit reverse edge type ('rev-<etype>')
# for every canonical edge type, mirroring the graph that was partitioned.
subgs = {}
for etype in hg_orig.canonical_etypes:
    u, v = hg_orig.all_edges(etype=etype)
    subgs[etype] = (u, v)
    subgs[(etype[2], 'rev-'+etype[1], etype[0])] = (v, u)
hg = dgl.heterograph(subgs)
hg.nodes['paper'].data['feat'] = hg_orig.nodes['paper'].data['feat']
# Construct node data and edge data after reshuffling.
# Each partition stores its slice of the reshuffled node/edge features;
# concatenating the slices in partition order reconstructs the full arrays.
node_feats = {}
edge_feats = {}
for partid in range(num_parts):
    part_node_feats = dgl.data.utils.load_tensors(
        '{}/part{}/node_feat.dgl'.format(partitions_folder, partid))
    part_edge_feats = dgl.data.utils.load_tensors(
        '{}/part{}/edge_feat.dgl'.format(partitions_folder, partid))
    for key in part_node_feats:
        if key in node_feats:
            node_feats[key].append(part_node_feats[key])
        else:
            node_feats[key] = [part_node_feats[key]]
    for key in part_edge_feats:
        if key in edge_feats:
            edge_feats[key].append(part_edge_feats[key])
        else:
            edge_feats[key] = [part_edge_feats[key]]
for key in node_feats:
    node_feats[key] = th.cat(node_feats[key])
for key in edge_feats:
    edge_feats[key] = th.cat(edge_feats[key])
# Invert the name -> id maps from the metadata into id-indexed lists, so a
# numeric type ID can be translated back to its type name.
ntype_map = metadata['ntypes']
ntypes = [None] * len(ntype_map)
for key in ntype_map:
    ntype_id = ntype_map[key]
    ntypes[ntype_id] = key
etype_map = metadata['etypes']
etypes = [None] * len(etype_map)
for key in etype_map:
    etype_id = etype_map[key]
    etypes[etype_id] = key
# Edge-type name -> canonical (srctype, etype, dsttype) triplet of hg.
etype2canonical = {etype: (srctype, etype, dsttype)
                   for srctype, etype, dsttype in hg.canonical_etypes}
# node_map/edge_map hold, per type and per partition, the [start, end) range
# of reshuffled global IDs; IdMap converts homogeneous IDs back to
# (type id, per-type id) pairs.
node_map = metadata['node_map']
for key in node_map:
    node_map[key] = th.stack([th.tensor(row) for row in node_map[key]], 0)
nid_map = dgl.distributed.id_map.IdMap(node_map)
edge_map = metadata['edge_map']
for key in edge_map:
    edge_map[key] = th.stack([th.tensor(row) for row in edge_map[key]], 0)
eid_map = dgl.distributed.id_map.IdMap(edge_map)
# Sanity check: the ID ranges must cover exactly all nodes/edges of each type.
for ntype in node_map:
    assert hg.number_of_nodes(ntype) == th.sum(
        node_map[ntype][:, 1] - node_map[ntype][:, 0])
for etype in edge_map:
    assert hg.number_of_edges(etype) == th.sum(
        edge_map[etype][:, 1] - edge_map[etype][:, 0])
# verify part_0 with graph_partition_book
eid = []
gpb = dgl.distributed.graph_partition_book.RangePartitionBook(0, num_parts, node_map, edge_map,
                                                              {ntype: i for i, ntype in enumerate(
                                                                  hg.ntypes)},
                                                              {etype: i for i, etype in enumerate(hg.etypes)})
subg0 = dgl.load_graphs('{}/part0/graph.dgl'.format(partitions_folder))[0][0]
# Take the first edge (per-type ID 0) of every edge type; the assertion below
# checks they are all mapped to partition 0.
for etype in hg.etypes:
    type_eid = th.zeros((1,), dtype=th.int64)
    eid.append(gpb.map_to_homo_eid(type_eid, etype))
eid = th.cat(eid)
part_id = gpb.eid2partid(eid)
assert th.all(part_id == 0)
# In partition 0, local edge IDs coincide with the global (homogeneous) IDs.
local_eid = gpb.eid2localeid(eid, 0)
assert th.all(local_eid == eid)
assert th.all(subg0.edata[dgl.EID][local_eid] == eid)
lsrc, ldst = subg0.find_edges(local_eid)
gsrc, gdst = subg0.ndata[dgl.NID][lsrc], subg0.ndata[dgl.NID][ldst]
# The destination nodes are owned by the partition.
assert th.all(gdst == ldst)
# gdst which is not assigned into current partition is not required to equal ldst
assert th.all(th.logical_or(
    gdst == ldst, subg0.ndata['inner_node'][ldst] == 0))
# Map the sampled edges back to per-type IDs and rebuild their canonical
# triplets from the endpoint node types; each must exist in hg.
etids, _ = gpb.map_to_per_etype(eid)
src_tids, _ = gpb.map_to_per_ntype(gsrc)
dst_tids, _ = gpb.map_to_per_ntype(gdst)
canonical_etypes = []
etype_ids = th.arange(0, len(etypes))
for src_tid, etype_id, dst_tid in zip(src_tids, etype_ids, dst_tids):
    canonical_etypes.append(
        (ntypes[src_tid], etypes[etype_id], ntypes[dst_tid]))
for etype in canonical_etypes:
    assert etype in hg.canonical_etypes
# Load the graph partition structure.
# Collect, per type, the original IDs of all inner (owned) nodes/edges across
# partitions; at the end they must cover every node/edge exactly once.
orig_node_ids = {ntype: [] for ntype in hg.ntypes}
orig_edge_ids = {etype: [] for etype in hg.etypes}
for partid in range(num_parts):
    print('test part', partid)
    part_file = '{}/part{}/graph.dgl'.format(partitions_folder, partid)
    subg = dgl.load_graphs(part_file)[0][0]
    subg_src_id, subg_dst_id = subg.edges()
    # 'orig_id' = IDs in the original graph; dgl.NID/EID = reshuffled global IDs.
    orig_src_id = subg.ndata['orig_id'][subg_src_id]
    orig_dst_id = subg.ndata['orig_id'][subg_dst_id]
    global_src_id = subg.ndata[dgl.NID][subg_src_id]
    global_dst_id = subg.ndata[dgl.NID][subg_dst_id]
    subg_ntype = subg.ndata[dgl.NTYPE]
    subg_etype = subg.edata[dgl.ETYPE]
    for ntype_id in th.unique(subg_ntype):
        ntype = ntypes[ntype_id]
        idx = subg_ntype == ntype_id
        # This is global IDs after reshuffle.
        nid = subg.ndata[dgl.NID][idx]
        ntype_ids1, type_nid = nid_map(nid)
        orig_type_nid = subg.ndata['orig_id'][idx]
        inner_node = subg.ndata['inner_node'][idx]
        # All nodes should have the same node type.
        assert np.all(ntype_ids1.numpy() == int(ntype_id))
        # Inner nodes must occupy exactly this partition's contiguous ID range.
        assert np.all(nid[inner_node == 1].numpy() == np.arange(
            node_map[ntype][partid, 0], node_map[ntype][partid, 1]))
        orig_node_ids[ntype].append(orig_type_nid[inner_node == 1])
        # Check the degree of the inner nodes.
        inner_nids = th.nonzero(th.logical_and(subg_ntype == ntype_id, subg.ndata['inner_node']),
                                as_tuple=True)[0]
        subg_deg = subg.in_degrees(inner_nids)
        orig_nids = subg.ndata['orig_id'][inner_nids]
        # Calculate the in-degrees of nodes of a particular node type.
        # Sum in-degrees over every canonical edge type ending at this node
        # type; it must match the in-degree inside the partition (all
        # in-edges of an owned node are kept in its partition).
        glob_deg = th.zeros(len(subg_deg), dtype=th.int64)
        for etype in hg.canonical_etypes:
            dst_ntype = etype[2]
            if dst_ntype == ntype:
                glob_deg += hg.in_degrees(orig_nids, etype=etype)
        assert np.all(glob_deg.numpy() == subg_deg.numpy())
        # Check node data.
        # Features gathered from the partition files (indexed by per-type ID)
        # must equal the original graph's features (indexed by original ID).
        for name in hg.nodes[ntype].data:
            local_data = node_feats[ntype + '/' + name][type_nid]
            local_data1 = hg.nodes[ntype].data[name][orig_type_nid]
            assert np.all(local_data.numpy() == local_data1.numpy())
    for etype_id in th.unique(subg_etype):
        etype = etypes[etype_id]
        srctype, _, dsttype = etype2canonical[etype]
        idx = subg_etype == etype_id
        # Every edge of this type in the partition must exist in the original
        # graph with the same original edge ID.
        exist = hg[etype].has_edges_between(orig_src_id[idx], orig_dst_id[idx])
        assert np.all(exist.numpy())
        eid = hg[etype].edge_ids(orig_src_id[idx], orig_dst_id[idx])
        assert np.all(eid.numpy() == subg.edata['orig_id'][idx].numpy())
        # Endpoint node types must match the canonical triplet of this etype.
        ntype_ids, type_nid = nid_map(global_src_id[idx])
        assert len(th.unique(ntype_ids)) == 1
        assert ntypes[ntype_ids[0]] == srctype
        ntype_ids, type_nid = nid_map(global_dst_id[idx])
        assert len(th.unique(ntype_ids)) == 1
        assert ntypes[ntype_ids[0]] == dsttype
        # This is global IDs after reshuffle.
        eid = subg.edata[dgl.EID][idx]
        etype_ids1, type_eid = eid_map(eid)
        orig_type_eid = subg.edata['orig_id'][idx]
        inner_edge = subg.edata['inner_edge'][idx]
        # All edges should have the same edge type.
        assert np.all(etype_ids1.numpy() == int(etype_id))
        # Inner edges must occupy exactly this partition's contiguous ID range.
        assert np.all(np.sort(eid[inner_edge == 1].numpy()) == np.arange(
            edge_map[etype][partid, 0], edge_map[etype][partid, 1]))
        orig_edge_ids[etype].append(orig_type_eid[inner_edge == 1])
        # Check edge data.
        for name in hg.edges[etype].data:
            local_data = edge_feats[etype + '/' + name][type_eid]
            local_data1 = hg.edges[etype].data[name][orig_type_eid]
            assert np.all(local_data.numpy() == local_data1.numpy())
# Coverage check: the inner nodes/edges of all partitions together must be a
# disjoint, complete partition of every node/edge type.
for ntype in orig_node_ids:
    nids = th.cat(orig_node_ids[ntype])
    nids = th.sort(nids)[0]
    assert np.all((nids == th.arange(hg.number_of_nodes(ntype))).numpy())
for etype in orig_edge_ids:
    eids = th.cat(orig_edge_ids[etype])
    eids = th.sort(eids)[0]
    assert np.all((eids == th.arange(hg.number_of_edges(etype))).numpy())
#!/usr/bin/env python
import luigi
import os
import numpy as np
import subprocess
import glob
import pickle
from astra.tasks import BaseTask
from astra.tasks.io import ApPlanFile
from sdss_access.path import path
from apogee_drp.utils import apload,yanny
from luigi.util import inherits
# Inherit the parameters needed to define an ApPlanFile, since we will need these to
# require() the correct ApPlanFile.
@inherits(ApPlanFile)
class AP1DVISIT(BaseTask):
    """
    Run the 1D visit portion of the APOGEE pipeline.

    Wraps the IDL ``ap1dvisit`` routine: runs it on the visit plan file and,
    once all expected outputs exist, writes an empty ``-done1D`` marker file
    next to the plan file. That marker is this task's Luigi target.
    """

    # Parameters (these mirror the parameters that identify an ApPlanFile).
    apred = luigi.Parameter()
    instrument = luigi.Parameter()
    telescope = luigi.Parameter()
    field = luigi.Parameter()
    plate = luigi.IntParameter()
    mjd = luigi.Parameter()
    prefix = luigi.Parameter()
    release = luigi.Parameter()

    def requires(self):
        """The visit plan file must exist before this task can run."""
        return ApPlanFile(**self.get_common_param_kwargs(ApPlanFile))

    def output(self):
        """Marker file written next to the plan file once the 1D step is done."""
        # Store the 1D frames in the same directory as the plan file.
        output_path_prefix, ext = os.path.splitext(self.input().path)
        return luigi.LocalTarget(f"{output_path_prefix}-done1D")

    def run(self):
        """Run IDL's ap1dvisit and write the marker if its outputs look complete."""
        # Run the IDL program!
        plan_path = self.input().path
        cmd = f"ap1dvisit,'{plan_path}'"
        subprocess.call(["idl", "-e", cmd], shell=False)
        # Load the plan file
        # (Note: I'd suggest moving all yanny files to YAML format and/or just supply the plan file
        # inputs as variables to the task.)
        plan = yanny.yanny(plan_path, np=True)
        exposures = plan['APEXP']
        visitdir = os.path.dirname(plan_path)
        # Check that all of the apCframe files exist: one per chip (a, b, c)
        # for every exposure named in the plan.
        cframe_counter = 0
        for exp in exposures['name']:
            # Exposure names may come back as bytes from the yanny parser.
            if isinstance(exp, bytes):
                exp = exp.decode()
            exists = [
                os.path.exists(os.path.join(visitdir, f"apCframe-{ch}-{exp}.fits"))
                for ch in ('a', 'b', 'c')
            ]
            if sum(exists) == 3:
                cframe_counter += 1
        # Check if some apVisits have been made
        visitfiles = glob.glob(os.path.join(
            visitdir,
            f"{self.prefix}Visit-{self.apred}-{self.plate}-{self.mjd}-???.fits"))
        # Check apVisitSum file
        sdss_path = path.Path()
        apvisitsum = sdss_path.full('apVisitSum', apred=self.apred, telescope=self.telescope,
                                    instrument=self.instrument, field=self.field,
                                    plate=self.plate, mjd=self.mjd, prefix=self.prefix)
        # Create the "done" marker only when every exposure produced its full
        # apCframe trio, enough apVisit files exist, and the visit summary
        # file was written. (`and` short-circuits, unlike the previous `&`.)
        if (cframe_counter == len(exposures)
                and len(visitfiles) > 50
                and os.path.exists(apvisitsum)):
            with open(self.output().path, "w") as fp:
                fp.write(" ")
if __name__ == "__main__":
# The parameters for RunAP1DVISIT are the same as those needed to identify the ApPlanFile:
# From the path definition at:
# https://sdss-access.readthedocs.io/en/latest/path_defs.html#dr16
# We can see that the following parameters are needed:
# $APOGEE_REDUX/{apred}/visit/{telescope}/{field}/{plate}/{mjd}/{prefix}Plan-{plate}-{mjd}.par
# Define the task.
task = AP1DVISIT(
apred="t14",
telescope="apo25m",
instrument="apogee-n",
field="200+45",
plate=8100, # plate must be in
mjd="57680",
prefix="ap",
release=None
)
# (At least) Two ways to run this:
# Option 1: useful for interactive debugging with %debug
task.run()
# Option 2: Use Luigi to build the dependency graph. Useful if you have a complex workflow, but
# bad if you want to interactively debug (because it doesn't allow it).
#luigi.build(
# [task],
# local_scheduler=True
#)
# Option 3: Use a command line tool to run this specific task.
# Option 4: Use a command line tool and an already-running scheduler to execute the task, and
# then see the progress in a web browser. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.