_names = {
"CT_first_derivatives": ("CT_SA", "CT_pt"),
"CT_first_derivatives_wrt_t_exact": ("CT_SA_wrt_t", "CT_T_wrt_t", "CT_P_wrt_t"),
"CT_freezing": "CT_freezing",
"CT_freezing_first_derivatives": ("CT_freezing_SA", "CT_freezing_P"),
"CT_freezing_first_derivatives_poly": ("CT_freezing_SA", "CT_freezing_P"),
"CT_freezing_poly": "CT_freezing",
"CT_from_enthalpy": "CT",
"CT_from_enthalpy_exact": "CT",
"CT_from_entropy": "CT",
"CT_from_pt": "CT",
"CT_from_rho": ("CT", "CT"),
"CT_from_t": "CT",
"CT_maxdensity": "CT",
"CT_second_derivatives": ("CT_SA_SA", "CT_SA_pt", "CT_pt_pt"),
"C_from_SP": "C",
"Fdelta": "Fdelta",
"Helmholtz_energy_ice": "Helmholtz_energy_ice",
"Hill_ratio_at_SP2": "Hill_ratio",
"IPV_vs_fNsquared_ratio": ("IPV_vs_fNsquared_ratio", "p_mid"),
"Nsquared": ("N2", "p_mid"),
"O2sol": "O2sol",
"O2sol_SP_pt": "O2sol",
"SAAR": "SAAR",
"SA_freezing_from_CT": "SA",
"SA_freezing_from_CT_poly": "SA",
"SA_freezing_from_t": "SA",
"SA_freezing_from_t_poly": "SA",
"SA_from_SP": "SA",
"SA_from_SP_Baltic": "SA",
"SA_from_Sstar": "SA",
"SA_from_rho": "SA",
"SP_from_C": "SP",
"SP_from_SA": "SP",
"SP_from_SA_Baltic": "SP",
"SP_from_SK": "SP",
"SP_from_SR": "SP",
"SP_from_Sstar": "SP",
"SP_salinometer": "SP",
"SR_from_SP": "SR",
"Sstar_from_SA": "Sstar",
"Sstar_from_SP": "Sstar",
"Turner_Rsubrho": ("Tu", "Rsubrho", "p_mid"),
"adiabatic_lapse_rate_from_CT": "adiabatic",
"adiabatic_lapse_rate_ice": "adiabatic",
"alpha": "alpha",
"alpha_on_beta": "alpha",
"alpha_wrt_t_exact": "alpha",
"alpha_wrt_t_ice": "alpha",
"beta": "beta",
"beta_const_t_exact": "beta",
"cabbeling": "cabbeling",
"chem_potential_water_ice": "chem",
"chem_potential_water_t_exact": "chem",
"cp_ice": "cp",
"cp_t_exact": "cp",
"deltaSA_atlas": "deltaSA",
"deltaSA_from_SP": "deltaSA",
"dilution_coefficient_t_exact": "dilution_coefficient",
"distance": "distance",
"dynamic_enthalpy": "dynamic_enthalpy",
"enthalpy": "enthalpy",
"enthalpy_CT_exact": "enthalpy",
"enthalpy_diff": "enthalpy",
"enthalpy_first_derivatives": ("h_SA", "h_CT"),
"enthalpy_first_derivatives_CT_exact": (
"h_SA",
"h_CT",
),
"enthalpy_ice": "enthalpy_ice",
"enthalpy_second_derivatives": ("h_SA_SA", "h_SA_CT", "h_CT_CT"),
"enthalpy_second_derivatives_CT_exact": ("h_SA_SA", "h_SA_CT", "h_CT_CT"),
"enthalpy_t_exact": "enthalpy",
"entropy_first_derivatives": ("eta_SA", "eta_CT"),
"entropy_from_CT": "entropy",
"entropy_from_pt": "entropy",
"entropy_from_t": "entropy",
"entropy_ice": "entropy",
"entropy_second_derivatives": ("eta_SA_SA", "eta_SA_CT", "eta_CT_CT"),
"f": "f",
"frazil_properties": ("SA_final", "CT_final", "w_Ih_final"),
"frazil_properties_potential": ("SA_final", "CT_final", "w_Ih_final"),
"frazil_properties_potential_poly": ("SA_final", "CT_final", "w_Ih_final"),
"frazil_ratios_adiabatic": ("dSA_dCT_frazil", "dSA_dP_frazil", "dCT_dP_frazil"),
"frazil_ratios_adiabatic_poly": (
"dSA_dCT_frazil",
"dSA_dP_frazil",
"dCT_dP_frazil",
),
"geo_strf_dyn_height": "dynamic_height",
"geostrophic_velocity": ("geostrophic_velocity", "mid_lon", "mid_lat"),
"gibbs_ice_part_t": "gibbs_ice_part_t",
"gibbs_ice_pt0": "gibbs_ice_part_pt0",
"gibbs_ice_pt0_pt0": "gibbs_ice_pt0_pt0",
"grav": "grav",
"ice_fraction_to_freeze_seawater": ("SA_freeze", "CT_freeze", "w_Ih"),
"internal_energy": "internal_energy",
"internal_energy_ice": "internal_energy_ice",
"kappa": "kappa",
"kappa_const_t_ice": "kappa_const_t_ice",
"kappa_ice": "kappa_ice",
"kappa_t_exact": "kappa_t_exact",
"latentheat_evap_CT": "latentheat_evap",
"latentheat_evap_t": "latentheat_evap",
"latentheat_melting": "latentheat_melting",
"melting_ice_SA_CT_ratio": "melting_ice_SA_CT_ratio",
"melting_ice_SA_CT_ratio_poly": "melting_ice_SA_CT_ratio",
"melting_ice_equilibrium_SA_CT_ratio": "melting",
"melting_ice_equilibrium_SA_CT_ratio_poly": "melting",
"melting_ice_into_seawater": ("SA", "CT", "w_Ih_final"),
"melting_seaice_SA_CT_ratio": "melting_seaice_SA_CT_ratio",
"melting_seaice_SA_CT_ratio_poly": "melting_seaice_SA_CT_ratio",
"melting_seaice_equilibrium_SA_CT_ratio": "melting_seaice_equilibrium_SA_CT_ratio",
"melting_seaice_equilibrium_SA_CT_ratio_poly": "melting_seaice_equilibrium_SA_CT_ratio",
"melting_seaice_into_seawater": ("SA", "CT"),
"p_from_z": "p",
"pot_enthalpy_from_pt_ice": "pot_enthalpy_ice",
"pot_enthalpy_from_pt_ice_poly": "pot_enthalpy_ice",
"pot_enthalpy_ice_freezing": "pot_enthalpy_ice_freezing",
"pot_enthalpy_ice_freezing_first_derivatives": (
"pot_enthalpy_ice_freezing_SA",
"pot_enthalpy_ice_freezing_P",
),
"pot_enthalpy_ice_freezing_first_derivatives_poly": (
"pot_enthalpy_ice_freezing_SA",
"pot_enthalpy_ice_freezing_P",
),
"pot_enthalpy_ice_freezing_poly": "pot_enthalpy_ice_freezing",
"pot_rho_t_exact": "pot_rho_t_exact",
"pressure_coefficient_ice": "pressure_coefficient_ice",
"pressure_freezing_CT": "pressure_freezing_CT",
"pt0_from_t": "pt0",
"pt0_from_t_ice": "pt0_ice",
"pt_first_derivatives": ("pt_SA", "pt_CT"),
"pt_from_CT": "pt",
"pt_from_entropy": "pt",
"pt_from_pot_enthalpy_ice": "pt0_ice",
"pt_from_pot_enthalpy_ice_poly": "pt0_ice",
"pt_from_t": "pt",
"pt_from_t_ice": "pt_ice",
"pt_second_derivatives": ("pt_SA_SA", "pt_SA_CT", "pt_CT_CT"),
"rho": "rho",
"rho_alpha_beta": ("rho", "alpha", "beta"),
"rho_first_derivatives": ("rho_SA", "rho_CT", "rho_P"),
"rho_first_derivatives_wrt_enthalpy": ("rho_SA", "rho_h"),
"rho_ice": "rho",
"rho_second_derivatives": (
"rho_SA_SA",
"rho_SA_CT",
"rho_CT_CT",
"rho_SA_P",
"rho_CT_P",
),
"rho_second_derivatives_wrt_enthalpy": ("rho_SA_SA", "rho_SA_h", "rho_h_h"),
"rho_t_exact": "rho",
"seaice_fraction_to_freeze_seawater": ("SA_freeze", "CT_freeze", "w_seaice"),
"sigma0": "sigma0",
"sigma1": "sigma1",
"sigma2": "sigma2",
"sigma3": "sigma3",
"sigma4": "sigma4",
"sound_speed": "sound_speed",
"sound_speed_ice": "sound_speed_ice",
"sound_speed_t_exact": "sound_speed",
"specvol": "specvol",
"specvol_alpha_beta": ("specvol", "alpha", "beta"),
"specvol_anom_standard": "specvol_anom",
"specvol_first_derivatives": ("v_SA", "v_CT", "v_P"),
"specvol_first_derivatives_wrt_enthalpy": ("v_SA_wrt_h", "v_h"),
"specvol_ice": "specvol_ice",
"specvol_second_derivatives": ("v_SA_SA", "v_SA_CT", "v_CT_CT", "v_SA_P", "v_CT_P"),
"specvol_second_derivatives_wrt_enthalpy": ("v_SA_SA_wrt_h", "v_SA_h", "v_h_h"),
"specvol_t_exact": "specvol",
"spiciness0": "spiciness0",
"spiciness1": "spiciness1",
"spiciness2": "spiciness2",
"t90_from_t68": "t90",
"t_deriv_chem_potential_water_t_exact": "chem_potential_water_dt",
"t_freezing": "t_freezing",
"t_freezing_first_derivatives": ("tfreezing_SA", "tfreezing_P"),
"t_freezing_first_derivatives_poly": ("tfreezing_SA", "tfreezing_P"),
"t_freezing_poly": "t_freezing",
"t_from_CT": "temperature",
"t_from_pt0_ice": "temperature",
"thermobaric": "thermobaric",
"z_from_p": "z",
}
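
# A minimal sketch (not the gsw-xarray implementation) of how a mapping like
# _names can be used: wrap a gsw function so each returned array comes back as
# an xarray.DataArray carrying the corresponding name. The wrapper name and
# behaviour here are illustrative assumptions.
import gsw
import xarray as xr

def _wrap_with_names(func_name):
    names = _names[func_name]
    func = getattr(gsw, func_name)

    def wrapper(*args, **kwargs):
        out = func(*args, **kwargs)
        if isinstance(names, tuple):
            # Multi-output functions get one named DataArray per output
            return tuple(xr.DataArray(o, name=n) for o, n in zip(out, names))
        return xr.DataArray(out, name=names)

    return wrapper

# e.g. _wrap_with_names("SA_from_SP")(35.0, 100.0, -50.0, 10.0)
# would return a DataArray named "SA".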
import os
from deepneuro.outputs.inference import ModelPatchesInference
from deepneuro.preprocessing.preprocessor import DICOMConverter
from deepneuro.preprocessing.signal import N4BiasCorrection, ZeroMeanNormalization
from deepneuro.preprocessing.transform import Coregister
from deepneuro.preprocessing.skullstrip import SkullStrip_Model
from deepneuro.postprocessing.label import BinarizeLabel, LargestComponents, FillHoles
from deepneuro.pipelines.shared import load_data, load_model_with_output
from deepneuro.utilities.util import docker_print
def predict_GBM(output_folder, T1POST=None, FLAIR=None, T1PRE=None, ground_truth=None, input_directory=None, bias_corrected=True, resampled=False, registered=False, skullstripped=False, preprocessed=False, save_preprocess=False, save_all_steps=False, output_wholetumor_filename='wholetumor_segmentation.nii.gz', output_enhancing_filename='enhancing_segmentation.nii.gz', verbose=True, input_data=None):
#--------------------------------------------------------------------#
# Step 1, Load Data
#--------------------------------------------------------------------#
data_collection = load_data(inputs=[FLAIR, T1POST, T1PRE], output_folder=output_folder, input_directory=input_directory, ground_truth=ground_truth, input_data=input_data, verbose=verbose)
#--------------------------------------------------------------------#
# Step 2, Load Models
#--------------------------------------------------------------------#
wholetumor_prediction_parameters = {'inputs': ['input_data'],
'output_filename': os.path.join(output_folder, output_wholetumor_filename),
'batch_size': 50,
'patch_overlaps': 8,
'output_patch_shape': (56, 56, 6, 1),
'input_channels': [0, 1]}
enhancing_prediction_parameters = {'inputs': ['input_data'],
'output_filename': os.path.join(output_folder, output_enhancing_filename),
'batch_size': 50,
'patch_overlaps': 8,
'output_patch_shape': (56, 56, 6, 1)}
wholetumor_model = load_model_with_output(model_name='gbm_wholetumor_mri', outputs=[ModelPatchesInference(**wholetumor_prediction_parameters)], postprocessors=[BinarizeLabel(postprocessor_string='_label')])
enhancing_model = load_model_with_output(model_name='gbm_enhancingtumor_mri', outputs=[ModelPatchesInference(**enhancing_prediction_parameters)], postprocessors=[BinarizeLabel(postprocessor_string='_label')])
if not preprocessed and not skullstripped:
skullstripping_prediction_parameters = {'inputs': ['input_data'],
'output_filename': os.path.join(output_folder, 'skullstrip_mask.nii.gz'),
'batch_size': 50,
'patch_overlaps': 3,
'output_patch_shape': (56, 56, 6, 1),
'save_to_file': False}
skullstripping_model = load_model_with_output(model_name='skullstrip_mri', outputs=[ModelPatchesInference(**skullstripping_prediction_parameters)], postprocessors=[BinarizeLabel(), FillHoles(), LargestComponents()])
#--------------------------------------------------------------------#
# Step 3, Add Data Preprocessors
#--------------------------------------------------------------------#
if not preprocessed:
# Random hack to save DICOMs to niftis for further processing.
preprocessing_steps = [DICOMConverter(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]
if not bias_corrected:
preprocessing_steps += [N4BiasCorrection(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]
if not registered:
preprocessing_steps += [Coregister(data_groups=['input_data'], save_output=(save_preprocess or save_all_steps), verbose=verbose, output_folder=output_folder, reference_channel=0)]
if not skullstripped:
preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]
preprocessing_steps += [SkullStrip_Model(data_groups=['input_data'], model=skullstripping_model, save_output=save_all_steps, verbose=verbose, output_folder=output_folder, reference_channel=[0, 1])]
preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder, mask_preprocessor=preprocessing_steps[-1], preprocessor_string='_preprocessed')]
data_collection.append_preprocessor(preprocessing_steps)
#--------------------------------------------------------------------#
# Step 4, Run Inference
#--------------------------------------------------------------------#
for case in data_collection.cases:
docker_print('\nStarting New Case...\n')
docker_print('Whole Tumor Prediction')
docker_print('======================')
wholetumor_file = wholetumor_model.generate_outputs(data_collection, case)[0]['filenames'][-1]
data_collection.add_channel(case, wholetumor_file)
docker_print('Enhancing Tumor Prediction')
        docker_print('==========================')
enhancing_model.generate_outputs(data_collection, case)
data_collection.clear_outputs()
if __name__ == '__main__':
    pass
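
# Hypothetical usage sketch (the paths below are placeholders, not shipped data):
#
#     predict_GBM(output_folder='/output',
#                 T1POST='/data/T1post.nii.gz',
#                 FLAIR='/data/FLAIR.nii.gz',
#                 T1PRE='/data/T1pre.nii.gz')
#
# Unless the corresponding flags mark steps as already done, the inputs are
# DICOM-converted, bias-corrected, coregistered, skullstripped and normalized
# before the whole-tumor and enhancing-tumor models run in sequence.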
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import logging
import warnings
import random
import os
import torch
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from torch.utils.tensorboard import SummaryWriter
from config import get_config
from dataset.datasets import WLFWDatasets
from pfld.utils import init_weights, save_checkpoint, set_logger, write_cfg
from pfld.loss import LandmarkLoss
from test import compute_nme
from models.PFLD import PFLD
from models.PFLD_Ultralight import PFLD_Ultralight
from models.PFLD_Ultralight_Slim import PFLD_Ultralight_Slim
def train(model, train_dataloader, loss_fn, optimizer, cfg):
losses = []
model.train()
with tqdm(total=len(train_dataloader)) as t:
for img, landmark_gt in train_dataloader:
img = img.to(cfg.DEVICE)
landmark_gt = landmark_gt.to(cfg.DEVICE)
landmark_pred = model(img)
loss = loss_fn(landmark_gt, landmark_pred)
optimizer.zero_grad()
loss.backward()
optimizer.step()
losses.append(loss.cpu().detach().numpy())
t.update()
return np.mean(losses)
def validate(model, val_dataloader, loss_fn, cfg):
model.eval()
losses = []
nme_list = []
with torch.no_grad():
for img, landmark_gt in val_dataloader:
img = img.to(cfg.DEVICE)
landmark_gt = landmark_gt.to(cfg.DEVICE)
landmark_pred = model(img)
loss = loss_fn(landmark_gt, landmark_pred)
losses.append(loss.cpu().numpy())
landmark_pred = landmark_pred.reshape(landmark_pred.shape[0], -1, 2).cpu().numpy()
landmark_gt = landmark_gt.reshape(landmark_gt.shape[0], -1, 2).cpu().numpy()
nme_temp = compute_nme(landmark_pred, landmark_gt)
for item in nme_temp:
nme_list.append(item)
return np.mean(losses), np.mean(nme_list)
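
# For reference: compute_nme (imported from test.py, not shown here) returns a
# per-sample normalized mean error. A minimal sketch of one common definition,
# assuming normalization by the ground-truth bounding-box diagonal (the
# project's own normalizer may differ, e.g. inter-ocular distance):
def _nme_sketch(pred, gt):
    # pred, gt: arrays of shape (n_samples, n_landmarks, 2)
    point_err = np.linalg.norm(pred - gt, axis=2).mean(axis=1)
    bbox_diag = np.linalg.norm(gt.max(axis=1) - gt.min(axis=1), axis=1)
    return point_err / bbox_diag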
def main():
cfg = get_config()
SEED = cfg.SEED
np.random.seed(SEED)
random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = True  # note: benchmark=True favors speed over exact run-to-run reproducibility, despite the deterministic flag above
warnings.filterwarnings("ignore")
set_logger(cfg.LOGGER_PATH)
write_cfg(logging, cfg)
main_worker(cfg)
def main_worker(cfg):
# ======= LOADING DATA ======= #
logging.warning('=======>>>>>>> Loading Training and Validation Data')
TRAIN_DATA_PATH = cfg.TRAIN_DATA_PATH
VAL_DATA_PATH = cfg.VAL_DATA_PATH
TRANSFORM = cfg.TRANSFORM
train_dataset = WLFWDatasets(TRAIN_DATA_PATH, TRANSFORM)
train_dataloader = DataLoader(train_dataset, batch_size=cfg.TRAIN_BATCH_SIZE, shuffle=True, num_workers=cfg.NUM_WORKERS, drop_last=False)
val_dataset = WLFWDatasets(VAL_DATA_PATH, TRANSFORM)
val_dataloader = DataLoader(val_dataset, batch_size=cfg.VAL_BATCH_SIZE, shuffle=False, num_workers=cfg.NUM_WORKERS)
# ======= MODEL ======= #
MODEL_DICT = {'PFLD': PFLD,
'PFLD_Ultralight': PFLD_Ultralight,
'PFLD_Ultralight_Slim': PFLD_Ultralight_Slim,
}
MODEL_TYPE = cfg.MODEL_TYPE
WIDTH_FACTOR = cfg.WIDTH_FACTOR
INPUT_SIZE = cfg.INPUT_SIZE
LANDMARK_NUMBER = cfg.LANDMARK_NUMBER
model = MODEL_DICT[MODEL_TYPE](WIDTH_FACTOR, INPUT_SIZE[0], LANDMARK_NUMBER).to(cfg.DEVICE)
# model.apply(init_weights)
if cfg.RESUME:
if os.path.isfile(cfg.RESUME_MODEL_PATH):
model.load_state_dict(torch.load(cfg.RESUME_MODEL_PATH))
else:
logging.warning("MODEL: No Checkpoint Found at '{}".format(cfg.RESUME_MODEL_PATH))
logging.warning('=======>>>>>>> {} Model Generated'.format(MODEL_TYPE))
# ======= LOSS ======= #
loss_fn = LandmarkLoss(LANDMARK_NUMBER)
logging.warning('=======>>>>>>> Loss Function Generated')
# ======= OPTIMIZER ======= #
optimizer = torch.optim.Adam(
[{'params': model.parameters()}],
lr=cfg.LR,
weight_decay=cfg.WEIGHT_DECAY)
logging.warning('=======>>>>>>> Optimizer Generated')
# ======= SCHEDULER ======= #
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=cfg.MILESTONES, gamma=0.1)
logging.warning('=======>>>>>>> Scheduler Generated' + '\n')
# ======= TENSORBOARDX WRITER ======= #
writer = SummaryWriter(cfg.LOG_PATH)
dummy_input = torch.rand(1, 3, INPUT_SIZE[0], INPUT_SIZE[1]).to(cfg.DEVICE)
writer.add_graph(model, (dummy_input,))
best_nme = float('inf')
for epoch in range(1, cfg.EPOCHES + 1):
logging.warning('Epoch {} Start'.format(epoch))
train_loss = train(model, train_dataloader, loss_fn, optimizer, cfg)
val_loss, val_nme = validate(model, val_dataloader, loss_fn, cfg)
scheduler.step()
if val_nme < best_nme:
best_nme = val_nme
save_checkpoint(cfg, model, extra='best')
logging.info('Save best model')
save_checkpoint(cfg, model, epoch)
writer.add_scalar('Learning_Rate', optimizer.param_groups[0]['lr'], epoch)
writer.add_scalar('Train_Loss', train_loss, epoch)
writer.add_scalar('Val_Loss', val_loss, epoch)
writer.add_scalar('Val_NME', val_nme, epoch)
logging.info('Train_Loss: {}'.format(train_loss))
logging.info('Val_Loss: {}'.format(val_loss))
logging.info('Val_NME: {}'.format(val_nme) + '\n')
save_checkpoint(cfg, model, extra='final')
if __name__ == "__main__":
main()
import numpy as np
import cv2
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
cap = cv2.VideoCapture(0)
trnImages = []
trnLabels = []
trnImages.append(cv2.cvtColor(cv2.imread("detectedFace0.jpg"), cv2.COLOR_BGR2GRAY))
trnLabels.append(1)
'''
trnImages.append(cv2.cvtColor(cv2.imread("detectedFace1-1.jpg"), cv2.COLOR_BGR2GRAY))
trnLabels.append(2)
trnImages.append(cv2.cvtColor(cv2.imread("detectedFace1-2.jpg"), cv2.COLOR_BGR2GRAY))
trnLabels.append(2)
trnImages.append(cv2.cvtColor(cv2.imread("detectedFace1-3.jpg"), cv2.COLOR_BGR2GRAY))
trnLabels.append(2)
trnImages.append(cv2.cvtColor(cv2.imread("detectedFace1-4.jpg"), cv2.COLOR_BGR2GRAY))
trnLabels.append(2)
trnImages.append(cv2.cvtColor(cv2.imread("detectedFace1-5.jpg"), cv2.COLOR_BGR2GRAY))
trnLabels.append(2)
'''
labelPointer = len(trnImages)
trnImages = np.array(trnImages)
trnLabels = np.array(trnLabels)
# Alternative recognizers live in the cv2.face module (opencv-contrib):
#   cv2.face.EigenFaceRecognizer_create
#   cv2.face.FisherFaceRecognizer_create
#   cv2.face.LBPHFaceRecognizer_create
model = cv2.face.LBPHFaceRecognizer_create(threshold=100)  # threshold caps the accepted prediction distance
model.train(trnImages, trnLabels)
while True:
try:
ret, img = cap.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
faceDetected = len(faces)>0
img2 = gray.copy()
for (x,y,w,h) in faces:
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
prediction = model.predict(cv2.resize(roi_gray, (230,300)))
            print(prediction)
label = int(prediction[0])
cv2.putText(img, "Label: %s"%label, (x-2,y-2), cv2.FONT_HERSHEY_PLAIN, 1, (0,255,0))
            hMarge = int(round(h*0.15))  # slice indices must be ints in Python 3
            yStart = y-hMarge if (y-hMarge) >= 0 else y
            yEnd = y+h+hMarge if (y+h+hMarge) <= img2.shape[0] else y+h
faceRoi = img2[yStart:yEnd, x:x+w]
faceRoi = cv2.resize(faceRoi, (230,300))
if label>0:
updLabels = np.array([label])
else:
labelPointer += 1
updLabels = np.array([labelPointer])
updImages = np.array([faceRoi])
model.update(updImages, updLabels)
eyes = eye_cascade.detectMultiScale(roi_gray)
for (ex,ey,ew,eh) in eyes:
cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
cv2.imshow('img',img)
    except Exception:
        # swallow per-frame errors (e.g. an empty camera frame) and keep the loop alive
        pass
k = cv2.waitKey(30) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
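
# Follow-up sketch: the trained/updated LBPH model can be persisted between
# runs with the OpenCV face module (available in opencv-contrib):
#
#     model.write('lbph_model.yml')   # save
#     model = cv2.face.LBPHFaceRecognizer_create(threshold=100)
#     model.read('lbph_model.yml')    # restore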
import asyncio
import base64
from collections import namedtuple
from collections.abc import AsyncIterator
import time
from urllib.parse import quote
from async_timeout import timeout
import grpc
from sonora import protocol
_HandlerCallDetails = namedtuple(
"_HandlerCallDetails", ("method", "invocation_metadata")
)
class grpcASGI(grpc.Server):
def __init__(self, application=None):
self._application = application
self._handlers = []
async def __call__(self, scope, receive, send):
"""
Our actual ASGI request handler. Will execute the request
if it matches a configured gRPC service path or fall through
to the next application.
"""
if not scope["type"] == "http":
return await self._application(scope, receive, send)
rpc_method = self._get_rpc_handler(scope["path"])
request_method = scope["method"]
if rpc_method:
if request_method == "POST":
context = self._create_context(scope)
try:
async with timeout(context.time_remaining()):
await self._do_grpc_request(rpc_method, context, receive, send)
except asyncio.TimeoutError:
context.code = grpc.StatusCode.DEADLINE_EXCEEDED
context.details = "request timed out at the server"
await self._do_grpc_error(send, context)
elif request_method == "OPTIONS":
await self._do_cors_preflight(scope, receive, send)
else:
await send({"type": "http.response.start", "status": 400})
await send(
{"type": "http.response.body", "body": b"", "more_body": False}
)
elif self._application:
await self._application(scope, receive, send)
else:
await send({"type": "http.response.start", "status": 404})
await send({"type": "http.response.body", "body": b"", "more_body": False})
def _get_rpc_handler(self, path):
handler_call_details = _HandlerCallDetails(path, None)
rpc_handler = None
for handler in self._handlers:
rpc_handler = handler.service(handler_call_details)
if rpc_handler:
return rpc_handler
return None
def _create_context(self, scope):
timeout = None
metadata = []
for header, value in scope["headers"]:
if timeout is None and header == b"grpc-timeout":
timeout = protocol.parse_timeout(value)
else:
if header.endswith(b"-bin"):
value = base64.b64decode(value)
else:
value = value.decode("ascii")
metadata.append((header.decode("ascii"), value))
return ServicerContext(timeout, metadata)
async def _do_grpc_request(self, rpc_method, context, receive, send):
headers = context._response_headers
wrap_message = context._wrap_message
unwrap_message = context._unwrap_message
if not rpc_method.request_streaming and not rpc_method.response_streaming:
method = rpc_method.unary_unary
elif not rpc_method.request_streaming and rpc_method.response_streaming:
method = rpc_method.unary_stream
elif rpc_method.request_streaming and not rpc_method.response_streaming:
method = rpc_method.stream_unary
elif rpc_method.request_streaming and rpc_method.response_streaming:
method = rpc_method.stream_stream
else:
raise NotImplementedError
request_proto_iterator = (
rpc_method.request_deserializer(message)
async for _, _, message in unwrap_message(receive)
)
try:
if rpc_method.request_streaming:
coroutine = method(request_proto_iterator, context)
else:
request_proto = await anext(
request_proto_iterator, None
) or rpc_method.request_deserializer(b"")
coroutine = method(request_proto, context)
except NotImplementedError:
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
coroutine = None
try:
if rpc_method.response_streaming:
await self._do_streaming_response(
rpc_method, receive, send, wrap_message, context, coroutine
)
else:
await self._do_unary_response(
rpc_method, receive, send, wrap_message, context, coroutine
)
except grpc.RpcError:
await self._do_grpc_error(send, context)
async def _do_streaming_response(
self, rpc_method, receive, send, wrap_message, context, coroutine
):
headers = context._response_headers
        status = 200
        if context._initial_metadata:
            headers.extend(context._initial_metadata)
        await send(
            {"type": "http.response.start", "status": status, "headers": headers}
        )
        # An unimplemented handler has no response stream to drain (coroutine
        # is None); in that case fall through and send only the trailers below.
        if coroutine is not None:
            async for message in coroutine:
                body = wrap_message(
                    False, False, rpc_method.response_serializer(message)
                )
                send_task = asyncio.create_task(
                    send({"type": "http.response.body", "body": body, "more_body": True})
                )
                recv_task = asyncio.create_task(receive())
                done, pending = await asyncio.wait(
                    {send_task, recv_task}, return_when=asyncio.FIRST_COMPLETED
                )
                if recv_task in done:
                    send_task.cancel()
                    result = recv_task.result()
                    if result["type"] == "http.disconnect":
                        break
                else:
                    recv_task.cancel()
trailers = [("grpc-status", str(context.code.value[0]))]
if context.details:
trailers.append(("grpc-message", quote(context.details)))
if context._trailing_metadata:
trailers.extend(context._trailing_metadata)
trailer_message = protocol.pack_trailers(trailers)
body = wrap_message(True, False, trailer_message)
await send({"type": "http.response.body", "body": body, "more_body": False})
async def _do_unary_response(
self, rpc_method, receive, send, wrap_message, context, coroutine
):
headers = context._response_headers
if coroutine is None:
message = None
else:
message = await coroutine
status = 200
headers.append((b"grpc-status", str(context.code.value[0]).encode()))
if context.details:
headers.append(
(b"grpc-message", quote(context.details.encode("utf8")).encode("ascii"))
)
if context._initial_metadata:
headers.extend(context._initial_metadata)
if message is not None:
message_data = wrap_message(
False, False, rpc_method.response_serializer(message)
)
else:
message_data = b""
if context._trailing_metadata:
trailers = context._trailing_metadata
trailer_message = protocol.pack_trailers(trailers)
trailer_data = wrap_message(True, False, trailer_message)
else:
trailer_data = b""
content_length = len(message_data) + len(trailer_data)
headers.append((b"content-length", str(content_length).encode()))
await send(
{"type": "http.response.start", "status": status, "headers": headers}
)
await send(
{"type": "http.response.body", "body": message_data, "more_body": True}
)
await send(
{"type": "http.response.body", "body": trailer_data, "more_body": False}
)
async def _do_grpc_error(self, send, context):
status = 200
headers = context._response_headers
headers.append((b"grpc-status", str(context.code.value[0]).encode()))
if context.details:
headers.append(
(b"grpc-message", quote(context.details.encode("utf8")).encode("ascii"))
)
await send(
{"type": "http.response.start", "status": status, "headers": headers}
)
await send({"type": "http.response.body", "body": b"", "more_body": False})
async def _do_cors_preflight(self, scope, receive, send):
        # ASGI header pairs are bytes, so the key must be compared as b"host"
        origin = next(
            (value for header, value in scope["headers"] if header == b"host"),
            scope["server"][0].encode("ascii"),
        )
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [
(b"Content-Type", b"text/plain"),
(b"Content-Length", b"0"),
(b"Access-Control-Allow-Methods", b"POST, OPTIONS"),
(b"Access-Control-Allow-Headers", b"*"),
(b"Access-Control-Allow-Origin", origin),
(b"Access-Control-Allow-Credentials", b"true"),
(b"Access-Control-Expose-Headers", b"*"),
],
}
)
await send({"type": "http.response.body", "body": b"", "more_body": False})
def add_generic_rpc_handlers(self, handlers):
self._handlers.extend(handlers)
def add_insecure_port(self, port):
raise NotImplementedError()
def add_secure_port(self, port):
raise NotImplementedError()
def start(self):
raise NotImplementedError()
def stop(self):
raise NotImplementedError()
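
# A minimal mounting sketch. The Greeter servicer and the generated
# add_GreeterServicer_to_server helper follow the usual grpc codegen naming
# conventions and are assumptions here, not part of this module:
#
#     application = grpcASGI(some_other_asgi_app)
#     helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), application)
#     # serve `application` with any ASGI server, e.g. `uvicorn module:application`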
class ServicerContext(grpc.ServicerContext):
def __init__(self, timeout=None, metadata=None):
self.code = grpc.StatusCode.OK
self.details = None
self._timeout = timeout
if timeout is not None:
self._deadline = time.monotonic() + timeout
else:
self._deadline = None
self._invocation_metadata = metadata or tuple()
self._initial_metadata = None
self._trailing_metadata = None
response_content_type = "application/grpc-web+proto"
self._wrap_message = protocol.wrap_message
self._unwrap_message = protocol.unwrap_message_asgi
origin = None
        for header, value in self._invocation_metadata:
if header == "content-type":
if value == "application/grpc-web-text":
self._wrap_message = protocol.b64_wrap_message
self._unwrap_message = protocol.b64_unwrap_message_asgi
elif header == "accept":
response_content_type = value.split(",")[0].strip()
elif header == "host":
origin = value
if not origin:
raise ValueError("Request is missing the host header")
self._response_headers = [
(b"Content-Type", response_content_type.encode("ascii")),
(b"Access-Control-Allow-Origin", origin.encode("ascii")),
(b"Access-Control-Expose-Headers", b"*"),
]
def set_code(self, code):
if isinstance(code, grpc.StatusCode):
self.code = code
elif isinstance(code, int):
for status_code in grpc.StatusCode:
if status_code.value[0] == code:
self.code = status_code
break
else:
raise ValueError(f"Unknown StatusCode: {code}")
else:
raise NotImplementedError(
f"Unsupported status code type: {type(code)} with value {code}"
)
def set_details(self, details):
self.details = details
async def abort(self, code, details):
if code == grpc.StatusCode.OK:
raise ValueError()
self.set_code(code)
self.set_details(details)
raise grpc.RpcError()
    async def abort_with_status(self, status):
        # grpc.Status carries .code and .details; comparing the Status object
        # itself against a StatusCode would never match.
        if status.code == grpc.StatusCode.OK:
            raise ValueError()
        self.set_code(status.code)
        self.set_details(status.details)
        raise grpc.RpcError()
async def send_initial_metadata(self, initial_metadata):
self._initial_metadata = [
(key.encode("ascii"), value.encode("utf8"))
for key, value in protocol.encode_headers(initial_metadata)
]
def set_trailing_metadata(self, trailing_metadata):
self._trailing_metadata = protocol.encode_headers(trailing_metadata)
def invocation_metadata(self):
return self._invocation_metadata
def time_remaining(self):
if self._deadline is not None:
return max(self._deadline - time.monotonic(), 0)
else:
return None
def peer(self):
raise NotImplementedError()
def peer_identities(self):
raise NotImplementedError()
def peer_identity_key(self):
raise NotImplementedError()
def auth_context(self):
raise NotImplementedError()
def add_callback(self):
raise NotImplementedError()
def cancel(self):
raise NotImplementedError()
def is_active(self):
raise NotImplementedError()
# Copied from https://github.com/python/cpython/pull/8895
_NOT_PROVIDED = object()
async def anext(async_iterator, default=_NOT_PROVIDED):
"""anext(async_iterator[, default])
Return the next item from the async iterator.
If default is given and the iterator is exhausted,
it is returned instead of raising StopAsyncIteration.
"""
if not isinstance(async_iterator, AsyncIterator):
raise TypeError(f"anext expected an AsyncIterator, got {type(async_iterator)}")
anxt = async_iterator.__anext__
try:
return await anxt()
except StopAsyncIteration:
if default is _NOT_PROVIDED:
raise
return default
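
# Usage sketch for the polyfill above:
#
#     first = await anext(stream)        # raises StopAsyncIteration when empty
#     first = await anext(stream, None)  # returns the default instead of raising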
# xmlutils.py
#
# Copyright 2013 Mandiant Corporation.
# Licensed under the Apache 2.0 license. Developed for Mandiant by William
# Gibb and Seth.
#
# Mandiant licenses this file to you under the Apache License, Version
# 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Provides a wrapper around lxml.etree for reading in XML documents
#
import os.path
import logging
from lxml import etree as et
log = logging.getLogger(__name__)
def read_xml(filename):
"""
    Use lxml.etree to read an XML file, or a string of XML data, into an ElementTree object.
:param filename: File to parse.
:return: lxml._elementTree object or None
"""
parser = et.XMLParser(remove_blank_text=True)
    isfile = False
try:
isfile = os.path.exists(filename)
except ValueError as e:
if 'path too long for Windows' in str(e):
pass
else:
raise
try:
if isfile:
return et.parse(filename, parser)
else:
r = et.fromstring(filename, parser)
return r.getroottree()
    except IOError:
        log.exception('unable to open file [{}]'.format(filename))
    except et.XMLSyntaxError:
        log.exception('unable to parse XML [{}]'.format(filename))
    return None
def remove_namespace(doc, namespace):
"""
    Takes an ElementTree object and a namespace value, and strips that
    namespace prefix from the tag of every Element node within the document.
    This effectively removes the namespace from the document.
:param doc: lxml.etree
:param namespace: Namespace that needs to be removed.
:return: Returns the source document with namespaces removed.
"""
# http://homework.nwsnet.de/products/45be_remove-namespace-in-an-xml-document-using-elementtree
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
ns = '{{{}}}'.format(namespace)
nsl = len(ns)
# print 'DEBUG: removing',ns
    for elem in doc.iter():  # getiterator() is deprecated in lxml
if elem.tag.startswith(ns):
elem.tag = elem.tag[nsl:]
return doc
def delete_namespace(parsed_xml):
"""
    Identifies the namespace associated with the root node of an XML document
    and removes that namespace from the document.
    :param parsed_xml: lxml ElementTree object.
    :return: Returns the source document with the namespace removed.
"""
if parsed_xml.getroot().tag.startswith('{'):
root = parsed_xml.getroot().tag
end_ns = root.find('}')
remove_namespace(parsed_xml, root[1:end_ns])
return parsed_xml
def read_xml_no_ns(filename):
"""
read in the file or data, populating a lxml._elementTree object
stripping out namespaces
:param filename: filename representing a xml file or a string of xml data
:return: lxml._elementTree object or None
"""
parsed_xml = read_xml(filename)
if parsed_xml is None:
return None
return delete_namespace(parsed_xml)
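
# Usage sketch: both a file path and raw XML data are accepted, and stripping
# the root namespace lets queries omit prefixes:
#
#     tree = read_xml_no_ns('<a:root xmlns:a="urn:example"><a:child/></a:root>')
#     tree.xpath('//child')   # matches without namespace qualification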
import config
import numpy as np
import data_utils
BATCH_SIZE = config.BATCH_SIZE
SEQ_IN = config.SEQ_IN
SEQ_OUT = config.SEQ_OUT
IN_DIM = config.IN_DIM
def convert_velocity(data):
velocity = data[1:] - data[:-1]
return velocity
def get_batch(data, one_hot, actions):
"""Get a random batch of data from the specified bucket, prepare for step.
Args
data: a list of sequences of size n-by-d to fit the model to.
actions: a list of the actions we are using
Returns
The tuple (encoder_inputs, decoder_inputs, decoder_outputs);
the constructed batches have the proper format to call step(...) later.
"""
# Select entries at random
all_keys = list(data.keys())
chosen_keys = np.random.choice( len(all_keys), BATCH_SIZE )
# How many frames in total do we need?
total_frames = SEQ_IN + SEQ_OUT
encoder_inputs = np.zeros((BATCH_SIZE, SEQ_IN-1, IN_DIM), dtype=float)
decoder_inputs = np.zeros((BATCH_SIZE, SEQ_OUT, IN_DIM), dtype=float)
decoder_outputs = np.zeros((BATCH_SIZE, SEQ_OUT, IN_DIM), dtype=float)
for i in range( BATCH_SIZE ):
the_key = all_keys[ chosen_keys[i] ]
# Get the number of frames
n, _ = data[ the_key ].shape
        # Sample somewhere in the middle
idx = np.random.randint( 16, n-total_frames )
# Select the data around the sampled points
data_sel = data[ the_key ][idx:idx+total_frames ,:]
# Add the data
encoder_inputs[i,:,0:IN_DIM] = data_sel[0:SEQ_IN-1, :]
decoder_inputs[i,:,0:IN_DIM] = data_sel[SEQ_IN-1:SEQ_IN+SEQ_OUT-1, :]
decoder_outputs[i,:,0:IN_DIM] = data_sel[SEQ_IN:, 0:IN_DIM]
return encoder_inputs, decoder_inputs, decoder_outputs
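
# Indexing sketch with concrete numbers: if SEQ_IN = 50 and SEQ_OUT = 25, each
# sampled window data_sel holds 75 frames and is split as
#   encoder_inputs  <- frames 0..48   (SEQ_IN - 1 = 49 frames)
#   decoder_inputs  <- frames 49..73  (last seed frame plus 24 predicted steps)
#   decoder_outputs <- frames 50..74  (decoder_inputs shifted ahead by one)
# which is the usual teacher-forcing alignment for seq2seq motion models.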
def find_indices_srnn(data, action ):
"""
Find the same action indices as in SRNN.
See https://github.com/asheshjain399/RNNexp/blob/master/structural_rnn/CRFProblems/H3.6m/processdata.py#L325
"""
# Used a fixed dummy seed, following
# https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/forecastTrajectories.py#L29
SEED = 1234567890
rng = np.random.RandomState( SEED )
subject = 5
subaction1 = 1
subaction2 = 2
T1 = data[ (subject, action, subaction1, 'even') ].shape[0]
T2 = data[ (subject, action, subaction2, 'even') ].shape[0]
prefix, suffix = 50, 100
idx = []
idx.append( rng.randint( 16,T1-prefix-suffix ))
idx.append( rng.randint( 16,T2-prefix-suffix ))
idx.append( rng.randint( 16,T1-prefix-suffix ))
idx.append( rng.randint( 16,T2-prefix-suffix ))
idx.append( rng.randint( 16,T1-prefix-suffix ))
idx.append( rng.randint( 16,T2-prefix-suffix ))
idx.append( rng.randint( 16,T1-prefix-suffix ))
idx.append( rng.randint( 16,T2-prefix-suffix ))
return idx
def get_batch_srnn(data, action, actions ):
"""
Get a random batch of data from the specified bucket, prepare for step.
Args
data: dictionary with k:v, k=((subject, action, subsequence, 'even')),
v=nxd matrix with a sequence of poses
action: the action to load data from
Returns
The tuple (encoder_inputs, decoder_inputs, decoder_outputs);
the constructed batches have the proper format to call step(...) later.
"""
#actions = ["directions", "discussion", "eating", "greeting", "phoning",
# "posing", "purchases", "sitting", "sittingdown", "smoking",
# "takingphoto", "waiting", "walking", "walkingdog", "walkingtogether"]
    if action not in actions:
raise ValueError("Unrecognized action {0}".format(action))
frames = {}
frames[ action ] = find_indices_srnn( data, action )
batch_size = 8 # we always evaluate 8 seeds
subject = 5 # we always evaluate on subject 5
source_seq_len = SEQ_IN
target_seq_len = SEQ_OUT
seeds = [( action, (i%2)+1, frames[action][i] ) for i in range(batch_size)]
encoder_inputs = np.zeros( (batch_size, source_seq_len-1, IN_DIM), dtype=float )
decoder_inputs = np.zeros( (batch_size, target_seq_len, IN_DIM), dtype=float )
decoder_outputs = np.zeros( (batch_size, target_seq_len, IN_DIM), dtype=float )
# Compute the number of frames needed
total_frames = source_seq_len + target_seq_len
# Reproducing SRNN's sequence subsequence selection as done in
# https://github.com/asheshjain399/RNNexp/blob/master/structural_rnn/CRFProblems/H3.6m/processdata.py#L343
for i in range( batch_size ):
_, subsequence, idx = seeds[i]
idx = idx + 50
data_sel = data[ (subject, action, subsequence, 'even') ]
data_sel = data_sel[(idx-source_seq_len):(idx+target_seq_len) ,:]
encoder_inputs[i, :, :] = data_sel[0:source_seq_len-1, :]
decoder_inputs[i, :, :] = data_sel[source_seq_len-1:(source_seq_len+target_seq_len-1), :]
decoder_outputs[i, :, :] = data_sel[source_seq_len:, :]
return encoder_inputs, decoder_inputs, decoder_outputs
def get_srnn_gts( actions, test_set, data_mean, data_std, dim_to_ignore, one_hot, to_euler=True ):
srnn_gts_euler = {}
for action in actions:
srnn_gt_euler = []
_, _, srnn_expmap, = get_batch_srnn( test_set, action, actions )
# expmap -> rotmat -> euler
for i in np.arange( srnn_expmap.shape[0] ):
denormed = data_utils.unNormalizeData(srnn_expmap[i,:,:], data_mean, data_std, dim_to_ignore, actions, one_hot )
if to_euler:
for j in np.arange( denormed.shape[0] ):
for k in np.arange(3,97,3):
denormed[j,k:k+3] = data_utils.rotmat2euler( data_utils.expmap2rotmat( denormed[j,k:k+3] ))
            srnn_gt_euler.append( denormed )
# Put back in the dictionary
srnn_gts_euler[action] = srnn_gt_euler
return srnn_gts_euler
import numpy as np
from collections import defaultdict
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.decomposition import PCA
from sklearn.ensemble import BaseEnsemble
class LocalDecisionStump:
"""
An object that implements a callable local decision stump function and that also includes some meta-data that
allows for an API to interact with other methods in the package.
A local decision stump is a tri-valued function that is zero outside of a rectangular region, and on that region,
takes either a positive or negative value, depending on whether a single designated feature is above or below a
threshold. For more information on what a local decision stump is, refer to our paper.
:param feature: int
Feature used in the decision stump
:param threshold: float
Threshold used in the decision stump
:param left_val: float
The value taken when x_k <= threshold
:param right_val: float
The value taken when x_k > threshold
:param a_features: list of ints
List of ancestor feature indices (ordered from highest ancestor to lowest)
:param a_thresholds: list of floats
List of ancestor thresholds (ordered from highest ancestor to lowest)
:param a_signs: list of bools
List of signs indicating whether the current node is in the left child (False) or right child (True) of the
ancestor nodes (ordered from highest ancestor to lowest)
"""
def __init__(self, feature, threshold, left_val, right_val, a_features, a_thresholds, a_signs):
self.feature = feature
self.threshold = threshold
self.left_val = left_val
self.right_val = right_val
self.a_features = a_features
self.a_thresholds = a_thresholds
self.a_signs = a_signs
def __call__(self, data):
"""
Return values of the local decision stump function on an input data matrix with samples as rows
:param data: array-like of shape (n_samples, n_features)
Data matrix to feed into the local decision stump function
:return: array-like of shape (n_samples,)
Function values on the data
"""
root_to_stump_path_indicators = _compare_all(data, self.a_features, np.array(self.a_thresholds),
np.array(self.a_signs))
in_node = np.all(root_to_stump_path_indicators, axis=1).astype(int)
is_right = _compare(data, self.feature, self.threshold).astype(int)
result = in_node * (is_right * self.right_val + (1 - is_right) * self.left_val)
return result
def __repr__(self):
return f"LocalDecisionStump(feature={self.feature}, threshold={self.threshold}, left_val={self.left_val}, " \
f"right_val={self.right_val}, a_features={self.a_features}, a_thresholds={self.a_thresholds}, " \
f"a_signs={self.a_signs})"
def make_stump(node_no, tree_struct, parent_stump, is_right_child, normalize=False):
"""
Create a single local decision stump corresponding to a node in a scikit-learn tree structure object.
The nonzero values of the stump are chosen so that the vector of local decision stump values over the training
set (used to fit the tree) is orthogonal to those of all ancestor nodes.
:param node_no: int
The index of the node
:param tree_struct: object
The scikit-learn tree object
:param parent_stump: LocalDecisionStump object
The local decision stump corresponding to the parent of the node in question
:param is_right_child: bool
True if the new node is the right child of the parent node, False otherwise
:param normalize: bool
Flag. If set to True, then divide the nonzero function values by sqrt(n_samples in node) so that the
vector of function values on the training set has unit norm. If False, then do not divide, so that the
vector of function values on the training set has norm equal to n_samples in node.
:return: LocalDecisionStump object
The local decision stump corresponding to the node in question
"""
# Get features, thresholds and signs for ancestors
if parent_stump is None: # If root node
a_features = []
a_thresholds = []
a_signs = []
else:
a_features = parent_stump.a_features + [parent_stump.feature]
a_thresholds = parent_stump.a_thresholds + [parent_stump.threshold]
a_signs = parent_stump.a_signs + [is_right_child]
# Get indices for left and right children of the node in question
left_child = tree_struct.children_left[node_no]
right_child = tree_struct.children_right[node_no]
# Get quantities relevant to the node in question
feature = tree_struct.feature[node_no]
threshold = tree_struct.threshold[node_no]
left_size = tree_struct.n_node_samples[left_child]
right_size = tree_struct.n_node_samples[right_child]
parent_size = tree_struct.n_node_samples[node_no]
normalization = parent_size if normalize else 1
left_val = - np.sqrt(right_size / (left_size * normalization))
right_val = np.sqrt(left_size / (right_size * normalization))
return LocalDecisionStump(feature, threshold, left_val, right_val, a_features, a_thresholds, a_signs)
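
# Quick numeric check of the values above (normalize=False): with left_size=3
# and right_size=1, left_val = -sqrt(1/3) and right_val = sqrt(3), so the stump
# values over the node's training samples sum to 3*(-1/sqrt(3)) + 1*sqrt(3) = 0.
# This zero-sum property on the node is what makes each stump vector orthogonal
# to the constant direction on its node, and hence to its ancestors' stumps.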
def make_stumps(tree_struct, normalize=False):
"""
Create a collection of local decision stumps corresponding to all internal nodes in a scikit-learn tree structure
object.
:param tree_struct: object
The scikit-learn tree object
:param normalize: bool
Flag. If set to True, then divide the nonzero function values by sqrt(n_samples in node) so that the
vector of function values on the training set has unit norm. If False, then do not divide, so that the
vector of function values on the training set has norm equal to n_samples in node.
:return:
stumps: list of LocalDecisionStump objects
The local decision stumps corresponding to all internal node in the tree structure
num_splits_per_feature: array-like of shape (n_features,)
The number of splits in the tree on each original feature
"""
stumps = []
num_splits_per_feature = [0] * tree_struct.n_features
def make_stump_iter(node_no, tree_struct, parent_stump, is_right_child, normalize, stumps, num_splits_per_feature):
"""
Helper function for iteratively making local decision stump objects and appending them to the list stumps.
"""
new_stump = make_stump(node_no, tree_struct, parent_stump, is_right_child, normalize)
stumps.append(new_stump)
num_splits_per_feature[new_stump.feature] += 1
left_child = tree_struct.children_left[node_no]
right_child = tree_struct.children_right[node_no]
if tree_struct.feature[left_child] != -2: # is not leaf
make_stump_iter(left_child, tree_struct, new_stump, False, normalize, stumps, num_splits_per_feature)
if tree_struct.feature[right_child] != -2: # is not leaf
make_stump_iter(right_child, tree_struct, new_stump, True, normalize, stumps, num_splits_per_feature)
make_stump_iter(0, tree_struct, None, None, normalize, stumps, num_splits_per_feature)
return stumps, num_splits_per_feature
def tree_feature_transform(stumps, X):
"""
Transform the data matrix X using a mapping derived from a collection of local decision stump functions.
:param stumps: list of LocalDecisionStump objects
List of stump functions to use to transform data
:param X: array-like of shape (n_samples, n_features)
Original data matrix
:return: X_transformed: array-like of shape (n_samples, n_stumps)
Transformed data matrix
"""
transformed_feature_vectors = []
for stump in stumps:
transformed_feature_vec = stump(X)
transformed_feature_vectors.append(transformed_feature_vec)
X_transformed = np.vstack(transformed_feature_vectors).T
return X_transformed
class TreeTransformer(TransformerMixin, BaseEstimator):
"""
A transformer that transforms data using a representation built from local decision stumps from a tree or tree
ensemble. The transformer also comes with meta data on the local decision stumps and methods that allow
for transformations using sub-representations corresponding to each of the original features.
:param estimator: scikit-learn estimator
The scikit-learn tree or tree ensemble estimator object
:param pca: bool
Flag, if False, the sub-representation for each original feature is just the concatenation of the local
decision stumps splitting on that feature. If true, the sub-representation are the principal components of the
set of local decision stump vectors
:param max_components_type: {"median_splits", "max_splits", "nsamples", "nstumps", "min_nsamples_nstumps",
"min_fracnsamples_nstumps"} or int
Method for choosing the max number of components for PCA transformer for each sub-representation corresponding
to an original feature:
- If "median_splits", then max_components is alpha * median number of splits on the original feature
among trees in the estimator
- If "max_splits", then max_components is alpha * maximum number of splits on the original feature among
trees in the estimator
- If "nsamples", then max_components is alpha * n_samples
- If "nstumps", then max_components is alpha * n_stumps
- If "min_nsamples_nstumps", then max_components is alpha * min(n_samples, n_stumps), where n_stumps is
total number of local decision stumps splitting on that feature in the ensemble
- If "min_fracnsamples_nstumps", then max_components is min(alpha * n_samples, n_stumps), where n_stumps is
total number of local decision stumps splitting on that feature in the ensemble
- If int, then max_components is the given integer
:param alpha: float
Parameter for adjusting the max number of components for PCA.
:param normalize: bool
Flag. If set to True, then divide the nonzero function values for each local decision stump by
sqrt(n_samples in node) so that the vector of function values on the training set has unit norm. If False,
then do not divide, so that the vector of function values on the training set has norm equal to n_samples
in node.
"""
def __init__(self, estimator, pca=True, max_components_type="min_fracnsamples_nstumps", alpha=0.5, normalize=False):
self.estimator = estimator
self.pca = pca
self.max_components_type = max_components_type
self.alpha = alpha
self.normalize = normalize
# Check if single tree or tree ensemble
tree_models = estimator.estimators_ if isinstance(estimator, BaseEnsemble) else [estimator]
# Make stumps for each tree
num_splits_per_feature_all = []
self.all_stumps = []
for tree_model in tree_models:
tree_stumps, num_splits_per_feature = make_stumps(tree_model.tree_, normalize)
self.all_stumps += tree_stumps
num_splits_per_feature_all.append(num_splits_per_feature)
# Identify the stumps that split on feature k, for each k
self._original_feat_to_stump_mapping = defaultdict(list)
for idx, stump in enumerate(self.all_stumps):
self._original_feat_to_stump_mapping[stump.feature].append(idx)
# Obtain the median and max number of splits on each feature across trees
self.median_splits = np.median(num_splits_per_feature_all, axis=0)
self.max_splits = np.max(num_splits_per_feature_all, axis=0)
# Initialize list of PCA transformers, one for each set of stumps corresponding to each original feature
self.pca_transformers = defaultdict(lambda: None)
def fit(self, X, y=None):
def pca_on_stumps(k):
"""
Helper function to fit PCA transformer on stumps corresponding to original feature k
"""
restricted_stumps = self.get_stumps_for_feature(k)
n_stumps = len(restricted_stumps)
n_samples = X.shape[0]
# Get the number of components to use for PCA
if self.max_components_type == 'median_splits':
max_components = int(self.median_splits[k] * self.alpha)
elif self.max_components_type == "max_splits":
max_components = int(self.max_splits[k] * self.alpha)
elif self.max_components_type == "nsamples":
max_components = int(n_samples * self.alpha)
elif self.max_components_type == "nstumps":
max_components = int(n_stumps * self.alpha)
elif self.max_components_type == "min_nsamples_nstumps":
max_components = int(min(n_samples, n_stumps) * self.alpha)
elif self.max_components_type == "min_fracnsamples_nstumps":
max_components = int(min(n_samples * self.alpha, n_stumps))
elif isinstance(self.max_components_type, int):
max_components = self.max_components_type
else:
raise ValueError("Invalid max components type")
n_components = min(max_components, n_stumps, n_samples)
if n_components == 0:
pca_transformer = None
else:
X_transformed = tree_feature_transform(restricted_stumps, X)
pca_transformer = PCA(n_components=n_components)
pca_transformer.fit(X_transformed)
return pca_transformer
        if self.pca:
            n_features = X.shape[1]
            for k in np.arange(n_features):
                self.pca_transformers[k] = pca_on_stumps(k)
        return self  # scikit-learn convention: fit returns the estimator
def transform(self, X):
"""
Obtain all engineered features.
:param X: array-like of shape (n_samples, n_features)
Original data matrix
:return: X_transformed: array-like of shape (n_samples, n_new_features)
Transformed data matrix
"""
X_transformed = []
n_features = X.shape[1]
for k in range(n_features):
X_transformed_k = self.transform_one_feature(X, k)
if X_transformed_k is not None:
X_transformed.append(X_transformed_k)
X_transformed = np.hstack(X_transformed)
return X_transformed
def transform_one_feature(self, X, k):
"""
Obtain the engineered features corresponding to a given original feature X_k
:param X: array-like of shape (n_samples, n_features)
Original data matrix
:param k: int
Index of original feature
:return: X_transformed: array-like of shape (n_samples, n_new_features)
Transformed data matrix
"""
restricted_stumps = self.get_stumps_for_feature(k)
if len(restricted_stumps) == 0:
return None
else:
X_transformed = tree_feature_transform(restricted_stumps, X)
if self.pca_transformers[k] is not None:
X_transformed = self.pca_transformers[k].transform(X_transformed)
return X_transformed
def get_stumps_for_feature(self, k):
"""
Get the list of local decision stumps that split on feature k
:param k: int
Index of original feature
:return: restricted_stumps: list of LocalDecisionStump objects
"""
restricted_stump_indices = self._original_feat_to_stump_mapping[k]
restricted_stumps = [self.all_stumps[idx] for idx in restricted_stump_indices]
return restricted_stumps
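
# Usage sketch (the RandomForestRegressor choice is illustrative; any
# scikit-learn tree or tree ensemble works):
#
#     from sklearn.ensemble import RandomForestRegressor
#     rf = RandomForestRegressor(n_estimators=10).fit(X, y)
#     tt = TreeTransformer(estimator=rf).fit(X)
#     X_all = tt.transform(X)               # all engineered features
#     X_0 = tt.transform_one_feature(X, 0)  # features derived from splits on X_0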
def _compare(data, k, threshold, sign=True):
"""
Obtain indicator vector for the samples with k-th feature > threshold
:param data: array-like of shape (n_sample, n_feat)
:param k: int
Index of feature in question
:param threshold: float
Threshold for the comparison
:param sign: bool
Flag, if False, return indicator of the complement
:return: array-like of shape (n_samples,)
"""
if sign:
return data[:, k] > threshold
else:
return data[:, k] <= threshold
def _compare_all(data, ks, thresholds, signs):
"""
Obtain indicator vector for the samples with k-th feature > threshold or <= threshold (depending on sign)
for all k in ks
:param data: array-like of shape (n_sample, n_feat)
:param ks: list of ints
Indices of feature in question
    :param thresholds: list of floats
        Thresholds for the comparisons
    :param signs: list of bools
        Flags; if the k-th element is True, use the condition k-th feature >
        threshold, otherwise use the condition k-th feature <= threshold
    :return: array-like of shape (n_samples, len(ks))
return ~np.logical_xor(data[:, ks] > thresholds, signs)
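
# Worked example for the XOR identity above: with data = [[1.0, 0.0]],
# ks = [0, 1], thresholds = [0.5, 0.5] and signs = [True, False]:
#   data[:, ks] > thresholds  -> [[True, False]]
#   XOR with signs            -> [[False, False]]
#   negation                  -> [[True, True]]   (both path conditions hold)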
from django.db import models
# Create your models here.
class ShareholderInfo(models.Model):
# BE_PRESENT_CHOICE = (
# (0,'否'),
# (1,'是')
# )
# year = models.CharField(max_length=4, verbose_name="会议年份")
# xh = models.SmallIntegerField()
# cx = models.SmallIntegerField(choices=BE_PRESENT_CHOICE, verbose_name="是否出席")
# xcorwl = models.SmallIntegerField(choices=BE_PRESENT_CHOICE,verbose_name="是否出席现场")
    gdxm = models.CharField(max_length=20)  # shareholder name
    gdtype = models.CharField(max_length=20, null=True)  # shareholder type
gddmk = models.CharField(max_length=15, null=True)
sfz = models.CharField(max_length=25, null=True)
rs = models.SmallIntegerField(null=True)
frA = models.IntegerField(null=True)
gzA = models.IntegerField(null=True)
gzB = models.IntegerField(null=True)
    dlr = models.CharField(max_length=10, null=True)  # proxy / authorized agent
    # meno = models.CharField(max_length=20)  # remarks
# hconference = models.ForeignKey(Conference, on_delete=models.CASCADE, verbose_name="会议类型")
def __str__(self):
return self.gdxm
class Meta:
db_table = 'gdbook'
verbose_name = '股东信息花名册'
verbose_name_plural = verbose_name
class GB(models.Model):
year = models.SmallIntegerField(verbose_name="会议年份")
gb = models.IntegerField()
ltag = models.IntegerField()
ltbg = models.IntegerField()
fltg = models.IntegerField()
class Meta:
db_table = 'gb'
verbose_name = '股本信息表'
verbose_name_plural = verbose_name
class Meeting(models.Model):
year = models.SmallIntegerField(verbose_name="会议年份")
current_year = models.BooleanField(default=False)
    # DateField is precise to the day; DateTimeField is precise to the second
date = models.DateTimeField()
name = models.CharField(verbose_name="会议类型", max_length=20)
motion = models.CharField(max_length=100, verbose_name="议案主题",default="")
address = models.CharField(max_length=25, default="")
members = models.ManyToManyField(ShareholderInfo, through="OnSiteMeeting")
# gb_id = models.SmallIntegerField(default=1)
    gb = models.ForeignKey(GB, on_delete=models.SET_NULL, null=True)  # one-to-many: gb is the "one" side, annual_meeting the "many"; when the gb row is deleted, this FK is set to NULL
def __str__(self):
return str(self.year) + self.name
class Meta:
db_table = 'annual_meeting'
verbose_name = '年度会议登记本'
verbose_name_plural = verbose_name
class OnSiteMeeting(models.Model):
BE_PRESENT_CHOICE = (
(0,'否'),
(1,'是')
)
# gb = models.ForeignKey(GB, on_delete=models.CASCADE)
shareholder = models.ForeignKey(ShareholderInfo, on_delete=models.CASCADE)
meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE)
cx = models.BooleanField(default=False, verbose_name="是否出席")
xcorwl = models.BooleanField(default=False,verbose_name="是否出席现场")
gzA = models.IntegerField(default=0)
gzB = models.IntegerField(default=0)
    meno = models.CharField(max_length=20, default=None, null=True)  # remarks
class Meta:
db_table = 'on_site_meeting'
verbose_name = '现场会议登记表'
verbose_name_plural = verbose_name
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Test suite for module pyszn.injection.
"""
from os.path import join
from shutil import rmtree
from collections import OrderedDict
from deepdiff import DeepDiff
from pyszn.injection import parse_attribute_injection
TOPOLOGY_TO_BUILD = """
# Nodes
[shell=vtysh name="Switch 9"] sw9
[shell=vtysh name="Switch 2"] sw2
[type=switch name="Switch 8"] sw8
[type=host name="Host 3"] hs3
[type=host name="Host 2"] hs2
# Links
hs3:1 -- sw9:1
hs2:1 -- sw2:1
[attr1=value1] sw9:2 -- sw2:2
"""
TOPOLOGY_MATCH_0 = """
# Nodes
[shell=vtysh name="Switch 1"] sw1
[shell=vtysh name="Switch 2"] sw2
[type=switch name="Switch 8"] sw8
[type=host name="Host 1"] hs1
[type=host name="Host 2"] hs2
# Links
hs1:1 -- sw1:1
hs2:1 -- sw2:1
[attr1=1] sw1:2 -- sw2:2
"""
TOPOLOGY_MATCH_1 = """
# Nodes
[shell=vtysh name="Switch 1"] sw1
[shell=vtysh name="Switch 2"] sw2
[type=host name="Host 1"] hs1
[type=host name="Host 2"] hs2
# Links
hs1:1 -- sw1:1
hs2:1 -- sw2:1
[attr1=1] sw1:2 -- sw2:2
"""
TOPOLOGY_MATCH_2 = """
# Nodes
[shell=vtysh name="Switch 1"] sw4
[shell=vtysh name="Switch 2"] sw5
"""
TOPOLOGY_MATCH_3 = """
# Nodes
[shell=vtysh name="Switch 1"] sw6
[shell=vtysh name="Switch 2"] sw7
[type=host name="Host 1"] hs3
[type=host name="Host 2"] hs4
# Links
hs1:1 -- sw1:1
hs2:1 -- sw2:1
[attr1=1] sw1:2 -- sw2:2
"""
TOPOLOGY_MATCHES = {
'0': TOPOLOGY_MATCH_0,
'2': TOPOLOGY_MATCH_2
}
TOPOLOGY_MATCHES_FOLDER = {
'1': TOPOLOGY_MATCH_1,
'3': TOPOLOGY_MATCH_3
}
INJECTION_FILE = """
[
{{
"files": ["test_topology_match_0.py"],
"modifiers": [
{{
"links": ["hs1:1 -- sw1:1", "test_attr=test"],
"attributes": {{
"link_attr": "link_value"
}}
}},
{{
"ports": ["hs1:1", "test_attr=test"],
"attributes": {{
"port_attr": "port_value"
}}
}},
{{
"nodes": ["sw1"],
"attributes": {{
"image": "image_for_sw1",
"hardware": "hardware_for_sw1"
}}
}},
{{
"nodes": ["sw2"],
"attributes": {{
"image": "image_for_sw2",
"shell": "vtysh",
"name": "new_name"
}}
}},
{{
"nodes": ["type=switch"],
"attributes": {{
"chassis": "chassis_for_sw8"
}}
}}
]
}},
{{
"files": [
"test_topology_match_0.py",
"test_topology_match_1.py"
],
"modifiers": [
{{
"nodes": ["sw1", "type=host", "sw3"],
"attributes": {{
"image": "image_for_sw1_sw3_hs1_hs2",
"hardware": "hardware_for_sw1_sw3_hs1_hs2"
}}
}},
{{
"nodes": ["sw4"],
"attributes": {{
"image": "image_for_sw4"
}}
}}
]
}},
{{
"files": ["test_topology_match_2.py"],
"modifiers": [
{{
"nodes": ["*"],
"attributes": {{
"image": "image_for_sw4_sw5"
}}
}}
]
}},
{{
"files": ["*"],
"modifiers": [
{{
"nodes": ["hs*"],
"attributes": {{
"image": "image_for_all_hosts"
}}
}},
{{
"nodes": ["sw6", "sw7"],
"attributes": {{
"image": "image_for_sw6_sw7"
}}
}}
]
}}
]
"""
EXPECTED_PARSED_INJECTION_FILE = OrderedDict([
(
'{search_path}/test_topology_match_0.py',
{
'environment': {},
'ports': OrderedDict([
(('hs1', '1'), {'port_attr': 'port_value'})
]),
'links': OrderedDict([(
(('hs1', '1'), ('sw1', '1')),
{
'link_attr': 'link_value',
}
)]),
'nodes': OrderedDict([
(
'sw1', {
'image': 'image_for_sw1_sw3_hs1_hs2',
'hardware': 'hardware_for_sw1_sw3_hs1_hs2',
}
), (
'sw2', {
'image': 'image_for_sw2',
'shell': 'vtysh',
'name': 'new_name',
}
), (
'sw8', {
'chassis': 'chassis_for_sw8'
}
), (
'hs1', {
'image': 'image_for_all_hosts',
'hardware': 'hardware_for_sw1_sw3_hs1_hs2',
}
), (
'hs2', {
'image': 'image_for_all_hosts',
'hardware': 'hardware_for_sw1_sw3_hs1_hs2',
}
),
])}
), (
'{search_path}/subfolder/test_topology_match_1.py',
{
'environment': {},
'ports': OrderedDict(),
'links': OrderedDict(),
'nodes': OrderedDict([
(
'sw1', {
'image': 'image_for_sw1_sw3_hs1_hs2',
'hardware': 'hardware_for_sw1_sw3_hs1_hs2',
}
), (
'hs1', {
'image': 'image_for_all_hosts',
'hardware': 'hardware_for_sw1_sw3_hs1_hs2',
}
), (
'hs2', {
'image': 'image_for_all_hosts',
'hardware': 'hardware_for_sw1_sw3_hs1_hs2',
}
),
])
}
), (
'{search_path}/test_topology_match_2.py',
{
'environment': {},
'ports': OrderedDict(),
'links': OrderedDict(),
'nodes': OrderedDict([
(
'sw4', {
'image': 'image_for_sw4_sw5',
}
), (
'sw5', {
'image': 'image_for_sw4_sw5',
}
),
])
}
), (
'{search_path}/subfolder/test_topology_match_3.py',
{
'environment': {},
'ports': OrderedDict(),
'links': OrderedDict(),
'nodes': OrderedDict([
(
'hs1', {
'image': 'image_for_all_hosts',
}
), (
'hs2', {
'image': 'image_for_all_hosts',
}
), (
'hs3', {
'image': 'image_for_all_hosts',
}
), (
'hs4', {
'image': 'image_for_all_hosts',
}
), (
'sw6', {
'image': 'image_for_sw6_sw7',
}
), (
'sw7', {
'image': 'image_for_sw6_sw7',
}
),
])
}
)]
)
def test_attribute_injection(tmpdir):
"""
    Test that the configuration file is parsed correctly.
"""
workdir = str(tmpdir)
search_path = str(tmpdir.mkdir('test'))
subfolder = str(tmpdir.mkdir('test/subfolder'))
try:
# Write matching topologies
for basepath, matches in (
(search_path, TOPOLOGY_MATCHES),
(subfolder, TOPOLOGY_MATCHES_FOLDER)):
for count, content in matches.items():
output_filename = join(
basepath, 'test_topology_match_{}.py'.format(count)
)
with open(output_filename, 'w') as fd:
fd.write('TOPOLOGY = """\n')
fd.write(content)
fd.write('"""')
# Write the attributes injection file
injection_path = join(workdir, 'attributes_injection.json')
with open(injection_path, 'w') as fd:
fd.write(INJECTION_FILE.format(search_path=search_path))
# Change keys of the expected parsed injection file
expected = OrderedDict()
for key, value in EXPECTED_PARSED_INJECTION_FILE.items():
expected[key.format(search_path=search_path)] = value
# Actually parse the injection file
actual = parse_attribute_injection(
injection_path, search_paths=[search_path]
)
# Compare the actual and the expected
differences = DeepDiff(actual, expected)
assert not differences
finally:
try:
rmtree(workdir)
except Exception:
pass
|
<reponame>PavanKishore21/probability<filename>tensorflow_probability/python/bijectors/scale_matvec_lu.py<gh_stars>1-10
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Invertible 1x1 Convolution used in GLOW."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import bijector
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.math.linalg import lu_reconstruct
from tensorflow_probability.python.math.linalg import lu_reconstruct_assertions
from tensorflow_probability.python.math.linalg import lu_solve
from tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import
__all__ = [
'MatvecLU', # Deprecated
'ScaleMatvecLU',
]
class ScaleMatvecLU(bijector.AutoCompositeTensorBijector):
"""Matrix-vector multiply using LU decomposition.
This bijector is identical to the 'Convolution1x1' used in Glow
  (Kingma and Dhariwal, 2018) [1].
#### Examples
Here's an example of initialization via random weights matrix:
```python
def trainable_lu_factorization(
event_size, batch_shape=(), seed=None, dtype=tf.float32, name=None):
with tf.name_scope(name or 'trainable_lu_factorization'):
event_size = tf.convert_to_tensor(
event_size, dtype_hint=tf.int32, name='event_size')
batch_shape = tf.convert_to_tensor(
batch_shape, dtype_hint=event_size.dtype, name='batch_shape')
random_matrix = tf.random.uniform(
shape=tf.concat([batch_shape, [event_size, event_size]], axis=0),
dtype=dtype,
seed=seed)
random_orthonormal = tf.linalg.qr(random_matrix)[0]
lower_upper, permutation = tf.linalg.lu(random_orthonormal)
lower_upper = tf.Variable(
initial_value=lower_upper,
trainable=True,
name='lower_upper')
# Initialize a non-trainable variable for the permutation indices so
# that its value isn't re-sampled from run-to-run.
permutation = tf.Variable(
initial_value=permutation,
trainable=False,
name='permutation')
return lower_upper, permutation
channels = 3
conv1x1 = tfb.ScaleMatvecLU(*trainable_lu_factorization(channels),
validate_args=True)
x = tf.random.uniform(shape=[2, 28, 28, channels])
fwd = conv1x1.forward(x)
rev_fwd = conv1x1.inverse(fwd)
# ==> x
```
To initialize this variable outside of TensorFlow, one can also use SciPy,
e.g.,
```python
def lu_factorized_random_orthonormal_matrix(channels, dtype=np.float32):
random_matrix = np.random.rand(channels, channels).astype(dtype)
lower_upper = scipy.linalg.qr(random_matrix)[0]
permutation = scipy.linalg.lu(lower_upper, overwrite_a=True)[0]
permutation = np.argmax(permutation, axis=-2)
return lower_upper, permutation
```
#### References
[1]: <NAME>, <NAME>. Glow: Generative Flow with
Invertible 1x1 Convolutions. _arXiv preprint arXiv:1807.03039_, 2018.
https://arxiv.org/abs/1807.03039
"""
def __init__(self,
lower_upper,
permutation,
validate_args=False,
name=None):
"""Creates the ScaleMatvecLU bijector.
Args:
lower_upper: The LU factorization as returned by `tf.linalg.lu`.
permutation: The LU factorization permutation as returned by
`tf.linalg.lu`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
Default value: `False`.
name: Python `str` name given to ops managed by this object.
Default value: `None` (i.e., 'ScaleMatvecLU').
"""
parameters = dict(locals())
with tf.name_scope(name or 'ScaleMatvecLU') as name:
self._lower_upper = tensor_util.convert_nonref_to_tensor(
lower_upper, dtype_hint=tf.float32, name='lower_upper')
self._permutation = tensor_util.convert_nonref_to_tensor(
permutation, dtype_hint=tf.int32, name='permutation')
super(ScaleMatvecLU, self).__init__(
dtype=self._lower_upper.dtype,
is_constant_jacobian=True,
forward_min_event_ndims=1,
validate_args=validate_args,
parameters=parameters,
name=name)
@classmethod
def _parameter_properties(cls, dtype):
# pylint: disable=g-long-lambda
return dict(
lower_upper=parameter_properties.ParameterProperties(
event_ndims=2,
shape_fn=lambda sample_shape: ps.concat(
[sample_shape, sample_shape[-1:]], axis=0),
),
permutation=parameter_properties.ParameterProperties(
event_ndims=1,
default_constraining_bijector_fn=parameter_properties
.BIJECTOR_NOT_IMPLEMENTED))
# pylint: enable=g-long-lambda
@property
def lower_upper(self):
return self._lower_upper
@property
def permutation(self):
return self._permutation
def _broadcast_params(self):
lower_upper = tf.convert_to_tensor(self.lower_upper)
perm = tf.convert_to_tensor(self.permutation)
shape = ps.broadcast_shape(ps.shape(lower_upper)[:-1],
ps.shape(perm))
lower_upper = tf.broadcast_to(
lower_upper, ps.concat([shape, shape[-1:]], 0))
perm = tf.broadcast_to(perm, shape)
return lower_upper, perm
def _forward(self, x):
lu, perm = self._broadcast_params()
w = lu_reconstruct(lower_upper=lu,
perm=perm,
validate_args=self.validate_args)
return tf.linalg.matvec(w, x)
def _inverse(self, y):
lu, perm = self._broadcast_params()
return lu_solve(
lower_upper=lu,
perm=perm,
rhs=y[..., tf.newaxis],
validate_args=self.validate_args)[..., 0]
def _forward_log_det_jacobian(self, unused_x):
return tf.reduce_sum(
tf.math.log(tf.abs(tf.linalg.diag_part(self.lower_upper))),
axis=-1)
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
lu, perm = None, None
assertions = []
if (is_init != tensor_util.is_ref(self.lower_upper) or
is_init != tensor_util.is_ref(self.permutation)):
lu, perm = self._broadcast_params()
assertions.extend(lu_reconstruct_assertions(
lu, perm, self.validate_args))
if is_init != tensor_util.is_ref(self.lower_upper):
lu = tf.convert_to_tensor(self.lower_upper) if lu is None else lu
assertions.append(assert_util.assert_none_equal(
tf.linalg.diag_part(lu), tf.zeros([], dtype=lu.dtype),
message='Invertible `lower_upper` must have nonzero diagonal.'))
return assertions
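# Hedged sanity-check sketch (not part of the library): P is a permutation
# (|det P| = 1) and L is unit-lower-triangular, so log|det W| reduces to
# sum(log|diag(lower_upper)|), which is exactly what
# _forward_log_det_jacobian returns above.
#
#   import tensorflow.compat.v2 as tf
#   import tensorflow_probability as tfp
#   lower_upper, permutation = tf.linalg.lu(tf.constant([[2., 1.], [0.5, 3.]]))
#   bij = tfp.bijectors.ScaleMatvecLU(lower_upper, permutation)
#   fldj = bij.forward_log_det_jacobian(tf.zeros([2]), event_ndims=1)
#   # fldj ~= log(5.5), since det([[2., 1.], [0.5, 3.]]) = 5.5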
class MatvecLU(ScaleMatvecLU):
"""Matrix-vector multiply using LU decomposition.
This bijector is identical to the 'Convolution1x1' used in Glow
  (Kingma and Dhariwal, 2018) [1].
#### Examples
Here's an example of initialization via random weights matrix:
```python
def trainable_lu_factorization(
event_size, batch_shape=(), seed=None, dtype=tf.float32, name=None):
with tf.name_scope(name or 'trainable_lu_factorization'):
event_size = tf.convert_to_tensor(
event_size, dtype_hint=tf.int32, name='event_size')
batch_shape = tf.convert_to_tensor(
batch_shape, dtype_hint=event_size.dtype, name='batch_shape')
random_matrix = tf.random.uniform(
shape=tf.concat([batch_shape, [event_size, event_size]], axis=0),
dtype=dtype,
seed=seed)
random_orthonormal = tf.linalg.qr(random_matrix)[0]
lower_upper, permutation = tf.linalg.lu(random_orthonormal)
lower_upper = tf.Variable(
initial_value=lower_upper,
trainable=True,
name='lower_upper')
# Initialize a non-trainable variable for the permutation indices so
# that its value isn't re-sampled from run-to-run.
permutation = tf.Variable(
initial_value=permutation,
trainable=False,
name='permutation')
return lower_upper, permutation
channels = 3
conv1x1 = tfb.MatvecLU(*trainable_lu_factorization(channels),
validate_args=True)
x = tf.random.uniform(shape=[2, 28, 28, channels])
fwd = conv1x1.forward(x)
rev_fwd = conv1x1.inverse(fwd)
# ==> x
```
To initialize this variable outside of TensorFlow, one can also use SciPy,
e.g.,
```python
def lu_factorized_random_orthonormal_matrix(channels, dtype=np.float32):
random_matrix = np.random.rand(channels, channels).astype(dtype)
lower_upper = scipy.linalg.qr(random_matrix)[0]
permutation = scipy.linalg.lu(lower_upper, overwrite_a=True)[0]
permutation = np.argmax(permutation, axis=-2)
return lower_upper, permutation
```
#### References
[1]: <NAME>, <NAME>. Glow: Generative Flow with
Invertible 1x1 Convolutions. _arXiv preprint arXiv:1807.03039_, 2018.
https://arxiv.org/abs/1807.03039
"""
@deprecation.deprecated(
'2020-01-01',
'`MatvecLU` has been deprecated and renamed `ScaleMatvecLU`; please use '
'that symbol instead.')
def __init__(self,
lower_upper,
permutation,
validate_args=False,
name=None):
"""Creates the MatvecLU bijector.
Args:
lower_upper: The LU factorization as returned by `tf.linalg.lu`.
permutation: The LU factorization permutation as returned by
`tf.linalg.lu`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
Default value: `False`.
name: Python `str` name given to ops managed by this object.
Default value: `None` (i.e., 'MatvecLU').
"""
super(MatvecLU, self).__init__(
        lower_upper, permutation, validate_args=validate_args,
        name=name or 'MatvecLU')
|
<reponame>exebixel/oneparams
#!/usr/bin/python
import click
import sys
import pandas as pd
from oneparams.reset import pw_reset
from oneparams.api.login import login
from oneparams.excel.card import cards
from oneparams.excel.colaborador import colaborador
from oneparams.excel.comissao import Comissao
from oneparams.excel.servicos import servico
from oneparams.excel.cliente import clientes
import oneparams.config as config
_global_options = [
click.argument('worksheet', required=True, type=click.Path(exists=True)),
click.option('-l', '--login', 'login', required=True,
type=str, help="Email address to login"),
click.option('-p', '--password', 'password', required=False, type=str,
default='<PASSWORD>', help="Access password (default = <PASSWORD>)"),
click.option('-e', '--empresa', 'empresa', required=True,
type=str, help="Company name used to parametrization"),
click.option('-eid', '--empresa-id', 'empresa_id',
                 required=False, type=int, default=None, help="Company id (disambiguates when several companies share the same name)"),
click.option('-f', '--filial', 'filial', required=False,
type=str, help="Branch name used to parametrization"),
click.option('-W', '--no-warning', 'warning', required=False,
is_flag=True, default=False, help="Suppress warnings")
]
_reset_options = [
click.option('-R', '--reset', 'reset', required=False, is_flag=True,
default=False, help="Delete or inactivate all services")
]
_error_options = [
click.option('-E', '--no-error', 'error', required=False, is_flag=True,
default=False, help="Resolve erros (this can delete data)")
]
_skip_options = [
click.option('-S', '--skip', 'skip', required=False, is_flag=True,
default=False, help='Skip items already registered')
]
def add_option(options):
def _add_options(func):
for option in reversed(options):
func = option(func)
return func
return _add_options
def cli_login(kwargs):
one = login()
one.login(nome_empresa=kwargs['empresa'],
nome_filial=kwargs['filial'],
email=kwargs['login'],
senha=kwargs['password'],
empresa_id=kwargs['empresa_id'])
def cli_file(worksheet):
try:
return pd.ExcelFile(worksheet)
except FileNotFoundError as exp:
sys.exit(exp)
except ValueError as exp:
sys.exit(exp)
def cli_config(error=False, warning=False, skip=False):
config.RESOLVE_ERROS = error
config.NO_WARNING = warning
config.SKIP = skip
@click.group()
@click.version_option(config.VERSION)
def cli():
pass
@cli.command(help="Manipulating Services")
@add_option(_global_options)
@add_option(_reset_options)
def serv(**kwargs):
cli_login(kwargs)
book = cli_file(kwargs['worksheet'])
cli_config(warning=kwargs['warning'])
servico(book, reset=kwargs['reset'])
@cli.command(help="Manipulating Collaborators")
@add_option(_global_options)
def cols(**kwargs):
cli_login(kwargs)
book = cli_file(kwargs['worksheet'])
cli_config(warning=kwargs['warning'])
colaborador(book)
@cli.command(help="Manipulating Cards")
@add_option(_global_options)
@add_option(_reset_options)
def card(**kwargs):
cli_login(kwargs)
book = cli_file(kwargs['worksheet'])
cli_config(warning=kwargs['warning'])
cards(book, reset=kwargs['reset'])
@cli.command(help="Professional Committee Manipulation")
@add_option(_global_options)
@add_option(_reset_options)
def comm(**kwargs):
cli_login(kwargs)
book = cli_file(kwargs['worksheet'])
cli_config(warning=kwargs['warning'])
Comissao(book, reset=kwargs['reset'])
@cli.command(help="Manipulating Clients")
@add_option(_global_options)
@add_option(_reset_options)
@add_option(_error_options)
@add_option(_skip_options)
def clis(**kwargs):
cli_login(kwargs)
book = cli_file(kwargs['worksheet'])
cli_config(error=kwargs['error'],
warning=kwargs['warning'],
skip=kwargs['skip'])
clientes(book, reset=kwargs['reset'])
@cli.command(help="Password Reset")
@click.argument('email', required=True, type=str)
@click.option('-k', '--key', 'access_key', envvar='ONE_RESET', required=True, type=str)
def reset(email, access_key):
    pw_reset(email, access_key)
if __name__ == "__main__":
cli()
|
<reponame>kzinmr/pyknp-extend
#-*- encoding: utf-8 -*-
import re
import sys
import unittest
from pyknp import MList
from pyknp import Morpheme
from pyknp import Features
class Tag(object):
"""
    Object that holds the various pieces of information for a tag (basic phrase), the unit of case analysis.
"""
def __init__(self, spec, tag_id=0, newstyle=False):
self._mrph_list = MList()
self.parent_id = -1
self.parent = None
self.children = []
self.dpndtype = ''
self.fstring = ''
self.features = None
self._pstring = ''
self.tag_id = tag_id
self.synnodes = []
spec = spec.strip()
if spec == '+':
pass
elif newstyle:
items = spec.split("\t")
self.parent_id = int(items[2])
self.dpndtype = items[3]
self.fstring = items[17]
self.repname = items[6]
self.features = Features(self.fstring, "|", False)
elif re.match(r'\+ (-?\d+)(\w)(.*)$', spec):
match = re.match(r'\+ (-?\d+)(\w)(.*)$', spec)
self.parent_id = int(match.group(1))
self.dpndtype = match.group(2)
self.fstring = match.group(3).strip()
else:
sys.stderr.write("Illegal tag spec: %s\n" % spec)
            sys.exit(1)
        # Extract 正規化代表表記 (normalized representative notation)
if not newstyle:
self.repname = ''
self.features = Features(self.fstring)
rep = self.features.get("正規化代表表記")
if rep is not None:
self.repname = rep
def push_mrph(self, mrph):
self._mrph_list.push_mrph(mrph)
def spec(self):
return "+ %d%s %s\n%s" % (self.parent_id, self.dpndtype, self.fstring,
self._mrph_list.spec())
def mrph_list(self):
return self._mrph_list
def pstring(self, string=None):
if string:
self._pstring = string
else:
return self._pstring
def get_surface(self):
return ''.join([mrph.midasi for mrph in self.mrph_list()])
class TagTest(unittest.TestCase):
def test(self):
tag_str = "+ 1D <BGH:構文/こうぶん><文節内><係:文節内><文頭><体言><名詞項候補><先行詞候補><正規化代表表記:構文/こうぶん>"
tag = Tag(tag_str, 2)
self.assertEqual(tag.tag_id, 2)
self.assertEqual(tag.dpndtype, 'D')
self.assertEqual(tag.parent_id, 1)
self.assertEqual(len(tag.mrph_list()), 0)
mrph1 = Morpheme("構文 こうぶん 構文 名詞 6 普通名詞 1 * 0 * 0 \"代表表記:構文/こうぶん カテゴリ:抽象物\" <代表表記:構文/こうぶん>")
mrph2 = Morpheme("解析 かいせき 解析 名詞 6 サ変名詞 2 * 0 * 0 \"代表表記:解析/かいせき カテゴリ:抽象物 ドメイン:教育・学習;科学・技術\" <代表表記:解析/かいせき>")
tag.push_mrph(mrph1)
self.assertEqual(len(tag.mrph_list()), 1)
tag.push_mrph(mrph2)
self.assertEqual(len(tag.mrph_list()), 2)
self.assertEqual(tag.get_surface(), '構文解析')
if __name__ == '__main__':
unittest.main()
|
<gh_stars>1-10
#!/usr/bin/env python
import atexit
import logging
import os
import random
import signal
import subprocess
import tempfile
import time
import urllib
from streamcorpus_pipeline.stages import BatchTransform
logger = logging.getLogger(__name__)
# TODO: recast as an IncrementalTransform with persistent subprocess daemon?
# That could potentially save on startup time by allowing one run of the subprocess to handle multiple chunk files worth of input.
#
# This is made available to yaml as "textfilter_batch" by the
# entry_points in this project's setup.py
class FastFilterBatch(BatchTransform):
config_name = 'textfilter_batch'
default_config = {
'names_scf': None,
'names_simple': None,
'min_name_length': None,
'max_name_length': None,
'threads': None,
'timeout_sec': 3600,
'bin_path': None,
}
def __init__(self, config):
self.config = config
self.names_scf = config.get('names_scf')
self.names_simple = config.get('names_simple')
assert self.names_scf or self.names_simple, 'need names_scf or names_simple'
self.min_name_length = config.get('min_name_length')
self.max_name_length = config.get('max_name_length')
self.threads = config.get('threads')
self.timeout_sec = int(config.get('timeout_sec', 3600))
# this will hold the Popen object
self.proc = None
self.temp_file_path = None
def _cmd(self):
# TODO: replace this url special-case with a general purpose
# application level virtual filesystem
if self.names_simple and (self.names_simple.startswith('http:') or
self.names_simple.startswith('https:')):
fd, self.temp_file_path = tempfile.mkstemp(suffix='.txt', prefix='names_')
atexit.register(os.remove, self.temp_file_path)
os.close(fd)
logger.info('downloading names %r -> %r', self.names_simple, self.temp_file_path)
urllib.urlretrieve(self.names_simple, self.temp_file_path)
self.names_simple = self.temp_file_path
cmd = [self.get_bin_path()]
if self.names_scf:
cmd += ['--names-scf', self.names_scf]
elif self.names_simple:
cmd += ['--names', self.names_simple]
if self.min_name_length is not None:
cmd += ['--min-name-length', str(self.min_name_length)]
if self.max_name_length:
cmd += ['--max-name-length', str(self.max_name_length)]
if self.threads:
cmd += ['--threads', str(self.threads)]
return cmd
def process_path(self, chunk_path):
'''
        Process the streamcorpus chunk file at chunk_path.
        Works in place, leaving results at the same path (via a temp file and rename).
'''
cmd = self._cmd()
tmp_path = chunk_path + '_tmp_{0:x}'.format(random.randint(1,999999999))
cmd += ['--input', chunk_path, '--output', tmp_path]
logger.info('going to run filter cmd: %r', cmd)
start = time.time()
self.proc = subprocess.Popen(cmd, shell=False)
retcode = None
while True:
retcode = self.proc.poll()
if retcode is not None:
break
dt = time.time() - start
if dt > self.timeout_sec:
logger.error('cmd timed out after %s: %r', dt, cmd)
self.proc.send_signal(signal.SIGKILL)
raise Exception('filter timed out')
time.sleep(1.0)
self.proc = None
if retcode != 0:
raise Exception('filter returned code: {0}'.format(retcode))
logger.debug('clobber tmp file back onto chunk: mv %r %r', tmp_path, chunk_path)
os.rename(tmp_path, chunk_path)
logger.debug('filter done')
def shutdown(self):
if self.proc:
try:
self.proc.send_signal(signal.SIGTERM)
            except Exception:
logger.error('error terminating fast filter subprocess', exc_info=True)
self.proc = None
# TODO? sleep 1; kill -9 ?
def get_bin_path(self):
# this file is
# py/src/streamcorpus_filter/pipeline_stage.py
filter_binary = self.config.get('bin_path')
if filter_binary:
if not os.path.isfile(filter_binary):
logger.error('config bin_path set but no file there: %r', filter_binary)
else:
return filter_binary
py_src_streamcorpus_filter_dir = os.path.dirname(__file__)
filter_binary = os.path.abspath(
os.path.join(
py_src_streamcorpus_filter_dir,
'../../../cpp/filter-multifast'))
return filter_binary
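# Hedged usage sketch (the paths and names file below are hypothetical; the
# keys mirror default_config above). Kept as a comment because the stage
# shells out to the prebuilt filter-multifast binary:
#
#   stage = FastFilterBatch({
#       'names_simple': '/tmp/names.txt',     # one name per line
#       'bin_path': '/opt/filter-multifast',  # prebuilt C++ filter binary
#       'threads': 4,
#       'timeout_sec': 600,
#   })
#   stage.process_path('/tmp/input.sc')  # rewrites the chunk file in place
#   stage.shutdown()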
|
<reponame>easyopsapis/easyops-api-python
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: remove.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='remove.proto',
package='instance_relation',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x0cremove.proto\x12\x11instance_relation\x1a\x1bgoogle/protobuf/empty.proto\"m\n\rRemoveRequest\x12\x10\n\x08objectId\x18\x01 \x01(\t\x12\x16\n\x0erelationSideId\x18\x02 \x01(\t\x12\x14\n\x0cinstance_ids\x18\x03 \x03(\t\x12\x1c\n\x14related_instance_ids\x18\x04 \x03(\t\"o\n\x15RemoveResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12$\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x16.google.protobuf.Emptyb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
_REMOVEREQUEST = _descriptor.Descriptor(
name='RemoveRequest',
full_name='instance_relation.RemoveRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='objectId', full_name='instance_relation.RemoveRequest.objectId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='relationSideId', full_name='instance_relation.RemoveRequest.relationSideId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instance_ids', full_name='instance_relation.RemoveRequest.instance_ids', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='related_instance_ids', full_name='instance_relation.RemoveRequest.related_instance_ids', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=64,
serialized_end=173,
)
_REMOVERESPONSEWRAPPER = _descriptor.Descriptor(
name='RemoveResponseWrapper',
full_name='instance_relation.RemoveResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='instance_relation.RemoveResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='instance_relation.RemoveResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='instance_relation.RemoveResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='instance_relation.RemoveResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=175,
serialized_end=286,
)
_REMOVERESPONSEWRAPPER.fields_by_name['data'].message_type = google_dot_protobuf_dot_empty__pb2._EMPTY
DESCRIPTOR.message_types_by_name['RemoveRequest'] = _REMOVEREQUEST
DESCRIPTOR.message_types_by_name['RemoveResponseWrapper'] = _REMOVERESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RemoveRequest = _reflection.GeneratedProtocolMessageType('RemoveRequest', (_message.Message,), {
'DESCRIPTOR' : _REMOVEREQUEST,
'__module__' : 'remove_pb2'
# @@protoc_insertion_point(class_scope:instance_relation.RemoveRequest)
})
_sym_db.RegisterMessage(RemoveRequest)
RemoveResponseWrapper = _reflection.GeneratedProtocolMessageType('RemoveResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _REMOVERESPONSEWRAPPER,
'__module__' : 'remove_pb2'
# @@protoc_insertion_point(class_scope:instance_relation.RemoveResponseWrapper)
})
_sym_db.RegisterMessage(RemoveResponseWrapper)
# @@protoc_insertion_point(module_scope)
|
<gh_stars>0
# MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
File: pressure_ratio_salhi-ar_example.py
Author: <NAME>
Date: March, 2021
Description: generates Fig. 2d in Part 2 of Physics of Thermionic Orificed Hollow Cathodes.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
### Path to HDF5 file
path_to_results = '../../results/salhi_ar.h5'
### Generate a dataframe out of results for the following parameters:
# Discharge current = 1-20 A
# Mass flow rate = 0.5, 0.93 eq-A
# Neutral gas temperature = 2000, 3000, 4000 K
# Sheath voltage = 1-10 V
key_root = 'Ar/simulations/results/'
key_end = ['r20210309170114','r20210309180313','r20210309190430']
Tgvec = [2000,3000,4000]
# Create a list to collect the per-temperature dataframes
dlist = []
for TgK, ke in zip(Tgvec,key_end):
# Create the key
    # 'Ar/simulations/results/<temperature>/insert/r<UTC time results were written>'
key = key_root + str(TgK) + '/insert/' + ke
# Read the dataframe
d = pd.read_hdf(path_to_results,key=key)
dlist.append(d)
# Append everything to the first dataframe
for d in dlist[1:]:
dlist[0] = dlist[0].append(d)
# Aggregate dataframe
dfall = dlist[0].copy()
### Find the minimum and maximum bounds for each discharge current
for idx,md in enumerate(np.unique(dfall['massFlowRate_eqA'])):
dfx = dfall[dfall['massFlowRate_eqA']==md]
Idvec = np.unique(dfx['dischargeCurrent'])
min_ratio = np.zeros_like(Idvec)
max_ratio = np.zeros_like(Idvec)
for kk, Id in enumerate(Idvec):
dfxx = dfx[dfx['dischargeCurrent'] == Id]
min_ratio[kk] = np.nanmin(dfxx['totalPressureCorr']/dfxx['magneticPressure'])
max_ratio[kk] = np.nanmax(dfxx['totalPressureCorr']/dfxx['magneticPressure'])
# Plot results
plt.loglog(Idvec/md,min_ratio,'k-')
plt.loglog(Idvec/md,max_ratio,'k-')
plt.fill_between(Idvec/md,min_ratio,max_ratio,color=(0.5,0.5,0.5,0.5))
## Plot experimental data
if idx == 0:
xp_data = np.array([
[2,5316.34861111339],
[4,1359.01870574937],
[6,635.710946408889],
[8,367.566877926228],
[10,239.799201797101],
[14,127.815608942121],
[18,79.5695437435862],
[24,46.0617915438062],
[28,34.5960578776159],
[30,30.4911044389275],
[40,18.8760828511939],
])
else:
xp_data = np.array([
[2.1505376344086,2174.93473265882],
[3.2258064516129,976.57521499222],
[4.3010752688172,551.230288892781],
[5.3763440860215,358.726507336079],
[6.45161290322581,254.060021470301],
[7.52688172043011,189.686506817313],
[8.60215053763441,146.166664840847],
[9.67741935483871,117.322772974465],
[10.752688172043,95.3354255840365],
[11.8279569892473,80.0166993348854],
[12.9032258064516,67.2431130448907],
[13.9784946236559,58.8731742794513],
[16.1290322580645,44.7528995777509],
])
plt.plot(xp_data[:,0],xp_data[:,1],'ko')
# Plot labels and limits
plt.xlim([1,100])
plt.ylim([10,1e4])
plt.xlabel("Id / mdot")
plt.ylabel("P / Pmag")
plt.show()
|
import numpy as np
import matplotlib.pyplot as plt
# Physical Constants
m = 0.1 #kg
Ixx = 0.00062 #kg-m^2
Iyy = 0.00113 #kg-m^2
Izz = 0.9*(Ixx + Iyy) #kg-m^2 (Assume nearly flat object, z=0)
dx = 0.114 #m
dy = 0.0825 #m
g = 9.81 #m/s/s
DTR = 1/57.3; RTD = 57.3
# Simulation time and model parameters
tstep = 0.02 # Sampling time (sec)
simulation_time = 30 # Length of time to run simulation (sec)
t = np.arange(0,simulation_time,tstep) # time array
# Model size
n_states = 12 # Number of states
n_inputs = 4 # Number of inputs
# Initialize State Conditions
x = np.zeros((n_states,np.size(t))) # time history of state vectors
# Initial height
x[11,0] = 0.0
# Initialize inputs
u = np.zeros((n_inputs,np.size(t))) # time history of input vectors
# Initial control inputs
u[:,0] = np.zeros(4)
from scipy.optimize import fsolve
# Propeller Thrust equations as a function of propeller induced velocity, vi
def thrustEqn(vi, *prop_params):
# Unpack parameters
R,A,rho,a,b,c,eta,theta0,theta1,U,V,W,Omega = prop_params
# Calculate local airflow velocity at propeller with vi, V'
Vprime = np.sqrt(U**2 + V**2 + (W - vi)**2)
# Calculate Thrust averaged over one revolution of propeller using vi
Thrust = 1/4 * rho * a * b * c * R * \
( (W - vi) * Omega * R + 2/3 * (Omega * R)**2 * (theta0 + 3/4 * theta1) + \
(U**2 + V**2) * (theta0 + 1/2 * theta1) )
# Calculate residual for equation: Thrust = mass flow rate * delta Velocity
residual = eta * 2 * vi * rho * A * Vprime - Thrust
return residual
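# Hedged standalone check of thrustEqn (kept as a comment so the script's
# behavior is unchanged; the parameter values are illustrative): at hover
# (U = V = W = 0), fsolve finds the induced velocity vi at which the
# momentum-theory mass-flow term balances the blade-element thrust.
#
#   _R = 0.0762; _A = np.pi * _R**2
#   _params = (_R, _A, 1.225, 5.7, 2, 0.0274, 1,
#              0.42, -0.28,            # theta0, theta1 (approx. for a 6x3 prop)
#              0.0, 0.0, 0.0,          # U, V, W at hover
#              2 * np.pi / 60 * 3200)  # Omega for 3200 RPM
#   vi_hover = fsolve(thrustEqn, 0.1, args=_params)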
def Fthrust(x, u, dx, dy):
# Inputs: Current state x[k], Commanded Propeller RPM inputs u[k],
# Propeller location distances dx, dy (m)
# Returns: Thrust vector for 4 propellers (Newtons)
# Propeller Configuration parameters
R = 0.0762 # propeller length/ disk radius (m)
A = np.pi * R ** 2
rho = 1.225 #kg/m^3 at MSL
a = 5.7 # Lift curve slope used in example in Stevens & Lewis
b = 2 # number of blades
c = 0.0274 # mean chord length (m)
eta = 1 # propeller efficiency
# Manufacturer propeller length x pitch specification:
p_diameter = 6 #inches
p_pitch = 3 #inches
theta0 = 2*np.arctan2(p_pitch, (2 * np.pi * 3/4 * p_diameter/2))
theta1 = -4 / 3 * np.arctan2(p_pitch, 2 * np.pi * 3/4 * p_diameter/2)
# Local velocity at propeller from vehicle state information
ub, vb, wb = x[0], x[1], x[2]
p, q, r = x[3], x[4], x[5]
    # Transform velocity to local propeller location:
# [U,V,W] = [ub,vb,wb] + [p,q,r] x [dx,dy,0]
U = ub - r * dy
V = vb + r * dx
W = wb - q * dx + p * dy
# Convert commanded RPM to rad/s
Omega = 2 * np.pi / 60 * u
#Collect propeller config, state, and input parameters
prop_params = (R,A,rho,a,b,c,eta,theta0,theta1,U,V,W,Omega)
# Numerically solve for propeller induced velocity, vi
# using nonlinear root finder, fsolve, and prop_params
vi0 = 0.1 # initial guess for vi
vi = fsolve(thrustEqn, vi0, args=prop_params)
# Plug vi back into Thrust equation to solve for T
Vprime = np.sqrt(U**2 + V**2 + (W - vi)**2)
Thrust = eta * 2 * vi * rho * A * Vprime
return Thrust
# Torque function
def T(F,dx,dy):
# Returns torque about cg given thrust force and dx,dy distance from cg
#### PLACEHOLDER ####
return 0
# Nonlinear Dynamics Equations of Motion
def stateDerivative(x,u):
# Inputs: state vector (x), input vector (u)
# Returns: time derivative of state vector (xdot)
# State Vector Reference:
#idx 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
#x = [u, v, w, p, q, r, phi, the, psi, xE, yE, hE]
# Store state variables in a readable format
ub = x[0]
vb = x[1]
wb = x[2]
p = x[3]
q = x[4]
r = x[5]
phi = x[6]
theta = x[7]
psi = x[8]
xE = x[9]
yE = x[10]
hE = x[11]
# Calculate forces from propeller inputs (u)
F1 = Fthrust(x, u[0], dx, dy)
F2 = Fthrust(x, u[1], -dx, -dy)
F3 = Fthrust(x, u[2], dx, -dy)
F4 = Fthrust(x, u[3], -dx, dy)
Fz = F1 + F2 + F3 + F4
L = (F2 + F3) * dy - (F1 + F4) * dy
M = (F1 + F3) * dx - (F2 + F4) * dx
N = -T(F1,dx,dy) - T(F2,dx,dy) + T(F3,dx,dy) + T(F4,dx,dy)
# Pre-calculate trig values
cphi = np.cos(phi); sphi = np.sin(phi)
cthe = np.cos(theta); sthe = np.sin(theta)
cpsi = np.cos(psi); spsi = np.sin(psi)
# Calculate the derivative of the state matrix using EOM
xdot = np.zeros(12)
xdot[0] = -g * sthe + r * vb - q * wb # = udot
xdot[1] = g * sphi*cthe - r * ub + p * wb # = vdot
xdot[2] = 1/m * (-Fz) + g*cphi*cthe + q * ub - p * vb # = wdot
xdot[3] = 1/Ixx * (L + (Iyy - Izz) * q * r) # = pdot
xdot[4] = 1/Iyy * (M + (Izz - Ixx) * p * r) # = qdot
xdot[5] = 1/Izz * (N + (Ixx - Iyy) * p * q) # = rdot
xdot[6] = p + (q*sphi + r*cphi) * sthe / cthe # = phidot
xdot[7] = q * cphi - r * sphi # = thetadot
xdot[8] = (q * sphi + r * cphi) / cthe # = psidot
xdot[9] = cthe*cpsi*ub + (-cphi*spsi + sphi*sthe*cpsi) * vb + \
(sphi*spsi+cphi*sthe*cpsi) * wb # = xEdot
xdot[10] = cthe*spsi * ub + (cphi*cpsi+sphi*sthe*spsi) * vb + \
(-sphi*cpsi+cphi*sthe*spsi) * wb # = yEdot
xdot[11] = -1*(-sthe * ub + sphi*cthe * vb + cphi*cthe * wb) # = hEdot
return xdot
# Plot Thrust as a function of RPM for various vertical velocity conditions
RPM = np.linspace(1000,6000,200)
vertvel = np.array([0,0,1] + 9*[0])
Thrust_m2vel = np.array([Fthrust(2*vertvel,rpmIn,dx,dy) for rpmIn in RPM])
Thrust_m1vel = np.array([Fthrust(1*vertvel,rpmIn,dx,dy) for rpmIn in RPM])
Thrust_0vel = np.array([Fthrust(0*vertvel,rpmIn,dx,dy) for rpmIn in RPM])
Thrust_p1vel = np.array([Fthrust(-1*vertvel,rpmIn,dx,dy) for rpmIn in RPM])
Thrust_p2vel = np.array([Fthrust(-2*vertvel,rpmIn,dx,dy) for rpmIn in RPM])
fig = plt.figure(figsize=(8,8))
plt.plot(RPM, 4 * Thrust_m2vel / (m*g) )
plt.plot(RPM, 4 * Thrust_m1vel / (m*g) )
plt.plot(RPM, 4 * Thrust_0vel / (m*g) )
plt.plot(RPM, 4 * Thrust_p1vel / (m*g) )
plt.plot(RPM, 4 * Thrust_p2vel / (m*g) )
plt.plot(RPM, np.ones(np.size(RPM)), 'k--')
plt.legend(('Airspeed = -2 m/s', 'Airspeed = -1 m/s', 'Airspeed = 0 m/s',
            'Airspeed = 1 m/s', 'Airspeed = 2 m/s'), loc='upper left')
plt.xlabel('Propeller RPM (x4)')
plt.ylabel('Thrust (g)')
plt.title('Quadcopter Thrust for different Vertical Airspeeds')
plt.show()
# def controlInputs(x, t):
# # Inputs: Current state x[k], time t
# # Returns: Control inputs u[k]
# #### Placeholder Function ####
# # Trim RPM for all 4 propellers to provide thrust for a level hover
# trim = 3200
# pitch_cmd = 0
# roll_cmd = 0
# climb_cmd = 0
# yaw_cmd = 0
# # Example open loop control inputs to test dynamics:
# # Climb
# if t < 11.0:
# climb_cmd = 500
# # Pitch Forward
# if t > 8.0:
# pitch_cmd = -10
# if t > 9.0:
# pitch_cmd = 10
# if t > 10.0:
# pitch_cmd = 0
# # Pitch Backward
# if t > 12.0:
# pitch_cmd = 15
# if t > 13.0:
# pitch_cmd = -15
# if t > 14.0:
# pitch_cmd = 0
# # Increase lift
# if t > 16.0:
# climb_cmd = 150
# # RPM command based on pitch, roll, climb, yaw commands
# u = np.zeros(4)
# u[0] = trim + ( pitch_cmd + roll_cmd + climb_cmd - yaw_cmd) / 4
# u[1] = trim + (-pitch_cmd - roll_cmd + climb_cmd - yaw_cmd) / 4
# u[2] = trim + ( pitch_cmd - roll_cmd + climb_cmd + yaw_cmd) / 4
# u[3] = trim + (-pitch_cmd + roll_cmd + climb_cmd + yaw_cmd) / 4
# return u
# # 4th Order Runge Kutta Calculation
# def RK4(x,u,dt):
# # Inputs: x[k], u[k], dt (time step, seconds)
# # Returns: x[k+1]
# # Calculate slope estimates
# K1 = stateDerivative(x, u)
# K2 = stateDerivative(x + K1 * dt / 2, u)
# K3 = stateDerivative(x + K2 * dt / 2, u)
# K4 = stateDerivative(x + K3 * dt, u)
# # Calculate x[k+1] estimate using combination of slope estimates
# x_next = x + 1/6 * (K1 + 2*K2 + 2*K3 + K4) * dt
# return x_next
# # March through time array and numerically solve for vehicle states
# for k in range(0, np.size(t) - 1):
# # Determine control inputs based on current state
# u[:,k] = controlInputs(x[:,k], t[k])
# # Predict state after one time step
# x[:,k+1] = RK4(x[:,k], u[:,k], tstep)
# plt.figure(1, figsize=(8,8))
# plt.subplot(311)
# plt.plot(t,x[11,:],'b',label='h')
# plt.ylabel('h (m)')
# #plt.xlabel('Time (sec)')
# #plt.legend(loc='best')
# plt.title('Time History of Height, X Position, and Pitch')
# plt.subplot(312)
# plt.plot(t,x[9,:],'b',label='x')
# plt.ylabel('x (m)')
# #plt.xlabel('Time (sec)')
# plt.subplot(313)
# plt.plot(t,x[7,:]*RTD,'b',label='theta')
# plt.ylabel('Theta (deg)')
# plt.xlabel('Time (sec)')
# plt.figure(2, figsize=(8,8))
# ax = plt.subplot(1,1,1)
# plt.plot(x[9,0:-1:20],x[11,0:-1:20],'bo-',label='y')
# plt.text(x[9,0] + 0.1, x[11,0],'START')
# plt.text(x[9,-1], x[11,-1],'END')
# plt.ylabel('h [m]'); plt.xlabel('x [m]')
# ax.axis('equal')
# #plt.legend(loc='best')
# plt.title('Vertical Profile')
# plt.figure(3, figsize=(8,4))
# plt.plot(t[0:-1],u[0,0:-1],'b',label='T1')
# plt.plot(t[0:-1],u[1,0:-1],'g',label='T2')
# plt.plot(t[0:-1],u[2,0:-1],'r',label='T3')
# plt.plot(t[0:-1],u[3,0:-1],'y',label='T4')
# plt.xlabel('Time (sec)')
# plt.ylabel('Propeller RPM')
# plt.legend(loc='best')
# plt.title('Time History of Control Inputs')
# plt.show()
|
<gh_stars>1-10
# Copyright: <NAME>, 2021
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.functional import softmax
class RNN(torch.nn.Module):
def __init__(self, rnn_config):
super(RNN, self).__init__()
self.embedding_layer = nn.Embedding(
num_embeddings=rnn_config['num_embeddings'],
embedding_dim=rnn_config['embedding_dim'],
padding_idx=rnn_config['num_embeddings'] - 1
)
if rnn_config['rnn_type'] == 'LSTM':
self.rnn = nn.LSTM(
input_size=rnn_config['input_size'],
hidden_size=rnn_config['hidden_size'],
num_layers=rnn_config['num_layers'],
batch_first=True,
dropout=rnn_config['dropout']
)
elif rnn_config['rnn_type'] == 'GRU':
self.rnn = nn.GRU(
input_size=rnn_config['input_size'],
hidden_size=rnn_config['hidden_size'],
num_layers=rnn_config['num_layers'],
batch_first=True,
dropout=rnn_config['dropout']
)
else:
raise ValueError(
"rnn_type should be either 'LSTM' or 'GRU'."
)
# output does not include <sos> and <pad>, so
# decrease the num_embeddings by 2
self.linear = nn.Linear(
rnn_config['hidden_size'], rnn_config['num_embeddings'] - 2
)
def forward(self, data, lengths):
embeddings = self.embedding_layer(data)
# pack the padded input
        # the lengths are decreased by 1 because we don't
        # use <eos> for input and we don't need <sos> for
        # output during training.
embeddings = pack_padded_sequence(
input=embeddings,
lengths=lengths,
batch_first=True,
enforce_sorted=False
)
        # recurrent network, discard (h_n, c_n) in output.
        # Teacher forcing is used here, so we directly feed
        # the whole sequence to the model.
embeddings, _ = self.rnn(embeddings)
# linear layer to generate input of softmax
embeddings = self.linear(embeddings.data)
# return the packed representation for backpropagation,
# the targets will also be packed.
return embeddings
def sample(self, batch_size, vocab, device, max_length=140):
"""Use this function if device is GPU"""
# get integer of "start of sequence"
start_int = vocab.vocab['<sos>']
# create a tensor of shape [batch_size, seq_step=1]
sos = torch.ones(
[batch_size, 1],
dtype=torch.long,
device=device
)
sos = sos * start_int
# sample first output
output = []
x = self.embedding_layer(sos)
x, hidden = self.rnn(x)
x = self.linear(x)
x = softmax(x, dim=-1)
x = torch.multinomial(x.squeeze(), 1)
output.append(x)
# a tensor to indicate if the <eos> token is found
# for all data in the mini-batch
finish = torch.zeros(batch_size, dtype=torch.bool).to(device)
# sample until every sequence in the mini-batch
# has <eos> token
for _ in range(max_length):
# forward rnn
x = self.embedding_layer(x)
x, hidden = self.rnn(x, hidden)
x = self.linear(x)
x = softmax(x, dim=-1)
# sample
x = torch.multinomial(x.squeeze(), 1)
output.append(x)
# terminate if <eos> is found for every data
eos_sampled = (x == vocab.vocab['<eos>']).data
finish = torch.logical_or(finish, eos_sampled.squeeze())
if torch.all(finish):
return torch.cat(output, -1)
return torch.cat(output, -1)
def sample_cpu(self, vocab):
"""Use this function if device is CPU"""
output = []
# get integer of "start of sequence"
start_int = vocab.vocab['<sos>']
# create a tensor of shape [batch_size=1, seq_step=1]
sos = torch.tensor(
start_int,
dtype=torch.long
).unsqueeze(dim=0
).unsqueeze(dim=0)
# sample first output
x = self.embedding_layer(sos)
x, hidden = self.rnn(x)
x = self.linear(x)
x = softmax(x, dim=-1)
x = torch.multinomial(x.squeeze(), 1)
output.append(x.item())
# use first output to iteratively sample until <eos> occurs
while output[-1] != vocab.vocab['<eos>']:
x = x.unsqueeze(dim=0)
x = self.embedding_layer(x)
x, hidden = self.rnn(x, hidden)
x = self.linear(x)
x = softmax(x, dim=-1)
x = torch.multinomial(x.squeeze(), 1)
output.append(x.item())
# convert integers to tokens
output = [vocab.int2tocken[x] for x in output]
# popout <eos>
output.pop()
# convert to a single string
output = vocab.combine_list(output)
return output
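if __name__ == '__main__':
    # Hedged usage sketch: the config values and vocabulary size below are
    # hypothetical; this only exercises the teacher-forced forward pass above.
    config = {
        'num_embeddings': 30,   # vocab size including <sos>, <eos>, <pad>
        'embedding_dim': 64,
        'rnn_type': 'GRU',
        'input_size': 64,       # must match embedding_dim
        'hidden_size': 128,
        'num_layers': 2,
        'dropout': 0.1,
    }
    model = RNN(config)
    batch = torch.randint(0, 28, (4, 12))       # [batch, seq] of token ids
    lengths = torch.tensor([12, 10, 9, 7]) - 1  # inputs drop <eos>, hence -1
    logits = model(batch, lengths)              # packed logits over vocab - 2
    print(logits.shape)                         # torch.Size([34, 28])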
|
# import Asclepius dependencies
from pandas.core.frame import DataFrame
from asclepius.instelling import GGZ, ZKH
from asclepius.medewerker import Medewerker
from asclepius.portaaldriver import PortaalDriver
from asclepius.testen import TestFuncties, Verklaren
# import other dependencies
from typing import Union
from pandas import ExcelWriter
class ReleaseTesten:
def __init__(self, gebruiker: Medewerker, losse_bestanden: bool = False):
        # Initialization
self.gebruiker = gebruiker
self.portaaldriver = PortaalDriver(self.gebruiker)
self.testfuncties = TestFuncties()
self.verklaren = Verklaren()
self.losse_bestanden = losse_bestanden
return None
def test_da(self, *instellingen: Union[GGZ, ZKH]):
        # Download the Excel files
mislukt_download = []
for instelling in instellingen:
try:
self.portaaldriver.webscraper_da(instelling)
except:
mislukt_download.append(instelling.klant_code)
        # Test the DA
mislukt_da = []
for instelling in instellingen:
try:
                # Check counts
self.testfuncties.aantallencheck(instelling, False)
self.testfuncties.aantallencheck(instelling, True)
                # Find standard differences
self.verklaren.standaardverschillen_da(instelling, False)
self.verklaren.standaardverschillen_da(instelling, True)
except:
mislukt_da.append(instelling.klant_code)
if self.losse_bestanden:
for instelling in instellingen:
if instelling.klant_code not in set(mislukt_download + mislukt_da):
with ExcelWriter(f'Bevindingen DA {instelling.klant_code}.xlsx') as writer:
instelling.bevindingen_da.to_excel(writer, sheet_name=f'{instelling.klant_code}')
instelling.bevindingen_da_test.to_excel(writer, sheet_name=f'{instelling.klant_code} test')
else: pass
else:
with ExcelWriter(f'Bevindingen DA.xlsx') as writer:
for instelling in instellingen:
if instelling.klant_code not in set(mislukt_download + mislukt_da):
instelling.bevindingen_da.to_excel(writer, sheet_name=f'{instelling.klant_code}')
instelling.bevindingen_da_test.to_excel(writer, sheet_name=f'{instelling.klant_code} test')
else: pass
        # Print failed downloads/tests
if len(mislukt_download) != 0:
print('Mislukte downloads:', ' '.join(mislukt_download))
else:
print('Geen mislukte downloads!')
if len(mislukt_da) != 0:
print('Mislukte DA tests:', ' '.join(mislukt_da))
else:
print('Geen mislukte DA tests!')
return None
def test_bi(self, *instellingen: Union[GGZ, ZKH]):
        # Download the Excel files
mislukt_download = []
for instelling in instellingen:
try:
self.portaaldriver.webscraper_bi(instelling)
except:
mislukt_download.append(instelling.klant_code)
        # Test the BI
mislukt_bi = []
for instelling in instellingen:
try:
                # Compare the BI performance cards
self.testfuncties.prestatiekaarten_vergelijken(instelling, 'bi')
except:
mislukt_bi.append(instelling.klant_code)
if self.losse_bestanden:
for instelling in instellingen:
if instelling.klant_code not in set(mislukt_download + mislukt_bi):
with ExcelWriter(f'Bevindingen BI {instelling.klant_code}.xlsx') as writer:
instelling.bevindingen_bi.to_excel(writer, sheet_name=f'{instelling.klant_code}')
else: pass
else:
with ExcelWriter(f'Bevindingen BI.xlsx') as writer:
for instelling in instellingen:
if instelling.klant_code not in set(mislukt_download + mislukt_bi):
instelling.bevindingen_bi.to_excel(writer, sheet_name=f'{instelling.klant_code}')
else: pass
        # Print failed downloads/tests
if len(mislukt_download) != 0:
print('Mislukte downloads:', ' '.join(mislukt_download))
else:
print('Geen mislukte downloads!')
if len(mislukt_bi) != 0:
print('Mislukte BI tests:', ' '.join(mislukt_bi))
else:
print('Geen mislukte BI tests!')
return None
def test_zpm(self, *instellingen: Union[GGZ, ZKH]):
        # Download the Excel files
mislukt_download = []
for instelling in instellingen:
try:
self.portaaldriver.webscraper_zpm(instelling)
except:
mislukt_download.append(instelling.klant_code)
        # Test the ZPM
mislukt_zpm = []
for instelling in instellingen:
try:
                # Compare the ZPM performance cards
self.testfuncties.prestatiekaarten_vergelijken(instelling, 'zpm')
except:
mislukt_zpm.append(instelling.klant_code)
if self.losse_bestanden:
for instelling in instellingen:
if instelling.klant_code not in set(mislukt_download + mislukt_zpm):
with ExcelWriter(f'Bevindingen ZPM {instelling.klant_code}.xlsx') as writer:
instelling.bevindingen_zpm.to_excel(writer, sheet_name=f'{instelling.klant_code}')
else: pass
else:
with ExcelWriter(f'Bevindingen ZPM.xlsx') as writer:
for instelling in instellingen:
if instelling.klant_code not in set(mislukt_download + mislukt_zpm):
instelling.bevindingen_zpm.to_excel(writer, sheet_name=f'{instelling.klant_code}')
else: pass
        # Print failed downloads/tests
if len(mislukt_download) != 0:
print('Mislukte downloads:', ' '.join(mislukt_download))
else:
print('Geen mislukte downloads!')
if len(mislukt_zpm) != 0:
print('Mislukte ZPM tests:', ' '.join(mislukt_zpm))
else:
print('Geen mislukte ZPM tests!')
return None
def test_zpm_nza(self, *instellingen: Union[GGZ, ZKH]):
        # Download the Excel files
mislukt_download = []
for instelling in instellingen:
try:
self.portaaldriver.webscraper_zpm_nza(instelling)
except:
mislukt_download.append(instelling.klant_code)
        # Test the ZPM_NZA
mislukt_zpm = []
for instelling in instellingen:
try:
                # Compare the ZPM_NZA performance cards
self.testfuncties.prestatiekaarten_vergelijken(instelling, 'zpm_nza')
except:
mislukt_zpm.append(instelling.klant_code)
if self.losse_bestanden:
for instelling in instellingen:
if instelling.klant_code not in set(mislukt_download + mislukt_zpm):
with ExcelWriter(f'Bevindingen ZPM 100% NZA {instelling.klant_code}.xlsx') as writer:
instelling.bevindingen_zpm_nza.to_excel(writer, sheet_name=f'{instelling.klant_code}')
else: pass
else:
with ExcelWriter(f'Bevindingen ZPM 100% NZA.xlsx') as writer:
for instelling in instellingen:
if instelling.klant_code not in set(mislukt_download + mislukt_zpm):
instelling.bevindingen_zpm_nza.to_excel(writer, sheet_name=f'{instelling.klant_code}')
else: pass
        # Print failed downloads/tests
if len(mislukt_download) != 0:
print('Mislukte downloads:', ' '.join(mislukt_download))
else:
print('Geen mislukte downloads!')
if len(mislukt_zpm) != 0:
print('Mislukte ZPM 100% NZA tests:', ' '.join(mislukt_zpm))
else:
print('Geen mislukte ZPM 100% NZA tests!')
return None
def test_slm(self, *instellingen: Union[GGZ, ZKH]):
        # Download the Excel files
mislukt_download = []
for instelling in instellingen:
try:
self.portaaldriver.webscraper_slm(instelling)
except:
mislukt_download.append(instelling.klant_code)
        # Test the SLM
bevindingen_slm = DataFrame({'Instelling': [], 'Delta totaal A': [], 'Delta totaal P': []})
for instelling in instellingen:
if instelling.klant_code not in set(mislukt_download):
new_row = {'Instelling': instelling.klant_code, 'Delta totaal A': instelling.slm_delta_a, 'Delta totaal P': instelling.slm_delta_p}
bevindingen_slm = bevindingen_slm.append(new_row, ignore_index = True)
else: pass
with ExcelWriter(f'Bevindingen SLM.xlsx') as writer:
for instelling in instellingen:
if instelling.klant_code not in set(mislukt_download):
bevindingen_slm.to_excel(writer, sheet_name=f'{instelling.klant_code}')
else: pass
        # Print failed downloads/tests
if len(mislukt_download) != 0:
print('Mislukte downloads:', ' '.join(mislukt_download))
else:
print('Geen mislukte downloads!')
return None
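# Hedged usage sketch (the constructor arguments of Medewerker and GGZ/ZKH are
# defined elsewhere and deliberately left elided here):
#
#   gebruiker = Medewerker(...)            # portal credentials
#   tester = ReleaseTesten(gebruiker, losse_bestanden=True)
#   tester.test_da(GGZ(...), ZKH(...))     # writes 'Bevindingen DA*.xlsx'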
|
<reponame>SuLab/myvariant.info
from .clinvar_xml_parser import load_data as load_common
import biothings.dataload.uploader as uploader
from dataload.uploader import SnpeffPostUpdateUploader
SRC_META = {
"url" : "https://www.ncbi.nlm.nih.gov/clinvar/",
"license_url" : "https://www.ncbi.nlm.nih.gov/clinvar/intro/",
"license_url_short": "https://goo.gl/OaHML9"
}
class ClinvarBaseUploader(SnpeffPostUpdateUploader):
def get_pinfo(self):
pinfo = super(ClinvarBaseUploader,self).get_pinfo()
# clinvar parser has some memory requirements, ~1.5G
pinfo.setdefault("__reqs__",{})["mem"] = 1.5 * (1024**3)
return pinfo
@classmethod
def get_mapping(klass):
mapping = {
"clinvar": {
"properties": {
"hg19": {
"properties": {
"start": {
"type": "integer"
},
"end": {
"type": "integer"
}
}
},
"hg38": {
"properties": {
"start": {
"type": "integer"
},
"end": {
"type": "integer"
}
}
},
"omim": {
"type": "string",
"analyzer": "string_lowercase"
},
"uniprot": {
"type": "string",
"analyzer": "string_lowercase"
},
"cosmic": {
"type": "string",
"analyzer": "string_lowercase"
},
"dbvar": {
"type": "string",
"analyzer": "string_lowercase"
},
"chrom": {
"type": "string",
"analyzer": "string_lowercase"
},
"gene": {
"properties": {
"symbol": {
"type": "string",
"analyzer": "string_lowercase",
"include_in_all": True
},
"id": {
"type": "long"
}
}
},
"genotypeset": {
"properties": {
"type": {
"type": "string",
"analyzer": "string_lowercase"
},
"genotype": {
"type": "string",
"analyzer": "string_lowercase"
}
}
},
"type": {
"type": "string",
"analyzer": "string_lowercase"
},
"rsid": {
"type": "string",
"analyzer": "string_lowercase",
"include_in_all": True
},
"rcv": {
#"type": "nested",
#"include_in_parent": True, # NOTE: this is not available in ES 2.x
"properties": {
"accession": {
"type": "string",
"analyzer": "string_lowercase",
"include_in_all": True
},
"clinical_significance": {
"type": "string"
},
"number_submitters": {
"type": "byte"
},
"review_status": {
"type": "string"
},
"last_evaluated": {
"type": "date"
},
"preferred_name": {
"type": "string",
"analyzer": "string_lowercase"
},
"origin": {
"type": "string",
"analyzer": "string_lowercase"
},
"conditions": {
"properties": {
"name": {
"type": "string"
},
"synonyms": {
"type": "string"
},
"identifiers": {
"properties": {
"efo": {
"type": "string",
"analyzer": "string_lowercase"
},
"gene": {
"type": "string",
"analyzer": "string_lowercase"
},
"medgen": {
"type": "string",
"analyzer": "string_lowercase"
},
"omim": {
"type": "string",
"analyzer": "string_lowercase"
},
"orphanet": {
"type": "string",
"analyzer": "string_lowercase"
},
"human_phenotype_ontology": {
"type": "string",
"analyzer": "string_lowercase"
}
}
},
"age_of_onset": {
"type": "string",
"analyzer": "string_lowercase"
}
}
}
}
},
"cytogenic": {
"type": "string",
"analyzer": "string_lowercase"
},
"allele_id": {
"type": "integer"
},
"variant_id": {
"type": "integer"
},
"coding_hgvs_only": {
"type": "boolean"
},
"ref": {
"type": "string",
"analyzer": "string_lowercase"
},
"alt": {
"type": "string",
"analyzer": "string_lowercase"
},
"hgvs": {
"properties": {
"genomic": {
"type": "string",
"analyzer": "string_lowercase",
"include_in_all": True
},
"coding": {
"type": "string",
"analyzer": "string_lowercase",
"include_in_all": True
},
"non-coding": {
"type": "string",
"analyzer": "string_lowercase",
"include_in_all": True
},
"protein": {
"type": "string",
"analyzer": "string_lowercase",
"include_in_all": True
}
}
}
}
}
}
return mapping
class ClinvarHG19Uploader(ClinvarBaseUploader):
name = "clinvar_hg19"
main_source = "clinvar"
__metadata__ = {
"mapper" : 'observed',
"assembly" : "hg19",
"src_meta" : SRC_META,
}
def load_data(self,data_folder):
self.logger.info("Load data from folder '%s'" % data_folder)
try:
return load_common(data_folder,"hg19")
except Exception as e:
import traceback
self.logger.error("Error while uploading, %s:\n%s" % (e,traceback.format_exc()))
raise
class ClinvarHG38Uploader(ClinvarBaseUploader):
name = "clinvar_hg38"
main_source = "clinvar"
__metadata__ = {
"mapper" : 'observed',
"assembly" : "hg38",
"src_meta" : SRC_META,
}
def load_data(self,data_folder):
self.logger.info("Load data from folder '%s'" % data_folder)
try:
return load_common(data_folder,"hg38")
except Exception as e:
import traceback
self.logger.error("Error while uploading, %s:\n%s" % (e,traceback.format_exc()))
raise
|
<gh_stars>1-10
import numpy as np
import math
import os
import sys
sys.path.append("../..")
from PARAMETERS import *
from subvision.utils import CameraFacing
class Watchout:
'''
Calculate the distance to an object from bounding box coordinates, camera information, and expected object sizes in meters.
    Implements an 'ensemble' approach, where two to three different ways of triangulating the distance are averaged.
Usage:
sort = Sort()
watchout = Watchout()
for frame in video:
detections = yolo_detector(frame)
tracked_dets = sort(detections)
        watchout_results = watchout.step(tracked_dets)  # tracked detections with an appended distance estimate
'''
def __init__(
self,
index_to_name_dict = {},
cam0_width = 960,
cam0_height = 540,
cam1_width = 960,
cam1_height = 540,
buffer_len = 10,
area_min_delta = 40,
area_max_delta = 4000,
left_hand_drive = False,
camera_info = None,
):
self.index_to_name_dict = index_to_name_dict
self.cam0_width = cam0_width
self.cam0_height = cam0_height
self.cam1_width = cam1_width
self.cam1_height = cam1_height
self.buffer_len = buffer_len
self.area_min_delta = area_min_delta
self.area_max_delta = area_max_delta
self.left_hand_drive = left_hand_drive
self.camera_info = camera_info
self.rolling_filter = [False for _ in range(buffer_len)] # queue filter to reduce false positives
self.memory = {} # dictionary where key is track_id, and value is a 'Thing' object which contains all the other information.
def step(self, tracked_dets = np.empty((0,9))):
'''
Parameters:
tracked_dets, a np.array where
0 : x1
1 : y1
2 : x2
3 : y2
4 : category index
5 : d(x_center)/dt
6 : d(y_center)/dt
7 : d(area)/dt
8 : object_id (tracked, unique id)
Returns:
tracked_dets +
9 : distance
'''
row, col = tracked_dets.shape
watchout_output_dets = np.empty((0,col+1))
for i in range(len(tracked_dets)):
obj = tracked_dets[i]
            if obj[8].item() in self.memory:  # we have seen this track before
watchedthing = self.memory[obj[8].item()]
watchedthing.step(obj)
#print(f"Predicted distance: {watchedthing.pred_distance}m")
obj = np.hstack((obj, watchedthing.pred_distance))
else:
self.memory.update({obj[8].item(): WatchedThing(obj, self.camera_info)}) # create new Thing
                # if memory contains more than 50 entries, delete the 25 earliest
                if len(self.memory) > 50:
                    earliest_indexes = sorted(self.memory.keys())[:25]
for ind in earliest_indexes:
del self.memory[ind]
                obj = np.hstack((obj, 100))  # default distance placeholder for a newly-seen object
watchout_output_dets = np.vstack((obj, watchout_output_dets))
return watchout_output_dets
class WatchedThing:
def __init__(self, obj, camera_info):
self.camera_info = camera_info
self.update_basic_properties(obj)
# history dependent properties
self.dddt = None # distance change
def step(self, obj):
self.update_basic_properties(obj)
def update_basic_properties(self, obj):
# verbatim properties
self.x1 = obj[0].item()
self.y1 = obj[1].item()
self.x2 = obj[2].item()
self.y2 = obj[3].item()
self.cat_ind = obj[4].item()
self.dxdt = obj[5].item()
self.dydt = obj[6].item()
self.dadt = obj[7].item()
# calculated properties
self.area = ((obj[2]-obj[0]) * (obj[3]-obj[1])).item()
self.x_c = (obj[0] + ((obj[2]-obj[0])/2)).item()
self.y_c = (obj[1] + ((obj[3]-obj[1])/2)).item()
self.pred_distance = self.predict_distance_from_box()
def predict_distance_from_box(self):
'''
distance predicted using vertical = known_height(meters) / {tan(camera_angle_vertical(rad) * (object_height(pixels) / frame_height(pixels)) }
distance predicted using horizontal = known_width(meters) / {tan(camera_angle_horizontal(rad) * (object_width(pixels) / frame_width(pixels)) }
distance predicted using bottom-center of box = camera_installation_height / { tan( 0.5*camera_angle_vertical * ( obj_y2 - (frame_height / 2)) / ( frame_height / 2 ))}
'''
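        # Illustrative check of the height formula with hypothetical numbers: a 1.7 m tall
        # pedestrian spanning 90 of 540 vertical pixels under a 60 deg (~1.047 rad) vertical
        # FOV gives d ~= 1.7 / tan(1.047 * 90/540) ~= 1.7 / 0.176 ~= 9.6 m.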
cat_ind = self.cat_ind
sizes_dict = KNOWN_CLASS_SIZES
        obj_width = self.x2 - self.x1
        obj_height = self.y2 - self.y1
frame_width = UNIFIED_WIDTH
frame_height = UNIFIED_HEIGHT
width_fov = self.camera_info.width_fov
height_fov = self.camera_info.height_fov
        width_fov = math.radians(width_fov)  # deg to rad
        height_fov = math.radians(height_fov)  # deg to rad
cam_installation_height = self.camera_info.install_height
d_pred_width = smart_divide(sizes_dict[cat_ind][0], math.tan(width_fov * (obj_width/frame_width)))
d_pred_height = smart_divide(sizes_dict[cat_ind][1], math.tan(height_fov * (obj_height/frame_height)))
# calculating distance based on y2 was found not useful
avg_d_pred = (d_pred_width + d_pred_height)/2
return avg_d_pred
def smart_divide(a, b):
    # avoid ZeroDivisionError: fall back to dividing by (b + 1) when b is zero
    return a / b if b != 0 else a / (b + 1)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# controller.py
#
# Home to the MyController class, used for:
# - Interacting directly with hardware: LEDs, PIR sensor
# - Interacting directly with the SpaceAuth API and the datalogger script
# - Checking IDs
# - Granting/denying access
import time
import sys
import os
import subprocess
from constants import *
from config import *
import RPi.GPIO as GPIO
from urllib2 import urlopen, Request
from json import loads
class MyController:
def __init__(self):
print "Initializing controller"
self.idList = []
#---------- API Request ----------
# Make an authorized request to the API using
# the key found in constants.py
def APIRequest(self, _url):
_request = Request(_url, None, {'X-Api-Key':API_Key})
return urlopen(_request)
#---------- API Check Endorsement ----------
# Endorsements are listed in a JSON file generated
# by the web API.
#
# Arguments:
# user_id : user id number to check
# endorsement_unique_name : unique name of endorsement in database
# station : station name ('enter', 'exit', etc.) in database
#
# Returns:
# 1 if user has endorsement, 0 otherwise
#
def APICheckEndorsement(self, user_id, endorsement_unique_name, station):
try:
endorsementsToParse = self.APIRequest(API_Users_URL +
user_id +
'/endorsements?station=' +
station).read()
except Exception as e:
print e
print "Could not access server"
return 0
print endorsementsToParse
parsedEndorsements = loads(endorsementsToParse)
for endorsement in parsedEndorsements:
if endorsement[Unique_Name_Key] == endorsement_unique_name:
return 1
return 0
#---------- API Check Admin ----------
# Returns 1 if user has admin access to the system, else 0.
def APICheckAdmin(self, user_id):
try:
userToParse = self.APIRequest(API_Users_URL + user_id).read()
parsedUser = loads(userToParse)
if "admin" in parsedUser['role']:
return 1
else:
return 0
except:
print "Could not access server"
return 0
#---------- Check Date ----------
# If today isn't the recorded date, erase the unique ID list
# and set the date.
def checkDate(self):
global Todays_Date
today = subprocess.check_output('date +"%D"', shell=True)
today = today.strip()
if Todays_Date != today:
self.idList = []
Todays_Date = today
# Get the current time
def checkTime(self):
timestamp = subprocess.check_output('date +"%H:%M"', shell=True)
timestamp = timestamp.strip()
return timestamp
#---------- Controller Check ID ----------
# See if the ID has space access and return True/False.
# Append the swipe record to the Google Sheet.
# Station: 0 for exit, 1 for enter
def checkId(self, patronId, station):
uniqueId = 0
accessed = 0
        # Assign station string
if station == 1:
stationStr = "enter"
elif station == 0:
stationStr = "exit"
else:
return False
accessGranted = self.APICheckEndorsement(patronId, Space_Access_Unique_Name, stationStr)
        accessGrantedBool = bool(accessGranted)
print patronId
self.checkDate()
# See if ID is unique for today
# and if access was granted
if patronId not in self.idList:
uniqueId = 1
self.idList.append(patronId)
else: uniqueId = 0
# Get the current time
timestamp = self.checkTime()
# Build the shell command to run the datalogger
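        # Illustrative shape of the resulting command (values hypothetical):
        #   python /home/pi/datalogger.py "04/05/21" "13:37" "1" "1" "1"&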
dataloggerString = 'python '+directory+'datalogger.py "'+Todays_Date+'" "'+timestamp+'" "'+str(accessGranted)+'" "'+str(uniqueId)+'" "'+str(station)+'"&'
subprocess.Popen(dataloggerString, shell=True)
return accessGrantedBool
#---------- Log Swipe Out ----------
#
#
def logSwipeOut(self):
self.checkDate()
timestamp = self.checkTime()
        dataloggerString = 'python '+directory+'datalogger.py "'+Todays_Date+'" "'+timestamp+'" "1" "0" "0"&'
subprocess.Popen(dataloggerString, shell=True)
#---------- Deny Access ----------
# Play sad tune and blink the red light.
# Disable motion detect for ~2.5 seconds.
def denyAccess(self, toPlay):
self.disableMotionDetect()
GPIO.output(redPin, True)
if toPlay:
os.system('aplay ' + directory + 'sad.wav &')
time.sleep(accessSleep)
self.enableMotionDetect()
GPIO.output(redPin, False)
print "Access Denied"
#---------- Grant Access ----------
# Play happy tune and blink the green light.
# Disable motion detect for ~4 seconds.
def grantAccess(self, toPlay, inOrOut):
self.disableMotionDetect()
GPIO.output(greenPin, True)
if toPlay:
if inOrOut:
os.system('aplay ' + directory + 'happy.wav &')
else:
os.system('aplay ' + directory + 'goodbye.wav &')
time.sleep(accessSleep)
self.enableMotionDetect()
GPIO.output(greenPin, False)
print "Access Granted"
# Turn a specific pin on
def LEDOn(self, LEDPin):
GPIO.output(LEDPin, True)
# Turn a specific pin off
def LEDOff(self, LEDPin):
GPIO.output(LEDPin, False)
#---------- Motion Detected ----------
# When motion is detected, play a beeping sound
# and flash the red LED
def motionDetectCallback(self, pirPin):
print "Motion Detected!"
os.system('aplay ' + directory + 'sad.wav &')
while GPIO.input(pirPin):
GPIO.output(redPin, True)
time.sleep(motionSleep)
GPIO.output(redPin, False)
time.sleep(motionSleep)
# Turn on motion detection
def enableMotionDetect(self):
GPIO.add_event_detect(pirPin, GPIO.RISING)
GPIO.add_event_callback(pirPin, self.motionDetectCallback)
    # Turn off motion detection
def disableMotionDetect(self):
GPIO.remove_event_detect(pirPin)
|
<gh_stars>0
"""Mock service for testing Service integration
A JupyterHub service running a basic HTTP server.
Used by the `mockservice` fixtures found in `conftest.py` file.
Handlers and their purpose include:
- EchoHandler: echoing proxied URLs back
- EnvHandler: retrieving service's environment variables
- APIHandler: testing service's API access to the Hub
- WhoAmIHandler: returns name of user making a request (deprecated cookie login)
- OWhoAmIHandler: returns name of user making a request (OAuth login)
"""
import json
import pprint
import os
import sys
from urllib.parse import urlparse
import requests
from tornado import web, httpserver, ioloop
from jupyterhub.services.auth import HubAuthenticated, HubOAuthenticated, HubOAuthCallbackHandler
from jupyterhub.utils import make_ssl_context
class EchoHandler(web.RequestHandler):
"""Reply to an HTTP request with the path of the request."""
def get(self):
self.write(self.request.path)
class EnvHandler(web.RequestHandler):
"""Reply to an HTTP request with the service's environment as JSON."""
def get(self):
self.set_header('Content-Type', 'application/json')
self.write(json.dumps(dict(os.environ)))
class APIHandler(web.RequestHandler):
"""Relay API requests to the Hub's API using the service's API token."""
def get(self, path):
api_token = os.environ['JUPYTERHUB_API_TOKEN']
api_url = os.environ['JUPYTERHUB_API_URL']
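        # JUPYTERHUB_API_URL typically looks like http://127.0.0.1:8081/hub/api,
        # so 'path' here is relative to the Hub's API root.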
r = requests.get(api_url + path,
headers={'Authorization': 'token %s' % api_token},
)
r.raise_for_status()
self.set_header('Content-Type', 'application/json')
self.write(r.text)
class WhoAmIHandler(HubAuthenticated, web.RequestHandler):
"""Reply with the name of the user who made the request.
Uses "deprecated" cookie login
"""
@web.authenticated
def get(self):
self.write(self.get_current_user())
class OWhoAmIHandler(HubOAuthenticated, web.RequestHandler):
"""Reply with the name of the user who made the request.
Uses OAuth login flow
"""
@web.authenticated
def get(self):
self.write(self.get_current_user())
def main():
pprint.pprint(dict(os.environ), stream=sys.stderr)
if os.getenv('JUPYTERHUB_SERVICE_URL'):
url = urlparse(os.environ['JUPYTERHUB_SERVICE_URL'])
app = web.Application([
(r'.*/env', EnvHandler),
(r'.*/api/(.*)', APIHandler),
(r'.*/whoami/?', WhoAmIHandler),
(r'.*/owhoami/?', OWhoAmIHandler),
(r'.*/oauth_callback', HubOAuthCallbackHandler),
(r'.*', EchoHandler),
], cookie_secret=os.urandom(32))
ssl_context = None
key = os.environ.get('JUPYTERHUB_SSL_KEYFILE') or ''
cert = os.environ.get('JUPYTERHUB_SSL_CERTFILE') or ''
ca = os.environ.get('JUPYTERHUB_SSL_CLIENT_CA') or ''
if key and cert and ca:
ssl_context = make_ssl_context(
key,
cert,
cafile = ca,
check_hostname = False)
server = httpserver.HTTPServer(app, ssl_options=ssl_context)
server.listen(url.port, url.hostname)
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
print('\nInterrupted')
if __name__ == '__main__':
from tornado.options import parse_command_line
parse_command_line()
main()
|
<filename>experiments/mainFT.py<gh_stars>0
import sys, argparse,os,glob
sys.path.insert(0, '../')
# import geomloss
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from dataProcessing.dataModule import SingleDatasetModule
from train.runClf import runClassifier
from train.trainer_FT import FTmodel
parser = argparse.ArgumentParser()
parser.add_argument('--slurm', action='store_true')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--expName', type=str, default='apr3')
parser.add_argument('--trainClf',action='store_true')
parser.add_argument('--TLParamsFile', type=str, default=None)
parser.add_argument('--inPath', type=str, default=None)
parser.add_argument('--outPath', type=str, default=None)
parser.add_argument('--source', type=str, default="Uschad")
parser.add_argument('--target', type=str, default="Dsads")
parser.add_argument('--n_classes', type=int, default=4)
parser.add_argument('--saveModel', type=bool, default=False)
args = parser.parse_args()
my_logger = None
if args.slurm:
args.inPath = '/storage/datasets/sensors/frankDatasets/'
args.outPath = '/mnt/users/guilherme.silva/TransferLearning-Sensors/results'
verbose = 0
save_path = '../saved/'
params_path = '/mnt/users/guilherme.silva/TransferLearning-Sensors/experiments/params/'
my_logger = WandbLogger(project='TL',
log_model='all',
name=args.expName + '_FT_' + args.source + '_to_' + args.target)
else:
verbose = 1
args.inPath = 'C:\\Users\\gcram\\Documents\\Smart Sense\\Datasets\\frankDataset\\'
params_path = 'C:\\Users\\gcram\\Documents\\GitHub\\TransferLearning-Sensors\\experiments\\params\\'
args.paramsPath = None
save_path = 'C:\\Users\\gcram\\Documents\\GitHub\\TransferLearning-Sensors\\saved\\'
# my_logger = WandbLogger(project='TL',
# log_model='all',
# name=args.expName + 'test_FT_' + args.source + '_to_' + args.target)
#
def getHparams():
clfParams = {}
clfParams['kernel_dim'] = [(5, 3), (25, 3)]
clfParams['n_filters'] = (4, 16, 18, 24)
clfParams['enc_dim'] = 64
clfParams['epoch'] = 10
clfParams["dropout_rate"] = 0.2
clfParams['FE'] = 'fe2'
clfParams['input_shape'] = (2, 50, 3)
clfParams['alpha'] = None
clfParams['step_size'] = None
clfParams['bs'] = 128
clfParams['lr'] = 0.00005
clfParams['weight_decay'] = 0.0
if args.TLParamsFile:
import json
# with open(os.path.join(params_path,args.clfParamsFile)) as f:
# clfParams = json.load(f)
with open(os.path.join(params_path,args.TLParamsFile)) as f:
TLparams = json.load(f)
        TLparams['gan'] = TLparams['gan'] == 'True'
return TLparams, clfParams
TLparams = {}
TLparams['lr'] = 0.005
TLparams['gan'] = False
TLparams['lr_gan'] = 0.0001
TLparams['bs'] = 128
TLparams['step_size'] = None
TLparams['epoch'] = 20
TLparams['feat_eng'] = 'asym'
TLparams['alpha'] = 0.05
TLparams['beta'] = 0.75
TLparams['discrepancy'] = 'ot'
TLparams['weight_decay'] = 0.0
return TLparams, clfParams
if __name__ == '__main__':
TLparams, clfParams = getHparams()
dm_source = SingleDatasetModule(data_dir=args.inPath,
datasetName=args.source,
n_classes=args.n_classes,
input_shape=clfParams['input_shape'],
batch_size=clfParams['bs'])
    dm_source.setup(Loso=False, split=False, normalize=True)
file = f'model_{args.source}'
#if os.path.join(save_path,file + '_feature_extractor') not in glob.glob(save_path + '*'):
#if args.trainClf:
if False:
trainer, clf, res = runClassifier(dm_source,clfParams)
print('Source: ',res)
clf.save_params(save_path,file)
dm_target = SingleDatasetModule(data_dir=args.inPath,
datasetName=args.target,
input_shape=clfParams['input_shape'],
n_classes=args.n_classes,
batch_size=TLparams['bs'],
type='target')
    dm_target.setup(Loso=False, split=False, normalize=True)
model = FTmodel(trainParams=TLparams,
n_classes = args.n_classes,
lossParams = None,
save_path = None,
model_hyp=clfParams)
chk_path = "../saved/c791a09f23cfa488fe7e80c35a6edb68"
model2 = model.load_from_checkpoint(chk_path)
if my_logger:
params = {}
params['clfParams'] = clfParams
params['TLparams'] = TLparams
my_logger.log_hyperparams(params)
my_logger.watch(model)
model.load_params(save_path,file)
model.setDatasets(dm_source, dm_target)
early_stopping = EarlyStopping('discpy_loss', mode='min', patience=10, verbose=True)
trainer = Trainer(gpus=1,
check_val_every_n_epoch=1,
max_epochs=TLparams['epoch'],
logger=my_logger,
progress_bar_refresh_rate=verbose,
callbacks = [early_stopping],
multiple_trainloader_mode='max_size_cycle')
trainer.fit(model)
res = model.get_final_metrics()
print(res)
if my_logger:
my_logger.log_metrics(res)
|
# -*- coding: UTF8 -*-
"""
TODO:
- Fix arguments of find_contact()
- Implement PING protocol
"""
import socket
import random
import logging
from unittest import mock
from typing import Tuple, Optional
from json.decoder import JSONDecodeError
from .node import Node
from .config import Config
from .request import Request
from .requests import Requests
from .encryption import Encryption
from .message import Message, OwnMessage
from .contact import Contact, OwnContact, Beacon
from .utils import decode_json, get_primary_local_ip_address, get_timestamp
from .validation import is_valid_received_message, is_valid_request, is_valid_contact, verify_received_aes_key
class Network:
def __init__(self, master_node):
self.master_node = master_node
self.host = get_primary_local_ip_address() # TODO: listen on all interfaces, making this attribute deprecated.
####################
# Requests section #
####################
def route_request(self, request: Request, broadcast: bool = True) -> None:
"""
This method is used to route the requests to their corresponding functions, in order to process them.
:param Request request: The request, as a Request object.
:param bool broadcast: Whether we want to broadcast the message.
The only case it should be set to False is if we are replying to a specific request.
"""
"""
For each request, we first verify it is valid, and then process it.
"""
# If the request is already in the raw_requests database, we skip it.
if self.master_node.databases.raw_requests.is_request_known(request.get_id()):
return
self.store_raw_request_depending_on_type(request)
status = request.status
def _broadcast(req) -> None:
if broadcast:
self.broadcast_request(req)
# End of _broadcast method.
def _log_invalid_req(req) -> None:
log_msg = f'Invalid {req.status!r} request ({req.get_id()!r})'
if Config.verbose:
log_msg = f'{log_msg}: {req.to_json()}'
logging.debug(log_msg)
# End of _log_invalid_req method.
if status == "WUP_INI": # What's Up Protocol Initialization
if not Requests.is_valid_wup_ini_request(request):
_log_invalid_req(request)
return
self.handle_what_is_up_init(request)
return
elif status == "WUP_REP": # What's Up Protocol Reply
if not Requests.is_valid_wup_rep_request(request):
_log_invalid_req(request)
return
self.handle_what_is_up_reply(request)
return
elif status == "BCP": # BroadCast Protocol
if not Requests.is_valid_bcp_request(request):
_log_invalid_req(request)
return
self.handle_broadcast(request)
return
elif status == "DNP": # Discover Nodes Protocol
if not Requests.is_valid_dp_request(request):
_log_invalid_req(request)
return
self.handle_discover_nodes(request)
return
elif status == "DCP": # Discover Contact Protocol
if not Requests.is_valid_dp_request(request):
_log_invalid_req(request)
return
self.handle_discover_contact(request)
return
# All requests above are neither stored nor broadcast back.
# Only those below are, and only if they are valid.
if status == "MPP": # Message Propagation Protocol
if not is_valid_received_message(request.data):
_log_invalid_req(request)
return
_broadcast(request)
self.handle_message(request)
return
elif status == "NPP": # Node Publication Protocol
if not Requests.is_valid_npp_request(request):
_log_invalid_req(request)
return
_broadcast(request)
self.handle_new_node(request)
return
elif status == "CSP": # Contact Sharing Protocol
if not Requests.is_valid_csp_request(request):
_log_invalid_req(request)
return
_broadcast(request)
contact = Contact.from_dict(request.data)
self.master_node.databases.contacts.add_contact(contact)
return
elif status == "KEP": # Keys Exchange Protocol
if not Requests.is_valid_kep_request(request):
_log_invalid_req(request)
return
_broadcast(request)
self.negotiate_aes(request)
return
else:
# The request has an invalid status.
logging.warning(f'Captured request calling unknown protocol: {status!r}.')
return
def handle_raw_request(self, json_request: str) -> None:
"""
        This function is called every time we receive a JSON request.
It converts the request to a dictionary (if structurally valid) and routes it.
:param str json_request: A request, as a JSON-encoded string.
"""
dict_request = decode_json(json_request)
if not is_valid_request(dict_request):
return
request = Request.from_dict(dict_request)
self.route_request(request)
############################
# Request handling section #
############################
# All methods of this section must have only one argument: request, a valid Request object.
def negotiate_aes(self, request: Request) -> bool:
"""
Negotiate an AES key.
This function is called by two events:
- When we receive a Keys Exchange (KEP) request,
- When we discover a new node (Node Publication Protocol).
Note that we are not guaranteed that this request is for us.
:param Request request: A valid request.
:return bool: True if the negotiation is over, False otherwise.
TODO: Get rid of assertions
"""
def concatenate_keys(key1: bytes, key2: bytes) -> tuple:
"""
Concatenate the two keys and derive a nonce.
:param bytes key1: Half an AES key.
:param bytes key2: Half an AES key.
:return tuple: 2-tuple (bytes: aes_key, bytes: nonce)
"""
if key1 < key2:
aes_key = key1 + key2
elif key1 > key2:
aes_key = key2 + key1
else:
# Almost impossible case where the two keys are the same
raise ValueError("Wait, what ??!")
nonce = Encryption.derive_nonce_from_aes_key(aes_key)
return aes_key, nonce
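            # Ordering the halves by byte value makes the concatenation symmetric:
            # both peers derive the same (aes_key, nonce) pair regardless of which
            # half was generated locally and which was received.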
def propagate(half_aes_key: bytes) -> None:
"""
Wrapper used to broadcast the KEP request over the network.
:param bytes half_aes_key: Half an AES key.
"""
req = Requests.kep(half_aes_key, self.master_node, author_node)
self.broadcast_request(req)
def store(key_identifier: str, key: bytes, nonce: Optional[bytes]) -> None:
"""
Store the AES key in the conversations database.
:param str key_identifier: A key ID ; the Node's ID.
:param bytes key: The AES key.
:param Optional[bytes] nonce: The nonce, bytes if the negotiation is over, None otherwise.
"""
            # Concatenate key and nonce if the latter is passed.
if nonce is not None:
f_key = key + nonce
else:
f_key = key
self.master_node.databases.conversations.store_aes(key_identifier, f_key, get_timestamp())
def get_peer_half_key() -> Optional[bytes]:
"""
Reads the request for the half key sent by this conversation's peer.
:return Optional[bytes]: The half key of the distant peer.
"""
if not verify_received_aes_key(request.data["key"], author_node.get_rsa_public_key()):
# The AES key sent is invalid.
return
# This AES key's length is 16 bytes.
se_en_half_key = request.data["key"]["value"]
half_key = Encryption.decrypt_asymmetric(self.master_node.get_rsa_private_key(), se_en_half_key)
assert len(half_key) == Config.aes_keys_length // 2
return half_key
def finish_negotiation() -> None:
"""
Used when we already initialized the negotiation and we just received the second part to conclude it.
At the end of this function, we have a valid AES key for communicating with this node.
"""
stored_half_key, nonce = self.master_node.databases.conversations.get_decrypted_aes(key_id)
            # nonce should be None, and stored_half_key should be exactly half the expected key length
assert nonce is None
assert len(stored_half_key) == Config.aes_keys_length // 2
half_key = get_peer_half_key()
if half_key is None:
return
key, nonce = concatenate_keys(half_key, stored_half_key)
assert len(key) == Config.aes_keys_length
assert len(nonce) == Config.aes_keys_length // 2
store(key_id, key, nonce)
logging.info(f'Finished negotiation with {key_id!r}')
def continue_negotiation() -> None:
"""
            Used when a KEP request is received but we haven't initiated it.
We then proceed to send the other half of the key.
At the end of this function, we have a valid AES key for communicating with this node.
"""
new_half_key = Encryption.create_half_aes_key()
half_key = get_peer_half_key()
if half_key is None:
return
key, nonce = concatenate_keys(half_key, new_half_key)
assert len(key) == Config.aes_keys_length
assert len(nonce) == Config.aes_keys_length // 2
propagate(new_half_key)
store(key_id, key, nonce)
logging.info(f'Continued negotiation with {key_id!r}')
def new_negotiation() -> None:
"""
Used when initializing a new negotiation, usually when acknowledging a new node.
We send our half, store it and wait.
When receiving the second part, we will call "finish_negotiation()".
"""
half_aes_key = Encryption.create_half_aes_key()
# We don't encrypt it here, we will take care of that when creating the KEP request.
propagate(half_aes_key)
store(key_id, half_aes_key, None)
logging.info(f'Initiated negotiation with {key_id!r}')
if not self.master_node.databases.are_node_specific_databases_open:
return False
status: str = request.status
own_id = self.master_node.get_id()
# We will take the author's RSA public key to encrypt our part of the AES key.
if status == "KEP":
author_node = Node.from_dict(request.data['author'])
recipient_node = Node.from_dict(request.data['recipient'])
# If we are not the recipient, end.
recipient_id = recipient_node.get_id()
            if recipient_id != own_id:
                if Config.log_full_network:
                    msg = f"{status} request {request.get_id()!r} is not addressed to us"
                    if Config.verbose:
                        msg += f": got {recipient_id!r}, ours is {own_id!r}"
                    logging.debug(msg)
                return False
elif status == "NPP":
author_node = Node.from_dict(request.data)
else:
msg = f'Invalid protocol {request.status!r} called function {Network.negotiate_aes.__name__!r}'
logging.critical(msg)
raise ValueError(msg)
key_id = author_node.get_id()
# Special case: if we are the node of the NPP request, we'll create a new AES key for ourself.
if status == "NPP" and key_id == own_id:
aes_key, nonce = Encryption.create_aes()
store(key_id, aes_key, nonce)
return True
# If the key is already negotiated, end.
if self.master_node.databases.conversations.is_aes_negotiated(key_id):
# You might want to add a renegotiation system here.
return True
# If the negotiation has been launched, check if it is expired.
# If it is, remove it.
# Otherwise, this means we are receiving the second part of the AES key,
# and therefore we can conclude the negotiation.
# If the negotiation has not been launched, we will be initiating it.
if self.master_node.databases.conversations.is_aes_negotiation_launched(key_id):
if self.master_node.databases.conversations.is_aes_negotiation_expired(key_id):
self.master_node.databases.conversations.remove_aes_key(key_id)
new_negotiation()
return False
# If the negotiation has not yet expired, we conclude it.
else:
if status == "KEP":
finish_negotiation()
return True
else:
if status == "KEP":
continue_negotiation()
return True
elif status == "NPP":
new_negotiation()
return False
return False
def handle_message(self, request: Request) -> None:
"""
This method is used when receiving a new message.
It is called after the request has been broadcast back and stored in the raw_requests database,
and will take care of storing the message if we can decrypt its content.
:param Request request: A MPP request.
"""
author = Node.from_dict(request.data["author"])
if not author:
return
if not self.master_node.databases.conversations.is_aes_negotiated(author.id):
# Here, the negotiation is not done yet, so we launch it.
# If it returns False, meaning the negotiation is not over, we end the function.
# Otherwise, we continue.
if not self.negotiate_aes(request):
return
# The AES keys have been negotiated, so we can proceed.
aes_key, nonce = self.master_node.databases.conversations.get_decrypted_aes(author.get_id())
message_dec = Message.from_dict_encrypted(aes_key, nonce, request.data)
if not message_dec:
# We won't log anything else: any error should have been logged by the message constructor above.
return
# At this point, the message has been read and we can store it (encrypted) in our conversations database.
message_enc = Message.from_dict(request.data)
message_enc.set_time_received()
self.master_node.databases.conversations.store_new_message(author.get_id(), message_enc)
def handle_new_node(self, request: Request) -> None:
"""
Called when we receive a NPP request.
:param Request request: A NPP request.
"""
node = Node.from_dict(request.data)
# Even if the node information was validated beforehand,
# we'll check if it worked anyway.
if not node:
return
self.negotiate_aes(request)
self.master_node.databases.nodes.add_node(node)
def handle_what_is_up_init(self, request: Request) -> None:
"""
Called when we receive a What's Up init request.
:param Request request: A WUP_INI request.
"""
request_timestamp = int(request.data["timestamp"])
contact_info = request.data["author"]
contact = Contact.from_dict(contact_info)
if not contact:
# Double check
return
all_requests = self.master_node.databases.raw_requests.get_all_raw_requests_since(request_timestamp)
        for stored_request in all_requests.values():
            req = Requests.wup_rep(stored_request)
            self.send_request(req, contact)
def handle_what_is_up_reply(self, request: Request) -> None:
"""
Called when receiving a What's Up reply request.
:param Request request: A WUP_REP request.
"""
inner_request = Request.from_dict(request.data)
if not inner_request:
return
# Route the request.
self.route_request(inner_request, broadcast=False)
def handle_broadcast(self, request: Request) -> None:
pass # TODO
def handle_discover_nodes(self, request: Request) -> None:
"""
Handles Discover Nodes requests.
The request must be valid.
:param Request request: A valid DNP request.
"""
contact = Contact.from_dict(request.data["author"])
# Add the contact requesting the nodes if we don't know it already.
if not self.master_node.databases.contacts.contact_exists(contact.get_id()):
self.master_node.databases.contacts.add_contact(contact)
for node in self.master_node.databases.nodes.get_all_nodes():
req = Requests.npp(node)
self.send_request(req, contact)
def handle_discover_contact(self, request: Request) -> None:
"""
Handles Discover Contacts requests.
The request must be valid.
:param Request request: A valid CSP request.
"""
contact = Contact.from_dict(request.data["author"])
# Add the contact if we don't know it already.
if not self.master_node.databases.contacts.contact_exists(contact.get_id()):
self.master_node.databases.contacts.add_contact(contact)
for contact_object in self.master_node.databases.contacts.get_all_contacts():
req = Requests.csp(contact_object)
self.send_request(req, contact)
def store_raw_request_depending_on_type(self, request: Request) -> None:
"""
Takes a raw request and stores it if it is appropriate to do so.
:param Request request: The request to store.
"""
if request.status not in Config.store_requests:
return
self.master_node.databases.raw_requests.add_new_raw_request(request)
#####################
# Protocols section #
#####################
def what_is_up(self) -> None:
"""
Chooses a node (preferably a beacon) and asks for all requests since the last one we received.
This method is called when a RSA private key is loaded into the client.
TODO: Take care of the case where we know less than 10 contacts
"""
last_received_request = self.master_node.databases.raw_requests.get_last_received()
if not last_received_request:
# We didn't receive any request yet.
return
for contact in self.find_available_contact():
req = Requests.wup_ini(last_received_request.timestamp, contact)
if self.send_request(req, contact):
return
def request_nodes(self) -> None:
"""
Creates a DNP request and sends it to one contact.
"""
own_contact = OwnContact('private')
req = Requests.dnp(own_contact)
for contact in self.find_available_contact():
if self.send_request(req, contact):
                logging.info(f'Requested new nodes from {contact.get_address()}:{contact.get_port()}')
return
def request_contacts(self) -> None:
"""
Creates a DCP request and sends it to one contact.
"""
own_contact = OwnContact('private')
        req = Requests.dcp(own_contact)  # assumption: a Requests.dcp constructor exists for DCP, mirroring Requests.dnp above
for contact in self.find_available_contact():
if self.send_request(req, contact):
                logging.info(f'Requested new contacts from {contact.get_address()}:{contact.get_port()}')
return
################################
# Network interactions section #
################################
def prepare_and_send_own_message(self, recipient: Node, own_message: OwnMessage) -> None:
if not self.master_node.databases.conversations.is_aes_negotiated(recipient.get_id()):
logging.error(f'Tried to send a message to node {recipient.get_id()}, '
f'but AES negotiation is not complete.')
return
aes_key, nonce = self.master_node.databases.conversations.get_decrypted_aes(recipient.get_id())
aes = Encryption.construct_aes_object(aes_key, nonce)
own_message.prepare(aes)
if not own_message.is_prepared():
            logging.error('Something went wrong during the preparation of the message.')
return
# Convert OwnMessage to Message, and store it in the conversations database.
msg_data = own_message.to_dict()
msg = Message.from_dict(msg_data)
# Hack our way around AssertionError
msg.set_time_received()
self.master_node.databases.conversations.store_new_message(recipient.get_id(), msg)
req = Requests.mpp(own_message)
self.broadcast_request(req)
def find_available_contact(self):
contacts = self.get_all_contacts()
for contact in contacts:
# TODO: Add network verification
yield contact
def get_all_contacts(self):
"""
Returns a generator of all the contacts we know.
:return: A generator object of the contacts.
"""
def _get_beacons(beacons_list: list):
for beacon_info in beacons_list:
beacon_obj = Beacon.from_raw_address(beacon_info)
if not beacon_obj:
# The beacon information is invalid.
logging.warning(f"A beacon is invalid: {beacon_info!r}")
continue
                yield beacon_obj
        def __get_contacts(beacons_list: list, contacts_list: list):
            for beacon in _get_beacons(beacons_list):
                yield beacon
            for contact_obj in contacts_list:
                yield contact_obj
# Gets the beacons and contacts lists.
own_contact = OwnContact('private')
all_beacons = Config.beacons
all_contacts = self.master_node.databases.contacts.get_all_contacts(exclude=[own_contact.get_id()])
random.shuffle(all_beacons)
random.shuffle(all_contacts)
contacts = __get_contacts(all_beacons, all_contacts)
for contact in contacts:
yield contact
def listen_for_autodiscover_packets(self, stop_event) -> None:
def receive_all(sock: socket.socket) -> Tuple[bytes, Tuple[str, int]]:
"""
Receives all parts of a network-sent message.
:param socket.socket sock: The socket object.
:return: The complete message and the information of the sender.
"""
data = bytes()
while True:
# Could there be interlaced packets (two different addresses gathered during successive loops) ?
part, add = sock.recvfrom(Config.network_buffer_size)
data += part
if len(part) < Config.network_buffer_size:
# Either 0 or end of data
break
return data, add
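        # NOTE: the len(part) < network_buffer_size stop condition ends a read early if a
        # chunk is exactly network_buffer_size bytes long; a known limitation of this heuristic.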
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.bind(("", Config.broadcast_port))
            while not stop_event.is_set():
request_raw, address = receive_all(sock=s)
if Config.log_full_network:
logging.debug(f'Received packet: {request_raw!r} from {address!r}')
# The request is not assured to be JSON, could be text, raw bytes or anything else.
json_request = Encryption.decode_bytes(request_raw)
try:
dict_request = decode_json(json_request)
except JSONDecodeError:
continue
req_add = dict_request['address'].split(Config.contact_delimiter)[0]
sender_add = address[0]
own_add = OwnContact('private').get_address()
if req_add != sender_add:
# If the addresses are not the same.
continue
if req_add == own_add:
# If the request's sender address is ours.
continue
if is_valid_contact(dict_request):
contact = Contact.from_dict(dict_request)
contact.set_last_seen()
msg = f'We received a new contact by listening on the broadcast: {json_request}'
                # The following verification is already done inside add_contact,
                # but we repeat it here so we can tell whether the contact was already known.
if not self.master_node.databases.contacts.contact_exists(contact.get_id()):
self.master_node.databases.contacts.add_contact(contact)
else:
msg += ' (already registered)'
if Config.log_full_network:
logging.debug(msg)
def broadcast_autodiscover(self) -> None:
# Resource: https://github.com/ninedraft/python-udp
if len(self.master_node.databases.contacts.get_all_contacts_ids()) > Config.broadcast_limit:
# If we know enough contacts, we don't need to broadcast.
# TODO: find a way of stopping calls to this function
return
own_contact = OwnContact('private')
own_contact_information = Encryption.encode_string(own_contact.to_json())
self.send_broadcast(own_contact_information)
def send_broadcast(self, info: bytes) -> None:
"""
Takes a contact information as a bytes object (containing JSON), and broadcasts it.
"""
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
s.settimeout(2)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.sendto(info, ('<broadcast>', Config.broadcast_port))
if Config.log_full_network:
logging.debug(f'Broadcast own contact information: {info!r}')
def listen_for_requests(self, stop_event) -> None:
"""
Setup a server and listens on a port.
It requires a TCP connection to transmit information.
"""
def receive_all(sock: socket.socket) -> bytes:
"""
Receives all parts of a network-sent message.
:param socket.socket sock: The socket object.
:return bytes: The complete message.
"""
data = bytes()
while True:
part = sock.recv(Config.network_buffer_size)
data += part
if len(part) < Config.network_buffer_size:
break
return data
with socket.socket() as server_socket:
server_socket.bind((self.host, Config.sami_port))
server_socket.listen(Config.network_max_conn)
            while not stop_event.is_set():
connection, address = server_socket.accept()
raw_bytes_request = receive_all(connection)
json_request = Encryption.decode_bytes(raw_bytes_request)
try:
dict_request = decode_json(json_request)
except JSONDecodeError:
if Config.log_full_network:
log_msg = 'Received an unknown packet'
if Config.verbose:
log_msg += f': {raw_bytes_request}'
logging.debug(log_msg)
continue
request = Request.from_dict(dict_request)
if request: # If we managed to get a valid request.
if Config.log_full_network:
log_msg = f'Received {request.status!r} request {request.get_id()!r} from {address}'
if Config.verbose:
log_msg += f': {dict_request}'
logging.info(log_msg)
# We'll route it.
self.route_request(request)
else:
if Config.log_full_network:
log_msg = f'Received invalid request from {address}'
if Config.verbose:
log_msg += f': {dict_request}'
logging.info(log_msg)
connection.close()
def broadcast_request(self, request: Request) -> None:
"""
Broadcast a request to all known contacts.
:param Request request:
"""
contacts = list(self.get_all_contacts())
for contact in contacts:
self.send_request(request, contact)
logging.info(f'Broadcast request {request.get_id()!r} to {len(contacts)} contacts')
def send_dummy_data_to_self(self) -> None:
# Mocks the ``to_json()`` method.
# As we don't want to send a dirty request, we will make it return an empty string.
        # The sockets will then send this empty payload, which unblocks the listening loops.
with mock.patch('sami.Request.to_json') as mock_req_to_json:
mock_req_to_json.return_value = ''
req = Request('', {}, 0, 0)
own_contact = OwnContact('private')
own_contact_information = Encryption.encode_string(own_contact.to_json())
self.send_broadcast(own_contact_information) # Used to end broadcast listening
self.send_request(req, Contact.from_dict(own_contact.to_dict())) # Used to end requests listening
def send_request(self, request: Request, contact: Contact) -> bool:
"""
Send a request to a specific contact.
:param Request request: The request to send.
:param Contact contact: The contact to send the request to.
:return bool: True if it managed to send the request, False otherwise.
TODO : error handling
"""
address = contact.get_address()
port = contact.get_port()
self.store_raw_request_depending_on_type(request)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client_socket:
client_socket.settimeout(Config.contact_connect_timeout)
try:
client_socket.connect((address, port))
client_socket.send(Encryption.encode_string(request.to_json()))
except (socket.timeout, ConnectionRefusedError, ConnectionResetError, OSError):
if Config.log_full_network:
logging.info(f'Could not send request {request.get_id()!r} to {address}:{port}')
except Exception as e:
logging.warning(f'Unhandled {type(e)} exception caught: {e!r}')
else:
if Config.log_full_network:
logging.info(f'Sent {request.status!r} request {request.get_id()!r} to {address}:{port}')
return True
return False
|
<gh_stars>0
from django.shortcuts import render, redirect
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from .models import neighbourhood, healthservices, Business, Health, Authorities, BlogPost, Profile, Notifications, Comment
from .email import send_priority_email
from .forms import notificationsForm, ProfileForm, BlogPostForm, BusinessForm, CommentForm
from decouple import config, Csv
import datetime as dt
from django.http import JsonResponse
import json
from django.db.models import Q
from django.contrib.auth.models import User
from rest_framework.response import Response
from rest_framework.views import APIView
# Create your views here.
def index(request):
try:
if not request.user.is_authenticated:
return redirect('/accounts/login/')
current_user = request.user
profile = Profile.objects.get(username=current_user)
except ObjectDoesNotExist:
return redirect('create-profile')
return render(request, 'index.html')
@login_required(login_url='/accounts/login/')
def notification(request):
current_user = request.user
profile = Profile.objects.get(username=current_user)
all_notifications = Notifications.objects.filter(neighbourhood=profile.neighbourhood)
return render(request, 'notifications.html', {"notifications": all_notifications})
@login_required(login_url='/accounts/login/')
def health(request):
current_user = request.user
profile = Profile.objects.get(username=current_user)
healthservices = Health.objects.filter(neighbourhood=profile.neighbourhood)
return render(request, 'health.html', {"healthservices": healthservices})
@login_required(login_url='/accounts/login/')
def blog(request):
current_user = request.user
profile = Profile.objects.get(username=current_user)
blogposts = BlogPost.objects.filter(neighbourhood=profile.neighbourhood)
return render(request, 'blog.html', {"blogposts": blogposts})
@login_required(login_url='/accounts/login/')
def businesses(request):
current_user = request.user
profile = Profile.objects.get(username=current_user)
businesses = Business.objects.filter(neighbourhood=profile.neighbourhood)
return render(request, 'business.html', {"businesses": businesses})
@login_required(login_url='/accounts/login/')
def authorities(request):
current_user = request.user
profile = Profile.objects.get(username=current_user)
authorities = Authorities.objects.filter(neighbourhood=profile.neighbourhood)
return render(request, 'security.html', {"authorities": authorities})
@login_required(login_url='/accounts/login/')
def view_blog(request, id):
current_user = request.user
    # filter() returns an empty queryset when there are no comments; no try/except needed
    comments = Comment.objects.filter(post_id=id)
blog = BlogPost.objects.get(id=id)
if request.method == 'POST':
form = CommentForm(request.POST, request.FILES)
if form.is_valid():
comment = form.save(commit=False)
comment.username = current_user
comment.post = blog
comment.save()
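            # Note: redirecting after a successful POST (Post/Redirect/Get) would avoid
            # re-submitting the comment when the page is refreshed.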
else:
form = CommentForm()
return render(request, 'view_blog.html', {"blog": blog, "form": form, "comments": comments})
@login_required(login_url='/accounts/login/')
def user_profile(request, username):
user = User.objects.get(username=username)
profile = Profile.objects.get(username=user)
return render(request, 'profile.html', {"profile": profile})
@login_required(login_url='/accounts/login/')
def my_profile(request):
current_user = request.user
profile = Profile.objects.get(username=current_user)
return render(request, 'user_profile.html', {"profile": profile})
@login_required(login_url='/accounts/login/')
def new_blogpost(request):
current_user = request.user
profile = Profile.objects.get(username=current_user)
if request.method == "POST":
form = BlogPostForm(request.POST, request.FILES)
if form.is_valid():
blogpost = form.save(commit=False)
blogpost.username = current_user
blogpost.neighbourhood = profile.neighbourhood
blogpost.avatar = profile.avatar
blogpost.save()
return HttpResponseRedirect('/blog')
else:
form = BlogPostForm()
return render(request, 'blogpost_form.html', {"form": form})
@login_required(login_url='/accounts/login/')
def new_business(request):
current_user = request.user
profile = Profile.objects.get(username=current_user)
if request.method == "POST":
form = BusinessForm(request.POST, request.FILES)
if form.is_valid():
business = form.save(commit=False)
business.owner = current_user
business.neighbourhood = profile.neighbourhood
business.save()
return HttpResponseRedirect('/business')
else:
form = BusinessForm()
return render(request, 'business_form.html', {"form": form})
@login_required(login_url='/accounts/login/')
def create_profile(request):
current_user = request.user
if request.method == "POST":
form = ProfileForm(request.POST, request.FILES)
if form.is_valid():
profile = form.save(commit=False)
profile.username = current_user
profile.save()
return HttpResponseRedirect('/')
else:
form = ProfileForm()
return render(request, 'profile_form.html', {"form": form})
@login_required(login_url='/accounts/login/')
def new_notification(request):
current_user = request.user
profile = Profile.objects.get(username=current_user)
if request.method == "POST":
form = notificationsForm(request.POST, request.FILES)
if form.is_valid():
notification = form.save(commit=False)
notification.author = current_user
notification.neighbourhood = profile.neighbourhood
notification.save()
# if notification.priority == 'High Priority':
# send_priority_email(profile.name, profile.email, notification.title, notification.notification, notification.author, notification.neighbourhood)
return HttpResponseRedirect('/notifications')
else:
form = notificationsForm()
return render(request, 'notifications_form.html', {"form": form})
@login_required(login_url='/accounts/login/')
def update_profile(request):
current_user = request.user
if request.method == "POST":
instance = Profile.objects.get(username=current_user)
form = ProfileForm(request.POST, request.FILES, instance=instance)
if form.is_valid():
profile = form.save(commit=False)
profile.username = current_user
profile.save()
return redirect('Index')
    else:
        try:
            profile = Profile.objects.get(username=current_user)
            form = ProfileForm(instance=profile)
        except ObjectDoesNotExist:
            form = ProfileForm()
return render(request, 'update_profile.html', {"form": form})
@login_required(login_url='/accounts/login/')
def search_results(request):
if 'blog' in request.GET and request.GET["blog"]:
search_term = request.GET.get("blog")
searched_blogposts = BlogPost.search_blogpost(search_term)
message = f"{search_term}"
print(searched_blogposts)
return render(request, 'search.html', {"message": message, "blogs": searched_blogposts})
else:
message = "You haven't searched for anything"
return render(request, 'search.html', {"message": message})
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class NewsElement(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, since: int=None, until: date=None, duration: int=None, title: str=None, content: str=None): # noqa: E501
"""NewsElement - a model defined in Swagger
:param since: The since of this NewsElement. # noqa: E501
:type since: int
:param until: The until of this NewsElement. # noqa: E501
:type until: date
:param duration: The duration of this NewsElement. # noqa: E501
:type duration: int
:param title: The title of this NewsElement. # noqa: E501
:type title: str
:param content: The content of this NewsElement. # noqa: E501
:type content: str
"""
self.swagger_types = {
'since': int,
'until': date,
'duration': int,
'title': str,
'content': str
}
self.attribute_map = {
'since': 'since',
'until': 'until',
'duration': 'duration',
'title': 'title',
'content': 'content'
}
self._since = since
self._until = until
self._duration = duration
self._title = title
self._content = content
@classmethod
def from_dict(cls, dikt) -> 'NewsElement':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The NewsElement of this NewsElement. # noqa: E501
:rtype: NewsElement
"""
return util.deserialize_model(dikt, cls)
@property
def since(self) -> int:
"""Gets the since of this NewsElement.
:return: The since of this NewsElement.
:rtype: int
"""
return self._since
@since.setter
def since(self, since: int):
"""Sets the since of this NewsElement.
:param since: The since of this NewsElement.
:type since: int
"""
self._since = since
@property
def until(self) -> date:
"""Gets the until of this NewsElement.
:return: The until of this NewsElement.
:rtype: date
"""
return self._until
@until.setter
def until(self, until: date):
"""Sets the until of this NewsElement.
:param until: The until of this NewsElement.
:type until: date
"""
self._until = until
@property
def duration(self) -> int:
"""Gets the duration of this NewsElement.
:return: The duration of this NewsElement.
:rtype: int
"""
return self._duration
@duration.setter
def duration(self, duration: int):
"""Sets the duration of this NewsElement.
:param duration: The duration of this NewsElement.
:type duration: int
"""
self._duration = duration
@property
def title(self) -> str:
"""Gets the title of this NewsElement.
:return: The title of this NewsElement.
:rtype: str
"""
return self._title
@title.setter
def title(self, title: str):
"""Sets the title of this NewsElement.
:param title: The title of this NewsElement.
:type title: str
"""
self._title = title
@property
def content(self) -> str:
"""Gets the content of this NewsElement.
:return: The content of this NewsElement.
:rtype: str
"""
return self._content
@content.setter
def content(self, content: str):
"""Sets the content of this NewsElement.
:param content: The content of this NewsElement.
:type content: str
"""
self._content = content
|
import numpy as np
import matplotlib.pyplot as plt
import random
class GaussianBandit:
def __init__(self):
self._arm_means = np.random.uniform(0., 1., 10) # Sample some means
self.n_arms = len(self._arm_means)
self.rewards = []
self.total_played = 0
def reset(self):
self.rewards = []
self.total_played = 0
def play_arm(self, a):
        reward = np.random.normal(self._arm_means[a], 1.)  # use sampled mean with a standard deviation of 1
self.total_played += 1
self.rewards.append(reward)
return reward
def greedy(bandit, timesteps):
rewards = np.zeros(bandit.n_arms)
n_plays = np.zeros(bandit.n_arms)
Q = np.zeros(bandit.n_arms)
possible_arms = range(bandit.n_arms)
    # Init variables (rewards, n_plays, Q) by playing each arm once
    for a in possible_arms:
        rewards[a] = bandit.play_arm(a)
        # set to 1, because every arm has been played exactly once
        n_plays[a] = 1
        # equals rewards[a], because n_plays is 1 for every arm
        Q[a] = rewards[a]
    # Main loop
    while bandit.total_played < timesteps:
        # greedy action selection: pick the arm with the highest action-value estimate
        a = np.argmax(Q)
        # update the variables (rewards, n_plays, Q) for the selected arm
        rewards[a] += bandit.play_arm(a)
        # increment the play count of arm a
        n_plays[a] += 1
        # recompute the estimated action-value (sample average)
        Q[a] = rewards[a] / n_plays[a]
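    # Equivalent incremental form of the sample-average update (illustrative):
    #   Q[a] += (r - Q[a]) / n_plays[a], where r is the reward just observed,
    # which would avoid keeping the separate rewards[] accumulator.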
def epsilon_greedy(bandit, timesteps):
# define epsilon
e = 0.1
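    # With probability e we explore (random arm); with probability 1 - e we
    # exploit the current best estimate (argmax Q).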
    # Initialize (same as in greedy): play each arm once
    rewards = np.zeros(bandit.n_arms)
    n_plays = np.zeros(bandit.n_arms)
    Q = np.zeros(bandit.n_arms)
    possible_arms = range(bandit.n_arms)
    for a in possible_arms:
        rewards[a] = bandit.play_arm(a)
        # set to 1, because every arm has been played exactly once
        n_plays[a] = 1
        # equals rewards[a], because n_plays is 1 for every arm
        Q[a] = rewards[a]
    while bandit.total_played < timesteps:
        # epsilon-greedy action selection
        if random.random() > e:
            # exploit: pick the arm with the highest action-value estimate
            a = np.argmax(Q)
        else:
            # explore: pick a random arm
            a = random.choice(possible_arms)
        # update the variables (rewards, n_plays, Q) for the selected arm
        rewards[a] += bandit.play_arm(a)
        # increment the play count of arm a
        n_plays[a] += 1
        # recompute the estimated action-value (sample average)
        Q[a] = rewards[a] / n_plays[a]
def main():
    n_episodes = 10000  # averaging over many episodes reduces noise in the plot
n_timesteps = 1000
rewards_greedy = np.zeros(n_timesteps)
rewards_egreedy = np.zeros(n_timesteps)
for i in range(n_episodes):
if i % 100 == 0:
print ("current episode: " + str(i))
b = GaussianBandit() # initializes a random bandit
greedy(b, n_timesteps)
rewards_greedy += b.rewards
b.reset() # reset the bandit before running epsilon_greedy
epsilon_greedy(b, n_timesteps)
rewards_egreedy += b.rewards
rewards_greedy /= n_episodes
rewards_egreedy /= n_episodes
plt.plot(rewards_greedy, label="greedy")
print("Total reward of greedy strategy averaged over " + str(n_episodes) + " episodes: " + str(np.sum(rewards_greedy)))
plt.plot(rewards_egreedy, label="e-greedy")
print("Total reward of epsilon greedy strategy averaged over " + str(n_episodes) + " episodes: " + str(np.sum(rewards_egreedy)))
plt.legend()
plt.xlabel("Timesteps")
plt.ylabel("Reward")
plt.savefig('bandit_strategies.eps')
plt.show()
if __name__ == "__main__":
main()
|
import os, sys
sys.path.append("..")
from lookup import Lookup
def test_lookup (lookup, text = "A test."):
print(lookup)
print("Testing with: [{}]".format(text))
id_of_bos = lookup.convert_tokens_to_ids(lookup.bos_token)
id_of_eos = lookup.convert_tokens_to_ids(lookup.eos_token)
id_of_pad = lookup.convert_tokens_to_ids(lookup.pad_token)
converted_bos_token = lookup.convert_ids_to_tokens(id_of_bos)
converted_eos_token = lookup.convert_ids_to_tokens(id_of_eos)
converted_pad_token = lookup.convert_ids_to_tokens(id_of_pad)
print("bos_token {} = {} and converted back to token = {}".format(lookup.bos_token, id_of_bos, converted_bos_token))
print("eos_token {} = {} and converted back to token = {}".format(lookup.eos_token, id_of_eos, converted_eos_token))
print("pad_token {} = {} and converted back to token = {}".format(lookup.pad_token, id_of_pad, converted_pad_token))
#print(lookup._tokenizer.all_special_ids)
#print(lookup._tokenizer.all_special_tokens)
#print(lookup._tokenizer.special_tokens_map)
print("\n0. Save/load lookup object:")
if not os.path.exists(lookup.type):
os.makedirs(lookup.type)
lookup.save_special_tokens(file_prefix=os.path.join(lookup.type, lookup.type))
lookup = Lookup(type=lookup.type) # recreate object
lookup.load(file_prefix=os.path.join(lookup.type, lookup.type))
print(lookup)
print("\n1. String to tokens (tokenize):")
tokens = lookup.tokenize(text)
print(tokens)
print("\n2. Tokens to ints (convert_tokens_to_ids):")
ids = lookup.convert_tokens_to_ids(tokens)
print(ids)
print("\n2.5 Token to int (convert_tokens_to_ids with a single str):")
    token_id = lookup.convert_tokens_to_ids(tokens[0])
    print(token_id)
print("\n3. Ints to tokens (convert_ids_to_tokens):")
tokens = lookup.convert_ids_to_tokens(ids)
print(tokens)
print("\n3.5 Int to token (convert_ids_to_tokens with a single int):")
    token = lookup.convert_ids_to_tokens(token_id)
    print(token)
print("\n4. Tokens to string (convert_tokens_to_string):")
recreated_text = lookup.convert_tokens_to_string(tokens)
print(recreated_text)
print("\n5. String to ints (encode):")
ids = lookup.encode(text)
print(ids)
print("\n6. Ints to string (decode):")
recreated_text = lookup.decode(ids)
print(recreated_text)
print("\n7. Encode adding special tokens:")
ids = lookup.encode(text, add_bos_eos_tokens=True)
print(ids)
print("How it looks like with tokens: {}".format(lookup.convert_ids_to_tokens(ids)))
print("\n8. Decode skipping special tokens:")
recreated_text = lookup.decode(ids, skip_bos_eos_tokens=True)
print(recreated_text)
if __name__ == "__main__":
# gpt2
lookup = Lookup(type="gpt2")
test_lookup(lookup)
# bpe
print("Create BPE model ...")
lookup = Lookup(type="bpe")
if not os.path.exists(lookup.type):
os.makedirs(lookup.type)
import sentencepiece as spm
spm.SentencePieceTrainer.Train('--input=dummy_corpus.txt --model_prefix=bpe/bpe --character_coverage=1.0 --model_type=bpe --num_threads=8 --split_by_whitespace=true --shuffle_input_sentence=true --max_sentence_length=8000 --vocab_size=1024')
lookup.load("bpe/bpe")
test_lookup(lookup)
|
<gh_stars>0
import numpy as np
class GrowingMat(object):
def __init__(self, shape, capacity, grow_factor=4):
self.data = np.zeros(capacity)
self.shape = shape
self.capacity = capacity
self.grow_factor = grow_factor
def expand(self, cols=None, rows=None, block=None):
if cols is not None and rows is not None:
cols = np.atleast_2d(cols)
rows = np.atleast_2d(rows)
new_shape = (
self.shape[0] + rows.shape[0], self.shape[1] + cols.shape[1])
            new_capacity = (
                self.capacity[0] * self.grow_factor if new_shape[0] > self.capacity[0] else self.capacity[0],
                self.capacity[1] * self.grow_factor if new_shape[1] > self.capacity[1] else self.capacity[1])
if new_capacity != self.capacity:
# grow array
newdata = np.zeros(new_capacity)
                newdata[:self.shape[0], :self.shape[1]] = self.data[:self.shape[0], :self.shape[1]]  # copy only the used region
self.capacity = new_capacity
self.data = newdata
self.data[self.shape[0]:new_shape[0], :self.shape[1]] = rows
self.data[:self.shape[0], self.shape[1]:new_shape[1]] = cols
if block is not None:
self.data[self.shape[0]:new_shape[0],
self.shape[1]:new_shape[1]] = block
self.shape = new_shape
#print "New shape", new_shape, self.shape, self.view.shape, #self.finalized.shape
elif cols is not None:
cols = np.atleast_2d(cols)
new_shape = (self.shape[0], self.shape[1] + cols.shape[1])
new_capacity = (self.capacity[0],
self.capacity[1] * self.grow_factor if new_shape[1] > self.capacity[1] else self.capacity[1])
if new_capacity != self.capacity:
# grow array
newdata = np.zeros(new_capacity)
                newdata[:self.shape[0], :self.shape[1]] = self.data[:self.shape[0], :self.shape[1]]  # copy only the used region
self.capacity = new_capacity
self.data = newdata
self.data[:self.shape[0], self.shape[1]:new_shape[1]] = cols
self.shape = new_shape
elif rows is not None:
rows = np.atleast_2d(rows)
new_shape = (self.shape[0] + rows.shape[0], self.shape[1])
            new_capacity = (
                self.capacity[0] * self.grow_factor if new_shape[0] > self.capacity[0] else self.capacity[0],
                self.capacity[1])
if new_capacity != self.capacity:
# grow array
newdata = np.zeros(new_capacity)
                newdata[:self.shape[0], :self.shape[1]] = self.data[:self.shape[0], :self.shape[1]]  # copy only the used region
self.data = newdata
self.capacity = new_capacity
self.data[self.shape[0]:new_shape[0], :self.shape[1]] = rows
self.shape = new_shape
@property
def view(self):
return self.data[:self.shape[0], :self.shape[1]]
@view.setter
def view(self, d):
self.data[:self.shape[0], :self.shape[1]] = d
@property
def finalized(self):
data = self.view
return np.reshape(data, newshape=self.shape)
class GrowingVector(object):
def __init__(self, size, capacity=100, grow_factor=4):
self.data = np.zeros(capacity)
self.size = size
self.capacity = capacity
self.grow_factor = grow_factor
def expand(self, rows):
rows = np.atleast_1d(rows)
new_size = self.size + rows.shape[0]
        new_capacity = (self.capacity * self.grow_factor
                        if new_size > self.capacity else self.capacity)
if new_capacity != self.capacity:
# grow array
newdata = np.zeros(new_capacity)
            newdata[:self.size] = self.data[:self.size]  # copy only the used region
self.capacity = new_capacity
self.data = newdata
self.data[self.size:new_size] = rows
self.size = new_size
@property
def view(self):
return self.data[:self.size]
@view.setter
def view(self, d):
self.data[:self.size] = d
@property
def finalized(self):
data = self.view
        return np.reshape(data, newshape=(self.size,))
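# A minimal usage sketch of the two classes above:
if __name__ == '__main__':
    v = GrowingVector(size=0, capacity=2)
    v.expand([1.0, 2.0, 3.0])  # new size 3 exceeds capacity 2, so the buffer grows
    print(v.finalized)  # [1. 2. 3.]
    m = GrowingMat(shape=(1, 1), capacity=(4, 4))
    m.view = np.array([[1.0]])
    m.expand(rows=np.array([[2.0]]), cols=np.array([[3.0]]), block=np.array([[4.0]]))
    print(m.finalized)  # [[1. 3.], [2. 4.]]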
|
<filename>awsscripts/sketches/emr.py
from typing import Dict, Any, Optional, List
from awsscripts.ec2.ec2 import ec2_instances
from awsscripts.emr.emr import EMR
from awsscripts.sketches.sketchitem import SketchItem
class EmrSketchItem(SketchItem):
def has_configuration(self, name: str) -> bool:
"""
Determines if a configuration (with given classification name) exists.
:param name: classification name
:return: true if the configuration exists; false otherwise
"""
return self._has_in_list_dict('configurations', 'Classification', name)
def get_configuration(self, name: str) -> Optional[Dict[str, Any]]:
"""
Gets configuration by classification name
:param name: classification name
:return: configuration if exists; None otherwise
"""
return self._get_in_list_dict('configurations', 'Classification', name)
def get_configurations(self) -> List[Dict[str, Any]]:
"""
Get all defined configurations
:return: all defined configurations
"""
return self._get_list_dict('configurations')
def remove_configuration(self, name: str) -> None:
"""
Removes configuration if exists.
:param name: classification name
:return: nothing
"""
self._remove_in_list_dict('configurations', 'Classification', name)
def put_configuration(self, name: str, properties: Dict[str, Any]) -> None:
"""
Adds or replaces configuration.
:param name: classification name
:param properties: properties
:return: nothing
"""
self._remove_in_list_dict('configurations', 'Classification', name)
self._put_in_list('configurations', {
'Classification': name,
'Properties': properties
})
def put_configurations(self, configurations: List[Dict[str, Any]]) -> None:
"""
        Add or replace one or more configurations (raw).
:param configurations: raw configurations
:return: nothing
"""
names = map(lambda c: c['Classification'], configurations)
for name in names:
self.remove_configuration(name)
for config in configurations:
self.put_configuration(config['Classification'], config['Properties'])
def has_bootstrap_script(self, name: str) -> bool:
return self._has_in_list_dict('bootstrap_scripts', 'name', name)
def remove_bootstrap_script(self, name: str) -> None:
self._remove_in_list_dict('bootstrap_scripts', 'name', name)
def put_bootstrap_script(self, name: str, path: str, args: List[str]) -> None:
self._put_in_list('bootstrap_scripts', {
'name': name,
'path': path,
'args': args
})
def get_bootstrap_scripts(self) -> List[Dict[str, Any]]:
return self._get_list_dict('bootstrap_scripts')
def get_bootstrap_script(self, name: str) -> Dict[str, Any]:
return self._get_in_list_dict('bootstrap_scripts', 'name', name)
def set_log_uri(self, log_uri: str) -> None:
self['log_uri'] = log_uri
def get_log_uri(self) -> str:
return self._get('log_uri')
def has_subnet(self, subnet: str) -> bool:
return self._has_in_list('subnets', subnet)
def remove_subnet(self, subnet: str) -> None:
self._remove_in_list('subnets', subnet)
def put_subnet(self, subnet: str) -> None:
self.remove_subnet(subnet)
self._put_in_list('subnets', subnet)
def get_subnets(self) -> List[str]:
return self._get_list('subnets')
def put_security_groups(self, sg: Dict[str, Any]) -> None:
self['security_groups'] = sg
def get_security_groups(self) -> Dict[str, Any]:
return self['security_groups'] if 'security_groups' in self else {
'AdditionalSlaveSecurityGroups': [],
'EmrManagedSlaveSecurityGroup': 'TODO',
'EmrManagedMasterSecurityGroup': 'TODO',
'AdditionalMasterSecurityGroups': []
}
def set_job_flow_role(self, job_flow_role: str) -> None:
self['job_flow_role'] = job_flow_role
def get_job_flow_role(self) -> str:
return self['job_flow_role'] if 'job_flow_role' in self else 'IamInstanceProfile'
def set_service_role(self, job_flow_role: str) -> None:
self['service_role'] = job_flow_role
def get_service_role(self) -> str:
return self['service_role'] if 'service_role' in self else 'EMR_DefaultRole'
def set_keyname(self, keyname: str) -> None:
self['keyname'] = keyname
def get_keyname(self) -> str:
return self._get('keyname')
def put_tags(self, tags: List[Dict[str, str]]) -> None:
names = map(lambda t: t['Key'], tags)
for name in names:
self._remove_in_list_dict('tags', 'Key', name)
for tag in tags:
self._put_in_list('tags', tag)
def get_tags(self) -> List[Dict[str, str]]:
return self._get_list_dict('tags')
def put_instance_fleet(self, name: str, master_capacity: int, core_capacity: int, spot: bool):
self['instance_fleet'] = {
'master': {
'instance_fleet_name': name,
'TargetOnDemandCapacity': master_capacity,
'TargetSpotCapacity': 0,
'on_demand_allocation_strategy': 'LOWEST_PRICE'
},
'core': {
'instance_fleet_name': name,
'TargetOnDemandCapacity': core_capacity if not spot else 0,
'TargetSpotCapacity': core_capacity if spot else 0,
'on_demand_allocation_strategy': 'LOWEST_PRICE'
}
}
def get_instance_fleet(self):
return self._get('instance_fleet')
def get_instance_fleets(self):
return self._get('instance_fleets')
def put_instance_groups(self, master_instance: str, core_instance: str, count: int, spot: bool):
self['instance_groups'] = {
'master': {
'Market': 'ON_DEMAND',
'InstanceType': master_instance,
'InstanceCount': 1
},
'core': {
'Market': 'ON_DEMAND' if not spot else 'SPOT',
'InstanceType': core_instance,
'InstanceCount': count
}
}
def get_instance_groups(self):
return self._get('instance_groups')
def set_emr_label(self, emr: str):
self['emr_label'] = emr
def get_emr_label(self) -> Optional[str]:
return self._get('emr_label')
def set_cluster_name(self, name: str):
self['cluster_name'] = name
def get_cluster_name(self) -> Optional[str]:
return self._get('cluster_name')
def set_applications(self, applications: List[str]):
self['applications'] = applications
def get_applications(self) -> Optional[List[str]]:
return self._get('applications')
def set_protect(self, protect: bool):
self['TerminationProtected'] = protect
def get_protect(self) -> Optional[bool]:
return self._get('TerminationProtected')
def set_master_size_gb(self, size: int) -> None:
self['master_size_gb'] = size
def get_master_size_gb(self) -> Optional[int]:
return self._get('master_size_gb')
def set_core_size_gb(self, size: int) -> None:
self['core_size_gb'] = size
def get_core_size_gb(self) -> Optional[int]:
return self._get('core_size_gb')
    def generate(self):
        # merge the sketch content with derived defaults (plain dicts cannot be combined with '+')
        return {
            **self.content(),
            'job_flow_role': self.get_job_flow_role(),
            'service_role': self.get_service_role(),
            'security_groups': self.get_security_groups(),
            'instance_fleets': EmrSketchItem._generate_instance_fleets()
        }
@staticmethod
def _generate_instance_fleets():
result = {}
for instance, values in ec2_instances.items():
name = 'mem/cpu=' + str(round(values['memory'] / values['cpu'], 2))
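            # e.g. a hypothetical 16-vCPU / 64-GiB instance gets
            # weight = int(max(16 / 4, 64 / 32)) = 4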
weight = int(max(values['cpu'] / 4, values['memory'] / 32))
if weight > 0:
value = {
'InstanceType': instance,
'WeightedCapacity': weight
}
if values['storage'] > 0:
result.setdefault('ssd;' + name, []).append(value)
else:
result.setdefault('ebs;' + name, []).append(value)
result.setdefault(name, []).append(value)
return result
@staticmethod
def from_cluster(cluster_id: str):
emr_item = EmrSketchItem()
emr = EMR(verbose=False)
cluster = emr.describe_cluster(cluster_id)
ec2 = cluster['Ec2InstanceAttributes']
for b in cluster['BootstrapActions']:
emr_item.put_bootstrap_script(b['Name'], b['ScriptPath'], b['Args'])
subnets = ec2['RequestedEc2SubnetIds'] if ec2['RequestedEc2SubnetIds'] else [ec2['Ec2SubnetId']]
for subnet in subnets:
emr_item.put_subnet(subnet)
security_groups = {
'EmrManagedMasterSecurityGroup': ec2['EmrManagedMasterSecurityGroup'],
'EmrManagedSlaveSecurityGroup': ec2['EmrManagedSlaveSecurityGroup'],
}
if 'AdditionalMasterSecurityGroups' in ec2:
security_groups['AdditionalMasterSecurityGroups'] = ec2['AdditionalMasterSecurityGroups']
if 'AdditionalSlaveSecurityGroups' in ec2:
security_groups['AdditionalSlaveSecurityGroups'] = ec2['AdditionalSlaveSecurityGroups']
if 'ServiceAccessSecurityGroup' in ec2:
security_groups['ServiceAccessSecurityGroup'] = ec2['ServiceAccessSecurityGroup']
emr_item.put_security_groups(security_groups)
emr_item.set_job_flow_role(ec2['IamInstanceProfile'])
# template.set_service_role(???)
emr_item.set_log_uri(cluster['LogUri'])
emr_item.put_configurations(cluster['Configurations'])
emr_item.put_tags(cluster['Tags'])
if 'Ec2KeyName' in ec2:
emr_item.set_keyname(ec2['Ec2KeyName'])
return emr_item
@staticmethod
def from_content(content: Dict[str, Any]):
emr_item = EmrSketchItem()
emr_item.content = content
return emr_item
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import numpy.testing as npt
from reagent.core.parameters import ProblemDomain
from reagent.gym.envs import Gym
from reagent.gym.envs.wrappers.simple_minigrid import SimpleObsWrapper
from reagent.gym.utils import create_df_from_replay_buffer
from reagent.preprocessing.sparse_to_dense import PythonSparseToDenseProcessor
from reagent.test.base.horizon_test_base import HorizonTestBase
logger = logging.getLogger(__name__)
class TestEnv(SimpleObsWrapper):
"""
Wrap Gym environment in TestEnv to save the MiniGrid's
observation, action, reward and terminal in a list so that
we can check if replay buffer is working correctly
"""
def __init__(self, env):
self.env = env
self.action_space = self.env.action_space
# mdp_id, sequence_number, state, action, reward, terminal
self.sart = []
self.mdp_id = -1
self.sequence_number = 0
def seed(self, *args, **kwargs):
return self.env.seed(*args, **kwargs)
def reset(self, **kwargs):
self.mdp_id += 1
self.sequence_number = 0
res = self.env.reset(**kwargs)
self.sart.append([self.mdp_id, self.sequence_number, res, None, None, None])
return res
def step(self, action):
res = self.env.step(action)
(
_,
_,
last_state,
last_action,
last_reward,
last_terminal,
) = self.sart[-1]
assert (
last_state is not None
and last_action is None
and last_reward is None
and last_terminal is None
)
next_state, reward, terminal, _ = res
self.sart[-1][3] = action
self.sart[-1][4] = reward
self.sart[-1][5] = terminal
self.sequence_number += 1
self.sart.append(
[self.mdp_id, self.sequence_number, next_state, None, None, None]
)
return res
class TestGymReplayBuffer(HorizonTestBase):
def test_create_df_from_replay_buffer(self):
env_name = "MiniGrid-Empty-5x5-v0"
env = Gym(env_name=env_name)
state_dim = env.observation_space.shape[0]
# Wrap env in TestEnv
env = TestEnv(env)
problem_domain = ProblemDomain.DISCRETE_ACTION
DATASET_SIZE = 1000
multi_steps = None
DS = "2021-09-16"
# Generate data
df = create_df_from_replay_buffer(
env=env,
problem_domain=problem_domain,
desired_size=DATASET_SIZE,
multi_steps=multi_steps,
ds=DS,
shuffle_df=False,
)
self.assertEqual(len(df), DATASET_SIZE)
# Check data
preprocessor = PythonSparseToDenseProcessor(list(range(state_dim)))
for idx, row in df.iterrows():
df_mdp_id = row["mdp_id"]
env_mdp_id = str(env.sart[idx][0])
self.assertEqual(df_mdp_id, env_mdp_id)
df_seq_num = row["sequence_number"]
env_seq_num = env.sart[idx][1]
self.assertEqual(df_seq_num, env_seq_num)
df_state = preprocessor.process([row["state_features"]])[0][0].numpy()
env_state = env.sart[idx][2]
npt.assert_array_equal(df_state, env_state)
df_action = row["action"]
env_action = str(env.sart[idx][3])
self.assertEqual(df_action, env_action)
df_terminal = row["next_action"] == ""
env_terminal = env.sart[idx][5]
self.assertEqual(df_terminal, env_terminal)
if not df_terminal:
df_reward = float(row["reward"])
env_reward = float(env.sart[idx][4])
npt.assert_allclose(df_reward, env_reward)
df_next_state = preprocessor.process([row["next_state_features"]])[0][
0
].numpy()
env_next_state = env.sart[idx + 1][2]
npt.assert_array_equal(df_next_state, env_next_state)
df_next_action = row["next_action"]
env_next_action = str(env.sart[idx + 1][3])
self.assertEqual(df_next_action, env_next_action)
else:
del env.sart[idx + 1]
|
<filename>src/cli.py<gh_stars>0
#!/usr/bin/env python3
# coding: utf8
import sys
import argparse
import requests
from response_parser import ResponseParser
from hatena_photo_life_rss import HatenaPhotoLife
from wsse import WSSE
class CLI:
def __init__(self):
self.VERSION = '0.0.1'
    def parse(self):
parser = self._make_parser()
args = parser.parse_args()
print(args, file=sys.stderr)
self._cli_routing(args)
    def _cli_routing(self, args):
        if self._not_found_sub_commands(args):
            self._not_found_sub_commands_exec()
        else:
            self._found_sub_commands_exec(args)
    def _not_found_sub_commands(self, args):
        return not hasattr(args, 'handler')
    def _not_found_sub_commands_exec(self):
        self._make_parser().print_help()
        sys.exit(1)
    def _found_sub_commands_exec(self, args):
        # build the API client
        api = HatenaPhotoLife(
            WSSE.from_json(
                Path.here('secret.json'),
                Path.here('secret-schema.json')))
        # run the selected sub-command
        args.handler(args, api)
def _make_parser(self):
parser = argparse.ArgumentParser(description=f'画像をアップロードする。はてなフォトライフへ。 {self.VERSION}')
sub = parser.add_subparsers()
# post
parser_post = sub.add_parser('post', help='アップロードする。`post -h`')
parser_post.add_argument('path', help='画像ファイルパス')
parser_post.add_argument('-t', '--title', help='画像のタイトル(初期値=pathのファイル名)')
parser_post.add_argument('-f', '--folder', help='アップロード先のフォルダ名')
parser_post.add_argument('-g', '--generator', help='アップロードしたツール名(フォルダ振分用)')
parser_post.add_argument('-p', '--response-parser', help='API応答パーサのパス(LinedResponseParser.py)')
        parser_post.set_defaults(handler=Command.post)
# set-title
parser_title = sub.add_parser('set-title', help='タイトルを変更する。`set-title -h`')
parser_title.add_argument('image_id', help='画像ID(yyyyMMddHHmmss)')
parser_title.add_argument('title', help='タイトル')
        parser_title.set_defaults(handler=Command.set_title)
# delete
parser_delete = sub.add_parser('delete', help='削除する。see `delete -h`')
parser_delete.add_argument('image_id', help='画像ID(yyyyMMddHHmmss)')
        parser_delete.set_defaults(handler=Command.delete)
# get
parser_get = sub.add_parser('get', help='取得する。`get -h`')
parser_get.add_argument('image_id', help='画像ID(yyyyMMddHHmmss)')
        parser_get.set_defaults(handler=Command.get)
# feed
parser_feed = sub.add_parser('feed', help='最新データをいくつか取得する。`feed -h`')
        parser_feed.set_defaults(handler=Command.feed)
return parser
class Command:
@staticmethod
def post(args, api):
print('command_post', file=sys.stderr)
res = api.post(args.path, title=args.title, folder=args.folder, generator=args.generator)
ResponseParser(res).parse()
@staticmethod
def set_title(args, api):
print('command_set_title', file=sys.stderr)
res = api.set_title(args.image_id, args.title)
ResponseParser(res).parse()
@staticmethod
def delete(args, api):
print('command_delete', file=sys.stderr)
res = api.delete(args.image_id)
ResponseParser(res).parse()
@staticmethod
def get(args, api):
print('command_get', file=sys.stderr)
res = api.get(args.image_id)
ResponseParser(res).parse()
@staticmethod
def feed(args, api):
print('command_feed', file=sys.stderr)
res = api.feed()
ResponseParser(res).parse()
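# Minimal entry-point sketch; the module defines CLI and Command but never
# instantiates them, so a guard along these lines is assumed for direct use:
# if __name__ == '__main__':
#     CLI().parse()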
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from psychopy.visual import Window, TextStim
from psychopy.core import wait, Clock, quit
from psychopy.event import clearEvents, waitKeys, Mouse
from psychopy.gui import Dlg
from time import gmtime, strftime
from codecs import open
from random import shuffle, choice, randint
from copy import deepcopy
from psychopy.iohub import launchHubServer
from numpy import mean, std
from datetime import datetime
from itertools import permutations
import random
## for testing
testing = False # True for testing, False for real recording
###
main_ddline = 1 # sec
isi_set = (500, 800, 1100)
instruction_color = '#111111' #formerly = #9999FF
############ MAIN ITEMS - paste from JS
probe_crime_list_1 = ' Ausgeben als : <NAME>\n\n Nachricht an Deckname : <NAME>\n\n Aktion : Operation Kuh\n\n Objekt : Regen Akte\n\n Inhalt des Objektes : Helikopter Pläne\n\n Adresse : Hai Straße'
probe_crime_list_2 = ' Ausgeben als : <NAME>\n\n Nachricht an Deckname : Weißes Shirt\n\n Aktion : Operation Fichte\n\n Objekt : Eulen Akte\n\n Inhalt des Objektes : Messing Pläne\n\n Adresse : Löwen Straße'
crime_list_1 = ["<NAME>", "<NAME>", "Operation Kuh", "Regen Akte", "Helikopter Pläne", "Hai Straße"]
crime_list_2 = ["<NAME>", "Weißes Shirt","Operation Fichte","Eulen Akte","Messing Pläne","Löwen Straße"]
dummy_list_numbers = [0, 1, 2, 3, 4, 5]
training_recall_item = {0 : 'Ausgeben als', 1 : 'Nachricht an Deckname', 2 : 'Aktion', 3 : 'Objekt', 4 : 'Inhalt des Objektes', 5 : 'Adresse'}
rounds = 1
if testing:
escape_key = 'escape'
instr_wait = 0.1
else:
escape_key = 'notallowed'
instr_wait = 0.5
# EXECUTE all main functions here
def execute():
start_input() # prompt to input stuff
# now initiate stuff
set_screen() # creates psychopy screen and stim objects
# window opens
create_file() # created output file
consent_instructions()
training_instruction()
which_round_indicator()
training_software()
which_round_indicator()
training_list()
training_software()
which_round_indicator()
training_list()
training_software()
final_slide()
win.mouseVisible = False # hide mouse
print("************** END OF LEARNING TASK **************")
ending() # saves demographic & final infos, gives feedback
waitKeys(keyList = ['b']) # press B to end the exp (prevents subject from closing window)
quit()
def consent_instructions():
show_instruction("Bitte füllen Sie die Einverständniserklärung zur Teilnahme am Experiment aus. \nSie sollten diese vor sich auf dem Tisch finden. Bei Unklarheiten oder weiteren Fragen heben Sie leise Ihre Hand.\nWenn Sie damit fertig sind, drücken Sie die Leertaste, um mit dem Experiment zu starten.")
show_instruction("Sie werden nun eine Reihe von Aufgaben am Computer durchführen. Bitte lesen und befolgen Sie die Anweisungen sorgfältig. Sollten Sie während des Experiments Fragen haben, melden Sie sich bei der Versuchsleitung, bevor Sie fortfahren.\nDrücken Sie die Leertaste, um die Anweisungen zu sehen.")
def which_round_indicator():
global condition
if rounds == 1:
show_instruction("Es folgt nun die erste Runde, in der die soeben gezeigten Wortpaare abgefragt werden. Geben Sie diese exakt so, wie sie Ihnen eben gezeigt wurden, ein. \nLeertaste drücken, um fortzufahren.")
elif rounds == 2:
show_instruction("Es folgen erneut alle Informationen, die Sie benötigen, wenn Sie sich als Komplize ausgeben. Damit diese Täuschung funktioniert, ist es sehr wichtig, dass jedes Detail der Nachricht korrekt ist. Bitte prägen Sie sich deshalb erneut alle Informationen ein. \nLeertaste drücken, um fortzufahren.")
elif rounds == 3:
show_instruction("Es folgt nun eine dritte und letzte Runde. Die Wortpaare werden noch einmal gezeigt, bevor diese ein letztes Mal abgefragt werden.\nLeertaste drücken, um fortzufahren.")
def training_instruction():
global condition
if condition % 2 != 0:
probe_crime_list = probe_crime_list_1
else:
probe_crime_list = probe_crime_list_2
show_instruction('Sie sollen eine Person kontaktieren, die unter Verdacht steht, kriminelle Aktivitäten begangen zu haben. Schreiben Sie dieser Person eine E-Mail, in der Sie um die Übergabe illegal erlangter Dokumente bitten. Dazu geben Sie sich als einer der Komplizen der Person aus und loggen sich in den Mail-Account dieses Komplizen ein. In der Nachricht bitten Sie den Verdächtigen, dass er Sie an einem bestimmten Ort trifft und die entsprechenden Dokumente bei sich hat. Die Informationen, die Sie für diese Aufgabe benötigen werden, werden Ihnen gleich präsentiert.\n\nDrücken Sie die Leertaste um fortzufahren.')
show_instruction('Für das Verfassen der E-Mail werden Sie die folgenden Informationen brauchen. Sie loggen sich in den Uni Wien Webmail Account des Komplizen ein und senden dann eine Nachricht an den Decknamen der anderen verdächtigen Person. Sie erklären dieser Person, dass es um eine bestimmte Aktion geht und bitten die Person, Sie an einer bestimmten Adresse zu treffen und zu diesem Treffen das genannte Objekt mit dem sich darin befindenden Inhalt mitzubringen. Drücken Sie daher erst die Leertaste, wenn Sie die unten stehenden Wortpaare, die für das Verfassen der Nachricht benötigt werden, gründlich auswendig gelernt haben. Im Folgenden werden diese in drei Runden abgefragt.\n\n' + probe_crime_list)
def training_list():
global condition
if condition % 2 != 0:
probe_crime_list = probe_crime_list_1
else:
probe_crime_list = probe_crime_list_2
show_instruction('Drücken Sie die Leertaste, wenn Sie die unten stehenden Items gründlich auswendig gelernt haben.\nSie loggen sich in den Uni Wien Webmail Account des Komplizen ein und senden dann eine Nachricht an den Decknamen der anderen verdächtigen Person. Sie erklären dieser Person, dass es um eine bestimmte Aktion geht und bitten die Person, Sie an einer bestimmten Adresse zu treffen und zu diesem Treffen das genannte Objekt mit dem sich darin befindenden Inhalt mitzubringen.\n\n' + probe_crime_list)
def training_software():
global condition, required, typedin, rounds
required_items = []
if condition % 2 != 0:
required_items = crime_list_1
else:
required_items = crime_list_2
combine_shuffle = list(zip(required_items, dummy_list_numbers))
shuffle(combine_shuffle)
required_items[:], dummy_list_numbers[:] = zip(*combine_shuffle)
counter = 0
while counter <= 5:
required = required_items[counter]
cue = training_recall_item[dummy_list_numbers[counter]]
counter += 1
instr_display = TextStim(win, color=instruction_color, font='Helvetica', text = u'Bitte geben Sie im Folgenden das korrekte, zuvor auswendig gelernte Wortpaar ein, drücken Sie dann ENTER.', pos=(0, 150), height=30, wrapWidth=1100, colorSpace='rgb')
input_prompt = TextStim(win, color=instruction_color, font='Helvetica', text = cue + ':', pos=(-100, 0), alignHoriz = 'right', height=35)
input_display = TextStim(win, color='black', pos=(-100, -4), alignHoriz = 'left', height=35, bold = True, colorSpace='rgb')
typedin = ''
while True:
input_display.setText(typedin)
instr_display.draw()
input_prompt.draw()
input_display.draw()
win.flip()
char = waitKeys()[0]
if char == 'backspace' and len(typedin) > 0:
typedin = typedin[:-1]
elif char == escape_key:
break
elif char == 'return':
if len( trm(typedin) ) > 0:
break
elif len(char) == 1 and char.isalpha():
typedin += char.upper()
elif char == 'space':
typedin += ' '
elif char == 'comma':
typedin += ','
typedin_words = trm(typedin)
add_resp()
if counter <= 5:
wait(0.5)
else:
break
rounds += 1
def final_slide():
show_instruction("Sie haben nun alle relevanten Informationen gelernt. Bitte führen Sie die Aufgabe nun aus, indem Sie im Google Chrome Browser auf webmail.univie.ac.at gehen und sich dort mit dem eingespeicherten user:account einloggen und die Nachricht mit den gelernten Informationen verfassen und senden. Wenden Sie sich bitte an die Versuchsleitung, um zum Desktop zu gelangen und führen Sie die Aufgabe dann eigenständig aus. Sollten Sie weitere Fragen haben, wenden Sie sich bitte ebenfalls an die Versuchsleitung.")
waitKeys(keyList = ['b'])
def set_screen(): # screen properties
global win, start_text, left_label, right_label, center_disp, instruction_page
win = Window([1280, 1000], color='#dddddd', fullscr = 1, units = 'pix', allowGUI = True) # 1280 1024
start_text = TextStim(win, color=instruction_color, font='Helvetica', text = u'Um anzufangen, bitte die Leertaste drücken.', pos = [0,-300], height=35, bold = True, wrapWidth= 1100)
left_label = TextStim(win, color='#111111', font='Verdana', text = 'unvertraut', pos = [-350,-160], height=35, alignHoriz='center')
right_label = TextStim(win, color='#111111', font='Verdana', text = 'vertraut', pos = [350,-160], height=35, alignHoriz='center')
center_disp = TextStim(win, color='#111111', font='Arial', text = '', height = 60)
instruction_page = TextStim(win, wrapWidth = 1200, height = 28, font='Helvetica', color = instruction_color)
def start_input():
global subj_id, dems, condition, gender
input_box = Dlg(title=u'Grunddaten', labelButtonOK=u'OK', labelButtonCancel=u'Abbrechen')
input_box.addText(text=u'')
input_box.addField(label=u'c.', tip = '1-8')
input_box.addField(label=u'VP', tip = 'Ziffern')
input_box.addText(text=u'')
input_box.addText(text=u'Bitte ausfüllen:')
input_box.addField(label=u'Geschlecht', initial = '', choices=[u'männlich',u'weiblich', u'divers'] )
input_box.addField(label=u'Alter', tip = 'Ziffern')
input_box.addText(text=u'')
input_box.show()
if input_box.OK:
stop = False
try:
condition = int(input_box.data[0])
except ValueError:
condition = 99
print("Condition must be a number!")
## CONDITIONS:
# use condition nos. for control vs. experimental group
# plus for guilty vs innocent block first
# 1 probes 1 + exp + crime first
# 2 probes 2 + exp + nocrime first
# 3 probes 1 + exp + nocrime first
# 4 probes 2 + exp + crime first
# 5 probes 1 + control + crime first
# 6 probes 2 + control + no crime first
# 7 probes 1 + control + no crime first
# 8 probes 2 + control + crime first first
# check if variables correctly given
if condition not in range(1,9):
if testing:
condition = 1 # set value for testing to skip Dlg input box
print("condition was not set, now set to " + str(condition) + " for testing.")
else:
print("condition was not set correctly (should be 1/2/3/4/5/6/7/8)")
stop = True
try:
subj_num = int(input_box.data[1])
except ValueError:
if testing:
subj_num = 99 # set value for testing to skip Dlg input box
print("subj_num was not set, now set to " + str(subj_num) + " for testing.")
else:
print("vp (subject number) was not set correctly (should be simple number)")
stop = True
try:
age = int(input_box.data[3])
except ValueError:
if testing:
age = 11 # set value for testing to skip Dlg input box
print("age was not set, now set to " + str(age) + " for testing.")
else:
print("age was not set correctly (should be simple number)")
stop = True
if stop:
print("\nTry again with correct inputs.\n")
quit()
subj_id = str(subj_num).zfill(3) + "_" + str(strftime("%Y%m%d%H%M%S", gmtime()))
if input_box.data[2] == 'weiblich':
gender = 2
elif input_box.data[2] == 'männlich':
gender = 1
else:
gender = 3
dems = 'dems\tgender/age\t' + str(gender) + '/' + str(age)
start_date = datetime.now()
else:
quit()
def create_file():
global data_out
f_name = 'lcp1_learning_' + str(condition) + "_" + subj_id + '.txt'
data_out=open(f_name, 'a', encoding='utf-8')
data_out.write( '\t'.join( [ "subject_id", "condition", "probe_item", "typed_in", "similarityscore", "rounds" ] ) + "\n" )
print("File created:", f_name)
def show_instruction(instruction_text):
instruction_page.setText(instruction_text)
instruction_page.draw()
win.flip()
wait(instr_wait)
inst_resp = waitKeys(keyList = ['space', escape_key])
end_on_esc(inst_resp[0])
def end_on_esc(escap):
if escap == escape_key : # escape
print("Trying to escape?")
instruction_page.setText('Sure you want to discontinue and quit the experiment?\n\nPress "y" to quit, or press "n" to continue.')
instruction_page.draw()
win.flip()
wait(1)
quit_resp = waitKeys(keyList = ['y', 'n'])
if quit_resp[0] == 'y':
print("************ ESCAPED ************")
data_out.close()
win.close()
quit()
else:
clearEvents()
print("Continuing...")
# from https://github.com/luosch/similar_text
def similar_str(str1, str2):
"""
return the len of longest string both in str1 and str2
and the positions in str1 and str2
"""
max_len = tmp = pos1 = pos2 = 0
len1, len2 = len(str1), len(str2)
for p in range(len1):
for q in range(len2):
tmp = 0
while p + tmp < len1 and q + tmp < len2 \
and str1[p + tmp] == str2[q + tmp]:
tmp += 1
if tmp > max_len:
max_len, pos1, pos2 = tmp, p, q
return max_len, pos1, pos2
def similar_char(str1, str2):
"""
return the total length of longest string both in str1 and str2
"""
max_len, pos1, pos2 = similar_str(str1, str2)
total = max_len
if max_len != 0:
if pos1 and pos2:
total += similar_char(str1[:pos1], str2[:pos2])
if pos1 + max_len < len(str1) and pos2 + max_len < len(str2):
            total += similar_char(str1[pos1 + max_len:], str2[pos2 + max_len:])
return total
def similar_text(str1, str2):
"""
return a int value in [0, 100], which stands for match level
"""
if not (isinstance(str1, str) or isinstance(str1, unicode)):
raise TypeError("must be str or unicode")
elif not (isinstance(str2, str) or isinstance(str2, unicode)):
raise TypeError("must be str or unicode")
elif len(str1) == 0 and len(str2) == 0:
return 0.0
else:
return int(similar_char(str1, str2) * 200.0 / (len(str1) + len(str2)))
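# e.g. similar_text("abc", "abc") == 100 and similar_text("abc", "xyz") == 0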
def trm(raw_inp):
return [w for w in raw_inp.replace(',', ' ').split(' ') if w != ''][:2]
def add_resp():
global condition, required
data_out.write( '\t'.join( [ str(subj_id), str(condition), str(required), str(typedin), str(similar_text(str(required.upper()), str(typedin)))]) + '\t' + str(rounds) + '\n' )
print(required, str(typedin), similar_text(str(required.upper()), str(typedin)))
def ending():
data_out.write(dems + "\n")
data_out.close()
show_instruction( "ENDE" )
# EXECUTE
execute()
|
import requests
import time
from bs4 import BeautifulSoup
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
}
def proceed(url, retry_time = 0):
"""
use link in proceed tag if whole article is not available
:type url: String
"""
    try:
        req = requests.get(url, headers=headers)
    except requests.RequestException:
        req = None
    if not req:
        if retry_time < 2:
            print("request for url " + url + " failed, retrying in 30 seconds")
            time.sleep(30)
            return proceed(url, retry_time=retry_time + 1)
        else:
            print("error in url " + url)
            return
html = req.text
soup = BeautifulSoup(html, 'html.parser')
proceed_bar = soup.find_all('ul', attrs = {"class": "actions", "role":"navigation"})
proceed_item = None
for item in proceed_bar:
if len(item.attrs['class']) == 1:
proceed_item = item
break
    if not proceed_item:
        print("bug happened: no proceed link found at " + url)
        return
link_obj = proceed_item.find_all('a')
for obj in link_obj:
if 'href' in obj.attrs and 'works' in obj.attrs['href']:
url = "https://www.archiveofourown.org" + obj.attrs['href']
try:
req = requests.get(url, headers = headers)
if not req:
print("request of proceed url " + url + " not found, retry in 30 seconds")
for _ in range(3):
time.sleep(30)
req = requests.get(url, headers = headers)
print("Retry: " + str(_))
if req:
break
if not req:
print("Fail to connect " + url)
return
html = req.text
soup = BeautifulSoup(html, 'html.parser')
if not soup:
print("request of proceed url " + url + " not found, retry in 30 seconds")
for _ in range(3):
time.sleep(30)
req = requests.get(url, headers = headers)
html = req.text
soup = BeautifulSoup(html, 'html.parser')
print("Retry: " + str(_))
if soup:
break
if not soup:
print("error in proceeded url " + url)
return soup
except:
print("Connection failed in proceed, please check your VPN setting or restart it, url: " + url)
return
def get_content(url, zh_cn_only = False, retry_time = 0):
"""
:type url: String
:type zh_cn_only: Bool, only download simplified Chinese articles?
:retry_time: time already retried
"""
succeed = True
raw_url = url
try:
req = requests.get(url, headers = headers)
html = req.text
except:
print("Connection failed, please check your VPN setting or restart it, url: " + url)
succeed = False
if not succeed:
return
    if not req:
        print("request for url " + url + " failed, retrying in 30 seconds")
        for _ in range(3):
            time.sleep(30)
            req = requests.get(url, headers=headers)
            print("Retry: " + str(_))
            if req:
                html = req.text
                break
soup = BeautifulSoup(html, 'html.parser')
if "If you proceed you have agreed that you are willing to see such content" in html:
soup = proceed(url)
    # if the work is split into chapters, use the "Entire Work" link that contains the whole text
if not soup:
return
entire_obj = soup.find('li', attrs = {"class": "chapter entire"})
if entire_obj:
link_obj = entire_obj.find('a')
if link_obj and 'href' in link_obj.attrs:
url = "https://www.archiveofourown.org" + link_obj.attrs['href']
try:
req = requests.get(url, headers = headers)
html = req.text
except:
if retry_time < 2:
print("Connection failed, retry in 20 seconds")
time.sleep(20)
                    return get_content(raw_url, zh_cn_only=zh_cn_only, retry_time=retry_time + 1)
else:
print("Connection failed, please check your VPN setting or restart it, url: " + url)
succeed = False
if not succeed:
return
            if not req:
                print("request for entire-work url " + url + " failed, retrying in 30 seconds")
                for _ in range(3):
                    time.sleep(30)
                    req = requests.get(url, headers=headers)
                    print("Retry: " + str(_))
                    if req:
                        html = req.text
                        break
soup = BeautifulSoup(html, 'html.parser')
if "If you proceed you have agreed that you are willing to see such content" in html:
soup = proceed(url)
if not soup:
if retry_time < 2:
print("Rejected by ao3, retry in 120 seconds")
time.sleep(120)
            return get_content(raw_url, zh_cn_only=zh_cn_only, retry_time=retry_time + 1)
else:
print(url + " rejected by ao3")
return
title_obj = soup.find('title')
    # If access is denied by AO3 (after many downloads, e.g. 50+ works, AO3
    # rejects requests and asks you to retry later)
    if not title_obj:
        if ("retry" in html or "Retry" in html) and retry_time < 2:
            print("Rejected by ao3, retry in 60 seconds")
            time.sleep(60)
            return get_content(raw_url, zh_cn_only=zh_cn_only, retry_time=retry_time + 1)
else:
print(url + " does not have a standard title")
return
title = title_obj.text.strip()
print("Downloading " + title + " from " + url)
if zh_cn_only:
language_obj = soup.find('dd', {"class": "language"})
if not language_obj or "中文" not in language_obj.text:
print(url + " is not written in Chinese")
return
content_obj = soup.find('div', attrs = {"id":"workskin"})
if not content_obj:
print("Page error, url:" + url)
return
content = content_obj.text.lstrip()
return content, title
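# A minimal usage sketch (hypothetical work id):
# result = get_content("https://www.archiveofourown.org/works/12345")
# if result:
#     content, title = result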
|
<gh_stars>0
# type: ignore
import random
from django.test import TestCase
from memrise.core.domains.entities import (
CourseEntity,
LevelEntity,
WordEntity,
)
from memrise.core.use_cases.dashboard import DashboardCourseContainer
class TestWordEntity(TestCase):
def test_entity(self):
word_id = 816
level_id = 14
main_word = "main word"
translate_word = "second word"
word_entity = WordEntity(
id=word_id, level_id=level_id, word_a=main_word, word_b=translate_word
)
word_as_dict = word_entity.dict()
expected = {
"id": 816,
"level_id": 14,
"word_a": "main word",
"word_b": "second word",
"is_learned": False,
}
self.assertDictEqual(word_as_dict, expected)
class TestLevelEntity(TestCase):
def test_entity(self):
level_id = 1342
number = 3
course_id = 14543
name = "TestLevel"
words = []
le = LevelEntity(
id=level_id, number=number, course_id=course_id, name=name, words=words
)
level_as_dict = le.dict()
expected = {
"id": 1342,
"number": 3,
"course_id": 14543,
"name": "TestLevel",
"words": [],
}
self.assertDictEqual(level_as_dict, expected)
def test_add_word(self):
"""Добавление по одной сущности слова WordEntity в объект черзе метод add_word"""
le = LevelEntity(number=3, course_id=14543, name="TestLevel", id=1)
self.assertListEqual(le.words, [])
word_entity1 = WordEntity(
id=1, word_a="essential", word_b="translate_word1", level_id=le.id
)
word_entity2 = WordEntity(
id=2, word_a="appropriate", word_b="translate_word2", level_id=le.id
)
        expected = [word_entity1, word_entity2]
        for word in expected:
            le.add_word(word)
        self.assertListEqual(le.words, expected)
def test_add_words(self):
"""Добавление множественных сущностей WordEntity в объект черзе метод add_words"""
le = LevelEntity(number=3, course_id=14543, name="TestLevel", id=1)
self.assertListEqual(le.words, [])
word_entity1 = WordEntity(
id=1, word_a="essential", word_b="translate_word1", level_id=le.id
)
word_entity2 = WordEntity(
id=2, word_a="appropriate", word_b="translate_word2", level_id=le.id
)
words = [word_entity1, word_entity2]
le.add_words(words)
self.assertListEqual(le.words, words)
class TestCourseEntity(TestCase):
def test_entity(self):
id = 618
name = "Course 1"
url = "/path/to/course"
difficult = 234
num_things = 123
num_levels = 2
difficult_url = "/path/to/difficult/words"
is_disable = True
ce = CourseEntity(
id=id,
name=name,
url=url,
difficult=difficult,
num_words=num_things,
num_levels=num_levels,
difficult_url=difficult_url,
is_disable=is_disable,
)
course_as_dict = ce.dict()
expected = {
"id": 618,
"name": "Course 1",
"url": "/path/to/course",
"difficult": 234,
"num_words": 123,
"num_levels": 2,
"difficult_url": "/path/to/difficult/words",
"levels_url": [],
"levels": [],
"is_disable": True,
}
self.assertDictEqual(course_as_dict, expected)
def test_add_level(self):
course_id = 1
ce = CourseEntity(
id=course_id,
name="Course 1",
url="/path/to/course",
difficult=234,
num_words=123,
num_levels=2,
difficult_url="/path/to/difficult/words",
)
level1 = LevelEntity(number=1, course_id=ce.id, name="TestLevel1", id=1)
level2 = LevelEntity(number=2, course_id=ce.id, name="TestLevel2", id=2)
expected = [level1, level2]
for level in expected:
ce.add_level(level)
self.assertListEqual(ce.levels, expected)
def test_add_levels(self):
course_id = 1
ce = CourseEntity(
id=course_id,
name="Course 1",
url="/path/to/course",
difficult=234,
num_words=123,
num_levels=2,
difficult_url="/path/to/difficult/words",
)
level1 = LevelEntity(number=1, course_id=ce.id, name="TestLevel1", id=1)
level2 = LevelEntity(number=2, course_id=ce.id, name="TestLevel2", id=2)
levels = [level1, level2]
ce.add_levels(levels)
self.assertListEqual(ce.levels, levels)
class TestDashboardEntity(TestCase):
def setUp(self) -> None:
self.de = DashboardCourseContainer()
def test_add_course(self):
id = random.getrandbits(10)
name = "Course 1"
url = "/path/to/course"
difficult = 234
num_things = 123
num_levels = 2
difficult_url = "/path/to/difficult/words"
ce = CourseEntity(
id=id,
name=name,
url=url,
difficult=difficult,
num_words=num_things,
num_levels=num_levels,
difficult_url=difficult_url,
)
self.assertEqual(self.de.get_courses(), [])
self.de.add_course(ce)
courses = self.de.get_courses()
self.assertEqual(len(courses), 1)
for course in courses:
self.assertEqual(course, ce)
def test_add_courses(self):
course_1 = CourseEntity(
id=1,
name="Course 1",
url="/path/to/course",
difficult=111,
num_words=121,
num_levels=11,
difficult_url="/path/to/difficult/words",
)
course_2 = CourseEntity(
id=2,
name="Course 2",
url="/path/to/course",
difficult=222,
num_words=122,
num_levels=12,
difficult_url="/path/to/difficult/words",
)
course_3 = CourseEntity(
id=3,
name="Course 3",
url="/path/to/course",
difficult=333,
num_words=123,
num_levels=13,
difficult_url="/path/to/difficult/words",
)
courses = [course_1, course_2, course_3]
self.assertEqual(self.de.get_courses(), [])
self.de.add_courses(courses)
stored_courses = self.de.get_courses()
        self.assertEqual(len(stored_courses), 3)
self.assertListEqual(stored_courses, courses)
def test_get_courses(self):
ce5 = CourseEntity(
id=5,
name="Course 5",
url="/path/to/course",
difficult=234,
num_words=123,
num_levels=2,
difficult_url="/path/to/difficult/words",
)
ce1 = CourseEntity(
id=1,
name="Course 1",
url="/path/to/course",
difficult=234,
num_words=123,
num_levels=2,
difficult_url="/path/to/difficult/words",
)
ce3 = CourseEntity(
id=3,
name="Course 3",
url="/path/to/course",
difficult=234,
num_words=123,
num_levels=2,
difficult_url="/path/to/difficult/words",
)
ce8 = CourseEntity(
id=8,
name="Course 8",
url="/path/to/course",
difficult=234,
num_words=123,
num_levels=2,
difficult_url="/path/to/difficult/words",
)
courses_entity = [ce5, ce3, ce1, ce8]
for course_entity in courses_entity:
self.de.add_course(course_entity)
courses = self.de.get_courses()
self.assertEqual(len(courses), 4)
ids = [course.id for course in courses]
expected = [1, 3, 5, 8]
self.assertEqual(ids, expected)
def test_purge(self):
ce5 = CourseEntity(
id=5,
name="Course 5",
url="/path/to/course",
difficult=234,
num_words=123,
num_levels=2,
difficult_url="/path/to/difficult/words",
)
ce1 = CourseEntity(
id=1,
name="Course 1",
url="/path/to/course",
difficult=234,
num_words=123,
num_levels=2,
difficult_url="/path/to/difficult/words",
)
ce3 = CourseEntity(
id=3,
name="Course 3",
url="/path/to/course",
difficult=234,
num_words=123,
num_levels=2,
difficult_url="/path/to/difficult/words",
)
ce8 = CourseEntity(
id=8,
name="Course 8",
url="/path/to/course",
difficult=234,
num_words=123,
num_levels=2,
difficult_url="/path/to/difficult/words",
)
courses_entity = [ce5, ce3, ce1, ce8]
for course_entity in courses_entity:
self.de.add_course(course_entity)
courses = self.de.get_courses()
self.assertEqual(len(courses), 4)
self.de.purge()
self.assertEqual(self.de.get_courses(), [])
|
<reponame>dcdanko/capalyzer
import math
import pandas as pd
from scipy.stats import gmean, entropy
from numpy.linalg import norm
from random import random, sample
import numpy as np
MIL = 1000 * 1000
# Alpha Diversity
def shannon_entropy(row, rarefy=0):
"""Return the shannon entropy of an iterable.
Shannon entropy is robust to rarefaction but we keep
the param for consistency.
"""
row_sum, H = sum(row), 0
for val in row:
val = val / row_sum
if val == 0:
continue
H += val * math.log2(val)
if H < 0:
H *= -1
return H
def richness(row, rarefy=0, count=False):
"""Return the richness of an iterable."""
if count:
return sum(row > 0)
row_sum, R = sum(row), 0
for val in row:
prob_success = val / row_sum
prob_fail = 1 - prob_success
prob_detect = 1 - (prob_fail ** rarefy)
if val and rarefy <= 0:
R += 1
else:
R += prob_detect
return int(R + 0.5)
def chao1(row, rarefy=0):
"""Return richnes of an iterable"""
row_sum, R, S, D = sum(row), 0, 0, 0.0000001
num_reads = MIL if math.isclose(row_sum, 1) else row_sum # default to 1M reads if compositional
num_reads = rarefy if rarefy > 0 else num_reads # if rarefy is set use that as read count
for val in row:
prob_success = val / row_sum
prob_fail = 1 - prob_success
prob_detect = 1 - (prob_fail ** num_reads)
if rarefy:
R += prob_detect
elif val:
R += 1
S += 1 if val == 1 else 0
D += 1 if val == 2 else 0
return R + (S ** 2) / (2 * D)
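# e.g. chao1([1, 1, 2]) sees 3 observed taxa, 2 singletons and 1 doubleton,
# giving roughly 3 + 2 ** 2 / (2 * 1) = 5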
# Beta Diversity
def clr(X):
_X = X + 0.0000001
_X = _X / norm(_X, ord=1)
g = gmean(_X)
_X = np.divide(_X, g)
_X = np.log(_X)
return _X
def rho_proportionality(P, Q):
_P, _Q = clr(P), clr(Q)
N = np.var(_P - _Q)
D = np.var(_P) + np.var(_Q)
return 1 - (N / D)
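# identical compositions give rho == 1, since var(_P - _Q) == 0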
def jensen_shannon_dist(P, Q):
_P = P / norm(P, ord=1)
_Q = Q / norm(Q, ord=1)
_M = 0.5 * (_P + _Q)
J = 0.5 * (entropy(_P, _M) + entropy(_Q, _M))
return math.sqrt(J)
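# identical distributions give 0; with natural-log entropy the maximum
# distance (for disjoint supports) is sqrt(ln 2), roughly 0.83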
# Rarefaction
def single_rarefaction(tbl, n=0):
"""Return the number of nonzero columns in tbl.
Select n rows at random if specified.
"""
if n and n > 0 and n < tbl.shape[0]:
tbl = tbl.loc[sample(list(tbl.index), n)]
return sum(tbl.sum(axis=0) > 0)
def rarefaction_analysis(tbl, ns=[], nsample=16, include_all=True):
"""Return a dataframe with two columns.
N, the number of samples and Taxa, the number of nonzero elements.
"""
result = []
if not ns:
ns = range(tbl.shape[0])
if include_all:
ns = list(ns) + [tbl.shape[0]]
for n in ns:
for _ in range(nsample):
result.append((n, single_rarefaction(tbl, n=n)))
return pd.DataFrame(result, columns=['N', 'Taxa'])
|
#!/usr/bin/env python
from color import Color, ColorHSV
from LPD8806 import LPD8806
from WS2801 import WS2801
#Not all LPD8806 strands are created equal.
#Some, like Adafruit's, use GRB order; the other common order is BRG.
#The library defaults to GRB but you can call strand.setChannelOrder(ChannelOrder)
#to set the order your strands use
class ChannelOrder:
RGB = [0,1,2] #Probably not used, here for clarity
GRB = [1,0,2] #Strands from Adafruit and some others (default)
BRG = [1,2,0] #Strands from many other manufacturers
class LEDStrip:
def __init__(self, leds, use_py_spi = True, dev="/dev/spidev0.0", driver="WS2801"):
#Variables:
# leds -- strand size
# dev -- spi device
if(driver == "WS2801"):
self.driver = WS2801(leds, use_py_spi, dev)
else:
#no alternate drivers for now. Here so they can be added later
self.driver = LPD8806(leds, use_py_spi, dev)
self.c_order = self.driver.channelOrder()
self.leds = leds
self.lastIndex = self.leds - 1
self.gamma = bytearray(256)
self.buffer = [0 for x in range(self.leds + 1)]
self.masterBrightness = 1.0
for led in range(self.leds):
self.buffer[led] = bytearray(3)
self.gamma = self.driver.gamma()
def update(self):
self.driver.update(self.buffer)
#Allows for easily using LED strands with different channel orders
def setChannelOrder(self, order):
self.c_order = order
#Set the master brightness for the LEDs 0.0 - 1.0
def setMasterBrightness(self, bright):
if(bright > 1.0 or bright < 0.0):
raise ValueError('Brightness must be between 0.0 and 1.0')
self.masterBrightness = bright
#Fill the strand (or a subset) with a single color using a Color object
def fill(self, color, start=0, end=0):
if start < 0:
start = 0
if end == 0 or end > self.lastIndex:
end = self.lastIndex
for led in range(start, end + 1): #since 0-index include end in range
self.__set_internal(led, color)
#Fill the strand (or a subset) with a single color using RGB values
def fillRGB(self, r, g, b, start=0, end=0):
self.fill(Color(r, g, b), start, end)
#Fill the strand (or a subset) with a single color using HSV values
def fillHSV(self, h, s, v, start=0, end=0):
self.fill(ColorHSV(h, s, v).get_color_rgb(), start, end)
#Fill the strand (or a subset) with a single color using a Hue value.
#Saturation and Value components of HSV are set to max.
def fillHue(self, hue, start=0, end=0):
self.fill(ColorHSV(hue).get_color_rgb(), start, end)
def fillOff(self, start=0, end=0):
self.fillRGB(0, 0, 0, start, end)
#internal use only. sets pixel color
def __set_internal(self, pixel, color):
if(pixel < 0 or pixel > self.lastIndex):
            return  #don't go out of bounds
self.buffer[pixel][self.c_order[0]] = self.gamma[int(color.r * self.masterBrightness)]
self.buffer[pixel][self.c_order[1]] = self.gamma[int(color.g * self.masterBrightness)]
self.buffer[pixel][self.c_order[2]] = self.gamma[int(color.b * self.masterBrightness)]
#Set single pixel to Color value
def set(self, pixel, color):
self.__set_internal(pixel, color)
#Set single pixel to RGB value
def setRGB(self, pixel, r, g, b):
color = Color(r, g, b)
self.set(pixel, color)
#Set single pixel to HSV value
def setHSV(self, pixel, h, s, v):
self.set(pixel, ColorHSV(h, s, v).get_color_rgb())
#Set single pixel to Hue value.
#Saturation and Value components of HSV are set to max.
def setHue(self, pixel, hue):
self.set(pixel, ColorHSV(hue).get_color_rgb())
#turns off the desired pixel
def setOff(self, pixel):
self.setRGB(pixel, 0, 0, 0)
#Turn all LEDs off.
def all_off(self):
self.fillOff()
self.update()
self.fillOff()
self.update()
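#A minimal usage sketch (assumes a strand wired to /dev/spidev0.0):
#strip = LEDStrip(32, driver="LPD8806")
#strip.setMasterBrightness(0.5)
#strip.fillHue(96)
#strip.update()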
|
from django.shortcuts import render, redirect
from . import models, forms
from utils.utils import hash_code, make_confirm_email
from django.conf import settings
from send_email import send_email
import datetime
def index(request):
if not request.session.get("is_login", None):
return redirect("/login/")
return render(request, "login/index.html")
def login(request):
if request.session.get("is_login", None): # 不允许重复登录
return redirect("/index/")
if request.method == "POST":
login_form = forms.UserForm(request.POST)
message = "用户名或密码不能为空!"
if login_form.is_valid():
username = login_form.cleaned_data.get("username")
password = login_form.cleaned_data.get("password")
            # if username.strip() and password:  # make sure username and password are non-empty
            # further validation could go here ...
try:
user = models.User.objects.get(name=username)
            except models.User.DoesNotExist:
message = "用户不存在!"
return render(request, "login/login.html", locals())
            # First, check whether the user has confirmed via email
if not user.has_confirmed:
message = "该用户还未经过邮件确认!"
return render(request, "login/login.html", locals())
if user.password == hash_code(password):
request.session["is_login"] = True
request.session["user_id"] = user.id
request.session["user_name"] = user.name
return redirect("/index/")
else:
message = "密码不正确!"
return render(request, "login/login.html", locals())
else:
return render(request, "login/login.html", locals())
login_form = forms.UserForm()
return render(request, "login/login.html", locals())
def register(request):
if request.session.get("is_login", None):
return redirect("/index/")
if request.method == "POST":
register_form = forms.RegisterForm(request.POST)
message = "请检查填写内容!"
if register_form.is_valid():
username = register_form.cleaned_data.get("username")
password_first = register_form.cleaned_data.get("password_first")
password_confirm = register_form.cleaned_data.get("password_confirm")
email = register_form.cleaned_data.get("email")
sex = register_form.cleaned_data.get("sex")
if password_first != password_confirm:
message = "两次输入的密码不同!"
return render(request, "login/register.html", locals())
else:
same_name_user = models.User.objects.filter(name=username)
if same_name_user:
message = "用户名已存在!"
return render(request, "login/register.html", locals())
same_email_user = models.User.objects.filter(email=email)
if same_email_user:
message = "该邮箱已被注册!"
return render(request, "login/register.html", locals())
                new_user = models.User.objects.create(name=username, password=hash_code(password_first),
                                                      email=email, sex=sex)
code = make_confirm_email(new_user)
send_email(email, code)
message = "请前往邮箱进行确认!"
return render(request, "login/confirm.html", locals())
else:
return render(request, "login/register.html", locals())
register_form = forms.RegisterForm()
return render(request, "login/register.html", locals())
def logout(request):
if not request.session.get("is_login", None):
return redirect("/login/")
    request.session.flush()  # delete the current session data and the session cookie; typically used after a user logs out
    # Alternatively, delete the session keys individually:
    # del request.session['is_login']
    # del request.session['user_id']
    # del request.session['user_name']
return redirect("/login/")
def user_confirm(request):
code = request.GET.get("code", None)
message = ""
try:
confirm = models.ConfirmEmail.objects.get(code=code)
    except models.ConfirmEmail.DoesNotExist:
message = "无效的确认请求!"
return render(request, "login/confirm.html", locals())
c_time = confirm.c_time
now = datetime.datetime.now()
if now > c_time + datetime.timedelta(settings.CONFIRM_DAYS):
confirm.user.delete()
        message = "Your confirmation email has expired; please register again!"
return render(request, "login/confirm.html", locals())
else:
confirm.user.has_confirmed = True
confirm.user.save()
confirm.delete()
        message = "Thanks for confirming; please log in with your account!"
return render(request, "login/confirm.html", locals())
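# A minimal urls.py wiring for these views might look like the sketch below.
# This is an illustration only: the app/module name "login" and the route for
# user_confirm are assumptions, not taken from this file.
#   from django.urls import path
#   from login import views
#   urlpatterns = [
#       path('index/', views.index),
#       path('login/', views.login),
#       path('register/', views.register),
#       path('logout/', views.logout),
#       path('confirm/', views.user_confirm),
#   ]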
|
<filename>notebookjs/_display.py
from IPython.core.display import display, HTML, Javascript
from string import Template, ascii_uppercase
import pkg_resources
import random
import re
import json
from ._comm import setup_comm_api
def id_generator(size=15):
"""Helper function to generate random div ids."""
chars = list(ascii_uppercase)
return ''.join(random.choice(chars) for i in range(size))
def make_html(library_list, main_function, parameter_dict, css_list):
"""Makes the HTML that will be added to the Notebook"""
# Loading Python CommAPI
comm_api_path = pkg_resources.resource_filename(__name__, "resources/CommAPI.js")
with open(comm_api_path, "r") as f:
comm_api_js = f.read()
# Making sure library_list and css_list are lists.
if type(library_list) is not list:
library_list = [library_list]
if type(css_list) is not list:
css_list = [css_list]
# Downloading web resources
for idx in range(len(library_list)):
if check_url(library_list[idx]):
library_list[idx] = download_url(library_list[idx])
for idx in range(len(css_list)):
if check_url(css_list[idx]):
css_list[idx] = download_url(css_list[idx])
# Adding CommAPI to library_list
library_list.insert(0, comm_api_js)
# Generating HTML
div_id = id_generator()
library_bundle = '\n\n'.join(library_list)
css_bundle = '\n'.join(css_list)
template_path = pkg_resources.resource_filename(__name__, "resources/template.html")
with open(template_path, "r") as f:
html_all_template = f.read()
html_all_template = Template(html_all_template)
return html_all_template.substitute(div_id=div_id,
library_bundle=library_bundle,
main_function=main_function,
parameter_dict=json.dumps(parameter_dict),
css_bundle=css_bundle)
# Regular expression to test whether a string is a URL
regex_url = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
def check_url(string):
"""Checks if the string argument is a URL"""
return re.match(regex_url, string) is not None
def download_url(url):
"""Downloads a URL file as a browser."""
import requests
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0'}
r = requests.get(url, headers=headers, stream=False)
return r.content.decode("utf-8")
def save_html(html_dest, library_list, main_function, data_dict = {}, callbacks = None, css_list=[]):
"""Saves the bundled code (output of execute_js) to an HTML file
Parameters
----------
html_dest : str
Path to the output HTML dest file. Example: "./output.html"
library_list : list of str
List of strings containing either 1) URL to a javascript library, 2) javascript code
main_function : str
Name of the main function to be called. The function will be called with two parameters:
<div_id>, for example "#my_div", and <data_dict>.
data_dict : dict
Dictionary containing the data to be passed to <main_function>
callbacks : dict
Dictionary of the form {<callback_str_id> : <python_function>}. The javascript library can
use callbacks to talk to python.
css_list : list of str
List of strings containing either 1) URL to a CSS stylesheet or 2) CSS styles
"""
    if callbacks is not None:
        print("Warning: Python callbacks do not work in a standalone HTML file.")
    print("Saving file...")
html_all = make_html(library_list, main_function, data_dict, css_list)
with open(html_dest, "w") as f:
f.write(html_all)
def execute_js(library_list, main_function, data_dict = {}, callbacks = {}, css_list=[]):
"""Executes a javascript function that can add content to an output div
Parameters
----------
library_list : list of str
List of strings containing either 1) URL to a javascript library, 2) javascript code
main_function : str
Name of the main function to be called. The function will be called with two parameters:
<div_id>, for example "#my_div", and <data_dict>.
data_dict : dict
Dictionary containing the data to be passed to <main_function>
callbacks : dict
Dictionary of the form {<callback_str_id> : <python_function>}. The javascript library can
use callbacks to talk to python.
css_list : list of str
List of strings containing either 1) URL to a CSS stylesheet or 2) CSS styles
"""
html_all = make_html(library_list, main_function, data_dict, css_list)
for callback_id in callbacks.keys():
setup_comm_api(callback_id, callbacks[callback_id])
display(HTML(html_all))
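# Usage sketch (illustrative only; the D3 URL, function name, and JS body below
# are assumptions, not part of this module):
#   plot_js = "function draw(div_id, data) { d3.select(div_id).text(JSON.stringify(data)); }"
#   execute_js(library_list=["https://d3js.org/d3.v5.min.js", plot_js],
#              main_function="draw",
#              data_dict={"x": [1, 2, 3]})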
|
import warnings
import inspect
import math
import argparse
import matplotlib.pyplot as plt
import matplotlib as mpl
import IPython.display
import numpy as np
from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import rle
import objsize
from scipy.signal import convolve2d
from scipy import signal
from scipy.io import savemat
from skimage.metrics import structural_similarity as ssim
from skimage.filters import unsharp_mask
from skvideo.measure import msssim
from cued_sf2_lab.familiarisation import load_mat_img, plot_image
from cued_sf2_lab import laplacian_pyramid
from cued_sf2_lab.laplacian_pyramid import bpp, quantise
from cued_sf2_lab.dwt import dwt, idwt
from cued_sf2_lab.dct import colxfm, regroup, dct_ii
from cued_sf2_lab.lbt import pot_ii
from cued_sf2_lab.jpeg2 import get_quantisation_step_ratio
from cued_sf2_lab.jpeg2 import jpegenc, jpegdec, dwtgroup
from cued_sf2_lab.jpeg2 import quant1, quant2
from cued_sf2_lab.jpeg2 import custom_quant1, custom_quant2, diagscan
from cued_sf2_lab import arithmetic
from cued_sf2_lab import pyae
# lighthouse, _ = load_mat_img(img='lighthouse.mat', img_info='X', cmap_info={'map', 'map2'})
# bridge, _ = load_mat_img(img='bridge.mat', img_info='X', cmap_info={'map'})
# flamingo, _ = load_mat_img(img='flamingo.mat', img_info='X')
# lighthouse = lighthouse-128.0
# bridge = bridge-128.0
# flamingo = flamingo-128.0
# img = bridge
class Encode:
def __init__(self,curr_min,curr_max,opthuff,s_range,Ns,enc_type,fmin,fmax):
quantisation_matrix1 =[[16,11,10,16,24,40,51,61], # Original JPEG
[12,12,14,19,26,58,60,55],
[14,13,16,24,40,57,69,56],
[14,17,22,29,51,87,80,62],
[18,22,37,56,68,109,103,77],
[24,35,55,64,81,104,113,92],
[49,64,78,87,103,121,120,101],
[72,92,95,98,112,100,103,99]]
quantisation_matrix2 = [[8,13,16,25,39,68,95,74], # Quality 95
[13,19,47,17,49,65,79,69],
[19,15,36,23,67,95,79,58],
[15,39,30,85,79,127,128,77],
[27,47,55,75,122,174,117,76],
[45,87,75,86,102,167,178,105],
[71,96,113,115,139,156,151,122],
[137,131,161,140,176,115,132,125]]
# Take the best out of them
self.matrices = [np.array(quantisation_matrix1),np.array(quantisation_matrix2)]
self.curr_min = curr_min
self.curr_max = curr_max
self.opthuff = opthuff
self.s_range = s_range
self.Ns = Ns
self.enc_type = enc_type
self.fmin = fmin
self.fmax = fmax
    def binary_search(self, X, s, N, dcbits, curr_min, curr_max, frequency_quant, quantisation_matrix, opthuff, enc_type):
        delta = 0.1
        min_bits = 40900
        max_bits = 40940
        valid_params = []
        qstep = curr_max  # ensure qstep is defined even if the first encode already lands in the target range
        vlc, bits2, huffval = jpegenc(X, curr_max, N=N, M=N, opthuff=opthuff, dcbits=dcbits, log=False, s=s, quantisation_matrix=quantisation_matrix, frequency_quant=frequency_quant, enc_type=enc_type)
        # x_rec = jpegdec(vlc, curr_max, N=N, M=N, bits=bits, huffval=huffval, dcbits=dcbits, W=256, H=256, log=True, s=s, quantisation_matrix=quantisation_matrix, frequency_quant=frequency_quant, enc_type=enc_type)
        bits = np.sum(vlc[:, 1]) + 1424  # encoded bits plus a fixed header overhead
while bits > max_bits or bits < min_bits:
print(bits,curr_max,curr_min)
qstep = (curr_max + curr_min)/2
vlc, bits2, huffval = jpegenc(X,qstep, N=N, M=N, opthuff=opthuff, dcbits=dcbits, log=False,s = s,quantisation_matrix = quantisation_matrix, frequency_quant = frequency_quant,enc_type = enc_type)
# x_rec = jpegdec(vlc, qstep, N=N, M=N, bits=bits, huffval=huffval, dcbits=dcbits, W=256, H=256, log=True,s=s,quantisation_matrix = quantisation_matrix,frequency_quant = frequency_quant,enc_type = enc_type)
bits = np.sum(vlc[:,1])+1424
if bits >= max_bits:
curr_min = qstep
elif bits <= min_bits:
curr_max = qstep
# we have ourselves an interval for curr_max and curr_min
if frequency_quant:
return [(dcbits,s,N,frequency_quant,round(qstep,4),quantisation_matrix,vlc,bits2,huffval)]
valid_params.append((dcbits,s,N,frequency_quant,round(qstep,4),vlc,bits2,huffval))
for step in range(int(curr_min/delta),int(curr_max/delta)):
step *= delta
vlc, bits2, huffval = jpegenc(X,step, N=N, M=N, opthuff=opthuff, dcbits=dcbits, log=False,s = s,quantisation_matrix = quantisation_matrix, frequency_quant = frequency_quant,enc_type = enc_type)
# x_rec = jpegdec(vlc, step, N=N, M=N, bits=bits, huffval=huffval, dcbits=dcbits, W=256, H=256, log=True,s=s,quantisation_matrix = quantisation_matrix,frequency_quant = frequency_quant,enc_type = enc_type)
bits = np.sum(vlc[:,1])+1424
if bits <= min_bits:
break
elif bits >= min_bits and bits <= max_bits:
valid_params.append((dcbits,s,N,frequency_quant,round(step,4),vlc,bits2,huffval))
return valid_params
def optimize_params(self,X): # Need to repurpose this for the arithmetic coding function
curr_min = self.curr_min
curr_max = self.curr_max
opthuff = self.opthuff
s_range = self.s_range
Ns = self.Ns
enc_type = self.enc_type
fmax = self.fmax
fmin = self.fmin
        # Reuse the quantisation matrices already built in __init__
        # (original JPEG and quality-95); take the best out of them.
        matrices = self.matrices
        # matrices = self.matrices[:1]  # alternative: original JPEG matrix only
valid_params = []
possible_dcbits = [6,7,8,9,10,11,12,13,14]
freq = [False,True]
# enc_type = 'lbt'
delta = 0.1
for s in s_range:
for N in Ns:
N = int(N)
print("S, N are {}, {}".format(s,N))
for frequency_quant in freq:
dcflag = False # if we don't get an error we are done
if frequency_quant:
if N == 8:
for quantisation_matrix in matrices:
for dcbits in possible_dcbits:
if dcflag:
break
try:
if not dcflag:
valid_params += self.binary_search(X,s,N,dcbits,fmin,fmax,frequency_quant, quantisation_matrix,opthuff,enc_type)
dcflag = True
                                    except Exception:
print("Error: Trying new values!")
continue
else:
for dcbits in possible_dcbits:
if dcflag:
break
try:
if not dcflag:
valid_params += self.binary_search(X,s,N,dcbits,curr_min,curr_max,frequency_quant, None,opthuff,enc_type)
dcflag = True
                            except Exception:
print("Error: Trying new values!")
continue
max_error = -float('inf')
min_error = float('inf')
optimum_x = 0
opt_param = [()]
optimum_vlc = []
sharp = np.array([[0, -1, 0],
[-1, 5, -1],
[0, -1, 0]])
for param in valid_params:
dcbits = param[0]
s = param[1]
N = param[2]
frequency_quant = param[3]
step = param[4]
if frequency_quant:
quantisation_matrix = param[5]
vlc = param[6]
bits = param[7]
huffval = param[8]
x_rec = jpegdec(vlc, step, N=N, M=N, bits=bits, huffval=huffval, dcbits=dcbits, W=256, H=256, log=True,s=s,quantisation_matrix = quantisation_matrix,frequency_quant = frequency_quant,enc_type = enc_type)
else:
quantisation_matrix = None
vlc = param[5]
bits = param[6]
huffval = param[7]
x_rec = jpegdec(vlc, step, N=N, M=N, bits=bits, huffval=huffval, dcbits=dcbits, W=256, H=256, log=True,s=s,quantisation_matrix = quantisation_matrix,frequency_quant = frequency_quant,enc_type = enc_type)
rec = x_rec + 128.0
rec = (rec-rec.min())/(rec.max()-rec.min())
rec2 = rec*255
met = (msssim(X+128.0,rec2) + ssim(X+128.0,rec2)-np.std((X+128.0)/255-rec2/255))/3
if met > max_error:
max_error = met
opt_param = param
optimum_vlc = vlc
if met < min_error:
min_error = met
return opt_param,optimum_vlc,valid_params
parser = argparse.ArgumentParser(description="Optimise JPEG-style encoder parameters for a target bit budget")
parser.add_argument("--image_dir", default="bridge.mat", type=str, help="Path to the input image .mat file (default: bridge.mat)")
args = parser.parse_args()
img,_ = load_mat_img(img=args.image_dir, img_info='X', cmap_info={})
img = img - 128.0
curr_min = 1
curr_max = 200
opthuff = True
s_range = np.arange(1.2,1.6,0.05)
Ns = np.array([4,8,16])
enc_type = 'lbt'
fmin = 0.7
fmax = 6.5
encoder = Encode(curr_min,curr_max,opthuff,s_range,Ns,enc_type,fmin,fmax)
opt_params,optimum_vlc,_ = encoder.optimize_params(img)
final_params = {
'dct_bits': opt_params[0],
's': opt_params[1],
'N': opt_params[2],
'freq':opt_params[3],
'step_ratio':opt_params[4],
'huffval':opt_params[-1],
'bits2': opt_params[-2],
'vlc': np.array(optimum_vlc)
}
#opt_dict = {"params":opt_params}
total_size = sum(optimum_vlc[:,1])+1424+1+int(np.log2(opt_params[2])+1)+32
print(total_size)
savemat("Group_13_vlc_params.mat",final_params)
|
<gh_stars>0
"""
Inference
---------
Module description
"""
import warnings
from abc import ABC, abstractmethod
from collections.abc import Iterable
import chainer
import chainer.functions as F
import numpy as np
from tqdm import tqdm
from brancher.optimizers import ProbabilisticOptimizer
from brancher.variables import DeterministicVariable, Variable, ProbabilisticModel
from brancher.transformations import truncate_model
from brancher.utilities import reassign_samples
from brancher.utilities import zip_dict
from brancher.utilities import sum_from_dim
# def maximal_likelihood(random_variable, number_iterations, optimizer=chainer.optimizers.SGD(0.001)):
# """
# Summary
#
# Parameters
# ---------
# random_variable : brancher.Variable
# number_iterations : int
# optimizer : chainer.optimizers
# Summary
# """
# prob_optimizer = ProbabilisticOptimizer(optimizer) #TODO: This function is not up to date
# prob_optimizer.setup(random_variable)
# loss_list = []
# for iteration in tqdm(range(number_iterations)):
# loss = -F.sum(random_variable.calculate_log_probability({}))
# prob_optimizer.chain.cleargrads()
# loss.backward()
# prob_optimizer.optimizer.update()
# loss_list.append(loss.data)
# return loss_list
def stochastic_variational_inference(joint_model, number_iterations, number_samples,
optimizer=chainer.optimizers.Adam(0.001),
input_values={}, inference_method=None,
posterior_model=None, sampler_model=None,
pretraining_iterations=0): #TODO: input values
"""
Summary
Parameters
---------
"""
if not inference_method:
warnings.warn("The inference method was not specified, using the default reverse KL variational inference")
inference_method = ReverseKL()
if not posterior_model:
posterior_model = joint_model.posterior_model
    if not sampler_model:  # TODO: clean up
        try:
            sampler_model = inference_method.sampler_model
        except AttributeError:
            try:
                sampler_model = joint_model.posterior_sampler
            except AttributeError:
                sampler_model = None
joint_model.update_observed_submodel()
optimizers_list = [ProbabilisticOptimizer(posterior_model, optimizer)]
if inference_method.learnable_model:
optimizers_list.append(ProbabilisticOptimizer(joint_model, optimizer))
if inference_method.learnable_sampler:
optimizers_list.append(ProbabilisticOptimizer(sampler_model, optimizer))
loss_list = []
inference_method.check_model_compatibility(joint_model, posterior_model, sampler_model)
for iteration in tqdm(range(number_iterations)):
loss = inference_method.compute_loss(joint_model, posterior_model, sampler_model, number_samples)
if np.isfinite(loss.data).all():
[opt.chain.cleargrads() for opt in optimizers_list]
loss.backward()
optimizers_list[0].update()
if iteration > pretraining_iterations:
[opt.update() for opt in optimizers_list[1:]]
else:
warnings.warn("Numerical error, skipping sample")
loss_list.append(loss.data)
joint_model.diagnostics.update({"loss curve": np.array(loss_list)})
inference_method.post_process(joint_model) #TODO: this could be implemented with a with block
class InferenceMethod(ABC):
#def __init__(self): #TODO: abstract attributes
# self.learnable_model = False
# self.needs_sampler = False
# self.learnable_sampler = False
@abstractmethod
def check_model_compatibility(self, joint_model, posterior_model, sampler_model):
pass
@abstractmethod
def compute_loss(self, joint_model, posterior_model, sampler_model, number_samples, input_values):
pass
@abstractmethod
def post_process(self, joint_model):
pass
class ReverseKL(InferenceMethod):
def __init__(self):
self.learnable_model = True
self.needs_sampler = False
self.learnable_sampler = False
def check_model_compatibility(self, joint_model, posterior_model, sampler_model):
pass #TODO: Check differentiability of the model
def compute_loss(self, joint_model, posterior_model, sampler_model, number_samples, input_values={}):
loss = -joint_model.estimate_log_model_evidence(number_samples=number_samples,
method="ELBO", input_values=input_values, for_gradient=True)
return loss
def post_process(self, joint_model):
pass
class WassersteinVariationalGradientDescent(InferenceMethod): #TODO: Work in progress
def __init__(self, variational_samplers, particles,
cost_function=None,
deviation_statistics=None,
biased=False,
number_post_samples=8000): #TODO: Work in progress
self.learnable_model = False #TODO: to implement later
self.needs_sampler = True
self.learnable_sampler = True
self.biased = biased
self.number_post_samples = number_post_samples
if cost_function:
self.cost_function = cost_function
else:
self.cost_function = lambda x, y: sum_from_dim((x - y) **2, dim_index=1)
if deviation_statistics:
self.deviation_statistics = deviation_statistics
else:
self.deviation_statistics = lambda lst: sum(lst)
def model_statistics(dic):
num_samples = list(dic.values())[0].shape[0]
reassigned_particles = [reassign_samples(p._get_sample(num_samples), source_model=p, target_model=dic)
for p in particles]
statistics = [self.deviation_statistics([self.cost_function(value_pair[0], value_pair[1]).data
for var, value_pair in zip_dict(dic, p).items()])
for p in reassigned_particles]
return np.array(statistics).transpose()
truncation_rules = [lambda a, idx=index: True if (idx == np.argmin(a)) else False
for index in range(len(particles))]
self.sampler_model = [truncate_model(model=sampler,
truncation_rule=rule,
model_statistics=model_statistics)
for sampler, rule in zip(variational_samplers, truncation_rules)]
def check_model_compatibility(self, joint_model, posterior_model, sampler_model):
        assert isinstance(sampler_model, Iterable) and all([isinstance(subsampler, (Variable, ProbabilisticModel))
                                                            for subsampler in sampler_model]), "The Wasserstein variational GD method requires a list of variables or probabilistic models as sampler"
# TODO: Check differentiability of the model
def compute_loss(self, joint_model, posterior_model, sampler_model, number_samples, input_values={}):
sampler_loss = sum([-joint_model.estimate_log_model_evidence(number_samples=number_samples, posterior_model=subsampler,
method="ELBO", input_values=input_values, for_gradient=True)
for subsampler in sampler_model])
particle_loss = self.get_particle_loss(joint_model, posterior_model, sampler_model, number_samples,
input_values)
return sampler_loss + particle_loss
def get_particle_loss(self, joint_model, particle_list, sampler_model, number_samples, input_values):
samples_list = [sampler._get_sample(number_samples, input_values=input_values)
for sampler in sampler_model]
if self.biased:
importance_weights = [1./number_samples for _ in sampler_model]
else:
importance_weights = [joint_model.get_importance_weights(q_samples=samples,
q_model=sampler,
for_gradient=False).flatten()
for samples, sampler in zip(samples_list, sampler_model)]
reassigned_samples_list = [reassign_samples(samples, source_model=sampler, target_model=particle)
for samples, sampler, particle in zip(samples_list, sampler_model, particle_list)]
pair_list = [zip_dict(particle._get_sample(1), samples)
for particle, samples in zip(particle_list, reassigned_samples_list)]
particle_loss = sum([F.sum(w*self.deviation_statistics([self.cost_function(value_pair[0], value_pair[1].data)
for var, value_pair in particle.items()]))
for particle, w in zip(pair_list, importance_weights)])
return particle_loss
def post_process(self, joint_model): #TODO: Work in progress
sample_list = [sampler._get_sample(self.number_post_samples)
for sampler in self.sampler_model]
self.weights = []
for sampler, s in zip(self.sampler_model, sample_list):
a = sampler.get_acceptance_probability(number_samples=self.number_post_samples)
_, Z = joint_model.get_importance_weights(q_samples=s,
q_model=sampler,
for_gradient=False,
give_normalization=True)
self.weights.append(a*Z)
self.weights /= np.sum(self.weights)
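# Usage sketch (illustrative; assumes `joint_model` is a brancher model with
# observed data already assigned and a posterior model attached):
#   stochastic_variational_inference(joint_model, number_iterations=500, number_samples=10)
#   loss_curve = joint_model.diagnostics["loss curve"]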
|
import mysql.connector
import tkinter as tk
from tkinter import *
from tkinter import ttk, messagebox
import pandas as pd
import matplotlib.pyplot as plt
class sql:
def __init__(self):
pass
def insert(self,name,phone,email,question,answer,password):
mydb = mysql.connector.connect(
host="127.0.0.1",
user="root",
password="<PASSWORD>"
)
mycursor = mydb.cursor()
mycursor.execute("use expense_tracker_app;")
sql = """INSERT INTO `expense_tracker_app`.
`admin`(`Username`, `Userphno`, `Usermailid`, `Securityquestion`, `Answer`, `Userpassword`)
VALUES(%s,%s,%s,%s,%s,%s)"""
values=(name,phone,email,question,answer,password)
mycursor.execute(sql,values)
mydb.commit()
print(mycursor.lastrowid, "record inserted.")
print(mycursor.rowcount)
def insertexpenseintoApp(self):
category_expense = dict()
def submit():
            def check(k, n):
                try:
                    category_expense[k] = float(n)
                except ValueError:
                    category_expense[k] = 0.00
num=App1.entry3.get()
num1=App1.entry4.get()
num2=App1.entry5.get()
num3=App1.entry6.get()
num4=App1.entry7.get()
num5=App1.entry8.get()
num6=App1.entry9.get()
num7=App1.entry10.get()
num8=App1.entry11.get()
num9=App1.entry12.get()
num10=App1.entry13.get()
num11=App1.entry14.get()
num12=App1.entry15.get()
num13=App1.entry16.get()
num14=App1.entry17.get()
            if len(num) > 9 or len(num1) > 9 or len(num2) > 9 or len(num3) > 9 or len(num4) > 9 or len(num5) > 9 or len(num6) > 9 or len(num7) > 9 or len(num8) > 9 or len(num9) > 9 or len(num10) > 9 or len(num11) > 9 or len(num12) > 9 or len(num13) > 9 or len(num14) > 9:
alert= Tk()
alert.geometry("100x100")
messagebox.showwarning("Warning", " Max 9 digits")
alert.mainloop()
if App1.month.current()==0:
alert1 = Tk()
alert1.geometry("100x100")
messagebox.showwarning("Warning", " Choose a month ")
alert1.mainloop()
check('Grocery', num)
check('Medicine',num1)
check('Shopping',num2)
check('Entertainment',num3)
check('Tax',num4)
check('Houserent',num5)
check('Houseeb',num6)
check('Housewb',num7)
check('Housefurniture',num8)
check('Houseelectronic',num9)
check('Housegas',num10)
check('Personal',num11)
check('Loan',num12)
check('Insurance',num13)
check('Education',num14)
print(category_expense)
self.insertexpenseintodb(category_expense, App1.month.get())
App1=Tk()
App1.title("EXPENSE DETAILS !!")
App1.geometry("700x1100")
App1.label = tk.Label(App1, text="RECORDING YOUR EXPENSES !!",font=("Helvetica", 10,'bold')).place(x=250,y=10)
App1.label1=tk.Label(App1,text='Select month :').place(x=10,y=50)
App1.month = tk.ttk.Combobox(App1, width=40, state="readonly")
App1.month.place(x=400,y=50)
App1.month['values'] = ('~~~None selected~~~',' Jan',' Feb', ' Mar', ' Apr',' May ', ' June ',' July ',
' Aug ',' Sep ',' Oct ',' Nov ',' Dec ')
App1.month.current(0)
App1.label2=tk.Label(App1,text='Current year').place(x=10,y=90)
App1.label18=tk.Label(App1,text="* Max no. of characters allowed 9 for expense field *",fg='red').place(x=400,y=110)
import datetime
x = datetime.datetime.now()
App1.label2=tk.Label(App1,text=x.year).place(x=400,y=90)
App1.label3=tk.Label(App1,text="Enter the amount spent on Grocery :",font=("Helvetica",10)).place(x=10,y=130)
App1.entry3=tk.Entry(App1)
App1.entry3.place(x=400, y=130)
App1.label4 = tk.Label(App1, text="Enter Medical expenses :",font=("Helvetica",10)).place(x=10, y=170)
App1.entry4 = tk.Entry(App1)
App1.entry4.place(x=400, y=170)
App1.label5 = tk.Label(App1, text="Enter Shopping expenses :",font=("Helvetica",10)).place(x=10, y=210)
App1.entry5 = tk.Entry(App1)
App1.entry5.place(x=400, y=210)
App1.label6= tk.Label(App1, text="Enter Entertainment expenses :",font=("Helvetica",10)).place(x=10, y=250)
App1.entry6 = tk.Entry(App1)
App1.entry6.place(x=400, y=250)
App1.label7 = tk.Label(App1, text="Enter Tax expenses :", font=("Helvetica",10)).place(x=10, y=290)
App1.entry7 = tk.Entry(App1)
App1.entry7.place(x=400, y=290)
App1.label8 = tk.Label(App1, text="Enter House-rent expenses :", font=("Helvetica",10)).place(x=10, y=330)
App1.entry8 = tk.Entry(App1)
App1.entry8.place(x=400, y=330)
App1.label9= tk.Label(App1, text="Enter House-EB expenses :", font=("Helvetica",10)).place(x=10, y=370)
App1.entry9= tk.Entry(App1)
App1.entry9.place(x=400, y=370)
App1.label10 = tk.Label(App1, text="Enter House-WaterBill expenses :", font=("Helvetica",10)).place(x=10, y=410)
App1.entry10 = tk.Entry(App1)
App1.entry10.place(x=400, y=410)
App1.label11 = tk.Label(App1, text="Enter House-Furniture expenses :", font=("Helvetica",10)).place(x=10, y=450)
App1.entry11 = tk.Entry(App1)
App1.entry11.place(x=400, y=450)
App1.label12 = tk.Label(App1, text="Enter House-Electronic expenses :", font=("Helvetica", 10)).place(x=10,y=490)
App1.entry12 = tk.Entry(App1)
App1.entry12.place(x=400, y=490)
App1.label13= tk.Label(App1, text="Enter House-Gas expenses :", font=("Helvetica",10)).place(x=10, y=530)
App1.entry13= tk.Entry(App1)
App1.entry13.place(x=400, y=530)
App1.label14 = tk.Label(App1, text="Enter Personal expenses :", font=("Helvetica",10)).place(x=10, y=570)
App1.entry14 = tk.Entry(App1)
App1.entry14.place(x=400, y=570)
App1.label15= tk.Label(App1, text="Enter Loan expenses :", font=("Helvetica",10)).place(x=10, y=610)
App1.entry15 = tk.Entry(App1)
App1.entry15.place(x=400, y=610)
App1.label16= tk.Label(App1, text="Enter Insurance expenses :", font=("Helvetica",10)).place(x=10, y=650)
App1.entry16 = tk.Entry(App1)
App1.entry16.place(x=400, y=650)
App1.label17= tk.Label(App1, text="Enter Educational expenses :", font=("Helvetica",10)).place(x=10, y=690)
App1.entry17 = tk.Entry(App1)
App1.entry17.place(x=400, y=690)
App1.button = tk.Button(App1, text="Submit", command=submit)
App1.button.place(bordermode=OUTSIDE, x=250, y=730, width=200, height=50)
App1.mainloop()
def insertexpenseintodb(self,d,m):
import datetime
x = datetime.datetime.now()
def graph(d,m):
c1=[]
a1=[]
for i in d:
a1.append(float(d[i]))
c1.append(i)
plotdata = pd.DataFrame(
{"Amount in (Rs.)": a1},
index=c1)
# Plot a bar chart
plotdata.plot(kind="bar",figsize=(10,10))
plt.xticks(rotation=30, horizontalalignment="center")
plt.xlabel("Expense Category")
plt.ylabel("Amount Spent(in Rs.)")
            plt.title("Expense Analysis for the month " + m + " " + str(x.year))
plt.show()
final_dict=dict()
for i in d:
if float(d[i])>0.00:
final_dict[i]=d[i]
total=0.0
for i in final_dict:
total+=float(final_dict[i])
print(total)
mydb1 = mysql.connector.connect(
host="127.0.0.1",
user="root",
password="<PASSWORD>"
)
mycursor1 = mydb1.cursor()
mycursor1.execute("use expense_tracker_app;")
        mycursor1.execute("SELECT Userid FROM `admin` WHERE Userid=(SELECT MAX(Userid) FROM `admin`);")
        userid = mycursor1.fetchone()
a=userid[0]
sql1 = """INSERT INTO `expense_tracker_app`.`cumulativeexpense` (`Userid`, `UserMonth`, `Currentyear`, `Totalexpense`) VALUES (%s, %s, %s,%s);"""
values1 = (a,m,x.year,total)
mycursor1.execute(sql1, values1)
mydb1.commit()
for i in final_dict:
mycursor1 = mydb1.cursor()
mycursor1.execute("use expense_tracker_app;")
mycursor1.execute("SELECT idCategory FROM `category` WHERE Category_name='%s';" % (i))
userid = mycursor1.fetchone()
print(userid)
print(type(userid))
sql1 = """INSERT INTO `expense_tracker_app`.`monthlyexpense` (`Userid`, `Month`, `idCategory`, `Amount`) VALUES (%s, %s, %s,%s);"""
values1 = (a, m, userid[0], float(final_dict[i]))
mycursor1.execute(sql1, values1)
mydb1.commit()
print(mycursor1.lastrowid)
print(mycursor1.rowcount)
graph(final_dict,m)
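# Usage sketch (illustrative; assumes the expense_tracker_app MySQL schema used
# above already exists and the connection credentials are filled in):
#   s = sql()
#   s.insert("alice", "5550100", "alice@example.com", "Pet name?", "rex", "secret")
#   s.insertexpenseintoApp()  # opens the tkinter form and records a month of expenses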
|
<gh_stars>0
import os, datetime
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
import matplotlib.pyplot as plt
batch_size = 32
seq_len = 49
d_k = 256
d_v = 256
n_heads = 12
ff_dim = 256
" data download https://finance.yahoo.com/quote/IBM/history?period1=950400&period2=1594512000&interval=1d&filter=history&frequency=1d"
df = pd.read_csv('./input/szzs.csv', delimiter=',', usecols=['Date', 'Close'])
# Apply moving average with a window of 10 days to all columns
df[[ 'Close']] = df[[ 'Close']].rolling(10).mean()
# Drop all rows with NaN values
df.dropna(how='any', axis=0, inplace=True)
'''Calculate percentage change'''
df['Close'] = df['Close'].pct_change() # Create arithmetic returns column
df.dropna(how='any', axis=0, inplace=True) # Drop all rows with NaN values
'''Normalize price columns'''
min_return = min(df[['Close']].min(axis=0))
max_return = max(df[['Close']].max(axis=0))
# Min-max normalize price columns (0-1 range)
df['Close'] = (df['Close'] - min_return) / (max_return - min_return)
'''Create training, validation and test split'''
print(df)
times = sorted(df.index.values)
last_30pct = sorted(df.index.values)[-int(0.3*len(times))]  # index marking the last 30% of the series
df_train = df[(df.index < last_30pct)]  # training data are the first 70% of the data
df_val = df[(df.index >= last_30pct)]  # validation and test sets are the same last 30% here
df_test = df[(df.index >= last_30pct)]
test_tick=df[(df.index >= last_30pct)]['Date'][49:]
# Remove date column
df_train.drop(columns=['Date'], inplace=True)
df_val.drop(columns=['Date'], inplace=True)
df_test.drop(columns=['Date'], inplace=True)
# Convert pandas columns into arrays
train_data = df_train.values
val_data = df_val.values
test_data = df_test.values
# Training data
X_train, y_train = [], []
for i in range(seq_len, len(train_data)):
    X_train.append(train_data[i-seq_len:i])  # chunks of training data, each seq_len (49) rows long
    y_train.append(train_data[:, 0][i])  # Close value of the row immediately after each chunk
X_train, y_train = np.array(X_train), np.array(y_train)
###############################################################################
# Validation data
X_val, y_val = [], []
for i in range(seq_len, len(val_data)):
X_val.append(val_data[i-seq_len:i])
y_val.append(val_data[:, 0][i])
X_val, y_val = np.array(X_val), np.array(y_val)
###############################################################################
# Test data
X_test, y_test = [], []
for i in range(seq_len, len(test_data)):
X_test.append(test_data[i-seq_len:i])
y_test.append(test_data[:, 0][i])
X_test, y_test = np.array(X_test), np.array(y_test)
print(X_train)
print(X_train.shape,y_train.shape)
print(y_train)
import model
model = model.create_model()
print(X_train)
print(y_train)
callback = tf.keras.callbacks.ModelCheckpoint('Transformer+TimeEmbedding_avg.hdf5',
monitor='val_loss',
save_best_only=True,
verbose=1)
history = model.fit(X_train, y_train,
batch_size=batch_size,
epochs=1,
                    steps_per_epoch=len(X_train)//batch_size,
callbacks=[callback],
validation_data=(X_val, y_val))
model.load_weights('Transformer+TimeEmbedding_avg.hdf5')
y=model.predict(X_test)
fig=plt.figure()
test_tick=test_tick.apply(lambda x:datetime.datetime.strptime(x,'%Y-%m-%d'))
true_y=y*(max_return - min_return)+min_return
true_ytest=y_test*(max_return - min_return)+min_return
df = pd.read_csv('./input/szzs.csv', delimiter=',', usecols=['Date','Close'])
real_close=df[(df.index >= last_30pct)]['Close'][48:-1].values
pre_close=[]
for i in range(len(real_close)):
predict_close=real_close[i]*(1+true_y[i])
pre_close.append(predict_close)
plt.plot(test_tick[-50:],df[(df.index >= last_30pct)]['Close'][49:].values[-50:],label="true",linewidth=1)
plt.plot(test_tick[-50:],pre_close[-50:],label="predict",linewidth=1)
plt.legend()
plt.show() |
<filename>test/management_interface_integration_tests.py
# Copyright PA Knowledge Ltd 2021
# For licence terms see LICENCE.md file
import os
import unittest
import subprocess
import requests
import json
import threading
from test_helpers import TestHelpers
from Emulator import launch_management_interface
from nose.plugins.attrib import attr
class MgmtInterfaceIntegrationTests(unittest.TestCase):
interface_server_thread = None
valid_port_config = None
config_filepath = 'Emulator/config/port_config.json'
users_filepath = 'Emulator/config/users.json'
ingress_port1 = None
ingress_port2 = None
@classmethod
def setUpClass(cls):
cls.start_interface_server()
cls.valid_port_config = TestHelpers.read_port_config()
cls.ingress_port1 = cls.valid_port_config["routingTable"][0]["ingressPort"]
cls.ingress_port2 = cls.valid_port_config["routingTable"][1]["ingressPort"]
try:
TestHelpers.wait_for_open_comms_ports("172.17.0.1", 8081, "zv")
except TimeoutError as ex:
print(f"Exception during setUpClass: {ex}")
cls.tearDownClass()
raise
@classmethod
def start_interface_server(cls):
use_anonymous_access = "True"
cls.interface_server_thread = threading.Thread(target=launch_management_interface.start_interface,
args=(8081, "basic", use_anonymous_access))
cls.interface_server_thread.start()
@classmethod
def tearDownClass(cls):
TestHelpers.delete_users_file(cls.users_filepath)
subprocess.run("docker stop management_interface".split())
cls.interface_server_thread.join()
TestHelpers.reset_port_config_file(cls.valid_port_config)
@classmethod
def tearDown(cls):
TestHelpers.reset_port_config_file(cls.valid_port_config)
subprocess.run("docker stop emulator 1>/dev/null 2>&1", shell=True)
subprocess.run("docker rm emulator 1>/dev/null 2>&1", shell=True)
def test_get_config_endpoint(self):
response = requests.get("http://172.17.0.1:8081/api/config/diode")
with open(self.config_filepath, 'r') as config_file:
expected = json.loads(config_file.read())
self.assertEqual(expected, json.loads(response.text))
def test_get_config_endpoint_no_auth_with_unauthorised_credentials(self):
response = requests.get("http://172.17.0.1:8081/api/config/diode", auth=("user", "passwrong"))
self.assertEqual(200, response.status_code)
def test_get_config_endpoint_no_auth_with_authorised_credentials(self):
response = requests.get("http://172.17.0.1:8081/api/config/diode", auth=("user", "password"))
with open(self.config_filepath, 'r') as config_file:
expected = json.loads(config_file.read())
self.assertEqual(expected, json.loads(response.text))
def test_power_on_endpoint(self):
TestHelpers.wait_for_closed_comms_ports("172.17.0.1", self.ingress_port1)
requests.post("http://172.17.0.1:8081/api/command/diode/power/on")
TestHelpers.wait_for_open_comms_ports("172.17.0.1", self.ingress_port1)
def test_power_on_endpoint_when_emulator_already_on_returns_200(self):
TestHelpers.wait_for_closed_comms_ports("172.17.0.1", self.ingress_port1)
requests.post("http://172.17.0.1:8081/api/command/diode/power/on")
TestHelpers.wait_for_open_comms_ports("172.17.0.1", self.ingress_port1)
response = requests.post("http://172.17.0.1:8081/api/command/diode/power/on")
self.assertEqual("Diode powered on", json.loads(response.text)["Status"])
TestHelpers.wait_for_open_comms_ports("172.17.0.1", self.ingress_port1)
def test_power_off_endpoint(self):
requests.post("http://172.17.0.1:8081/api/command/diode/power/on")
TestHelpers.wait_for_open_comms_ports("172.17.0.1", self.ingress_port1)
requests.post("http://172.17.0.1:8081/api/command/diode/power/off")
TestHelpers.wait_for_closed_comms_ports("172.17.0.1", self.ingress_port1)
def test_power_off_endpoint_when_emulator_off_returns_200(self):
TestHelpers.wait_for_closed_comms_ports("172.17.0.1", self.ingress_port1)
response = requests.post("http://172.17.0.1:8081/api/command/diode/power/off")
self.assertEqual(200, response.status_code)
def test_power_off_removes_emulator_container(self):
requests.post("http://172.17.0.1:8081/api/command/diode/power/on")
TestHelpers.wait_for_open_comms_ports("172.17.0.1", self.ingress_port1)
requests.post("http://172.17.0.1:8081/api/command/diode/power/off")
TestHelpers.wait_for_closed_comms_ports("172.17.0.1", self.ingress_port1)
requests.post("http://1172.16.17.32:8081/api/command/diode/power/on")
TestHelpers.wait_for_open_comms_ports("172.17.0.1", self.ingress_port1)
def test_update_config_endpoint(self):
requests.post("http://172.17.0.1:8081/api/command/diode/power/on")
TestHelpers.wait_for_open_comms_ports("172.17.0.1", self.ingress_port1)
TestHelpers.wait_for_open_comms_ports("172.17.0.1", self.ingress_port2)
TestHelpers.wait_for_closed_comms_ports("172.17.0.1", 40003)
with open(self.config_filepath, 'r') as config_file:
new_config = json.loads(config_file.read())
new_config["routingTable"][0]["ingressPort"] = 40003
self.assertEqual(200, requests.put("http://172.17.0.1:8081/api/config/diode",
json=new_config,
headers={"Content-Type": "application/json"}
).status_code)
TestHelpers.wait_for_open_comms_ports("172.17.0.1", 40003)
TestHelpers.wait_for_closed_comms_ports("172.17.0.1", self.ingress_port1)
TestHelpers.wait_for_open_comms_ports("172.17.0.1", self.ingress_port2)
def test_update_config_endpoint_returns_400_when_schema_check_fails(self):
with open(self.config_filepath, 'r') as config_file:
new_config = json.loads(config_file.read())
new_config["ingress"]["useDhcp"] = False
del new_config["ingress"]["adapters"]
requests.post("http://172.17.0.1:8081/api/command/diode/power/on")
TestHelpers.wait_for_open_comms_ports("172.17.0.1", self.ingress_port1)
response = requests.put("http://172.17.0.1:8081/api/config/diode",
json=new_config,
headers={"Content-Type": "application/json"})
self.assertEqual(400, response.status_code)
self.assertEqual("'adapters' is a required property", response.text)
def test_update_config_endpoint_returns_400_when_routing_table_is_too_long(self):
with open(self.config_filepath, 'r') as config_file:
new_config = json.loads(config_file.read())
new_config["routingTable"] = []
for i in range(0, 53):
table = {"ingressPort": i + 40000, "egressIpAddress":"emulator_tester_1", "egressSrcPort":50001, "egressDestPort":50001}
new_config["routingTable"].append(table)
requests.post("http://172.17.0.1:8081/api/command/diode/power/on")
TestHelpers.wait_for_open_comms_ports("172.17.0.1", self.ingress_port1)
response = requests.put("http://172.17.0.1:8081/api/config/diode",
json=new_config,
headers={"Content-Type": "application/json"})
self.assertEqual(400, response.status_code)
self.assertIn("is too long", response.text)
def test_missing_config_file_with_get_config_endpoint(self):
os.remove(self.config_filepath)
response = requests.get("http://172.17.0.1:8081/api/config/diode")
self.assertEqual("Config file does not exist", json.loads(response.text)["Status"])
def test_missing_config_file_with_power_on_endpoint(self):
os.remove(self.config_filepath)
response = requests.post("http://172.17.0.1:8081/api/command/diode/power/on")
self.assertEqual("Config file could not be found to power on diode", json.loads(response.text)["Status"])
def test_get_schema_endpoint(self):
response = requests.get("http://172.17.0.1:8081/api/config/diode/schema")
with open('Emulator/openapi/schema.json', 'r') as schema_file:
expected = json.loads(schema_file.read())
self.assertEqual(expected, json.loads(response.text))
def test_get_versioning_info_endpoint(self):
response = requests.get("http://172.17.0.1:8081/api/status/version")
with open('Emulator/VERSIONING', 'r') as versioning_file:
version_number = versioning_file.read()
status = {
"Diode": {
"Firmware": {
"F1": "0.0.0_rc0",
"F2": "0.0.0_rc0",
"F3": "0.0.0_rc0"
}
},
"Management": {
"Kernel": "0.00.00-cl-som-imx7-0.0 #1 SMP PREEMPT Mon Jan 01 00:00:00 UTC 1970",
"RestAPI": f"{version_number}-a0000a0a",
"RootFS": {
"Build": "000",
"Hash": "0a0a0000",
"MountPoint": "/dev/mmcblk2p0"
},
"DiskImage": {
"Build": "000",
"Hash": "0a0a0000"
}
}
}
self.assertEqual(json.dumps(status), response.text)
def test_get_status_info_endpoint(self):
response = requests.get("http://172.17.0.1:8081/api/status")
[self.assertIn(key, json.loads(response.text)["Dataplane"]["Counters"]) for key in ["f1fast", "f1slow", "f2fast", "f2slow", "f3fast", "f3slow"]]
self.assertEqual("0", json.loads(response.text)["Dataplane"]["Counters"]["f1fast"]["ethA"]["txFramesErr"])
self.assertEqual("0", json.loads(response.text)["Dataplane"]["Counters"]["f2fast"]["numBMPPacs"])
self.assertIn("coreid", json.loads(response.text)["Dataplane"]["Counters"]["f2slow"])
self.assertEqual("32.23", json.loads(response.text)["Dataplane"]["Status"]["F1"]["temperatures"]["DIE"])
self.assertEqual("1.36", json.loads(response.text)["Dataplane"]["Status"]["F1"]["voltages"]["1V35"])
self.assertEqual("39", json.loads(response.text)["Management"]["Status"]["temperatures"]["SOM"])
@attr("mgmt_firmware")
def test_update_mgmt_firmware_endpoint(self):
self.assertEqual(200, requests.put("http://172.17.0.1:8081/api/firmware/mgmt/update",
data=b'1234',
headers={"Content-Type": "application/octet-stream"}
).status_code)
@attr("mgmt_firmware")
def test_update_mgmt_firmware_fails_if_no_update_provided(self):
self.assertEqual(500, requests.put("http://172.17.0.1:8081/api/firmware/mgmt/update",
data=None,
headers={"Content-Type": "application/octet-stream"}
).status_code)
@attr("mgmt_firmware")
def test_update_diode_firmware_endpoint(self):
self.assertEqual(200, requests.put("http://172.17.0.1:8081/api/firmware/diode/update",
data=b'diode firmware',
headers={"Content-Type": "application/octet-stream"}
).status_code)
@attr("mgmt_firmware")
def test_update_diode_firmware_fails_if_no_update_provided(self):
self.assertEqual(500, requests.put("http://172.17.0.1:8081/api/firmware/diode/update",
data=None,
headers={"Content-Type": "application/octet-stream"}
).status_code)
if __name__ == '__main__':
SUITE = unittest.TestLoader().loadTestsFromTestCase(MgmtInterfaceIntegrationTests)
unittest.TextTestRunner(verbosity=5).run(SUITE)
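# Note: the "mgmt_firmware"-tagged tests can be deselected via the nose attrib
# plugin, e.g. (sketch):
#   nosetests -a '!mgmt_firmware' test/management_interface_integration_tests.py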
|
<reponame>jspeerless/citrine-python
"""A collection of FileLink objects."""
import mimetypes
import os
from enum import Enum
from logging import getLogger
from typing import Iterable, Optional, Tuple, Union, List, Dict
from uuid import UUID
import requests
from boto3 import client as boto3_client
from boto3.session import Config
from botocore.exceptions import ClientError
from gemd.entity.bounds.base_bounds import BaseBounds
from gemd.entity.file_link import FileLink as GEMDFileLink
from citrine._rest.collection import Collection
from citrine._rest.resource import Resource
from citrine._serialization.properties import List as PropertyList
from citrine._serialization.properties import Optional as PropertyOptional
from citrine._serialization.properties import String, Object, Integer
from citrine._serialization.serializable import Serializable
from citrine._session import Session
from citrine._utils.functions import write_file_locally, format_escaped_url
from citrine.jobs.job import JobSubmissionResponse, _poll_for_job_completion
from citrine.resources.response import Response
logger = getLogger(__name__)
class _Uploader:
"""Holds the many parameters that are generated and used during file upload."""
def __init__(self):
self.bucket = ''
self.object_key = ''
self.upload_id = ''
self.region_name = ''
self.aws_access_key_id = ''
self.aws_secret_access_key = ''
self.aws_session_token = ''
self.s3_version = ''
self.s3_endpoint_url = None
self.s3_use_ssl = True
self.s3_addressing_style = 'auto'
class FileProcessingType(Enum):
"""The supported File Processing Types."""
VALIDATE_CSV = "VALIDATE_CSV"
class FileProcessingData:
"""The base class of all File Processing related data implementations."""
pass
class CsvColumnInfo(Serializable):
"""The info for a CSV Column, contains the name, recommended and exact bounds."""
name = String('name')
""":str: name of the column"""
bounds = Object(BaseBounds, 'bounds')
""":BaseBounds: recommended bounds of the column (might include some padding)"""
exact_range_bounds = Object(BaseBounds, 'exact_range_bounds')
""":BaseBounds: exact bounds of the column"""
def __init__(self, name: String, bounds: BaseBounds,
exact_range_bounds: BaseBounds): # pragma: no cover
self.name = name
self.bounds = bounds
self.exact_range_bounds = exact_range_bounds
class CsvValidationData(FileProcessingData, Serializable):
"""The resulting data from the processed CSV file."""
columns = PropertyOptional(PropertyList(Object(CsvColumnInfo)), 'columns',
override=True)
""":Optional[List[CsvColumnInfo]]: all of the columns in the CSV"""
record_count = Integer('record_count')
""":int: the number of rows in the CSV"""
def __init__(self, columns: List[CsvColumnInfo],
record_count: int): # pragma: no cover
self.columns = columns
self.record_count = record_count
class FileProcessingResult:
"""
The results of a successful file processing operation.
The type of the actual data depends on the specific processing type.
"""
def __init__(self, processing_type: FileProcessingType, data: Union[Dict,
FileProcessingData]):
self.processing_type = processing_type
self.data = data
class FileLink(Resource['FileLink'], GEMDFileLink):
"""
Resource that stores the name and url of an external file.
Parameters
----------
filename: str
The name of the file.
url: str
URL that can be used to access the file.
"""
filename = String('filename', override=True)
url = String('url', override=True)
typ = String('type')
def __init__(self, filename: str, url: str):
GEMDFileLink.__init__(self, filename, url)
self.typ = GEMDFileLink.typ
@property
def name(self):
"""Attribute name is an alias for filename."""
return self.filename
def __str__(self):
return '<File link {!r}>'.format(self.filename)
def as_dict(self) -> dict:
"""Dump to a dictionary (useful for interoperability with gemd)."""
return self.dump()
class FileCollection(Collection[FileLink]):
"""Represents the collection of all file links associated with a dataset."""
_path_template = 'projects/{project_id}/datasets/{dataset_id}/files'
_individual_key = 'file'
_collection_key = 'files'
_resource = FileLink
def __init__(self, project_id: UUID, dataset_id: UUID, session: Session):
self.project_id = project_id
self.dataset_id = dataset_id
self.session = session
def build(self, data: dict) -> FileLink:
"""Build an instance of FileLink."""
return FileLink.build(data)
def _fetch_page(self,
page: Optional[int] = None,
per_page: Optional[int] = None) -> Tuple[Iterable[FileLink], str]:
"""
List all visible files in the collection.
Parameters
        ----------
page: int, optional
The "page" number of results to list. Default is the first page, which is 1.
per_page: int, optional
Max number of results to return for each call. Default is 20.
Returns
-------
Iterable[FileLink]
FileLink objects in this collection.
str
The next uri if one is available, empty string otherwise
"""
path = self._get_path()
params = {}
if page is not None:
params["page"] = page
if per_page is not None:
params["per_page"] = per_page
response = self.session.get_resource(path=path, params=params)
collection = response[self._collection_key]
return collection, ""
def _build_collection_elements(self, collection):
for file in collection:
yield self.build(self._as_dict_from_resource(file))
def _as_dict_from_resource(self, file: dict):
"""
Convert a file link resource downloaded from the API into a FileLink dictionary.
This is necessary because the database resource contains additional information that is
not in the FileLink object, such as file size and the id of the user who uploaded the file.
Parameters
        ----------
file: dict
A JSON dictionary corresponding to the file link as it is saved in the database.
Returns
-------
dict
A dictionary that can be built into a FileLink object.
"""
typ = 'file_link'
filename = file['filename']
# The field 'versioned_url' contains some information necessary to construct a file path,
# but does not contain project and dataset id. It also contains extraneous information.
# We assert that the 'versioned_url' "picks up" where the collection path leaves off
# (at "/files"). We take what comes after "/files" and combine it with the collection path
# to create the file url.
split_url = file['versioned_url'].split('/')
try:
split_collection_path = self._get_path().split('/')
overlap_index = split_url.index(split_collection_path[-1])
except ValueError:
raise ValueError("Versioned URL, '{}', cannot be joined with collection path "
"'{}'".format(file['versioned_url'], self._get_path()))
url = '/'.join(split_collection_path + split_url[overlap_index + 1:])
file_dict = {
'url': url,
'filename': filename,
'type': typ
}
return file_dict
def upload(self, *, file_path: str, dest_name: str = None) -> FileLink:
"""
Uploads a file to the dataset.
Parameters
----------
file_path: str
The path to the file on the local computer.
dest_name: str, optional
The name the file will have after being uploaded. If unspecified, the local name of
the file will be used. That is, the file at "/Users/me/diagram.pdf" will be uploaded
with the name "diagram.pdf". File names **must be unique** within a dataset. If a file
is uploaded with the same `dest_name` as an existing file it will be considered
a new version of the existing file.
Returns
-------
FileLink
The filename and url of the uploaded object.
"""
if not os.path.isfile(file_path):
raise ValueError("No file at specified path {}".format(file_path))
if not dest_name:
# Use the file name as a default dest_name
dest_name = os.path.basename(file_path)
uploader = self._make_upload_request(file_path, dest_name)
uploader = self._upload_file(file_path, uploader)
return self._complete_upload(dest_name, uploader)
def _make_upload_request(self, file_path: str, dest_name: str):
"""
Make a request to the backend to upload a file. Uses mimetypes.guess_type.
Parameters
----------
file_path: str
The path to the file on the local computer.
dest_name: str
The name the file will have after being uploaded.
Returns
-------
_Uploader
Holds the parameters returned by the upload request, for later use.
These must include region_name, aws_access_key_id, aws_secret_access_key,
aws_session_token, bucket, object_key, & upload_id.
"""
path = self._get_path() + "/uploads"
# This string coercion is for supporting pathlib.Path objects in python 3.6
mime_type = self._mime_type(str(file_path))
file_size = os.stat(file_path).st_size
assert isinstance(file_size, int)
upload_json = {
'files': [
{
'file_name': dest_name,
'mime_type': mime_type,
'size': file_size
}
]
}
# POST request creates space in S3 for the file and returns AWS-related information
# (such as temporary credentials) that allow the file to be uploaded.
upload_request = self.session.post_resource(path=path, json=upload_json)
uploader = _Uploader()
# Extract all relevant information from the upload request
try:
uploader.region_name = upload_request['s3_region']
uploader.aws_access_key_id = upload_request['temporary_credentials']['access_key_id']
uploader.aws_secret_access_key = \
upload_request['temporary_credentials']['secret_access_key']
uploader.aws_session_token = upload_request['temporary_credentials']['session_token']
uploader.bucket = upload_request['s3_bucket']
uploader.object_key = upload_request['uploads'][0]['s3_key']
uploader.upload_id = upload_request['uploads'][0]['upload_id']
uploader.s3_endpoint_url = self.session.s3_endpoint_url
uploader.s3_use_ssl = self.session.s3_use_ssl
uploader.s3_addressing_style = self.session.s3_addressing_style
except KeyError:
raise RuntimeError("Upload initiation response is missing some fields: "
"{}".format(upload_request))
return uploader
@staticmethod
def _mime_type(file_path: str):
mime_type = mimetypes.guess_type(file_path)[0]
if mime_type is None:
mime_type = "application/octet-stream"
return mime_type
@staticmethod
def _upload_file(file_path: str, uploader: _Uploader):
"""
Upload a file to S3.
Parameters
----------
file_path: str
The path to the file on the local computer.
uploader: _Uploader
Holds the parameters returned by the upload request.
Returns
-------
_Uploader
The input uploader object with its s3_version field now populated.
"""
additional_s3_opts = {
'use_ssl': uploader.s3_use_ssl,
'config': Config(s3={'addressing_style': uploader.s3_addressing_style})
}
if uploader.s3_endpoint_url is not None:
additional_s3_opts['endpoint_url'] = uploader.s3_endpoint_url
s3_client = boto3_client('s3',
region_name=uploader.region_name,
aws_access_key_id=uploader.aws_access_key_id,
aws_secret_access_key=uploader.aws_secret_access_key,
aws_session_token=uploader.aws_session_token,
**additional_s3_opts)
with open(file_path, 'rb') as f:
try:
# NOTE: This is only using the simple PUT logic, not the more sophisticated
# multipart upload approach that is also available (providing parallel
# uploads, etc).
upload_response = s3_client.put_object(
Bucket=uploader.bucket,
Key=uploader.object_key,
Body=f,
Metadata={"X-Citrine-Upload-Id": uploader.upload_id})
except ClientError as e:
raise RuntimeError("Upload of file {} failed with the following "
"exception: {}".format(file_path, e))
uploader.s3_version = upload_response['VersionId']
return uploader
def _complete_upload(self, dest_name: str, uploader: _Uploader):
"""
Indicate that the upload has finished and determine the file URL.
Parameters
----------
dest_name: str
The name the file will have after being uploaded.
uploader: _Uploader
Holds the parameters returned by the upload request and the upload response.
Returns
-------
FileLink
The filename and url of the uploaded object.
"""
path = self._get_path() + format_escaped_url("/uploads/{}/complete", uploader.upload_id)
complete_response = self.session.put_resource(path=path,
json={'s3_version': uploader.s3_version})
try:
file_id = complete_response['file_info']['file_id']
version = complete_response['file_info']['version']
except KeyError:
raise RuntimeError("Upload completion response is missing some "
"fields: {}".format(complete_response))
url = self._get_path(file_id) + format_escaped_url('/versions/{}', version)
return FileLink(filename=dest_name, url=url)
def download(self, *, file_link: FileLink, local_path: str):
"""
Download the file associated with a given FileLink to the local computer.
Parameters
----------
file_link: FileLink
Resource referencing the external file.
local_path: str
Path to save file on the local computer. If `local_path` is a directory,
then the filename of this FileLink object will be appended to the path.
"""
directory, filename = os.path.split(local_path)
if not filename:
filename = file_link.filename
local_path = os.path.join(directory, filename)
# The "/content-link" route returns a pre-signed url to download the file.
content_link_path = file_link.url + '/content-link'
content_link_response = self.session.get_resource(content_link_path)
pre_signed_url = content_link_response['pre_signed_read_link']
download_response = requests.get(pre_signed_url)
write_file_locally(download_response.content, local_path)
def process(self, *, file_link: FileLink,
processing_type: FileProcessingType,
wait_for_response: bool = True,
timeout: float = 2 * 60,
polling_delay: float = 1.0) -> Union[JobSubmissionResponse,
Dict[FileProcessingType,
FileProcessingResult]]:
"""
        Start a file processing async job, returning a pollable job response.

        :param file_link: The file to process.
        :param processing_type: The type of file processing to invoke.
        :param wait_for_response: If True (the default), poll until the job completes
            and return its results; otherwise return immediately.
        :param timeout: How long to poll before giving up, in (fractional) seconds.
        :param polling_delay: How long to wait between polling attempts, in seconds.
        :return: The processing results when `wait_for_response` is True, otherwise a
            JobSubmissionResponse which can be used to poll for the result.
"""
params = {"processing_type": processing_type.value}
response = self.session.put_resource(file_link.url + "/processed", json={},
params=params)
job = JobSubmissionResponse.build(response)
logger.info('Build job submitted with job ID {}.'.format(job.job_id))
if wait_for_response:
return self.poll_file_processing_job(file_link=file_link,
processing_type=processing_type,
job_id=job.job_id, timeout=timeout,
polling_delay=polling_delay)
else:
return job
def poll_file_processing_job(self, *, file_link: FileLink,
processing_type: FileProcessingType,
job_id: UUID,
timeout: float = 2 * 60,
polling_delay: float = 1.0) -> Dict[FileProcessingType,
FileProcessingResult]:
"""
        [ALPHA] Poll for the result of the file processing task.

        Parameters
        ----------
        file_link: FileLink
            The file being processed.
        processing_type: FileProcessingType
            The processing type whose result should be returned.
        job_id: UUID
            The background job ID to poll for.
        timeout:
            How long to poll for the result before giving up. This is expressed in
            (fractional) seconds.
        polling_delay:
            How long to delay between each polling retry attempt.

        Returns
        -------
        Dict[FileProcessingType, FileProcessingResult]
            The file processing results, mapped by processing type. This method raises
            an appropriate exception if the job failed.
"""
# Poll for job completion - this will raise an error if the job failed
_poll_for_job_completion(self.session, self.project_id, job_id, timeout=timeout,
polling_delay=polling_delay)
return self.file_processing_result(file_link=file_link, processing_types=[processing_type])
def file_processing_result(self, *,
file_link: FileLink,
processing_types: List[FileProcessingType]) -> \
Dict[FileProcessingType, FileProcessingResult]:
"""
Return the file processing result for the given file link and processing type.
Parameters
----------
file_link: FileLink
The file to process
processing_types: FileProcessingType
A list of the particular file processing types to retrieve
Returns
-------
Map[FileProcessingType, FileProcessingResult]
The file processing results, mapped by processing type.
"""
processed_results_path = file_link.url + '/processed'
params = []
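# Encode one 'processing_type' query parameter per requested type; a list of
# (key, value) tuples produces repeated query parameters with the same key.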
for proc_type in processing_types:
params.append(('processing_type', proc_type.value))
response = self.session.get_resource(processed_results_path, params=params)
results_json = response['results']
results = {}
for result_json in results_json:
processing_type = FileProcessingType[result_json['processing_type']]
data = result_json['data']
if processing_type == FileProcessingType.VALIDATE_CSV:
data = CsvValidationData.build(data)
result = FileProcessingResult(processing_type, data)
results[processing_type] = result
return results
def delete(self, file_link: FileLink):
"""
Delete the file associated with a given FileLink from the database.
Parameters
----------
file_link: FileLink
Resource referencing the external file.
"""
split_url = file_link.url.split('/')
assert split_url[-2] == 'versions' and split_url[-4] == 'files', \
"File URL is expected to end with '/files/{{file_id}}/version/{{version id}}', " \
"but FileLink instead has url {}".format(file_link.url)
file_id = split_url[-3]
data = self.session.delete_resource(self._get_path(file_id))
return Response(body=data)
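# --- Hedged usage sketch (illustrative, not part of the module above) ---
# Assumes "files" is an already-constructed file collection for your project and
# "link" is a FileLink returned by a previous upload; both names are hypothetical.
#
#     files.download(file_link=link, local_path='./data/')
#     results = files.process(file_link=link,
#                             processing_type=FileProcessingType.VALIDATE_CSV)
#     files.delete(link)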
|
<filename>venv/lib/python3.8/site-packages/azureml/core/compute/compute.py<gh_stars>0
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""Contains the abstract parent and configuration classes for compute targets in Azure Machine Learning."""
try:
from abc import ABCMeta
ABC = ABCMeta('ABC', (), {})
except ImportError:
from abc import ABC
from abc import abstractmethod
import json
import uuid
import requests
import sys
import time
from azureml._compute._constants import MLC_COMPUTE_RESOURCE_ID_FMT, MLC_LIST_COMPUTES_FMT
from azureml._compute._constants import RP_COMPUTE_RESOURCE_ID_FMT, RP_LIST_COMPUTES_FMT
from azureml._compute._constants import MLC_WORKSPACE_API_VERSION
from azureml._base_sdk_common.user_agent import get_user_agent
from azureml._base_sdk_common import _ClientSessionId
from azureml._compute._util import get_paginated_compute_results
from azureml._compute._util import get_requests_session
from azureml.exceptions import ComputeTargetException, UserErrorException
from azureml._restclient.clientbase import ClientBase
from azureml._restclient.constants import RequestHeaders
from dateutil.parser import parse
class ComputeTarget(ABC):
"""Abstract parent class for all compute targets managed by Azure Machine Learning.
A compute target is a designated compute resource/environment where you run your training script
or host your service deployment. This location may be your local machine or a cloud-based compute resource.
For more information, see `What are compute targets in Azure Machine
Learning? <https://docs.microsoft.com/azure/machine-learning/concept-compute-target>`_
.. remarks::
Use the ComputeTarget constructor to retrieve
the cloud representation of a Compute object associated with the provided workspace. The constructor
returns an instance of a child class corresponding to the specific type of the retrieved Compute object.
If the Compute object is not found, a :class:`azureml.exceptions.ComputeTargetException` is raised.
:param workspace: The workspace object containing the Compute object to retrieve.
:type workspace: azureml.core.Workspace
:param name: The name of the Compute object to retrieve.
:type name: str
"""
_compute_type = None
def __new__(cls, workspace, name):
"""Return an instance of a compute target.
ComputeTarget constructor is used to retrieve a cloud representation of a Compute object associated with the
provided workspace. Will return an instance of a child class corresponding to the specific type of the
retrieved Compute object.
:param workspace: The workspace object containing the Compute object to retrieve.
:type workspace: azureml.core.Workspace
:param name: The name of the Compute object to retrieve.
:type name: str
:return: An instance of a child of :class:`azureml.core.ComputeTarget` corresponding to the
specific type of the retrieved Compute object
:rtype: azureml.core.ComputeTarget
:raises azureml.exceptions.ComputeTargetException:
"""
if workspace and name:
compute_payload = cls._get(workspace, name)
if compute_payload:
compute_type = compute_payload['properties']['computeType']
is_attached = compute_payload['properties']['isAttachedCompute']
for child in ComputeTarget.__subclasses__():
if is_attached and compute_type == 'VirtualMachine' and child.__name__ == 'DsvmCompute':
# Cannot attach DsvmCompute
continue
elif not is_attached and compute_type == 'VirtualMachine' and child.__name__ == 'RemoteCompute':
# Cannot create RemoteCompute
continue
elif not is_attached and compute_type == 'Kubernetes' and child.__name__ == 'KubernetesCompute':
# Cannot create KubernetesCompute
continue
elif compute_type == child._compute_type:
compute_target = super(ComputeTarget, cls).__new__(child)
compute_target._initialize(workspace, compute_payload)
return compute_target
else:
raise ComputeTargetException('ComputeTargetNotFound: Compute Target with name {} not found in '
'provided workspace'.format(name))
else:
return super(ComputeTarget, cls).__new__(cls)
def __init__(self, workspace, name):
"""Class ComputeTarget constructor.
Retrieve a cloud representation of a Compute object associated with the provided workspace. Returns an
instance of a child class corresponding to the specific type of the retrieved Compute object.
:param workspace: The workspace object containing the Compute object to retrieve.
:type workspace: azureml.core.Workspace
:param name: The name of the Compute object to retrieve.
:type name: str
:return: An instance of a child of :class:`azureml.core.ComputeTarget` corresponding to
the specific type of the retrieved Compute object
:rtype: azureml.core.ComputeTarget
:raises azureml.exceptions.ComputeTargetException:
"""
pass
def __repr__(self):
"""Return the string representation of the ComputeTarget object.
:return: String representation of the ComputeTarget object.
:rtype: str
"""
return "{}(workspace={}, name={}, id={}, type={}, provisioning_state={}, location={}, " \
"tags={})".format(self.__class__.__name__,
self.workspace.__repr__() if hasattr(self, 'workspace') else None,
self.name if hasattr(self, 'name') else None,
self.id if hasattr(self, 'id') else None,
self.type if hasattr(self, 'type') else None,
self.provisioning_state if hasattr(self, 'provisioning_state') else None,
self.location if hasattr(self, 'location') else None,
self.tags if hasattr(self, 'tags') else None,)
@abstractmethod
def _initialize(self, compute_resource_id, name, location, compute_type, tags, description, created_on,
modified_on, provisioning_state, provisioning_errors, cluster_resource_id, cluster_location,
workspace, mlc_endpoint, operation_endpoint, auth, is_attached):
"""Initilize abstract method.
:param compute_resource_id:
:type compute_resource_id: str
:param name:
:type name: str
:param location:
:type location: str
:param compute_type:
:type compute_type: str
:param tags:
:type tags: builtin.list[str]
:param description:
:type description: str
:param created_on:
:type created_on: datetime.datetime
:param modified_on:
:type modified_on: datetime.datetime
:param provisioning_state:
:type provisioning_state: str
:param provisioning_errors:
:type provisioning_errors: builtin.list[dict]
:param cluster_resource_id:
:type cluster_resource_id: str
:param cluster_location:
:type cluster_location: str
:param workspace:
:type workspace: azureml.core.Workspace
:param mlc_endpoint:
:type mlc_endpoint: str
:param operation_endpoint:
:type operation_endpoint: str
:param auth:
:type auth: azureml.core.authentication.AbstractAuthentication
:param is_attached:
:type is_attached: boolean
:return:
:rtype: None
"""
self.id = compute_resource_id
self.name = name
self.location = location
self.type = compute_type
self.tags = tags
self.description = description
self.created_on = parse(created_on) if created_on else None
self.modified_on = parse(modified_on) if modified_on else None
self.provisioning_state = provisioning_state
self.provisioning_errors = provisioning_errors
self.cluster_resource_id = cluster_resource_id
self.cluster_location = cluster_location
self.workspace = workspace
self._mlc_endpoint = mlc_endpoint
self._operation_endpoint = operation_endpoint
self._auth = auth
self.is_attached = is_attached
@staticmethod
def _get_resource_manager_endpoint(workspace):
"""Return endpoint for resource manager based on cloud type.
For AzureCloud, resource manager endpoint is: "https://management.azure.com/".
:param workspace:
:type workspace: azureml.core.Workspace
:return:
:rtype: str
"""
return workspace._auth._get_cloud_type().endpoints.resource_manager
@staticmethod
def _get_compute_endpoint(workspace, name):
"""Return mlc endpoint for the compute.
:param workspace:
:type workspace: azureml.core.Workspace
:param name:
:type name: str
:return:
:rtype: str
"""
compute_resource_id = MLC_COMPUTE_RESOURCE_ID_FMT.format(workspace.subscription_id, workspace.resource_group,
workspace.name, name)
resource_manager_endpoint = ComputeTarget._get_resource_manager_endpoint(workspace)
return '{}{}'.format(resource_manager_endpoint, compute_resource_id)
@staticmethod
def _get_list_computes_endpoint(workspace):
"""Return mlc endpoint for list computes.
:param workspace:
:type workspace: azureml.core.Workspace
:return:
:rtype: str
"""
list_computes = MLC_LIST_COMPUTES_FMT.format(workspace.subscription_id, workspace.resource_group,
workspace.name)
resource_manager_endpoint = ComputeTarget._get_resource_manager_endpoint(workspace)
return '{}{}'.format(resource_manager_endpoint, list_computes)
@staticmethod
def _get_rp_compute_endpoint(workspace, name):
"""Return rp endpoint for the compute.
:param workspace:
:type workspace: azureml.core.Workspace
:param name:
:type name: str
:return:
:rtype: str
"""
compute_resource_id = RP_COMPUTE_RESOURCE_ID_FMT.format(workspace.subscription_id, workspace.resource_group,
workspace.name, name)
api_endpoint = workspace.service_context._get_api_url()
return '{}{}'.format(api_endpoint, compute_resource_id)
@staticmethod
def _get_rp_list_computes_endpoint(workspace):
"""Return rp endpoint for list computes.
:param workspace:
:type workspace: azureml.core.Workspace
:return:
:rtype: str
"""
list_computes = RP_LIST_COMPUTES_FMT.format(workspace.subscription_id, workspace.resource_group,
workspace.name)
api_endpoint = workspace.service_context._get_api_url()
return '{}{}'.format(api_endpoint, list_computes)
@staticmethod
def _get(workspace, name):
"""Return web response content for the compute.
:param workspace:
:type workspace: azureml.core.Workspace
:param name:
:type name: str
:return:
:rtype: dict
"""
endpoint = ComputeTarget._get_rp_compute_endpoint(workspace, name)
headers = workspace._auth.get_authentication_header()
ComputeTarget._add_request_tracking_headers(headers)
params = {'api-version': MLC_WORKSPACE_API_VERSION}
resp = ClientBase._execute_func(get_requests_session().get, endpoint, params=params, headers=headers)
if resp.status_code == 200:
content = resp.content
if isinstance(content, bytes):
content = content.decode('utf-8')
get_content = json.loads(content)
return get_content
elif resp.status_code == 404:
return None
else:
raise ComputeTargetException('Received bad response from Resource Provider:\n'
'Response Code: {}\n'
'Headers: {}\n'
'Content: {}'.format(resp.status_code, resp.headers, resp.content))
@staticmethod
def create(workspace, name, provisioning_configuration):
"""Provision a Compute object by specifying a compute type and related configuration.
This method creates a new compute target rather than attaching an existing one.
.. remarks::
The type of object provisioned is determined by the provisioning configuration provided.
In the following example, a persistent compute target provisioned by
:class:`azureml.core.compute.AmlCompute` is created. The ``provisioning_configuration`` parameter in this
example is of type :class:`azureml.core.compute.amlcompute.AmlComputeProvisioningConfiguration`.
.. code-block:: python
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# Choose a name for your CPU cluster
cpu_cluster_name = "cpu-cluster"
# Verify that cluster does not exist already
try:
cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)
print('Found existing cluster, use it.')
except ComputeTargetException:
compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',
max_nodes=4)
cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)
cpu_cluster.wait_for_completion(show_output=True)
Full sample is available from
https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/training/train-on-amlcompute/train-on-amlcompute.ipynb
:param workspace: The workspace object to create the Compute object under.
:type workspace: azureml.core.Workspace
:param name: The name to associate with the Compute object.
:type name: str
:param provisioning_configuration: A ComputeTargetProvisioningConfiguration object that is used to determine
the type of Compute object to provision, and how to configure it.
:type provisioning_configuration: azureml.core.compute.compute.ComputeTargetProvisioningConfiguration
:return: An instance of a child of ComputeTarget corresponding to the type of object provisioned.
:rtype: azureml.core.ComputeTarget
:raises azureml.exceptions.ComputeTargetException:
"""
if name in ["amlcompute", "local", "containerinstance"]:
raise UserErrorException("Please specify a different target name."
" {} is a reserved name.".format(name))
compute_type = provisioning_configuration._compute_type
return compute_type._create(workspace, name, provisioning_configuration)
@staticmethod
def _create_compute_target(workspace, name, compute_payload, target_class):
"""Create compute target.
:param workspace:
:type workspace: azureml.core.Workspace
:param name:
:type name: str
:param compute_payload:
:type compute_payload: dict
:param target_class:
:type target_class:
:return:
:rtype: azureml.core.ComputeTarget
"""
endpoint = ComputeTarget._get_compute_endpoint(workspace, name)
headers = {'Content-Type': 'application/json'}
headers.update(workspace._auth.get_authentication_header())
ComputeTarget._add_request_tracking_headers(headers)
params = {'api-version': MLC_WORKSPACE_API_VERSION}
resp = ClientBase._execute_func(get_requests_session().put, endpoint, params=params, headers=headers,
json=compute_payload)
try:
resp.raise_for_status()
except requests.exceptions.HTTPError:
raise ComputeTargetException('Received bad response from Resource Provider:\n'
'Response Code: {}\n'
'Headers: {}\n'
'Content: {}'.format(resp.status_code, resp.headers, resp.content))
if 'Azure-AsyncOperation' not in resp.headers:
raise ComputeTargetException("Error, missing 'Azure-AsyncOperation' operation location header in response:\n"
'Response Code: {}\n'
'Headers: {}\n'
'Content: {}'.format(resp.status_code, resp.headers, resp.content))
compute_target = target_class(workspace, name)
compute_target._operation_endpoint = resp.headers['Azure-AsyncOperation']
return compute_target
@staticmethod
def attach(workspace, name, attach_configuration):
"""Attach a Compute object to a workspace using the specified name and configuration information.
.. remarks::
The type of object to pass to the parameter ``attach_configuration`` is a
:class:`azureml.core.compute.compute.ComputeTargetAttachConfiguration`
object built using the ``attach_configuration`` function on any of the child classes of
:class:`azureml.core.ComputeTarget`.
The following example shows how to attach an ADLA account to a workspace using the
:meth:`azureml.core.compute.AdlaCompute.attach_configuration` method of AdlaCompute.
.. code-block:: python
adla_compute_name = 'testadl' # Name to associate with new compute in workspace
# ADLA account details needed to attach as compute to workspace
adla_account_name = "<adla_account_name>" # Name of the Azure Data Lake Analytics account
adla_resource_group = "<adla_resource_group>" # Name of the resource group which contains this account
try:
# check if already attached
adla_compute = AdlaCompute(ws, adla_compute_name)
except ComputeTargetException:
print('attaching adla compute...')
attach_config = AdlaCompute.attach_configuration(resource_group=adla_resource_group, account_name=adla_account_name)
adla_compute = ComputeTarget.attach(ws, adla_compute_name, attach_config)
adla_compute.wait_for_completion()
print("Using ADLA compute:{}".format(adla_compute.cluster_resource_id))
print("Provisioning state:{}".format(adla_compute.provisioning_state))
print("Provisioning errors:{}".format(adla_compute.provisioning_errors))
Full sample is available from
https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-adla-as-compute-target.ipynb
:param workspace: The workspace object to attach the Compute object to.
:type workspace: azureml.core.Workspace
:param name: The name to associate with the Compute object.
:type name: str
:param attach_configuration: A ComputeTargetAttachConfiguration object that is used to determine
the type of Compute object to attach, and how to configure it.
:type attach_configuration: azureml.core.compute.compute.ComputeTargetAttachConfiguration
:return: An instance of a child of ComputeTarget corresponding to the type of object attached.
:rtype: azureml.core.ComputeTarget
:raises azureml.exceptions.ComputeTargetException:
"""
compute_type = attach_configuration._compute_type
return compute_type._attach(workspace, name, attach_configuration)
@staticmethod
def _attach(workspace, name, attach_payload, target_class):
"""Attach implementation method.
:param workspace:
:type workspace: azureml.core.Workspace
:param name:
:type name: str
:param attach_payload:
:type attach_payload: dict
:param target_class:
:type target_class:
:return:
:rtype:
"""
attach_payload['location'] = workspace.location
endpoint = ComputeTarget._get_compute_endpoint(workspace, name)
headers = {'Content-Type': 'application/json'}
headers.update(workspace._auth.get_authentication_header())
ComputeTarget._add_request_tracking_headers(headers)
params = {'api-version': MLC_WORKSPACE_API_VERSION}
resp = ClientBase._execute_func(get_requests_session().put, endpoint, params=params, headers=headers,
json=attach_payload)
try:
resp.raise_for_status()
except requests.exceptions.HTTPError:
raise ComputeTargetException('Received bad response from Resource Provider:\n'
'Response Code: {}\n'
'Headers: {}\n'
'Content: {}'.format(resp.status_code, resp.headers, resp.content))
if 'Azure-AsyncOperation' not in resp.headers:
raise ComputeTargetException("Error, missing 'Azure-AsyncOperation' operation location header in response:\n"
'Response Code: {}\n'
'Headers: {}\n'
'Content: {}'.format(resp.status_code, resp.headers, resp.content))
compute_target = target_class(workspace, name)
compute_target._operation_endpoint = resp.headers['Azure-AsyncOperation']
return compute_target
@staticmethod
def list(workspace):
"""List all ComputeTarget objects within the workspace.
Return a list of instantiated child objects corresponding to the specific type of Compute. Objects are
children of :class:`azureml.core.ComputeTarget`.
:param workspace: The workspace object containing the objects to list.
:type workspace: azureml.core.Workspace
:return: List of compute targets within the workspace.
:rtype: builtin.list[azureml.core.ComputeTarget]
:raises azureml.exceptions.ComputeTargetException:
"""
envs = []
endpoint = ComputeTarget._get_rp_list_computes_endpoint(workspace)
headers = workspace._auth.get_authentication_header()
ComputeTarget._add_request_tracking_headers(headers)
params = {'api-version': MLC_WORKSPACE_API_VERSION}
resp = ClientBase._execute_func(get_requests_session().get, endpoint, params=params, headers=headers)
try:
resp.raise_for_status()
except requests.exceptions.HTTPError:
raise ComputeTargetException('Error occurred retrieving targets:\n'
'Response Code: {}\n'
'Headers: {}\n'
'Content: {}'.format(resp.status_code, resp.headers, resp.content))
is_windows_contrib_installed = True
try:
    from azureml.contrib.compute import AmlWindowsCompute  # noqa: F401
except ImportError:
    is_windows_contrib_installed = False
content = resp.content
if isinstance(content, bytes):
content = content.decode('utf-8')
result_list = json.loads(content)
paginated_results = get_paginated_compute_results(result_list, headers)
for env in paginated_results:
if 'properties' in env and 'computeType' in env['properties']:
compute_type = env['properties']['computeType']
is_attached = env['properties']['isAttachedCompute']
env_obj = None
for child in ComputeTarget.__subclasses__():
if is_attached and compute_type == 'VirtualMachine' and child.__name__ == 'DsvmCompute':
# Cannot attach DsvmCompute
continue
elif not is_attached and compute_type == 'VirtualMachine' and child.__name__ == 'RemoteCompute':
# Cannot create RemoteCompute
continue
elif not is_attached and compute_type == 'Kubernetes' and child.__name__ == 'KubernetesCompute':
# Cannot create KubernetesCompute
continue
elif compute_type == child._compute_type:
# If windows contrib is not installed, don't list windows compute type
# Windows is currently supported only for RL runs.
# The windows contrib is installed as a part of RL SDK install.
# This step is trying to avoid users using this compute target by mistake for a non-RL run
if not is_windows_contrib_installed and "properties" in env['properties'] and \
env['properties']['properties'] is not None and \
"osType" in env['properties']['properties'] and \
env['properties']['properties']['osType'].lower() == 'windows':
pass
else:
env_obj = child.deserialize(workspace, env)
break
if env_obj:
envs.append(env_obj)
return envs
def wait_for_completion(self, show_output=False, is_delete_operation=False):
"""Wait for the current provisioning operation to finish on the cluster.
This method returns a :class:`azureml.exceptions.ComputeTargetException` if there is a problem
polling the compute object.
:param show_output: Indicates whether to provide more verbose output.
:type show_output: bool
:param is_delete_operation: Indicates whether the operation is meant for deleting.
:type is_delete_operation: bool
:raises azureml.exceptions.ComputeTargetException:
"""
try:
operation_state, error = self._wait_for_completion(show_output)
print('Provisioning operation finished, final state: "{}"'.format(operation_state))
if not is_delete_operation:
self.refresh_state()
if operation_state != 'Succeeded':
if error and 'statusCode' in error and 'message' in error:
error_response = ('StatusCode: {}\n'
'Message: {}'.format(error['statusCode'], error['message']))
else:
error_response = error
raise ComputeTargetException('Compute object provisioning polling reached non-successful terminal '
'state, current provisioning state: {}\n'
'Provisioning operation error:\n'
'{}'.format(self.provisioning_state, error_response))
except ComputeTargetException as e:
if e.message == 'No operation endpoint':
self.refresh_state()
raise ComputeTargetException('Long running operation information not known, unable to poll. '
'Current state is {}'.format(self.provisioning_state))
else:
raise e
def _wait_for_completion(self, show_output):
"""Wait for completion implementation.
:param show_output:
:type show_output: bool
:return:
:rtype: (str, dict)
"""
if not self._operation_endpoint:
raise ComputeTargetException('No operation endpoint')
operation_state, error = self._get_operation_state()
current_state = operation_state
if show_output:
sys.stdout.write('{}'.format(current_state))
sys.stdout.flush()
while operation_state not in ('Succeeded', 'Failed', 'Canceled'):
time.sleep(5)
operation_state, error = self._get_operation_state()
if show_output:
sys.stdout.write('.')
if operation_state != current_state:
sys.stdout.write('\n{}'.format(operation_state))
current_state = operation_state
sys.stdout.flush()
return operation_state, error
def _get_operation_state(self):
"""Return operation state.
:return:
:rtype: (str, dict)
"""
headers = self._auth.get_authentication_header()
ComputeTarget._add_request_tracking_headers(headers)
params = {}
# API version should not be appended for operation status URLs.
# This is a bug fix for older SDK and ARM breaking changes and
# will append version only if the request URL doesn't have one.
if 'api-version' not in self._operation_endpoint:
params = {'api-version': MLC_WORKSPACE_API_VERSION}
resp = ClientBase._execute_func(get_requests_session().get, self._operation_endpoint, params=params,
headers=headers)
try:
resp.raise_for_status()
except requests.exceptions.HTTPError:
raise ComputeTargetException('Received bad response from Resource Provider:\n'
'Response Code: {}\n'
'Headers: {}\n'
'Content: {}'.format(resp.status_code, resp.headers, resp.content))
content = resp.content
if isinstance(content, bytes):
content = content.decode('utf-8')
content = json.loads(content)
status = content['status']
error = content.get('error')
# Prior to API version 2019-06-01 the 'error' element was double nested.
# This change retains backwards compat for 2018-11-19 version.
if error is not None:
innererror = error.get('error')
if innererror is not None:
error = innererror
# ---------------------------------------------------------------------
return status, error
@staticmethod
def _add_request_tracking_headers(headers):
if RequestHeaders.CLIENT_REQUEST_ID not in headers:
headers[RequestHeaders.CLIENT_REQUEST_ID] = str(uuid.uuid4())
if RequestHeaders.CLIENT_SESSION_ID not in headers:
headers[RequestHeaders.CLIENT_SESSION_ID] = _ClientSessionId
if RequestHeaders.USER_AGENT not in headers:
headers[RequestHeaders.USER_AGENT] = get_user_agent()
@abstractmethod
def refresh_state(self):
"""Perform an in-place update of the properties of the object.
Update properties based on the current state of the corresponding cloud object.
This is useful for manual polling of compute state.
This abstract method is implemented by child classes of :class:`azureml.core.ComputeTarget`.
"""
pass
def get_status(self):
"""Retrieve the current provisioning state of the Compute object.
.. remarks::
Values returned are listed in the Azure REST API Reference for
`ProvisioningState <https://docs.microsoft.com/rest/api/azureml/workspacesandcomputes/machinelearningcompute/get#provisioningstate>`_.
:return: The current ``provisioning_state``.
:rtype: str
"""
self.refresh_state()
return self.provisioning_state
@abstractmethod
def delete(self):
"""Remove the Compute object from its associated workspace.
This abstract method is implemented by child classes of :class:`azureml.core.ComputeTarget`.
.. remarks::
If this object was created through Azure Machine Learning, the corresponding cloud-based objects
will also be deleted. If this object was created externally and only attached to the workspace, this
method raises an exception and nothing is changed.
"""
pass
@abstractmethod
def detach(self):
"""Detach the Compute object from its associated workspace.
This abstract method is implemented by child classes of :class:`azureml.core.ComputeTarget`.
Underlying cloud objects are not deleted, only their associations are removed.
"""
pass
def _delete_or_detach(self, underlying_resource_action):
"""Remove the Compute object from its associated workspace.
If underlying_resource_action is 'delete', the corresponding cloud-based objects will also be deleted.
If underlying_resource_action is 'detach', no underlying cloud object will be deleted, the association
will just be removed.
:param underlying_resource_action: whether delete or detach the underlying cloud object
:type underlying_resource_action: str
:raises azureml.exceptions.ComputeTargetException:
"""
headers = self._auth.get_authentication_header()
ComputeTarget._add_request_tracking_headers(headers)
params = {'api-version': MLC_WORKSPACE_API_VERSION, 'underlyingResourceAction': underlying_resource_action}
resp = ClientBase._execute_func(get_requests_session().delete, self._mlc_endpoint, params=params,
headers=headers)
try:
resp.raise_for_status()
except requests.exceptions.HTTPError:
raise ComputeTargetException('Received bad response from Resource Provider:\n'
'Response Code: {}\n'
'Headers: {}\n'
'Content: {}'.format(resp.status_code, resp.headers, resp.content))
self.provisioning_state = 'Deleting'
self._operation_endpoint = resp.headers['Azure-AsyncOperation']
@abstractmethod
def serialize(self):
"""Convert this Compute object into a JSON serialized dictionary.
:return: The JSON representation of this Compute object.
:rtype: dict
"""
created_on = self.created_on.isoformat() if self.created_on else None
modified_on = self.modified_on.isoformat() if self.modified_on else None
compute = {'id': self.id, 'name': self.name, 'location': self.location, 'type': self.type, 'tags': self.tags,
'description': self.description, 'created_on': created_on, 'modified_on': modified_on,
'provisioning_state': self.provisioning_state, 'provisioning_errors': self.provisioning_errors}
return compute
@staticmethod
@abstractmethod
def deserialize(workspace, object_dict):
"""Convert a JSON object into a Compute object.
.. remarks::
Raises a :class:`azureml.exceptions.ComputeTargetException` if the provided
workspace is not the workspace the Compute is associated with.
:param workspace: The workspace object the Compute object is associated with.
:type workspace: azureml.core.Workspace
:param object_dict: A JSON object to convert to a Compute object.
:type object_dict: dict
:return: The Compute representation of the provided JSON object.
:rtype: azureml.core.ComputeTarget
"""
pass
@staticmethod
@abstractmethod
def _validate_get_payload(payload):
pass
class ComputeTargetProvisioningConfiguration(ABC):
"""Abstract parent class for all ComputeTarget provisioning configuration objects.
This class defines the configuration parameters for provisioning
compute objects. Provisioning configuration varies by child compute object. Specify provisioning configuration
with the ``provisioning_configuration`` method of child compute objects that require provisioning.
:param type: The type of ComputeTarget this object is associated with.
:type type: azureml.core.ComputeTarget
:param location: The Azure region to provision the Compute object in.
:type location: str
"""
def __init__(self, type, location):
"""Initialize the ProvisioningConfiguration object.
:param type: The type of ComputeTarget this object is associated with
:type type: azureml.core.ComputeTarget
:param location: The Azure region to provision the Compute object in.
:type location: str
:return: The ProvisioningConfiguration object
:rtype: azureml.core.compute.compute.ComputeTargetProvisioningConfiguration
"""
self._compute_type = type
self.location = location
@abstractmethod
def validate_configuration(self):
"""Check that the specified configuration values are valid.
Raises a :class:`azureml.exceptions.ComputeTargetException` if validation fails.
:raises azureml.exceptions.ComputeTargetException:
"""
pass
class ComputeTargetAttachConfiguration(ABC):
"""Abstract parent class for all ComputeTarget attach configuration objects.
This class defines the configuration parameters for attaching compute objects.
Attach configuration varies by child compute object. Specify attach configuration with the
``attach_configuration`` method of child compute objects.
:param type: The type of ComputeTarget this object is associated with.
:type type: azureml.core.ComputeTarget
"""
def __init__(self, type):
"""Initialize the AttachConfiguration object.
:param type: The type of ComputeTarget this object is associated with.
:type type: azureml.core.ComputeTarget
:return: The AttachConfiguration object.
:rtype: azureml.core.compute.compute.ComputeTargetAttachConfiguration
"""
self._compute_type = type
@abstractmethod
def validate_configuration(self):
"""Check that the specified configuration values are valid.
Raises a :class:`azureml.exceptions.ComputeTargetException` if validation fails.
:raises azureml.exceptions.ComputeTargetException:
"""
pass
class ComputeTargetUpdateConfiguration(ABC):
"""Abstract parent class for all ComputeTarget update configuration objects.
This class defines configuration parameters for updating compute objects.
Update configuration varies by child compute object. Specify update configuration
with the ``update`` method of child compute objects that support updating.
"""
def __init__(self, type):
"""Initialize the UpdateConfiguration object.
:param type: The type of ComputeTarget that should be updated.
:type type: azureml.core.ComputeTarget
:return: The ComputeTargetUpdateConfiguration object.
:rtype: azureml.core.compute.compute.ComputeTargetUpdateConfiguration
"""
self._compute_type = type
@abstractmethod
def validate_configuration(self):
"""Check that the specified configuration values are valid.
Raises a :class:`azureml.exceptions.ComputeTargetException` if validation fails.
:raises azureml.exceptions.ComputeTargetException:
"""
pass
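# --- Hedged usage sketch (illustrative, not part of the module above) ---
# Lists the compute targets of a workspace and polls their provisioning state,
# assuming a workspace config file is available locally for Workspace.from_config().
#
#     from azureml.core import Workspace
#     from azureml.core.compute import ComputeTarget
#
#     ws = Workspace.from_config()
#     for target in ComputeTarget.list(ws):
#         print(target.name, target.get_status())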
|
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import numpy as np
import pandas as pd
import math
from static_data import ARR_ranges, on_plot_shown_label, fig_size, color_schemes, themes
from preprocess_util import *
from plot_func.plot_util_multi_method import *
from plot_func.multi_method_plotter import Multi_method_plotter
class Single_sample_multi_method_plotter(Multi_method_plotter):
def __init__(self,plot_dfs,anno_df,method_names):
Multi_method_plotter.__init__(self,plot_dfs,anno_df,method_names)
def plot_dist(self,x_axis_column_name, scale):
return Multi_method_plotter.plot_dist(self,x_axis_column_name, scale)
def plot_arr(self,x_axis_column_name,scale):
fig = go.Figure()
for plot_df,method_name in zip(self.plot_dfs,self.method_names):
plot_df = filter_by_scale(scale, plot_df)
plot_df, custom_sort = get_group_range(plot_df, x_axis_column_name,None,None)
plot_df = plot_df.groupby(by='group_range').count().reset_index()
plot_df['Frequency'] = plot_df['isoform']/plot_df['isoform'].sum()
fill_na_df = pd.DataFrame({'group_range':['{:.0%}-{:.0%}'.format(i/10,(i+1)/10) for i in range(1,10)]+['<=10%','>100%'],'Frequency':[0 for i in range(11)]}).set_index('group_range')
plot_df = plot_df.append(fill_na_df.loc[fill_na_df.index.difference(pd.Index(plot_df['group_range']))].reset_index())
plot_df = plot_df.sort_values(
by=['group_range'], key=lambda col: custom_sort(col, ARR_ranges))
fig.add_trace(go.Bar(x=plot_df['group_range'],
y=plot_df['Frequency'],name=method_name))
fig.update_layout(
xaxis_title='Abundance Recovery Rate',
yaxis_title='Frequency',
width=fig_size['rec']['width'],height=fig_size['rec']['height'],template=themes['medium_single']
)
return fig
def plot_resolution_entropy(self,scale):
fig = go.Figure()
for i,plot_df,method_name in zip(range(len(self.method_names)),self.plot_dfs,self.method_names):
plot_df = filter_by_scale(scale, plot_df)
RE = [get_resolution_entropy(plot_df['estimated_abund'],100)]
fig.add_trace(go.Bar(x=['Resolution Entropy'],
y=RE,name=method_name,marker_color=color_schemes[i]))
fig.update_layout(
width=fig_size['small_rec']['width'],height=fig_size['small_rec']['height'],template=themes['medium_single']
)
return fig
def plot_corr_scatter(self,x_axis_column_name, y_axis_column_name,scale):
fig = make_subplots(rows=1, cols=len(self.plot_dfs), vertical_spacing=0.2, horizontal_spacing=0.1,subplot_titles=self.method_names)
x_maxs,y_maxs = [],[]
for plot_df,method_name,i in zip(self.plot_dfs,self.method_names,range(len(self.plot_dfs))):
plot_df = filter_by_scale(scale, plot_df)
plot_df = plot_df[(np.log2(plot_df[x_axis_column_name]+1) >=1) & (np.log2(plot_df[y_axis_column_name]+1) >= 1)]
x = plot_df[x_axis_column_name]
y = plot_df[y_axis_column_name]
x = np.log2(x + 1)
y = np.log2(y + 1)
x_maxs.append(x.max())
y_maxs.append(y.max())
x,y,density = get_density(x,y)
fig.add_trace(go.Scattergl(x=x, y=y, mode='markers', name='Value',marker=dict(size=5,color=density,colorscale='viridis'),showlegend=False), col=i+1,row=1)
fig.add_trace(go.Histogram2dContour(x=x, y=y, name='Density',contours={'coloring':'none','showlabels':True},showlegend=False), col=i+1,row=1)
x_title = 'Log2(True abundance+1)'
y_title = 'Log2(Estimated abundance+1)'
fig.update_layout(autosize=False,width=fig_size['square']['width']*len(self.plot_dfs),height=fig_size['square']['height'],template=themes['medium_multi'])
fig.update_xaxes(title_text=x_title,range=[1,max(x_maxs+y_maxs)])
fig.update_yaxes(title_text=y_title,range=[1, max(x_maxs+y_maxs)])
return fig
def plot_std_scatter(self,x_axis_column_name, y_axis_column_name,scale):
fig = make_subplots(rows=1, cols=len(self.plot_dfs), vertical_spacing=0.2, horizontal_spacing=0.1,subplot_titles=self.method_names)
x_maxs = []
for plot_df,method_name,i in zip(self.plot_dfs,self.method_names,range(len(self.plot_dfs))):
plot_df = filter_by_scale(scale, plot_df)
plot_df = plot_df[(np.log2(plot_df[x_axis_column_name]+1) >=1)]
x = plot_df[x_axis_column_name]
y = plot_df[y_axis_column_name]
x = np.log2(x + 1)
x_maxs.append(x.max())
x,y,density = get_density(x,y)
fig.add_trace(go.Scattergl(x=x, y=y, mode='markers', name='Value',marker=dict(size=5,color=density,colorscale='viridis'),showlegend=False), col=i+1,row=1)
fig.add_trace(go.Histogram2dContour(x=x, y=y, name='Density',contours={'coloring':'none','showlabels':True},showlegend=False), col=i+1,row=1)
x_title = 'Log2(Estimated abundance+1)'
y_title = 'std'
fig.update_layout(autosize=False,width=fig_size['small_square']['width']*len(self.plot_dfs),height=fig_size['small_square']['height'],template=themes['large_single'])
fig.update_xaxes(title_text=x_title,range=[1,max(x_maxs)])
fig.update_yaxes(title_text=y_title)
return fig
def plot_grouped_curve(self,x_axis_column_name, y_axis_column_names,scale):
figure_cols = math.ceil(math.sqrt(len(y_axis_column_names)))
figure_rows = math.ceil(len(y_axis_column_names)/ figure_cols)
# figure_rows = math.ceil(math.sqrt(len(y_axis_column_names)))
# figure_cols = math.ceil(len(y_axis_column_names)/ figure_rows)
fig = make_subplots(rows=figure_rows, cols=figure_cols, vertical_spacing=0.25, horizontal_spacing=0.1)
ranges,max_threshold = prepare_ranges(self.plot_dfs[0],x_axis_column_name)
for plot_df,method_name,j in zip(self.plot_dfs,self.method_names,range(len(self.plot_dfs))):
plot_df = filter_by_scale(scale, plot_df)
plot_df, custom_sort = get_group_range(plot_df, x_axis_column_name,ranges,max_threshold)
# fig = go.Figure()
# figure_rows = math.ceil(math.sqrt(len(y_axis_column_names)))
# figure_cols = math.ceil(len(y_axis_column_names)/ figure_rows)
for i in range(len(y_axis_column_names)):
row_num = math.ceil((i+1)/figure_cols)
col_num = i % figure_cols+1
y_axis_column_name = y_axis_column_names[i]
if (y_axis_column_name == 'mrd') and (x_axis_column_name == 'K_value'):
group_series = plot_df.groupby(by='group_range').apply(lambda df: get_single_sample_metric(
y_axis_column_name, df['true_abund'], df['estimated_abund'],plot_df,True)).to_frame().reset_index()
else:
group_series = plot_df.groupby(by='group_range').apply(lambda df: get_single_sample_metric(
y_axis_column_name, df['true_abund'], df['estimated_abund'],plot_df)).to_frame().reset_index()
group_series = group_series.rename(columns={0: y_axis_column_name}).sort_values(
by=['group_range'], key=lambda col: custom_sort(col))
if y_axis_column_name == 'nrmse':
group_series[y_axis_column_name] = np.log2(group_series[y_axis_column_name]+1)
if (y_axis_column_name in ['nrmse','mrd','mean_arr','spearmanr','RE']):
fig.add_trace(go.Bar(x=group_series['group_range'], y=group_series[y_axis_column_name]
, name='{}'.format(method_name),marker_color=color_schemes[j],showlegend=False), row=row_num, col=col_num)
else:
fig.add_trace(go.Scatter(x=group_series['group_range'], y=group_series[y_axis_column_name],
mode='lines+markers', name='{}'.format(method_name),marker_color=color_schemes[j],showlegend=False), row=row_num, col=col_num)
fig.update_xaxes(
title_text=on_plot_shown_label[x_axis_column_name],tickangle = 45, row=row_num, col=col_num)
fig.update_yaxes(
title_text=on_plot_shown_label[y_axis_column_name], row=row_num, col=col_num)
if (y_axis_column_name=='nrmse'):
fig.update_yaxes(title_text='Log2(NRMSE+1)', row=row_num, col=col_num)
fig.update_traces(showlegend=True,col=1,row=1)
fig.update_layout(
autosize=False,
width=fig_size['rec']['width']*figure_cols,height=fig_size['rec']['height']*figure_rows)
return fig
# def plot_corr_box_plot(self,x_axis_column_name,y_axis_column_name,scale):
# fig = make_subplots(rows=1, cols=len(self.plot_dfs),subplot_titles=self.method_names)
# shared_bins_cond = None
# for plot_df,method_name,j in zip(self.plot_dfs,self.method_names,range(len(self.plot_dfs))):
# plot_df = filter_by_scale(scale, plot_df)
# plot_df = plot_df[(np.log2(plot_df[x_axis_column_name]+1) >=1) & (np.log2(plot_df[y_axis_column_name]+1) >= 1)]
# df,shared_bins_cond = prepare_corr_box_plot_data(np.log2(plot_df[y_axis_column_name]+1),np.log2(plot_df[x_axis_column_name]+1),shared_bins_cond)
# fig.add_trace(go.Box(x=df['true_abund'],y=df['estimated_abund'],name=method_name),col=j+1,row=1)
# fig.update_xaxes(title_text='Log2(True abundance+1)')
# fig.update_yaxes(title_text='Log2(Estimated abundance+1)')
# fig.update_layout(
# autosize=False,showlegend=True,width=fig_size['small_square']['width']*len(self.plot_dfs),height=fig_size['small_square']['height'],template=themes['small_multi'])
# return fig
def plot_corr_box_plot(self,x_axis_column_name,y_axis_column_name,scale):
fig = make_subplots(rows=1, cols=1)
shared_bins_cond = None
for plot_df,method_name,j in zip(self.plot_dfs,self.method_names,range(len(self.plot_dfs))):
plot_df = filter_by_scale(scale, plot_df)
plot_df = plot_df[(np.log2(plot_df[x_axis_column_name]+1) >=1) & (np.log2(plot_df[y_axis_column_name]+1) >= 1)]
df,shared_bins_cond = prepare_corr_box_plot_data(np.log2(plot_df[y_axis_column_name]+1),np.log2(plot_df[x_axis_column_name]+1),shared_bins_cond)
fig.add_trace(go.Box(x=df['true_abund'],y=df['estimated_abund'],name=method_name),col=1,row=1)
fig.update_xaxes(title_text='Log2(True abundance+1)')
fig.update_yaxes(title_text='Log2(Estimated abundance+1)')
fig.update_layout(boxmode = "group",
autosize=False,showlegend=True,width=fig_size['square']['width'],height=fig_size['square']['height'],template=themes['large_single'])
return fig
def plot(self,plot_figure_name, scale):
x_axis_column_name = single_sample_plot_figures[plot_figure_name]['x']
y_axis_column_name = single_sample_plot_figures[plot_figure_name]['y']
if y_axis_column_name == 'dist':
fig = self.plot_dist(x_axis_column_name, scale)
elif plot_figure_name == 'Histogram of Abundance Recovery Rate':
fig = self.plot_arr(x_axis_column_name, scale)
elif plot_figure_name in ["Statistics with different K values",'Statistics with different isoform lengths','Statistics with different numbers of exons','Statistics with different expression level']:
fig = self.plot_grouped_curve(x_axis_column_name,y_axis_column_name,scale)
elif plot_figure_name in ['Correlation of estimated abundance and ground truth']:
fig = self.plot_corr_scatter(x_axis_column_name, y_axis_column_name, scale)
elif plot_figure_name in ['Standard deviation vs estimated abundance scatter']:
fig = self.plot_std_scatter(x_axis_column_name, y_axis_column_name, scale)
elif plot_figure_name == 'Correlation Boxplot of estimated abundance and ground truth':
fig = self.plot_corr_box_plot(x_axis_column_name,y_axis_column_name,scale)
elif plot_figure_name == 'Resolution Entropy':
fig = self.plot_resolution_entropy(scale)
try:
fig.update_layout(title=plot_figure_name, title_x=0.5)
fig.update_xaxes(exponentformat='e',automargin=True)
fig.update_yaxes(exponentformat='e',automargin=True)
except Exception:
print(plot_figure_name)
return fig |
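# --- Hedged usage sketch (illustrative, not part of the module above) ---
# Assumes each plot_df carries 'isoform', 'true_abund' and 'estimated_abund'
# columns and that the scale value is one understood by filter_by_scale; the
# DataFrames, annotation frame and method names below are hypothetical.
#
#     plotter = Single_sample_multi_method_plotter(
#         plot_dfs=[df_method_a, df_method_b],
#         anno_df=anno_df,
#         method_names=['method_a', 'method_b'])
#     fig = plotter.plot('Resolution Entropy', scale)
#     fig.write_html('resolution_entropy.html')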
<reponame>aleksas/remap<gh_stars>1-10
from sys import path
path.append('..')
from unittest import TestCase, main
from re_map import Processor
class MatchingGroupTestCase(TestCase):
'''
Tests replacement of perfectly matching match groups.
'''
def test_matching_1(self):
text = ' BBB AAA AAA BBB '
modifiers = [
( r'(AAA)', { 1: 'BBB' } ),
( r'(BBB)', { 1: 'YYY' } ),
]
ref_span_map = [
((1, 4), (1, 4)),
((5, 8), (5, 8)),
((9, 12), (9, 12)),
((13, 16), (13, 16))
]
with Processor(text) as procesor:
for pattern, replacement_map in modifiers:
procesor.process(pattern, replacement_map)
self.assertEqual( procesor.processed_text, ' YYY YYY YYY YYY ' )
self.assertEqual( procesor.span_map, ref_span_map )
decorated_text, decorated_processed_text = procesor.decorate()
self.assertEqual( decorated_text, ' 000 111 222 333 ' )
self.assertEqual( decorated_processed_text, ' 000 111 222 333 ' )
def test_matching_2(self):
text = ' AAA BBB CCC DDD '
modifiers = [
( r'(AAA) (BBB) (CCC)', { 1: 'ZZZZ', 2: 'YYYYY', 3: 'XXXXXX' } ),
( r'((YYYYY)|(ZZZZ))', { 1: 'WWWWWW' } ),
( r'(WWWWWW)', { 1: 'QQQQQQQ' } ),
]
ref_span_map = [
((1, 4), (1, 8)),
((5, 8), (9, 16)),
((9, 12), (17, 23))
]
with Processor(text) as procesor:
for pattern, replacement_map in modifiers:
procesor.process(pattern, replacement_map)
self.assertEqual( procesor.processed_text, ' QQQQQQQ QQQQQQQ XXXXXX DDD ' )
self.assertEqual( procesor.span_map, ref_span_map )
decorated_text, decorated_processed_text = procesor.decorate()
self.assertEqual( decorated_processed_text, ' 0000000 1111111 222222 DDD ' )
self.assertEqual( decorated_text, ' 000 111 222 DDD ' )
def test_matching_3(self):
text = 'AZA'
modifiers = [
( r'(A)', { 1: 'BB' } ),
( r'(BB)', { 1: 'DD' } )
]
with Processor(text) as procesor:
for pattern, replacement_map in modifiers:
procesor.process(pattern, replacement_map)
self.assertEqual( procesor.processed_text, 'DDZDD' )
self.assertEqual( procesor.span_map, [ ((0, 1), (0, 2)), ((2, 3), (3, 5)) ] )
decorated_text, decorated_processed_text = procesor.decorate()
self.assertEqual( decorated_text, '0Z1' )
self.assertEqual( decorated_processed_text, '00Z11' )
def test_matching_4(self):
text = ' AAA '
modifiers = [
( r'(AAA)', { 1: 'BBBBB' } ),
( r'(BBBBB)', { 1: 'CC' } ),
]
with Processor(text) as procesor:
for pattern, replacement_map in modifiers:
procesor.process(pattern, replacement_map)
self.assertEqual( procesor.processed_text, ' CC ' )
self.assertEqual( procesor.span_map, [ ((1, 4), (1, 3)) ] )
decorated_text, decorated_processed_text = procesor.decorate()
self.assertEqual( decorated_text, ' 000 ' )
self.assertEqual( decorated_processed_text, ' 00 ' )
def test_matching_5(self):
text = ' AAA D '
modifiers = [
( r'(AAA) (D)', { 1: 'BBBBB', 2: 'EE' } ),
( r'(BBBBB)', { 1: 'CC' } ),
( r'(EE)', { 1: 'FFFF' } ),
]
with Processor(text) as procesor:
for pattern, replacement_map in modifiers:
procesor.process(pattern, replacement_map)
self.assertEqual( procesor.processed_text, ' CC FFFF ' )
self.assertEqual( procesor.span_map, [ ((1, 4), (1, 3)), ((5, 6), (4, 8)) ] )
decorated_text, decorated_processed_text = procesor.decorate()
self.assertEqual( decorated_text, ' 000 1 ' )
self.assertEqual( decorated_processed_text, ' 00 1111 ' )
def test_matching_6(self):
text = ' AAA D AAA D '
modifiers = [
( r'(AAA) (D)', { 1: 'BBBBB', 2: 'EE' } ),
( r'(BBBBB)', { 1: 'CC' } ),
( r'(EE)', { 1: 'FFFF' } ),
]
with Processor(text) as procesor:
for pattern, replacement_map in modifiers:
procesor.process(pattern, replacement_map)
self.assertEqual( procesor.processed_text, ' CC FFFF CC FFFF ' )
self.assertEqual( procesor.span_map, [ ((1, 4), (1, 3)), ((5, 6), (4, 8)), ((7, 10), (9, 11)), ((11, 12), (12, 16)) ] )
decorated_text, decorated_processed_text = procesor.decorate()
self.assertEqual( decorated_text, ' 000 1 222 3 ' )
self.assertEqual( decorated_processed_text, ' 00 1111 22 3333 ' )
if __name__ == '__main__':
main() |
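# --- Hedged usage sketch (illustrative, not part of the test module above) ---
# The same Processor API the tests exercise, outside the unittest harness:
#
#     from re_map import Processor
#
#     with Processor(' AAA ') as processor:
#         processor.process(r'(AAA)', {1: 'BBBBB'})
#         print(processor.processed_text)   # ' BBBBB '
#         print(processor.span_map)         # [((1, 4), (1, 6))]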
<filename>bin/commonSubroutines/drawFigure/drawFigure_parallel1tiled.py
##########################################################################
# Copyright 2017, <NAME> (<EMAIL>)
#
# This file is part of CCseqBasic5.
#
# CCseqBasic5 is free software: you can redistribute it and/or modify
# it under the terms of the MIT license.
#
# CCseqBasic5 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT license for more details.
#
# You should have received a copy of the MIT license
# along with CCseqBasic5.
##########################################################################
# print "Preparing to run - check where we are and which version we have.."
import sys
# print ""
print "We are running in machine :"
print sys.platform
print "We are running in Python version :"
print sys.version
print ""
# print "We can load from these paths :"
# print sys.path
# print "We have these auto-loaded modules"
# print sys.modules
# print "We have these built-ins :"
# print dir(__builtins__)
# print "----------------------------------------"
# print "Run directory :"
import os
# print os.getcwd()
# print "----------------------------------------"
# print "Enabling log file output.."
import syslog
# print "Enabling resource use and run time statistics monitoring.."
import stat
# print "----------------------------------------"
# print "Importing script-specific libraries.."
# print "----------------------------------------"
#
# print "Importing regular expressions"
import re
#
# print "Importing matplotlib"
import matplotlib as mpl
print "We are running matplotlib version :"
print mpl.__version__
#
# print "Available back ends (instead of X windows) :"
# print (mpl.rcsetup.non_interactive_bk)
# print "Now loading the back end : "
# print "mpl.use('pdf')"
mpl.use('pdf')
# print "Importing pyplot "
import matplotlib.pyplot as plt
# print "Importing patches "
import matplotlib.patches as patch
# print "----------------------------------------"
print "Imported (and auto-loaded) modules :"
print(globals())
# print "----------------------------------------"
# print "Reading in the subroutines.."
# print >> sys.stderr, "----------------------------------------"
# print >> sys.stderr, "Reading in the subroutines.."
# Making the comments above the lines..
def writeComment(row) :
layer1.annotate(myComment[row], (2, 88-(10*row)))
# Subroutines to make RED-GREEN drawing :
# row = from the top, what is the row number for this data ?
def drawTwoColorsFlashed(row) :
print >> sys.stderr, 'Flashed'
print >> sys.stderr, row
# myPercentages[1]=[70,30]
layer1.broken_barh(
[(0, myFlashedPercentages[row][0]), (myFlashedPercentages[row][0], myFlashedPercentages[row][1])], # X (start, width)
(83-(10*row), 4), # Y (start, height)
facecolors=myColors[row]
)
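# The NonFlashed variants below start at x = myPercentages[1][0] (the width of
# the flashed fraction), so non-flashed bars are drawn to the right of the
# flashed ones on the same row.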
def drawTwoColorsNonFlashed(row) :
print >> sys.stderr, 'NonFlashed'
print >> sys.stderr, row
# myPercentages[1]=[70,30]
layer1.broken_barh(
[(myPercentages[1][0], myNonFlashedPercentages[row][0]), (myPercentages[1][0]+myNonFlashedPercentages[row][0], myNonFlashedPercentages[row][1])], # X (start, width)
(83-(10*row), 4), # Y (start, height)
facecolors=myColors[row]
)
# Subroutines to make RED-ORANGE-GREEN drawing :
# row = from the top, what is the row number for this data ?
def drawThreeColorsFlashed(row) :
print >> sys.stderr, 'Flashed'
print >> sys.stderr, row
# myPercentages[1]=[70,30]
layer1.broken_barh(
[(0, myFlashedPercentages[row][0]), (myFlashedPercentages[row][0], myFlashedPercentages[row][1]), (myFlashedPercentages[row][0]+myFlashedPercentages[row][1], myFlashedPercentages[row][2])], # X (start, width)
(83-(10*row), 4), # Y (start, height)
facecolors=myColors[row]
)
def drawThreeColorsNonFlashed(row) :
print >> sys.stderr, 'NonFlashed'
print >> sys.stderr, row
# myPercentages[1]=[70,30]
layer1.broken_barh(
[(myPercentages[1][0], myNonFlashedPercentages[row][0]), (myPercentages[1][0]+myNonFlashedPercentages[row][0], myNonFlashedPercentages[row][1]), (myPercentages[1][0]+myNonFlashedPercentages[row][0]+myNonFlashedPercentages[row][1], myNonFlashedPercentages[row][2])], # X (start, width)
(83-(10*row), 4), # Y (start, height)
facecolors=myColors[row]
)
# Subroutines to make GREEN-GREEN drawing :
# row = from the top, what is the row number for this data ?
def drawOneColorFlashed(row) :
print >> sys.stderr, 'Flashed'
print >> sys.stderr, row
# myPercentages[1]=[70,30]
layer1.broken_barh(
[(0, myFlashedPercentages[row])], # X (start, width)
(83-(10*row), 4), # Y (start, height)
facecolors=myColors[row]
)
def drawOneColorNonFlashed(row) :
print >> sys.stderr, 'NonFlashed'
print >> sys.stderr, row
# myPercentages[1]=[70,30]
layer1.broken_barh(
[(myPercentages[1][0], myNonFlashedPercentages[row])], # X (start, width)
(83-(10*row), 4), # Y (start, height)
facecolors=myColors[row]
)
# print "----------------------------------------"
# print "Starting the run.."
# print "----------------------------------------"
# print ""
print "Reading the input.."
# print ""
# print >> sys.stderr, "----------------------------------------"
# print >> sys.stderr, ""
print >> sys.stderr, "Reading the input.."
# print >> sys.stderr, ""
names=[]
values = []
colors = []
valuesAsFloats = []
with open('percentages.txt') as f:
for line in f:
data = line.split()
names.append(re.sub(r'_', ' ', data[0]))
values.append(data[1:])
temp = []
for i, value in enumerate(data[1:]):
temp.append(float(value))
valuesAsFloats.append(temp)
# print "names :"
# print names
# print "values :"
# print values
# print "valuesAsFloats :"
# print valuesAsFloats
# print "valuesAsFloats[0] :"
# print valuesAsFloats[0]
# print "valuesAsFloats[1] :"
# print valuesAsFloats[1]
# print "valuesAsFloats[2] :"
# print valuesAsFloats[2]
# print "valuesAsFloats[3] :"
# print valuesAsFloats[3]
# print "valuesAsFloats[4] :"
# print valuesAsFloats[4]
# print "----------------------------------------"
# print ""
print "Setting values.."
# print ""
# print >> sys.stderr, "----------------------------------------"
# print >> sys.stderr, ""
print >> sys.stderr, "Setting values.."
# print >> sys.stderr, ""
# Generating the lists..
myLabel=['0','1','2','3','4','5','6','','']
myComment=['0','1','2','3','4','5','6']
myPercentages=[0,1]
myFlashedPercentages=[0,1,2,3,4,5,6]
myNonFlashedPercentages=[0,1,2,3,4,5,6]
myColors=['0',['1','1'],['2','2'],['3','3'],['4','4'],['5','5','5'],['6','6']]
# Default colors (for color blindness support)
#
# PINK GREEN (default)
# RGB HEX
# red 255,74,179 #FF4ABE
# orange 255,140,0 #FF8C00
# green 62,176,145 #3EB091
#
red='#FF4ABE'
orange='#FF8C00'
green='#3EB091'
# Setting the values.. (most of them have four values - those are set here.)
for x in range(2, 7):
if (x != 3 and x !=5 ):
myFlashedPercentages[x]=[valuesAsFloats[x][0],valuesAsFloats[x][1]]
myNonFlashedPercentages[x]=[valuesAsFloats[x][2],valuesAsFloats[x][3]]
myLabel[0]='Total reads (input fastq)'
myComment[0]='Total reads (input fastq)'
myPercentages[0]=valuesAsFloats[0][0]
myColors[0]='blue'
myLabel[1]='Flashed / nonflashed'
myComment[1]='Flash-combined (light blue), non-combined (yellow)'
myPercentages[1]=[valuesAsFloats[1][0],valuesAsFloats[1][1]]
myColors[1]=['dodgerblue','gold']
myLabel[2]='Do/don\'t have RE site'
myComment[2]='With RE site (green), no RE site (red)'
myColors[2]=[green,red]
myLabel[3]='Continue to mapping'
myComment[3]='Continues to mapping :'
myFlashedPercentages[3]=valuesAsFloats[3][0]
myNonFlashedPercentages[3]=valuesAsFloats[3][1]
myColors[3]=green
myLabel[4]='Fragment(s) within (any) tile'
myComment[4]='Fragment(s) within any tile (green), all frags outside tiles (red)'
myColors[4]=[green,red]
myLabel[5]='At least 2 fragments within (any) tile'
# myComment[5]='cap+rep(green), cap+excl(orange), only cap(red)'
myComment[5]='At least 2 within (any) tile (green), exclusions (orange), only 1 in (any) tile (red)'
myFlashedPercentages[5]=[valuesAsFloats[5][0],valuesAsFloats[5][1],valuesAsFloats[5][2]]
myNonFlashedPercentages[5]=[valuesAsFloats[5][3],valuesAsFloats[5][4],valuesAsFloats[5][5]]
myColors[5]=[green,orange,red]
myLabel[6]='Multiple different tiles'
myComment[6]='All fragments in single tile (green), multi-tile (red)'
myColors[6]=[green,red]
# myLabel[7]='Duplicate filtered'
# myComment[7]='non-duplicate(green), duplicate(red)'
# myColors[7]=[green,red]
# myLabel[8]='Blat/ploidy filtered'
# myComment[8]='no-blat-no-ploidy(green), blat and/or ploidy(red)'
# myColors[8]=[green,red]
# print >> sys.stderr,"----------------------------------------"
# print >> sys.stderr,""
# print >> sys.stderr,"Checking that the labels are not in wonky order :"
# print >> sys.stderr,""
# for x in range(0, 9):
# print >> sys.stderr,"Label here :", myLabel[x]
# print >> sys.stderr,"Same line in input : ", names[x]
# print >> sys.stderr,""
# for x in range(0, 2):
# print >> sys.stderr,"Label here :", myLabel[x]
# print >> sys.stderr,"myPercentages : ", myPercentages[x]
# print >> sys.stderr,""
# for x in range(2, 9):
# print >> sys.stderr,"Label here :", myLabel[x]
# print >> sys.stderr,"myFlashedPercentages : ", myFlashedPercentages[x]
# print >> sys.stderr,"myNonFlashedPercentages : ", myNonFlashedPercentages[x]
# print "----------------------------------------"
# print ""
print "Drawing axes and tick marks (general overlay).."
# print ""
# print >> sys.stderr, "----------------------------------------"
# print >> sys.stderr, ""
print >> sys.stderr, "Drawing axes and tick marks (general overlay).."
# print >> sys.stderr, ""
# class matplotlib.figure.Figure(figsize=None, dpi=None, facecolor=None, edgecolor=None, linewidth=0.0, frameon=None, subplotpars=None, tight_layout=None)
# matplotlib.pyplot.subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True, subplot_kw=None, gridspec_kw=None, **fig_kw)
fig1, layer1 = plt.subplots()
# Set the overall settings here ..
# Grid on (dotted lines)
layer1.grid(True)
# Where (in whole canvas) we want to put our y-range and x-range
# 0,0 is, as usual, the bottom-left corner.
# Set x-axis to be from 0 to 100
layer1.set_xlim(0, 100)
layer1.set_xticks([ 0,10,20,30,40,50,60,70,80,90,100])
layer1.set_xlabel('Percentage of input reads')
# Set y-axis to contain all the reads..
layer1.set_ylim(0, 100)
# From bottom up (as the coordinates go that direction) :
# Copy and reverse the list..
myReverseLabels=myLabel[:]
myReverseLabels.reverse()
layer1.set_yticks([5,15,25,35,45,55,65])
layer1.set_yticklabels(myReverseLabels)
# print "----------------------------------------"
# print ""
print "Drawing boxes and their labels.."
# print ""
# print >> sys.stderr, "----------------------------------------"
# print >> sys.stderr, ""
print >> sys.stderr, "Drawing boxes and their labels.."
# print >> sys.stderr, ""
# matplotlib.pyplot.broken_barh(xranges, yrange, hold=None, data=None, **kwargs)
# Plot horizontal bars.
myFlashedPercentages[1]=myPercentages[1]
layer1.broken_barh(
[(0, myFlashedPercentages[1][0]), (myFlashedPercentages[1][0], myFlashedPercentages[1][1])], # X (start, width)
(0, 75), # Y (start, height)
facecolors=['lightcyan','lemonchiffon'], edgecolor = "none"
)
# Total reads (input fastq)
writeComment(0)
myFlashedPercentages[0]=myPercentages[0]
drawOneColorFlashed(0)
# Flashed / nonflashed
writeComment(1)
myFlashedPercentages[1]=myPercentages[1]
drawTwoColorsFlashed(1)
# Do/don't have RE site
writeComment(2)
drawTwoColorsFlashed(2)
drawTwoColorsNonFlashed(2)
# Continue to mapping
writeComment(3)
drawOneColorFlashed(3)
drawOneColorNonFlashed(3)
# Contains capture
writeComment(4)
drawTwoColorsFlashed(4)
drawTwoColorsNonFlashed(4)
# Capture and/or reporter
writeComment(5)
drawThreeColorsFlashed(5)
drawThreeColorsNonFlashed(5)
# Multiple (different) captures
writeComment(6)
drawTwoColorsFlashed(6)
drawTwoColorsNonFlashed(6)
# Duplicate filtered
# writeComment(7)
# drawTwoColorsFlashed(7)
# drawTwoColorsNonFlashed(7)
# Blat/ploidy filtered
# writeComment(8)
# drawTwoColorsFlashed(8)
# drawTwoColorsNonFlashed(8)
# print "----------------------------------------"
# print ""
print "Saving figure.."
# print ""
# print >> sys.stderr, "----------------------------------------"
# print >> sys.stderr, ""
print >> sys.stderr, "Saving figure.."
# print >> sys.stderr, ""
fig1.savefig('summary.pdf', dpi=90, bbox_inches='tight')
fig1.savefig('summary.png', dpi=90, bbox_inches='tight')
|
## FC Network
class FCNet:
def __init__(self):
from keras.models import Sequential
from keras.layers import Cropping2D, Lambda, Flatten, Dense
from keras import optimizers
from keras.callbacks import ModelCheckpoint
self.model = Sequential()
# Input layer
self.model.add(Cropping2D(cropping = ((50, 20), (0, 0)), input_shape = (160, 320, 3)))
self.model.add(Lambda(lambda x: (x/127.5)-1.0))
# Flatten
self.model.add(Flatten())
# Hidden layer
self.model.add(Dense(100, activation='relu'))
# Output layer
# Without activation function (linear output for regression)!
self.model.add(Dense(1))
# Optimizer
optimizer = optimizers.Adagrad()
# Compile
self.model.compile(loss='mse', optimizer=optimizer, metrics=['mae'])
self.model.summary()
model_checkpoint = ModelCheckpoint('fcnet-{epoch:02d}.h5', save_best_only=True)
self.callbacks = [model_checkpoint]
def fit(self, train_generator, valid_generator, training_steps, validation_steps, epochs=10):
print("Training with {} training steps, {} validation steps.".format(training_steps, validation_steps))
self.model.fit_generator(generator = train_generator,
steps_per_epoch = training_steps,
validation_data = valid_generator,
validation_steps = validation_steps,
epochs = epochs,
callbacks = self.callbacks)
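# Minimal usage sketch (added; not in the original file). The generator,
# array shapes, and step counts below are illustrative assumptions:
#
#   import numpy as np
#   def constant_batches(batch_size=8):
#       while True:
#           images = np.zeros((batch_size, 160, 320, 3), dtype=np.float32)
#           angles = np.zeros((batch_size,), dtype=np.float32)
#           yield images, angles
#   net = FCNet()
#   net.fit(constant_batches(), constant_batches(),
#           training_steps=10, validation_steps=2, epochs=1)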
## Nvidia Network
class PilotNet():
def __init__(self):
from keras.models import Sequential
from keras.layers import Cropping2D, Lambda, Conv2D, Flatten, Dense
from keras import optimizers
from keras.callbacks import ModelCheckpoint
self.model = Sequential()
# Cropping (90, 320,3)
self.model.add(Cropping2D(cropping=((50,20),(0,0)), input_shape=(160,320,3)))
# Normalization: map pixel values from [0, 255] to [-1, 1] (divide by 127.5, subtract 1)
self.model.add(Lambda(lambda x: (x/127.5)-1.0))
# Conv1 (43,158,24)
self.model.add(Conv2D(24, kernel_size=(5,5), strides=(2,2), padding='valid', activation='relu'))
# Conv2 (20,77,36)
self.model.add(Conv2D(36, kernel_size=(5,5), strides=(2,2), padding='valid', activation='relu'))
# Conv3 (8,37,48)
self.model.add(Conv2D(48, kernel_size=(5,5), strides=(2,2), padding='valid', activation='relu'))
# conv4 (6,35,64)
self.model.add(Conv2D(64, kernel_size=(3,3), strides=(1,1), padding='valid', activation='relu'))
# Conv5 (4, 33, 64)
self.model.add(Conv2D(64, kernel_size=(3,3), strides=(1,1), padding='valid', activation='relu'))
# Flatten (None, 8448)
self.model.add(Flatten())
# FC1
self.model.add(Dense(1164, activation='relu'))
# FC2
self.model.add(Dense(100, activation='relu'))
# FC3
self.model.add(Dense(50, activation='relu'))
# FC4
self.model.add(Dense(10, activation='relu'))
# FC5
self.model.add(Dense(1))
## Optimizer
#optimizer = optimizers.Adam(lr=0.001)
## Compile
self.model.compile(loss='mse', optimizer='adam', metrics=['mae'])
self.model.summary()
model_checkpoint = ModelCheckpoint('PilotNet-{epoch:02d}.h5', save_best_only=True)
self.callbacks = [model_checkpoint]
def fit(self, train_generator, valid_generator, training_steps, validation_steps, epochs=10):
print("Training with {} steps, {} validation steps.".format(training_steps, validation_steps))
self.model.fit_generator(train_generator,
steps_per_epoch = training_steps,
validation_data = valid_generator,
validation_steps = validation_steps,
epochs = epochs,
callbacks = self.callbacks)
## A modified nvidia network
class Modified_Nvidia_Netwrok:
def __init__(self):
from keras.models import Sequential
from keras.layers import Flatten, Dense, Conv2D, Lambda, Dropout
from keras.layers.pooling import MaxPooling2D
from keras import optimizers
from keras.callbacks import ModelCheckpoint
##
from keras.layers import Cropping2D, BatchNormalization, Activation
from keras.layers.advanced_activations import ELU
from keras.regularizers import l2
self.model = Sequential()
self.model.add(Cropping2D(cropping=((50,20),(0,0)), input_shape=(160, 320, 3)))
# Normalization: converts the input from uint8 to float between -1 and 1
self.model.add(Lambda(lambda x: (x / 127.5) - 1.0))
# Conv1
self.model.add(Conv2D(24, kernel_size=(5,5), padding='valid', activation='relu'))
self.model.add(MaxPooling2D(pool_size=(2,2)))
# Conv2
self.model.add(Conv2D(36, kernel_size=(5,5), padding='valid', activation='relu'))
self.model.add(MaxPooling2D(pool_size=(2,2)))
# Conv3
self.model.add(Conv2D(48, kernel_size=(5,5), padding='valid', activation='relu'))
self.model.add(MaxPooling2D(pool_size=(2,2)))
# Conv4
self.model.add(Conv2D(64, kernel_size=(3,3), padding='valid', activation='relu'))
# conv5
self.model.add(Conv2D(64, kernel_size=(3,3), padding='valid', activation='relu'))
# Flattening Layer (None, 4096)
self.model.add(Flatten())
# Dropout 0.5 (None, 4096)
self.model.add(Dropout(0.5))
# FC1 (None, 1164)
self.model.add(Dense(1164, activation='relu'))
# Dropout
self.model.add(Dropout(0.5))
# FC2 (None, 100)
self.model.add(Dense(100, activation='relu'))
# FC3 (None, 50)
self.model.add(Dense(50, activation='relu'))
# FC4 (None, 10)
self.model.add(Dense(10, activation='relu'))
# FC5 (None, 1)
self.model.add(Dense(1, kernel_initializer='normal'))
## Optimizer
optimizer = optimizers.Adam()
## Compile
self.model.compile(loss='mse', optimizer=optimizer, metrics=['mae'])
self.model.summary()
# Use the keras ModelCheckpoint to save the model
# after every epoch
model_checkpoint = ModelCheckpoint('modified_nvidia_model-{epoch:02d}.h5', save_best_only=True)
self.callbacks = [model_checkpoint]
## Train the model using Keras' fit_generator()
# train_generator: generator to provide batches of training data
# valid_generator: generator to provide batches of validation data
# training_steps: integer of training steps to achieve one epoch
# validation_steps: integer of validation steps
# epochs: integer
def fit(self, train_generator, valid_generator, training_steps, validation_steps, epochs=10):
print("Training with {} training steps, {} validation steps.".format(training_steps, validation_steps))
self.model.fit_generator(train_generator,
steps_per_epoch = training_steps,
validation_data = valid_generator,
validation_steps = validation_steps,
epochs = epochs,
callbacks = self.callbacks)
def save_model(self, save_path):
self.model.save(save_path)
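# Hypothetical inference sketch (added; not in the original file): reload a
# checkpoint written by the ModelCheckpoint callback and predict a steering
# angle for a single frame. The checkpoint filename is an assumption, and
# load_model may need custom_objects for the Lambda layer in some setups.
#
#   from keras.models import load_model
#   import numpy as np
#   model = load_model('modified_nvidia_model-05.h5')
#   frame = np.zeros((1, 160, 320, 3), dtype=np.float32)
#   steering_angle = float(model.predict(frame)[0][0])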
'''
import os
import csv
import cv2
import numpy as np
from keras.models import Sequential
from keras.layers import Flatten, Conv2D, Dense
## reading data
lines = []
dataset_dir = '/media/ubuntu16/新加卷/Self-Driving/datasets/carnd'
data_file = os.path.join(dataset_dir, 'driving_log.csv')
with open(data_file) as csvfile:
reader = csv.reader(csvfile)
for line in reader:
lines.append(line)
center_images = []
steer_labels = []
for line in lines:
filepath = line[0]
center_image = cv2.imread(filepath)
center_images.append(center_image)
steer_label = float(line[3])
steer_labels.append(steer_label)
X_train = np.array(center_images)
y_train = np.array(steer_labels)
## simple network
model = Sequential()
model.add(Flatten(input_shape=(160,320,3)))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
model.fit(X_train, y_train, validation_split=0.2, shuffle=True, epochs=10)
# save model
model.save('simple_model.h5')
Conv2D(filters, kernel_size, strides=(1, 1), padding='valid',
data_format=None, dilation_rate=(1, 1), activation=None,
use_bias=True, kernel_initializer='glorot_uniform',
bias_initializer='zeros', kernel_regularizer=None,
bias_regularizer=None, activity_regularizer=None,
kernel_constraint=None, bias_constraint=None)
''' |
'''
These tests are inspired by and use code from the tests made by cs540-testers
for the Fall 2020 semester
Their version can be found here: https://github.com/cs540-testers/hw5-tester/
'''
__maintainer__ = 'CS540-testers-SP21'
__author__ = ['<NAME>']
__credits__ = ['<NAME>', '<NAME>', '<NAME>', '<NAME>']
__version__ = '1.0'
import sys
import unittest
from time import time
import numpy as np
from pca import *
file_path = 'YaleB_32x32.npy'
def timeit(func):
    def timed_func(*args, **kwargs):
        t0 = time()
        out = func(*args, **kwargs)
        print(f'Ran {func.__name__}{" "*(30-len(func.__name__))}in {(time() - t0)*1000:.2f}ms')
        return out
    return timed_func
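# Illustrative use of the decorator (added; not part of the original tester):
#
#   @timeit
#   def busy():
#       return sum(range(10**6))
#   busy()  # prints something like: Ran busy ... in 12.34ms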
class TestPrincipalComponentAnalysis(unittest.TestCase):
@timeit
def test1_load_and_center_dataset(self):
X = load_and_center_dataset(file_path)
# Dataset needs to have the correct shape
self.assertEqual(X.shape, (2414, 1024))
# The mean should be close to 0 (account for floating point math)
self.assertTrue(np.isclose(X.mean(), 0.0))
@timeit
def test2_get_covariance(self):
X = load_and_center_dataset(file_path)
S = get_covariance(X)
# S needs to have size d x d
self.assertEqual(S.shape, (1024, 1024))
# S should be symmetric
self.assertTrue(np.allclose(S, S.T))
# S should have non-negative values on the diagonal
self.assertTrue(np.min(np.diagonal(S)) >= 0)
@timeit
def test3_get_eig_small(self):
X = load_and_center_dataset(file_path)
S = get_covariance(X)
Lambda, U = get_eig(S, 2)
# Eigenvalues need to have shape (2, 2)
self.assertEqual(Lambda.shape, (2, 2))
# Eigenvalues should match example
self.assertTrue(np.allclose(Lambda, [[1369142.41612494, 0],[0, 1341168.50476773]]))
# Eigenvectors need to have shape (1024, 2)
self.assertEqual(U.shape, (1024, 2))
# Av = λv (matrix * vector = scalar * vector)
self.assertTrue(np.allclose(S @ U, U @ Lambda))
@timeit
def test4_get_eig_large(self):
X = load_and_center_dataset(file_path)
S = get_covariance(X)
Lambda, U = get_eig(S, 1024)
# Eigenvalues need to have shape (1024, 1024)
self.assertEqual(np.shape(Lambda), (1024, 1024))
# Check that Lambda is diagonal
self.assertEqual(np.count_nonzero(
Lambda - np.diag(np.diagonal(Lambda))), 0)
# Check that Lambda is sorted in decreasing order
diag = np.diagonal(Lambda)
self.assertTrue(np.all(np.equal(diag, diag[np.argsort(-diag)])))
# Eigenvectors need to have shape (1024, 1024)
self.assertEqual(np.shape(U), (1024, 1024))
# Av = λv (matrix * vector = scalar * vector)
self.assertTrue(np.all(np.isclose(S @ U, U @ Lambda)))
@timeit
def test5_get_eig_perc_small(self):
X = load_and_center_dataset(file_path)
S = get_covariance(X)
Lambda, U = get_eig_perc(S, 0.07)
# Eigenvalues need to have shape (2, 2)
self.assertEqual(Lambda.shape, (2, 2))
# Eigenvalues should match example
self.assertTrue(np.allclose(Lambda, [[1369142.41612494, 0],[0, 1341168.50476773]]))
# Eigenvectors need to have shape (1024, 2)
self.assertEqual(U.shape, (1024, 2))
# Av = λv (matrix * vector = scalar * vector)
self.assertTrue(np.allclose(S @ U, U @ Lambda))
@timeit
def test6_get_eig_perc_large(self):
X = load_and_center_dataset(file_path)
S = get_covariance(X)
Lambda, U = get_eig_perc(S, -1)
# Eigenvalues need to have shape (1024, 1024)
self.assertEqual(np.shape(Lambda), (1024, 1024))
# Check that Lambda is diagonal
self.assertEqual(np.count_nonzero(
Lambda - np.diag(np.diagonal(Lambda))), 0)
# Check that Lambda is sorted in decreasing order
diag = np.diagonal(Lambda)
self.assertTrue(np.all(np.equal(diag, diag[np.argsort(-diag)])))
# Eigenvectors need to have shape (1024, 1024)
self.assertEqual(np.shape(U), (1024, 1024))
# Av = λv (matrix * vector = scalar * vector)
self.assertTrue(np.all(np.isclose(S @ U, U @ Lambda)))
@timeit
def test7_project_image(self):
X = load_and_center_dataset(file_path)
S = get_covariance(X)
Lambda, U = get_eig(S, 2)
projected = project_image(X[0], U)
# Projected needs to have shape (1024, )
self.assertEqual(projected.shape, (1024,))
# Example values from Canvas
self.assertTrue(np.allclose(projected[:3], [6.84122225,4.83901287,1.41736694]))
self.assertTrue(np.allclose(projected[-3:], [8.75796534,7.45916035,5.4548656]))
# Min and max values
self.assertTrue(np.isclose(projected.max(), 93.22417310945819))
self.assertTrue(np.isclose(projected.min(), 0.27875793275475225))
if __name__ == '__main__':
print(f'Running CS540 SP21 HW3 tester v{__version__}')
# Hack to allow different locations of YaleB_32x32.npy (done this way to allow
# unittest's flags to still be passed, if desired)
if '--yale-path' in sys.argv:
path_index = sys.argv.index('--yale-path') + 1
if path_index == len(sys.argv):
print('Error: must supply path after option --yale-path')
sys.exit(1)
file_path = sys.argv[path_index]
print(f'Using {file_path} as location of dataset')
del(sys.argv[path_index])
del(sys.argv[path_index - 1])
unittest.main(argv=sys.argv)
|
<gh_stars>0
import unittest
import numpy as np
import simulator
import models
import estimators
class TestFloorPlan(unittest.TestCase):
def test_basic_u(self):
np.random.seed(401)
z_ref = -1
width = 10
length = 10
# planes
x_planes = []
x_offsets = np.cumsum(np.random.randint(1, 3, 6)) - width / 2
for x in x_offsets:
x_planes.append(models.Plane.from_axis_distance(axis=np.array([1, 0, 0]), distance=x))
y_planes = []
y_offsets = np.cumsum(np.random.randint(1, 3, 5)) - length / 2
for y in y_offsets:
y_planes.append(models.Plane.from_axis_distance(axis=np.array([0, 1, 0]), distance=y))
# boundaries
boundaries = [simulator.rectangle(x_planes[4], x=np.mean([y_offsets[3], y_offsets[2]]), y=0, w=y_offsets[3]-y_offsets[2]-0.3, h=2),
simulator.rectangle(x_planes[5], x=np.mean([y_offsets[4], y_offsets[1]]), y=0, w=y_offsets[4]-y_offsets[1]-0.5, h=2),
simulator.rectangle(y_planes[1], x=-np.mean([x_offsets[5], x_offsets[1]]), y=0, w=x_offsets[5]-x_offsets[1]-0.4, h=2),
simulator.rectangle(y_planes[2], x=-np.mean([x_offsets[4], x_offsets[1]]), y=0, w=x_offsets[4]-x_offsets[1]-0.2, h=2),
simulator.rectangle(y_planes[3], x=-np.mean([x_offsets[4], x_offsets[1]]), y=0, w=x_offsets[4]-x_offsets[1]-0.25, h=2),
simulator.rectangle(y_planes[4], x=-np.mean([x_offsets[5], x_offsets[1]]), y=0, w=x_offsets[5]-x_offsets[1]-0.1, h=2)
]
# evidence
evidence_index = [((2, 2), (1, 1)),
((4, 2), (2, 1)),
((5, 2), (4, 1)),
((5, 4), (4, 2)),
((4, 4), (3, 3)),
((3, 4), (2, 3)),
((2, 4), (1, 3)),
((3, 1), (2, 0))]
evidence = [models.Point(np.array([-2, 3, 0]))]
for ev in evidence_index:
tr_corner = np.array([x_offsets[ev[0][0]], y_offsets[ev[0][1]]])
bl_corner = np.array([x_offsets[ev[1][0]], y_offsets[ev[1][1]]])
diff = tr_corner - bl_corner
center = np.mean(np.array([tr_corner, bl_corner]), axis=0)
ellipse = simulator.ellipse(float(diff[0]) / 2, float(diff[1]) / 2, 3).rigid(np.eye(2), center)
evidence.append(ellipse)
# construct
cell_complex = models.CellComplex2D(z_ref=z_ref, width=width, length=length, evidence=evidence)
# cell_complex = models.CellComplex2D(z_ref=z_ref, width=width, length=length)
for p in x_planes + y_planes:
cell_complex.insert_partition(p)
for b in boundaries:
cell_complex.insert_boundary(b)
speculator = estimators.FloorPlanSpeculator(cell_complex, horizon=1)
scene_graph = speculator.floorplan()
# scene_graph = cell_complex.cell_graph()
cell_complex.draw(scene_graph)
if __name__ == '__main__':
unittest.main()
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
from .base import BaseHead
from ..registry import HEADS
from ..weight_init import weight_init_
from ..builder import build_loss
@HEADS.register()
class I3DHead(BaseHead):
"""
Head for ST-GCN model.
Args:
in_channels: int, input feature channels. Default: 256.
num_classes: int, number of classes. Default: 10.
"""
def __init__(self, in_channels=256, num_classes=10,
dropout_ratio=0.5,
init_std=0.01, **kwargs):
super().__init__(num_classes, in_channels, **kwargs)
self.dropout_ratio = dropout_ratio
self.init_std = init_std
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
#self.fcn = nn.Linear(in_channels, num_classes)
self.fcn = nn.Conv3D(in_channels=in_channels, out_channels=num_classes, kernel_size=1)
self.loss_trip = build_loss(dict(name="TripletLoss"))
self.avg_pool = nn.AdaptiveAvgPool3D((1, 1, 1))
def init_weights(self):
"""Initiate the parameters.
"""
for layer in self.sublayers():
if isinstance(layer, nn.Conv3D):
weight_init_(layer, 'Normal', std=0.02)
'''
weight_init_(self.fcn,
'Normal',
'fc_0.w_0',
'fc_0.b_0',
mean=0.,
std=self.init_std)
'''
def forward(self, x):
"""Define how the head is going to run.
"""
x = x[0]
x = self.avg_pool(x)
# self.dropout is None when dropout_ratio == 0, so guard the call
if self.dropout is not None:
    x = self.dropout(x)
feats = paddle.reshape(x, (x.shape[0], -1)).clone()
#cls_score = self.fc_cls(feats)
x = self.fcn(x)
cls_score = paddle.reshape_(x, (x.shape[0], -1))
'''
feats = paddle.reshape(x, (x.shape[0], -1))
cls_score = self.fcn(feats)
'''
return cls_score, feats
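# Shape sketch (inferred from the code above, not from the original comments):
# with x[0] of shape (N, in_channels, T, H, W), avg_pool gives
# (N, in_channels, 1, 1, 1); feats is (N, in_channels); the 1x1x1 Conv3D
# yields (N, num_classes, 1, 1, 1), reshaped into cls_score of (N, num_classes).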
def loss(self, scores, feats, labels, valid_mode=False, **kwargs):
"""Calculate the loss accroding to the model output ```scores```,
and the target ```labels```.
Args:
scores (paddle.Tensor): The output of the model.
labels (paddle.Tensor): The target output of the model.
Returns:
losses (dict): A dict containing field 'loss'(mandatory) and 'top1_acc', 'top5_acc'(optional).
"""
losses = dict()
if self.ls_eps != 0. and not valid_mode: # label_smooth
loss_ce = self.label_smooth_loss(scores, labels, **kwargs)
else:
loss_ce = self.loss_func(scores, labels, **kwargs)
loss_tri = self.loss_trip(feats, labels)
top1, top5 = self.get_acc(scores, labels, valid_mode)
losses['top1'] = top1
losses['top5'] = top5
losses['loss_ce'] = loss_ce
losses['loss_tri'] = loss_tri
losses['loss'] = loss_ce + loss_tri
return losses
|
# Copyright (C) 2006-2011, University of Maryland
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/ or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Author: <NAME>
"""
This module contains utility functions and classes for the application.
"""
#==============================================================================
from __future__ import print_function
import os
import sys
import time
import glob
import wx
from wx.lib import delayedresult
# CRUFT: wx 3/4
phoenix = wx.version() >= '4.0'
BitmapFromImage = wx.Bitmap if phoenix else wx.BitmapFromImage
# Text string used to compare the string width in pixels for different fonts.
# This benchmark string has 273 characters, containing 92 distinct characters
# consisting of the lowercase alpha chars in the ratio used in an English
# Scrabble(TM) set, two sets of uppercase alpha chars, two sets of digits,
# special chars with multiples of commonly used ones, and many spaces to
# approximate spacing between words in sentences and labels.
BENCHMARK_TEXT =\
"aaaaaaaaa bb cc dddd eeeeeeeeeeee ff ggg hh iiiiiiiii j k llll mm "\
"nnnnnn oooooooo pp q rrrrrr ssss tttttt uuuu vv ww x yy z "\
"ABCD EFGH IJKL MNOP QRST UVW XYZ ABCD EFGH IJKL MNOP QRST UVW XYZ "\
"01234 56789 01234 56789 "\
"...... :::: ()()() \"\",,'' ++-- **//== {}[]<> ;|~\\_ ?!@#$%^&"
# The width and height in pixels of the test string using MS Windows default
# font "MS Shell Dlg 2" and a dpi of 96.
# Note: the MS Windows XP default font has the same width and height as Tahoma.
BENCHMARK_WIDTH = 1600
BENCHMARK_HEIGHT = 14
#==============================================================================
def choose_fontsize(fontname=None):
"""
Determines the largest font size (in points) to use for a given font such
that the rendered width of the benchmark string is less than or equal to
101% of the rendered width of the string on a Windows XP computer using the
Windows default font at 96 dpi.
The width in pixels of a rendered string is affected by the choice of font,
the point size of the font, and the resolution of the installed font as
measured in dots-per-inch (aka points-per-inch).
"""
frame = wx.Frame(parent=None, id=wx.ID_ANY, title="")
if fontname is None:
fontname = frame.GetFont().GetFaceName()
max_width = BENCHMARK_WIDTH + BENCHMARK_WIDTH/100
for fontsize in range(12, 5, -1):
frame.SetFont(wx.Font(fontsize, wx.SWISS, wx.NORMAL, wx.NORMAL, False,
fontname))
benchmark = wx.StaticText(frame, wx.ID_ANY, label="")
w, h = benchmark.GetTextExtent(BENCHMARK_TEXT)
benchmark.Destroy()
if w <= max_width: break
frame.Destroy()
return fontsize
def display_fontsize(fontname=None, benchmark_text=BENCHMARK_TEXT,
benchmark_width=BENCHMARK_WIDTH,
benchmark_height=BENCHMARK_HEIGHT):
"""
Displays the width in pixels of a benchmark text string for a given font
at various point sizes when rendered on the application's output device
(which implicitly takes into account the resolution in dpi of the font
faces at the various point sizes).
"""
# Create a temporary frame that we will soon destroy.
frame = wx.Frame(parent=None, id=wx.ID_ANY, title="")
# Set the fontname if one is given, otherwise use the system default font.
# Get the font name even if we just set it in case the specified font is
# not installed and the system chooses another one.
if fontname is not None:
frame.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL, wx.NORMAL, False,
fontname))
fontname = frame.GetFont().GetFaceName()
x, y = wx.ClientDC(frame).GetPPI()
print("*** Benchmark text width and height in pixels = %4d %2d"\
%(benchmark_width, benchmark_height))
print("*** Compare against %s font with dpi resolution of %d:"\
%(fontname, x))
for fontsize in range(12, 5, -1):
frame.SetFont(wx.Font(fontsize, wx.SWISS, wx.NORMAL, wx.NORMAL, False,
fontname))
benchmark = wx.StaticText(frame, wx.ID_ANY, label="")
w, h = benchmark.GetTextExtent(benchmark_text)
benchmark.Destroy()
print(" For point size %2d, benchmark text w, h = %4d %2d"\
%(fontsize, w, h))
frame.Destroy()
def _finddata():
patterns = ['*.png','*.ico','*.jpg']
path = resource_dir()
files = []
for p in patterns:
files += glob.glob(os.path.join(path,p))
return files
def data_files():
"""
Return the data files associated with the package.
The format is a list of (directory, [files...]) pairs which can be
used directly in the py2exe setup script as::
setup(...,
data_files=data_files(),
...)
"""
data_files = [('bumps-data', _finddata())]
return data_files
def package_data():
"""
Return the data files associated with the package.
The format is a dictionary of {'fully.qualified.module', [files...]}
used directly in the setup script as::
setup(...,
package_data=package_data(),
...)
"""
return { 'bumps.gui': _finddata() }
self_cached_path = None
def resource_dir():
"""
Return the path to the application data.
This is either in the environment variable BUMPS_DATA, in the
source tree in gui/resources, or beside the executable in
bumps-data.
"""
# If we already found it, then we are done
global self_cached_path
if self_cached_path is not None: return self_cached_path
# Check for data path in the environment
key = 'BUMPS_DATA'
if key in os.environ:
path = os.environ[key]
if not os.path.isdir(path):
raise RuntimeError('Path in environment %s not a directory'%key)
self_cached_path = path
return self_cached_path
# Check for data path in the package
path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'resources'))
#print >>sys.stderr, "checking for resource in",path
if os.path.isdir(path):
self_cached_path = path
return self_cached_path
# Check in package root, which is where pyinstaller puts it
root = os.path.dirname(os.path.dirname(os.path.dirname(path)))
path = os.path.join(root, 'bumps-data')
if os.path.isdir(path):
self_cached_path = path
return self_cached_path
# Check for data path next to exe/zip file.
exepath = os.path.dirname(sys.executable)
path = os.path.join(exepath,'bumps-data')
#print >>sys.stderr, "checking for resource in",path
if os.path.isdir(path):
self_cached_path = path
return self_cached_path
# py2app puts the data in Contents/Resources, but the executable
# is in Contents/MacOS.
path = os.path.join(exepath,'..','Resources','bumps-data')
#print >>sys.stderr, "checking for resource in",path
if os.path.isdir(path):
self_cached_path = path
return self_cached_path
raise RuntimeError('Could not find the Bumps data files')
def resource(filename):
return os.path.join(resource_dir(),filename)
def get_bitmap(filename, type=wx.BITMAP_TYPE_PNG, scale_factor=16):
"""
Returns the scaled bitmap from an image file (bmp, jpg, png) stored in
the data directory of the package.
"""
path = resource(filename)
return BitmapFromImage(wx.Image(name=path, type=type)
.Scale(scale_factor, scale_factor))
def popup_error_message(caption, message):
"""Displays an error message in a pop-up dialog box with an OK button."""
msg = wx.MessageDialog(None, message, caption, style=wx.ICON_ERROR|wx.OK)
msg.ShowModal()
msg.Destroy()
def popup_information_message(caption, message):
"""Displays an informational message in a pop-up with an OK button."""
msg = wx.MessageDialog(None, message, caption,
style=wx.ICON_INFORMATION|wx.OK)
msg.ShowModal()
msg.Destroy()
def popup_question(caption, message):
"""Displays a question in a pop-up dialog box with YES and NO buttons."""
msg = wx.MessageDialog(None, message, caption,
style=wx.ICON_QUESTION|wx.YES_NO)
msg.ShowModal()
msg.Destroy()
def popup_warning_message(caption, message):
"""Displays a warning message in a pop-up dialog box with an OK button."""
msg = wx.MessageDialog(None, message, caption, style=wx.ICON_WARNING|wx.OK)
msg.ShowModal()
msg.Destroy()
#==============================================================================
class StatusBarInfo():
"""This class writes, saves, and restores multi-field status bar text."""
def __init__(self):
frame = wx.FindWindowByName("AppFrame", parent=None)
self.sb = frame.GetStatusBar()
self.cnt = self.sb.GetFieldsCount()
self.field = [""]*self.cnt
def write(self, index=0, text=""):
# Write text to the specified slot and save text locally.
# Beware that if you use field 0, wxPython will likely overwrite it.
if index > self.cnt - 1:
return
self.sb.SetStatusText(text, index)
self.field[index] = text
def restore(self):
# Restore saved text from fields 1 to n.
# Note that wxPython updates field 0 with hints and other messages.
for index in range(1, self.cnt):
self.sb.SetStatusText(self.field[index], index)
#==============================================================================
class ExecuteInThread():
"""
This class executes the specified function in a separate thread and calls a
designated callback function when the execution completes. Control is
immediately given back to the caller of ExecuteInThread which can execute
in parallel in the main thread.
Note that wx.lib.delayedresult provides a simple interface to threading
that does not include mechanism to stop the thread.
"""
def __init__(self, callback, function, *args, **kwargs):
if callback is None: callback = self._callback
#print "*** ExecuteInThread init:", callback, function, args, kwargs
delayedresult.startWorker(consumer=callback, workerFn=function,
wargs=args, wkwargs=kwargs)
def _callback(self, delayedResult):
'''
jobID = delayedResult.getJobID()
assert jobID == self.jobID
try:
    result = delayedResult.get()
except Exception as e:
    popup_error_message(self, "job %s raised exception: %s" % (jobID, e))
    return
'''
return
#==============================================================================
class WorkInProgress(wx.Panel):
"""
This class implements a rotating 'work in progress' gauge.
"""
def __init__(self, parent):
wx.Panel.__init__(self, parent, wx.ID_ANY)
self.gauge = wx.Gauge(self, wx.ID_ANY, range=50, size=(250, 25))
self.timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.TimerHandler)
#self.count = 0
def Start(self):
self.timer.Start(100)
def Stop(self):
self.timer.Stop()
def TimerHandler(self, event):
#self.count += 1
#print "*** count = ", self.count
self.gauge.Pulse()
#==============================================================================
log_time_handle = None # global variable for holding TimeStamp instance handle
def log_time(text=None, reset=False):
"""
This is a convenience function for using the TimeStamp class from any
module in the application for logging elapsed and delta time information.
This data is prefixed by a timestamp and optionally suffixed by a comment.
log_time maintains a single instance of TimeStamp during program execution.
Example output from calls to log_time('...'):
==> 0.000s 0.000s Starting <application name>
==> 0.016s 0.016s Starting to display the splash screen
==> 0.015s 0.031s Starting to build the GUI application
==> 0.094s 0.125s Entering the event loop
==> 2.906s 3.031s Terminating the splash screen and showing the GUI
"""
global log_time_handle
if log_time_handle is None:
log_time_handle = TimeStamp()
if reset:
log_time_handle.reset()
log_time_handle.log_interval(text=text)
class TimeStamp():
"""
This class provides timestamp, delta time, and elapsed time services for
displaying wall clock time usage by the application.
"""
def __init__(self):
self.reset()
def reset(self):
# Starts new timing interval.
self.t0 = self.t1 = time.time()
def gettime3(self):
# Gets current time in timestamp, delta time, and elapsed time format.
now = time.time()
timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(now))
elapsed = now - self.t0
delta = now - self.t1
self.t1 = now
return timestamp, delta, elapsed
def gettime2(self):
# Gets current time in delta time and elapsed time format.
now = time.time()
elapsed = now - self.t0
delta = now - self.t1
self.t1 = now
return delta, elapsed
def log_time_info(self, text=""):
# Prints timestamp, delta time, elapsed time, and optional comment.
t, d, e = self.gettime3()
print("==> %s%9.3fs%9.3fs %s" %(t, d, e, text))
def log_timestamp(self, text=""):
# Prints timestamp and optional comment.
t, d, e = self.gettime3()
print("==> %s %s" %(t, text))
def log_interval(self, text=""):
# Prints elapsed time, delta time, and optional comment.
d, e = self.gettime2()
print("==>%9.3fs%9.3fs %s" %(d, e, text))
#==============================================================================
if __name__ == '__main__':
# Test the display_fontsize and choose_fontsize functions.
app = wx.App(False)  # wx.PySimpleApp is not available in wxPython 4 (Phoenix)
print("For Arial font:")
display_fontsize(fontname="Arial")
print(" Calculated font size =", choose_fontsize(fontname="Arial"))
app.Destroy()
print("")
print("*** Data directory is: ", resource_dir())
# Test the TimeStamp class and the convenience function.
print("")
log_time("Using log_time() function")
print("Sleeping for 0.54 seconds ...")
time.sleep(0.54)
log_time("Using log_time() function")
print("Sleeping for 0.83 seconds ...")
time.sleep(0.83)
log_time("Using log_time() function")
print("Creating an instance of TimeStamp (as the second timing class)")
ts = TimeStamp()
print("Sleeping for 0.66 seconds ...")
time.sleep(0.66)
ts.log_time_info(text="Using log_time_info() method")
ts.log_timestamp(text="Using log_timestamp() method")
ts.log_interval(text="Using log_interval() method")
print("Sleeping for 0.35 seconds ...")
time.sleep(0.35)
ts.log_interval(text="Using log_interval() method")
print("Sleeping for 0.42 seconds ...")
time.sleep(0.42)
ts.log_interval(text="Using log_interval() method")
print("Resetting the clock ...")
ts.reset()
ts.log_interval(text="Using log_interval() method")
print("Sleeping for 0.33 seconds ...")
time.sleep(0.33)
ts.log_interval(text="Using log_interval() method")
print("Switch back to the first timing class")
log_time("Using log_time() function")
|
import itertools
import re
from typing import Dict
class CalcParseError(Exception):
pass
class EvaluateError(Exception):
pass
class UnknownOperatorError(Exception):
pass
def add(a, b):
return a + b
def sub(a, b):
return a - b
def mul(a, b):
return a * b
def div(a, b):
return a // b
def _operator_to_string(operator):
if operator == add:
return "+"
elif operator == sub:
return "-"
elif operator == mul:
return "*"
elif operator == div:
return "/"
else:
raise UnknownOperatorError
class CalcNode:
def __init__(self, content=None, operator=None, lch=None, rch=None):
self.content = content
self.lch = lch
self.rch = rch
self.operator = operator
def is_operator_node(self):
return self.operator is not None
def is_constant_node(self):
return isinstance(self.content, int)
def is_variable_node(self):
return not self.is_operator_node() and not self.is_constant_node()
def __eq__(self, other):
return self.__str__() == str(other)
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self, depth=0):
opens = [] # Position list of open brackets
cands = []
original_formula = self.to_string_strictly()
for i, c in enumerate(original_formula):
if c == '(':
opens.append(i)
elif c == ')':
assert len(opens) > 0
cands.append((opens[-1], i))
opens.pop()
values_for_identity_check = [3, 14, 15, 92]
def likely_identical(formula: str):
node = CalcNode.parse(formula)
vars = node.get_all_variables()
for combination in itertools.product(values_for_identity_check, repeat=len(vars)):
val_dict = dict(zip(vars, list(combination)))
if self.evaluate(val_dict) != node.evaluate(val_dict):
return False
return True
# Remove parentheses greedy
res_formula = list(original_formula)
for op, cl in cands:
tmp = res_formula.copy()
tmp[op] = ''
tmp[cl] = ''
if likely_identical("".join(tmp)):
res_formula = tmp
simplified_form = "".join(res_formula)
return simplified_form
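# Illustrative behaviour (added): brackets are dropped greedily only when a few
# sampled evaluations agree that the formula stays identical, e.g.
#   str(CalcNode.parse("(a+b)"))   -> "a+b"
#   str(CalcNode.parse("(a+b)*c")) -> "(a+b)*c"   (brackets are still needed)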
def get_all_variables(self):
if self.is_operator_node():
lv = self.lch.get_all_variables()
rv = self.rch.get_all_variables()
return lv + rv
elif self.is_constant_node():
return []
else:
return [self.content]
def evaluate(self, variables: Dict[str, int] = None):
if variables is None:
variables = {}
if self.is_operator_node():
lv = self.lch.evaluate(variables)
rv = self.rch.evaluate(variables)
return self.operator(lv, rv)
elif self.is_constant_node():
return int(self.content)
else:
if self.content not in variables:
raise EvaluateError(
"Found an unknown variable '{}'".format(self.content))
else:
return variables[self.content]
def simplify(self):
current_formula = str(self)
# Really stupid heuristics but covers the major case.
while True:
next_formula = re.sub(r"-1\+1$", "", current_formula)
next_formula = re.sub(r"\+0$", "", next_formula)
next_formula = re.sub(r"-0$", "", next_formula)
if next_formula == current_formula:
break
current_formula = next_formula
return CalcNode.parse(current_formula)
def to_string_strictly(self):
if self.is_operator_node():
return "({lch}{op}{rch})".format(
lch=self.lch.to_string_strictly(),
op=_operator_to_string(self.operator),
rch=self.rch.to_string_strictly()
)
else:
return str(self.content)
@classmethod
def parse(cls, formula: str):
res, pos = _expr(formula + "$", 0) # $ is put as a terminal character
if pos != len(formula):
raise CalcParseError
return res
def _expr(formula, pos):
res, pos = _term(formula, pos)
while formula[pos] == '+' or formula[pos] == '-':
tmp = CalcNode()
tmp.operator = add if formula[pos] == '+' else sub
pos += 1
tmp.lch = res
tmp.rch, pos = _term(formula, pos)
res = tmp
return res, pos
def _term(formula, pos):
res, pos = _factor(formula, pos)
while formula[pos] == '*' or formula[pos] == '/':
tmp = CalcNode()
tmp.operator = mul if formula[pos] == '*' else div
pos += 1
tmp.lch = res
tmp.rch, pos = _factor(formula, pos)
res = tmp
return res, pos
def _factor(formula, pos):
if formula[pos] == '(':
pos += 1
res, pos = _expr(formula, pos)
if formula[pos] != ')':
raise CalcParseError
pos += 1
return res, pos
elif formula[pos].isalpha():
varname = ""
while formula[pos].isalpha() or formula[pos] == '_':
varname += formula[pos]
pos += 1
res = CalcNode()
res.content = varname
return res, pos
elif formula[pos].isdigit() or formula[pos] == '-':
if formula[pos] == '-':
sign = -1
pos += 1
if not formula[pos].isdigit():
raise CalcParseError
else:
sign = +1
value = 0
while formula[pos].isdigit():
value = 10 * value + int(formula[pos])
pos += 1
value *= sign
if formula[pos].isalpha() or formula[pos] == '(':
# pattern like "123A"
tmp = CalcNode()
tmp.content = value
res = CalcNode()
res.lch = tmp
res.rch, pos = _factor(formula, pos)
res.operator = mul
return res, pos
else:
res = CalcNode()
res.content = value
return res, pos
else:
raise CalcParseError
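# Usage sketch (added; not in the original module):
#
#   node = CalcNode.parse("2*x+(3-1)")
#   assert node.evaluate({"x": 5}) == 12
#   assert node.to_string_strictly() == "((2*x)+(3-1))"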
|
import warnings as test_warnings
from datetime import datetime, timedelta
from http import HTTPStatus
from json.decoder import JSONDecodeError
from unittest.mock import MagicMock, call, patch
import pytest
import requests
from rotkehlchen.accounting.structures.balance import Balance
from rotkehlchen.assets.converters import asset_from_bitstamp
from rotkehlchen.constants.assets import A_BTC, A_ETH, A_EUR, A_LINK, A_USD, A_USDC
from rotkehlchen.errors.asset import UnknownAsset
from rotkehlchen.errors.misc import RemoteError
from rotkehlchen.exchanges.bitstamp import (
API_ERR_AUTH_NONCE_CODE,
API_ERR_AUTH_NONCE_MESSAGE,
API_KEY_ERROR_CODE_ACTION,
API_MAX_LIMIT,
USER_TRANSACTION_MIN_SINCE_ID,
USER_TRANSACTION_SORTING_MODE,
Bitstamp,
)
from rotkehlchen.exchanges.data_structures import (
AssetMovement,
AssetMovementCategory,
Trade,
TradeType,
)
from rotkehlchen.fval import FVal
from rotkehlchen.tests.utils.constants import A_GBP
from rotkehlchen.tests.utils.mock import MockResponse
from rotkehlchen.types import Fee, Location, Timestamp
from rotkehlchen.utils.serialization import jsonloads_list
def test_name():
exchange = Bitstamp('bitstamp1', 'a', b'a', object(), object())
assert exchange.location == Location.BITSTAMP
assert exchange.name == 'bitstamp1'
def test_bitstamp_exchange_assets_are_known(mock_bitstamp):
request_url = f'{mock_bitstamp.base_uri}/v2/trading-pairs-info'
try:
response = requests.get(request_url)
except requests.exceptions.RequestException as e:
raise RemoteError(
f'Bitstamp get request at {request_url} connection error: {str(e)}.',
) from e
if response.status_code != 200:
raise RemoteError(
f'Bitstamp query responded with error status code: {response.status_code} '
f'and text: {response.text}',
)
try:
response_list = jsonloads_list(response.text)
except JSONDecodeError as e:
raise RemoteError(f'Bitstamp returned invalid JSON response: {response.text}') from e
# Extract the unique symbols from the exchange pairs
pairs = [raw_result.get('name') for raw_result in response_list]
symbols = set()
for pair in pairs:
symbols.update(set(pair.split('/')))
for symbol in symbols:
try:
asset_from_bitstamp(symbol)
except UnknownAsset as e:
test_warnings.warn(UserWarning(
f'Found unknown asset {e.asset_name} in {mock_bitstamp.name}. '
f'Support for it has to be added',
))
def test_validate_api_key_invalid_json(mock_bitstamp):
"""Test when status code is not 200, an invalid JSON response is handled."""
def mock_api_query_response(endpoint, method='', options=None): # pylint: disable=unused-argument # noqa: E501
return MockResponse(HTTPStatus.FORBIDDEN, '{"key"}')
with patch.object(mock_bitstamp, '_api_query', side_effect=mock_api_query_response):
result, msg = mock_bitstamp.validate_api_key()
assert result is False
assert msg == 'Bitstamp returned invalid JSON response: {"key"}.'
def test_validate_api_key_err_auth_nonce(mock_bitstamp):
"""Test the error code related with the nonce authentication is properly handled"""
def mock_api_query_response(endpoint, method='', options=None): # pylint: disable=unused-argument # noqa: E501
return MockResponse(
HTTPStatus.FORBIDDEN,
f'{{"code": "{API_ERR_AUTH_NONCE_CODE}", "reason": "whatever"}}',
)
with patch.object(mock_bitstamp, '_api_query', side_effect=mock_api_query_response):
result, msg = mock_bitstamp.query_balances()
assert result is False
assert msg == API_ERR_AUTH_NONCE_MESSAGE
result, msg = mock_bitstamp.validate_api_key()
assert result is False
assert msg == API_ERR_AUTH_NONCE_MESSAGE
movements = mock_bitstamp.query_online_deposits_withdrawals(0, 1)
assert movements == []
errors = mock_bitstamp.msg_aggregator.consume_errors()
assert len(errors) == 1
assert API_ERR_AUTH_NONCE_MESSAGE in errors[0]
trades, _ = mock_bitstamp.query_online_trade_history(0, 1)
assert trades == []
errors = mock_bitstamp.msg_aggregator.consume_errors()
assert len(errors) == 1
assert API_ERR_AUTH_NONCE_MESSAGE in errors[0]
@pytest.mark.parametrize('code', API_KEY_ERROR_CODE_ACTION.keys())
def test_validate_api_key_api_key_error_code(
mock_bitstamp,
code,
):
"""Test an error code related with the API key ones returns a tuple with
False (result) and a user friendly message (reason,
from API_KEY_ERROR_CODE_ACTION values).
"""
def mock_api_query_response(endpoint): # pylint: disable=unused-argument
return MockResponse(
HTTPStatus.FORBIDDEN,
f'{{"code": "{code}", "reason": "whatever"}}',
)
with patch.object(mock_bitstamp, '_api_query', side_effect=mock_api_query_response):
result, msg = mock_bitstamp.validate_api_key()
assert result is False
assert msg == API_KEY_ERROR_CODE_ACTION[code]
def test_validate_api_key_success(mock_bitstamp):
"""Test when status code is 200 the response is a tuple with True (result)
and an empty message.
"""
def mock_api_query_response(endpoint): # pylint: disable=unused-argument
return MockResponse(HTTPStatus.OK, '')
with patch.object(mock_bitstamp, '_api_query', side_effect=mock_api_query_response):
result, msg = mock_bitstamp.validate_api_key()
assert result is True
assert msg == ''
def test_query_balances_invalid_json(mock_bitstamp):
"""Test an invalid JSON response raises RemoteError.
"""
def mock_api_query_response(endpoint): # pylint: disable=unused-argument
return MockResponse(HTTPStatus.OK, '{"key"}')
with patch.object(mock_bitstamp, '_api_query', side_effect=mock_api_query_response):
with pytest.raises(RemoteError):
mock_bitstamp.query_balances()
@pytest.mark.parametrize('response, has_reason', (
('{"code": "APIXXX", "reason": "has reason"}', True),
('{"code": "APIXXX", "text": "has text"}', False),
))
def test_query_balances_non_related_error_code(
mock_bitstamp,
response,
has_reason,
):
"""Test an error code unrelated with the system clock not synced one
returns a tuple with None (result) and a message (reason, from 'reason'
response value or `response.text`).
"""
def mock_api_query_response(endpoint): # pylint: disable=unused-argument
return MockResponse(HTTPStatus.FORBIDDEN, response)
with patch.object(mock_bitstamp, '_api_query', side_effect=mock_api_query_response):
result, msg = mock_bitstamp.query_balances()
assert result is False
exp_reason = 'has reason' if has_reason else 'has text'
assert exp_reason in msg
def test_query_balances_skips_not_balance_entry(mock_bitstamp):
"""Test an entry that doesn't end with `_balance` is skipped
"""
def mock_api_query_response(endpoint): # pylint: disable=unused-argument
return MockResponse(HTTPStatus.OK, '{"link_available": "1.00000000"}')
with patch.object(mock_bitstamp, '_api_query', side_effect=mock_api_query_response):
assert mock_bitstamp.query_balances() == ({}, '')
def test_query_balances_skipped_not_asset_entry(mock_bitstamp):
"""Test an entry that can't instantiate Asset is skipped
"""
def mock_api_query_response(endpoint): # pylint: disable=unused-argument
return MockResponse(HTTPStatus.OK, '{"bbbrrrlink_balance": "1.00000000"}')
with patch.object(mock_bitstamp, '_api_query', side_effect=mock_api_query_response):
assert mock_bitstamp.query_balances() == ({}, '')
def test_query_balances_skips_inquirer_error(mock_bitstamp):
"""Test an entry that can't get its USD price because of a remote error is
skipped
"""
inquirer = MagicMock()
inquirer.find_usd_price.side_effect = RemoteError('test')
def mock_api_query_response(endpoint): # pylint: disable=unused-argument
return MockResponse(HTTPStatus.OK, '{"link_balance": "1.00000000"}')
with patch('rotkehlchen.exchanges.bitstamp.Inquirer', return_value=inquirer):
with patch.object(mock_bitstamp, '_api_query', side_effect=mock_api_query_response):
assert mock_bitstamp.query_balances() == ({}, '')
@pytest.mark.parametrize('should_mock_current_price_queries', [True])
def test_query_balances_asset_balance(mock_bitstamp, inquirer): # pylint: disable=unused-argument
"""Test an entry that can't get its USD price is skipped
"""
balances_data = (
"""
{
"eth_available": "0.00000000",
"eth_balance": "32.00000000",
"eth_reserved": "0.00000000",
"eth_withdrawal_fee": "0.04000000",
"link_available": "0.00000000",
"link_balance": "1000.00000000",
"link_reserved": "0.00000000",
"link_withdrawal_fee": "0.25000000",
"xrp_available": "0.00000000",
"xrp_balance": "0.00000000",
"xrp_reserved": "0.00000000",
"xrp_withdrawal_fee": "0.02000000"
}
"""
)
def mock_api_query_response(endpoint): # pylint: disable=unused-argument
return MockResponse(HTTPStatus.OK, balances_data)
with patch.object(mock_bitstamp, '_api_query', side_effect=mock_api_query_response):
asset_balance, msg = mock_bitstamp.query_balances()
assert asset_balance == {
A_ETH: Balance(
amount=FVal('32'),
usd_value=FVal('48'),
),
A_LINK: Balance(
amount=FVal('1000'),
usd_value=FVal('1500'),
),
}
assert msg == ''
def test_deserialize_trade_buy(mock_bitstamp):
raw_trade = {
'id': 2,
'type': 2,
'datetime': '2020-12-02 09:30:00',
'btc': '0.50000000',
'usd': '-10000.00000000',
'btc_usd': '0.00005000',
'fee': '20.00000000',
'order_id': 2,
}
expected_trade = Trade(
timestamp=1606901400,
location=Location.BITSTAMP,
base_asset=A_BTC,
quote_asset=A_USD,
trade_type=TradeType.BUY,
amount=FVal('0.50000000'),
rate=FVal('0.00005000'),
fee=FVal('20.00000000'),
fee_currency=A_USD,
link='2',
notes='',
)
trade = mock_bitstamp._deserialize_trade(raw_trade)
assert trade == expected_trade
raw_trade = {
'id': 2,
'type': 2,
'datetime': '2019-04-16 08:09:05.149343',
'btc': '0.00060000',
'usd': '0',
'btc_eur': '8364.0',
'eur': '-5.02',
'fee': '0.02',
'order_id': 2,
}
expected_trade = Trade(
timestamp=1555402145,
location=Location.BITSTAMP,
base_asset=A_BTC,
quote_asset=A_EUR,
trade_type=TradeType.BUY,
amount=FVal('0.0006'),
rate=FVal('8364.0'),
fee=FVal('0.02'),
fee_currency=A_EUR,
link='2',
notes='',
)
trade = mock_bitstamp._deserialize_trade(raw_trade)
assert trade == expected_trade
raw_trade = {
'id': 15,
'type': 2,
'datetime': '2019-04-15 16:19:14.826000',
'btc': '0',
'usd': '-7.70998',
'eur_usd': '1.12124',
'eur': '6.87630',
'fee': '0.02',
'order_id': 15,
}
expected_trade = Trade(
timestamp=1555345154,
location=Location.BITSTAMP,
base_asset=A_EUR,
quote_asset=A_USD,
trade_type=TradeType.BUY,
amount=FVal('6.8763'),
rate=FVal('1.12124'),
fee=FVal('0.02'),
fee_currency=A_USD,
link='15',
notes='',
)
trade = mock_bitstamp._deserialize_trade(raw_trade)
assert trade == expected_trade
def test_deserialize_trade_sell(mock_bitstamp):
raw_trade = {
'id': 5,
'type': 2,
'datetime': '2020-12-03 11:30:00',
'eur': '-1.00000000',
'usd': '1.22000000',
'eur_usd': '0.81967213',
'fee': '0.00610000',
'order_id': 3,
}
expected_trade = Trade(
timestamp=1606995000,
location=Location.BITSTAMP,
base_asset=A_EUR,
quote_asset=A_USD,
trade_type=TradeType.SELL,
amount=FVal('1'),
rate=FVal('0.81967213'),
fee=FVal('0.00610000'),
fee_currency=A_USD,
link='5',
notes='',
)
trade = mock_bitstamp._deserialize_trade(raw_trade)
assert trade == expected_trade
raw_trade = {
'id': 10,
'type': 2,
'datetime': '2019-06-25 21:41:08.802256',
'btc': '-1.81213214',
'usd': '0',
'btc_eur': '10119.82',
'eur': '18338.45',
'fee': '40.35000',
'order_id': 3,
}
expected_trade = Trade(
timestamp=1561498868,
location=Location.BITSTAMP,
base_asset=A_BTC,
quote_asset=A_EUR,
trade_type=TradeType.SELL,
amount=FVal('1.81213214'),
rate=FVal('10119.82'),
fee=FVal('40.35'),
fee_currency=A_EUR,
link='10',
notes='',
)
trade = mock_bitstamp._deserialize_trade(raw_trade)
assert trade == expected_trade
@pytest.mark.parametrize('option', ['limit', 'since_id', 'sort', 'offset'])
def test_api_query_paginated_user_transactions_required_options(mock_bitstamp, option):
"""Test calling the 'user_transactions' endpoint requires a set of specific
options.
"""
options = {
'limit': API_MAX_LIMIT,
'since_id': USER_TRANSACTION_MIN_SINCE_ID,
'sort': USER_TRANSACTION_SORTING_MODE,
'offset': 0,
}
del options[option]
with pytest.raises(KeyError):
mock_bitstamp._api_query_paginated(
start_ts=Timestamp(0),
end_ts=Timestamp(1),
options=options,
case='trades',
)
@pytest.mark.parametrize('option', ['limit', 'since_id', 'sort', 'offset'])
def test_api_query_paginated_user_transactions_required_options_values(mock_bitstamp, option):
"""Test calling the 'user_transactions' endpoint requires a set of specific
options.
"""
options = {
'limit': API_MAX_LIMIT,
'since_id': USER_TRANSACTION_MIN_SINCE_ID,
'sort': USER_TRANSACTION_SORTING_MODE,
'offset': 0,
}
options[option] = -1
with pytest.raises(AssertionError):
mock_bitstamp._api_query_paginated(
start_ts=Timestamp(0),
end_ts=Timestamp(1),
options=options,
case='trades',
)
def test_api_query_paginated_invalid_json(mock_bitstamp):
"""Test an invalid JSON response returns empty list.
"""
options = {
'since_id': USER_TRANSACTION_MIN_SINCE_ID,
'limit': API_MAX_LIMIT,
'sort': USER_TRANSACTION_SORTING_MODE,
'offset': 0,
}
def mock_api_query_response(endpoint, method, options): # pylint: disable=unused-argument
return MockResponse(HTTPStatus.OK, '[{"key"}]')
with patch.object(mock_bitstamp, '_api_query', side_effect=mock_api_query_response):
result = mock_bitstamp._api_query_paginated(
start_ts=Timestamp(0),
end_ts=Timestamp(1),
options=options,
case='trades',
)
assert result == []
@pytest.mark.parametrize('response', (
'{"code": "APIXXX", "reason": "has reason"}',
'{"code": "APIXXX", "text": "has text"}',
))
def test_api_query_paginated_non_related_error_code(mock_bitstamp, response):
"""Test an error code unrelated with the system clock not synced one
returns a an empty list.
"""
options = {
'since_id': USER_TRANSACTION_MIN_SINCE_ID,
'limit': API_MAX_LIMIT,
'sort': USER_TRANSACTION_SORTING_MODE,
'offset': 0,
}
def mock_api_query_response(endpoint, method, options): # pylint: disable=unused-argument
return MockResponse(HTTPStatus.FORBIDDEN, response)
with patch.object(mock_bitstamp, '_api_query', side_effect=mock_api_query_response):
result = mock_bitstamp._api_query_paginated(
start_ts=Timestamp(0),
end_ts=Timestamp(1),
options=options,
case='trades',
)
assert result == []
def test_api_query_paginated_skips_different_type_result(mock_bitstamp):
"""Test results whose type is not in `raw_result_type_filter` are skipped
"""
options = {
'since_id': USER_TRANSACTION_MIN_SINCE_ID,
'limit': API_MAX_LIMIT,
'sort': USER_TRANSACTION_SORTING_MODE,
'offset': 0,
}
def mock_api_query_response(endpoint, method, options): # pylint: disable=unused-argument
return MockResponse(
HTTPStatus.OK,
'[{"type": "whatever"}, {"type": "23"}]',
)
with patch.object(mock_bitstamp, '_api_query', side_effect=mock_api_query_response):
result = mock_bitstamp._api_query_paginated(
start_ts=Timestamp(0),
end_ts=Timestamp(1),
options=options,
case='trades',
)
assert result == []
def test_api_query_paginated_stops_timestamp_gt_end_ts(mock_bitstamp):
"""Test the method stops processing results when a result timestamp is gt
`end_ts`.
"""
api_limit = 2
now = datetime.now().replace(microsecond=0)
gt_now = now + timedelta(seconds=1)
now_ts = int(now.timestamp())
gt_now_iso = gt_now.isoformat()
options = {
'since_id': USER_TRANSACTION_MIN_SINCE_ID,
'limit': api_limit,
'sort': USER_TRANSACTION_SORTING_MODE,
'offset': 0,
}
expected_calls = [
call(
endpoint='user_transactions',
method='post',
options={
'since_id': 1,
'limit': 2,
'sort': 'asc',
'offset': 0,
},
),
]
def mock_api_query_response(endpoint, method, options): # pylint: disable=unused-argument
return MockResponse(
HTTPStatus.OK,
f'[{{"type": "14", "datetime": "{gt_now_iso}"}}]',
)
with patch(
'rotkehlchen.exchanges.bitstamp.API_MAX_LIMIT',
new_callable=MagicMock(return_value=api_limit),
):
with patch.object(
mock_bitstamp,
'_api_query',
side_effect=mock_api_query_response,
) as mock_api_query:
result = mock_bitstamp._api_query_paginated(
start_ts=Timestamp(0),
end_ts=Timestamp(now_ts),
options=options,
case='trades',
)
assert mock_api_query.call_args_list == expected_calls
assert result == []
@pytest.mark.freeze_time(datetime(2020, 12, 3, 12, 0, 0))
def test_api_query_paginated_trades_pagination(mock_bitstamp):
"""Test pagination logic for trades works as expected.
First request: 2 results, 1 valid trade (id 2)
Second request: 2 results, no trades
Third request: 2 results, 1 valid trade (id 5) and 1 invalid trade (id 6)
Trades with id 2 and 5 are expected to be returned.
"""
# Not a trade
user_transaction_1 = """
{
"id": 1,
"type": "-1",
"datetime": "2020-12-02 09:00:00"
}
"""
# First trade, buy BTC with USD, within timestamp range
user_transaction_2 = """
{
"id": 2,
"type": "2",
"datetime": "2020-12-02 09:30:00",
"btc": "0.50000000",
"usd": "-10000.00000000",
"btc_usd": "0.00005000",
"fee": "20.00000000",
"order_id": 2
}
"""
# Not a trade
user_transaction_3 = """
{
"id": 3,
"type": "-1",
"datetime": "2020-12-02 18:00:00"
}
"""
# Not a trade
user_transaction_4 = """
{
"id": 4,
"type": "-1",
"datetime": "2020-12-03 9:00:00"
}
"""
# Second trade, sell EUR for USD, within timestamp range
user_transaction_5 = """
{
"id": 5,
"type": "2",
"datetime": "2020-12-03 11:30:00",
"eur": "-1.00000000",
"usd": "1.22000000",
"eur_usd": "0.81967213",
"fee": "0.00610000",
"order_id": 3
}
"""
# Third trade, buy ETH with USDC, out of timestamp range
user_transaction_6 = """
{
"id": 6,
"type": "2",
"datetime": "2020-12-03 12:00:01",
"eth": "1.00000000",
"usdc": "-750.00000000",
"eth_usdc": "0.00133333",
"fee": "3.75000000",
"order_id": 1
}
"""
api_limit = 2
now = datetime.now()
now_ts = int(now.timestamp())
options = {
'since_id': USER_TRANSACTION_MIN_SINCE_ID,
'limit': api_limit,
'sort': USER_TRANSACTION_SORTING_MODE,
'offset': 0,
}
expected_calls = [
call(
endpoint='user_transactions',
method='post',
options={
'since_id': 1,
'limit': 2,
'sort': 'asc',
'offset': 0,
},
),
call(
endpoint='user_transactions',
method='post',
options={
'since_id': 3,
'limit': 2,
'sort': 'asc',
'offset': 0,
},
),
call(
endpoint='user_transactions',
method='post',
options={
'since_id': 3,
'limit': 2,
'sort': 'asc',
'offset': 2,
},
),
]
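    # The expected calls above encode the pagination rules under test: after a
    # batch that contains a trade, `since_id` advances past the newest seen
    # transaction id; after a batch with no trades, `since_id` is kept and
    # `offset` is bumped by the page limit instead.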
def get_paginated_response():
results = [
f'[{user_transaction_1},{user_transaction_2}]',
f'[{user_transaction_3},{user_transaction_4}]',
f'[{user_transaction_5},{user_transaction_6}]',
]
for result_ in results:
yield result_
def mock_api_query_response(endpoint, method, options): # pylint: disable=unused-argument
return MockResponse(HTTPStatus.OK, next(get_response))
get_response = get_paginated_response()
with patch(
'rotkehlchen.exchanges.bitstamp.API_MAX_LIMIT',
new_callable=MagicMock(return_value=api_limit),
):
with patch.object(
mock_bitstamp,
'_api_query',
side_effect=mock_api_query_response,
) as mock_api_query:
result = mock_bitstamp._api_query_paginated(
start_ts=Timestamp(0),
end_ts=Timestamp(now_ts),
options=options,
case='trades',
)
assert mock_api_query.call_args_list == expected_calls
expected_result = [
Trade(
timestamp=1606901400,
location=Location.BITSTAMP,
base_asset=A_BTC,
quote_asset=A_USD,
trade_type=TradeType.BUY,
amount=FVal('0.50000000'),
rate=FVal('0.00005000'),
fee=FVal('20.00000000'),
fee_currency=A_USD,
link='2',
notes='',
),
Trade(
timestamp=1606995000,
location=Location.BITSTAMP,
base_asset=A_EUR,
quote_asset=A_USD,
trade_type=TradeType.SELL,
amount=FVal('1'),
rate=FVal('0.81967213'),
fee=FVal('0.00610000'),
fee_currency=A_USD,
link='5',
notes='',
),
]
assert result == expected_result
@pytest.mark.parametrize('start_ts, since_id', [(0, 1), (1606995001, 6)])
def test_query_online_trade_history(mock_bitstamp, start_ts, since_id):
"""Test `since_id` value will change depending on `start_ts` value.
Also tests `db_trades` are sorted by `link` (as int) in ascending mode.
"""
trades = [
Trade(
timestamp=1606995000,
location=Location.BITSTAMP,
base_asset=A_EUR,
quote_asset=A_USD,
trade_type=TradeType.SELL,
amount=FVal('1.22000000'),
rate=FVal('0.81967213'),
fee=FVal('0.00610000'),
fee_currency=A_EUR,
link='5',
notes='',
),
Trade(
timestamp=1606901400,
location=Location.BITSTAMP,
base_asset=A_BTC,
quote_asset=A_USD,
trade_type=TradeType.BUY,
amount=FVal('0.50000000'),
rate=FVal('0.00005000'),
fee=FVal('20.00000000'),
fee_currency=A_USD,
link='2',
notes='',
),
]
mock_bitstamp.db.add_trades(trades)
end_ts = Timestamp(1606995000)
expected_call = call(
start_ts=start_ts,
end_ts=end_ts,
options={
'since_id': since_id,
'limit': 1000,
'sort': 'asc',
'offset': 0,
},
case='trades',
)
with patch.object(mock_bitstamp, '_api_query_paginated') as mock_api_query_paginated:
mock_bitstamp.query_online_trade_history(
start_ts=Timestamp(start_ts),
end_ts=end_ts,
)
assert mock_api_query_paginated.call_args == expected_call
def test_deserialize_asset_movement_deposit(mock_bitstamp):
raw_movement = {
'id': 2,
'type': '0',
'datetime': '2020-12-02 09:30:00',
'btc': '0.50000000',
'usd': '0.00000000',
'btc_usd': '0.00',
'fee': '0.00050000',
'order_id': 2,
'eur': '0.00',
}
asset = A_BTC
movement = AssetMovement(
timestamp=1606901400,
location=Location.BITSTAMP,
category=AssetMovementCategory.DEPOSIT,
address=None,
transaction_id=None,
asset=asset,
amount=FVal('0.5'),
fee_asset=asset,
fee=Fee(FVal('0.0005')),
link='2',
)
expected_movement = mock_bitstamp._deserialize_asset_movement(raw_movement)
assert movement == expected_movement
raw_movement = {
'id': 3,
'type': '0',
'datetime': '2018-03-21 06:46:06.559877',
'btc': '0',
'usd': '0.00000000',
'btc_usd': '0.00',
'fee': '0.1',
'order_id': 2,
'gbp': '1000.51',
}
asset = A_GBP
movement = AssetMovement(
timestamp=1521614766,
location=Location.BITSTAMP,
category=AssetMovementCategory.DEPOSIT,
address=None,
transaction_id=None,
asset=asset,
amount=FVal('1000.51'),
fee_asset=asset,
fee=Fee(FVal('0.1')),
link='3',
)
expected_movement = mock_bitstamp._deserialize_asset_movement(raw_movement)
assert movement == expected_movement
raw_movement = {
'id': 3,
'type': '0',
'datetime': '2018-03-21 06:46:06.559877',
'btc': '0',
'usd': '0.00000000',
'btc_usd': '0.00',
'fee': '0.1',
'order_id': 2,
'usdc': '1000.51',
}
asset = A_USDC
movement = AssetMovement(
timestamp=1521614766,
location=Location.BITSTAMP,
category=AssetMovementCategory.DEPOSIT,
address=None,
transaction_id=None,
asset=asset,
amount=FVal('1000.51'),
fee_asset=asset,
fee=Fee(FVal('0.1')),
link='3',
)
expected_movement = mock_bitstamp._deserialize_asset_movement(raw_movement)
assert movement == expected_movement
def test_deserialize_asset_movement_withdrawal(mock_bitstamp):
raw_movement = {
'id': 5,
'type': '1',
'datetime': '2020-12-02 09:30:00',
'btc': '0.00000000',
'usd': '-10000.00000000',
'btc_usd': '0.00',
'fee': '50.00000000',
'order_id': 2,
'eur': '0.00',
}
asset = A_USD
movement = AssetMovement(
timestamp=1606901400,
location=Location.BITSTAMP,
category=AssetMovementCategory.WITHDRAWAL,
address=None,
transaction_id=None,
asset=asset,
amount=FVal('10000'),
fee_asset=asset,
fee=Fee(FVal('50')),
link='5',
)
expected_movement = mock_bitstamp._deserialize_asset_movement(raw_movement)
assert movement == expected_movement
raw_movement = {
'id': 5,
'type': '1',
'datetime': '2018-03-21 06:46:06.559877',
'btc': '0',
'usd': '0',
'btc_usd': '0.00',
'fee': '0.1',
'order_id': 2,
'eur': '500',
}
asset = A_EUR
movement = AssetMovement(
timestamp=1521614766,
location=Location.BITSTAMP,
category=AssetMovementCategory.WITHDRAWAL,
address=None,
transaction_id=None,
asset=asset,
amount=FVal('500'),
fee_asset=asset,
fee=Fee(FVal('0.1')),
link='5',
)
expected_movement = mock_bitstamp._deserialize_asset_movement(raw_movement)
assert movement == expected_movement
@pytest.mark.parametrize('start_ts, since_id', [(0, 1), (1606901401, 6)])
def test_query_online_deposits_withdrawals(mock_bitstamp, start_ts, since_id):
"""Test `since_id` value will change depending on `start_ts` value.
Also tests `db_asset_movements` are sorted by `link` (as int) in ascending
mode.
"""
asset_btc = A_BTC
asset_usd = A_USD
movements = [
AssetMovement(
timestamp=1606901400,
location=Location.BITSTAMP,
category=AssetMovementCategory.WITHDRAWAL,
address=None,
transaction_id=None,
asset=asset_usd,
amount=FVal('10000'),
fee_asset=asset_usd,
fee=Fee(FVal('50')),
link='5',
),
AssetMovement(
timestamp=1606801400,
location=Location.BITSTAMP,
category=AssetMovementCategory.DEPOSIT,
address=None,
transaction_id=None,
asset=asset_btc,
amount=FVal('0.5'),
fee_asset=asset_btc,
fee=Fee(FVal('0.0005')),
link='2',
),
]
mock_bitstamp.db.add_asset_movements(movements)
end_ts = Timestamp(1606901401)
expected_call = call(
start_ts=start_ts,
end_ts=end_ts,
options={
'since_id': since_id,
'limit': 1000,
'sort': 'asc',
'offset': 0,
},
case='asset_movements',
)
with patch.object(mock_bitstamp, '_api_query_paginated') as mock_api_query_paginated:
mock_bitstamp.query_online_deposits_withdrawals(
start_ts=Timestamp(start_ts),
end_ts=end_ts,
)
assert mock_api_query_paginated.call_args == expected_call
@pytest.mark.freeze_time(datetime(2020, 12, 3, 12, 0, 0))
@pytest.mark.parametrize('bitstamp_api_key', ['123456'])
@pytest.mark.parametrize('bitstamp_api_secret', [str.encode('abcdefg')])
def test_api_query_request_headers_checks(mock_bitstamp):
"""Test request headers are not polluted by previous requests
"""
options = {
'limit': API_MAX_LIMIT,
'since_id': USER_TRANSACTION_MIN_SINCE_ID,
'sort': USER_TRANSACTION_SORTING_MODE,
'offset': 0,
}
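    # The three queries below alternate between a payload-less request and one
    # with a form payload; the assertions check that Content-Type is only set
    # for the payload case and is removed again on the next payload-less call.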
uuid = MagicMock()
uuid.uuid4.return_value = 'hijklm'
session = mock_bitstamp.session
with patch('rotkehlchen.exchanges.bitstamp.uuid', new=uuid):
mock_bitstamp._api_query(endpoint='balance')
assert session.headers['X-Auth'] == 'BITSTAMP 123456'
assert session.headers['X-Auth-Version'] == 'v2'
assert session.headers['X-Auth-Signature'] == (
'eb84d115027532cba9ebab8c692c488284c54551ab4601aa9ce6280187dc9c86'
)
assert session.headers['X-Auth-Nonce'] == 'hijklm'
assert session.headers['X-Auth-Timestamp'] == '1606996800000'
assert 'Content-Type' not in session.headers
mock_bitstamp._api_query(endpoint='balance', options=options.copy())
assert session.headers['X-Auth'] == 'BITSTAMP 123456'
assert session.headers['X-Auth-Version'] == 'v2'
assert session.headers['X-Auth-Signature'] == (
'29728913d776144f0c8d522a58e77bb6c4492b25dbf7b3ebd41c4eb64c28cf0c'
)
assert session.headers['X-Auth-Nonce'] == 'hijklm'
assert session.headers['X-Auth-Timestamp'] == '1606996800000'
assert session.headers['Content-Type'] == 'application/x-www-form-urlencoded'
mock_bitstamp._api_query(endpoint='balance')
assert session.headers['X-Auth'] == 'BITSTAMP 123456'
assert session.headers['X-Auth-Version'] == 'v2'
assert session.headers['X-Auth-Signature'] == (
'eb84d115027532cba9ebab8c692c488284c54551ab4601aa9ce6280187dc9c86'
)
assert session.headers['X-Auth-Nonce'] == 'hijklm'
assert session.headers['X-Auth-Timestamp'] == '1606996800000'
assert 'Content-Type' not in session.headers
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 03_dataset.ipynb (unless otherwise specified).
__all__ = ['MovieDataset', 'Tokenize', 'RandomResizeCrop', 'ToTensor', 'NormalizeStandardize', 'Compose']
# Internal Cell
from torch.utils.data import Dataset
from torchvision import transforms
from transformers import DistilBertTokenizer
from matplotlib import pyplot as plt
from PIL import Image
import numpy as np
import pandas as pd
import os
import torch
# Cell
class MovieDataset(Dataset):
def __init__(self,
poster_img_dir: str,
backdrop_img_dir: str,
ds_type: str,
transforms: list):
super(MovieDataset, self).__init__()
self.ds_type = ds_type
self.poster_path, self.backdrop_path = poster_img_dir, backdrop_img_dir
self.transforms = transforms
assert self.ds_type in ['train', 'valid', 'test'], "Dataset type provided is invalid."
self.df = pd.read_csv(f"{self.ds_type}.csv")
print(f"{self.ds_type} dataset created!")
def __len__(self):
return len(self.df)
def __getitem__(self, idx: int) -> tuple:
"""
Returns a dict of 5 items:
Poster Image, BackDrop Image, MetaData, Title+overview, label
"""
poster_img_path = os.path.join(self.poster_path, f"{self.df.iloc[idx]['id']}.jpg")
backdrop_img_path = os.path.join(self.backdrop_path, f"{self.df.iloc[idx]['id']}.jpg")
poster_img_array = Image.open(poster_img_path).convert('RGB')
backdrop_img_array = Image.open(backdrop_img_path).convert('RGB')
text_inputs = f"{self.df.iloc[idx]['title']}[SEP]{self.df.iloc[idx]['overview']}"
label = self.df.iloc[idx]['tagline']
        # Use the row at `idx` (not row 0) for the metadata columns.
        meta = self.df.iloc[idx].drop(labels=['overview', 'title', 'tagline', 'id']).to_numpy(dtype=np.float32)
sample = {"poster_img" : poster_img_array,
"backdrop_img" : backdrop_img_array,
"text_inputs" : text_inputs,
"meta" : meta,
"labels" : label}
sample = self.transforms(sample)
return ((sample["poster_img"], sample["backdrop_img"], sample["text_inputs"], sample["meta"]), sample['labels'])
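# Minimal usage sketch (the directory names, crop size, sequence lengths and
# normalization statistics are illustrative assumptions, not part of this
# module); the transform classes it relies on are defined in the cells below:
#
#   tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
#   tfms = Compose([
#       RandomResizeCrop(224, 224, Image.BILINEAR),
#       Tokenize(tokenizer, input_max_length=128, labels_max_length=32),
#       ToTensor(),
#       NormalizeStandardize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
#   ])
#   ds = MovieDataset('posters/', 'backdrops/', 'train', tfms)
#   (poster, backdrop, text_ids, meta), labels = ds[0]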
# Cell
# Tokenize encodes the concatenated title+overview inputs and the tagline labels with the given tokenizer
class Tokenize(object):
def __init__(self, tokenizer, input_max_length: int, labels_max_length: int):
self.tokenizer = tokenizer
self.input_max_length = input_max_length
self.labels_max_length = labels_max_length
def __call__(self, x: dict) -> dict:
x['labels'] = self.tokenizer(x['labels'], return_tensors='pt', max_length=self.labels_max_length, padding='max_length', truncation=True)['input_ids'].squeeze()
x['text_inputs'] = self.tokenizer(x['text_inputs'], return_tensors='pt', max_length=self.input_max_length, padding='max_length', truncation=True)['input_ids'].squeeze()
return x
# Cell
# Randomly crop and resize the images to a fixed size for batching
class RandomResizeCrop(object):
def __init__(self, width: int, height: int, method: int):
self.width, self.height = width, height
self.method = method
def __call__(self, x: dict) -> dict:
resize = transforms.RandomResizedCrop((self.height, self.width), interpolation=self.method)
x['poster_img'] = np.array(resize(x['poster_img']))
x['backdrop_img'] = np.array(resize(x['backdrop_img']))
return x
# Cell
# ToTensor transposes images from HWC to CHW and converts numpy arrays to torch Tensors
class ToTensor(object):
def __call__(self, x: dict) -> dict:
x['poster_img'] = np.transpose(x['poster_img'], axes=(2, 0, 1))
x['backdrop_img'] = np.transpose(x['backdrop_img'], axes=(2, 0, 1))
x = {k : torch.Tensor(v) if isinstance(v, np.ndarray) else v for k, v in x.items()}
return x
# Cell
# NormalizeStandardize scales images to between 0 and 1 before subtracting mean and dividing by std
class NormalizeStandardize(object):
def __init__(self, mean: list, std: list):
nc = len(mean)
self.mean = torch.Tensor(mean).view(nc, 1, 1)
self.std = torch.Tensor(std).view(nc, 1, 1)
def __call__(self, x: dict) -> dict:
poster_norm = torch.true_divide(x['poster_img'], 255.)
backdrop_norm = torch.true_divide(x['backdrop_img'], 255.)
x['poster_img'] = (poster_norm - self.mean) / self.std
x['backdrop_img'] = (backdrop_norm - self.mean) / self.std
return x
# Cell
class Compose(object):
def __init__(self, tfms: list):
self.tfms = tfms
def __call__(self, x: dict) -> dict:
for tfm in self.tfms:
x = tfm(x)
        return x
|
<filename>pr0ntools/layer/parser.py
# Both parsers below drive xml.parsers.expat directly.
import xml.parsers.expat

from pr0ntools.layer.layer import *
from pr0ntools.layer.polygon import *
class LayerSVGParser:
@staticmethod
def parse(layer, file_name):
parser = LayerSVGParser()
parser.layer = layer
parser.file_name = file_name
parser.do_parse()
def process_transform(self, transform):
x_delta = float(transform.split(',')[0].split('(')[1])
y_delta = float(transform.split(',')[1].split(')')[0])
self.x_deltas.append(x_delta)
self.y_deltas.append(y_delta)
self.x_delta += x_delta
self.y_delta += y_delta
def pop_transform(self):
self.x_delta -= self.x_deltas.pop()
self.y_delta -= self.y_deltas.pop()
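    # Example: process_transform('translate(3.5,-10)') pushes x_delta += 3.5
    # and y_delta += -10.0 onto the running offsets; pop_transform() undoes
    # the most recent push when the enclosing element closes.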
def do_parse(self):
'''
Need to figure out a better parse algorithm...messy
'''
'''
<rect
y="261.16562"
x="132.7981"
height="122.4502"
width="27.594412"
id="rect3225"
style="fill:#999999" />
'''
#print self.file_name
raw = open(self.file_name).read()
#print 'set vars'
self.x_delta = 0.0
self.x_deltas = list()
self.y_delta = 0.0
self.y_deltas = list()
self.flow_root = False
        self.text = None
        # The expat handlers below populate self.cur_layer; for this
        # single-layer parser that is the layer handed to parse().
        self.cur_layer = self.layer
# 3 handler functions
def start_element(name, attrs):
#print 'Start element:', name, attrs
if name == 'rect':
#print 'Got one!'
# Origin at upper left hand corner, same as PIL
# Note that inkscape displays origin as lower left hand corner...weird
# style="fill:#00ff00"
color = None
if 'style' in attrs:
style = attrs['style']
color = style.split(':')[1]
#if self.flow_root and self.text is None:
# raise Exception('Missing text')
self.last_polygon = self.cur_layer.add_rect(float(attrs['x']) + self.x_delta, float(attrs['y']) + self.y_delta, float(attrs['width']), float(attrs['height']), color=color)
elif name == 'g':
#transform="translate(0,-652.36218)"
if 'transform' in attrs:
transform = attrs['transform']
self.process_transform(transform)
self.g_transform = True
else:
self.g_transform = False
elif name == 'svg':
self.cur_layer.width = int(attrs['width'])
self.cur_layer.height = int(attrs['height'])
#print 'Width ' + str(self.cur_layer.width)
#print 'Height ' + str(self.cur_layer.height)
# Text entry
elif name == 'flowRoot':
'''
<flowRoot
transform="translate(15.941599,-0.58989212)"
xml:space="preserve"
id="flowRoot4100"
style="font-size:12px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans">
<flowRegion id="flowRegion4102">
<rect
id="rect4104"
width="67.261375"
height="14.659531"
x="56.913475"
y="189.59261"
style="fill:#000000" />
</flowRegion>
<flowPara id="flowPara4106">
clk0
</flowPara>
</flowRoot>
'''
self.flow_root = True
self.text = None
if 'transform' in attrs:
transform = attrs['transform']
self.flowRoot_transform = True
self.process_transform(transform)
else:
self.flowRoot_transform = False
elif name == 'flowPara':
#self.text = attrs
#print 'TEXT: ' + repr(self.text)
#sys.exit(1)
pass
else:
#print 'Skipping %s' % name
pass
def end_element(name):
#print 'End element:', name
if name == 'flowRoot':
self.last_polygon.text = self.text
self.flow_root = False
self.text = None
self.last_polygon = None
if self.flowRoot_transform:
self.pop_transform()
self.flowRoot_transform = False
elif name == 'g':
if self.g_transform:
self.pop_transform()
self.g_transform = False
pass
def char_data(data):
#print 'Character data:', repr(data)
self.text = data
pass
p = xml.parsers.expat.ParserCreate()
p.StartElementHandler = start_element
p.EndElementHandler = end_element
p.CharacterDataHandler = char_data
p.Parse(raw, 1)
class Path2Points:
def __init__(self, path_in):
self.path_in = path_in
def tokf(self):
ret = float(self.tokens[self.i])
self.i += 1
return ret
def toki(self):
ret = int(self.tokens[self.i])
self.i += 1
return ret
def tokb(self):
        # SVG flags are '0'/'1'; bool('0') would be True, so go through int.
        ret = bool(int(self.tokens[self.i]))
self.i += 1
return ret
def run(self):
def isnum(part):
c = part[0]
return c >= '0' and c <= '9' or c == '+' or c == '-'
cur_x = None
cur_y = None
# Preprocess a bit
        # Spec says that commas are equivalent to whitespace
self.path = self.path_in.replace(',', ' ')
        # Since whitespace is not required, it's best to parse a token at a time rather than split on spaces
        # However, inkscape outputs a mix of commas and spaces, so this is good enough for now
self.tokens = self.path.split()
action = None
points = []
# d="m 68.185297,3.7588384 -48.992399,0 0,156.0685716 46.467017,0 0,-53.53809 -14.647211,0 0,-58.08377 17.172593,0 z"
'''
A very crude parser
I assume that I get a single closed polygon
Any movement other than the first is to add a line
Curves are not accepted
'''
try:
self.i = 0
while True:
if self.i >= len(self.tokens):
raise Exception('Path was not closed')
part = self.tokens[self.i]
# Move to
if part in 'MmAa':
action = part
self.i += 1
continue
# Close path
elif part == 'Z' or part == 'z':
                    # Multiple paths are allowed; return a multidimensional array of points if this starts to occur
if self.i != len(self.tokens) - 1:
                        raise Exception('Expected close path to be the last element')
break
elif not isnum(part):
raise Exception('Unknown part %s' % part)
# move to? (absolute)
if action == 'M':
# (x, y)
cur_x = self.tokf()
cur_y = self.tokf()
points.append(Point(cur_x, cur_y))
# move to? (relative)
elif action == 'm':
# (x, y)
if cur_x is None:
cur_x = 0.0
cur_y = 0.0
cur_x += self.tokf()
cur_y += self.tokf()
points.append(Point(cur_x, cur_y))
# Elliptical arc curve (relative)
elif action == 'a':
rx = self.tokf()
ry = self.tokf()
x_axis_rotation = self.tokf()
large_arc_flag = self.tokb()
sweep_flag = self.tokb()
x = self.tokf()
y = self.tokf()
if 0:
print
print 'rx: %f, ry: %f' % (rx, ry)
print 'x rot: %f' % x_axis_rotation
print 'Large arc: %d, sweep: %d' % (large_arc_flag, sweep_flag)
print 'x: %f, y: %f' % (x, y)
print
print 'WARNING: aborting arc sequence'
points = []
break
if x_axis_rotation != 0:
raise ValueError('Can not accept rotated polygons')
else:
                    raise Exception('Unknown cur action %s' % action)
except:
print 'Failed to parse: %s' % self.path
print 'Raw: %s' % self.path_in
print 'i: %d, token: %s' % (self.i, self.tokens[self.i])
raise
print 'Parsed %d points from %s' % (len(points), self.path_in)
return points
def path2points(path_in):
return Path2Points(path_in).run()
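# Example (a sketch): a 10x10 square drawn with relative move-to commands,
#   path2points('m 0,0 10,0 0,10 -10,0 z')
# yields the four corner points (0,0), (10,0), (10,10), (0,10).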
class MultilayerSVGParser:
def __init__(self, file_name):
# Dict of layer name to layer object
# Adds a layer for every layer found in the source image
self.layers = dict()
self.file_name = file_name
# Image files found in the SVG
self.images = set()
self.layer = None
def process_transform(self, transform):
x_delta = float(transform.split(',')[0].split('(')[1])
y_delta = float(transform.split(',')[1].split(')')[0])
self.x_deltas.append(x_delta)
self.y_deltas.append(y_delta)
self.x_delta += x_delta
self.y_delta += y_delta
def pop_transform(self):
self.x_delta -= self.x_deltas.pop()
self.y_delta -= self.y_deltas.pop()
def run(self):
'''
Need to figure out a better parse algorithm...messy
'''
'''
<rect
y="261.16562"
x="132.7981"
height="122.4502"
width="27.594412"
id="rect3225"
style="fill:#999999" />
'''
#print self.file_name
raw = open(self.file_name).read()
#print 'set vars'
self.x_delta = 0.0
self.x_deltas = list()
self.y_delta = 0.0
self.y_deltas = list()
self.flow_root = False
self.text = None
self.width = None
self.height = None
self.cur_layer = None
# 3 handler functions
def start_element(name, attrs):
#print 'Start element:', name, attrs
if name == 'rect':
#print 'Got one!'
# Origin at upper left hand corner, same as PIL
# Note that inkscape displays origin as lower left hand corner...weird
# style="fill:#00ff00"
color = None
if 'style' in attrs:
style = attrs['style']
color = style.split(':')[1]
#if self.flow_root and self.text is None:
# raise Exception('Missing text')
self.last_polygon = self.cur_layer.add_rect(float(attrs['x']) + self.x_delta, float(attrs['y']) + self.y_delta, float(attrs['width']), float(attrs['height']), color=color)
elif name == 'path':
'''
<path
style="fill:#00ff00;fill-opacity:1;stroke:none"
d="m 68.185297,3.7588384 -48.992399,0 0,156.0685716 46.467017,0 0,-53.53809 -14.647211,0 0,-58.08377 17.172593,0 z"
id="path3110"
inkscape:connector-curvature="0" />
'''
color = None
if 'style' in attrs:
style = attrs['style']
color = style.split(':')[1].split(';')[0]
points = path2points(attrs['d'])
if len(points) != 0:
self.last_polygon = self.cur_layer.add_polygon_by_points(points, color=color)
elif name == 'image':
'''
<image
y="461.00504"
x="276.21426"
id="image3082"
xlink:href="file:///home/mcmaster/document/external/pr0ntools/capture/test/both_0.jpg"
height="177"
width="99" />
'''
self.images.add(attrs['xlink:href'])
elif name == 'g':
'''
<g
inkscape:groupmode="layer"
id="layer2"
inkscape:label="active"
style="display:inline">
...
</g>
'''
#transform="translate(0,-652.36218)"
if 'transform' in attrs:
transform = attrs['transform']
self.process_transform(transform)
self.g_transform = True
else:
self.g_transform = False
if 'inkscape:label' in attrs:
if self.cur_layer:
                        raise Exception('Nested layer?')
layer_name = attrs['inkscape:label']
print 'Found layer %s' % layer_name
if layer_name in self.layers:
raise Exception("Duplicate layer %s" % layer_name)
self.cur_layer = Layer()
self.cur_layer.name = layer_name
elif name == 'svg':
self.width = int(attrs['width'])
self.height = int(attrs['height'])
#print 'Width ' + str(self.layer.width)
#print 'Height ' + str(self.layer.height)
# Text entry
elif name == 'flowRoot':
'''
<flowRoot
transform="translate(15.941599,-0.58989212)"
xml:space="preserve"
id="flowRoot4100"
style="font-size:12px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans">
<flowRegion id="flowRegion4102">
<rect
id="rect4104"
width="67.261375"
height="14.659531"
x="56.913475"
y="189.59261"
style="fill:#000000" />
</flowRegion>
<flowPara id="flowPara4106">
clk0
</flowPara>
</flowRoot>
'''
self.flow_root = True
self.text = None
if 'transform' in attrs:
transform = attrs['transform']
self.flowRoot_transform = True
self.process_transform(transform)
else:
self.flowRoot_transform = False
elif name == 'flowPara':
#self.text = attrs
#print 'TEXT: ' + repr(self.text)
#sys.exit(1)
pass
else:
#print 'Skipping %s' % name
pass
def end_element(name):
#print 'End element:', name
if name == 'flowRoot':
self.last_polygon.text = self.text
self.flow_root = False
self.text = None
self.last_polygon = None
if self.flowRoot_transform:
self.pop_transform()
self.flowRoot_transform = False
            elif name == 'g':
                # Only labeled groups open a layer, so guard against closing
                # groups that never did.
                if self.cur_layer is not None:
                    self.layers[self.cur_layer.name] = self.cur_layer
                    self.cur_layer = None
if self.g_transform:
self.pop_transform()
self.g_transform = False
pass
def char_data(data):
#print 'Character data:', repr(data)
self.text = data
pass
p = xml.parsers.expat.ParserCreate()
p.StartElementHandler = start_element
p.EndElementHandler = end_element
p.CharacterDataHandler = char_data
p.Parse(raw, 1)
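# Minimal usage sketch (the SVG file name is a hypothetical example):
#
#   parser = MultilayerSVGParser('layers.svg')
#   parser.run()
#   for name in parser.layers:
#       print 'Parsed layer %s (%sx%s)' % (name, parser.width, parser.height)
#   print 'Referenced images: %s' % repr(parser.images)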
|
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import sys
import copy
from torch import device
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestGluGrad(TestCase):
def cpu_op_exec(self, input_data, dim):
sign = False
if input_data.dtype == torch.float16:
input_data = input_data.to(torch.float32)
sign = True
input_data.requires_grad = True
data = torch.nn.functional.glu(input_data, dim=dim)
data.backward(torch.ones_like(data))
cpu_output = input_data.grad
if sign:
cpu_output = cpu_output.to(torch.float16)
return cpu_output.to("cpu").numpy()
def npu_op_exec(self, input_data, dim):
input_data = input_data.to("npu")
input_data.requires_grad = True
data = torch.nn.functional.glu(input_data, dim=dim)
data.backward(torch.ones_like(data))
npu_output = input_data.grad
return npu_output.to("cpu").numpy()
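    # For reference: torch.nn.functional.glu splits the input in half along
    # `dim` into (a, b) and returns a * sigmoid(b), so the backward pass above
    # exercises gradients through both the linear half and the gating half.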
def test_glugrad_shape_format(self, device):
        # dtype, format (-1 means the default format), shape, dim
shape_format_32 = [
[np.float32, -1, (2, 2, 4), 0],
[np.float32, -1, (4, 6, 10), 1],
[np.float32, -1, (2, 4, 8), 2],
[np.float32, -1, (4, 6), -1],
[np.float32, -1, (2, 2, 4), 2],
[np.float32, -1, (4, 6, 8, 10), -2],
[np.float32, -1, (4, 6, 6), 1],
[np.float32, -1, (6, 20, 10), 1],
]
shape_format_16 = [
[np.float16, -1, (2, 2, 4), 0],
[np.float16, -1, (4, 6, 10), 1],
[np.float16, -1, (2, 4, 8), 2],
[np.float16, -1, (4, 6), -1],
[np.float16, -1, (2, 2, 4), 2],
[np.float16, -1, (4, 6, 8, 10), -2],
[np.float16, -1, (4, 6, 6), 1],
]
for item in shape_format_32:
cpu_input, npu_input = create_common_tensor(item, -2.0, 2.0)
cpu_output = self.cpu_op_exec(cpu_input, item[3])
npu_output = self.npu_op_exec(npu_input, item[3])
            # item[0] is the numpy dtype used by create_common_tensor.
            eps = 0.0002 if item[0] == np.float32 else 0.002
self.assertRtolEqual(cpu_output, npu_output, prec=eps)
for item in shape_format_16:
cpu_input, npu_input = create_common_tensor(item, -2.0, 2.0)
cpu_output = self.cpu_op_exec(cpu_input, item[3])
npu_output = self.npu_op_exec(npu_input, item[3])
            eps = 0.0002 if item[0] == np.float32 else 0.002
self.assertRtolEqual(cpu_output, npu_output, prec=eps)
instantiate_device_type_tests(TestGluGrad, globals(), except_for="cpu")
if __name__ == "__main__":
run_tests()
|
"""Contains the Spectrum class."""
import numpy as np
from darkhistory import utilities as utils
from darkhistory.spec.spectools import get_bin_bound
from darkhistory.spec.spectools import get_log_bin_width
from darkhistory.spec.spectools import rebin_N_arr
import matplotlib.pyplot as plt
import warnings
from scipy import integrate
from scipy.interpolate import interp1d
class Spectrum:
"""Structure for particle spectra.
For an example of how to use these objects, see `Example 1: Manipulating Spectra Part 1 - Spectrum <https://github.com/hongwanliu/DarkHistory/blob/development/examples/Example_%3F_Manipulating_Spectra_Part_1_Spectrum.ipynb>`_.
Parameters
----------
eng : ndarray
Abscissa for the spectrum.
data : ndarray
Spectrum stored as N or dN/dE.
rs : float, optional
The redshift (1+z) of the spectrum. Default is -1.
in_eng : float, optional
The injection energy of the primary, if this is a secondary spectrum. Default is -1.
mode : {'N', 'dNdE'}, optional
Whether the input is N or dN/dE in each bin. Default is 'dNdE'.
Attributes
----------
eng : ndarray
Abscissa for the spectrum.
dNdE : ndarray
dN/dE of the spectrum.
N : ndarray
N of the spectrum.
rs : float, optional
The redshift (1+z) of the spectrum. Set to -1 if not specified.
length : int
The length of the abscissa.
underflow : dict of str: float
The underflow total number of particles and total energy.
"""
# __array_priority__ must be larger than 0, so that radd can work.
# Otherwise, ndarray + Spectrum works by iterating over the elements of
# ndarray first, which isn't what we want.
__array_priority__ = 1
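    # For example, np.ones(spec.length) + spec dispatches to
    # Spectrum.__radd__ rather than broadcasting spec over the array's
    # elements.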
def __init__(self, eng, data, rs=-1., in_eng=-1., spec_type='dNdE'):
if eng.size != data.size:
raise TypeError("""abscissa and spectrum need to be of the
same size.""")
if eng.size == 1:
raise TypeError("abscissa must be more than length 1.")
if not np.all(np.diff(eng) > 0):
raise TypeError("abscissa must be ordered in increasing energy.")
if spec_type != 'N' and spec_type != 'dNdE':
raise TypeError("invalid spec_type specified.")
self.eng = eng
self._data = data
self.rs = rs
self.in_eng = in_eng
self._spec_type = spec_type
self.length = eng.size
self.underflow = {'N': 0., 'eng': 0.}
@property
def dNdE(self):
if self._spec_type == 'dNdE':
return self._data
elif self._spec_type == 'N':
return self._data/(self.eng * get_log_bin_width(self.eng))
@dNdE.setter
def dNdE(self, value):
if self._spec_type == 'dNdE':
self._data = value
elif self._spec_type == 'N':
self._data = value/(self.eng * get_log_bin_width(self.eng))
@property
def N(self):
if self._spec_type == 'dNdE':
return self._data * self.eng * get_log_bin_width(self.eng)
elif self._spec_type == 'N':
return self._data
@N.setter
def N(self, value):
if self._spec_type == 'dNdE':
self._data = value * self.eng * get_log_bin_width(self.eng)
elif self._spec_type == 'N':
self._data = value
@property
def spec_type(self):
return self._spec_type
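    # N and dN/dE are related by N = dNdE * eng * dlogE, with dlogE the log
    # bin width; the properties above convert on the fly, while
    # switch_spec_type() changes which representation is stored.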
def __add__(self, other):
"""Adds two :class:`Spectrum` instances together, or an array to the spectrum. The :class:`Spectrum` object is on the left.
The returned :class:`Spectrum` will have its underflow reset to zero if other is not a :class:`Spectrum` object.
Parameters
----------
other : Spectrum or ndarray
The object to add to the current :class:`Spectrum` object.
Returns
-------
Spectrum
New :class:`Spectrum` instance which has the summed spectrum.
Notes
-----
This special function, together with :meth:`Spectrum.__radd__`, allows the use of the symbol ``+`` to add :class:`Spectrum` objects together.
See Also
--------
:meth:`Spectrum.__radd__`
"""
# Removed ability to add int or float. Not likely to be useful I think?
if type(other) == type(self):
# Some typical errors.
if not np.array_equal(self.eng, other.eng):
raise TypeError("abscissae are different for the two Spectrum objects.")
if self._spec_type != other._spec_type:
raise TypeError("cannot add N to dN/dE.")
new_rs = -1
new_in_eng = -1
if np.array_equal(self.rs, other.rs):
new_rs = self.rs
if np.array_equal(self.in_eng, other.in_eng):
new_in_eng = self.in_eng
new_spectrum = Spectrum(
self.eng, self._data+other._data,
rs = new_rs, in_eng = new_in_eng,
spec_type = self._spec_type
)
new_spectrum.underflow['N'] = (self.underflow['N']
+ other.underflow['N'])
new_spectrum.underflow['eng'] = (self.underflow['eng']
+ other.underflow['eng'])
return new_spectrum
elif isinstance(other, np.ndarray):
return Spectrum(
self.eng, self._data + other,
rs = self.rs, in_eng = self.in_eng,
spec_type = self._spec_type
)
else:
raise TypeError("cannot add object to Spectrum.")
def __radd__(self, other):
"""Adds two :class:`Spectrum` instances together, or an array to the spectrum. The :class:`Spectrum` object is on the right.
The returned :class:`Spectrum` will have its underflow reset to zero if other is not a :class:`Spectrum` object.
Parameters
----------
other : Spectrum or ndarray
The object to add to the current :class:`Spectrum` object.
Returns
-------
Spectrum
New :class:`Spectrum` instance which has the summed spectrum.
Notes
-----
This special function, together with :meth:`Spectrum.__add__`, allows the use of the symbol ``+`` to add :class:`Spectrum` objects together.
See Also
--------
:meth:`Spectrum.__add__`
"""
# Removed ability to add int or float. Not likely to be useful I think?
if type(other) == type(self):
# Some typical errors.
if not np.array_equal(self.eng, other.eng):
raise TypeError("abscissae are different for the two :class:`Spectrum` objects.")
if self._spec_type != other._spec_type:
raise TypeError("cannot add N to dN/dE.")
new_rs = -1
new_in_eng = -1
if self.rs == other.rs:
new_rs = self.rs
if self.in_eng == other.in_eng:
new_in_eng = self.in_eng
new_spectrum = Spectrum(
self.eng, self._data+other._data,
rs = new_rs, in_eng = new_in_eng,
spec_type = self._spec_type
)
new_spectrum.underflow['N'] = (self.underflow['N']
+ other.underflow['N'])
new_spectrum.underflow['eng'] = (self.underflow['eng']
+ other.underflow['eng'])
return new_spectrum
elif isinstance(other, np.ndarray):
return Spectrum(
self.eng, self._data + other,
rs = self.rs, in_eng = self.in_eng,
spec_type = self._spec_type
)
else:
raise TypeError("cannot add object to Spectrum.")
def __sub__(self, other):
"""Subtracts a :class:`Spectrum` or an array from this :class:`Spectrum`.
Parameters
----------
other : Spectrum or ndarray
The object to subtract from the current :class:`Spectrum` object.
Returns
-------
Spectrum
New :class:`Spectrum` instance which has the subtracted spectrum.
Notes
-----
This special function, together with :meth:`Spectrum.__rsub__`, allows the use of the symbol ``-`` to subtract or subtract from :class:`Spectrum` objects.
The returned :class:`Spectrum` object underflow is reset to zero if `other` is not a :class:`Spectrum` object.
See Also
--------
:meth:`Spectrum.__rsub__`
"""
return self + -1*other
def __rsub__(self, other):
"""Subtracts this :class:`Spectrum` from another or an array.
Parameters
----------
other : Spectrum or ndarray
The object from which to subtract the current
:class:`Spectrum` object.
Returns
-------
Spectrum
New :class:`Spectrum` instance which has the subtracted spectrum.
Notes
-----
This special function, together with :meth:`Spectrum.__sub__`, allows the use of the symbol - to subtract or subtract from :class:`Spectrum` objects.
See Also
--------
:meth:`Spectrum.__sub__`
"""
return other + -1*self
def __neg__(self):
"""Negates the spectrum.
Returns
-------
Spectrum
New :class:`Spectrum` instance with the spectrum negated.
Notes
------
The returned :class:`Spectrum` object has underflow set to zero.
"""
return -1*self
def __mul__(self,other):
"""Takes the product of the spectrum with a :class:`Spectrum` object, array or number.
The :class:`Spectrum` object is on the left.
Parameters
----------
other : Spectrum, ndarray, float or int
The object to multiply to the current :class:`Spectrum` object.
Returns
-------
Spectrum
New :class:`Spectrum` instance which has the multiplied spectrum.
Notes
-----
This special function, together with :meth:`Spectrum.__rmul__`, allows the use of the symbol ``*`` to multiply :class:`Spectrum` objects or an array and :class:`Spectrum`.
The returned :class:`Spectrum` object has underflow set to zero if *other* is not a :class:`Spectrum` object.
See Also
--------
:meth:`Spectrum.__rmul__`
"""
if (
np.issubdtype(type(other),np.float64)
or np.issubdtype(type(other),np.int64)
):
new_spectrum = Spectrum(
self.eng, self._data*other,
rs = self.rs, in_eng = self.in_eng,
spec_type = self._spec_type
)
new_spectrum.underflow['N'] = self.underflow['N']*other
new_spectrum.underflow['eng'] = self.underflow['eng']*other
return new_spectrum
elif isinstance(other, np.ndarray):
return Spectrum(
self.eng, self._data*other,
rs = self.rs, in_eng = self.in_eng,
spec_type = self._spec_type
)
elif isinstance(other, Spectrum):
fin_spec_type = self._spec_type
if self._spec_type != other._spec_type:
# If they are not the same, defaults to dNdE.
fin_spec_type = 'dNdE'
new_rs = -1
new_in_eng = -1
if self.rs == other.rs:
new_rs = self.rs
if self.in_eng == other.in_eng:
new_in_eng = self.in_eng
if not np.array_equal(self.eng, other.eng):
raise TypeError("energy abscissae are not the same.")
return Spectrum(
self.eng, self._data*other._data,
rs = new_rs, in_eng = new_in_eng,
spec_type = fin_spec_type
)
else:
raise TypeError("cannot multiply object to Spectrum.")
def __rmul__(self,other):
"""Takes the product of the spectrum with an array or number.
The :class:`Spectrum` object is on the right.
Parameters
----------
other : ndarray, float or int
The object to multiply with the current :class:`Spectrum` object.
Returns
-------
Spectrum
New :class:`Spectrum` instance which has the multiplied spectrum.
Notes
-----
This special function, together with :meth:`Spectrum.__mul__`, allows the use of the symbol ``*`` to multiply :class:`Spectrum` objects or an array and Spectrum.
The returned :class:`Spectrum` object has its underflow set to zero.
See Also
--------
:meth:`Spectrum.__mul__`
"""
if (np.issubdtype(type(other),np.float64)
or np.issubdtype(type(other),np.int64)
):
new_spectrum = Spectrum(
self.eng, self._data*other,
rs = self.rs, in_eng = self.in_eng,
spec_type = self._spec_type
)
new_spectrum.underflow['N'] = self.underflow['N']*other
new_spectrum.underflow['eng'] = self.underflow['eng']*other
return new_spectrum
# Multiplication by Spectrum covered by __mul__
elif isinstance(other, np.ndarray):
return Spectrum(
self.eng, self._data*other,
self.rs, self.in_eng,
spec_type = self._spec_type
)
else:
raise TypeError("cannot multiply object with Spectrum.")
def __truediv__(self,other):
"""Divides the spectrum by an array or number.
The :class:`Spectrum` object is on the left.
Parameters
----------
other : ndarray, float or int
The object to divide the current :class:`Spectrum` object by.
Returns
-------
Spectrum
New :class:`Spectrum` instance which has the divided spectrum.
Notes
-----
This special function, together with :meth:`Spectrum.__rtruediv__`, allows the use of the symbol ``/`` to multiply :class:`Spectrum` objects or an array and Spectrum.
The returned :class:`Spectrum` object underflow is set to zero.
See Also
--------
:meth:`Spectrum.__rtruediv__`
"""
return self*(1/other)
def __rtruediv__(self,other):
"""Divides a number or array by the spectrum.
The :class:`Spectrum` object is on the right.
Parameters
----------
other : ndarray, float or int
The object by which to divide the current
:class:`Spectrum` object.
Returns
-------
Spectrum
New :class:`Spectrum` instance which has the divided spectrum.
Notes
-----
This special function, together with :meth:`Spectrum.__truediv__`, allows the use of the symbol ``/`` to multiply :class:`Spectrum` objects or an array and :class:`Spectrum`.
The returned :class:`Spectrum` object underflow is set to zero.
"""
invSpec = Spectrum(self.eng, 1/self._data, self.rs, self.in_eng)
return other*invSpec
def switch_spec_type(self, target=None):
"""Switches between data being stored as N or dN/dE.
Parameters
----------
target : {'N', 'dNdE'}, optional
The target type to switch to. If not specified, performs a switch regardless.
Notes
------
Although both N and dN/dE can be accessed regardless of which values
are stored, performing a switch before repeated computations can
speed up the computation.
"""
if target is not None:
if target != 'N' and target != 'dNdE':
raise ValueError('Invalid target specified.')
log_bin_width = get_log_bin_width(self.eng)
if self._spec_type == 'N' and not target == 'N':
self._data = self._data/(self.eng*log_bin_width)
self._spec_type = 'dNdE'
elif self._spec_type == 'dNdE' and not target == 'dNdE':
self._data = self._data*self.eng*log_bin_width
self._spec_type = 'N'
def contract(self, mat):
"""Performs a dot product with the :class:`Spectrum`.
Parameters
----------
mat : ndarray
The array to take the dot product with.
Returns
-------
float
The resulting dot product.
"""
return np.dot(mat,self._data)
def totN(self, bound_type=None, bound_arr=None):
"""Returns the total number of particles in part of the spectrum.
The part of the spectrum can be specified in two ways, and is specified by bound_type. Multiple totals can be obtained through bound_arr.
Parameters
----------
bound_type : {'bin', 'eng', None}
The type of bounds to use. Bound values do not have to be within [0:length] for 'bin' or within the abscissa for 'eng'. None should only be used when computing the total particle number in the spectrum.
Specifying ``bound_type='bin'`` without bound_arr returns self.N.
bound_arr : ndarray of length N, optional
An array of boundaries (bin or energy), between which the total number of particles will be computed. If bound_arr is None, but bound_type is specified, the total number of particles in each bin is computed. If both bound_type and bound_arr are None, then the total number of particles in the spectrum is computed.
For 'bin', bounds are specified as the bin *boundary*, with 0 being the left most boundary, 1 the right-hand of the first bin and so on. This is equivalent to integrating over a histogram. For 'eng', bounds are specified by energy values.
These boundaries need not be integer values for 'bin': specifying np.array([0.5, 1.5]) for example will include half of the first bin and half of the second.
Returns
-------
ndarray of length N-1, or float
Total number of particles in the spectrum, or between the specified boundaries.
Examples
---------
>>> eng = np.array([1, 10, 100, 1000])
>>> N = np.array([1, 2, 3, 4])
>>> spec = Spectrum(eng, N, spec_type='N')
>>> spec.totN()
10.0
>>> spec.totN('bin', np.array([1, 3]))
array([5.])
>>> spec.totN('eng', np.array([10, 1e4]))
array([8.])
See Also
--------
:meth:`Spectrum.toteng`
"""
length = self.length
log_bin_width = get_log_bin_width(self.eng)
if self._spec_type == 'dNdE':
dNdlogE = self.eng*self.dNdE
elif self._spec_type == 'N':
dNdlogE = self.N/log_bin_width
if bound_type is not None:
if bound_arr is None:
return dNdlogE * log_bin_width
if bound_type == 'bin':
if not all(np.diff(bound_arr) >= 0):
raise TypeError("bound_arr must have increasing entries.")
N_in_bin = np.zeros(bound_arr.size-1)
if bound_arr[0] > length or bound_arr[-1] < 0:
return N_in_bin
for low,upp,i in zip(bound_arr[:-1], bound_arr[1:],
np.arange(N_in_bin.size)):
# Set the lower and upper bounds, including case where low and upp are outside of the bins.
if low > length or upp < 0:
N_in_bin[i] = 0
continue
low_ceil = int(np.ceil(low))
low_floor = int(np.floor(low))
upp_ceil = int(np.ceil(upp))
upp_floor = int(np.floor(upp))
# Sum the bins that are completely between the bounds.
N_full_bins = np.dot(
dNdlogE[low_ceil:upp_floor],
log_bin_width[low_ceil:upp_floor]
)
N_part_bins = 0
if low_floor == upp_floor or low_ceil == upp_ceil:
# Bin indices are within the same bin. The second requirement covers the case where upp_ceil is length.
N_part_bins += (
dNdlogE[low_floor] * (upp - low)
* log_bin_width[low_floor]
)
else:
# Add up part of the bin for the low partial bin and the high partial bin.
N_part_bins += (
dNdlogE[low_floor] * (low_ceil - low)
* log_bin_width[low_floor]
)
if upp_floor < length:
# If upp_floor is length, then there is no partial bin for the upper index.
N_part_bins += (
dNdlogE[upp_floor]
* (upp-upp_floor) * log_bin_width[upp_floor]
)
N_in_bin[i] = N_full_bins + N_part_bins
return N_in_bin
if bound_type == 'eng':
bin_boundary = get_bin_bound(self.eng)
eng_bin_ind = np.interp(
np.log(bound_arr),
np.log(bin_boundary), np.arange(bin_boundary.size),
left = 0, right = length + 1
)
return self.totN('bin', eng_bin_ind)
else:
return np.dot(dNdlogE,log_bin_width) + self.underflow['N']
def toteng(self, bound_type=None, bound_arr=None):
"""Returns the total energy of particles in part of the spectrum.
The part of the spectrum can be specified in two ways, and is specified by bound_type. Multiple totals can be obtained through bound_arr.
Parameters
----------
bound_type : {'bin', 'eng', None}
The type of bounds to use. Bound values do not have to be within the [0:length] for 'bin' or within the abscissa for 'eng'. None should only be used to obtain the total energy.
Specifying ``bound_type='bin'`` without bound_arr gives the total energy in each bin.
bound_arr : ndarray of length N, optional
An array of boundaries (bin or energy), between which the total number of particles will be computed. If unspecified, the total number of particles in the whole spectrum is computed.
For 'bin', bounds are specified as the bin *boundary*, with 0 being the left most boundary, 1 the right-hand of the first bin and so on. This is equivalent to integrating over a histogram. For 'eng', bounds are specified by energy values.
These boundaries need not be integer values for 'bin': specifying np.array([0.5, 1.5]) for example will include half of the first bin and half of the second.
Returns
-------
ndarray of length N-1, or float
Total energy in the spectrum or between the specified boundaries.
Examples
---------
>>> eng = np.array([1, 10, 100, 1000])
>>> N = np.array([1, 2, 3, 4])
>>> spec = Spectrum(eng, N, spec_type='N')
>>> spec.toteng()
4321.0
>>> spec.toteng('bin', np.array([1, 3]))
array([320.])
>>> spec.toteng('eng', np.array([10, 1e4]))
array([4310.])
See Also
---------
:meth:`.Spectrum.totN`
"""
eng = self.eng
length = self.length
log_bin_width = get_log_bin_width(self.eng)
if self._spec_type == 'dNdE':
dNdlogE = self.eng*self.dNdE
elif self._spec_type == 'N':
dNdlogE = self.N/log_bin_width
if bound_type is not None:
if bound_arr is None:
return dNdlogE * eng * log_bin_width
if bound_type == 'bin':
if not all(np.diff(bound_arr) >= 0):
raise TypeError("bound_arr must have increasing entries.")
eng_in_bin = np.zeros(bound_arr.size-1)
if bound_arr[0] > length or bound_arr[-1] < 0:
return eng_in_bin
for low,upp,i in zip(bound_arr[:-1], bound_arr[1:],
np.arange(eng_in_bin.size)):
if low > length or upp < 0:
eng_in_bin[i] = 0
continue
low_ceil = int(np.ceil(low))
low_floor = int(np.floor(low))
upp_ceil = int(np.ceil(upp))
upp_floor = int(np.floor(upp))
# Sum the bins that are completely between the bounds.
eng_full_bins = np.dot(eng[low_ceil:upp_floor]
* dNdlogE[low_ceil:upp_floor],
log_bin_width[low_ceil:upp_floor])
eng_part_bins = 0
if low_floor == upp_floor or low_ceil == upp_ceil:
# Bin indices are within the same bin. The second requirement covers the case where upp_ceil is length.
eng_part_bins += (eng[low_floor] * dNdlogE[low_floor]
* (upp - low) * log_bin_width[low_floor])
else:
# Add up part of the bin for the low partial bin and the high partial bin.
eng_part_bins += (eng[low_floor] * dNdlogE[low_floor]
* (low_ceil - low) * log_bin_width[low_floor])
if upp_floor < length:
# If upp_floor is length, then there is no partial bin for the upper index.
eng_part_bins += (eng[upp_floor]
* dNdlogE[upp_floor] * (upp-upp_floor)
* log_bin_width[upp_floor])
eng_in_bin[i] = eng_full_bins + eng_part_bins
return eng_in_bin
if bound_type == 'eng':
bin_boundary = get_bin_bound(self.eng)
eng_bin_ind = np.interp(
np.log(bound_arr),
np.log(bin_boundary), np.arange(bin_boundary.size),
left = 0, right = length + 1)
return self.toteng('bin', eng_bin_ind)
else:
return (np.dot(dNdlogE, eng * log_bin_width)
+ self.underflow['eng'])
def shift_eng(self, new_eng):
""" Shifts the abscissa while conserving number.
This function can be used to subtract or add some amount of energy from each bin in the spectrum. The dN/dE is adjusted to conserve number in each bin.
Parameters
----------
new_eng : ndarray
The new energy abscissa.
Returns
-------
None
"""
if new_eng.size != self.eng.size:
raise TypeError("The new abscissa must have the same length as the old one.")
if not all(np.diff(new_eng) > 0):
raise TypeError("abscissa must be ordered in increasing energy.")
new_log_bin_width = get_log_bin_width(new_eng)
if self._spec_type == 'dNdE':
new_dNdE = self.totN('bin')/(new_eng * new_log_bin_width)
self.eng = new_eng
self._data = new_dNdE
elif self._spec_type == 'N':
self.eng = new_eng
def rebin(self, out_eng):
""" Rebins according to a new abscissa.
The total number and total energy is conserved.
If a bin in the old abscissa self.eng is below the lowest bin of the new abscissa out_eng, then the total number and energy not assigned to the lowest bin are assigned to the underflow.
If a bin in self.eng is above the highest bin in out_eng, a warning is thrown, the values are simply discarded, and the total number and energy can no longer be conserved.
Parameters
----------
out_eng : ndarray
The new abscissa to bin into.
Returns
-------
None
Notes
-----
Total number and energy are conserved by assigning the number of particles :math:`N` in a bin of energy :math:`E` to two adjacent bins in the new abscissa out_eng, with energies :math:`E_\\text{low}` and :math:`E_\\text{upp}` such that :math:`E_\\text{low} < E < E_\\text{upp}`\ . The number of particles :math:`N_\\text{low}` and :math:`N_\\text{upp}` assigned to these two bins are given by
.. math::
N_\\text{low} &= \\frac{E_\\text{upp} - E}{E_\\text{upp} - E_\\text{low}} N \\,, \\\\
N_\\text{upp} &= \\frac{E - E_\\text{low}}{E_\\text{upp} - E_\\text{low}} N
Rebinning works best when going from a finer binning to a coarser binning. Going the other way can result in spiky features, since the coarser binning simply does not contain enough information to reconstruct the finer binning in this way.
See Also
--------
:func:`.spectools.rebin_N_arr`
"""
if not np.all(np.diff(out_eng) > 0):
raise TypeError("new abscissa must be ordered in increasing energy.")
# if out_eng[-1] < self.eng[-1]:
# raise OverflowError("the new abscissa lies below the old one: this function cannot handle overflow (yet?).")
# Get the bin indices that the current abscissa (self.eng) corresponds to in the new abscissa (new_eng). Can be any number between 0 and self.length-1. Bin indices are wrt the bin centers.
# Add an additional bin at the lower end of out_eng so that underflow can be treated easily.
# Forces out_eng to be float, avoids strange problems with np.insert
# below if out_eng is of type int.
out_eng = out_eng.astype(float)
first_bin_eng = np.exp(np.log(out_eng[0]) - (np.log(out_eng[1]) - np.log(out_eng[0])))
new_eng = np.insert(out_eng, 0, first_bin_eng)
# Find the relative bin indices for self.eng wrt new_eng. The first bin in new_eng has bin index -1.
bin_ind_interp = interp1d(
new_eng, np.arange(new_eng.size)-1,
bounds_error = False, fill_value = (-2, new_eng.size)
)
bin_ind = bin_ind_interp(self.eng)
# bin_ind = np.interp(self.eng, new_eng,
# np.arange(new_eng.size)-1, left = -2, right = new_eng.size)
# Locate where bin_ind is below 0, above self.length-1 and in between.
ind_low = np.where(bin_ind < 0)
ind_high = np.where(bin_ind == new_eng.size)
ind_reg = np.where( (bin_ind >= 0) & (bin_ind <= new_eng.size - 1) )
if ind_high[0].size > 0:
warnings.warn("The new abscissa lies below the old one: only bins that lie within the new abscissa will be rebinned, bins above the abscissa will be discarded.", RuntimeWarning)
# raise OverflowError("the new abscissa lies below the old one: this function cannot handle overflow (yet?).")
# Get the total N and toteng in each bin of self._data
if self._spec_type == 'dNdE':
N_arr = self.totN('bin')
toteng_arr = self.toteng('bin')
elif self._spec_type == 'N':
N_arr = self.N
toteng_arr = self.N*self.eng
N_arr_low = N_arr[ind_low]
N_arr_high = N_arr[ind_high]
N_arr_reg = N_arr[ind_reg]
toteng_arr_low = toteng_arr[ind_low]
# Bin width of the new array. Use only the log bin width, so that dN/dE = N/(E d log E)
if self._spec_type == 'dNdE':
new_E_dlogE = new_eng * get_log_bin_width(new_eng)
# Regular bins first, done in a completely vectorized fashion.
# reg_bin_low is the array of the lower bins to be allocated the particles in N_arr_reg, similarly reg_bin_upp. This should also take care of the fact that bin_ind is an integer.
reg_bin_low = np.floor(bin_ind[ind_reg]).astype(int)
reg_bin_upp = reg_bin_low + 1
# Takes care of the case where eng[-1] = new_eng[-1]
reg_bin_low[reg_bin_low == new_eng.size-2] = new_eng.size - 3
reg_bin_upp[reg_bin_upp == new_eng.size-1] = new_eng.size - 2
if self._spec_type == 'dNdE':
reg_dNdE_low = (
(reg_bin_upp - bin_ind[ind_reg]) * N_arr_reg
/new_E_dlogE[reg_bin_low+1]
)
reg_dNdE_upp = (
(bin_ind[ind_reg] - reg_bin_low) * N_arr_reg
/new_E_dlogE[reg_bin_upp+1]
)
elif self._spec_type == 'N':
reg_N_low = (reg_bin_upp - bin_ind[ind_reg]) * N_arr_reg
reg_N_upp = (bin_ind[ind_reg] - reg_bin_low) * N_arr_reg
# Low bins.
low_bin_low = np.floor(bin_ind[ind_low]).astype(int)
N_above_underflow = np.sum((bin_ind[ind_low] - low_bin_low)
* N_arr_low)
eng_above_underflow = N_above_underflow * new_eng[1]
N_underflow = np.sum(N_arr_low) - N_above_underflow
eng_underflow = np.sum(toteng_arr_low) - eng_above_underflow
if self._spec_type == 'dNdE':
low_dNdE = N_above_underflow/new_E_dlogE[1]
# Add up, obtain the new data.
new_data = np.zeros(new_eng.size)
if self._spec_type == 'dNdE':
new_data[1] += low_dNdE
# reg_dNdE_low = -1 refers to new_eng[0]
np.add.at(new_data, reg_bin_low+1, reg_dNdE_low)
np.add.at(new_data, reg_bin_upp+1, reg_dNdE_upp)
# print(new_data[reg_bin_low+1])
# new_data[reg_bin_low+1] += reg_dNdE_low
# new_data[reg_bin_upp+1] += reg_dNdE_upp
elif self._spec_type == 'N':
new_data[1] += N_above_underflow
np.add.at(new_data, reg_bin_low+1, reg_N_low)
np.add.at(new_data, reg_bin_upp+1, reg_N_upp)
# new_data[reg_bin_low+1] += reg_N_low
# new_data[reg_bin_upp+1] += reg_N_upp
# Implement changes.
self.eng = new_eng[1:]
self._data = new_data[1:]
self.length = self.eng.size
self.underflow['N'] += N_underflow
self.underflow['eng'] += eng_underflow
def rebin_fast(self, out_eng):
""" Rebins the :class:`Spectrum` with 'N' spec_type quickly.
Rebinning conserves total number and total energy. No checks are made: use with caution!
Parameters
----------
out_eng_interp : ndarray
The new abscissa to bin into. If self.eng has values that are smaller than out_eng[0] or larger than out_eng[-1], then the value is discarded *without error*.
Notes
-----
This implementation is identical to :meth:`Spectrum.rebin`, but works only if the spec_type is of type 'N', and further dispenses with underflow and other checks.
See Also
--------
:meth:`.Spectrum.rebin`
"""
first_bin_eng = np.exp(np.log(out_eng[0]) - (np.log(out_eng[1]) - np.log(out_eng[0])))
new_eng = np.insert(out_eng, 0, first_bin_eng)
# Find the relative bin indices for self.eng wrt new_eng. The first bin in new_eng has bin index -1.
bin_ind_interp = interp1d(
new_eng, np.arange(new_eng.size)-1,
bounds_error = False, fill_value = (-2, new_eng.size)
)
bin_ind = bin_ind_interp(self.eng)
# Locate where bin_ind is in between.
ind_low = np.where(bin_ind < 0)
ind_reg = np.where( (bin_ind >= 0) & (bin_ind <= new_eng.size - 1) )
N_arr = self.N
N_arr_low = N_arr[ind_low]
N_arr_reg = N_arr[ind_reg]
# Regular bins first, done in a completely vectorized fashion.
# reg_bin_low is the array of the lower bins to be allocated the particles in N_arr_reg, similarly reg_bin_upp. This should also take care of the fact that bin_ind is an integer.
reg_bin_low = np.floor(bin_ind[ind_reg]).astype(int)
reg_bin_upp = reg_bin_low + 1
# Takes care of the case where eng[-1] = new_eng[-1]
reg_bin_low[reg_bin_low == new_eng.size-2] = new_eng.size - 3
reg_bin_upp[reg_bin_upp == new_eng.size-1] = new_eng.size - 2
reg_N_low = (reg_bin_upp - bin_ind[ind_reg]) * N_arr_reg
reg_N_upp = (bin_ind[ind_reg] - reg_bin_low) * N_arr_reg
# Low bins.
low_bin_low = np.floor(bin_ind[ind_low]).astype(int)
N_above_underflow = np.sum((bin_ind[ind_low] - low_bin_low)
* N_arr_low)
# Add up, obtain the new data.
new_data = np.zeros(new_eng.size)
new_data[1] += N_above_underflow
np.add.at(new_data, reg_bin_low+1, reg_N_low)
np.add.at(new_data, reg_bin_upp+1, reg_N_upp)
# Implement changes.
self.eng = new_eng[1:]
self._data = new_data[1:]
self.length = self.eng.size
def engloss_rebin(
self, in_eng, out_eng, out_spec_type=None, fast=False
):
""" Converts an energy loss spectrum to a secondary spectrum.
An "energy loss spectrum" is a distribution of outgoing particles as a function of *energy lost* :math:`\\Delta` saved in self.eng after some interaction for an incoming particle :math:`E'` specified by in_eng. The "secondary spectrum" is simply the distribution of outgoing particles as a function of their own energy :math:`E` instead, with abscissa out_eng.
Parameters
----------
in_eng : float
The injection energy of the primary which gives rise to self.dNdE as the energy loss spectrum.
out_eng : ndarray
The final energy abscissa to bin into. If not specified, it is assumed to be the same as the initial abscissa.
out_spec_type: {'N', 'dNdE'}, optional
The spec_type of the output spectrum. If not specified, the output spectrum will have the same spec_type.
fast: bool, optional
If fast, uses :meth:`Spectrum.rebin_fast` instead of :meth:`Spectrum.rebin` for speed.
Notes
        -----
This function is simply a numerical version of the fact that
.. math::
\\frac{dN}{d \\Delta}(\\Delta) = \\frac{dN}{dE} (E = E' - \\Delta)
in discretized form, preserving the total number and total energy in the spectrum using :meth:`Spectrum.rebin`.
See Also
        --------
:meth:`Spectrum.rebin`
:meth:`Spectrum.rebin_fast`
"""
# sec_spec_eng is the injected energy - delta,
# use float128 for very small differences.
sec_spec_eng = np.flipud(np.float128(in_eng) - np.float128(self.eng))
N_arr = np.flipud(self.N)
# consider only positive energy
pos_eng = sec_spec_eng > 0
out_eng = np.float128(out_eng)
if N_arr[pos_eng].size > 1:
new_spec = Spectrum(
sec_spec_eng[pos_eng], N_arr[pos_eng],
spec_type = 'N'
)
if fast:
new_spec.rebin_fast(out_eng)
else:
new_spec.rebin(out_eng)
        elif N_arr[pos_eng].size == 1:
new_spec = rebin_N_arr(
N_arr[pos_eng], sec_spec_eng[pos_eng],
out_eng, spec_type = self._spec_type
)
else:
new_spec = Spectrum(
out_eng, np.zeros_like(out_eng), spec_type = 'N'
)
# downcast the energy array.
new_spec.eng = np.float64(new_spec.eng)
if out_spec_type is not None:
if new_spec.spec_type != out_spec_type:
new_spec.switch_spec_type()
if self.spec_type != out_spec_type:
self.switch_spec_type()
else:
if new_spec.spec_type != self.spec_type:
new_spec.switch_spec_type()
self.eng = out_eng
self._data = new_spec._data
self.length = out_eng.size
self.underflow['N'] = new_spec.underflow['N']
self.underflow['eng'] = new_spec.underflow['eng']
def at_eng(self, new_eng, left=-200, right=-200):
"""Interpolates the spectrum at a new abscissa.
Interpolation is logarithmic.
Parameters
----------
new_eng : ndarray
The new energies to interpolate at.
        left : float, optional
            Value of log(data) returned for energies below the first bin. Defaults to -200, so that the exponentiated value is negligibly small.
        right : float, optional
            Value of log(data) returned for energies above the last bin. Defaults to -200, so that the exponentiated value is negligibly small.
"""
self._data[self._data <= 1e-200] = 1e-200
log_new_data = np.interp(
np.log(new_eng), np.log(self.eng), np.log(self._data),
left=left, right=right
)
self.eng = new_eng
self._data = np.exp(log_new_data)
self._data[self._data <= 1e-200] = 0
def redshift(self, new_rs):
"""Redshifts the :class:`Spectrum` object as a photon spectrum.
Parameters
----------
new_rs : float
The new redshift (1+z) to redshift to.
Examples
--------
>>> eng = np.array([1, 10, 100, 1000])
>>> spec = Spectrum(eng, np.ones(4), rs=100, spec_type='N')
>>> spec.redshift(10)
>>> print(spec.N)
[1. 1. 1. 0.]
>>> print(spec.underflow['N'])
1.0
"""
if self.rs <= 0:
raise ValueError('self.rs must be initialized.')
fac = new_rs/self.rs
eng_orig = self.eng
self.eng = self.eng*fac
if self._spec_type == 'dNdE':
self.dNdE = self.dNdE/fac
self.underflow['eng'] *= fac
self.rebin(eng_orig)
self.rs = new_rs
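
# A minimal conservation check (a sketch, not part of the original module):
# per the docstrings above, rebinning a spec_type 'N' Spectrum onto a wider
# abscissa should preserve both total particle number and total energy.
# Assumes the full Spectrum class defined in this module.
if __name__ == '__main__':
    eng = np.logspace(0., 3., 50)
    spec = Spectrum(eng, np.ones_like(eng), spec_type='N')
    N_before = spec.N.sum()
    eng_before = np.dot(spec.N, spec.eng)
    spec.rebin_fast(np.logspace(-1., 4., 120))
    print(np.isclose(N_before, spec.N.sum()))                 # expect True
    print(np.isclose(eng_before, np.dot(spec.N, spec.eng)))   # expect True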
# For handling debug output
import logging as log
from scipy.optimize import shgo
import numpy as np
class EffortAllocation:
def __init__(self, model, covariate_data, allocation_type, *args):
"""
*args will either be budget (if allocation 1) or failures (if allocation 2)
"""
self.model = model
self.covariate_data = covariate_data
self.hazard_array = np.concatenate((self.model.hazard_array, [self.model.hazardNumerical(self.model.n + 1, self.model.modelParameters)]))
if allocation_type == 1:
self.B = args[0]
self.runAllocation1()
self.percentages = self.organizeResults(self.res.x, self.B)
else:
self.f = args[0]
self.runAllocation2()
self.percentages2 = self.organizeResults(self.res2.x, self.effort)
def runAllocation1(self):
##############################################
## Optimization 1: Maximize fault discovery ##
## optimal allocation of budget B ##
##############################################
# lambda function we solve for
# the x values are the different covariate values obtained through SHGO
cons = ({'type': 'ineq', 'fun': lambda x: self.B - sum([x[i] for i in range(self.model.numCovariates)])})
# restrict bounds to positive values
bnds = tuple((0, None) for i in range(self.model.numCovariates))
self.res = shgo(self.allocationFunction, args=(self.covariate_data,), bounds=bnds, constraints=cons)#, n=10000, iters=4)
        # the result from SHGO is negative since it performs minimization,
        # so we negate the value to recover the maximum
self.mvfVal = -self.res.fun
# number of estimated defects
self.H = self.mvfVal - self.model.mvf_array[-1] # predicted MVF value - last actual MVF value
def allocationFunction(self, x, covariate_data):
new_cov_data = np.concatenate((covariate_data, x[:, None]), axis=1)
omega = self.model.calcOmega(self.hazard_array, self.model.betas, new_cov_data)
# must be negative, SHGO uses minimization and we want to maximize fault discovery
return -(self.model.MVF(self.model.mle_array, omega, self.hazard_array, new_cov_data.shape[1] - 1, new_cov_data))
def runAllocation2(self):
#####################################
## Optimization 2: Minimize budget ##
## identify m additional faults ##
#####################################
cons2 = ({'type': 'eq', 'fun': self.optimization2, 'args': (self.covariate_data,)})
bnds = tuple((0, None) for i in range(self.model.numCovariates))
self.res2 = shgo(lambda x: sum([x[i] for i in range(self.model.numCovariates)]), bounds=bnds, constraints=cons2)
self.effort = np.sum(self.res2.x)
def optimization2(self, x, covariate_data):
res = self.allocationFunction2(x, covariate_data)
H = res - self.model.mvf_array[-1]
return self.f - H
def allocationFunction2(self, x, covariate_data):
new_cov_data = np.concatenate((covariate_data, x[:, None]), axis=1)
omega = self.model.calcOmega(self.hazard_array, self.model.betas, new_cov_data)
# we want to minimize, SHGO uses minimization
return self.model.MVF(self.model.mle_array, omega, self.hazard_array, new_cov_data.shape[1] - 1, new_cov_data)
#### work in progress
# def runAllocation3(self):
# cons3 = ({'type': 'eq', 'fun': self.optimization3, 'args': (self.covariate_data,)})
# bnds = tuple((0, None) for i in range(self.model.numCovariates))
# self.res3 = shgo(lambda x: sum([x[i] for i in range(self.model.numCovariates)]), bounds=bnds, constraints=cons3)
    # self.effort3 = np.sum(self.res3.x)
# def optimization3(self, x, covariate_data):
# res = self.allocationFunction3(x, covariate_data)
# H = res - self.model.mvf_array[-1]
# return self.f - H
# def allocationFunction3(self, x, covariate_data):
# new_cov_data = np.concatenate((covariate_data, x[:, None]), axis=1)
# omega = self.model.calcOmega(self.hazard_array, self.model.betas, new_cov_data)
# # we want to minimize, SHGO uses minimization
# return self.model.MVF(self.model.mle_array, omega, self.hazard_array, new_cov_data.shape[1] - 1, new_cov_data)
def organizeResults(self, results, effort):
# check to ensure that no NAN values are displayed
if effort > 0.0:
# return percentage
return np.multiply(np.divide(results, effort), 100)
else:
# avoid divide by 0
log.warning("Budget of 0.0 calculated for allocation")
return [0.0 for i in range(len(results))]
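
# Standalone sketch (not part of the original class) of the constrained
# optimization pattern used in runAllocation1: SHGO minimizes, so the
# objective is negated, and an inequality constraint keeps the summed
# allocation within the budget B. The concave surrogate objective below is
# a stand-in for the model's MVF, not the real one.
def _demo_budget_allocation(B=10.0, num_covariates=3):
    objective = lambda x: -np.sum(np.sqrt(1.0 + x))  # maximize a concave surrogate
    cons = ({'type': 'ineq', 'fun': lambda x: B - np.sum(x)},)
    bnds = tuple((0, B) for _ in range(num_covariates))
    res = shgo(objective, bounds=bnds, constraints=cons)
    return res.x, -res.fun

if __name__ == '__main__':
    x_opt, best = _demo_budget_allocation()
    print(x_opt, best)  # the surrogate's optimum splits the budget roughly equally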
from app.model.bukuModel import Buku
from app.model.anggotaModel import Anggota
from app.model.transaksiModel import Transaksi
from app.utility import *
from datetime import datetime, timedelta
import pyfiglet
def transaksiMenu(idUser, namaUser):
    print(pyfiglet.figlet_format("E-LIB") + "===========================")
    print("1. Add Book Loan Record\n2. Show Loan History\n3. Return Book\n4. Delete Loan Record\n5. Back")
    print("===========================")
    pilihSubMenu = int(input("Menu choice: "))
clear()
if(pilihSubMenu == 1):
bukuNotEmpty = Buku().fetchAllBuku()
if(bukuNotEmpty):
            idBuku = int(input("Enter the ID of the book to borrow: "))
clear()
bukuExists = Buku().fetchSingleBuku(idBuku)
clear()
if(bukuExists):
anggotaNotEmpty = Anggota().fetchAllAnggota()
                idAnggota = int(input("Enter the ID of the member borrowing the book: "))
clear()
anggotaExists = Anggota().fetchSingleAnggota(idAnggota)
if(anggotaExists):
tanggalPinjam = datetime.today().strftime('%Y-%m-%d')
initialDate = datetime.strptime(str(tanggalPinjam), '%Y-%m-%d')
modifiedDate = initialDate + timedelta(days=3)
tanggalKembali = datetime.strftime(modifiedDate, '%Y-%m-%d')
statusKembali = "0"
data = []
data.append((idBuku, idAnggota, idUser, tanggalPinjam, tanggalKembali, statusKembali))
clear()
Transaksi().insertTransaksi(data)
print("Berhasil menyimpan data")
input("Enter untuk melanjukan...")
else:
clear()
print(f"Tidak ditemukan data anggota dengan ID {idAnggota}")
input("Enter untuk melanjukan...")
else:
clear()
print(f"Tidak ditemukan data buku dengan ID {idBuku}")
input("Enter untuk melanjukan...")
else:
input("Enter untuk melanjukan...")
elif(pilihSubMenu == 2):
Transaksi().fetchAllTransaksi()
input("Enter untuk melanjukan...")
elif(pilihSubMenu == 3):
transaksiNotEmpty = Transaksi().fetchAllTransaksi()
if(transaksiNotEmpty):
idInput = int(input("Masukkan ID transaksi yang bersangkutan: "))
clear()
transaksiExists = Transaksi().fetchSingleTransaksi(idInput)
            # Check that the transaction exists before indexing into it
            # (the original indexed transaksiExists[6] before the existence check).
            if(not transaksiExists):
                clear()
                print(f"No transaction record found with ID {idInput}")
                input("Press Enter to continue...")
            elif(transaksiExists[6] == "1"):
                clear()
                print(f"Sorry, the book for transaction ID {idInput} has already been returned.")
                input("Press Enter to continue...")
            else:
                statusKembali = "1"
                data = (statusKembali, idInput)
                confirmationMsg = input(f"Are you sure you want to process transaction ID {idInput}? (Y/N): ").lower()
                if(confirmationMsg == "y"):
                    Transaksi().updateTransaksi(data)
                    clear()
                    Transaksi().printNota(idInput, namaUser)
                    input("Press Enter to continue...")
                else:
                    clear()
else:
input("Enter untuk melanjukan...")
elif(pilihSubMenu == 4):
transaksiNotEmpty = Transaksi().fetchAllTransaksi()
if(transaksiNotEmpty):
idInput = int(input("Masukkan id transaksi yang ingin dihapus: "))
clear()
transaksiExists = Transaksi().fetchSingleTransaksi(idInput)
if(transaksiExists):
confirmationMsg = input(f"Apakah anda yakin ingin menghapus transaksi dengan ID {idInput}? (Y/N): ").lower()
if(confirmationMsg == "y"):
clear()
Transaksi().deleteTransaksi(idInput)
print("Berhasil menghapus data")
input("Enter untuk melanjukan...")
else:
clear()
else:
clear()
print(f"Tidak ditemukan data transaksi dengan ID {idInput}")
input("Enter untuk melanjukan...")
else:
input("Enter untuk melanjukan...")
else:
        pass
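
# Minimal sketch (not part of the original app) of the due-date arithmetic
# used in the loan flow above: the return date is the borrow date plus a
# fixed three-day loan period.
if __name__ == '__main__':
    borrow_date = datetime.today().strftime('%Y-%m-%d')
    due_date = (datetime.strptime(borrow_date, '%Y-%m-%d')
                + timedelta(days=3)).strftime('%Y-%m-%d')
    print(borrow_date, '->', due_date)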
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetScalingPlanResult',
'AwaitableGetScalingPlanResult',
'get_scaling_plan',
]
@pulumi.output_type
class GetScalingPlanResult:
"""
Represents a scaling plan definition.
"""
def __init__(__self__, description=None, etag=None, exclusion_tag=None, friendly_name=None, host_pool_references=None, host_pool_type=None, id=None, identity=None, kind=None, location=None, managed_by=None, name=None, object_id=None, plan=None, ring=None, schedules=None, sku=None, tags=None, time_zone=None, type=None):
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if exclusion_tag and not isinstance(exclusion_tag, str):
raise TypeError("Expected argument 'exclusion_tag' to be a str")
pulumi.set(__self__, "exclusion_tag", exclusion_tag)
if friendly_name and not isinstance(friendly_name, str):
raise TypeError("Expected argument 'friendly_name' to be a str")
pulumi.set(__self__, "friendly_name", friendly_name)
if host_pool_references and not isinstance(host_pool_references, list):
raise TypeError("Expected argument 'host_pool_references' to be a list")
pulumi.set(__self__, "host_pool_references", host_pool_references)
if host_pool_type and not isinstance(host_pool_type, str):
raise TypeError("Expected argument 'host_pool_type' to be a str")
pulumi.set(__self__, "host_pool_type", host_pool_type)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if managed_by and not isinstance(managed_by, str):
raise TypeError("Expected argument 'managed_by' to be a str")
pulumi.set(__self__, "managed_by", managed_by)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if object_id and not isinstance(object_id, str):
raise TypeError("Expected argument 'object_id' to be a str")
pulumi.set(__self__, "object_id", object_id)
if plan and not isinstance(plan, dict):
raise TypeError("Expected argument 'plan' to be a dict")
pulumi.set(__self__, "plan", plan)
if ring and not isinstance(ring, int):
raise TypeError("Expected argument 'ring' to be a int")
pulumi.set(__self__, "ring", ring)
if schedules and not isinstance(schedules, list):
raise TypeError("Expected argument 'schedules' to be a list")
pulumi.set(__self__, "schedules", schedules)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if time_zone and not isinstance(time_zone, str):
raise TypeError("Expected argument 'time_zone' to be a str")
pulumi.set(__self__, "time_zone", time_zone)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
Description of scaling plan.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def etag(self) -> str:
"""
The etag field is *not* required. If it is provided in the response body, it must also be provided as a header per the normal etag convention. Entity tags are used for comparing two or more entities from the same requested resource. HTTP/1.1 uses entity tags in the etag (section 14.19), If-Match (section 14.24), If-None-Match (section 14.26), and If-Range (section 14.27) header fields.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="exclusionTag")
def exclusion_tag(self) -> Optional[str]:
"""
Exclusion tag for scaling plan.
"""
return pulumi.get(self, "exclusion_tag")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[str]:
"""
User friendly name of scaling plan.
"""
return pulumi.get(self, "friendly_name")
@property
@pulumi.getter(name="hostPoolReferences")
def host_pool_references(self) -> Optional[Sequence['outputs.ScalingHostPoolReferenceResponse']]:
"""
List of ScalingHostPoolReference definitions.
"""
return pulumi.get(self, "host_pool_references")
@property
@pulumi.getter(name="hostPoolType")
def host_pool_type(self) -> Optional[str]:
"""
HostPool type for desktop.
"""
return pulumi.get(self, "host_pool_type")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ResourceModelWithAllowedPropertySetResponseIdentity']:
return pulumi.get(self, "identity")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Metadata used by portal/tooling/etc to render different UX experiences for resources of the same type; e.g. ApiApps are a kind of Microsoft.Web/sites type. If supported, the resource provider must validate and persist this value.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="managedBy")
def managed_by(self) -> Optional[str]:
"""
The fully qualified resource ID of the resource that manages this resource. Indicates if this resource is managed by another Azure resource. If this is present, complete mode deployment will not delete the resource if it is removed from the template since it is managed by another resource.
"""
return pulumi.get(self, "managed_by")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="objectId")
def object_id(self) -> str:
"""
ObjectId of scaling plan. (internal use)
"""
return pulumi.get(self, "object_id")
@property
@pulumi.getter
def plan(self) -> Optional['outputs.ResourceModelWithAllowedPropertySetResponsePlan']:
return pulumi.get(self, "plan")
@property
@pulumi.getter
def ring(self) -> Optional[int]:
"""
The ring number of scaling plan.
"""
return pulumi.get(self, "ring")
@property
@pulumi.getter
def schedules(self) -> Optional[Sequence['outputs.ScalingScheduleResponse']]:
"""
List of ScalingSchedule definitions.
"""
return pulumi.get(self, "schedules")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.ResourceModelWithAllowedPropertySetResponseSku']:
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="timeZone")
def time_zone(self) -> Optional[str]:
"""
Timezone of the scaling plan.
"""
return pulumi.get(self, "time_zone")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetScalingPlanResult(GetScalingPlanResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetScalingPlanResult(
description=self.description,
etag=self.etag,
exclusion_tag=self.exclusion_tag,
friendly_name=self.friendly_name,
host_pool_references=self.host_pool_references,
host_pool_type=self.host_pool_type,
id=self.id,
identity=self.identity,
kind=self.kind,
location=self.location,
managed_by=self.managed_by,
name=self.name,
object_id=self.object_id,
plan=self.plan,
ring=self.ring,
schedules=self.schedules,
sku=self.sku,
tags=self.tags,
time_zone=self.time_zone,
type=self.type)
def get_scaling_plan(resource_group_name: Optional[str] = None,
scaling_plan_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetScalingPlanResult:
"""
Represents a scaling plan definition.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str scaling_plan_name: The name of the scaling plan.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['scalingPlanName'] = scaling_plan_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:desktopvirtualization/v20210201preview:getScalingPlan', __args__, opts=opts, typ=GetScalingPlanResult).value
return AwaitableGetScalingPlanResult(
description=__ret__.description,
etag=__ret__.etag,
exclusion_tag=__ret__.exclusion_tag,
friendly_name=__ret__.friendly_name,
host_pool_references=__ret__.host_pool_references,
host_pool_type=__ret__.host_pool_type,
id=__ret__.id,
identity=__ret__.identity,
kind=__ret__.kind,
location=__ret__.location,
managed_by=__ret__.managed_by,
name=__ret__.name,
object_id=__ret__.object_id,
plan=__ret__.plan,
ring=__ret__.ring,
schedules=__ret__.schedules,
sku=__ret__.sku,
tags=__ret__.tags,
time_zone=__ret__.time_zone,
type=__ret__.type)
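
# A minimal usage sketch (not part of the generated file); the resource
# group and scaling plan names below are placeholders:
#
#     plan = get_scaling_plan(resource_group_name="example-rg",
#                             scaling_plan_name="example-plan")
#     pulumi.export("scalingPlanId", plan.id)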
# jordyantunes/Imagine
import threading
from collections import deque
import numpy as np
import time
from mpi4py import MPI
class ReplayBuffer:
def __init__(self, buffer_shapes, size_in_transitions, T, sample_transitions, goal_sampler, reward_function):
"""Creates a replay buffer.
Args:
buffer_shapes (dict of ints): the shape for all buffers that are used in the replay
buffer
size_in_transitions (int): the size of the buffer, measured in transitions
T (int): the time horizon for episodes
            sample_transitions (function): a function that samples from the replay buffer
            goal_sampler: object tracking discovered goals and their feedback memory
            reward_function: reward model used to check which goals an episode satisfies
        """
self.bias_buffer = True
self.buffer_shapes = buffer_shapes
self.size = size_in_transitions // T
self.T = T
self.sample_transitions = sample_transitions
self.goal_sampler = goal_sampler
self.reward_function = reward_function
# self.buffers is {key: array(size_in_episodes x T or T+1 x dim_key)}
self.buffers = {key: np.empty([self.size, *shape])
for key, shape in buffer_shapes.items()}
self.buffers['g_str'] = np.array([None] * self.size)
self.goals_indices = []
self.imagined_goal_indices = []
# memory management
self.current_size = 0
self.n_transitions_stored = 0
self.pointer = 0
self.lock = threading.Lock()
@property
def full(self):
with self.lock:
return self.current_size == self.size
def sample(self, batch_size, epoch):
"""Returns a dict {key: array(batch_size x shapes[key])}
"""
buffers = {}
with self.lock:
assert self.current_size > 0
for key in self.buffers.keys():
buffers[key] = self.buffers[key][:self.current_size]
buffers['obs_2'] = buffers['obs'][:, 1:, :]
init = time.time()
out = self.sample_transitions(buffers,
self.goals_indices,
batch_size,
epoch)
transitions, replay_ratio_positive_rewards, replay_proba, replay_ratio_positive_per_goal, time_dict = out
for key in (['r', 'obs_2'] + list(self.buffers.keys())):
assert key in transitions, "key %s missing from transitions" % key
time_dict['time_buffer_sample'] = time.time() - init
return transitions, replay_ratio_positive_rewards, replay_proba, replay_ratio_positive_per_goal, time_dict
def add_imagined_goals_to_goals_reached_ids(self, discovered_goal_ids, imagined_inds, episode_batch, goals_reached_ids):
        if len(imagined_inds) > 0:  # also use the reward function to check which imagined goals were satisfied, and fill the corresponding buffers
final_obs = np.array([ep['obs'][-1] for ep in episode_batch])
imagined_goals = np.array(discovered_goal_ids)[imagined_inds]
# test 50 goals for each episode
n_attempts = min(50, len(imagined_goals))
goals_to_try = np.random.choice(imagined_goals, size=n_attempts, replace=False)
obs = np.repeat(final_obs, n_attempts, axis=0)
goals = np.tile(goals_to_try, final_obs.shape[0])
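            # After repeat/tile, row k of obs pairs episode k // n_attempts
            # with candidate goal k % n_attempts, so a single predict() call
            # scores every (episode, imagined goal) combination at once.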
rewards = self.reward_function.predict(state=obs, goal_ids=goals)[0]
for i in range(len(episode_batch)):
pos_goals = goals_to_try[np.where(rewards[i * n_attempts: (i + 1) * n_attempts] == 0)].tolist()
goals_reached_ids[i] += pos_goals
return goals_reached_ids
def store_episode(self, episode_batch, goals_reached_ids):
"""episode_batch: array(batch_size x (T or T+1) x dim_key)
"""
# update the set of discovered goal ids
discovered_goal_ids = self.goal_sampler.feedback_memory['memory_id']
for _ in range(len(discovered_goal_ids) - len(self.goals_indices)):
self.goals_indices.append(deque())
imagined_inds = np.argwhere(np.array(self.goal_sampler.feedback_memory['imagined']) == 1).flatten()
goals_reached_ids = self.add_imagined_goals_to_goals_reached_ids(discovered_goal_ids, imagined_inds, episode_batch, goals_reached_ids)
batch_size = len(episode_batch)
assert batch_size == len(goals_reached_ids)
with self.lock:
idxs = self._get_storage_idx(batch_size)
# Maintain buffers for each goal reached to bias buffer sampling
# An episode is added to a particular goal buffer if
# the final transition satisfies that goal.
if self.bias_buffer:
for i in range(batch_size):
# remove old indices
if self.current_size == self.size:
for goal_buffer_ids in self.goals_indices:
if len(goal_buffer_ids) > 0:
if idxs[i] == goal_buffer_ids[0]:
goal_buffer_ids.popleft()
# append new goal indices
for reached_id in goals_reached_ids[i]:
assert reached_id in discovered_goal_ids
ind_list = discovered_goal_ids.index(reached_id)
self.goals_indices[ind_list].append(idxs[i])
# load inputs into buffers
for i in range(batch_size):
for key in self.buffers.keys():
self.buffers[key][idxs[i]] = episode_batch[i][key]
self.n_transitions_stored += batch_size * self.T
def get_current_episode_size(self):
with self.lock:
return self.current_size
def get_current_size(self):
with self.lock:
return self.current_size * self.T
def get_transitions_stored(self):
with self.lock:
return self.n_transitions_stored
def clear_buffer(self):
with self.lock:
self.current_size = 0
def _get_storage_idx(self, inc=None):
inc = inc or 1 # size increment
assert inc <= self.size, "Batch committed to replay is too large!"
# fifo memory
if self.pointer + inc <= self.size:
idx = np.arange(self.pointer, self.pointer + inc)
self.pointer = self.pointer + inc
else:
overflow = inc - (self.size - self.pointer)
idx_a = np.arange(self.pointer, self.size)
idx_b = np.arange(0, overflow)
idx = np.concatenate([idx_a, idx_b])
self.pointer = overflow
# update replay size
self.current_size = min(self.size, self.current_size + inc)
if inc == 1:
idx = idx[0]
return idx
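
# Standalone sketch (not from the original source) of the FIFO wrap-around
# indexing implemented in _get_storage_idx above: a contiguous block of slots
# is used while it fits, after which the write pointer wraps to the start.
def _demo_fifo_indices(pointer, inc, size):
    if pointer + inc <= size:
        idx = np.arange(pointer, pointer + inc)
        pointer = pointer + inc
    else:
        overflow = inc - (size - pointer)
        idx = np.concatenate([np.arange(pointer, size), np.arange(0, overflow)])
        pointer = overflow
    return idx, pointer

if __name__ == '__main__':
    ptr = 0
    for _ in range(4):
        idx, ptr = _demo_fifo_indices(ptr, 2, 5)
        print(idx)  # [0 1], then [2 3], then [4 0], then [1 2]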
from pynwb.misc import AnnotationSeries
from pynwb import TimeSeries
import pandas as pd
import numpy as np
import scipy.io
import os
# <NAME> 2021
def load_templates_optophysiology(stimuli_mat_file):
# This function loads a mat file containing a structure with ALL the possible stimuli templates
# The group of stimuli is identified by the "Code" field
# For now there are only stimuli templates for Codes 3 and 4 (brightness and spatial frequency stimulation)
# Use the matlab script within optophysiology_stimulus_templates folder to assign the rest
templates = scipy.io.loadmat(stimuli_mat_file)
stimuli_structure = templates['stimuli_structure']
#Codes = templates['Code'][0]
#Descriptions = templates['Description'][0]
#stimulus_IDs = templates['stimulusID'][0]
#stimulus_Matrices = templates['stimulusMatrix'][0]
return stimuli_structure
def used_templates(folder_name):
# This function checks the file "StimulusConfig.txt" to get which stimulusIDs were used during stimulation.
# Then it retrieves the timing of each stimulus presentation from "StimulusTimes.txt"
# Load All templates
    stimuli_structure = load_templates_optophysiology(
        os.path.join('optophysiology_stimuli_templates', 'optophysiology_stimulus_templates.mat'))
    # Load the file that contains a collection of stimulus parameters. This is used to create a "comments" string
df_stimulus_codenames = pd.read_csv(os.path.join(folder_name, 'StimulusConfig.txt'), sep=',', header=None)
# Log contrast value
if df_stimulus_codenames[5][0] == 1:
contrast_comment = 'maximum contrast'
elif df_stimulus_codenames[5][0] == 0:
contrast_comment = 'no contrast'
else:
contrast_comment = 'a contrast value of ' + str(df_stimulus_codenames[5][0]) + ', ' \
'where 1 is maximum contrast and 0 is no contrast'
# Log sign of stimulus relative to background (-1 is a stimulus darker than background, 1 is brighter)
if df_stimulus_codenames[0][1] == 1:
contrast_sign_comment = 'brighter'
elif df_stimulus_codenames[0][1] == -1:
contrast_sign_comment = 'darker'
# Finally collect the variables that are needed to be stored within the NWB File
comments = 'Each stimulus was presented ' + str(int(df_stimulus_codenames[1][0])) + ' times. ' \
'Each stimulation lasted ' + str(df_stimulus_codenames[3][0]) + ' seconds. ' \
'The interstimulus period lasted ' + str(df_stimulus_codenames[4][0]) + ' seconds. ' \
'The stimulus was presented at ' + contrast_comment + '. ' \
'The stimulus was ' + contrast_sign_comment + ' than the background. ' \
'The screen was ' + str(int(df_stimulus_codenames[1][1])) + 'x' + str(int(df_stimulus_codenames[2][1])) \
+ ' pixels. ' \
'The display area was a square of ' + str(int(df_stimulus_codenames[4][1])) + ' pixels side with a ' + \
str(int(df_stimulus_codenames[3][1])) + ' pixels y_offset from the bottom of the screen. ' \
'The background intensity value was set at ' + str(df_stimulus_codenames[5][1]) + \
' (continuous from 0 (black) to 1 (white).'
# Load file that contains the info for the presentations [index, time, stimulus type]
df_stimulus_presentations = pd.read_csv(os.path.join(folder_name, 'StimulusTimes.txt'), sep=',', header=None)
timestamps = np.array(df_stimulus_presentations[1])
stimulusIDs = np.array(df_stimulus_presentations[2])
    selected_code = None
for code in range(11): # 11 Total experimental stimulation designs
if np.array_equal(np.unique(stimulusIDs), np.unique(stimuli_structure[0][code][2])): # This checks if the stimulusIDs used in the experiment match the templates
selected_code = code
stimulusDescription = stimuli_structure[0][code][1]
template_stimuli = stimuli_structure[0][code][3] # Matrix with templates e.g.: 480x800x11
break
    if selected_code is None:  # no matching template found (note: code 0 is a valid match)
stimulusDescription = []
template_stimuli = stimulusIDs
return stimulusDescription, template_stimuli, timestamps, comments
def add_to_nwb(nwbfile, stimulusDescription, template_stimuli, timestamps, comments):
    # A template was found iff used_templates() filled in stimulusDescription.
    if np.size(stimulusDescription) > 0:
stimuli_series = TimeSeries(
name='stimuliTemplates',
data=template_stimuli,
timestamps=timestamps,
comments=comments,
unit='',
description=str(stimulusDescription)
)
nwbfile.add_stimulus_template(stimuli_series)
print('[4] Added stimulus templates')
else:
stimuli_series = AnnotationSeries(
name='stimuliSeries',
data=template_stimuli,
timestamps=timestamps,
comments=comments,
description=str(stimulusDescription)
)
nwbfile.add_stimulus(stimuli_series)
print('[4] Added stimulus times and codes')
return nwbfile
def add_stimulus_optophysiology(nwbfile, folder_name, single_excel_entry):
if single_excel_entry['experimenter'] == "<NAME>":
# Get stimulation info - Either the templates, or the codes used
stimulusDescription, template_stimuli, timestamps, comments = used_templates(folder_name)
# Add to NWB file
nwbfile = add_to_nwb(nwbfile, stimulusDescription, template_stimuli, timestamps, comments)
return nwbfile
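
# Standalone sketch (not part of the original module) of the matching test in
# used_templates(): a stimulation design is recognized when the set of stimulus
# IDs logged during the experiment equals the set of IDs stored with a template.
if __name__ == '__main__':
    logged_ids = np.array([3, 1, 2, 1, 3])
    template_ids = np.array([1, 2, 3])
    print(np.array_equal(np.unique(logged_ids), np.unique(template_ids)))  # True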
import numpy as np
from pandas import (
Categorical,
CategoricalIndex,
Index,
Interval,
)
import pandas._testing as tm
class TestReindex:
def test_reindex_list_non_unique(self):
# GH#11586
ci = CategoricalIndex(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning, match="non-unique"):
res, indexer = ci.reindex(["a", "c"])
tm.assert_index_equal(res, Index(["a", "a", "c"]), exact=True)
tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
def test_reindex_categorical_non_unique(self):
ci = CategoricalIndex(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning, match="non-unique"):
res, indexer = ci.reindex(Categorical(["a", "c"]))
exp = CategoricalIndex(["a", "a", "c"], categories=["a", "c"])
tm.assert_index_equal(res, exp, exact=True)
tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
def test_reindex_list_non_unique_unused_category(self):
ci = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
with tm.assert_produces_warning(FutureWarning, match="non-unique"):
res, indexer = ci.reindex(["a", "c"])
exp = Index(["a", "a", "c"], dtype="object")
tm.assert_index_equal(res, exp, exact=True)
tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
def test_reindex_categorical_non_unique_unused_category(self):
ci = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
with tm.assert_produces_warning(FutureWarning, match="non-unique"):
res, indexer = ci.reindex(Categorical(["a", "c"]))
exp = CategoricalIndex(["a", "a", "c"], categories=["a", "c"])
tm.assert_index_equal(res, exp, exact=True)
tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
def test_reindex_duplicate_target(self):
# See GH25459
cat = CategoricalIndex(["a", "b", "c"], categories=["a", "b", "c", "d"])
res, indexer = cat.reindex(["a", "c", "c"])
exp = Index(["a", "c", "c"], dtype="object")
tm.assert_index_equal(res, exp, exact=True)
tm.assert_numpy_array_equal(indexer, np.array([0, 2, 2], dtype=np.intp))
res, indexer = cat.reindex(
CategoricalIndex(["a", "c", "c"], categories=["a", "b", "c", "d"])
)
exp = CategoricalIndex(["a", "c", "c"], categories=["a", "b", "c", "d"])
tm.assert_index_equal(res, exp, exact=True)
tm.assert_numpy_array_equal(indexer, np.array([0, 2, 2], dtype=np.intp))
def test_reindex_empty_index(self):
# See GH16770
c = CategoricalIndex([])
res, indexer = c.reindex(["a", "b"])
tm.assert_index_equal(res, Index(["a", "b"]), exact=True)
tm.assert_numpy_array_equal(indexer, np.array([-1, -1], dtype=np.intp))
def test_reindex_categorical_added_category(self):
# GH 42424
ci = CategoricalIndex(
[Interval(0, 1, closed="right"), Interval(1, 2, closed="right")],
ordered=True,
)
ci_add = CategoricalIndex(
[
Interval(0, 1, closed="right"),
Interval(1, 2, closed="right"),
Interval(2, 3, closed="right"),
Interval(3, 4, closed="right"),
],
ordered=True,
)
result, _ = ci.reindex(ci_add)
expected = ci_add
tm.assert_index_equal(expected, result)
from __future__ import unicode_literals
import nose
from reviewboard.hostingsvcs.tests.testcases import ServiceTests
from reviewboard.scmtools.models import Repository, Tool
class AssemblaTests(ServiceTests):
"""Unit tests for the Assembla hosting service."""
service_name = 'assembla'
fixtures = ['test_scmtools']
def test_service_support(self):
"""Testing Assembla service support capabilities"""
self.assertTrue(self.service_class.needs_authorization)
self.assertTrue(self.service_class.supports_bug_trackers)
self.assertTrue(self.service_class.supports_repositories)
self.assertEqual(self.service_class.supported_scmtools,
['Perforce', 'Subversion'])
def test_repo_field_values_perforce(self):
"""Testing Assembla repository field values for Perforce"""
fields = self._get_repository_fields('Perforce', fields={
'assembla_project_id': 'myproject',
})
self.assertEqual(fields['path'], 'perforce.assembla.com:1666')
self.assertNotIn('mirror_path', fields)
self.assertIn('encoding', fields)
self.assertEqual(fields['encoding'], 'utf8')
def test_repo_field_values_subversion(self):
"""Testing Assembla repository field values for Subversion"""
fields = self._get_repository_fields('Subversion', fields={
'assembla_project_id': 'myproject',
'assembla_repo_name': 'myrepo',
})
self.assertEqual(fields['path'],
'https://subversion.assembla.com/svn/myproject/')
self.assertNotIn('mirror_path', fields)
self.assertNotIn('encoding', fields)
def test_save_form_perforce(self):
"""Testing Assembla configuration form with Perforce"""
try:
account = self._get_hosting_account()
service = account.service
service.authorize('myuser', 'abc123', None)
repository = Repository(hosting_account=account,
tool=Tool.objects.get(name='Perforce'))
form = self._get_form(fields={'assembla_project_id': 'myproject'})
form.save(repository)
self.assertIn('use_ticket_auth', repository.extra_data)
self.assertTrue(repository.extra_data['use_ticket_auth'])
self.assertIn('p4_host', repository.extra_data)
self.assertEqual(repository.extra_data['p4_host'], 'myproject')
except ImportError:
raise nose.SkipTest
def test_save_form_subversion(self):
"""Testing Assembla configuration form with Subversion"""
try:
account = self._get_hosting_account()
service = account.service
service.authorize('myuser', 'abc123', None)
repository = Repository(path='https://svn.example.com/',
hosting_account=account,
tool=Tool.objects.get(name='Subversion'))
form = self._get_form(fields={'assembla_project_id': 'myproject'})
form.save(repository)
self.assertNotIn('use_ticket_auth', repository.extra_data)
self.assertNotIn('p4_host', repository.extra_data)
except ImportError:
raise nose.SkipTest
def test_authorize(self):
"""Testing Assembla authorization password storage"""
account = self._get_hosting_account()
service = account.service
self.assertFalse(service.is_authorized())
service.authorize('myuser', 'abc123', None)
self.assertIn('password', account.data)
self.assertNotEqual(account.data['password'], '<PASSWORD>')
self.assertTrue(service.is_authorized())
def test_check_repository_perforce(self):
"""Testing Assembla check_repository with Perforce"""
try:
account = self._get_hosting_account()
service = account.service
service.authorize('myuser', 'abc123', None)
repository = Repository(hosting_account=account,
tool=Tool.objects.get(name='Perforce'))
scmtool = repository.get_scmtool()
self.spy_on(scmtool.check_repository, call_original=False)
service.check_repository(path='mypath',
username='myusername',
password='<PASSWORD>',
scmtool_class=scmtool.__class__,
local_site_name=None,
assembla_project_id='myproject')
self.assertTrue(scmtool.check_repository.called)
self.assertIn('p4_host', scmtool.check_repository.last_call.kwargs)
self.assertEqual(
scmtool.check_repository.last_call.kwargs['p4_host'],
'myproject')
except ImportError:
raise nose.SkipTest
def test_check_repository_subversion(self):
"""Testing Assembla check_repository with Subversion"""
try:
account = self._get_hosting_account()
service = account.service
service.authorize('myuser', 'abc123', None)
repository = Repository(path='https://svn.example.com/',
hosting_account=account,
tool=Tool.objects.get(name='Subversion'))
scmtool = repository.get_scmtool()
self.spy_on(scmtool.check_repository, call_original=False)
service.check_repository(path='https://svn.example.com/',
username='myusername',
password='<PASSWORD>',
scmtool_class=scmtool.__class__,
local_site_name=None)
self.assertTrue(scmtool.check_repository.called)
self.assertNotIn('p4_host',
scmtool.check_repository.last_call.kwargs)
except ImportError:
raise nose.SkipTest
# test/test_trainer.py
from unittest import TestCase, main as unittest_main, mock
import numpy as np
import torch
import torch.nn as nn
from experiments.experiment_histories import calc_hist_length_per_net
from training.trainer import TrainerAdam
def generate_single_layer_net():
""" Setup a neural network with one linear layer for test purposes. """
net = nn.Linear(4, 2)
net.weight = nn.Parameter(torch.tensor([[.5, .5, .1, .1], [.4, .4, .2, .2]]))
net.bias = nn.Parameter(torch.zeros(2))
net.criterion = nn.CrossEntropyLoss()
return net
def generate_fake_data_loader():
"""" Generate fake-DataLoader with four batches, i.e. a list with sub-lists of samples and labels.
It has four batches with three samples each. """
samples1 = torch.tensor([[2., 2., 2., 2.], [2., 2., 0., 0.], [0., 0., 2., 2.]])
samples2 = torch.tensor([[1., 2., 3., 4.], [1., 1., 2., 2.], [2., 2., 2., 2.]])
labels1 = torch.tensor([0, 0, 1])
labels2 = torch.tensor([1, 1, 0])
return [[samples1, labels1], [samples1, labels2], [samples2, labels1], [samples2, labels2]]
class TestTrainer(TestCase):
""" Tests for the trainer module.
Call with 'python -m test.test_trainer' from project root '~'.
Call with 'python -m test_trainer' from inside '~/test'. """
def test_calculate_correct_hist_length(self):
""" Should calculate the correct length for history arrays.
History is saved at the following combinations of epochs and iterations: 0,4; 1,4. """
self.assertEqual(2, calc_hist_length_per_net(5, 2, 5))
def test_calculate_correct_hist_length_rounding(self):
""" Should calculate the correct length for history arrays.
History is saved at the following combinations of epochs and iterations: 0,3; 1,2; 2,1. """
self.assertEqual(3, calc_hist_length_per_net(5, 3, 4))
def test_execute_training(self):
""" The training should be executed without errors and results should have correct shapes.
Use a simple net with one linear layer and fake-data_loaders with samples of shape (1,4). """
# create net and setup trainer with fake-DataLoader (use the same loader for training, validation and test)
net = generate_single_layer_net()
fake_loader = generate_fake_data_loader()
trainer = TrainerAdam(0., fake_loader, fake_loader, fake_loader)
net, train_loss_hist, val_loss_hist, val_acc_hist, test_acc_hist, _, _ = \
trainer.train_net(net, epoch_count=2, plot_step=4)
        self.assertIsNotNone(net)
np.testing.assert_array_less(np.zeros(2, dtype=float), train_loss_hist) # check for positive entries
np.testing.assert_array_less(np.zeros(2, dtype=float), val_loss_hist)
np.testing.assert_array_less(np.zeros(2, dtype=float), val_acc_hist)
np.testing.assert_array_less(np.zeros(2, dtype=float), test_acc_hist)
def test_execute_training_with_early_stopping(self):
""" Should execute training without errors and save results with correct shapes.
It should also return a valid checkpoint and early-stopping index.
Use a simple net with one linear layer and fake-data_loaders with samples of shape (1,4). """
# create net and setup trainer with fake-DataLoader (use the same loader for training, validation and test)
net = generate_single_layer_net()
fake_loader = generate_fake_data_loader()
trainer = TrainerAdam(0., fake_loader, fake_loader, fake_loader, save_early_stop=True)
# perform training with mocked validation-loss
with mock.patch('training.trainer.TrainerAdam.compute_val_loss',
side_effect=[2.0, 1.0, 0.5, 1.0]) as mocked_val_loss:
net, train_loss_hist, val_loss_hist, val_acc_hist, test_acc_hist, early_stop_index, early_stop_cp = \
trainer.train_net(net, epoch_count=2, plot_step=2) # 8 batches (iterations), 4 early-stop evaluations
self.assertEqual(4, mocked_val_loss.call_count)
# early-stopping criterion is True for first three calls (last one counts), thus 5 is the 'early_stop_index'
self.assertEqual(5, early_stop_index)
net.load_state_dict(early_stop_cp) # check if the checkpoint can be loaded without errors
        self.assertIsNotNone(net)
np.testing.assert_array_less(np.zeros(4, dtype=float), train_loss_hist) # check for positive entries
np.testing.assert_array_less(np.zeros(4, dtype=float), val_loss_hist)
np.testing.assert_array_less(np.zeros(4, dtype=float), val_acc_hist)
np.testing.assert_array_less(np.zeros(4, dtype=float), test_acc_hist)
def test_compute_test_acc(self):
""" Should calculate the correct test-accuracy.
The fake-net with one linear layer classifies half of the fake-samples correctly.
Use a fake-val_loader with one batch to validate the result. """
# create net and setup trainer and fake-DataLoader with two batches (use the same samples for both batches)
net = generate_single_layer_net()
samples = torch.tensor([[2., 2., 2., 2.], [2., 2., 0., 0.], [0., 0., 2., 2.]])
test_loader = [[samples, torch.tensor([0, 0, 1])], [samples, torch.tensor([1, 1, 0])]]
trainer = TrainerAdam(0., [], [], test_loader=test_loader)
self.assertEqual(0.5, trainer.compute_acc(net, test=True))
def test_compute_val_acc(self):
""" Should calculate the correct validation-accuracy.
The fake-net with one linear layer classifies all fake-samples correctly.
Use a fake-val_loader with one batch to validate the result. """
# create net and setup trainer and fake-DataLoader with one batch
net = generate_single_layer_net()
val_loader = [[torch.tensor([[2., 2., 2., 2.], [2., 2., 0., 0.], [0., 0., 2., 2.]]), torch.tensor([0, 0, 1])]]
trainer = TrainerAdam(0., [], val_loader, test_loader=[])
self.assertEqual(1., trainer.compute_acc(net, test=False))
def test_compute_val_loss(self):
""" Should calculate a positive validation loss. """
# create net and setup trainer with fake-DataLoader (use the same loader for training, validation and test)
net = generate_single_layer_net()
fake_loader = generate_fake_data_loader()
trainer = TrainerAdam(0., fake_loader, fake_loader, fake_loader)
self.assertLessEqual(0.0, trainer.compute_val_loss(net))
def test_should_save_early_stop_checkpoint_no_evaluation(self):
""" Should return False, because the early-stopping criterion should not be evaluated. """
trainer = TrainerAdam(0., [], [], [], save_early_stop=False)
self.assertIs(trainer.should_save_early_stop_checkpoint(0.5, 0.2), False)
def test_should_save_early_stop_checkpoint_no_new_minimum_greater(self):
""" Should return False, because the current validation-loss is greater than the minimum. """
trainer = TrainerAdam(0., [], [], [], save_early_stop=True)
self.assertIs(trainer.should_save_early_stop_checkpoint(0.5, 0.2), False)
def test_should_save_early_stop_checkpoint_no_new_minimum_equal(self):
""" Should return False, because the current validation-loss is equal to the the minimum. """
trainer = TrainerAdam(0., [], [], [], save_early_stop=True)
self.assertIs(trainer.should_save_early_stop_checkpoint(0.2, 0.2), False)
def test_should_save_early_stop_checkpoint_new_checkpoint(self):
""" Should return True, because the validation-accuracy reached a new minimum. """
trainer = TrainerAdam(0., [], [], [], save_early_stop=True)
self.assertIs(trainer.should_save_early_stop_checkpoint(0.1, 0.2), True)
if __name__ == '__main__':
unittest_main()
# (C) Copyright 2005-2021 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
import copy
import operator
import pickle
import unittest.mock
from traits.api import HasTraits, Int, List
from traits.testing.optional_dependencies import numpy, requires_numpy
from traits.trait_base import _validate_everything
from traits.trait_errors import TraitError
from traits.trait_list_object import (
TraitList,
TraitListEvent,
TraitListObject,
)
def int_item_validator(item):
"""
An item_validator for TraitList that checks that the item is an int
or integer-like object (e.g., any object whose type provides __index__).
Parameters
----------
item : object
Proposed item to add to the list.
Returns
-------
validated_item : object
Actual item to add to the list.
Raises
------
TraitError
If the item is not valid.
"""
try:
return int(operator.index(item))
except TypeError:
raise TraitError("Value {} is not a valid integer".format(item))
def list_item_validator(item):
"""
An item_validator for TraitList that checks that the item is a list.
Parameters
----------
item : object
Proposed item to add to the list.
Returns
-------
validated_item : object
Actual item to add to the list.
Raises
------
TraitError
If the item is not valid.
"""
if isinstance(item, list):
return item
else:
raise TraitError("Value {} is not a list instance".format(item))
class TestTraitListEvent(unittest.TestCase):
def test_creation(self):
event = TraitListEvent(index=2, removed=[3], added=[4])
self.assertEqual(event.index, 2)
self.assertEqual(event.removed, [3])
self.assertEqual(event.added, [4])
event = TraitListEvent(index=2, removed=[3], added=[4])
self.assertEqual(event.index, 2)
self.assertEqual(event.removed, [3])
self.assertEqual(event.added, [4])
def test_defaults(self):
event = TraitListEvent()
self.assertEqual(event.index, 0)
self.assertEqual(event.removed, [])
self.assertEqual(event.added, [])
def test_trait_list_event_str_representation(self):
""" Test string representation of the TraitListEvent class. """
desired_repr = "TraitListEvent(index=0, removed=[], added=[])"
trait_list_event = TraitListEvent()
self.assertEqual(desired_repr, str(trait_list_event))
self.assertEqual(desired_repr, repr(trait_list_event))
def test_trait_list_event_subclass_str_representation(self):
""" Test string representation of a subclass of the TraitListEvent
class. """
class DifferentName(TraitListEvent):
pass
desired_repr = "DifferentName(index=0, removed=[], added=[])"
different_name_subclass = DifferentName()
self.assertEqual(desired_repr, str(different_name_subclass))
self.assertEqual(desired_repr, repr(different_name_subclass))
class TestTraitList(unittest.TestCase):
def setUp(self):
self.index = None
self.added = None
self.removed = None
self.trait_list = None
def notification_handler(self, trait_list, index, removed, added):
self.trait_list = trait_list
self.index = index
self.removed = removed
self.added = added
def test_init(self):
tl = TraitList([1, 2, 3])
self.assertListEqual(tl, [1, 2, 3])
self.assertIs(tl.item_validator, _validate_everything)
self.assertEqual(tl.notifiers, [])
def test_init_no_value(self):
tl = TraitList()
self.assertEqual(tl, [])
self.assertIs(tl.item_validator, _validate_everything)
self.assertEqual(tl.notifiers, [])
def test_init_iterable(self):
tl = TraitList("abcde")
self.assertListEqual(tl, ['a', 'b', 'c', 'd', 'e'])
self.assertIs(tl.item_validator, _validate_everything)
self.assertEqual(tl.notifiers, [])
def test_init_iterable_without_length(self):
tl = TraitList(x**2 for x in range(5))
self.assertEqual(tl, [0, 1, 4, 9, 16])
self.assertIs(tl.item_validator, _validate_everything)
self.assertEqual(tl.notifiers, [])
def test_init_validates(self):
with self.assertRaises(TraitError):
TraitList([1, 2.0, 3], item_validator=int_item_validator)
def test_init_converts(self):
tl = TraitList([True, False], item_validator=int_item_validator)
self.assertEqual(tl, [1, 0])
self.assertTrue(
all(type(item) is int for item in tl),
msg="Non-integers found in int-only list",
)
def test_validator(self):
tl = TraitList([1, 2, 3], item_validator=int_item_validator)
self.assertListEqual(tl, [1, 2, 3])
self.assertEqual(tl.item_validator, int_item_validator)
self.assertEqual(tl.notifiers, [])
def test_notification(self):
tl = TraitList([1, 2, 3], notifiers=[self.notification_handler])
self.assertListEqual(tl, [1, 2, 3])
self.assertIs(tl.item_validator, _validate_everything)
self.assertEqual(tl.notifiers, [self.notification_handler])
tl[0] = 5
self.assertListEqual(tl, [5, 2, 3])
self.assertIs(self.trait_list, tl)
self.assertEqual(self.index, 0)
self.assertEqual(self.removed, [1])
self.assertEqual(self.added, [5])
def test_copy(self):
tl = TraitList([1, 2, 3],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl_copy = copy.copy(tl)
for itm, itm_cpy in zip(tl, tl_copy):
self.assertEqual(itm_cpy, itm)
self.assertEqual(tl_copy.notifiers, [])
self.assertEqual(tl_copy.item_validator, tl.item_validator)
def test_deepcopy(self):
tl = TraitList([1, 2, 3],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl_copy = copy.deepcopy(tl)
for itm, itm_cpy in zip(tl, tl_copy):
self.assertEqual(itm_cpy, itm)
self.assertEqual(tl_copy.notifiers, [])
self.assertEqual(tl_copy.item_validator, tl.item_validator)
def test_deepcopy_memoization(self):
tl = TraitList([1, 2, 3],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
trait_lists_copy = copy.deepcopy([tl, tl])
self.assertIs(trait_lists_copy[0], trait_lists_copy[1])
def test_setitem(self):
tl = TraitList([1, 2, 3],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl[1] = 5
self.assertEqual(self.index, 1)
self.assertEqual(self.removed, [2])
self.assertEqual(self.added, [5])
tl[:] = [1, 2, 3, 4, 5]
self.assertEqual(self.index, 0)
self.assertEqual(self.removed, [1, 5, 3])
self.assertEqual(self.added, [1, 2, 3, 4, 5])
def test_setitem_converts(self):
tl = TraitList([9, 8, 7],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl[1] = False
self.assertEqual(tl, [9, 0, 7])
self.assertEqual(self.index, 1)
self.assertEqual(self.removed, [8])
self.assertEqual(self.added, [0])
self.assertTrue(
all(type(item) is int for item in tl),
msg="Non-integers found in int-only list",
)
self.assertTrue(
all(type(item) is int for item in self.added),
msg="Event contains non-integers for int-only list",
)
tl[::2] = [True, True]
self.assertEqual(tl, [1, 0, 1])
self.assertEqual(self.index, slice(0, 3, 2))
self.assertEqual(self.removed, [9, 7])
self.assertEqual(self.added, [1, 1])
self.assertTrue(
all(type(item) is int for item in tl),
msg="Non-integers found in int-only list",
)
self.assertTrue(
all(type(item) is int for item in self.added),
msg="Event contains non-integers for int-only list",
)
def test_setitem_no_structural_change(self):
tl = TraitList([1, 2, 3],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl[3:] = []
self.assertEqual(tl, [1, 2, 3])
self.assertIsNone(self.index)
self.assertIsNone(self.removed)
self.assertIsNone(self.added)
def test_setitem_no_item_change(self):
tl = TraitList([1, 2, 3],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl[0] = 1
self.assertEqual(tl, [1, 2, 3])
self.assertEqual(self.index, 0)
self.assertEqual(self.removed, [1])
self.assertEqual(self.added, [1])
def test_setitem_no_removed(self):
tl = TraitList([1, 2, 3],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl[3:] = [4, 5, 6]
self.assertEqual(tl, [1, 2, 3, 4, 5, 6])
self.assertEqual(self.index, 3)
self.assertEqual(self.removed, [])
self.assertEqual(self.added, [4, 5, 6])
def test_setitem_no_added(self):
tl = TraitList([1, 2, 3],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl[1:2] = []
self.assertEqual(tl, [1, 3])
self.assertEqual(self.index, 1)
self.assertEqual(self.removed, [2])
self.assertEqual(self.added, [])
def test_setitem_iterable(self):
tl = TraitList([1, 2, 3],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl[:] = (x**2 for x in range(4))
self.assertEqual(tl, [0, 1, 4, 9])
self.assertEqual(self.index, 0)
self.assertEqual(self.removed, [1, 2, 3])
self.assertEqual(self.added, [0, 1, 4, 9])
def test_setitem_indexerror(self):
tl = TraitList([1, 2, 3],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
with self.assertRaises(IndexError):
tl[3] = 4
self.assertEqual(tl, [1, 2, 3])
self.assertIsNone(self.index)
self.assertIsNone(self.removed)
self.assertIsNone(self.added)
def test_setitem_validation_error(self):
tl = TraitList([1, 2, 3],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
with self.assertRaises(TraitError):
tl[0] = 4.5
self.assertEqual(tl, [1, 2, 3])
self.assertIsNone(self.index)
self.assertIsNone(self.removed)
self.assertIsNone(self.added)
with self.assertRaises(TraitError):
tl[0:2] = [1, "a string"]
self.assertEqual(tl, [1, 2, 3])
self.assertIsNone(self.index)
self.assertIsNone(self.removed)
self.assertIsNone(self.added)
with self.assertRaises(TraitError):
tl[2:0:-1] = [1, "a string"]
self.assertEqual(tl, [1, 2, 3])
self.assertIsNone(self.index)
self.assertIsNone(self.removed)
self.assertIsNone(self.added)
def test_setitem_negative_step(self):
tl = TraitList([1, 2, 3, 4, 5],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl[::-2] = [10, 11, 12]
self.assertEqual(tl, [12, 2, 11, 4, 10])
self.assertEqual(self.index, slice(0, 5, 2))
self.assertEqual(self.removed, [1, 3, 5])
self.assertEqual(self.added, [12, 11, 10])
def test_setitem_negative_one_step(self):
tl = TraitList([1, 2, 3, 4, 5],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl[:1:-1] = [10, 11, 12]
self.assertEqual(tl, [1, 2, 12, 11, 10])
self.assertEqual(self.index, 2)
self.assertEqual(self.removed, [3, 4, 5])
self.assertEqual(self.added, [12, 11, 10])
def test_setitem_index_and_validation_error(self):
tl = TraitList([1, 2, 3],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
# Assigning an invalid value to an invalid index: the
# TraitError from the invalid value wins.
with self.assertRaises(TraitError):
tl[3] = 4.5
self.assertEqual(tl, [1, 2, 3])
self.assertIsNone(self.index)
self.assertIsNone(self.removed)
self.assertIsNone(self.added)
# Assigning to a slice with invalid r.h.s. length and
# invalid contents: again, the TraitError wins.
with self.assertRaises(TraitError):
tl[::2] = [1, 2, 4.5]
self.assertEqual(tl, [1, 2, 3])
self.assertIsNone(self.index)
self.assertIsNone(self.removed)
self.assertIsNone(self.added)
def test_setitem_item_conversion(self):
tl = TraitList([2, 3, 4],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl[0] = True
self.assertEqual(tl, [1, 3, 4])
self.assertEqual(self.index, 0)
self.assertEqual(self.removed, [2])
self.assertEqual(self.added, [1])
# Check that the True has been converted to an int.
self.assertTrue(
all(type(item) is int for item in tl),
msg="Non-integers found in int-only list",
)
self.assertTrue(
all(type(item) is int for item in self.added),
msg="Event contains non-integers for int-only list",
)
def test_setitem_corner_case(self):
# A peculiar-looking corner case where it's easy to get the
# implementation wrong (and CPython did so in the distant past).
tl = TraitList(range(7), notifiers=[self.notification_handler])
# Note: new items inserted at position 5, not position 2.
tl[5:2] = [10, 11, 12]
self.assertEqual(tl, [0, 1, 2, 3, 4, 10, 11, 12, 5, 6])
self.assertEqual(self.index, 5)
self.assertEqual(self.removed, [])
self.assertEqual(self.added, [10, 11, 12])
def test_setitem_slice_exhaustive(self):
# Try all possible (slice, list_length) combinations.
for test_slice in self.all_slices(max_index=7):
for test_length in range(6):
for replacement_length in range(6):
with self.subTest(
slice=test_slice,
length=test_length,
replacement=replacement_length,
):
test_list = list(range(test_length))
replacement = list(
range(-1, -1 - replacement_length, -1))
self.assertEqual(len(test_list), test_length)
self.assertEqual(len(replacement), replacement_length)
self.validate_event(
test_list,
lambda items: items.__setitem__(
test_slice,
replacement,
)
)
def test_delitem(self):
tl = TraitList([1, 2, 3],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
del tl[2]
self.assertEqual(self.index, 2)
self.assertEqual(self.removed, [3])
self.assertEqual(self.added, [])
del tl[:]
self.assertEqual(self.index, 0)
self.assertEqual(self.removed, [1, 2])
self.assertEqual(self.added, [])
with self.assertRaises(IndexError):
del tl[0]
def test_delitem_extended_slice_normalization(self):
tl = TraitList([1, 2, 3, 4, 5],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
del tl[2:10:2]
self.assertEqual(tl, [1, 2, 4])
self.assertEqual(self.index, slice(2, 5, 2))
self.assertEqual(self.removed, [3, 5])
self.assertEqual(self.added, [])
def test_delitem_negative_step_normalization(self):
tl = TraitList([1, 2, 3, 4, 5],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
# Same effect as del tl[2:5:2].
del tl[5:1:-2]
self.assertEqual(tl, [1, 2, 4])
self.assertEqual(self.index, slice(2, 5, 2))
self.assertEqual(self.removed, [3, 5])
self.assertEqual(self.added, [])
def test_delitem_negative_step(self):
tl = TraitList([1, 2, 3, 4, 5],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
del tl[::-2]
self.assertEqual(tl, [2, 4])
self.assertEqual(self.index, slice(0, 5, 2))
self.assertEqual(self.removed, [1, 3, 5])
self.assertEqual(self.added, [])
def test_delitem_slice_exhaustive(self):
# Try all possible (slice, list_length) combinations.
for test_slice in self.all_slices(max_index=7):
for test_length in range(11):
with self.subTest(slice=test_slice, length=test_length):
test_list = list(range(test_length))
self.validate_event(
test_list,
lambda items: items.__delitem__(test_slice)
)
def test_delitem_nochange(self):
tl = TraitList([1, 2, 3],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
del tl[3:]
self.assertEqual(tl, [1, 2, 3])
self.assertIsNone(self.index)
self.assertIsNone(self.removed)
self.assertIsNone(self.added)
def test_iadd(self):
tl = TraitList([4, 5],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl += [6, 7]
self.assertEqual(self.index, 2)
self.assertEqual(self.removed, [])
self.assertEqual(self.added, [6, 7])
def test_iadd_validates(self):
tl = TraitList([4, 5],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
with self.assertRaises(TraitError):
tl += [6, 7, 8.0]
self.assertEqual(tl, [4, 5])
self.assertIsNone(self.index)
self.assertIsNone(self.removed)
self.assertIsNone(self.added)
def test_iadd_converts(self):
tl = TraitList([4, 5],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl += [True, True]
self.assertEqual(tl, [4, 5, 1, 1])
self.assertEqual(self.index, 2)
self.assertEqual(self.removed, [])
self.assertEqual(self.added, [1, 1])
self.assertTrue(
all(type(item) is int for item in tl),
msg="Non-integers found in int-only list",
)
self.assertTrue(
all(type(item) is int for item in self.added),
msg="Event contains non-integers for int-only list",
)
def test_iadd_empty(self):
tl = TraitList([4, 5],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl += []
self.assertEqual(tl, [4, 5])
self.assertIsNone(self.index)
self.assertIsNone(self.removed)
self.assertIsNone(self.added)
def test_iadd_iterable(self):
tl = TraitList([4, 5],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl += (x**2 for x in range(3))
self.assertEqual(tl, [4, 5, 0, 1, 4])
self.assertEqual(self.index, 2)
self.assertEqual(self.removed, [])
self.assertEqual(self.added, [0, 1, 4])
def test_imul(self):
tl = TraitList([1, 2],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl *= 1
self.assertListEqual(tl, [1, 2])
self.assertIsNone(self.index)
self.assertIsNone(self.removed)
self.assertIsNone(self.added)
tl *= 2
self.assertEqual(self.index, 2)
self.assertEqual(self.removed, [])
self.assertEqual(self.added, [1, 2])
with self.assertRaises(TypeError):
tl *= "5"
with self.assertRaises(TypeError):
tl *= 2.5
tl *= -1
self.assertEqual(self.index, 0)
self.assertEqual(self.removed, [1, 2, 1, 2])
self.assertEqual(self.added, [])
def test_imul_no_notification_for_empty_list(self):
for multiplier in [-1, 0, 1, 2]:
with self.subTest(multiplier=multiplier):
tl = TraitList(
[],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl *= multiplier
self.assertEqual(tl, [])
self.assertIsNone(self.index)
self.assertIsNone(self.removed)
self.assertIsNone(self.added)
@requires_numpy
def test_imul_integer_like(self):
tl = TraitList([1, 2],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl *= numpy.int64(2)
self.assertEqual(tl, [1, 2, 1, 2])
self.assertEqual(self.index, 2)
self.assertEqual(self.removed, [])
self.assertEqual(self.added, [1, 2])
tl *= numpy.int64(-1)
self.assertEqual(tl, [])
self.assertEqual(self.index, 0)
self.assertEqual(self.removed, [1, 2, 1, 2])
self.assertEqual(self.added, [])
def test_imul_does_not_revalidate(self):
item_validator = unittest.mock.Mock(wraps=int_item_validator)
tl = TraitList([1, 1], item_validator=item_validator)
item_validator.reset_mock()
tl *= 3
item_validator.assert_not_called()
def test_append(self):
tl = TraitList([1],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl.append(2)
self.assertEqual(self.index, 1)
self.assertEqual(self.removed, [])
self.assertEqual(self.added, [2])
def test_append_validates(self):
tl = TraitList([1],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
with self.assertRaises(TraitError):
tl.append(1.0)
self.assertEqual(tl, [1])
self.assertIsNone(self.index)
self.assertIsNone(self.removed)
self.assertIsNone(self.added)
def test_append_converts(self):
tl = TraitList([2],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl.append(False)
self.assertEqual(tl, [2, 0])
self.assertEqual(self.index, 1)
self.assertEqual(self.removed, [])
self.assertEqual(self.added, [0])
self.assertTrue(
all(type(item) is int for item in tl),
msg="Non-integers found in int-only list",
)
self.assertTrue(
all(type(item) is int for item in self.added),
msg="Event contains non-integers for int-only list",
)
def test_extend(self):
tl = TraitList([1],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl.extend([1, 2])
self.assertEqual(self.index, 1)
self.assertEqual(self.removed, [])
self.assertEqual(self.added, [1, 2])
def test_extend_validates(self):
tl = TraitList([5],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
with self.assertRaises(TraitError):
tl.extend([2, 3, 4.0])
self.assertEqual(tl, [5])
self.assertIsNone(self.index)
self.assertIsNone(self.removed)
self.assertIsNone(self.added)
def test_extend_converts(self):
tl = TraitList([4],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl.extend([False, True])
self.assertEqual(tl, [4, 0, 1])
self.assertEqual(self.index, 1)
self.assertEqual(self.removed, [])
self.assertEqual(self.added, [0, 1])
self.assertTrue(
all(type(item) is int for item in tl),
msg="Non-integers found in int-only list",
)
self.assertTrue(
all(type(item) is int for item in self.added),
msg="Event contains non-integers for int-only list",
)
def test_extend_empty(self):
tl = TraitList([1],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl.extend([])
self.assertEqual(tl, [1])
self.assertIsNone(self.index)
self.assertIsNone(self.removed)
self.assertIsNone(self.added)
def test_extend_iterable(self):
tl = TraitList([1],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl.extend(x**2 for x in range(10, 13))
self.assertEqual(tl, [1, 100, 121, 144])
self.assertEqual(self.index, 1)
self.assertEqual(self.removed, [])
self.assertEqual(self.added, [100, 121, 144])
def test_insert(self):
tl = TraitList([2],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl.insert(0, 1) # [1,2]
self.assertEqual(self.index, 0)
self.assertEqual(self.removed, [])
self.assertEqual(self.added, [1])
tl.insert(-1, 3) # [1,3,2]
self.assertEqual(self.index, 1)
self.assertEqual(self.removed, [])
self.assertEqual(self.added, [3])
def test_insert_index_matches_python_interpretation(self):
for insertion_index in range(-10, 10):
with self.subTest(insertion_index=insertion_index):
tl = TraitList([5, 6, 7])
pl = [5, 6, 7]
tl.insert(insertion_index, 1729)
pl.insert(insertion_index, 1729)
self.assertEqual(tl, pl)
def test_insert_validates(self):
tl = TraitList([2],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
with self.assertRaises(TraitError):
tl.insert(0, 1.0)
self.assertEqual(tl, [2])
self.assertIsNone(self.index)
self.assertIsNone(self.removed)
self.assertIsNone(self.added)
def test_insert_converts(self):
tl = TraitList([2, 3],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl.insert(1, True)
self.assertEqual(tl, [2, 1, 3])
self.assertEqual(self.index, 1)
self.assertEqual(self.removed, [])
self.assertEqual(self.added, [1])
self.assertTrue(
all(type(item) is int for item in tl),
msg="Non-integers found in int-only list",
)
self.assertTrue(
all(type(item) is int for item in self.added),
msg="Event contains non-integers for int-only list",
)
def test_pop(self):
tl = TraitList([1, 2, 3, 4, 5],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl.pop()
self.assertEqual(self.index, 4)
self.assertEqual(self.removed, [5])
self.assertEqual(self.added, [])
tl.pop(0)
self.assertEqual(self.index, 0)
self.assertEqual(self.removed, [1])
self.assertEqual(self.added, [])
# tl is now [2,3,4]
tl.pop(-2)
self.assertEqual(self.index, 1)
self.assertEqual(self.removed, [3])
self.assertEqual(self.added, [])
def test_remove(self):
tl = TraitList([1, 2, 3, 4, 5],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl.remove(3)
self.assertEqual(self.index, 2)
self.assertEqual(self.removed, [3])
self.assertEqual(self.added, [])
with self.assertRaises(ValueError):
tl.remove(3)
tl.remove(2.0)
self.assertEqual(self.index, 1)
self.assertEqual(self.removed, [2])
self.assertIsInstance(self.removed[0], int)
self.assertEqual(self.added, [])
def test_clear(self):
tl = TraitList([1, 2, 3, 4, 5],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl.clear()
self.assertEqual(self.index, 0)
self.assertEqual(self.removed, [1, 2, 3, 4, 5])
self.assertEqual(self.added, [])
def test_clear_empty_list(self):
tl = TraitList([],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl.clear()
self.assertEqual(tl, [])
self.assertIsNone(self.index)
self.assertIsNone(self.removed)
self.assertIsNone(self.added)
def test_sort(self):
tl = TraitList([2, 3, 1, 4, 5, 0],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl.sort()
self.assertEqual(tl, [0, 1, 2, 3, 4, 5])
self.assertEqual(self.index, 0)
self.assertEqual(self.removed, [2, 3, 1, 4, 5, 0])
self.assertEqual(self.added, [0, 1, 2, 3, 4, 5])
def test_sort_empty_list(self):
tl = TraitList([],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl.sort()
self.assertIsNone(self.index)
self.assertIsNone(self.removed)
self.assertIsNone(self.added)
def test_sort_already_sorted(self):
tl = TraitList([10, 11, 12, 13, 14],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl.sort()
self.assertEqual(tl, [10, 11, 12, 13, 14])
self.assertEqual(self.index, 0)
self.assertEqual(self.removed, [10, 11, 12, 13, 14])
self.assertEqual(self.added, [10, 11, 12, 13, 14])
def test_reverse(self):
tl = TraitList([1, 2, 3, 4, 5],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl.reverse()
self.assertEqual(tl, [5, 4, 3, 2, 1])
self.assertEqual(self.index, 0)
self.assertEqual(self.removed, [1, 2, 3, 4, 5])
self.assertEqual(self.added, [5, 4, 3, 2, 1])
def test_reverse_empty_list(self):
tl = TraitList([],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
tl.reverse()
self.assertIsNone(self.index)
self.assertIsNone(self.removed)
self.assertIsNone(self.added)
def test_reverse_single_notification(self):
# Regression test for double notification.
notifier = unittest.mock.Mock()
tl = TraitList([1, 2, 3, 4, 5],
notifiers=[notifier])
notifier.assert_not_called()
tl.reverse()
self.assertEqual(notifier.call_count, 1)
def test_pickle(self):
tl = TraitList([1, 2, 3, 4, 5],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
serialized = pickle.dumps(tl, protocol=protocol)
tl_unpickled = pickle.loads(serialized)
self.assertIs(tl_unpickled.item_validator, tl.item_validator)
self.assertEqual(tl_unpickled.notifiers, [])
for i, j in zip(tl, tl_unpickled):
self.assertIs(i, j)
def test_invalid_entry(self):
tl = TraitList([1, 2, 3, 4, 5],
item_validator=int_item_validator,
notifiers=[self.notification_handler])
with self.assertRaises(TraitError):
tl.append("A")
def test_list_of_lists(self):
tl = TraitList([[1]],
item_validator=list_item_validator,
notifiers=[self.notification_handler])
tl.append([2])
# Helper functions for checking a generic operation on a list.
def validate_event(self, original_list, operation):
"""
Validate the event arising from a particular TraitList operation.
Given a test list and an operation to perform, perform
that operation on both a plain Python list and the corresponding
TraitList, then:
- check that the resulting lists match
- check that the event information generated (if any) is suitably
normalized
- check that the list operation can be reconstructed from the
event information
Parameters
----------
original_list : list
List to use for testing.
operation : callable
Single-argument callable which accepts the list and performs
the desired operation on it.
Raises
------
self.failureException
If any aspect of the behaviour is found to be incorrect.
"""
# List to collect notifications in.
notifications = []
def notifier(trait_list, index, removed, added):
notifications.append((index, removed, added))
# Apply the operation to both a plain Python list and a TraitList.
python_list = original_list.copy()
try:
python_result = operation(python_list)
except Exception as e:
python_exception = e
python_raised = True
else:
python_raised = False
trait_list = TraitList(original_list, notifiers=[notifier])
try:
trait_result = operation(trait_list)
except Exception as e:
trait_exception = e
trait_raised = True
else:
trait_raised = False
# Check side-effects, results, and exception types (if applicable).
self.assertEqual(python_list, trait_list)
self.assertEqual(python_raised, trait_raised)
if python_raised:
self.assertEqual(type(python_exception), type(trait_exception))
return
self.assertEqual(python_result, trait_result)
# Check the notification attributes.
if notifications == []:
# No notifications. The new list should match the original,
# and there's nothing more to check.
self.assertEqual(trait_list, original_list)
return
# Otherwise, expect exactly one notification.
self.assertEqual(len(notifications), 1)
index, removed, added = notifications[0]
self.assertTrue(
len(removed) > 0 or len(added) > 0,
"a notification was generated, "
"but no elements were added or removed"
)
# Check normalization of the index.
self.check_index_normalized(index, len(original_list))
# Check that we can reconstruct the list operation from the event.
reconstructed = original_list.copy()
if isinstance(index, slice):
self.assertEqual(removed, reconstructed[index])
if added:
reconstructed[index] = added
else:
del reconstructed[index]
else:
removed_slice = slice(index, index + len(removed))
self.assertEqual(removed, reconstructed[removed_slice])
reconstructed[removed_slice] = added
self.assertEqual(reconstructed, trait_list)
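# Worked example of the reconstruction above: for original [1, 2, 3] and
# `del tl[1]`, the event is (index=1, removed=[2], added=[]);
# reconstructed[1:2] = [] yields [1, 3], matching the mutated TraitList.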
def check_index_normalized(self, index, length):
if isinstance(index, slice):
start, stop, step = index.start, index.stop, index.step
self.assertIsNotNone(start)
self.assertIsNotNone(stop)
self.assertIsNotNone(step)
# Check start and stop.
self.assertTrue(
0 <= start < stop <= length,
msg="start and stop of {} not normalized for length {}".format(
index, length
)
)
# Check step. This should always be > 1, since for step 1
# we can use a plain integer index instead.
self.assertTrue(step > 1, msg="step should be greater than 1")
# Check that the slice represents at least two elements
# (otherwise we should have a plain integer index instead)
self.assertTrue(
start + step < stop,
msg="slice represents fewer than 2 elements"
)
# Check that the stop is the smallest possible out of all
# equivalent stops.
self.assertTrue(
(stop - start) % step == 1,
msg="stop not normalised with respect to step"
)
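# Worked example: for a list of length 6, `del x[::-2]` touches indices
# 5, 3, 1 and is reported as the normalized slice(1, 6, 2): start=1,
# step=2, and stop=6 is the smallest stop with (stop - start) % step == 1.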
else:
self.assertTrue(
0 <= index <= length,
msg="index {} is not normalized for length {}".format(
index, length)
)
def all_slices(self, max_index=10):
"""
Generate all slices with bounded start, stop and step.
Parameters
----------
max_index : int
Maximum permitted absolute value of start, stop and step.
Yields
------
s : slice
Slice whose components are all either None, or bounded in
absolute value by max_index.
"""
valid_indices = [None] + list(range(-max_index, max_index + 1))
valid_steps = [step for step in valid_indices if step != 0]
for start in valid_indices:
for stop in valid_indices:
for step in valid_steps:
yield slice(start, stop, step)
def squares(n):
"""
Generic iterable without a valid len, for testing purposes.
Parameters
----------
n : int
Limit for computation.
Returns
-------
squares : generator
Generator yielding the first n squares.
"""
return (x * x for x in range(n))
class HasLengthConstrainedLists(HasTraits):
"""
Test class for testing list length validation.
"""
at_least_two = List(Int, [3, 4], minlen=2)
at_most_five = List(Int, maxlen=5)
unconstrained = List(Int)
class TestTraitListObject(unittest.TestCase):
def test_list_of_lists_pickle_with_notifier(self):
class Foo:
pass
tl = TraitListObject(
trait=List(),
object=Foo(),
name="foo",
value=(),
)
self.assertEqual(
[tl.notifier],
tl.notifiers
)
serialized = pickle.dumps(tl)
tl_deserialized = pickle.loads(serialized)
self.assertEqual(
[tl_deserialized.notifier],
tl_deserialized.notifiers
)
def test_init_too_small(self):
with self.assertRaises(TraitError):
HasLengthConstrainedLists(at_least_two=[1])
def test_init_too_large(self):
with self.assertRaises(TraitError):
HasLengthConstrainedLists(at_most_five=[1, 2, 3, 4, 5, 6])
def test_init_from_iterable(self):
class Foo:
pass
tl = TraitListObject(
trait=List(),
object=Foo(),
name="foo",
value=squares(5),
)
self.assertEqual(tl, list(squares(5)))
def test_delitem(self):
foo = HasLengthConstrainedLists(at_most_five=[1, 23])
del foo.at_most_five[1]
self.assertEqual(foo.at_most_five, [1])
def test_delitem_single_too_small(self):
foo = HasLengthConstrainedLists(at_least_two=[1, 2])
with self.assertRaises(TraitError):
del foo.at_least_two[0]
self.assertEqual(foo.at_least_two, [1, 2])
def test_delitem_slice_too_small(self):
foo = HasLengthConstrainedLists(at_least_two=[1, 2])
with self.assertRaises(TraitError):
del foo.at_least_two[:]
self.assertEqual(foo.at_least_two, [1, 2])
def test_delitem_from_empty(self):
foo = HasLengthConstrainedLists()
with self.assertRaises(IndexError):
del foo.unconstrained[0]
def test_iadd(self):
foo = HasLengthConstrainedLists(at_most_five=[1, 2])
foo.at_most_five += [6, 7, 8]
self.assertEqual(foo.at_most_five, [1, 2, 6, 7, 8])
def test_iadd_too_large(self):
foo = HasLengthConstrainedLists(at_most_five=[1, 2, 3, 4])
with self.assertRaises(TraitError):
foo.at_most_five += [6, 7, 8]
self.assertEqual(foo.at_most_five, [1, 2, 3, 4])
def test_iadd_from_iterable(self):
foo = HasLengthConstrainedLists(at_most_five=[1, 2])
foo.at_most_five += squares(3)
self.assertEqual(foo.at_most_five, [1, 2, 0, 1, 4])
def test_imul(self):
foo = HasLengthConstrainedLists(at_least_two=[1, 2, 3])
foo.at_least_two *= 2
self.assertEqual(foo.at_least_two, [1, 2, 3, 1, 2, 3])
def test_imul_too_small(self):
foo = HasLengthConstrainedLists(at_least_two=[1, 2, 3, 4])
with self.assertRaises(TraitError):
foo.at_least_two *= 0
self.assertEqual(foo.at_least_two, [1, 2, 3, 4])
def test_imul_too_large(self):
foo = HasLengthConstrainedLists(at_most_five=[1, 2, 3, 4])
with self.assertRaises(TraitError):
foo.at_most_five *= 2
self.assertEqual(foo.at_most_five, [1, 2, 3, 4])
def test_imul_negative_multiplier(self):
foo = HasLengthConstrainedLists(at_most_five=[1, 2, 3, 4])
foo.at_most_five *= -10
self.assertEqual(foo.at_most_five, [])
def test_setitem_index(self):
foo = HasLengthConstrainedLists(at_least_two=[1, 2, 3, 4])
foo.at_least_two[1] = 7
self.assertEqual(foo.at_least_two, [1, 7, 3, 4])
def test_setitem_slice(self):
foo = HasLengthConstrainedLists(at_least_two=[1, 2, 3, 4])
foo.at_least_two[1:] = [6, 7]
self.assertEqual(foo.at_least_two, [1, 6, 7])
def test_setitem_extended_slice(self):
foo = HasLengthConstrainedLists(at_least_two=[1, 2, 3, 4])
foo.at_least_two[1::2] = [6, 7]
self.assertEqual(foo.at_least_two, [1, 6, 3, 7])
def test_setitem_too_small(self):
foo = HasLengthConstrainedLists(at_least_two=[1, 2, 3, 4])
with self.assertRaises(TraitError):
foo.at_least_two[1:] = []
self.assertEqual(foo.at_least_two, [1, 2, 3, 4])
def test_setitem_too_large(self):
foo = HasLengthConstrainedLists(at_most_five=[1, 2, 3, 4])
with self.assertRaises(TraitError):
foo.at_most_five[2:] = [10, 11, 12, 13]
self.assertEqual(foo.at_most_five, [1, 2, 3, 4])
def test_setitem_from_iterable(self):
foo = HasLengthConstrainedLists(at_most_five=[1, 2])
foo.at_most_five[:1] = squares(4)
self.assertEqual(foo.at_most_five, [0, 1, 4, 9, 2])
def test_setitem_extended_slice_bad_length(self):
foo = HasLengthConstrainedLists(at_least_two=[1, 2, 3, 4])
with self.assertRaises(ValueError):
foo.at_least_two[1::2] = squares(3)
self.assertEqual(foo.at_least_two, [1, 2, 3, 4])
def test_setitem_item_validation_failure(self):
foo = HasLengthConstrainedLists(at_least_two=[1, 2, 3, 4])
with self.assertRaises(TraitError):
foo.at_least_two[2:] = [5.0, 6.0]
self.assertEqual(foo.at_least_two, [1, 2, 3, 4])
def test_setitem_stop_lt_start(self):
# Regression test for enthought/traits#994.
events = []
foo = HasLengthConstrainedLists(at_least_two=[1, 2, 3, 4])
foo.on_trait_change(
lambda event: events.append(event), "at_least_two_items")
# Note: items are inserted at position 4, not position 2.
foo.at_least_two[4:2] = [5, 6, 7]
self.assertEqual(len(events), 1)
event = events[0]
self.assertEqual(event.index, 4)
self.assertEqual(event.removed, [])
self.assertEqual(event.added, [5, 6, 7])
def test_append(self):
foo = HasLengthConstrainedLists(at_most_five=[1, 2, 3])
foo.at_most_five.append(6)
self.assertEqual(foo.at_most_five, [1, 2, 3, 6])
def test_append_too_large(self):
foo = HasLengthConstrainedLists(at_most_five=[1, 2, 3, 4, 5])
with self.assertRaises(TraitError):
foo.at_most_five.append(6)
self.assertEqual(foo.at_most_five, [1, 2, 3, 4, 5])
def test_clear(self):
foo = HasLengthConstrainedLists(at_most_five=[1, 2, 3, 4])
foo.at_most_five.clear()
self.assertEqual(foo.at_most_five, [])
def test_clear_too_small(self):
foo = HasLengthConstrainedLists(at_least_two=[1, 2, 3, 4])
with self.assertRaises(TraitError):
foo.at_least_two.clear()
self.assertEqual(foo.at_least_two, [1, 2, 3, 4])
def test_extend(self):
foo = HasLengthConstrainedLists(at_least_two=[1, 2, 3, 4])
foo.at_least_two.extend([10, 11])
self.assertEqual(foo.at_least_two, [1, 2, 3, 4, 10, 11])
def test_extend_too_large(self):
foo = HasLengthConstrainedLists(at_most_five=[1, 2, 3, 4])
with self.assertRaises(TraitError):
foo.at_most_five.extend([10, 11, 12])
self.assertEqual(foo.at_most_five, [1, 2, 3, 4])
def test_extend_from_iterable(self):
foo = HasLengthConstrainedLists(at_most_five=[1, 2])
foo.at_most_five.extend(squares(3))
self.assertEqual(foo.at_most_five, [1, 2, 0, 1, 4])
def test_insert(self):
foo = HasLengthConstrainedLists(at_least_two=[1, 2, 3, 4])
foo.at_least_two.insert(3, 16)
self.assertEqual(foo.at_least_two, [1, 2, 3, 16, 4])
def test_insert_too_large(self):
foo = HasLengthConstrainedLists(at_most_five=[1, 2, 3, 4, 5])
with self.assertRaises(TraitError):
foo.at_most_five.insert(3, 16)
with self.assertRaises(TraitError):
foo.at_most_five.insert(-10, 16)
with self.assertRaises(TraitError):
foo.at_most_five.insert(10, 16)
self.assertEqual(foo.at_most_five, [1, 2, 3, 4, 5])
def test_pop(self):
foo = HasLengthConstrainedLists(at_least_two=[1, 2, 6])
foo.at_least_two.pop()
self.assertEqual(foo.at_least_two, [1, 2])
def test_pop_too_small(self):
foo = HasLengthConstrainedLists(at_least_two=[1, 2])
with self.assertRaises(TraitError):
foo.at_least_two.pop()
with self.assertRaises(TraitError):
foo.at_least_two.pop(0)
# TraitError takes precedence over the IndexError for a bad index.
with self.assertRaises(TraitError):
foo.at_least_two.pop(10)
self.assertEqual(foo.at_least_two, [1, 2])
def test_pop_from_empty(self):
foo = HasLengthConstrainedLists()
with self.assertRaises(IndexError):
foo.unconstrained.pop()
with self.assertRaises(IndexError):
foo.unconstrained.pop(10)
def test_remove(self):
foo = HasLengthConstrainedLists(at_least_two=[1, 2, 6, 4])
foo.at_least_two.remove(2)
self.assertEqual(foo.at_least_two, [1, 6, 4])
def test_remove_too_small(self):
foo = HasLengthConstrainedLists(at_least_two=[1, 2])
with self.assertRaises(TraitError):
foo.at_least_two.remove(1)
with self.assertRaises(TraitError):
foo.at_least_two.remove(2.0)
# TraitError from the length violation takes precedence over
# the ValueError for the bad value.
with self.assertRaises(TraitError):
foo.at_least_two.remove(10)
self.assertEqual(foo.at_least_two, [1, 2])
def test_remove_from_empty(self):
foo = HasLengthConstrainedLists()
with self.assertRaises(ValueError):
foo.unconstrained.remove(35)
def test_length_violation_error_message(self):
# Regression test for enthought/traits#1170
foo = HasLengthConstrainedLists(at_least_two=[1, 2])
with self.assertRaises(TraitError) as exc_cm:
foo.at_least_two.remove(1)
exc_message = str(exc_cm.exception)
self.assertIn("'at_least_two' trait", exc_message)
self.assertIn("HasLengthConstrainedLists instance", exc_message)
self.assertIn("an integer", exc_message)
self.assertIn("at least 2 items", exc_message)
def test_dead_object_reference(self):
foo = HasLengthConstrainedLists(at_most_five=[1, 2, 3, 4])
list_object = foo.at_most_five
del foo
list_object.append(5)
self.assertEqual(list_object, [1, 2, 3, 4, 5])
with self.assertRaises(TraitError):
list_object.append(4)
|
#!/usr/bin/python
import urllib2
import json, csv
import subprocess
import sys
import platform
import getopt
import re
all_flag = False
download_flag = False
filename=None
events=[]
try:
opts, args = getopt.getopt(sys.argv[1:],'af:d',['all','file=','download'])
for o, a in opts:
if o in ('-a','--all'):
all_flag=True
if o in ('-f','--file'):
filename=a
if o in ('-d','--download'):
download_flag=True
except getopt.GetoptError, err:
print("parse error: %s\n" %(str(err)))
exit(-2)
if filename is None:
map_file_raw=urllib2.urlopen('https://download.01.org/perfmon/mapfile.csv')
map_dict = csv.DictReader(map_file_raw)
map_file = []
paths = dict()
while True:
try:
map_file.append(map_dict.next())
except StopIteration:
break
# Get the current CPU
if platform.system() == 'CYGWIN_NT-6.1':
p = subprocess.Popen(['./pcm-core.exe -c'],stdout=subprocess.PIPE,shell=True)
elif platform.system() == 'Windows':
p = subprocess.Popen(['pcm-core.exe -c'],stdout=subprocess.PIPE,shell=True)
else:
p = subprocess.Popen(['./pcm-core.x -c'],stdout=subprocess.PIPE,shell=True)
(output, err) = p.communicate()
p_status = p.wait()
# Find the corresponding event files
for model in map_file:
if re.search(model['Family-model'], output):
paths[model['EventType']] = model['Filename']
print (model)
# Check if we at least found core events
if not "core" in paths:
print ('no core event found for %s CPU, program abort...' % (output))
exit(-1)
for eventType in paths:
path = paths[eventType]
json_data=urllib2.urlopen('https://download.01.org/perfmon'+path)
events_data=json.load(json_data)
if download_flag:
with open(path.split('/')[-1],'w') as outfile:
json.dump(events_data, outfile, sort_keys=True, indent=4)
events += events_data
else:
for f in filename.split(','):
print f
events.extend(json.load(open(f)))
if all_flag:
for event in events:
if 'EventName' in event and 'BriefDescription' in event:
print (event['EventName']+':'+event['BriefDescription'])
sys.exit(0)
name=raw_input("Event to query (empty enter to quit):")
while(name != ''):
for event in events:
if 'EventName' in event and name.lower() in event['EventName'].lower():
print (event['EventName']+':'+event['BriefDescription'])
for ev_code in event['EventCode'].split(', '):
print ('cpu/umask=%s,event=%s,name=%s%s%s%s%s%s/' % (
event['UMask'], ev_code, event['EventName'],
(',offcore_rsp=%s' % (event['MSRValue'])) if 'MSRValue' in event and event['MSRValue'] != '0' else '',
(',inv=%s' % (event['Invert'])) if 'Invert' in event and event['Invert'] != '0' else '',
(',any=%s' % (event['AnyThread'])) if 'AnyThread' in event and event['AnyThread'] != '0' else '',
(',edge=%s'% (event['EdgeDetect'])) if 'EdgeDetect' in event and event['EdgeDetect'] != '0' else '',
(',cmask=%s' % (event['CounterMask'])) if 'CounterMask' in event and event['CounterMask'] != '0' else ''))
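# Illustrative output line (the field values here are hypothetical; real
# ones come from the downloaded JSON event files), e.g.:
# cpu/umask=0x01,event=0xD1,name=MEM_LOAD_RETIRED.L1_HIT/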
name=raw_input("Event to query (empty enter to quit):")
|
import sys
bin16 = lambda x : ''.join(reversed( [str((x >> i) & 1) for i in range(16)] ) )
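# e.g. bin16(5) == '0000000000000101' (16 bits, MSB first)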
def print_comp_error(ins,data,line):
print(f"Compilation Error! Intruction {ins} has wrong data: {data} Line: {line}")
return
def decode_instr(instruction, data, instruction_line):
# The opcode comparisons below use the short alias `instr`; binding it here
# keeps the function self-contained (it previously relied on a same-named
# global in __main__).
instr = instruction
output = 0b0
if instr == 'JMP':
output |= 0b1000000000000000000000
try:
output |= int(data)
except :
print_comp_error(instruction, data, instruction_line)
elif instr == 'JZE':
output |= 0b1010000000000000000000
try:
output |= int(data)
except :
print_comp_error(instruction, data, instruction_line)
elif instr == 'JPO':
output |= 0b1100000000000000000000
try:
output |= int(data)
except :
print_comp_error(instruction, data, instruction_line)
elif instr == 'JCY':
output |= 0b1110000000000000000000
try:
output |= int(data)
except :
print_comp_error(instruction, data, instruction_line)
elif instr == 'MOM':
if data.split(',')[1] == 'W':
output |= 0b0100000000000000000000
try:
output |= int(data.split(',')[0])
except :
print_comp_error(instruction, data, instruction_line)
elif data.split(',')[0] == 'W':
output |= 0b0101000000000000000000
try:
output |= int(data.split(',')[1])
except :
print_comp_error(instruction, data, instruction_line)
else:
print_comp_error(instruction, data, instruction_line)
elif instr == 'ADW':
output |= 0b0110000000000000000000
try:
output |= int(data.split(',')[0])<<5
output |= int(data.split(',')[1])
except:
print_comp_error(instruction, data, instruction_line)
elif instr == 'BSR':
output |= 0b0111000000000000000000
try:
output |= int(data)
except:
print_comp_error(instruction, data, instruction_line)
elif instr == 'MOV' :
if data.split(',')[1] != 'W' and data.split(',')[0] != 'W':
output |= 0b0010000000000000000000
try:
output |= int(data.split(',')[0])<<5
output |= int(data.split(',')[1])
except:
print_comp_error(instruction, data, instruction_line)
elif data.split(',')[1] == 'W' and data.split(',')[0] != 'W':
output |= 0b0011000000000000000000
try:
output |= int(data.split(',')[0])<<5
except:
print_comp_error(instruction, data, instruction_line)
elif data.split(',')[0] == 'W':
output |= 0b0000100000000000000000
try:
output |= int(data.split(',')[1])
except:
print_comp_error(instruction, data, instruction_line)
else:
print_comp_error(instruction, data, instruction_line)
elif instr == 'MOK':
if data.split(',')[1][0] == '#':
output |= 0b0001000000000000000000
try:
output |= int(bin16(int(data.split(',')[1][1:])),2)
except:
print_comp_error(instruction, data, instruction_line)
else :
print_comp_error(instruction, data, instruction_line)
elif instr == 'ANK':
output |= 0b0001010000000000000000
try:
output |= int(bin16(int(data.split(',')[1][1:])),2)
except:
print_comp_error(instruction, data, instruction_line)
elif instr == 'ORK':
output |= 0b0001100000000000000000
try:
output |= int(bin16(int(data.split(',')[1][1:])),2)
except:
print_comp_error(instruction, data, instruction_line)
elif instr == 'ADK':
output |= 0b0001110000000000000000
try:
output |= int(bin16(int(data.split(',')[1][1:])),2)
except:
print_comp_error(instruction, data, instruction_line)
elif instr == 'ANR':
output |= 0b0000101000000000000000
try:
output |= int(data.split(',')[1])
except:
print_comp_error(instruction, data, instruction_line)
elif instr == 'ORR':
output |= 0b0000110000000000000000
try:
output |= int(data.split(',')[1])
except:
print_comp_error(instruction, data, instruction_line)
elif instr == 'ADR':
output |= 0b0000111000000000000000
try:
output |= int(data.split(',')[1])
except:
print_comp_error(instruction, data, instruction_line)
elif instr == 'CPL':
output |= 0b0000000000000000000000
elif instr == 'CLR':
output |= 0b0000001000000000000000
elif instr == 'SET':
output |= 0b0000010000000000000000
elif instr == 'RET':
output |= 0b0000011000000000000000
elif instr == 'NOP':
output |= 0b0111111111111111111111
else:
print(f"Compilation Error! Instruccion {instruction} no exists! Line {instruction_line}")
return output
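# Worked example: decode_instr('JMP', '5', 1) sets the JMP opcode bits and
# ORs in the operand, giving 0b1000000000000000000101.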
if __name__ == "__main__":
f = open(f"{sys.argv[1]}.ev", "r")
string = f.read()
instructions = string.splitlines()
f.close()
out_file = open(f"Program_memory.mif", "w")
out_file.write("""-- Compilator made By G3 EV21 ITBA 2021\n
WIDTH=22;
DEPTH=2048;
ADDRESS_RADIX=UNS;
DATA_RADIX=BIN;\n
CONTENT BEGIN\n""")
line = f" 0 : 0000000000000000000000;\n"
out_file.write(line)
if len(instructions) <= 2047:
for i, instruction in enumerate(instructions):
line = instruction.split()
if (len(line) >= 2 or (len(line) == 1 and (line[0] == 'RET' or line[0] == 'NOP'))) and not line[0].startswith('//'):
if line[0] == 'RET' or line[0] == 'NOP':
instr = line[0]
data = ""
else:
instr, data = [line[0], line[1]]
if len(line) > 2 and not line[2].startswith('//'):
print(f"Compilation error at line {i+1}. Comments begin with '//'")
temp_string = bin(decode_instr(instr, data, i+1))[2:]
temp_string = temp_string.rjust(22,'0')
line = f" {i+1} : {temp_string}; -- {instr} {data}\n"
out_file.write(line)
# After the last instruction, zero-fill the remaining program memory.
if i == len(instructions) - 1 and i + 2 <= 2047:
line = f" [{i+2}..2047] : 0000000000000000000000;\n"
out_file.write(line)
else:
if len(line) >= 1:
if not line[0].startswith('//'):
print(f"Compilation error at line {i+1}. Instruction does not exist; comments begin with '//'")
out_file.write("END;")
out_file.close()
print("Compilation Success!")
else:
out_file.close()
print(f"The actual Program is too long for the EV21, it supports a maximum of 2047 instructions and the current program is {len(instructions)}!") |
<filename>tests/test_scm_manager.py<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# The MIT License
#
# Copyright (c) 2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import json
import os
from distutils.version import LooseVersion
import pytest
from mock import mock
from tests import AbstractTestManager, ReturnValueMixin
from yagocd.resources import scm
@pytest.fixture()
def manager(session_fixture):
return scm.SCMManager(session=session_fixture)
class BaseManager(AbstractTestManager):
@pytest.fixture()
def scm_material_foo(self, tests_dir):
path = os.path.join(tests_dir, 'fixtures/resources/scm/scm-material-foo.json')
return json.load(open(path))
@pytest.fixture()
def scm_material_bar(self, tests_dir):
path = os.path.join(tests_dir, 'fixtures/resources/scm/scm-material-bar.json')
return json.load(open(path))
@pytest.fixture()
def scm_material_baz(self, tests_dir):
path = os.path.join(tests_dir, 'fixtures/resources/scm/scm-material-baz.json')
return json.load(open(path))
@pytest.fixture()
def prepare_scm_material(self, manager, my_vcr, scm_material_foo, scm_material_bar):
with my_vcr.use_cassette("scm/prepare"):
manager.create(config=scm_material_foo)
manager.create(config=scm_material_bar)
class TestList(BaseManager, ReturnValueMixin):
@pytest.fixture()
def _execute_test_action(self, manager, my_vcr, prepare_scm_material):
with my_vcr.use_cassette("scm/list") as cass:
return cass, manager.list()
@pytest.fixture()
def expected_request_url(self):
return '/go/api/admin/scms'
@pytest.fixture()
def expected_request_method(self):
return 'GET'
@pytest.fixture()
def expected_return_type(self):
return list
@pytest.fixture()
def expected_return_value(self):
def check_value(result):
assert all(isinstance(i, scm.SCMMaterial) for i in result)
return check_value
class TestGet(BaseManager, ReturnValueMixin):
NAME = 'bar'
@pytest.fixture()
def _execute_test_action(self, manager, my_vcr, prepare_scm_material):
with my_vcr.use_cassette("scm/get_{}".format(self.NAME)) as cass:
return cass, manager.get(self.NAME)
@pytest.fixture()
def expected_request_url(self):
return '/go/api/admin/scms/{}'.format(self.NAME)
@pytest.fixture()
def expected_request_method(self):
return 'GET'
@pytest.fixture()
def expected_return_type(self):
return scm.SCMMaterial
@pytest.fixture()
def expected_return_value(self):
def check_value(result):
assert result.data.name == self.NAME
assert result.data.id == "scm-id-bar"
return check_value
class TestCreate(BaseManager, ReturnValueMixin):
NAME = 'baz'
@pytest.fixture()
def _execute_test_action(self, manager, my_vcr, scm_material_baz):
with my_vcr.use_cassette("scm/create_{}".format(self.NAME)) as cass:
return cass, manager.create(config=scm_material_baz)
@pytest.fixture()
def expected_request_url(self):
return '/go/api/admin/scms'
@pytest.fixture()
def expected_request_method(self):
return 'POST'
@pytest.fixture()
def expected_return_type(self):
return scm.SCMMaterial
@pytest.fixture()
def expected_return_value(self):
def check_value(result):
assert result.data.name == self.NAME
assert result.data.id == "scm-id-baz"
return check_value
class TestUpdate(BaseManager, ReturnValueMixin):
NAME = 'bar'
NEW_VALUE = 'NEW-DUMMY-VALUE'
@pytest.fixture()
def _execute_test_action(self, manager, my_vcr, prepare_scm_material, scm_material_bar):
with my_vcr.use_cassette("scm/prepare_update_{}".format(self.NAME)):
original = manager.get(self.NAME) # noqa
with my_vcr.use_cassette("scm/update_{}".format(self.NAME)) as cass:
scm_material_bar['configuration'][0]['value'] = self.NEW_VALUE
return cass, manager.update(
name=self.NAME,
config=scm_material_bar,
etag=original.etag
)
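# The update exercises optimistic locking: the ETag fetched by the
# preparatory GET is echoed back alongside the modified config.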
@pytest.fixture()
def expected_request_url(self):
return '/go/api/admin/scms/{}'.format(self.NAME)
@pytest.fixture()
def expected_request_method(self, manager):
if LooseVersion(manager._session.server_version) <= LooseVersion('16.9.0'):
return 'PATCH'
return 'PUT'
@pytest.fixture()
def expected_return_type(self):
return scm.SCMMaterial
@pytest.fixture()
def expected_return_value(self):
def check_value(result):
assert result.data.configuration[0].value == self.NEW_VALUE
return check_value
class TestMagicMethods(object):
@mock.patch('yagocd.resources.scm.SCMManager.get')
def test_indexed_based_access(self, get_mock, manager):
name = mock.MagicMock()
_ = manager[name] # noqa
get_mock.assert_called_once_with(name=name)
@mock.patch('yagocd.resources.scm.SCMManager.list')
def test_iterator_access(self, list_mock, manager):
for _ in manager:
pass
list_mock.assert_called_once_with()
|
# # Imports
from timeit import default_timer as timer
import numpy as np
import pyopencl as cl
import handybeam.tx_array
# # Class
class RectPropMixin():
"""This is a mixin class for the compiled OpenCL kernel _hbk_rect_propagator. It assigns
the compiled OpenCL kernel to this Python class which can then be called by the appropriate
sampler class.
"""
def __init__(self):
"""This method intialises an instance of the mixin class RectPropMixin.
"""
self._hbk_rect_propagator = None
def _register_rect_propagator(self):
"""
This method assigns the compiled OpenCL propagator kernel _hbk_rect_propagator to this
class and then sets the correct data types for the input to the assigned kernel.
"""
self._hbk_rect_propagator = self.cl_system.compiled_kernels._hbk_rect_propagator
self._hbk_rect_propagator.set_scalar_arg_dtypes([None,None,
np.float32,np.float32,np.float32,
np.float32,np.float32,np.float32,
np.float32,np.float32,np.float32,
np.float32,np.float32,np.float32,
np.int32])
def rect_propagator(self,
tx_array: handybeam.tx_array.TxArray,
N_x,
N_y,
delta,
x0, y0, z0,
vx1, vy1, vz1,
vx2, vy2, vz2,
local_work_size = (1,1,1),
print_performance_feedback = None
):
"""This method simulates the acoustic pressure field on a rectilinear sampling grid. It does this by
initialising a pressure field buffer on the CPU. It then passes the required information
to the appropriate OpenCL kernel and executes the computation of the pressure field on the GPU. This
data is then copied over to the pressure field buffer on the CPU.
Parameters
----------
tx_array : handybeam.tx_array.TxArray
This is a handybeam tx_array class.
N_x : numpy int
This assigns the number of sampling points along the x-axis of the sampling grid.
N_y : numpy int
This assigns the number of sampling points along the y-axis of the sampling grid.
delta : numpy float
Distance between adjacent sampling grid points.
x0 : numpy float
The x-coordinate of the origin of the sampling grid.
y0 : numpy float
The y-coordinate of the origin of the sampling grid.
z0 : numpy float
The z-coordinate of the origin of the sampling grid.
vx1 : numpy float
The x-component of the first unit vector that parameterises the sampling grid.
vy1 : numpy float
The y-component of the first unit vector that parameterises the sampling grid.
vz1 : numpy float
The z-component of the first unit vector that parameterises the sampling grid.
vx2 : numpy float
The x-component of the second unit vector that parameterises the sampling grid.
vy2 : numpy float
The y-component of the second unit vector that parameterises the sampling grid.
vz2 : numpy float
The z-component of the second unit vector that parameterises the sampling grid.
local_work_size : tuple
Tuple containing the local work sizes for the GPU.
print_performance_feedback : boolean
Boolean value determining whether or not to output the GPU performance.
"""
# Set the types correctly.
# N_x = int(N_x) # note: the types must be OK before.
# N_y = int(N_y)
# Start the timer to measure wall time.
t_start = timer()
# Set the global work size for the GPU.
global_work_size = (N_x, N_y, 1)
# hardcoded: attempt to optimise the work size by finding the largest work group that will divide the work equally.
# find a highest divisor of the N_x that is smaller than max_local_worksize
max_local_worksize = 1024 # TODO: currently hard-coded, later on grab straight from the device.
current_local_worksize = max_local_worksize
while current_local_worksize > 0:
if N_x % current_local_worksize == 0:
# print(' divisor ', i)
break
current_local_worksize = current_local_worksize - 1
local_work_size = (current_local_worksize, 1, 1)
if print_performance_feedback:
print("global_work_size: {}".format(global_work_size))
print("local_work_size: {}".format(local_work_size))
# Determine the limits of the sampling grid.
x_lim = np.float32(N_x/2)
y_lim = np.float32(N_y/2)
# Number of transducers
tx_count = int(tx_array.element_count)  # np.int alias was removed from NumPy
# Create a numpy array, of the correct type, to store the real and imaginary pressure values for
# the acoustic field. The format is:
# ( x,y,z,Real(p), Imag(p) )
py_out_buffer = np.zeros((N_x, N_y, 5), dtype=np.float32)
# Create a buffer on the GPU to store the pressure values for the acoustic field.
cl_field = cl.Buffer(self.cl_system.context, cl.mem_flags.WRITE_ONLY, py_out_buffer.data.nbytes)
# Create a buffer on the GPU to store the transducer data
# and copy the data from the CPU (tx_array.tx_array_element_descriptor)
# to the GPU.
# note! hostbuf must be a np.float32 --
# it is not checked here (for performance), just do it correctly the first time around!
cl_tx_element_array_descriptor = cl.Buffer(
self.cl_system.context,
cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
hostbuf=tx_array.tx_array_element_descriptor)
# Create and execute an OpenCL event with the initialised queue, work sizes and data.
cl_profiling_kernel_event = self._hbk_rect_propagator( self.cl_system.queue,
global_work_size,
local_work_size,
cl_tx_element_array_descriptor,
cl_field,
np.float32(x_lim),
np.float32(y_lim),
np.float32(delta),
np.float32(x0),
np.float32(y0),
np.float32(z0),
np.float32(vx1),
np.float32(vy1),
np.float32(vz1),
np.float32(vx2),
np.float32(vy2),
np.float32(vz2),
np.int32(tx_count)
)
# Copy the results from the GPU buffer to the associated CPU buffer.
cl_profiling_mem_copy_event = cl.enqueue_copy(self.cl_system.queue, py_out_buffer, cl_field)
# Block until the kernel event has completed and then until the copy event has completed.
cl_profiling_kernel_event.wait()
cl_profiling_mem_copy_event.wait()
# End the timer to measure the wall time.
t_end = timer()
t_elapsed_wall_time = t_end - t_start
# If performance feedback requested then print.
if print_performance_feedback:
ray_count = float(tx_array.element_count * N_x * N_y)
output_buffer_size = py_out_buffer.data.nbytes
self.print_performance_feedback(cl_profiling_kernel_event,
cl_profiling_mem_copy_event,
t_elapsed_wall_time,
ray_count,
output_buffer_size)
return py_out_buffer
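# Illustrative usage sketch (the names below are assumptions -- any sampler
# class that mixes in RectPropMixin and owns a `cl_system` would do; this is
# not a documented API):
#
#   field = sampler.rect_propagator(
#       tx_array=array, N_x=256, N_y=256, delta=1e-3,
#       x0=0.0, y0=0.0, z0=0.1,
#       vx1=1.0, vy1=0.0, vz1=0.0,  # first in-plane unit vector
#       vx2=0.0, vy2=1.0, vz2=0.0,  # second in-plane unit vector
#       print_performance_feedback=True)
#   # field[ix, iy] == (x, y, z, Re(p), Im(p)) at each grid point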
|
import asyncore
import matplotlib.pyplot as plt
import zlib,socket
import numpy as np
import MFSKDemodulator, DePacketizer, MFSKSymbolDecoder, time, logging, sys
from scipy.io import wavfile
import MFSKModulator,Packetizer
import sounddevice as sd
import soundfile as sf
from scipy.io import wavfile
from thread import start_new_thread
import StringIO
# Network-related variables
Connection_status = False
compression = 1
packet_size = 8192
port_host = 8080
#Audio Related variables
symbol_rate = 15.625
base_freq = 1500
bits_per_symbol = 4
preamble_tones = [0,15,0,15,0,15,0,15,0,15,0,15,0,15,0,15,0,15,0,15,0,15,0,15,0,15,0,15,0,15]
# Placeholders initialised at runtime
symb_dec = ''
packet_extract = ''
handler = ''
recordable_file = "test.wav"
def zlib_compress(text):
text_size=sys.getsizeof(text)
compressed = zlib.compress(text)
csize=sys.getsizeof(compressed)
return compressed
def zlib_decompress(compressed):
decompressed=zlib.decompress(compressed)
return decompressed
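# Round-trip sketch: zlib_decompress(zlib_compress('payload')) == 'payload'
# (Python 2 str, i.e. bytes).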
def recover_packet(payload):
print 'Packet received:',payload
if not Connection_status:
handler.handle_sent_data(payload)
def parse_symbol(tone):
tone_bits = symb_dec.tone_to_bits(tone['symbol'])
packet_extract.process_data(tone_bits)
def callback_mic(indata, frames, time, status):
wavhndl_to_data(indata.copy())
def launch_record():
# Make sure the file is opened before recording anything:
with sf.SoundFile(recordable_file, mode='x', samplerate=8000,
channels=1) as file:
with sd.InputStream(samplerate=8000, device=0,
channels=1, callback=callback_mic):
print('#' * 80)
def wavhndl_to_data(data=None):
global symb_dec,packet_extract
symb_dec = MFSKSymbolDecoder.MFSKSymbolDecoder(num_tones=16, gray_coded=True)
# De-Packetizer
packet_extract = DePacketizer.DePacketizer(callback=recover_packet)
#get symbol back
demod = MFSKDemodulator.MFSKDemodulator(callback=parse_symbol)
# Fall back to the pre-generated file when no samples are passed in
# (callback_mic passes live microphone data).
if data is None:
fs, data = wavfile.read('generated_MFSK16_packets.wav')
# Convert to float
if(data.dtype == np.int16):
data = data.astype(np.float)/2**16
elif(data.dtype == np.int32):
data = data.astype(np.float)/2**32
# Feed the demod the entire file.
demod.consume(data)
def data_to_wavhndl(data):
mod = MFSKModulator.MFSKModulator(symbol_rate = symbol_rate, tone_spacing = symbol_rate, start_silence=5, base_freq=base_freq)
p = Packetizer.Packetizer()
mod.modulate_symbol(preamble_tones)
#adding msg together
fs = p.pack_message(data)
tx_bits = np.unpackbits(np.fromstring(fs, dtype=np.uint8))
print(str(tx_bits))
mod.modulate_bits(bits_per_symbol,tx_bits)
out = mod.get_mem()
return out
class data_recv(asyncore.dispatcher_with_send):
def handle_read(self):
data = self.recv(packet_size)
modulated = data_to_wavhndl(data)
sd.play(modulated[0],modulated[1])
sd.wait() #wait for data to play
print 'stat:',sd.get_status()
if data:
print ":Transmitting ("+str(len(modulated[0]))+") to dest"
print "Array:",modulated
print "data sent:",data
def handle_close(self):
self.close()
# Without the global declaration this assignment only created a local.
global Connection_status
Connection_status = False
def handle_sent_data(self,data):
self.send(data)
class proxy(asyncore.dispatcher):
def __init__(self, host, port):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((host, port))
self.listen(5)
def handle_accept(self):
global handler, Connection_status
Connection_status = True
pair = self.accept()
if pair is not None:
sock, addr = pair
print 'Incoming connection from %s' % repr(addr)
handler = data_recv(sock)
#slk = data_to_wavhndl("silence")
#sd.play(slk[0],slk[1])
#sd.wait()
#wavfile.write('generated_MFSK16_packets.wav',slk[1],slk[0])
#wavhndl_to_data()
server = proxy('localhost', port_host)
start_new_thread(launch_record,())
asyncore.loop() |
<filename>precipitation_nowcasting/Fugaku/resnet_channels_hvd.py
# coding: utf-8
import os,glob,re
import numpy as np
import tensorflow as tf
from numpy.random import randint,choice
from metrics import *
from multiprocessing import Pool
import itertools
os.environ['HDF5_USE_FILE_LOCKING'] = 'FALSE'
folder="data3d"
dim=(56,320,320)
steps=5
forecast=240
pace=int(forecast/(steps*2))
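# With forecast=240 and steps=5, pace=24: the input frames sit at offsets
# 0, 24, ..., 96 from the sequence start, the target at offset 240.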
ndim=(56,320,320,1)
out_dim=320*320*56
epochs=5
drop=0.05
print('######################################################################')
print(forecast)
print('######################################################################')
def process(i):
try:
a=np.load("{}/{}.npy".format(folder,i))
return a
except:
print(i)
bce = tf.keras.losses.binary_crossentropy
msle=tf.keras.losses.mean_squared_logarithmic_error
rgl=tf.keras.regularizers.l1(10e-10)
# Encoder
inp,inps=[],[]
for i in range(steps):
inp.append(tf.keras.layers.Input(shape=ndim))
inps.append(tf.keras.layers.Conv3D(8, (3,3,3), activation='relu', padding='same', activity_regularizer=rgl)(inp[i]))
res1,res2=[],[]
layers={}
layers[1]=tf.keras.layers.add(inps)
#layers[2]=tf.keras.layers.Conv3D(16, (3,3,3), activation='relu', padding='same', activity_regularizer=rgl)(layers[1])
layers[3]=tf.keras.layers.Conv3D(16, (3,3,3), activation='relu', padding='same', activity_regularizer=rgl)(layers[1])
res1.append(layers[3])
layers[4]=tf.keras.layers.MaxPooling3D((2,2,2), padding='valid')(layers[3])
layers[5]=tf.keras.layers.Dropout(drop)(layers[4])
#layers[6]=tf.keras.layers.Conv3D(64, (3,3,3), activation='relu', padding='same', activity_regularizer=rgl)(layers[5])
layers[7]=tf.keras.layers.Conv3D(32, (3,3,3), activation='relu', padding='same', activity_regularizer=rgl)(layers[5])
res2.append(layers[7])
layers[8]=tf.keras.layers.MaxPooling3D((2,2,2), padding='valid')(layers[7])
#layers[9]=tf.keras.layers.Conv3D(16, (3,3,3), activation='relu', padding='same', activity_regularizer=rgl)(layers[8])
code=tf.keras.layers.Conv3D(1, (3,3,3), activation='relu', padding='same', activity_regularizer=rgl)(layers[8])
encoder = tf.keras.models.Model(inp, code)
encoder.summary()
# Decoder
code2=tf.keras.layers.UpSampling3D((2,2,2))(code)
layers[10]=tf.keras.layers.Conv3D(32, (3,3,3), activation='relu', padding='same', activity_regularizer=rgl)(code2)
#layers[11]=tf.keras.layers.Conv3D(64, (3,3,3), activation='relu', padding='same', activity_regularizer=rgl)(layers[10])
layers[12]=tf.keras.layers.add([res2[0], layers[10]])
layers[13]=tf.keras.layers.UpSampling3D((2,2,2))(layers[12])
#layers[14]=tf.keras.layers.Conv3D(32, (3,3,3), activation='relu', padding='same', activity_regularizer=rgl)(layers[13])
layers[15]=tf.keras.layers.Conv3D(16, (3,3,3), activation='relu', padding='same', activity_regularizer=rgl)(layers[13])
layers[16] = tf.keras.layers.add([res1[0], layers[15]])
layers[17]= tf.keras.layers.Conv3D(1, (3,3,3), activation='relu', padding='same', activity_regularizer=rgl)(layers[16])
#layers[18]=tf.keras.layers.Reshape(dim)(layers[17])
decoded=layers[17]
autoencoder = tf.keras.models.Model(inp, decoded)
#autoencoder.summary()
autoencoder.compile(optimizer='adadelta', loss='mean_squared_error', metrics=[TP,FN,FP])
checkpoint_path = "training/cp_{}.ckpt".format(forecast)
checkpoint_dir = os.path.dirname(checkpoint_path)
try:
print("find weights")
autoencoder.load_weights(checkpoint_path)
except:
print('No weights file')
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
save_weights_only=True,
verbose=1)
#####################################################################################################
pool=Pool()
def prepare1(prange,k):
data=pool.map(process, prange)
data=np.array([np.where(im<0, 0, im) for im in data])
data=np.array([a[:-1,:-1,:-1] for a in data])
data=np.array([a.reshape(dim) for a in data])
data=[data[i:i+k] for i in range(0,len(data),k)]
data=[x.reshape((56,320,320,k)) for x in data]
data=np.array(data)#*100000
return data
indices=np.load('indices.npy')
for sub in ['obs','truth','forecast']:
path='ChResNET/{}/{}'.format(forecast,sub)
os.makedirs(path, exist_ok = True)
def train():
print("##################################################")
prange=np.random.randint(0,len(indices),50)
prange=indices[prange]
din=[]
for i in range(steps):
rin=[y+i*pace for y in prange]
din.append(prepare1(rin,1))
rout=[x+forecast for x in prange]
dout=prepare1(rout,1)
print("##################################################")
history=autoencoder.fit(din,dout,epochs=epochs,validation_split=0.02, batch_size=32, callbacks=[cp_callback])
#autoencoder.save_weights(model_file)
def evaluate():
for e in range(0,57000,1000):
print(e)
print("##################################################")
prange=[e]
din=[]
for i in range(steps):
rin=[y+i*pace for y in prange]
din.append(prepare1(rin,1))
rout=[x+forecast for x in prange]
dout=prepare1(rout,1)
print("##################################################")
res =autoencoder.predict(din,batch_size=1)
np.save('ChResNET/{}/obs/{}.npy'.format(forecast,e),np.array(din))
np.save('ChResNET/{}/truth/{}.npy'.format(forecast,e),dout)
np.save('ChResNET/{}/forecast/{}.npy'.format(forecast,e),res)
while True:
train()
evaluate()
|
import logging
from typing import TYPE_CHECKING, Optional
import numpy as np
from .base import BaseCallback
if TYPE_CHECKING:
from ..base import BaseTuner
class EarlyStopping(BaseCallback):
"""
Callback to stop training when a monitored metric has stopped improving.
A `model.fit()` training loop will check at the end of every epoch whether
    the monitored metric is no longer improving.
"""
def __init__(
self,
monitor: str = 'val_loss',
mode: str = 'auto',
patience: int = 2,
        min_delta: float = 0,
baseline: Optional[float] = None,
verbose: bool = False,
):
"""
        :param monitor: if `monitor='train_loss'` the best model is selected
            according to the training loss; if `monitor='val_loss'` the best
            model is selected according to the validation loss
        :param mode: one of {'auto', 'min', 'max'}. The decision whether the
            current value counts as an improvement is based on either the
            maximization or the minimization of the monitored quantity. For
            `val_acc` this should be `max`, for `val_loss` this should be
            `min`, etc. In `auto` mode, the mode is set to `max` if the
            monitored quantity contains 'acc' and to `min` otherwise.
        :param patience: integer, the number of epochs without improvement
            after which training is stopped, i.e. with `patience=2` training
            stops if the model does not improve for 2 consecutive epochs.
        :param min_delta: minimum change in the monitored quantity to qualify
            as an improvement, i.e. an absolute change of less than
            `min_delta` counts as no improvement.
        :param baseline: baseline value for the monitored quantity. Training
            will stop if the model doesn't show improvement over the baseline.
        :param verbose: whether to log score improvement events
"""
self._logger = logging.getLogger('finetuner.' + self.__class__.__name__)
self._logger.setLevel(logging.INFO if verbose else logging.WARNING)
self._monitor = monitor
self._mode = mode
self._patience = patience
self._min_delta = min_delta
self._baseline = baseline
self._train_losses = []
self._validation_losses = []
self._epoch_counter = 0
if mode not in ['auto', 'min', 'max']:
            self._logger.warning('mode %s is unknown, falling back to auto mode.', mode)
mode = 'auto'
if mode == 'min':
self._monitor_op = np.less
self._best = np.Inf
elif mode == 'max':
self._monitor_op = np.greater
self._best = -np.Inf
else:
            if 'acc' in self._monitor:  # adjust this check as other metrics are added
self._monitor_op = np.greater
self._best = -np.Inf
else:
self._monitor_op = np.less
self._best = np.Inf
if self._monitor_op == np.greater:
self._min_delta *= 1
else:
self._min_delta *= -1
def on_epoch_end(self, tuner: 'BaseTuner'):
"""
Called at the end of the training epoch. Checks if the model has improved
or not for a certain metric `monitor`. If the model hasn't improved for
more than `patience` epochs, the training is stopped
"""
self._check(tuner)
self._train_losses = []
self._validation_losses = []
def on_train_batch_end(self, tuner: 'BaseTuner'):
self._train_losses.append(tuner.state.current_loss)
def on_val_batch_end(self, tuner: 'BaseTuner'):
self._validation_losses.append(tuner.state.current_loss)
def _check(self, tuner):
"""
Checks if training should be stopped. If `True`
it stops the training.
"""
current_value = None
if self._baseline is not None:
self._best = self._baseline
if self._monitor == 'val_loss':
current_value = np.mean(self._validation_losses)
elif self._monitor == 'train_loss':
current_value = np.mean(self._train_losses)
else:
self._logger.warning(f'Metric {self._monitor} not available, skipping.')
return
if self._monitor_op(current_value - self._min_delta, self._best):
self._logger.info(f'Model improved from {self._best} to {current_value}')
self._best = current_value
self._epoch_counter = 0
else:
self._epoch_counter += 1
if self._epoch_counter == self._patience:
self._logger.info(
f'Training is stopping, no improvement for {self._patience} epochs'
)
tuner.stop_training = True
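
# --- Minimal usage sketch (illustrative, not part of the library) -----------
# Drives the callback by hand with a dummy tuner; `_DummyTuner` and the loss
# sequence below are assumptions for illustration only. The callback relies
# solely on `tuner.state.current_loss` and `tuner.stop_training`, as used above.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)

    class _DummyState:
        current_loss = 0.0

    class _DummyTuner:
        state = _DummyState()
        stop_training = False

    tuner = _DummyTuner()
    early_stopping = EarlyStopping(monitor='val_loss', patience=2, verbose=True)
    for loss in [1.0, 0.8, 0.85, 0.9]:  # improves twice, then stalls
        tuner.state.current_loss = loss
        early_stopping.on_val_batch_end(tuner)  # record one validation batch
        early_stopping.on_epoch_end(tuner)      # evaluate improvement
        if tuner.stop_training:                 # set after `patience` flat epochs
            break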
|
<filename>IVOS_main_DAVIS.py
from davisinteractive.session import DavisInteractiveSession
from davisinteractive import utils as interactive_utils
from davisinteractive.dataset import Davis
from davisinteractive.metrics import batched_jaccard, batched_f_measure
from libs import custom_transforms as tr
from datasets_torch import davis_2017
import os
import time
import numpy as np
import json
import pickle
from PIL import Image
import csv
from datetime import datetime
import torch
from torch.autograd import Variable
from torchvision import transforms
from torch.utils.data import DataLoader
import torch.nn.functional as F
from libs import utils_custom, utils_visualize
from libs.analyze_report import analyze_summary
from config import Config
from networks.network import NET_GAmap
import warnings
warnings.filterwarnings("ignore")
class Main_tester(object):
def __init__(self, config):
self.config = config
self.Davisclass = Davis(self.config.davis_dataset_dir)
self.current_time = datetime.now().strftime('%Y%m%d-%H%M%S')
self._palette = Image.open(self.config.palette_dir).getpalette()
self.save_res_dir = str()
self.save_log_dir = str()
self.save_logger = None
self.save_csvsummary_dir = str()
self.n_operated_frames_accum = 0
self.total_taken_times_accum = 0
self.net = NET_GAmap()
self.net.cuda()
self.net.eval()
self.net.load_state_dict(torch.load('checkpoints/ckpt_standard.pth'))
self.scr_indices = [1, 2, 3]
self.max_nb_interactions = 8
self.max_time = self.max_nb_interactions * 30
self.scr_samples = []
for v in sorted(self.Davisclass.sets[self.config.test_subset]):
for idx in self.scr_indices:
self.scr_samples.append((v, idx))
self.img_size, self.num_frames, self.n_objects, self.final_masks, self.tmpdict_siact = None, None, None, None, None
self.pad_info, self.hpad1, self.wpad1, self.hpad2, self.wpad2 = None, None, None, None, None
    def run_for_diverse_metrics(self):
with torch.no_grad():
for metric in self.config.test_metric_list:
if metric == 'J':
dir_name = os.path.split(os.path.split(__file__)[0])[1] + '[J]_[' + self.config.test_guide_method + ']_' + self.current_time
elif metric == 'J_AND_F':
dir_name = os.path.split(os.path.split(__file__)[0])[1] + '[JF]_[' + self.config.test_guide_method + ']_' + self.current_time
                else:
                    print("Unsupported metric in config.test_metric_list!")
                    raise NotImplementedError()
self.save_res_dir = os.path.join(self.config.test_result_df_dir, dir_name)
utils_custom.mkdir(self.save_res_dir)
self.save_csvsummary_dir = os.path.join(self.save_res_dir, 'summary_in_csv.csv')
self.save_log_dir = os.path.join(self.save_res_dir, 'test_logs.txt')
self.save_logger = utils_custom.logger(self.save_log_dir)
self.save_logger.printNlog(dir_name + self.current_time)
self.run_IVOS(metric)
def run_IVOS(self, metric):
seen_seq = {}
numseq, tmpseq = 0, ''
with open(self.save_csvsummary_dir, mode='a') as csv_file:
writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(['sequence', 'obj_idx', 'scr_idx'] + ['round-' + str(i + 1) for i in range(self.max_nb_interactions)])
with DavisInteractiveSession(host=self.config.test_host,
user_key=self.config.test_userkey,
davis_root=self.config.davis_dataset_dir,
subset=self.config.test_subset,
report_save_dir=self.save_res_dir,
max_nb_interactions=self.max_nb_interactions,
max_time=self.max_time,
metric_to_optimize=metric) as sess:
sess.connector.service.robot.min_nb_nodes = self.config.test_min_nb_nodes
sess.samples = self.scr_samples
while sess.next():
# Get the current iteration scribbles
self.sequence, scribbles, first_scribble = sess.get_scribbles(only_last=False)
if first_scribble:
anno_dict = {'frames': [], 'annotated_masks': [], 'masks_tobe_modified': []}
n_interaction = 1
info = Davis.dataset[self.sequence]
self.img_size = info['image_size'][::-1]
self.num_frames = info['num_frames']
self.n_objects = info['num_objects']
info = None
seen_seq[self.sequence] = 1 if self.sequence not in seen_seq.keys() else seen_seq[self.sequence] + 1
scr_id = seen_seq[self.sequence]
self.final_masks = np.zeros([self.num_frames, self.img_size[0], self.img_size[1]])
self.pad_info = utils_custom.apply_pad(self.final_masks[0])[1]
self.hpad1, self.wpad1 = self.pad_info[0][0], self.pad_info[1][0]
self.hpad2, self.wpad2 = self.pad_info[0][1], self.pad_info[1][1]
self.h_ds, self.w_ds = int((self.img_size[0] + sum(self.pad_info[0])) / 4), int((self.img_size[1] + sum(self.pad_info[1])) / 4)
self.anno_6chEnc_r4_list = []
self.anno_3chEnc_r4_list = []
self.prob_map_of_frames = torch.zeros((self.num_frames, self.n_objects + 1, 4 * self.h_ds, 4 * self.w_ds)).cuda()
self.gt_masks = self.Davisclass.load_annotations(self.sequence)
self.scores_ni_nf = np.zeros([8, self.num_frames])
IoU_over_eobj = []
else:
n_interaction += 1
self.save_logger.printNlog('\nRunning sequence {} in (scribble index: {}) (round: {})'
.format(self.sequence, sess.samples[sess.sample_idx][1], n_interaction))
annotated_now = interactive_utils.scribbles.annotated_frames(sess.sample_last_scribble)[0]
anno_dict['frames'].append(annotated_now) # Where we save annotated frames
                anno_dict['masks_tobe_modified'].append(self.final_masks[annotated_now])  # mask before modification at the annotated frame
# Get Predicted mask & Mask decision from pred_mask
self.final_masks = self.run_VOS_singleiact(n_interaction, scribbles, anno_dict['frames']) # self.final_mask changes
if self.config.test_save_pngs_option:
utils_custom.mkdir(
os.path.join(self.save_res_dir, 'result_video', '{}-scr{:02d}/round{:02d}'.format(self.sequence, scr_id, n_interaction)))
for fr in range(self.num_frames):
savefname = os.path.join(self.save_res_dir, 'result_video',
'{}-scr{:02d}/round{:02d}'.format(self.sequence, scr_id, n_interaction),
'{:05d}.png'.format(fr))
tmpPIL = Image.fromarray(self.final_masks[fr].astype(np.uint8), 'P')
tmpPIL.putpalette(self._palette)
tmpPIL.save(savefname)
# Limit candidate frames
if n_interaction != self.max_nb_interactions:
self.scores_ni_nf[n_interaction] = self.scores_ni_nf[n_interaction-1]
current_score_np = self.scores_ni_nf[n_interaction-1]
if self.config.test_guide_method=='RS1':
next_scribble_frame_candidates = list(np.argsort(current_score_np)[:1])
elif self.config.test_guide_method=='RS4':
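                        # RS4: take the four lowest-scoring frames, excluding a
                        # window of ~num_frames/10 around each pick so that the
                        # candidates are spread across the sequence.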
sorted_score_idx = np.argsort(current_score_np)
exclude_range = self.num_frames/10
excluded_next_candidates = []
next_scribble_frame_candidates = []
for i in range(self.num_frames):
if not sorted_score_idx[i] in excluded_next_candidates:
next_scribble_frame_candidates.append(sorted_score_idx[i])
excluded_next_candidates += list(range(
int(sorted_score_idx[i]-(exclude_range/2)+0.5), int(sorted_score_idx[i]+(exclude_range/2)+0.5)))
if len(next_scribble_frame_candidates)==4:
break
elif self.config.test_guide_method=='wo_RS':
next_scribble_frame_candidates=None
else:
raise NotImplementedError
# Submit your prediction
sess.submit_masks(self.final_masks, next_scribble_frame_candidates) # F, H, W
# print sequence name
if tmpseq != self.sequence:
tmpseq, numseq = self.sequence, numseq + 1
print(str(numseq) + ':' + str(self.sequence) + '-' + str(seen_seq[self.sequence]) + '\n')
## Visualizers and Saver
# IoU estimation
jaccard = batched_jaccard(self.gt_masks,
self.final_masks,
average_over_objects=False,
nb_objects=self.n_objects
) # frames, objid
IoU_over_eobj.append(jaccard)
# save final mask in anno_dict
                anno_dict['annotated_masks'].append(self.final_masks[annotated_now])  # mask after modification at the annotated frame
if self.max_nb_interactions == len(anno_dict['frames']): # After Lastround -> total 90 iter
seq_scrid_name = self.sequence + str(scr_id)
# IoU manager
IoU_over_eobj = np.stack(IoU_over_eobj, axis=0) # niact,frames,n_obj
IoUeveryround_perobj = np.mean(IoU_over_eobj, axis=1) # niact,n_obj
# show scribble input and output
savefiledir = os.path.join(self.save_res_dir, 'debug_scribble')
utils_custom.mkdir(savefiledir)
savefilename = os.path.join(savefiledir, seq_scrid_name + '.jpg')
utils_visualize.visualize_scrib_interaction(scribbles, anno_dict, self.sequence, savefilename)
# plot IoU
if self.config.test_save_pngs_option:
savefiledir = os.path.join(self.save_res_dir, 'plot_IoU_perObj')
utils_custom.mkdir(savefiledir)
for obj_idx in range(self.n_objects):
savefilename = os.path.join(savefiledir, seq_scrid_name + '-obj' + str(obj_idx + 1) + '_first{:03d}final{:03d}.png'
.format(int(1000 * IoUeveryround_perobj[0, obj_idx]),
int(1000 * IoUeveryround_perobj[-1, obj_idx])))
utils_visualize.visualize_interactionIoU(IoU_over_eobj[:, :, obj_idx], seq_scrid_name + '-obj' + str(obj_idx + 1),
anno_dict['frames'], save_dir=savefilename, show_propagated_region=True)
# write csv
for obj_idx in range(self.n_objects):
with open(self.save_csvsummary_dir, mode='a') as csv_file:
writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow([self.sequence, str(obj_idx + 1), str(scr_id)] + list(IoUeveryround_perobj[:, obj_idx]))
summary = sess.get_global_summary(save_file=self.save_res_dir + '/summary_' + sess.report_name[7:] + '.json')
analyze_summary(self.save_res_dir + '/summary_' + sess.report_name[7:] + '.json', metric=metric)
fps = self.n_operated_frames_accum / self.total_taken_times_accum
self.save_logger.printNlog('n_operated_frames_accum={}'.format(str(self.n_operated_frames_accum)))
self.save_logger.printNlog('total_taken_times_accum={}'.format(str(self.total_taken_times_accum)))
self.save_logger.printNlog('fps={}'.format(str(fps)))
# final_IOU = summary['curve'][metric][-1]
average_IoU_per_round = summary['curve'][metric][1:-1]
torch.cuda.empty_cache()
model = None
return average_IoU_per_round
def run_VOS_singleiact(self, n_interaction, scribbles_data, annotated_frames):
annotated_frames_np = np.array(annotated_frames)
num_workers = 4
annotated_now = annotated_frames[-1]
scribbles_list = scribbles_data['scribbles']
seq_name = scribbles_data['sequence']
# output_masks = self.final_masks.copy().astype(np.float64)
prop_list = utils_custom.get_prop_list(annotated_frames, annotated_now, self.num_frames, proportion=self.config.test_propagation_proportion)
prop_fore = sorted(prop_list)[0]
prop_rear = sorted(prop_list)[-1]
# Interaction settings
pm_ps_ns_3ch_t = [] # n_obj,3,h,w
if n_interaction == 1:
for obj_id in range(1, self.n_objects + 1):
pos_scrimg = utils_custom.scribble_to_image(scribbles_list, annotated_now, obj_id,
dilation=self.config.scribble_dilation_param,
prev_mask=self.final_masks[annotated_now])
pm_ps_ns_3ch_t.append(np.stack([np.ones_like(pos_scrimg) / 2, pos_scrimg, np.zeros_like(pos_scrimg)], axis=0))
pm_ps_ns_3ch_t = np.stack(pm_ps_ns_3ch_t, axis=0) # n_obj,3,h,w
else:
for obj_id in range(1, self.n_objects + 1):
prev_round_input = (self.final_masks[annotated_now] == obj_id).astype(np.float32) # H,W
pos_scrimg, neg_scrimg = utils_custom.scribble_to_image(scribbles_list, annotated_now, obj_id,
dilation=self.config.scribble_dilation_param,
prev_mask=self.final_masks[annotated_now], blur=True,
singleimg=False, seperate_pos_neg=True)
pm_ps_ns_3ch_t.append(np.stack([prev_round_input, pos_scrimg, neg_scrimg], axis=0))
pm_ps_ns_3ch_t = np.stack(pm_ps_ns_3ch_t, axis=0) # n_obj,3,h,w
pm_ps_ns_3ch_t = torch.from_numpy(pm_ps_ns_3ch_t).cuda()
if (prop_list[0] != annotated_now) and (prop_list.count(annotated_now) != 2):
print(str(prop_list))
raise NotImplementedError
        print(str(prop_list))  # prop_list is built backward first, then forward
composed_transforms = transforms.Compose([tr.Normalize_ApplymeanvarImage(self.config.mean, self.config.var),
tr.ToTensor()])
db_test = davis_2017.DAVIS2017(split='val', transform=composed_transforms, root=self.config.davis_dataset_dir,
custom_frames=prop_list, seq_name=seq_name, rgb=True,
obj_id=None, no_gt=True, retname=True, prev_round_masks=self.final_masks, )
testloader = DataLoader(db_test, batch_size=1, shuffle=False, num_workers=num_workers, pin_memory=True)
flag = 0 # 1: propagating backward, 2: propagating forward
print('[{:01d} round] processing...'.format(n_interaction))
for ii, batched in enumerate(testloader):
# batched : image, scr_img, 0~fr, meta
inpdict = dict()
operating_frame = int(batched['meta']['frame_id'][0])
for inp in batched:
if inp == 'meta': continue
inpdict[inp] = Variable(batched[inp]).cuda()
inpdict['image'] = inpdict['image'].expand(self.n_objects, -1, -1, -1)
t_start = time.time()
#################### Iaction ########################
if operating_frame == annotated_now: # Check the round is on interaction
if flag == 0:
flag += 1
adjacent_to_anno = True
elif flag == 1:
flag += 1
adjacent_to_anno = True
continue
else:
raise NotImplementedError
pm_ps_ns_3ch_t = torch.nn.ReflectionPad2d(self.pad_info[1] + self.pad_info[0])(pm_ps_ns_3ch_t)
inputs = torch.cat([inpdict['image'], pm_ps_ns_3ch_t], dim=1)
anno_3chEnc_r4, _ = self.net.encoder_3ch.forward(inpdict['image'])
neighbor_pred_onehot_sal, anno_6chEnc_r4 = self.net.forward_obj_feature_extractor(inputs) # [nobj, 1, P_H, P_W], # [n_obj,2048,h/16,w/16]
output_logit, r4_anno, score = self.net.forward_prop(
[anno_3chEnc_r4], inpdict['image'], [anno_6chEnc_r4],
anno_3chEnc_r4, torch.sigmoid(neighbor_pred_onehot_sal),
anno_fr_list= annotated_frames_np, que_fr= operating_frame) # [nobj, 1, P_H, P_W]
output_prob_tmp = F.softmax(output_logit, dim=1) # [nobj, 2, P_H, P_W]
output_prob_tmp = output_prob_tmp[:, 1] # [nobj, P_H, P_W]
one_hot_outputs_t = F.softmax(self.soft_aggregation(output_prob_tmp), dim=0) # [nobj+1, P_H, P_W]
anno_onehot_prob = one_hot_outputs_t.clone()[1:].unsqueeze(1) # [nobj, 1, P_H, P_W]
anno_3chEnc_r4, r2_prev_fromanno = self.net.encoder_3ch.forward(inpdict['image'])
self.anno_6chEnc_r4_list.append(anno_6chEnc_r4)
self.anno_3chEnc_r4_list.append(anno_3chEnc_r4)
if len(self.anno_6chEnc_r4_list) != len(annotated_frames):
raise NotImplementedError
#################### Propagation ########################
else:
# Flag [1: propagating backward, 2: propagating forward]
if adjacent_to_anno:
r4_neighbor = r4_anno
neighbor_pred_onehot = anno_onehot_prob
else:
r4_neighbor = r4_que
neighbor_pred_onehot = targ_onehot_prob
adjacent_to_anno = False
output_logit, r4_que, score = self.net.forward_prop(
self.anno_3chEnc_r4_list, inpdict['image'], self.anno_6chEnc_r4_list,
r4_neighbor, neighbor_pred_onehot,
anno_fr_list= annotated_frames_np, que_fr= operating_frame) # [nobj, 1, P_H, P_W]
output_prob_tmp = F.softmax(output_logit, dim=1) # [nobj, 2, P_H, P_W]
output_prob_tmp = output_prob_tmp[:, 1] # [nobj, P_H, P_W]
one_hot_outputs_t = F.softmax(self.soft_aggregation(output_prob_tmp), dim=0) # [nobj+1, P_H, P_W]
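                # Blend the new prediction with the stored probability map:
                # alpha ramps linearly from 1 at the newly annotated frame down
                # to smallest_alpha at the nearest other annotated frame, so
                # frames far from the new annotation keep more of the old map.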
smallest_alpha = 0.5
if flag == 1:
sorted_frames = annotated_frames_np[annotated_frames_np < annotated_now]
if len(sorted_frames) == 0:
alpha = 1
else:
closest_addianno_frame = np.max(sorted_frames)
alpha = smallest_alpha + (1 - smallest_alpha) * (
(operating_frame - closest_addianno_frame) / (annotated_now - closest_addianno_frame))
else:
sorted_frames = annotated_frames_np[annotated_frames_np > annotated_now]
if len(sorted_frames) == 0:
alpha = 1
else:
closest_addianno_frame = np.min(sorted_frames)
alpha = smallest_alpha + (1 - smallest_alpha) * (
(closest_addianno_frame - operating_frame) / (closest_addianno_frame - annotated_now))
one_hot_outputs_t = (alpha * one_hot_outputs_t) + ((1 - alpha) * self.prob_map_of_frames[operating_frame])
targ_onehot_prob = one_hot_outputs_t.clone()[1:].unsqueeze(1) # [nobj, 1, P_H, P_W]
# Final mask indexing
self.prob_map_of_frames[operating_frame] = one_hot_outputs_t
self.scores_ni_nf[n_interaction-1,operating_frame] = score
self.n_operated_frames_accum += 1
self.total_taken_times_accum += time.time()-t_start
output_masks = torch.argmax(self.prob_map_of_frames,dim=1).cpu().numpy().astype(np.uint8)[:,self.hpad1:-self.hpad2, self.wpad1:-self.wpad2]
torch.cuda.empty_cache()
return output_masks
def soft_aggregation(self, ps):
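        # Combine per-object foreground probabilities ps [n_obj, H, W] into
        # (n_obj + 1)-way logits: the background probability is the product of
        # the per-object background probabilities, and values are clamped
        # before the logit transform to avoid infinities.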
num_objects, H, W = ps.shape
em = torch.zeros(num_objects +1, H, W).cuda()
em[0] = torch.prod(1-ps, dim=0) # bg prob
em[1:num_objects+1] = ps # obj prob
em = torch.clamp(em, 1e-7, 1-1e-7)
logit = torch.log((em /(1-em)))
return logit
if __name__ == '__main__':
config = Config()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(config.test_gpu_id)
tester = Main_tester(config)
tester.run_for_diverse_metrics()
|
import os
import time
from datetime import datetime
import numpy as np
import numpy.random as npr
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import tensorflow as tf
print(tf.__version__)
from tensorflow.python.client import device_lib
device_lib.list_local_devices()
from tensorflow.contrib.eager.python import tfe
from scipy.stats import mode
from tensorflow.python.keras.constraints import nonneg
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
import argparse
import math  # math.isnan is used in the training loop below
# always reload dependency code
import importlib
import utils
importlib.reload(utils)
from utils import *
class idgmmvae(tf.keras.Model):
def __init__(self, discretez_dim, contiz_dim, h_dim, out_dim, regularizer, num_bijectors, nf_dim, qy_num_hidden_layers, qz_num_hidden_layers, px_num_hidden_layers, USE_BATCHNORM=False):
        super(idgmmvae, self).__init__()
self.discretez_dim = discretez_dim
self.contiz_dim = contiz_dim
self.h_dim = h_dim
self.out_dim = out_dim
self.qy_num_hidden_layers = qy_num_hidden_layers
self.qy_layers = []
for i in range(self.qy_num_hidden_layers):
self.qy_layers.append(tf.keras.layers.Dense(h_dim, kernel_regularizer=regularizer))
self.qz_num_hidden_layers = qz_num_hidden_layers
self.qz_layers = []
for i in range(self.qz_num_hidden_layers):
self.qz_layers.append(tf.keras.layers.Dense(h_dim, kernel_regularizer=regularizer))
'''
input convex neural network
input: y
parameters: theta = {W^y_{0:k-1}, W^z_{1:k-1}, b_{0:k-1}}
initial condition: z_0 = 0, W_0^z = 0
intermediate layer: z_{i+1} = g_i(W_i^z z_i + W_i^y y + b_i),
i=0, ..., k-1
final layer: f(y,theta) = z_k
constraints: W^z_{1:k-1} is nonnegative
g_i convex and non-decreasing
'''
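        # Mapping the docstring to the attributes below: icnnN_Wy0 plays the
        # role of W_0^y (with b_0), icnnN_Wy_layers[i] of W_{i+1}^y, and
        # icnnN_Wz_layers[i] of the nonnegative W_{i+1}^z; g_i is leaky_relu
        # (squared at the first layer).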
# Wy_i involves W_i^y, b_i
self.px1_num_hidden_layers = px_num_hidden_layers
self.px2_num_hidden_layers = px_num_hidden_layers
# self.px3_num_hidden_layers = px_num_hidden_layers
# the first set of icnn
# first layer is treated separately
self.icnn1_Wy0 = tf.keras.layers.Dense(h_dim, kernel_regularizer=regularizer)
self.icnn1_Wy_layers = []
self.icnn1_Wz_layers = []
for i in range(self.px1_num_hidden_layers-1):
self.icnn1_Wy_layers.append(tf.keras.layers.Dense(h_dim, kernel_regularizer=regularizer))
self.icnn1_Wz_layers.append(tf.keras.layers.Dense(h_dim,
use_bias=False, kernel_constraint=tf.keras.constraints.NonNeg(), kernel_regularizer=regularizer))
# add final layer with output dimension = 1
self.icnn1_Wy_layers.append(tf.keras.layers.Dense(1, kernel_regularizer=regularizer))
self.icnn1_Wz_layers.append(tf.keras.layers.Dense(1,
use_bias=False, kernel_constraint=tf.keras.constraints.NonNeg(), kernel_regularizer=regularizer))
# the second set of icnn
# first layer is treated separately
# self.icnn2_Wy0 = tf.keras.layers.Dense(h_dim, kernel_regularizer=regularizer)
self.icnn2_Wy0 = tf.keras.layers.Dense(out_dim, kernel_regularizer=regularizer)
self.icnn2_Wy_layers = []
self.icnn2_Wz_layers = []
for i in range(self.px2_num_hidden_layers-1):
self.icnn2_Wy_layers.append(tf.keras.layers.Dense(h_dim, kernel_regularizer=regularizer))
self.icnn2_Wz_layers.append(tf.keras.layers.Dense(h_dim,
use_bias=False, kernel_constraint=tf.keras.constraints.NonNeg(), kernel_regularizer=regularizer))
# add final layer with output dimension = 1
self.icnn2_Wy_layers.append(tf.keras.layers.Dense(1, kernel_regularizer=regularizer))
self.icnn2_Wz_layers.append(tf.keras.layers.Dense(1,
use_bias=False, kernel_constraint=tf.keras.constraints.NonNeg(), kernel_regularizer=regularizer))
# the third set of icnn
# first layer is treated separately
# self.icnn3_Wy0 = tf.keras.layers.Dense(out_dim, kernel_regularizer=regularizer)
# self.icnn3_Wy_layers = []
# self.icnn3_Wz_layers = []
# for i in range(self.px3_num_hidden_layers-1):
# self.icnn3_Wy_layers.append(tf.keras.layers.Dense(h_dim, kernel_regularizer=regularizer))
# self.icnn3_Wz_layers.append(tf.keras.layers.Dense(h_dim,
# use_bias=False, kernel_constraint=tf.keras.constraints.NonNeg(), kernel_regularizer=regularizer))
# # add final layer with output dimension = 1
# self.icnn3_Wy_layers.append(tf.keras.layers.Dense(1, kernel_regularizer=regularizer))
# self.icnn3_Wz_layers.append(tf.keras.layers.Dense(1,
# use_bias=False, kernel_constraint=tf.keras.constraints.NonNeg(), kernel_regularizer=regularizer))
self.fc2 = tf.keras.layers.Dense(discretez_dim, kernel_regularizer=regularizer)
self.fc5 = tf.keras.layers.Dense(contiz_dim, kernel_regularizer=regularizer)
self.fc6 = tf.keras.layers.Dense(contiz_dim, kernel_regularizer=regularizer)
self.fc7 = tf.keras.layers.Dense(contiz_dim, kernel_regularizer=regularizer)
self.fc8 = tf.keras.layers.Dense(contiz_dim, kernel_regularizer=regularizer)
# self.fc9 = tf.keras.layers.Dense(h_dim,
# kernel_regularizer=regularizer)
self.fc9 = tf.keras.layers.Dense(self.out_dim,
kernel_regularizer=regularizer)
# self.fc10 = tf.keras.layers.Dense(self.out_dim, kernel_regularizer=regularizer)
self.shift_and_log_scale_fn = []
for i in range(num_bijectors):
self.shift_and_log_scale_fn.append(tfb.real_nvp_default_template(
hidden_layers=[nf_dim, nf_dim], shift_only=True))
bijectors = []
for i in range(num_bijectors):
bijectors.append(tfb.RealNVP(shift_and_log_scale_fn=self.shift_and_log_scale_fn[i], num_masked=2))
if USE_BATCHNORM and i % 2 == 0:
# BatchNorm helps to stabilize deep normalizing flows, esp. Real-NVP
bijectors.append(tfb.BatchNormalization())
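        # tfb.Chain composes right-to-left (like function composition), so the
        # list is reversed to apply bijectors in construction order.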
self.bijector = tfb.Chain(list(reversed(bijectors)))
def icnn1_grad(self, x_train_tensor):
with tf.GradientTape() as icnn1_tape:
icnn1_tape.watch(x_train_tensor)
h = [[None] for i in range(self.px1_num_hidden_layers + 1)]
h[0] = tf.square(tf.nn.leaky_relu(self.icnn1_Wy0(x_train_tensor)))
for i in range(self.px1_num_hidden_layers):
h[i+1] = tf.nn.leaky_relu(self.icnn1_Wz_layers[i](h[i]) + self.icnn1_Wy_layers[i](x_train_tensor))
dout_dx = icnn1_tape.gradient(h[-1], x_train_tensor)
return dout_dx
def icnn2_grad(self, x_train_tensor):
with tf.GradientTape() as icnn2_tape:
icnn2_tape.watch(x_train_tensor)
h = [[None] for i in range(self.px2_num_hidden_layers + 1)]
h[0] = tf.square(tf.nn.leaky_relu(self.icnn2_Wy0(x_train_tensor)))
for i in range(self.px2_num_hidden_layers):
h[i+1] = tf.nn.leaky_relu(self.icnn2_Wz_layers[i](h[i]) + self.icnn2_Wy_layers[i](x_train_tensor))
dout_dx = icnn2_tape.gradient(h[-1], x_train_tensor)
return dout_dx
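    # NOTE: the icnn3 layers are commented out in __init__, so this helper is
    # currently unused; calling it would raise AttributeError.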
def icnn3_grad(self, x_train_tensor):
with tf.GradientTape() as icnn3_tape:
icnn3_tape.watch(x_train_tensor)
h = [[None] for i in range(self.px3_num_hidden_layers + 1)]
h[0] = tf.square(tf.nn.leaky_relu(self.icnn3_Wy0(x_train_tensor)))
for i in range(self.px3_num_hidden_layers):
h[i+1] = tf.nn.leaky_relu(self.icnn3_Wz_layers[i](h[i]) + self.icnn3_Wy_layers[i](x_train_tensor))
dout_dx = icnn3_tape.gradient(h[-1], x_train_tensor)
return dout_dx
def qy_graph(self, x, k):
h = [[None] for i in range(self.qy_num_hidden_layers + 1)]
h[0] = x
for i in range(self.qy_num_hidden_layers):
h[i+1] = tf.nn.relu(self.qy_layers[i](h[i]))
qy_logit = self.fc2(h[-1])
qy = tf.nn.softmax(qy_logit)
return qy_logit, qy
def nfdist(self, zm, zv):
# normalizing flow variational distribution for conti_z
nfdist = tfd.TransformedDistribution(
distribution=tfd.MultivariateNormalDiag(loc=zm, scale_diag=zv),
bijector=self.bijector)
return nfdist
def qz_graph(self, x, y):
xy = tf.concat([x, y], 1)
h = [[None] for i in range(self.qz_num_hidden_layers + 1)]
h[0] = xy
for i in range(self.qz_num_hidden_layers):
h[i+1] = tf.nn.relu(self.qz_layers[i](h[i]))
zm = self.fc5(h[-1])
zv = tf.nn.softplus(self.fc6(h[-1]))
nfdist = self.nfdist(zm, zv)
z = nfdist.sample()
return z, zm, zv
def px_graph(self, z, y):
zm = self.fc7(y)
# zm = tf.pad(y * 10 + 1, [[0,0], [0, self.contiz_dim - self.discretez_dim]], "CONSTANT")
# print(y * 100, self.contiz_dim, self.discretez_dim)
# print(zm)
zv = tf.nn.softplus(self.fc8(y))
# need to do above to create the layers first
# zm = tf.matmul(y, tf.cumsum(tf.nn.softplus(self.fc7.kernel), axis=0))
# zv = tf.nn.softplus(tf.matmul(y, tf.cumsum(tf.nn.softplus(self.fc7.kernel), axis=0)))
# print("z", z)
z_mid1 = self.icnn1_grad(z) # note changes in here should change loss gradient below too
# print("z_mid1", z_mid1)
z_mid2 = self.fc9(z_mid1)
# print(self.fc9.weights)
# make sure the weights are full rank
        s, u, v = tf.linalg.svd(self.fc9.kernel)
        s_fr = tf.clip_by_value(s, clip_value_min=1e-5, clip_value_max=10.)
        self.fc9.kernel.assign(tf.matmul(u, tf.matmul(tf.linalg.diag(s_fr), v, adjoint_b=True)))
# print(s, s_fr)
# print(self.fc9.weights)
# beta_zmid1_zmid2 = tf.eye(self.contiz_dim, self.h_dim)
# z_mid2 = tf.matmul(z_mid1, beta_zmid1_zmid2)
# print("z_mid2", z_mid2)
z_mid3 = self.icnn2_grad(z_mid2)
# px_logit = z_mid3
# z_mid4 = self.fc10(z_mid3)
# beta_zmid2_zmid3 = tf.eye(self.h_dim, self.out_dim)
# z_mid4 = tf.matmul(z_mid3, beta_zmid2_zmid3)
# z_mid5 = self.icnn3_grad(z_mid4)
# px_logit = z_mid5
px_logit = z_mid3
# z_mid4 = self.fc10(z_mid3)
# z_mid5 = self.icnn3_grad(z_mid4)
# px_logit = z_mid5
return zm, zv, px_logit
def labeled_loss(self, x, px_logit, z, zm, zv, zm_prior, zv_prior):
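        # Negative labeled ELBO term for a fixed y:
        #   -log p(x|z) - log p(z|y) + log q(z|x,y) - log p(y),
        # with p(y) uniform over the discretez_dim components.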
xy_loss = -log_bernoulli_with_logits(x, px_logit)
xy_loss += -tfd.MultivariateNormalDiag(
loc=zm_prior, scale_diag=zv_prior).log_prob(z)
nfdist = self.nfdist(zm, zv)
xy_loss += nfdist.log_prob(z)
xy_loss += -np.log(1./self.discretez_dim)
return xy_loss
def call(self, x):
xb = tf.cast(tf.greater(x, tf.random_uniform(tf.shape(x), 0, 1)), tf.float32)
qy_logit, qy = self.qy_graph(xb, k=self.discretez_dim)
z, zm, zv, zm_prior, zv_prior, px_logit = [[None] * self.discretez_dim for i in range(6)]
y_ = tf.fill(tf.stack([tf.shape(x)[0], self.discretez_dim]), 0.0)
for i in range(self.discretez_dim):
y = tf.add(y_, tf.constant(np.eye(self.discretez_dim)[i], dtype='float32'))
z[i], zm[i], zv[i] = self.qz_graph(xb, y)
zm_prior[i], zv_prior[i], px_logit[i] = self.px_graph(z[i], y)
return xb, qy_logit, qy, z, zm, zv, zm_prior, zv_prior, px_logit
def iw_nll(model, images, iw_nsamples=100):
loglikeratios_list = []
for i in range(iw_nsamples):
xb, qy_logit, qy, z, zm, zv, zm_prior, zv_prior, px_logit = model(images)
losses = [None] * model.discretez_dim
        for j in range(model.discretez_dim):
            losses[j] = model.labeled_loss(xb, px_logit[j], z[j], zm[j], zv[j], zm_prior[j], zv_prior[j])
loss = tf.reshape(-tf.reduce_mean(tf.add_n([qy[:, i] * losses[i] for i in range(model.discretez_dim)])), [1])
# print("loss", loss)
loglikeratios_list.append(loss)
# print("list", loglikeratios_list)
loglikeratios = tf.concat(loglikeratios_list, 0)
iwae_nll = tf.math.reduce_logsumexp(loglikeratios,0) - tf.math.log(tf.constant([iw_nsamples],dtype=tf.float32))
return iwae_nll
def nent_and_loss(model, images):
xb, qy_logit, qy, z, zm, zv, zm_prior, zv_prior, px_logit = model(images)
nent = -cross_entropy_with_logits(qy_logit, qy)
losses = [None] * model.discretez_dim
for i in range(model.discretez_dim):
losses[i] = model.labeled_loss(xb, px_logit[i], z[i], zm[i], zv[i], zm_prior[i], zv_prior[i])
loss = tf.add_n([nent] + [qy[:, i] * losses[i] for i in range(model.discretez_dim)])
kl_discrete = nent - np.log(1./model.discretez_dim)
kl_contis = [[None] for i in range(model.discretez_dim)]
for i in range(model.discretez_dim):
        kl_contis[i] = model.nfdist(zm[i], zv[i]).log_prob(z[i]) - \
tfd.MultivariateNormalDiag(
loc=zm_prior[i], scale_diag=zv_prior[i]).log_prob(z)
kl_conti = tf.add_n([qy[:, i] * kl_contis[i] for i in range(model.discretez_dim)])
au_discrete = tf.math.reduce_std(qy, 0)
au_conti = tf.math.reduce_std(tf.add_n(
[tf.multiply(tf.expand_dims(qy[:, i], 1), zm[i]) for i in range(model.discretez_dim)]), 0)
return nent, loss, kl_discrete, au_discrete, kl_conti, au_conti, qy_logit
def eval_model(model, train_data, train_labels, test_data, test_labels, itr, outfilename):
with tf.device('/cpu:0'):
train_evalset = np.random.choice(train_data.shape[0], 1000)
test_evalset = np.random.choice(test_data.shape[0], 1000)
train_images = train_data[train_evalset]
train_labels = train_labels.argmax(1)[train_evalset]
test_images = test_data[test_evalset]
test_labels = test_labels.argmax(1)[test_evalset]
train_nent, train_loss, train_kl_discrete, train_au_discrete, train_kl_conti, train_au_conti, train_qy_logit = nent_and_loss(model, train_images)
test_nent, test_loss, test_kl_discrete, test_au_discrete, test_kl_conti, test_au_conti, test_qy_logit = nent_and_loss(model, test_images)
train_iwnll = iw_nll(model, train_images).numpy()[0]
test_iwnll = iw_nll(model, test_images).numpy()[0]
train_ent, train_loss, train_kl_discrete, train_au_discrete, train_kl_conti, train_au_conti = -train_nent.numpy().mean(), train_loss.numpy().mean(), train_kl_discrete.numpy().mean(), train_au_discrete.numpy(), train_kl_conti.numpy().mean(), train_au_conti.numpy()
test_ent, test_loss, test_kl_discrete, test_au_discrete, test_kl_conti, test_au_conti = -test_nent.numpy().mean(), test_loss.numpy().mean(), test_kl_discrete.numpy().mean(), test_au_discrete.numpy(), test_kl_conti.numpy().mean(), test_au_conti.numpy()
zacc_train, zacc_test = -1, -1
if test_labels is not None:
if train_labels is not None:
zacc_test = z_testacc(test_qy_logit, test_labels)
zacc_train = z_testacc(train_qy_logit, train_labels)
# print(itr)
with open(outfilename+'.log', 'a') as f:
f.write("\n\nItr" + str([itr]) +
"\ntrain ent loss zacc kl_discrete kl_conti iw_nll" + str([train_ent]+ [train_loss] + [zacc_train] +
[train_kl_discrete] + [train_kl_conti] + [train_iwnll]) +
"\ntest ent loss zacc kl_discrete kl_conti iw_nll" + str([test_ent]+ [test_loss] + [zacc_test] +
[test_kl_discrete] + [test_kl_conti] + [test_iwnll]))
with open(outfilename+'_au_discrete.log', 'a') as f:
f.write("\n\nItr" + str([itr]) +
"\ntrain au_discrete" + str([train_au_discrete]) +
"\ntest au_discrete" + str([test_au_discrete]))
with open(outfilename+'_au_conti.log', 'a') as f:
f.write("\n\nItr" + str([itr]) +
"\ntrain au_conti" + str([train_au_conti]) +
"\ntest au_conti" + str([test_au_conti]))
print("\n\nItr", itr,
"\ntrain ent loss zacc kl_discrete kl_conti iw_nll", train_ent, train_loss, zacc_train, train_kl_discrete, train_kl_conti, train_iwnll,
"\ntest ent loss zacc kl_discrete kl_conti iw_nll", test_ent, test_loss, zacc_test, test_kl_discrete, test_kl_conti, test_iwnll)
return train_loss
def train_idgmmvae(model, train_data, train_labels, test_data, test_labels, optimizer, batch_size, num_epochs, outfilename, device, grad_clip, checkpoint_path):
dataset = tf.data.Dataset.from_tensor_slices((train_data,))
dataset = dataset.shuffle(batch_size * 5)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(10)
num_batches = train_data.shape[0] // batch_size
for outfile in [outfilename+'.log', outfilename+'_au_discrete.log', outfilename+'_au_conti.log']:
with open(outfile, 'a') as f:
f.write("hi i'm starting")
f.write("idgmmvae_model")
f.write("\noptimizer")
f.write("discretez_dim"+str(model.discretez_dim)+\
"h_dim"+str(model.h_dim)+\
"contiz_dim"+str(model.contiz_dim))
for epoch in range(num_epochs):
for batch, (images,) in enumerate(dataset):
itr = epoch * num_batches + batch
with tf.device(device):
with tf.GradientTape() as loss_tape:
loss_tape.watch(model.variables)
xb, qy_logit, qy, z, zm, zv, zm_prior, zv_prior, _ = model(images)
px_logit = [None] * model.discretez_dim
# we repeat the icnn calculation here. otherwise the
# nested gradient is not calculated correctly
for zi in range(model.discretez_dim):
# print("start", zi)
h1 = [[None] for i in range(model.px1_num_hidden_layers + 1)]
h2 = [[None] for i in range(model.px2_num_hidden_layers + 1)]
# h3 = [[None] for i in range(model.px3_num_hidden_layers + 1)]
hz = z[zi]
# print("hz", hz)
with tf.GradientTape() as loss_icnn1_tape:
loss_icnn1_tape.watch(hz)
h1[0] = tf.square(tf.nn.leaky_relu(model.icnn1_Wy0(hz)))
for i in range(model.px1_num_hidden_layers):
h1[i+1] = tf.nn.leaky_relu(model.icnn1_Wz_layers[i](h1[i]) + model.icnn1_Wy_layers[i](hz))
# print("h1[-1]", h1[-1])
h_mid1 = loss_icnn1_tape.gradient(h1[-1], hz)
# print("h_mid1", h_mid1)
h_mid2 = model.fc9(h_mid1)
# beta_zmid1_zmid2 = tf.eye(model.contiz_dim, model.h_dim)
# h_mid2 = tf.matmul(h_mid1, beta_zmid1_zmid2)
# print("h_mid2", h_mid2)
with tf.GradientTape() as loss_icnn2_tape:
loss_icnn2_tape.watch(h_mid2)
h2[0] = tf.square(tf.nn.leaky_relu(model.icnn2_Wy0(h_mid2)))
for i in range(model.px2_num_hidden_layers):
h2[i+1] = tf.nn.leaky_relu(model.icnn2_Wz_layers[i](h2[i]) + model.icnn2_Wy_layers[i](h_mid2))
# print("h2[-1]", h2[-1])
h_mid3 = loss_icnn2_tape.gradient(h2[-1], h_mid2)
# beta_zmid2_zmid3 = tf.eye(model.h_dim, model.out_dim)
# h_mid4 = tf.matmul(h_mid3, beta_zmid2_zmid3)
# h_mid4 = model.fc10(h_mid3)
# with tf.GradientTape() as loss_icnn3_tape:
# loss_icnn3_tape.watch(h_mid4)
# h3[0] = tf.square(tf.nn.leaky_relu(model.icnn3_Wy0(h_mid4)))
# for i in range(model.px3_num_hidden_layers):
# h3[i+1] = tf.nn.leaky_relu(model.icnn3_Wz_layers[i](h3[i]) + model.icnn3_Wy_layers[i](h_mid4))
# h_mid5 = loss_icnn3_tape.gradient(h3[-1], h_mid4)
# # print("h4", h4)
# px_logit[i] = h_mid5
# print(zi, h_mid3)
px_logit[zi] = h_mid3
# print(px_logit)
# Forward pass
nent = -cross_entropy_with_logits(qy_logit, qy)
losses = [None] * model.discretez_dim
for i in range(model.discretez_dim):
losses[i] = model.labeled_loss(xb, px_logit[i], z[i], zm[i], zv[i], zm_prior[i], zv_prior[i])
loss = tf.add_n([nent] + [qy[:, i] * losses[i] for i in range(model.discretez_dim)])
# print(model.fc7.weights)
# S1 = tf.linalg.svd(model.fc7.weights[0], compute_uv=False)
# print(S1)
# S2 = tf.linalg.svd(model.fc9.weights[0], compute_uv=False)
# rank_penalty = tf.reduce_sum(tf.math.minimum(tf.math.log(S1), 1e-6 * tf.ones_like(S1))) + tf.reduce_sum(tf.math.minimum(tf.math.log(S2), 1e-6 * tf.ones_like(S2)))
# print(rank_penalty)
# loss -= rank_penalty
gradients = loss_tape.gradient(loss, model.variables)
capped_gradients = tf.clip_by_global_norm(gradients, grad_clip)[0]
# gradient clipping is essential for normalizing flow
grad_vars = zip(capped_gradients, model.variables)
optimizer.apply_gradients(grad_vars, tf.train.get_or_create_global_step())
if itr % num_batches == 0:
# if itr % 10 == 0:
train_loss = eval_model(model, train_data, train_labels, test_data, test_labels, itr, outfilename)
model.save_weights(checkpoint_path.format(itr=itr))
if math.isnan(train_loss):
break
# cmap=plt.cm.jet
# if model.out_dim == 784:
# y_ = tf.fill(tf.stack([batch_size, model.discretez_dim]), 0.0)
# for i in range(model.discretez_dim):
# y = tf.add(y_, tf.constant(np.eye(model.discretez_dim)[i], dtype='float32'))
# zm_prior = model.fc7(y)
# zv_prior = tf.nn.softplus(model.fc8(y))
# z = tfd.MultivariateNormalDiag(loc=zm_prior, scale_diag=zv_prior).sample()
# _, _, px_logit = model.px_graph(z, y)
# out = tf.math.reduce_mean(tf.nn.sigmoid(px_logit), 0)
# out = tf.reshape(out, [28, 28]).numpy() * 255
# out = out.astype(np.uint8)
# plt.imsave(os.path.dirname(checkpoint_path)+'/itr'+str(itr)+'_discretez'+str(i)+'.png', out, cmap=cmap)
return model
|
"""
Nonconforming Group Graphical Lasso experiment
===================================================
Example script for Group Graphical Lasso with non-conforming dimension, i.e. some variables exist in some instances but not in all.
We generate one underlying precision matrix and then drop one block of variables in each instance.
"""
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
from gglasso.helper.ext_admm_helper import create_group_array, construct_indexer
from gglasso.helper.utils import sparsity
from gglasso.helper.data_generation import generate_precision_matrix, group_power_network, sample_covariance_matrix
from gglasso.helper.ext_admm_helper import check_G, consensus
from gglasso.problem import glasso_problem
K = 4
p = 50
M = 10
B = int(p/M)
N = 100
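# K instances of p variables arranged in M blocks of size B; N samples each.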
#%%
# Generating the data
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# We generate one precision matrix, sample observations, and finally drop one block of variables in each instance.
# It is important that the observations for each instance are given as a ``DataFrame`` of shape ``(p_k,N_k)`` where the index holds unique ids for each variable.
#
p_arr = (p-B)*np.ones(K, dtype = int)
num_samples = N*np.ones(K, dtype = int)
Sigma, Theta = generate_precision_matrix(p=p, M=M, style = 'powerlaw', gamma = 2.8, prob = 0.1, nxseed = 3456)
all_obs = dict()
S = dict()
for k in np.arange(K):
_, obs = sample_covariance_matrix(Sigma, N)
# drop the k-th block starting from the end
all_obs[k] = pd.DataFrame(obs).drop(np.arange(p-(k+1)*B, p-k*B), axis = 0)
S[k] = np.cov(all_obs[k], bias = True)
#%%
# Creating the groups
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# In this section, we create two important objects for the non-conforming case using functionalities of ``GGLasso``:
#
# ``ix_location`` is a dataframe with every variable as index and one column per instance. It contains the index of the variable in the respective instance (``NaN`` when not present).
#
# ``G`` is a bookkeeping array which keeps count of the indices of each group of overlapping variables. It is needed in the solver later on.
#
# **Important:** We only consider pairs of variables which appear in at least 3 instances here!
ix_exist, ix_location = construct_indexer(list(all_obs.values()))
G = create_group_array(ix_exist, ix_location, min_inst = K-1)
check_G(G, p)
print("Dimensions p_k: ", p_arr)
print("Sample sizes N_k: ", num_samples)
print("Number of groups found: ", G.shape[1])
#%%
# Visualizing
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# We visualize the case of non-conforming variables by plotting the given empirical covariance matrices.
# Missing variable observations are in **white**.
#
fig, axs = plt.subplots(2,2, figsize = (8,8))
for k in range(K):
ind = ix_exist.index[ix_exist.loc[:,k]]
S_k = pd.DataFrame(S[k], index = ind, columns = ind)
# extend matrix by nonexistent variables
S_k = S_k.reindex(columns = ix_exist.index, index = ix_exist.index)
ax = axs.ravel()[k]
sns.heatmap(S_k, ax = ax, cmap = plt.cm.coolwarm, linewidth = 0.005, linecolor = 'lightgrey',\
cbar = False, vmin = -.5, vmax = .5, xticklabels = [], yticklabels = [])
ax.set_title(f"Empirical covariance, instance {k}")
#%%
# Defining the GGL problem
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# We now create the instance of the Group Graphical Lasso problem. As we are in the non-conforming case, we need to specify the array ``G`` which we created before.
reg = 'GGL'
P = glasso_problem(S = S, N = num_samples, reg = reg, reg_params = None, latent = True, G = G, do_scaling = True)
print(P)
#%%
# Model selection
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Set the regularization parameter grids and do model selection.
l1 = np.logspace(1,-1,5)
mu1 = np.logspace(1,-1,3)
l2 = np.logspace(0,-2,4)
modelselect_params = {'lambda1_range' : l1, 'mu1_range': mu1, 'lambda2_range': l2}
P.model_selection(modelselect_params = modelselect_params, method = 'eBIC', gamma = 0.1, tol = 1e-7, rtol = 1e-7)
print(P.reg_params)
#%%
# Evaluation of results
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# We print the ratio of non-zero entries per instance.
# We plot the distribution of non-zero entries per group.
# With group sparsity regularization we aim for many groups with either no non-zero or many non-zero entries per group.
#
print("Solution sparsity (ratio of nonzero entries): ")
for k in np.arange(K):
print(f"Instance {k}: ", sparsity(P.solution.precision_[k]))
stats = P.modelselect_stats.copy()
nnz,_,_ = consensus(P.solution.precision_, G)
fig, ax = plt.subplots()
sns.histplot(nnz, discrete = True, ax = ax)
ax.set_yscale('log')
ax.set_title('Nonzero entries per group')
|
<reponame>anuragpeshne/DRL_collab
import numpy as np
import random
import copy
from collections import namedtuple, deque
from model import Actor, Critic
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e6) # replay buffer size
BATCH_SIZE = 128 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 8e-3 # for soft update of target parameters
LR_ACTOR = 3e-3 # learning rate of the actor
LR_CRITIC = 4e-4 # learning rate of the critic
WEIGHT_DECAY = 0 # L2 weight decay
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class MADDPG():
"""Multi Agent DDPG agent"""
def __init__(self, state_size, action_size, random_seed, num_agents):
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed)
self.action_size = action_size
self.agents = [Agent(state_size, action_size, num_agents, random_seed) for i in range(num_agents)]
def act(self, state):
actions = [agent.act(observation) for agent, observation in zip(self.agents, state)]
return actions
def step(self, state, action, reward, next_state, done):
self.memory.add(state, action, reward, next_state, done)
if len(self.memory) > BATCH_SIZE:
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def learn(self, experiences, gamma):
for agent_id, agent in enumerate(self.agents):
states, actions, rewards, next_states, dones = experiences
observations = states.view(BATCH_SIZE, -1)
actions = actions.view(BATCH_SIZE, -1)
next_observations = next_states.view(BATCH_SIZE, -1)
# take rewards and dones for this agent only
r = rewards[:, agent_id].unsqueeze_(1)
dones = dones[:, agent_id].unsqueeze_(1)
#---------------------update critic-------------------------------- #
# Get predicted next-state actions and Q values from target models
actions_next = self.actions_target(next_states)
actions_next = actions_next.view(BATCH_SIZE, -1)
Q_targets_next = agent.critic_target(next_observations, actions_next)
# Compute Q targets for current states (y_i)
Q_targets = r + (gamma * Q_targets_next * (1 - dones))
# Compute critic loss
Q_expected = agent.critic_local(observations, actions)
critic_loss = F.mse_loss(Q_expected, Q_targets)
# Minimize the loss
agent.critic_optimizer.zero_grad()
critic_loss.backward()
agent.critic_optimizer.step()
#----------------------update actor-------------------------------#
agent.actor_optimizer.zero_grad()
actions_local = self.actions_local(states, agent_id)
actions_local = actions_local.view(BATCH_SIZE, -1)
q_value_predicted = agent.critic_local(observations, actions_local)
loss = -q_value_predicted.mean()
loss.backward()
agent.actor_optimizer.step()
# ----------------------- update target networks ----------------------- #
for agent in self.agents:
agent.soft_update(agent.critic_local, agent.critic_target, TAU)
agent.soft_update(agent.actor_local, agent.actor_target, TAU)
def reset(self):
for agent in self.agents:
agent.reset()
def actions_target(self, states):
with torch.no_grad():
actions = torch.empty(
(BATCH_SIZE, len(self.agents), self.action_size),
device=device)
for idx, agent in enumerate(self.agents):
actions[:,idx] = agent.actor_target(states[:,idx])
return actions
def actions_local(self, states, agent_id):
actions = torch.empty(
(BATCH_SIZE, len(self.agents), self.action_size),
device=device)
for idx, agent in enumerate(self.agents):
action = agent.actor_local(states[:,idx])
            if idx != agent_id:
                action = action.detach()  # block gradients through other agents' actors
actions[:,idx] = action
return actions
def actor_state_dict(self):
return [agent.actor_local.state_dict() for agent in self.agents]
def load_actor_state_dict(self, state_dict_list):
for agent, state_dict in zip(self.agents, state_dict_list):
agent.actor_local.load_state_dict(state_dict)
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, num_agents, random_seed):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
random_seed (int): random seed
replay_buffer (obj): In MADDPG replay buffer is shared among all agents
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(random_seed)
# Actor Network (w/ Target Network)
self.actor_local = Actor(state_size, action_size, random_seed).to(device)
self.actor_target = Actor(state_size, action_size, random_seed).to(device)
self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)
# Critic Network (w/ Target Network)
self.critic_local = Critic(state_size, action_size, num_agents, random_seed).to(device)
self.critic_target = Critic(state_size, action_size, num_agents, random_seed).to(device)
self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY)
# Noise process
self.noise = OUNoise(action_size, random_seed)
def act(self, state, add_noise=True):
"""Returns actions for given state as per current policy."""
state = torch.from_numpy(state).float().to(device)
self.actor_local.eval()
with torch.no_grad():
action = self.actor_local(state).cpu().data.numpy()
self.actor_local.train()
if add_noise:
action += self.noise.sample()
return np.clip(action, -1, 1)
def reset(self):
self.noise.reset()
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model: PyTorch model (weights will be copied from)
target_model: PyTorch model (weights will be copied to)
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class OUNoise:
"""Ornstein-Uhlenbeck process."""
def __init__(self, size, seed, mu=0., theta=0.15, sigma=0.1):
"""Initialize parameters and noise process."""
self.mu = mu * np.ones(size)
self.theta = theta
self.sigma = sigma
self.seed = random.seed(seed)
self.reset()
def reset(self):
"""Reset the internal state (= noise) to mean (mu)."""
self.state = copy.copy(self.mu)
def sample(self):
"""Update internal state and return it as a noise sample."""
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.random.standard_normal(len(x))
self.state = x + dx
return self.state
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""Initialize a ReplayBuffer object.
Params
======
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size) # internal memory (deque)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
self.seed = random.seed(seed)
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.array([e.state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.array([e.action for e in experiences if e is not None])).float().to(device)
rewards = torch.from_numpy(np.array([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.array([e.next_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
return (states, actions, rewards, next_states, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory)
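
# --- Training-loop sketch (illustrative) -------------------------------------
# `env` and its reset/step interface are assumptions for illustration; only the
# MADDPG methods used here (act, step, reset) are defined in this module.
def run_training_sketch(env, num_agents, state_size, action_size, n_episodes=1000):
    maddpg = MADDPG(state_size, action_size, random_seed=0, num_agents=num_agents)
    for _ in range(n_episodes):
        states = env.reset()                      # assumed shape: [num_agents, state_size]
        maddpg.reset()                            # reset each agent's OU noise
        dones = [False] * num_agents
        while not any(dones):
            actions = maddpg.act(states)          # one action per agent
            next_states, rewards, dones = env.step(actions)  # assumed interface
            maddpg.step(states, actions, rewards, next_states, dones)
            states = next_states
    return maddpg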
|
<gh_stars>1000+
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Airflow plugin for ML Engine, backported from v1.9."""
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.plugins_manager import AirflowPlugin
from airflow.utils.decorators import apply_defaults
from apiclient.discovery import build
from googleapiclient import errors
import logging
from oauth2client.client import GoogleCredentials
import re
import time
class MLEngineHook(GoogleCloudBaseHook):
"""Hook for ML Engine."""
def __init__(self, gcp_conn_id='google_cloud_default', delegate_to=None):
super(MLEngineHook, self).__init__(gcp_conn_id, delegate_to)
self._mlengine = self.get_conn()
def normalize_mlengine_job_id(self, job_id):
"""Replaces invalid MLEngine job_id characters with '_'.
This also adds a leading 'z' in case job_id starts with an invalid
character.
Args:
job_id: A job_id str that may have invalid characters.
Returns:
A valid job_id representation.
"""
match = re.search(r'\d', job_id)
        if match and match.start() == 0:
job_id = 'z_{}'.format(job_id)
return re.sub('[^0-9a-zA-Z]+', '_', job_id)
def get_conn(self):
"""Returns a Google MLEngine service object."""
credentials = GoogleCredentials.get_application_default()
return build('ml', 'v1', credentials=credentials)
def create_job(self, project_id, job, use_existing_job_fn=None):
"""Launches a MLEngine job and wait for it to reach a terminal state.
Args:
project_id: project id
job: job name
use_existing_job_fn: existing job to use
Returns:
          The MLEngine job object if the job successfully reaches a
          terminal state (which may be SUCCEEDED, FAILED, or CANCELLED).
"""
request = self._mlengine.projects().jobs().create(
parent='projects/{}'.format(project_id),
body=job)
job_id = job['jobId']
try:
request.execute()
except errors.HttpError as e:
# 409 means there is an existing job with the same job ID.
if e.resp.status == 409:
if use_existing_job_fn is not None:
existing_job = self._get_job(project_id, job_id)
if not use_existing_job_fn(existing_job):
logging.error(
                            'Job with job_id %s already exists, but it does '
'not match our expectation: %s',
job_id, existing_job
)
raise
logging.info(
                    'Job with job_id %s already exists; will wait for it to finish.',
job_id
)
else:
logging.error('Failed to create MLEngine job: %s', e)
raise
return self._wait_for_job_done(project_id, job_id)
def _get_job(self, project_id, job_id):
"""Gets a MLEngine job based on the job name.
Args:
project_id: project id
job_id: job id
Returns:
MLEngine job object if succeed.
Raises:
apiclient.errors.HttpError: if HTTP error is returned from server
"""
job_name = 'projects/{}/jobs/{}'.format(project_id, job_id)
request = self._mlengine.projects().jobs().get(name=job_name)
while True:
try:
return request.execute()
except errors.HttpError as e:
if e.resp.status == 429:
# polling after 30 seconds when quota failure occurs
time.sleep(30)
else:
logging.error('Failed to get MLEngine job: %s', e)
raise
def _wait_for_job_done(self, project_id, job_id, interval=30):
"""Waits for the Job to reach a terminal state.
This method will periodically check the job state until the job reach
a terminal state.
Args:
project_id: project id
job_id: job id
interval: check interval in seconds
Returns:
MLEngine job object if succeed.
Raises:
apiclient.errors.HttpError: if HTTP error is returned when getting
the job
"""
assert interval > 0
while True:
job = self._get_job(project_id, job_id)
if job['state'] in ['SUCCEEDED', 'FAILED', 'CANCELLED']:
return job
time.sleep(interval)
class MLEngineTrainingOperator(BaseOperator):
"""Operator for launching a MLEngine training job.
"""
@apply_defaults
def __init__(self,
project_id,
job_id,
package_uris,
training_python_module,
training_args,
region,
scale_tier=None,
master_type=None,
gcp_conn_id='google_cloud_default',
delegate_to=None,
mode='PRODUCTION',
*args,
**kwargs):
super(MLEngineTrainingOperator, self).__init__(*args, **kwargs)
self._project_id = project_id
self._job_id = job_id
self._package_uris = package_uris
self._training_python_module = training_python_module
self._training_args = training_args
self._region = region
self._scale_tier = scale_tier
self._master_type = master_type
self._gcp_conn_id = gcp_conn_id
self._delegate_to = delegate_to
self._mode = mode
if not self._project_id:
raise AirflowException('Google Cloud project id is required.')
if not self._job_id:
raise AirflowException(
                'A unique job id is required for Google MLEngine training '
'job.')
if not package_uris:
raise AirflowException(
'At least one python package is required for MLEngine '
'Training job.')
if not training_python_module:
raise AirflowException(
'Python module name to run after installing required '
'packages is required.')
if not self._region:
raise AirflowException('Google Compute Engine region is required.')
def execute(self, context):
hook = MLEngineHook(
gcp_conn_id=self._gcp_conn_id, delegate_to=self._delegate_to)
job_id = hook.normalize_mlengine_job_id(self._job_id)
training_request = {
'jobId': job_id,
'trainingInput': {
'scaleTier': self._scale_tier,
'packageUris': self._package_uris,
'pythonModule': self._training_python_module,
'region': self._region,
'args': self._training_args,
'masterType': self._master_type
}
}
if self._mode == 'DRY_RUN':
logging.info('In dry_run mode.')
logging.info('MLEngine Training job request is: %s', training_request)
return
# Helper method to check if the existing job's training input is the
# same as the request we get here.
def check_existing_job(existing_job):
return (existing_job.get('trainingInput', None)
== training_request['trainingInput'])
try:
finished_training_job = hook.create_job(
self._project_id, training_request, check_existing_job)
except errors.HttpError:
raise
if finished_training_job['state'] != 'SUCCEEDED':
logging.error('MLEngine training job failed: %s',
str(finished_training_job))
raise RuntimeError(finished_training_job['errorMessage'])
# Plugin class for GoogleMLEngine
class GoogleMLEnginePlugin(AirflowPlugin):
name = 'ml_engine_plugin'
operators = [MLEngineTrainingOperator]
hooks = [MLEngineHook]
executors = []
macros = []
admin_views = []
flask_blueprints = []
menu_links = []
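# --- Editor's hedged usage sketch (not part of the original plugin) ---
# Illustrates what MLEngineHook.normalize_mlengine_job_id produces: runs of
# characters outside [0-9a-zA-Z] collapse to '_' and a leading digit gains a
# 'z_' prefix, since MLEngine job ids must start with a letter. The helper
# mirrors the hook's regex logic so it runs without any GCP credentials.
def _normalize_job_id_example(job_id):
    match = re.search(r'\d', job_id)
    if match and match.start() == 0:
        job_id = 'z_{}'.format(job_id)
    return re.sub('[^0-9a-zA-Z]+', '_', job_id)

assert _normalize_job_id_example('3rd-train.run') == 'z_3rd_train_run'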
|
import unittest
from mock import Mock, call
import photo
import json
from StringIO import StringIO
import requests
from download import FlickrApiDownloader
class ThrowsTwice:
def __init__(self, successful_response):
self.successful_response = successful_response
self.count = 0
def get(self, url):
self.count += 1
if self.count == 3:
resp = Mock(spec = requests.models.Response)
resp.status_code = 200
resp.content = self.successful_response
return resp
else:
raise requests.exceptions.ConnectionError('nope')
class ErrorsTwice:
def __init__(self, successful_response):
self.successful_response = successful_response
self.count = 0
def get(self, url):
self.count += 1
resp = Mock(spec = requests.models.Response)
if self.count == 3:
resp.status_code = 200
resp.content = self.successful_response
else:
resp.status_code = 500
return resp
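# Editor's hedged sketch (not part of the original tests): ThrowsTwice and
# ErrorsTwice above assume download code that retries a GET until it sees a
# 200, succeeding on the third attempt. A minimal retry loop consistent with
# these mocks might look like:
def _example_get_with_retries(session, url, attempts=3):
    last_error = None
    for _ in range(attempts):
        try:
            resp = session.get(url)
        except requests.exceptions.ConnectionError as e:
            last_error = e
            continue
        if resp.status_code == 200:
            return resp.content
    raise last_error or requests.exceptions.RequestException('bad status: ' + url)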
class UnmockedUrlException(Exception):
pass
class MockRequests:
def __init__(self):
self.contents = {}
def get(self, url):
if url not in self.contents:
raise UnmockedUrlException('Un-mocked URL: ' + url)
return MockResponse(self.contents[url])
class MockResponse:
def __init__(self, content):
self.content = content
class MockFlickrApi:
def __init__(self, photo_infos):
self.photos = MockFlickrPhotos(photo_infos)
class MockFlickrPhotos:
def __init__(self, photo_infos):
side_effect = lambda **kwargs: \
json.dumps(photo_infos[kwargs['photo_id']])
self.getInfo = Mock(side_effect=side_effect)
class TestPhoto(unittest.TestCase):
def test_download_originals(self):
photos = [
{'id': '25461030990', 'url_o': 'https://farm2.staticflickr.com/1521/25461030990_3621f6ae2d_o.jpg'},
{'id': '23793491473', 'url_o': 'https://farm2.staticflickr.com/1514/23793491473_11cf9041b4_o.jpg'}
]
responses = [
'\xff\xd8\xff\xe1\x16&Exif\x00\x00II*\x00\x08\x00\x00\x00',
'\xff\xd8\xff\xe1\x16&Exif\x00\x00II*\x00\x08\x00\x00\x01'
]
requests = MockRequests()
for i in xrange(0, len(photos)):
requests.contents[photos[i]['url_o']] = responses[i]
file_store = Mock()
file_store.exists.return_value = False
photo.download_originals(photos, [], file_store, requests, StringIO())
file_store.save_image.assert_has_calls([
call('originals/25461030990_o.jpg', responses[0]),
call('originals/23793491473_o.jpg', responses[1])])
def test_download_originals_exception_retries(self):
photos = [{'id': '25461030990', 'url_o': 'https://farm2.staticflickr.com/1521/25461030990_3621f6ae2d_o.jpg'}]
response = '\xff\xd8\xff\xe1\x16&Exif\x00\x00II*\x00\x08\x00\x00\x00'
requests = ThrowsTwice(response)
file_store = Mock()
file_store.exists.return_value = False
photo.download_originals(photos, [], file_store, requests, StringIO())
file_store.save_image.assert_has_calls([
call('originals/25461030990_o.jpg', response)])
def test_download_originals_bad_status_retries(self):
photos = [{'id': '25461030990', 'url_o': 'https://farm2.staticflickr.com/1521/25461030990_3621f6ae2d_o.jpg'}]
response = '\xff\xd8\xff\xe1\x16&Exif\x00\x00II*\x00\x08\x00\x00\x00'
requests = ErrorsTwice(response)
file_store = Mock()
file_store.exists.return_value = False
photo.download_originals(photos, [], file_store, requests, StringIO())
file_store.save_image.assert_has_calls([
call('originals/25461030990_o.jpg', response)])
def test_download_originals_eventually_fails(self):
photos = [{'id': '25461030990', 'url_o': 'https://farm2.staticflickr.com/1521/25461030990_3621f6ae2d_o.jpg'}]
requests = MockRequests() # And don't provide a response
file_store = Mock()
file_store.exists.return_value = False
threw = False
try:
photo.download_originals(photos, [], file_store, requests,
StringIO())
except UnmockedUrlException:
threw = True
self.assertTrue(threw)
file_store.save_image.assert_not_called()
def test_download_originals_skips_existing(self):
photos = [{'id': '25461030990', 'url_o': 'https://farm2.staticflickr.com/1521/25461030990_3621f6ae2d_o.jpg'}]
requests = Mock()
file_store = Mock()
file_store.exists.return_value = True
photo.download_originals(photos, [], file_store, requests, StringIO())
self.assertEqual(requests.get.call_count, 0)
def test_download_originals_downloads_modified(self):
photos = [
{'id': '25461030990', 'url_o': 'https://farm2.staticflickr.com/1521/25461030990_3621f6ae2d_o.jpg'},
{'id': '23793491473', 'url_o': 'https://farm2.staticflickr.com/1514/23793491473_11cf9041b4_o.jpg'}
]
response = '\xff\xd8\xff\xe1\x16&Exif\x00\x00II*\x00\x08\x00\x00\x00'
requests = MockRequests()
        for i in xrange(0, len(photos)):
            requests.contents[photos[i]['url_o']] = response
file_store = Mock()
file_store.exists.return_value = True
photo.download_originals(photos, ['25461030990'], file_store, requests,
StringIO())
file_store.save_image.assert_called_with(
'originals/25461030990_o.jpg', response)
def test_download_info(self):
photos = [
{'id': '1'},
{'id': '2'}
]
responses = {
'1': { "photo": { "id": "1", "secret": "s1" }, "stat": "ok" },
'2': { "photo": { "id": "2", "secret": "s2" }, "stat": "ok" }
}
file_store = Mock()
file_store.exists.return_value = False
downloader = FlickrApiDownloader(file_store, Mock())
photo.download_info(photos, downloader, MockFlickrApi(responses),
StringIO())
file_store.save_json.assert_has_calls([
call('photo-info/1.json', responses['1']['photo']),
call('photo-info/2.json', responses['2']['photo'])
])
def test_download_infos_skips_existing(self):
photos = [{'id': '1'}]
file_store = Mock()
file_store.exists.return_value = True
flickr = MockFlickrApi({'1': {'photo': {}}})
downloader = FlickrApiDownloader(file_store, Mock())
photo.download_info(photos, downloader, flickr, StringIO())
self.assertEqual(flickr.photos.getInfo.call_count, 0)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# pylint: disable-msg=E1103
import collections
import functools
import os
import os.path
import cPickle as pickle
import fcntl
import hashlib
from itertools import ifilterfalse
from heapq import nsmallest
from operator import itemgetter
persistent_cache_directory = '~/.alex_persistent_cache'
class Counter(dict):
'Mapping where default values are zero'
def __missing__(self, key):
return 0
def lru_cache(maxsize=100):
'''Least-recently-used cache decorator.
Arguments to the cached function must be hashable.
Cache performance statistics stored in f.hits and f.misses.
Clear the cache with f.clear().
http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
'''
maxqueue = maxsize * 10
def decorator(user_function, len=len, iter=iter, tuple=tuple, sorted=sorted, KeyError=KeyError):
cache = {} # mapping of args to results
queue = collections.deque() # order that keys have been used
refcount = Counter() # times each key is in the queue
sentinel = object() # marker for looping around the queue
kwd_mark = object() # separate positional and keyword args
# lookup optimizations (ugly but fast)
queue_append, queue_popleft = queue.append, queue.popleft
queue_appendleft, queue_pop = queue.appendleft, queue.pop
@functools.wraps(user_function)
def wrapper(*args, **kwds):
# cache key records both positional and keyword args
key = args
if kwds:
key += (kwd_mark,) + tuple(sorted(kwds.items()))
# record recent use of this key
queue_append(key)
refcount[key] += 1
# get cache entry or compute if not found
try:
result = cache[key]
wrapper.hits += 1
except KeyError:
result = user_function(*args, **kwds)
cache[key] = result
wrapper.misses += 1
# purge least recently used cache entry
if len(cache) > maxsize:
key = queue_popleft()
refcount[key] -= 1
while refcount[key]:
key = queue_popleft()
refcount[key] -= 1
del cache[key], refcount[key]
# periodically compact the queue by eliminating duplicate keys
# while preserving order of most recent access
if len(queue) > maxqueue:
refcount.clear()
queue_appendleft(sentinel)
for key in ifilterfalse(refcount.__contains__,
iter(queue_pop, sentinel)):
queue_appendleft(key)
refcount[key] = 1
return result
def clear():
cache.clear()
queue.clear()
refcount.clear()
wrapper.hits = wrapper.misses = 0
wrapper.hits = wrapper.misses = 0
wrapper.clear = clear
return wrapper
return decorator
def lfu_cache(maxsize=100):
'''Least-frequently-used cache decorator.
Arguments to the cached function must be hashable.
Cache performance statistics stored in f.hits and f.misses.
Clear the cache with f.clear().
http://en.wikipedia.org/wiki/Least_Frequently_Used
'''
def decorator(user_function):
cache = {} # mapping of args to results
use_count = Counter() # times each key has been accessed
kwarg_mark = object() # separate positional and keyword args
@functools.wraps(user_function)
def wrapper(*args, **kwargs):
key = args
if kwargs:
key += (kwarg_mark,) + tuple(sorted(kwargs.items()))
# get cache entry or compute if not found
try:
result = cache[key]
use_count[key] += 1
wrapper.hits += 1
except KeyError:
# need to add something to the cache, make room if necessary
if len(cache) == maxsize:
for k, _ in nsmallest(maxsize // 10 or 1,
use_count.iteritems(),
key=itemgetter(1)):
del cache[k], use_count[k]
cache[key] = user_function(*args, **kwargs)
result = cache[key]
use_count[key] += 1
wrapper.misses += 1
return result
def clear():
cache.clear()
use_count.clear()
wrapper.hits = wrapper.misses = 0
wrapper.hits = wrapper.misses = 0
wrapper.clear = clear
wrapper.cache = cache
return wrapper
return decorator
def get_persistent_cache_content(key):
key_name = os.path.join(persistent_cache_directory, '_'.join([str(i) for i in key]).replace(' ', '_'))
try:
        # You cannot take an exclusive lock without asking for write
        # permissions, therefore use "r+" mode.
f = open(key_name, 'r+b')
fcntl.lockf(f, fcntl.LOCK_EX)
except IOError:
raise KeyError
data = pickle.load(f)
fcntl.lockf(f, fcntl.LOCK_UN)
f.close()
return data
def set_persistent_cache_content(key, value):
key_name = os.path.join(persistent_cache_directory,'_'.join([str(i) for i in key]).replace(' ', '_'))
f = open(key_name, 'wb')
fcntl.lockf(f, fcntl.LOCK_EX)
    pickle.dump(value, f)
fcntl.lockf(f, fcntl.LOCK_UN)
f.close()
def persistent_cache(method=False, file_prefix='', file_suffix=''):
'''Persistent cache decorator.
It grows indefinitely.
Arguments to the cached function must be hashable.
Cache performance statistics stored in f.hits and f.misses.
'''
def decorator(user_function):
@functools.wraps(user_function)
def wrapper(*args, **kwds):
key = (file_prefix,)
if method:
key += args[1:]
else:
key += args
if kwds:
key += tuple(sorted(kwds.items()))
key += (file_suffix,)
key = (hashlib.sha224(str(key)).hexdigest(),)
try:
                result = get_persistent_cache_content(key)
wrapper.hits += 1
except KeyError:
result = user_function(*args, **kwds)
wrapper.misses += 1
# record this key
                set_persistent_cache_content(key, result)
return result
wrapper.hits = wrapper.misses = 0
return wrapper
return decorator
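# Editor's hedged usage sketch (not part of the original module): the
# persistent cache keys are derived from the decorated call's arguments, so
# identical calls hit the same on-disk entry across processes:
#
#     @persistent_cache(False, 'slow_lookup')
#     def slow_lookup(x):
#         ...  # expensive work
#
#     slow_lookup(1)   # miss: computes and writes to ~/.alex_persistent_cache
#     slow_lookup(1)   # hit: read back from disk (even in another process)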
persistent_cache_directory = os.path.expanduser(persistent_cache_directory)
if not os.path.exists(persistent_cache_directory):
os.makedirs(persistent_cache_directory)
if __name__ == '__main__':
# pylint: disable-msg=E1101
print "Testing the LRU and LFU cache decorators."
print "=" * 120
print "LRU cache"
@lru_cache(maxsize=40)
def f1(x, y):
return 3 * x + y
domain = range(5)
from random import choice
for i in range(1000):
r = f1(choice(domain), choice(domain))
    print f1.hits, f1.misses
print "LFU cache"
@lfu_cache(maxsize=40)
def f2(x, y):
return 3 * x + y
domain = range(5)
from random import choice
for i in range(1000):
r = f2(choice(domain), choice(domain))
    print f2.hits, f2.misses
print "persistent LRU cache"
@persistent_cache(False, 'f3')
def f3(x, y):
return 3 * x + y
domain = range(5)
from random import choice
for i in range(1000):
r = f3(choice(domain), choice(domain))
    print f3.hits, f3.misses
|
import errno
import os
from functools import reduce
import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from pyro.contrib.examples.util import get_data_directory
# This file contains utilities for caching, transforming and splitting MNIST
# data efficiently. By default, a PyTorch DataLoader applies the transform on
# every epoch; we avoid this by caching the transformed data once, up front,
# in the MNISTCached class.
# transformations for MNIST data
def fn_x_mnist(x, use_cuda):
# normalize pixel values of the image to be in [0,1] instead of [0,255]
xp = x * (1. / 255)
    # reshape x from bs * a1 * a2 * ... to a flat bs * A tensor
xp_1d_size = reduce(lambda a, b: a * b, xp.size()[1:])
xp = xp.view(-1, xp_1d_size)
# send the data to GPU(s)
if use_cuda:
xp = xp.cuda()
return xp
def fn_y_mnist(y, use_cuda):
yp = torch.zeros(y.size(0), 10)
# send the data to GPU(s)
if use_cuda:
yp = yp.cuda()
y = y.cuda()
    # transform the label y (an integer between 0 and 9) to a one-hot vector
yp = yp.scatter_(1, y.view(-1, 1), 1.0)
return yp
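# Editor's hedged illustration (not used by the module): fn_y_mnist builds the
# one-hot encoding with scatter_; e.g. labels [2, 0] become rows with a single
# 1.0 at columns 2 and 0 respectively.
def _one_hot_example():
    y = torch.tensor([2, 0])
    yp = torch.zeros(2, 10).scatter_(1, y.view(-1, 1), 1.0)
    return yp  # shape (2, 10), with yp[0, 2] == yp[1, 0] == 1.0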
def get_ss_indices_per_class(y, sup_per_class):
# number of indices to consider
n_idxs = y.size()[0]
# calculate the indices per class
idxs_per_class = {j: [] for j in range(10)}
# for each index identify the class and add the index to the right class
for i in range(n_idxs):
curr_y = y[i]
for j in range(10):
if curr_y[j] == 1:
idxs_per_class[j].append(i)
break
idxs_sup = []
idxs_unsup = []
for j in range(10):
np.random.shuffle(idxs_per_class[j])
idxs_sup.extend(idxs_per_class[j][:sup_per_class])
idxs_unsup.extend(idxs_per_class[j][sup_per_class:len(idxs_per_class[j])])
return idxs_sup, idxs_unsup
def split_sup_unsup_valid(X, y, sup_num, validation_num=10000):
"""
helper function for splitting the data into supervised, un-supervised and validation parts
:param X: images
:param y: labels (digits)
:param sup_num: what number of examples is supervised
:param validation_num: what number of last examples to use for validation
:return: splits of data by sup_num number of supervised examples
"""
    # the validation set is the last validation_num examples
X_valid = X[-validation_num:]
y_valid = y[-validation_num:]
X = X[0:-validation_num]
y = y[0:-validation_num]
assert sup_num % 10 == 0, "unable to have equal number of images per class"
# number of supervised examples per class
sup_per_class = int(sup_num / 10)
idxs_sup, idxs_unsup = get_ss_indices_per_class(y, sup_per_class)
X_sup = X[idxs_sup]
y_sup = y[idxs_sup]
X_unsup = X[idxs_unsup]
y_unsup = y[idxs_unsup]
return X_sup, y_sup, X_unsup, y_unsup, X_valid, y_valid
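# Editor's hedged example (not part of the module): with the 60000 MNIST
# training examples, validation_num=10000 and sup_num=3000, the split yields
# 10000 validation examples, 300 supervised examples per class (3000 total)
# and 47000 unsupervised examples.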
def print_distribution_labels(y):
"""
helper function for printing the distribution of class labels in a dataset
:param y: tensor of class labels given as one-hots
    :return: None; prints a dictionary of counts for each label from y
"""
counts = {j: 0 for j in range(10)}
for i in range(y.size()[0]):
for j in range(10):
if y[i][j] == 1:
counts[j] += 1
break
print(counts)
class MNISTCached(MNIST):
"""
a wrapper around MNIST to load and cache the transformed data
once at the beginning of the inference
"""
# static class variables for caching training data
train_data_size = 50000
train_data_sup, train_labels_sup = None, None
train_data_unsup, train_labels_unsup = None, None
validation_size = 10000
data_valid, labels_valid = None, None
test_size = 10000
def __init__(self, mode, sup_num, use_cuda=True, *args, **kwargs):
super(MNISTCached, self).__init__(train=mode in ["sup", "unsup", "valid"], *args, **kwargs)
# transformations on MNIST data (normalization and one-hot conversion for labels)
def transform(x):
return fn_x_mnist(x, use_cuda)
def target_transform(y):
return fn_y_mnist(y, use_cuda)
self.mode = mode
assert mode in ["sup", "unsup", "test", "valid"], "invalid train/test option values"
if mode in ["sup", "unsup", "valid"]:
# transform the training data if transformations are provided
if transform is not None:
self.train_data = (transform(self.train_data.float()))
if target_transform is not None:
self.train_labels = (target_transform(self.train_labels))
if MNISTCached.train_data_sup is None:
if sup_num is None:
assert mode == "unsup"
MNISTCached.train_data_unsup, MNISTCached.train_labels_unsup = \
self.train_data, self.train_labels
else:
MNISTCached.train_data_sup, MNISTCached.train_labels_sup, \
MNISTCached.train_data_unsup, MNISTCached.train_labels_unsup, \
MNISTCached.data_valid, MNISTCached.labels_valid = \
split_sup_unsup_valid(self.train_data, self.train_labels, sup_num)
if mode == "sup":
self.train_data, self.train_labels = MNISTCached.train_data_sup, MNISTCached.train_labels_sup
elif mode == "unsup":
self.train_data = MNISTCached.train_data_unsup
# making sure that the unsupervised labels are not available to inference
self.train_labels = (torch.Tensor(
MNISTCached.train_labels_unsup.shape[0]).view(-1, 1)) * np.nan
else:
self.train_data, self.train_labels = MNISTCached.data_valid, MNISTCached.labels_valid
else:
# transform the testing data if transformations are provided
if transform is not None:
self.test_data = (transform(self.test_data.float()))
if target_transform is not None:
self.test_labels = (target_transform(self.test_labels))
def __getitem__(self, index):
"""
:param index: Index or slice object
:returns tuple: (image, target) where target is index of the target class.
"""
if self.mode in ["sup", "unsup", "valid"]:
img, target = self.train_data[index], self.train_labels[index]
elif self.mode == "test":
img, target = self.test_data[index], self.test_labels[index]
else:
assert False, "invalid mode: {}".format(self.mode)
return img, target
def setup_data_loaders(dataset, use_cuda, batch_size, sup_num=None, root=None, download=True, **kwargs):
"""
helper function for setting up pytorch data loaders for a semi-supervised dataset
:param dataset: the data to use
:param use_cuda: use GPU(s) for training
:param batch_size: size of a batch of data to output when iterating over the data loaders
:param sup_num: number of supervised data examples
:param download: download the dataset (if it doesn't exist already)
:param kwargs: other params for the pytorch data loader
:return: three data loaders: (supervised data for training, un-supervised data for training,
supervised data for testing)
"""
# instantiate the dataset as training/testing sets
if root is None:
root = get_data_directory(__file__)
    kwargs.setdefault('num_workers', 0)
    kwargs.setdefault('pin_memory', False)
cached_data = {}
loaders = {}
for mode in ["unsup", "test", "sup", "valid"]:
if sup_num is None and mode == "sup":
# in this special case, we do not want "sup" and "valid" data loaders
return loaders["unsup"], loaders["test"]
cached_data[mode] = dataset(root=root, mode=mode, download=download,
sup_num=sup_num, use_cuda=use_cuda)
loaders[mode] = DataLoader(cached_data[mode], batch_size=batch_size, shuffle=True, **kwargs)
return loaders
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
EXAMPLE_DIR = os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir)))
DATA_DIR = os.path.join(EXAMPLE_DIR, 'data')
RESULTS_DIR = os.path.join(EXAMPLE_DIR, 'results')
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=missing-function-docstring
"""Test library of quantum circuits."""
from ddt import data, ddt
from qiskit.test import QiskitTestCase
from qiskit.circuit import bit
from qiskit.circuit import QuantumRegister
from qiskit.circuit import AncillaRegister
from qiskit.circuit import ClassicalRegister
from qiskit.circuit.exceptions import CircuitError
@ddt
class TestRegisterClass(QiskitTestCase):
"""Tests for Register class."""
@data(QuantumRegister, ClassicalRegister, AncillaRegister)
def test_raise_on_init_with_invalid_size(self, reg_type):
with self.assertRaisesRegex(CircuitError, "must be an integer"):
_ = reg_type(1j, 'foo')
@data(QuantumRegister, ClassicalRegister, AncillaRegister)
def test_raise_if_init_passed_both_size_and_bits(self, reg_type):
bits = [reg_type.bit_type()]
with self.assertRaisesRegex(CircuitError, "Exactly one of the size or bits"):
_ = reg_type(1, 'foo', bits)
@data(QuantumRegister, ClassicalRegister, AncillaRegister)
def test_init_raise_if_bits_of_incorrect_type(self, reg_type):
bits = [bit.Bit()]
with self.assertRaisesRegex(CircuitError, "did not all match register type"):
_ = reg_type(bits=bits)
@data(QuantumRegister, ClassicalRegister, AncillaRegister)
def test_init_raise_if_passed_invalid_name(self, reg_type):
with self.assertRaisesRegex(CircuitError, "invalid OPENQASM register name"):
_ = reg_type(size=1, name='_q')
@data(QuantumRegister, ClassicalRegister, AncillaRegister)
def test_implicit_bit_construction_from_size(self, reg_type):
reg = reg_type(2)
self.assertEqual(len(reg), 2)
self.assertEqual(reg.size, 2)
self.assertTrue(all(isinstance(bit, reg.bit_type)
for bit in reg))
@data(QuantumRegister, ClassicalRegister, AncillaRegister)
def test_implicit_size_calculation_from_bits(self, reg_type):
bits = [reg_type.bit_type() for _ in range(3)]
reg = reg_type(bits=bits)
self.assertEqual(reg.size, 3)
@data(QuantumRegister, ClassicalRegister, AncillaRegister)
def test_oldstyle_register_eq(self, reg_type):
test_reg = reg_type(3, 'foo')
self.assertEqual(test_reg, test_reg)
reg_copy = reg_type(3, 'foo')
self.assertEqual(reg_copy, test_reg)
reg_larger = reg_type(4, 'foo')
self.assertNotEqual(reg_larger, test_reg)
reg_renamed = reg_type(3, 'bar')
self.assertNotEqual(reg_renamed, test_reg)
difftype = ({QuantumRegister, ClassicalRegister, AncillaRegister} - {reg_type}).pop()
reg_difftype = difftype(3, 'foo')
self.assertNotEqual(reg_difftype, test_reg)
@data(QuantumRegister, ClassicalRegister, AncillaRegister)
def test_newstyle_register_eq(self, reg_type):
test_bits = [reg_type.bit_type() for _ in range(3)]
test_reg = reg_type(name='foo', bits=test_bits)
self.assertEqual(test_reg, test_reg)
reg_samebits = reg_type(name='foo', bits=test_bits)
self.assertEqual(reg_samebits, test_reg)
test_diffbits = [reg_type.bit_type() for _ in range(3)]
reg_diffbits = reg_type(name='foo', bits=test_diffbits)
self.assertNotEqual(reg_diffbits, test_reg)
reg_oldstyle = reg_type(3, 'foo')
self.assertNotEqual(reg_oldstyle, test_reg)
test_largerbits = [reg_type.bit_type() for _ in range(4)]
reg_larger = reg_type(name='foo', bits=test_largerbits)
self.assertNotEqual(reg_larger, test_reg)
reg_renamed = reg_type(name='bar', bits=test_bits)
self.assertNotEqual(reg_renamed, test_reg)
difftype = ({QuantumRegister, ClassicalRegister, AncillaRegister} - {reg_type}).pop()
bits_difftype = [difftype.bit_type() for _ in range(3)]
reg_difftype = difftype(name='foo', bits=bits_difftype)
self.assertNotEqual(reg_difftype, test_reg)
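# Editor's hedged note (not part of the original tests): the equality regimes
# exercised above are, roughly, that old-style registers (built from a size)
# compare by type, name and size, while new-style registers (built from
# explicit bits) additionally require the very same bit instances, so two
# registers built from freshly constructed bits compare unequal even when
# their names and lengths match.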
|
import hashlib
try:
import cPickle as pickle
except:
import pickle
import logging as _logging
import zlib
import time
import gc
import cloudstorage as gcs
from google.appengine.ext import ndb
from google.appengine.api import app_identity
__author__ = 'fernando'
CACHE_TIMEOUT = 86400*7
TEXTCHARS = ''.join(map(chr, [7,8,9,10,12,13,27] + range(0x20, 0x100)))
logging = _logging.getLogger("matrufsc2_cache")
logging.setLevel(_logging.WARNING)
gcs.set_default_retry_params(
gcs.RetryParams(
initial_delay=0.2,
max_delay=5.0,
min_retries=10,
max_retries=60,
backoff_factor=2,
max_retry_period=30,
urlfetch_timeout=60
)
)
class CacheItem(object):
__slots__ = ["value", "expire_on"]
class LRUItem(object):
__slots__ = ["value", "key", "updated_on", "accessed_on"]
def __repr__(self):
if hasattr(self, "value"):
return repr(self.value)
else:
return super(LRUItem, self).__repr__()
def __str__(self):
if hasattr(self, "value"):
return str(self.value)
else:
return super(LRUItem, self).__str__()
class LRUCache(dict):
__slots__ = ["capacity", "expiration", "run_gc"]
def __init__(self, *args, **kwargs):
super(LRUCache, self).__init__(*args, **kwargs)
self.capacity = 1000
self.expiration = 86400
self.run_gc = 0
def set_capacity(self, capacity):
self.capacity = capacity
def get_capacity(self):
return self.capacity
def set_expiration(self, expiration):
self.expiration = expiration
def get_expiration(self):
return self.expiration
def __getitem__(self, item):
val = super(LRUCache, self).__getitem__(item)
val.accessed_on = self.check()
return val.value
def get(self, k, d=None):
try:
return self[k]
except KeyError:
return d
def pop(self, k, d=None):
val = self.get(k, d)
try:
del self[k]
except KeyError:
pass
return val
def check(self):
self.run_gc = (self.run_gc+1)%self.capacity
now = time.time()
dif = len(self)-self.capacity
if dif > 0 or self.run_gc == 0:
            # First remove expired items (updated_on holds the expiry time,
            # set to accessed_on + expiration in __setitem__)
            expired_items = [item for item in self.itervalues() if now > item.updated_on]
for expired_item in expired_items:
del self[expired_item.key]
dif -= 1
gc_collect()
while dif > 0 and self:
expired_item = min(self.itervalues(), key=lambda item: item.accessed_on)
del self[expired_item.key]
dif -= 1
del expired_item
gc_collect()
return now
def __setitem__(self, key, value):
val = LRUItem()
val.key = key
val.value = value
val.accessed_on = self.check()
val.updated_on = val.accessed_on + self.expiration
result = super(LRUCache, self).__setitem__(key, val)
return result
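# Editor's hedged note (not part of the original module): LRUCache.check()
# runs on every read and write; whenever the dict exceeds its capacity, or
# roughly every `capacity` operations (when run_gc wraps back to 0), it first
# drops expired entries and then evicts the least recently accessed items
# until the cache fits again.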
lru_cache = LRUCache()
lru_cache.set_capacity(250) # 250 items
lru_cache.set_expiration(3600) # For 3600 seconds
ndb_context = ndb.get_context()
def gc_collect():
collected = gc.collect()
old_collected = 0
while collected != old_collected:
old_collected = collected
collected += gc.collect()
if collected > 0:
logging.warning("Collected %d objects with GC", collected)
garbage = len(gc.garbage)
if garbage:
logging.warning("There are %d objects with reference cycles", garbage)
@ndb.tasklet
def get_gcs_filename(filename):
bucket_name = lru_cache.get("matrufsc2_bucket_name")
if not bucket_name:
bucket_name = yield ndb_context.memcache_get("matrufsc2_bucket_name")
if not bucket_name:
bucket_name = app_identity.get_default_gcs_bucket_name()
yield ndb_context.memcache_set("matrufsc2_bucket_name", bucket_name, CACHE_TIMEOUT)
lru_cache["matrufsc2_bucket_name"] = bucket_name
bucket = "/" + bucket_name
raise ndb.Return("/".join([bucket, filename]))
@ndb.tasklet
def get_from_cache(key, persistent=True, memcache=True, log=True):
logging.debug("Fetching key '%s' from cache", key)
try:
raise ndb.Return(lru_cache[key])
except KeyError:
pass
start = time.time()
if memcache:
result = yield ndb_context.memcache_get(key, use_cache=False)
if result is not None:
size = "small"
if isinstance(result, basestring) and result.translate(None, TEXTCHARS):
                # If the result is a binary-looking string it may be a
                # compressed pickle (the "large item" path); try to
                # decompress and unpickle it first.
                try:
result = pickle.loads(zlib.decompress(result, 15, 2097152))
size = "large"
except:
logging.warn("Error when decompressing content, ignoring..")
pass
if log:
logging.debug("Found (%s) item on memcache in %f seconds..Returning", size, time.time()-start)
logging.debug("Saving item on LRU Cache..")
lru_cache[key] = result
raise ndb.Return(result)
if persistent:
start = time.time()
filename = yield get_gcs_filename(key)
try:
gcs_file = gcs.open(filename, 'r')
value = gcs_file.read()
result = pickle.loads(value)
gcs_file.close()
if log:
logging.debug("Found item on GCS in %f seconds..Returning", time.time()-start)
logging.debug("Saving item on LRU Cache")
lru_cache[key] = result
try:
size = len(value)
if size >= 1e6:
value = zlib.compress(value, 9)
if len(value) < 1e6:
if log:
logging.debug("Saving (large) item on memcached (it has %d bytes)..", size)
yield ndb_context.memcache_set(key, value, CACHE_TIMEOUT)
else:
logging.warn("Ignoring large item because it does not fit on memcache :~")
else:
if log:
logging.debug("Saving (small) item on memcached (it has %d bytes)..", size)
yield ndb_context.memcache_set(key, result, CACHE_TIMEOUT)
except:
pass
raise ndb.Return(result)
except Exception, e:
if isinstance(e, ndb.Return):
raise e
elif not isinstance(e, gcs.NotFoundError):
logging.exception("Error detected when getting from GCS")
def set(key, value, ttl=None):
"""
Save the item in a non persistent way, compatible to Memcached API.
:param key: The key that will be saved
:param value: The value that will be saved
    :param ttl: The TTL (in seconds) of the item that will be saved
:return:
"""
item = CacheItem()
item.value = value
if ttl is None:
item.expire_on = None
else:
item.expire_on = time.time()+ttl
key = "memcache-friendly-%s"%hashlib.sha1(key).hexdigest()
return set_into_cache(key, item, persistent=False).get_result()
def get(key):
key = "memcache-friendly-%s"%hashlib.sha1(key).hexdigest()
value = get_from_cache(key).get_result()
if not value:
return value
if isinstance(value, CacheItem):
if value.expire_on is not None and value.expire_on < time.time():
value = None
else:
value = value.value
return value
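# Editor's hedged usage sketch (not part of the original module): the
# memcached-compatible wrappers above hash the key and enforce the ttl on
# read, e.g.
#     set('semesters', ['2015-1', '2015-2'], ttl=600)
#     get('semesters')  # -> the list, or None once the ttl has expired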
@ndb.tasklet
def set_into_cache(key, value, persistent=True, memcache=True, log=True):
lru_cache[key] = value
pickled_value = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
size = len(pickled_value)
if log:
logging.debug("The content saved to cache in the key '%s' has %d bytes", key, size)
if memcache:
try:
if size >= 1e6:
compressed_value = zlib.compress(pickled_value, 9)
if len(compressed_value) < 1e6:
if log:
logging.debug("Saving (large) item on memcached (it has %d bytes)..", size)
yield ndb_context.memcache_set(key, compressed_value, CACHE_TIMEOUT)
else:
logging.warn("Ignoring large item because it does not fit on memcache :~")
else:
if log:
logging.debug("Saving (small) item on memcached")
yield ndb_context.memcache_set(key, value, CACHE_TIMEOUT)
except:
pass
if persistent:
try:
if log:
logging.debug("Saving item on GCS..")
filename = yield get_gcs_filename(key)
gcs_file = gcs.open(filename, 'w')
gcs_file.write(pickled_value)
gcs_file.close()
if log:
logging.debug("Saved item on GCS..")
except:
logging.exception("There is an error when saving to GCS, but okay :v")
pass
@ndb.tasklet
def delete_from_cache(key, persistent=True):
logging.debug("Deleting key '%s' from cache", key)
lru_cache.pop(key, None)
yield ndb_context.memcache_delete(key, CACHE_TIMEOUT)
if persistent:
try:
filename = yield get_gcs_filename(key)
gcs.delete(filename)
except:
logging.exception("There is an error when deleting from GCS, but okay :v")
pass
def clear_lru_cache():
logging.warning("Clearing %d items of the LRU Cache", len(lru_cache))
lru_cache.clear()
    gc_collect()
|
<filename>pyeffects/Try.py
# -*- coding: utf-8 -*-
"""
pyeffects.Try
~~~~~~~~~~~~~
This module implements the Try, Success, and Failure classes.
"""
from typing import Callable, List, Type, TypeVar, Union
from .Monad import Monad
A = TypeVar('A', covariant=True)
B = TypeVar('B')
class Try(Monad[A]):
@staticmethod
def of(func_or_value: Union[B, Callable[[], B]]) -> 'Try[B]':
"""Constructs a :class:`Try <Try>`.
:param func_or_value: function or value to construct a new :class:`Try` object
:rtype: pyEffects.Try
Usage::
>>> from pyeffects.Try import *
>>> Try.of(lambda: 5)
Success(5)
>>> Try.of("abc")
Success(abc)
>>> def error():
... raise Exception("failed")
...
>>> Try.of(error)
Failure(failed)
"""
try:
value = func_or_value() if hasattr(func_or_value, "__call__") else func_or_value # type: ignore
return Success(value) # type: ignore
except Exception as err:
return Failure(err)
def flat_map(self, func: Callable[[A], 'Monad[B]']) -> 'Monad[B]':
"""Flatmaps a function for :class:`Try <Try>`.
:param func: function returning a pyEffects.Try to apply to flat_map.
:rtype: pyEffects.Try
Usage::
>>> from pyeffects.Try import *
>>> Success(5).flat_map(lambda v: Success(v * v))
Success(25)
"""
if not hasattr(func, "__call__"):
raise TypeError("Try.flat_map expects a callable")
if self.is_success():
return func(self.value)
else:
return self # type: ignore
def recover(self, err: Type[Exception], recover: Union[B, Callable[[], B]]) -> 'Try[B]':
"""Recover from an exception for :class:`Try <Try>`.
:param err: The class of exception to recover from.
:param recover: The function to apply when recovering from an exception
:rtype: pyEffects.Try
Usage::
>>> from pyeffects.Try import *
>>> def error():
... raise RuntimeError("failed")
...
>>> Try.of(error).recover(RuntimeError, "abc")
Success(abc)
"""
if self.is_failure() and isinstance(self.value, err):
return Try.of(recover)
return self # type: ignore
def recovers(self, errs: List[Type[Exception]], recover: Union[B, Callable[[], B]]) -> 'Try[B]':
"""Recover from an exception for :class:`Try <Try>`.
:param errs: A list of classes of exceptions to recover from.
:param recover: The function to apply when recovering from an exception
:rtype: pyEffects.Try
Usage::
>>> from pyeffects.Try import *
>>> def error():
... raise RuntimeError("failed")
...
>>> Try.of(error).recovers([RuntimeError, NotImplementedError], "abc")
Success(abc)
"""
if not isinstance(errs, list):
raise TypeError("Try.recovers expects a list of errors as the 1nd arg")
if self.is_failure() and any([isinstance(self.value, e) for e in errs]):
return Try.of(recover)
return self # type: ignore
def error(self) -> Exception: # type: ignore
"""Recover the exception for :class:`Try <Try>`.
:rtype: pyEffects.Try
Usage::
>>> from pyeffects.Try import *
>>> def error():
... raise RuntimeError()
...
>>> Try.of(error).error()
RuntimeError()
"""
if self.is_failure():
return self.value # type: ignore
def is_success(self) -> bool:
"""Return is success for :class:`Try <Try>`.
:rtype: pyEffects.Try
Usage::
>>> from pyeffects.Try import *
>>> Failure(RuntimeError()).is_success()
False
"""
return self.biased
def is_failure(self) -> bool:
"""Return is failure for :class:`Try <Try>`.
:rtype: pyEffects.Try
Usage::
>>> from pyeffects.Try import *
>>> Failure(RuntimeError()).is_failure()
True
"""
return not self.is_success()
class Failure(Try[A]):
def __init__(self, value: Exception) -> None:
self.value = value # type: ignore
self.biased = False
def __repr__(self) -> str:
return 'Failure(' + str(self.value) + ')'
class Success(Try[A]):
def __init__(self, value: A) -> None:
self.value = value
self.biased = True
def __repr__(self) -> str:
return 'Success(' + str(self.value) + ')'
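# Editor's hedged usage sketch (not part of the original module): Try values
# chain, so a Failure short-circuits flat_map and can be recovered afterwards:
#
#     >>> def boom():
#     ...     raise RuntimeError("failed")
#     ...
#     >>> Try.of(boom).flat_map(lambda v: Success(v + 1)).recover(RuntimeError, 0)
#     Success(0)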
|
<reponame>KhanJr/Generative-Adversarial-Networks-COMPUTER-VISION
class helpyou:
"""
    PreRequisite : Knowledge of Python basics (classes, modules, functions), PyTorch, neural networks, activation functions.
    Installation : pytorch (CUDA 10.2), torchvision, pillow, mpi4py, numpy.
    To train and get generated images, run design.py
************************************************************************************************************************************************************************************
PAPER :
GAN, MD-GAN :
            Generative Adversarial Networks, Multi-Discriminator Generative Adversarial Networks for Distributed Datasets
AUTHORS :
PAPER I - [ <NAME>, <NAME>-Abadie∗, <NAME>, <NAME>, <NAME>,<NAME>†, <NAME>, <NAME>‡ ]
PAPER II - [ <NAME>, <NAME>, <NAME> ]
LINK :
PAPER I : https://arxiv.org/pdf/1406.2661.pdf
PAPER II : https://arxiv.org/pdf/1811.03850v2.pdf
************************************************************************************************************************************************************************************
Main libraries :
        numpy : Provides fast multidimensional arrays.
        mpi4py : MPI for Python supports convenient, pickle-based communication of generic Python objects as well as fast, near C-speed, direct array data communication of buffer-provider objects.
torch : An open source machine learning framework that accelerates the path from research prototyping to production deployment.(Official site)
        torch.nn : Base class for all neural network modules; our models also subclass this class.
        torch.optim : torch.optim is a package implementing various optimization algorithms. Most commonly used methods are already supported, and the interface is general enough
                      that more sophisticated ones can also be easily integrated in the future.
torch.utils.data : It represents a python iterable over a dataset.
torch.nn.parallel : This container parallelizes the application of the given module by splitting the input across the specified devices by chunking in the batch dimension.
torchvision : The torchvision package consists of popular datasets, model architectures, and common image transformations for computer vision.
Other libraries :
random, os.
************************************************************************************************************************************************************************************
Important Variables :
        size : Represents the size of the message passed to shuffle the Discriminators in a peer-to-peer fashion.
        rank : Selects which Discriminator runs in its respective position (we use two discriminators, ranked 1/0: 1 runs next, 0 is currently running).
        datasets : This variable is the heart of the programme because it downloads and stores the dataset.
        dataloader : At the heart of the PyTorch data loading utility is the torch.utils.data.DataLoader class; we use one large dataset, CIFAR10, in this implementation.
************************************************************************************************************************************************************************************
Main Class :
        G() : This class creates the Generator neural network by subclassing torch.nn.Module.
        D() : This class creates the Discriminator neural network by subclassing torch.nn.Module.
************************************************************************************************************************************************************************************
Main function :
        copyGenerator : Creates a copy of the generator so workers can learn from the generator's feedback.
        shuffleDiscriminators : Shuffles the discriminators on the basis of rank after every 2 epochs.
************************************************************************************************************************************************************************************
    Pseudocode of the implementation :
        Algorithm 1: MD-GAN algorithm
         1: procedure WORKER(C, B_n, I, L, b)
         2:     Initialize theta_n for D_n
         3:     for i ← 1 to I do
         4:         X_n^(r) ← SAMPLES(B_n, b)
         5:         X_n^(g), X_n^(d) ← RECEIVEBATCHES(C)
         6:         for l ← 0 to L do
         7:             D_n ← DISCLEARNINGSTEP(J_disc, D_n)
         8:         end for
         9:         F_n ← { ∂B̃(X_n^(g))/∂x_i | x_i ∈ X_n^(g) }
        10:         SEND(C, F_n)               ▷ Send F_n to the server.
        11:         if i mod (mE/b) = 0 then
        12:             D_n ← SWAP(D_n)
        13:         end if
        14:     end for
        15: end procedure
        16:
        17: procedure SWAP(D_n)
        18:     W_l ← GETRANDOMWORKER()
        19:     SEND(W_l, D_n)                 ▷ Send D_n to worker W_l.
        20:     D_n ← RECEIVED()               ▷ Receive a new discriminator from another worker.
        21:     return D_n
        22: end procedure
        23:
        24: procedure SERVER(k, I)             ▷ Server C.
        25:     Initialize w for G
        26:     for i ← 1 to I do
        27:         for j ← 0 to k do
        28:             Z_j ← GAUSSIANNOISE(b)
        29:             X^(j) ← { G_w(z) | z ∈ Z_j }
        30:         end for
        31:         X_1^(d), ..., X_n^(d) ← SPLIT(X^(1), ..., X^(k))
        32:         X_1^(g), ..., X_n^(g) ← SPLIT(X^(1), ..., X^(k))
        33:         for n ← 1 to N do
        34:             SEND(W_n, (X_n^(d), X_n^(g)))
        35:         end for
        36:         F_1, ..., F_N ← GETFEEDBACKFROMWORKERS()
        37:         Compute ∆w according to F_1, ..., F_N
        38:         for w_i ∈ w do
        39:             w_i ← w_i + ADAM(∆w_i)
        40:         end for
        41:     end for
        42: end procedure
"""
print(help(helpyou))
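# Editor's hedged sketch (not part of the original file): the peer-to-peer
# discriminator shuffle described above could be expressed with mpi4py roughly
# as below, where each worker sends its discriminator state to the next rank
# and receives one from the previous rank. The ring topology and names here
# are illustrative assumptions, not the repository's actual implementation.
#
#     from mpi4py import MPI
#
#     def shuffle_discriminators_example(state_dict):
#         comm = MPI.COMM_WORLD
#         rank, size = comm.Get_rank(), comm.Get_size()
#         dest, src = (rank + 1) % size, (rank - 1) % size
#         return comm.sendrecv(state_dict, dest=dest, source=src)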
|
<filename>pliers/tests/test_stims.py
from .utils import get_test_data_path
from pliers.stimuli import (VideoStim, VideoFrameStim, ComplexTextStim,
AudioStim, ImageStim, CompoundStim,
TranscribedAudioCompoundStim,
TextStim)
from pliers.stimuli.base import Stim, _get_stim_class
from pliers.extractors import BrightnessExtractor
from pliers.extractors.base import Extractor, ExtractorResult
from pliers.support.download import download_nltk_data
import numpy as np
from os.path import join, exists
import pandas as pd
import pytest
class DummyExtractor(Extractor):
_input_type = Stim
def _extract(self, stim):
return ExtractorResult(np.array([[1]]), stim, self,
features=['constant'])
class DummyIterableExtractor(Extractor):
_input_type = Stim
def _extract(self, stim):
time_bins = np.arange(0., stim.duration, 1.)
return ExtractorResult(np.array([1] * len(time_bins)), stim, self,
features=['constant'], onsets=time_bins,
durations=[1.] * len(time_bins))
@pytest.fixture(scope='module')
def get_nltk():
download_nltk_data()
@pytest.fixture(scope='module')
def dummy_extractor():
return DummyExtractor()
@pytest.fixture(scope='module')
def dummy_iter_extractor():
return DummyIterableExtractor()
def test_image_stim(dummy_iter_extractor):
filename = join(get_test_data_path(), 'image', 'apple.jpg')
stim = ImageStim(filename)
assert stim.data.shape == (288, 420, 3)
def test_video_stim():
''' Test VideoStim functionality. '''
filename = join(get_test_data_path(), 'video', 'small.mp4')
video = VideoStim(filename)
assert video.fps == 30
assert video.n_frames in (167, 168)
assert video.width == 560
# Test frame iterator
frames = [f for f in video]
assert len(frames) == 168
f1 = frames[100]
assert isinstance(f1, VideoFrameStim)
assert isinstance(f1.onset, float)
    assert f1.data.shape == (320, 560, 3)
# Test getting of specific frame
f2 = video.get_frame(index=100)
assert isinstance(f2, VideoFrameStim)
assert isinstance(f2.onset, float)
    assert f2.data.shape == (320, 560, 3)
def test_audio_stim(dummy_iter_extractor):
audio_dir = join(get_test_data_path(), 'audio')
stim = AudioStim(join(audio_dir, 'barber.wav'), sampling_rate=11025)
assert round(stim.duration) == 57
assert stim.sampling_rate == 11025
def test_audio_formats():
audio_dir = join(get_test_data_path(), 'audio')
stim = AudioStim(join(audio_dir, 'crowd.mp3'))
assert round(stim.duration) == 28
assert stim.sampling_rate == 44100
def test_complex_text_stim():
text_dir = join(get_test_data_path(), 'text')
stim = ComplexTextStim(join(text_dir, 'complex_stim_no_header.txt'),
columns='ot', default_duration=0.2)
assert len(stim.elements) == 4
assert stim.elements[2].onset == 34
assert stim.elements[2].duration == 0.2
stim = ComplexTextStim(join(text_dir, 'complex_stim_with_header.txt'))
assert len(stim.elements) == 4
assert stim.elements[2].duration == 0.1
def test_complex_stim_from_text():
textfile = join(get_test_data_path(), 'text', 'scandal.txt')
text = open(textfile).read().strip()
stim = ComplexTextStim(text=text)
target = ['To', 'Sherlock', 'Holmes']
assert [w.text for w in stim.elements[:3]] == target
assert len(stim.elements) == 231
stim = ComplexTextStim(text=text, unit='sent')
# Custom tokenizer
    stim = ComplexTextStim(text=text, tokenizer=r'(\w+)')
assert len(stim.elements) == 209
def test_complex_stim_from_srt():
srtfile = join(get_test_data_path(), 'text', 'wonderful.srt')
textfile = join(get_test_data_path(), 'text', 'wonderful.txt')
df = pd.read_csv(textfile, sep='\t')
target = df["text"].tolist()
srt_stim = ComplexTextStim(srtfile)
texts = [sent.text for sent in srt_stim.elements]
assert texts == target
def test_get_stim():
assert issubclass(_get_stim_class('video'), VideoStim)
assert issubclass(_get_stim_class('ComplexTextStim'), ComplexTextStim)
assert issubclass(_get_stim_class('video_frame'), VideoFrameStim)
def test_compound_stim():
audio_dir = join(get_test_data_path(), 'audio')
audio = AudioStim(join(audio_dir, 'crowd.mp3'))
image1 = ImageStim(join(get_test_data_path(), 'image', 'apple.jpg'))
image2 = ImageStim(join(get_test_data_path(), 'image', 'obama.jpg'))
filename = join(get_test_data_path(), 'video', 'small.mp4')
video = VideoStim(filename)
text = ComplexTextStim(text="The quick brown fox jumped...")
stim = CompoundStim([audio, image1, image2, video, text])
assert len(stim.elements) == 5
assert isinstance(stim.video, VideoStim)
assert isinstance(stim.complex_text, ComplexTextStim)
assert isinstance(stim.image, ImageStim)
with pytest.raises(AttributeError):
stim.nonexistent_type
assert stim.video_frame is None
imgs = stim.get_stim(ImageStim, return_all=True)
assert len(imgs) == 2
assert all([isinstance(im, ImageStim) for im in imgs])
also_imgs = stim.get_stim('image', return_all=True)
assert imgs == also_imgs
def test_transformations_on_compound_stim():
image1 = ImageStim(join(get_test_data_path(), 'image', 'apple.jpg'))
image2 = ImageStim(join(get_test_data_path(), 'image', 'obama.jpg'))
text = ComplexTextStim(text="The quick brown fox jumped...")
stim = CompoundStim([image1, image2, text])
ext = BrightnessExtractor()
results = ext.transform(stim)
assert len(results) == 2
assert np.allclose(results[0].data[0], 0.88784294)
def test_transcribed_audio_stim():
audio = AudioStim(join(get_test_data_path(), 'audio', "barber_edited.wav"))
text_file = join(get_test_data_path(), 'text', "wonderful_edited.srt")
text = ComplexTextStim(text_file)
stim = TranscribedAudioCompoundStim(audio=audio, text=text)
assert isinstance(stim.audio, AudioStim)
assert isinstance(stim.complex_text, ComplexTextStim)
def test_remote_stims():
url = 'http://www.obamadownloads.com/videos/iran-deal-speech.mp4'
video = VideoStim(url=url)
assert video.fps == 12
url = 'http://www.bobainsworth.com/wav/simpsons/themodyn.wav'
audio = AudioStim(url=url)
assert round(audio.duration) == 3
url = 'https://www.whitehouse.gov/sites/whitehouse.gov/files/images/twitter_cards_potus.jpg'
image = ImageStim(url=url)
assert image.data.shape == (240, 240, 3)
url = 'https://github.com/tyarkoni/pliers/blob/master/README.md'
text = TextStim(url=url)
assert len(text.text) > 1
def test_get_filename():
url = 'http://www.bobainsworth.com/wav/simpsons/themodyn.wav'
audio = AudioStim(url=url)
with audio.get_filename() as filename:
assert exists(filename)
assert not exists(filename)
url = 'https://tuition.utexas.edu/sites/all/themes/tuition/logo.png'
image = ImageStim(url=url)
with image.get_filename() as filename:
assert exists(filename)
assert not exists(filename)
|
<reponame>Hybrid-Cloud/birdie-dashboard
# Copyright (c) 2017 Huawei, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import template
from django.template import defaultfilters as filters
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from conveyordashboard.common import actions as common_actions
from conveyordashboard.common import constants as consts
from conveyordashboard.common import resource_state
class CreatePlan(common_actions.CreatePlan):
def allowed(self, request, pool=None):
return pool.status in resource_state.POOL_CLONE_STATE
STATUS_CHOICES = (
("Active", True),
)
STATUS_DISPLAY_CHOICES = (
("Active", pgettext_lazy("Current status of a Pool",
u"Active")),
)
ADMIN_STATE_DISPLAY_CHOICES = (
("UP", pgettext_lazy("Admin state of a Load balancer", u"UP")),
("DOWN", pgettext_lazy("Admin state of a Load balancer", u"DOWN")),
)
def get_vip_name(pool):
if hasattr(pool, "vip") and pool.vip:
template_name = 'project/loadbalancers/_pool_table_vip_cell.html'
context = {"vip": pool.vip, }
return template.loader.render_to_string(template_name, context)
else:
return None
def get_subnet(pool):
if hasattr(pool, "subnet") and pool.subnet:
template_name = 'project/loadbalancers/_pool_table_subnet_cell.html'
context = {"subnet": pool.subnet}
return template.loader.render_to_string(template_name, context)
else:
return None
class PoolsTable(tables.DataTable):
METHOD_DISPLAY_CHOICES = (
("round_robin", pgettext_lazy("load balancing method",
u"Round Robin")),
("least_connections", pgettext_lazy("load balancing method",
u"Least Connections")),
("source_ip", pgettext_lazy("load balancing method",
u"Source IP")),
)
name = tables.Column("name_or_id",
verbose_name=_("Name"),
link="horizon:project:loadbalancers:pooldetails")
description = tables.Column('description', verbose_name=_("Description"))
provider = tables.Column('provider', verbose_name=_("Provider"),
filters=(lambda v: filters.default(v, _('N/A')),))
subnet_name = tables.Column(get_subnet, verbose_name=_("Subnet"))
protocol = tables.Column('protocol', verbose_name=_("Protocol"))
method = tables.Column('lb_method',
verbose_name=_("LB Method"),
display_choices=METHOD_DISPLAY_CHOICES)
status = tables.Column('status',
verbose_name=_("Status"),
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
vip_name = tables.Column(get_vip_name, verbose_name=_("VIP"))
admin_state = tables.Column("admin_state",
verbose_name=_("Admin State"),
display_choices=ADMIN_STATE_DISPLAY_CHOICES)
class Meta(object):
name = "poolstable"
verbose_name = _("Pools")
css_classes = ' '.join(['table-res', consts.NEUTRON_POOL])
table_actions = (common_actions.CreatePlanWithMultiRes,)
row_actions = (CreatePlan,)
|
import numpy as np
from .dist_func import get_distance_L2_pbc
from numba import njit
from .projection_func import *
# Programmer: <NAME>
# Date: 4.29.2021
def get_comp_perimeter(width=200.,height=200.):
    '''returns comp_perimeter
    comp_perimeter(contour) sums the L2 lengths of the segments of a closed
    contour, obeying periodic boundary conditions on a width-x-height square
    domain, and returns the arclength in pixels.
    Example Usage:
    comp_perimeter=get_comp_perimeter(width=200,height=200)
    arclen=comp_perimeter(contour)
    '''
distance_L2_pbc=get_distance_L2_pbc(width=width,height=height)
@njit
def comp_perimeter(contour):
Nseg=contour.shape[0]
arclen=0.
for i in range(-1,Nseg-1):
arclen+=distance_L2_pbc(contour[i],contour[i+1])
return arclen #arclength in pixels
return comp_perimeter
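# Editor's hedged example (not part of the original module): on a 200x200
# periodic domain, a 10x10 square contour has perimeter 40, and the pbc
# metric keeps that true even when the contour straddles a domain edge:
#     comp_perimeter = get_comp_perimeter(width=200., height=200.)
#     square = np.array([[0., 0.], [10., 0.], [10., 10.], [0., 10.]])
#     comp_perimeter(square)  # -> 40.0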
#DONE: compute the node indices for all spiral tips
#DONE(later): integrate ^this into a function
def get_fix_node_id(width=200.,height=200.):
project_point_2D=get_project_point_2D(width=width,height=height)
def fix_node_id(contour,point,node_id):
node_id_out=node_id
segment=contour[node_id_out:node_id_out+2]
frac=project_point_2D(point, segment)
if not (frac>=0)&(frac<1):
# print('closest node index is not valid')
node_id_out=node_id-1
segment=contour[node_id_out:node_id_out+2]
frac=project_point_2D(point, segment)
if not (frac>=0)&(frac<1):
# print('prev is not valid')
node_id_out=node_id+1
segment=contour[node_id_out:node_id_out+2]
frac=project_point_2D(point, segment)
if not (frac>=0)&(frac<1):
# print('next is not valid')
node_id_out=node_id+2
segment=contour[node_id_out:node_id_out+2]
frac=project_point_2D(point, segment)
if not (frac>=0)&(frac<1):
# print('next next is not valid')
node_id_out=node_id-2
segment=contour[node_id_out:node_id_out+2]
frac=project_point_2D(point, segment)
if not (frac>=0)&(frac<1):
# print('prev prev prev is not valid')
node_id_out=node_id-3
segment=contour[node_id_out:node_id_out+2]
frac=project_point_2D(point, segment)
if not (frac>=0)&(frac<1):
# print('next next next is not valid')
node_id_out=node_id+3
segment=contour[node_id_out:node_id_out+2]
frac=project_point_2D(point, segment)
if not (frac>=0)&(frac<1):
# print('prev prev is not valid')
# print('no valid start found... returning input node_id...')
# node_id_out=node_id
print('.', end='\r')
# print("Warning: no valid start found for input")
# raise Exception(f'no valid start found for input, contour,point,node_id={(contour,point,node_id)}')
node_id_out=node_id
return node_id_out
return fix_node_id
def get_segment_pbc(node_start,N_nodes,contour):
    # Return the 2-point segment [node_start, node_start+1] of the closed
    # contour, wrapping the indices so the segment that closes the contour
    # (last node -> first node) is also returned as a valid 2x2 array.
    na=node_start % N_nodes
    nb=(node_start+1) % N_nodes
    segment=np.stack([contour[na],contour[nb]])
    # assert(segment.shape==(2,2))
    return segment
def compare_spiralarm_size(j_lst, j_nxt_lst, archlen_size_lst):
'''supposes pid_lst=list(range(ntips))
Example Usage:
greater_i_lst,lesser_i_lst=compare_spiralarm_size(j_lst, j_nxt_lst, archlen_size_lst)
'''
ntips=len(j_lst)
#iterate over archlen observations
pid_lst=list(range(ntips))
pid_to_i ={}
pid_to_i_nxt ={}
for i in pid_lst:#range(len(j_lst)):
pid_to_i.update({pid_lst[j_lst[i]]:i}) # map from i^th observation start to spiral tip index
pid_to_i_nxt.update({pid_lst[j_nxt_lst[i]]:i}) # map from i^th observation end to spiral tip index
# print(pid_to_i)
# print(pid_to_i_nxt)
#iterate over particles
greater_i_lst=[]
lesser_i_lst=[]
for pid in pid_lst:
# try:
size_right = archlen_size_lst[pid_to_i[pid]]
size_left = archlen_size_lst[pid_to_i_nxt[pid]]
# except Exception as e:
# print(pid,archlen_size_lst,pid_to_i,pid_to_i_nxt)
# raise(e)
if size_right>size_left:
greater_i=pid_to_i[pid]
lesser_i =pid_to_i_nxt[pid]
# greater_archlen_values=archlen_values_lst[greater_i]
# lesser_archlen_values=archlen_values_lst[lesser_i]
else:
lesser_i=pid_to_i[pid]
greater_i =pid_to_i_nxt[pid]
greater_i_lst.append(greater_i)
lesser_i_lst.append(lesser_i)
return greater_i_lst,lesser_i_lst
def compare_spiralarm_voltage(j_lst, j_nxt_lst, avgVoltage_lst):
'''supposes pid_lst=list(range(ntips))
Example Usage:
    greater_i_lst,lesser_i_lst=compare_spiralarm_voltage(j_lst, j_nxt_lst, avgVoltage_lst)
'''
ntips=len(j_lst)
#iterate over archlen observations
pid_lst=list(range(ntips))
pid_to_i ={}
pid_to_i_nxt ={}
for i in pid_lst:#range(len(j_lst)):
pid_to_i.update({pid_lst[j_lst[i]]:i}) # map from i^th observation start to spiral tip index
pid_to_i_nxt.update({pid_lst[j_nxt_lst[i]]:i}) # map from i^th observation end to spiral tip index
# print(pid_to_i)
# print(pid_to_i_nxt)
#iterate over particles
greater_i_lst=[]
lesser_i_lst=[]
for pid in pid_lst:
# try:
size_right = avgVoltage_lst[pid_to_i[pid]]
size_left = avgVoltage_lst[pid_to_i_nxt[pid]]
# except Exception as e:
# print(pid,archlen_size_lst,pid_to_i,pid_to_i_nxt)
# raise(e)
if size_right>size_left:
greater_i=pid_to_i[pid]
lesser_i =pid_to_i_nxt[pid]
# greater_archlen_values=archlen_values_lst[greater_i]
# lesser_archlen_values=archlen_values_lst[lesser_i]
else:
lesser_i=pid_to_i[pid]
greater_i =pid_to_i_nxt[pid]
greater_i_lst.append(greater_i)
lesser_i_lst.append(lesser_i)
return greater_i_lst,lesser_i_lst
|
import unittest
from configparser import RawConfigParser
from io import StringIO
from typing import Dict, List, Union
from imperfect import ConfigFile
from parameterized import parameterized
from dowsing.setuptools.types import (
BoolWriter,
DictWriter,
ListCommaWriter,
ListCommaWriterCompat,
ListSemiWriter,
SectionWriter,
StrWriter,
)
class WriterTest(unittest.TestCase):
@parameterized.expand( # type: ignore
[(False,), (True,),]
)
def test_bool_writer(self, arg: bool) -> None:
c = ConfigFile()
c.set_value("a", "b", BoolWriter().to_ini(arg))
buf = StringIO()
c.build(buf)
rcp = RawConfigParser(strict=False)
rcp.read_string(buf.getvalue())
self.assertEqual(str(arg).lower(), rcp["a"]["b"])
@parameterized.expand( # type: ignore
[("hello",), ("a\nb\nc",),]
)
def test_str_writer(self, arg: str) -> None:
c = ConfigFile()
c.set_value("a", "b", StrWriter().to_ini(arg))
buf = StringIO()
c.build(buf)
rcp = RawConfigParser(strict=False)
rcp.read_string(buf.getvalue())
self.assertEqual(arg, rcp["a"]["b"])
@parameterized.expand( # type: ignore
[
([], ""),
(["a"], "\na"),
(["a", "b"], "\na\nb"),
(["a", "b", "c"], "\na\nb\nc"),
]
)
def test_list_comma_writer(self, arg: List[str], expected: str) -> None:
c = ConfigFile()
c.set_value("a", "b", ListCommaWriter().to_ini(arg))
buf = StringIO()
c.build(buf)
rcp = RawConfigParser(strict=False)
rcp.read_string(buf.getvalue())
self.assertEqual(expected, rcp["a"]["b"])
@parameterized.expand( # type: ignore
[
([], ""),
(["a"], "\na"),
(["a", "b"], "\na\nb"),
(["a", "b", "c"], "\na\nb\nc"),
]
)
def test_list_semi_writer(self, arg: List[str], expected: str) -> None:
c = ConfigFile()
c.set_value("a", "b", ListSemiWriter().to_ini(arg))
buf = StringIO()
c.build(buf)
rcp = RawConfigParser(strict=False)
rcp.read_string(buf.getvalue())
self.assertEqual(expected, rcp["a"]["b"])
@parameterized.expand( # type: ignore
# fmt: off
[
({}, ""),
({"x": "y"}, "\nx=y"),
({"x": "y", "z": "zz"}, "\nx=y\nz=zz"),
]
# fmt: on
)
def test_dict_writer(self, arg: Dict[str, str], expected: str) -> None:
c = ConfigFile()
c.set_value("a", "b", DictWriter().to_ini(arg))
buf = StringIO()
c.build(buf)
rcp = RawConfigParser(strict=False)
rcp.read_string(buf.getvalue())
# I would prefer this be dangling lines
self.assertEqual(expected, rcp["a"]["b"])
@parameterized.expand( # type: ignore
# fmt: off
[
([], ""),
("abc", "\nabc"),
(["a"], "\na"),
(["a", "b"], "\na\nb"),
(["a", "b", "c"], "\na\nb\nc"),
]
# fmt: on
)
def test_list_comma_writer_compat(
self, arg: Union[str, List[str]], expected: str
) -> None:
c = ConfigFile()
c.set_value("a", "b", ListCommaWriterCompat().to_ini(arg))
buf = StringIO()
c.build(buf)
rcp = RawConfigParser(strict=False)
rcp.read_string(buf.getvalue())
# I would prefer this be dangling lines
self.assertEqual(expected, rcp["a"]["b"])
@parameterized.expand( # type: ignore
[
([], ""),
(["a"], "\na"),
(["a", "b"], "\na\nb"),
(["a", "b", "c"], "\na\nb\nc"),
]
)
def test_section_writer(self, arg: List[str], expected: str) -> None:
c = ConfigFile()
c.set_value("a", "b", SectionWriter().to_ini(arg))
buf = StringIO()
c.build(buf)
rcp = RawConfigParser(strict=False)
rcp.read_string(buf.getvalue())
self.assertEqual(expected, rcp["a"]["b"])
def test_roundtrip_str(self) -> None:
s = "abc"
inst = StrWriter()
self.assertEqual(s, inst.from_ini(inst.to_ini(s)))
def test_roundtrip_lists(self) -> None:
lst = ["a", "bc"]
inst = ListSemiWriter()
self.assertEqual(lst, inst.from_ini(inst.to_ini(lst)))
inst2 = ListCommaWriter()
self.assertEqual(lst, inst2.from_ini(inst2.to_ini(lst)))
inst3 = ListCommaWriterCompat()
self.assertEqual(lst, inst3.from_ini(inst3.to_ini(lst)))
def test_roundtrip_dict(self) -> None:
d = {"a": "bc", "d": "ef"}
inst = DictWriter()
self.assertEqual(d, inst.from_ini(inst.to_ini(d)))
def test_roundtrip_bool(self) -> None:
for b in (True, False):
inst = BoolWriter()
self.assertEqual(b, inst.from_ini(inst.to_ini(b)))
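# Conventional entry point (an addition, not in the original file) so the
# suite can be run directly; `python -m unittest` works as well.
if __name__ == "__main__":
    unittest.main()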
|
<gh_stars>10-100
import copy
import itertools
import operator
import sqlite3
# All functions in this file written by <NAME> for TALON, and
# adapted to interface with TALON dbs
# Converts input to string that can be used for IN database query
def format_for_in(l):
if type(l) is tuple:
l = list(l)
if type(l) is str:
l = [l]
return "(" + ','.join(['"' + str(x) + '"' for x in l]) + ")"
def fetch_all_transcript_gene_pairs(cursor):
""" Return gene_ID - transcript_ID tuples from database """
query = """ SELECT gene_ID, transcript_ID FROM transcripts """
cursor.execute(query)
pairs = cursor.fetchall()
return pairs
def fetch_all_datasets(cursor):
""" Return a list of all datasets in database """
cursor.execute("SELECT dataset_name FROM dataset")
datasets = [str(x[0]) for x in cursor.fetchall()]
return datasets
def parse_pass_list(pass_list_file):
    """ From the pass_list file, obtain a list of accepted gene and
        transcript ID tuples"""
pass_list = set()
with open(pass_list_file, 'r') as f:
for line in f:
line = line.strip()
fields = line.split(",")
gene_ID = fields[0]
transcript_ID = fields[1]
            try:
                pass_list.add((int(gene_ID), int(transcript_ID)))
            except ValueError:
                raise ValueError("Gene/Transcript IDs in pass_list must be integer TALON IDs")
return pass_list
def parse_datasets(dataset, cursor):
    """ From the dataset file, obtain a list of accepted dataset names"""
db_datasets = fetch_all_datasets(cursor)
if dataset not in db_datasets:
raise ValueError("Dataset name '%s' not found in database" % dataset)
return dataset
def handle_filtering(database, observed, pass_list_file):
    """ Determines which transcripts to allow in the analysis. This can be done
        in two different ways. If no pass_list is included, then all of the
        transcripts in the database are included (modified by the 'observed'
        option). If a pass_list is provided, then only transcripts on that list
        will be included (again modified by the 'observed' option). """
conn = sqlite3.connect(database)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
    # Get list of datasets to use in run
    if observed:
        datasets = fetch_all_datasets(cursor)
    else:
        datasets = None
# Get initial transcript pass_list
    if pass_list_file is not None:
pass_list = parse_pass_list(pass_list_file)
else:
pass_list = fetch_all_transcript_gene_pairs(cursor)
    if datasets is not None:
# Limit the pass_list to transcripts detected in the datasets
transcripts = [ x[1] for x in pass_list ]
transcript_str = format_for_in(transcripts)
dataset_str = format_for_in(datasets)
query = """ SELECT DISTINCT gene_ID, transcript_ID
FROM observed
WHERE transcript_ID IN %s
AND dataset in %s """
cursor.execute(query % (transcript_str, dataset_str))
pass_list = cursor.fetchall()
conn.close()
return pass_list
def get_gene_transcript_map(db, pass_list):
    """ Creates a dictionary mapping gene IDs to the transcripts that belong to
        them. The columns in each tuple are:
            0: gene ID
            1: transcript ID
            2: chromosome
            3: start position (min of 5' and 3')
            4: end position (max of 5' and 3')
            5: strand
            6: junction path
            7: start exon
            8: end exon
            9: n_exons
    """
conn = sqlite3.connect(db)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
pass_list_string = "(" + ','.join([str(x) for x in pass_list]) + ")"
query = """
SELECT
t.gene_ID,
t.transcript_ID,
loc1.chromosome,
MIN(loc1.position,loc2.position) AS min_pos,
MAX(loc1.position,loc2.position) AS max_pos,
genes.strand,
t.jn_path,
t.start_exon,
t.end_exon,
t.n_exons
FROM transcripts t
LEFT JOIN location loc1 ON t.start_vertex = loc1.location_ID
LEFT JOIN location loc2 ON t.end_vertex = loc2.location_ID
LEFT JOIN genes ON t.gene_ID = genes.gene_ID
WHERE t.transcript_ID IN """ + pass_list_string
cursor.execute(query)
transcript_tuples = cursor.fetchall()
# Sort based on gene ID
sorted_transcript_tuples = sorted(transcript_tuples, key=lambda x: x["gene_ID"])
gene_groups = {}
for key,group in itertools.groupby(sorted_transcript_tuples,operator.itemgetter(0)):
# sort by transcript start position
gene_groups[key] = sorted(list(group), key=lambda x: x["min_pos"])
conn.close()
return gene_groups
def get_annotations(database, feat_type, pass_list=None):
"""
Extracts annotations from the gene/transcript/exon annotation table of
the database (depending on choice of feat_type).
Returns:
annotation_dict: dictionary data structure in which the keys are
gene/transcript/exon TALON IDs (depending on
choice of feat_type) and the value is a list of
annotation tuples.
"""
# fetch the annotations
conn = sqlite3.connect(database)
cursor = conn.cursor()
table_name = feat_type + "_annotations"
    if pass_list is None:
query = "SELECT * FROM " + table_name
else:
pass_list_string = "(" + ','.join([str(x) for x in pass_list]) + ")"
query = "SELECT * FROM " + table_name + " WHERE ID IN " + pass_list_string
cursor.execute(query)
annotation_tuples = cursor.fetchall()
# sort based on ID
sorted_annotations = sorted(annotation_tuples, key=lambda x: x[0])
# group by ID and store in a dictionary
    ID_groups = {}
    for key, group in itertools.groupby(sorted_annotations, operator.itemgetter(0)):
        ID_groups[key] = list(group)
    conn.close()
    return ID_groups
def get_gene_2_transcripts(database, pass_list):
    """ Duplicate of get_gene_transcript_map; delegates to it so the logic
        lives in one place. See that function for the tuple layout. """
    return get_gene_transcript_map(database, pass_list)
def fetch_exon_locations(database):
""" Queries the database to create a dictionary mapping exon IDs to
the chromosome, start, end, and strand of the exon """
conn = sqlite3.connect(database)
cursor = conn.cursor()
query = """
SELECT
e.edge_ID,
loc1.chromosome,
MIN(loc1.position,loc2.position),
MAX(loc1.position,loc2.position),
e.strand
FROM edge e
LEFT JOIN location loc1 ON e.v1 = loc1.location_ID
LEFT JOIN location loc2 ON e.v2 = loc2.location_ID
WHERE e.edge_type = 'exon';"""
cursor.execute(query)
exon_location_tuples = cursor.fetchall()
# Create dictionary
exon_locations = {}
for loc_tuple in exon_location_tuples:
exon_ID = loc_tuple[0]
exon_locations[exon_ID] = loc_tuple[1:]
conn.close()
return exon_locations
# def check_annot_validity(annot, database):
# """ Make sure that the user has entered a correct annotation name """
# conn = sqlite3.connect(database)
# cursor = conn.cursor()
# cursor.execute("SELECT DISTINCT annot_name FROM gene_annotations")
# annotations = [str(x[0]) for x in cursor.fetchall()]
# conn.close()
# if "TALON" in annotations:
# annotations.remove("TALON")
# if annot not in annotations:
# message = "Annotation name '" + annot + \
# "' not found in this database. Try one of the following: " + \
# ", ".join(annotations)
# raise Exception(message)
# return annot
|
import osmnx as ox
import networkx as nx
import pandas as pd
import geopandas as gpd
from tqdm import tqdm
from shapely.geometry import shape, Polygon, Point
import warnings
warnings.filterwarnings(action='ignore', message='Mean of empty slice')
import numpy as np
def bikeability(place, scale='city', data=False):
    ''' Calculate a bikeability index for a given place of interest.
    Parameters
    place: the place of interest, e.g. "Freiburg, Germany", datatype = string
    scale: either "grid" or "city"; default is "city", datatype = string
    data: if True, return a dataframe along with the standard dictionary
          output, datatype = boolean
    Returns the average_index for bikeability (a number between 0 and 100) and
    some summary statistics of the index, datatype = dictionary, or a dataframe
    and a dictionary if data is set to True.
    Usage examples
    a = bikeability('Freiburg, Germany', scale='grid', data=False)  # grid-scale approach
    a, b = bikeability('Freiburg, Germany', scale='grid', data=True)
    a = bikeability('Freiburg, Germany', scale='city')  # city-scale approach
    a, b = bikeability('Freiburg, Germany', scale='city', data=True)
    '''
    if scale != 'grid':
# Create and set osmnx to select important tags
useful_tags_way = ['bridge', 'length', 'oneway', 'lanes', 'ref', 'name',
'highway', 'maxspeed', 'service', 'access', 'area', 'cycleway',
'landuse', 'width', 'est_width', 'junction', 'surface']
        ox.utils.config(useful_tags_way=useful_tags_way)
# Create basic city graph
place_name = place
graph = ox.graph_from_place(place_name, network_type='all', retain_all=True)
# # Calculate and add edge closeness centrality(connectedness)
centrality = nx.degree_centrality(nx.line_graph(graph))
nx.set_edge_attributes(graph, centrality, 'centrality')
# Extract nodes and edges to geopandas from graph
#edges = ox.graph_to_gdfs(graph, nodes=False)
        try:
            edges = ox.graph_to_gdfs(graph, nodes=False)
        except Exception as e:
            print('{} at {}'.format(e, place))
            raise
# Remove unwanted columns and add weight variable
cols = ['highway', 'cycleway', 'surface', 'maxspeed', 'length', 'lanes', 'oneway',
'width', 'centrality', 'geometry']
        try:
            df = edges.loc[:, cols]
        except KeyError as e:
            print(e)
            raise
# Set appropriate data types
df['maxspeed'] = pd.to_numeric(
df['maxspeed'], errors='coerce', downcast='integer')
df['lanes'] = pd.to_numeric(
df['lanes'], errors='coerce', downcast='integer')
df['width'] = pd.to_numeric(
df['width'], errors='coerce', downcast='unsigned')
df['highway'] = df['highway'].astype(str)
df['surface'] = df['surface'].astype(str)
df['oneway'] = df['oneway'].astype(int)
df['cycleway'] = df['cycleway'].astype(str)
# Dataframe cleaning and preprocessing
# highway column
df['highway'] = df['highway'].str.replace(r'[^\w\s-]', '', regex = True)
highway_cols = (pd.DataFrame(df.highway.str.split(' ', expand=True)))
highway_map = ({'service': 6, 'None': np.nan, 'residential': 8, 'unclassified': 7, 'footway': 7, 'track': 5,
'tertiary': 6, 'living_street': 9, 'path': 5, 'pedestrian': 7, 'secondary': 5,
'primary': 2, 'steps': 2, 'cycleway': 10, 'rest_area': 5, 'primary_link': 2, 'ferry': 1,
'construction': 2, 'byway': 8, 'bridleway': 6, 'trunk': 2, 'trunk_link': 2, 'motorway': 1, 'motorway_link': 1})
for column in highway_cols:
highway_cols[column] = highway_cols[column].map(highway_map)
highway_cols['mean'] = np.nanmean(highway_cols, axis=1)
df['highway'] = round(highway_cols['mean'])
# cycleway column
df['cycleway'] = df['cycleway'].str.replace(r'[^\w\s-]', '', regex = True)
cycleway_cols = (pd.DataFrame(df.cycleway.str.split(' ', expand=True)))
cycleway_map = ({'opposite': 9, 'lane': 9, 'share_busway': 8, 'shared_lane': 8, 'segregated': 10,
'no': 1, 'opposite_lane': 9, 'crossing': 10, 'track': 10, 'designated': 10,
'opposite_share_busway': 8, 'seperate': 10, 'shoulder': 8})
for column in cycleway_cols:
cycleway_cols[column] = cycleway_cols[column].map(cycleway_map)
cycleway_cols['mean'] = np.nanmean(cycleway_cols, axis=1)
df['cycleway'] = round(cycleway_cols['mean'])
# surface column
df['surface'] = df['surface'].str.replace(r'[^\w\s-]', '', regex=True)
surface_cols = (pd.DataFrame(df.surface.str.split(' ', expand=True)))
surface_map = ({'asphalt': 10, 'paved': 10, 'cobblestone': 5, 'fine_gravel': 9,
'ground': 7, 'sett': 6, 'gravel': 7, 'metal': 6, 'compacted': 10,
'dirt': 6, 'paving_stones': 7, 'grass_paver': 5, 'unpaved': 8,
'pebblestone': 9, 'concrete': 10, 'grass': 5, 'mud': 1})
for column in surface_cols:
surface_cols[column] = surface_cols[column].map(surface_map)
surface_cols['mean'] = np.nanmean(surface_cols, axis=1)
df['surface'] = round(surface_cols['mean'])
# maxspeed column
df.loc[df['maxspeed'] > 110, 'maxspeed'] = 110
df.loc[df['maxspeed'] < 20, 'maxspeed'] = 20
maxspeed_map = ({20: 10, 30: 9, 40: 8, 50: 7, 60: 6,
70: 5, 80: 4, 90: 3, 100: 2, 110: 1})
df['maxspeed'] = df['maxspeed'].map(maxspeed_map)
# lanes column
df.loc[df['lanes'] > 8, 'lanes'] = 8
lanes_map = {1: 10, 2: 9, 3: 5, 4: 5, 5: 3, 6: 3, 7: 2, 8: 1}
df['lanes'] = df['lanes'].map(lanes_map)
# oneway column
oneway_map = {0: 5, 1: 10, -1: 5}
df['oneway'] = df['oneway'].map(oneway_map)
# width column
df.loc[df['width'] < 2, 'width'] = 1
df.loc[df['width'] > 6, 'width'] = 6
df['width'] = round(df['width'])
width_map = ({1: 1, 2: 2, 3: 5, 4: 7, 5: 9, 6: 10})
df['width'] = df['width'].map(width_map)
        # normalize centrality column (between 0 and 10)
df['centrality'] = ((df['centrality'] - np.min(df['centrality'])) /
(np.max(df['centrality']) - np.min(df['centrality']))) * 10
# Switch to new df for calculation
d_frame = df.copy(deep=True)
# Multiply variables by weights
d_frame['cycleway'] = d_frame['cycleway'] * 0.208074534
d_frame['surface'] = d_frame['surface'] * 0.108695652
d_frame['highway'] = d_frame['highway'] * 0.167701863
d_frame['maxspeed'] = d_frame['maxspeed'] * 0.189440994
d_frame['lanes'] = d_frame['lanes'] * 0.108695652
d_frame['centrality'] = d_frame['centrality'] * 0.071428571
d_frame['width'] = d_frame['width'] * 0.086956522
d_frame['oneway'] = d_frame['oneway'] * 0.059006211
        # Combine the weighted variables into a single index between 0 and 100
d_frame['index'] = (np.nanmean(d_frame[['cycleway', 'highway', 'surface', 'maxspeed', 'lanes', 'width', 'oneway',
'centrality']], axis=1, dtype='float64')) * 80
# Final statistics index of city
mean_index = np.average(d_frame['index'], weights=d_frame['length'])
max_index = d_frame['index'].max()
min_index = d_frame['index'].min()
std_index = d_frame['index'].std()
# Plot result
#d_frame.plot(column = 'index',legend = True)
# Result dictionary
result = ({'place': place, 'average_index': mean_index, 'max_index': max_index,
'min_index': min_index, 'std_index': std_index})
else:
#Get bounding box for place
place_name = place
area = ox.geocode_to_gdf(place_name) # graph first
xmin,ymin,xmax,ymax = area.total_bounds
#divide into grids x = lon, y = lat
height = 0.041667
width = 0.041667
rows = int(np.ceil((ymax-ymin) / height))
cols = int(np.ceil((xmax-xmin) / width))
XleftOrigin = xmin
XrightOrigin = xmin + width
YtopOrigin = ymax
YbottomOrigin = ymax- height
polygons = []
for i in range(cols):
Ytop = YtopOrigin
Ybottom =YbottomOrigin
for j in range(rows):
polygons.append(Polygon([(XleftOrigin, Ytop), (XrightOrigin, Ytop), (XrightOrigin, Ybottom), (XleftOrigin, Ybottom)]))
Ytop = Ytop - height
Ybottom = Ybottom - height
XleftOrigin = XleftOrigin + width
XrightOrigin = XrightOrigin + width
#Ensure the grids are within the polygon
grid_list = []
for i in range(len(polygons)):
p = Point(polygons[i].centroid.x, polygons[i].centroid.y)
geome = shape(polygons[i])
q =gpd.GeoDataFrame({'geometry':geome}, index=[0])
q = q.set_crs("EPSG:4326")
            if area.geometry.iloc[0].contains(polygons[i]):
grid_list.append(q)
#elif p.within(area.geometry.iloc[0]) == True and area.geometry.iloc[0].contains(polygons[i])== False:
elif area.geometry.iloc[0].intersects(polygons[i]):
#grid_list.append(polygons[i])
clip = gpd.clip(area, q)
grid_list.append(clip)
#Initialize important variables
dflist = []
exception_grids = []
dfs = []
for i in tqdm(range(len(grid_list))):
#graph
useful_tags_way = ['bridge', 'length', 'oneway', 'lanes', 'ref',
'name', 'highway', 'maxspeed', 'surface', 'area',
'landuse', 'width', 'est_width', 'junction','cycleway']
            ox.utils.config(useful_tags_way=useful_tags_way)
            try:
                box_graph = ox.graph_from_polygon(grid_list[i].geometry.iloc[0], network_type='bike', retain_all=True)
            except Exception as e:
                print('{} at grid {}, skip grid'.format(e, i+1))
                exception_grids.append(i+1)
                continue
# Calculate and add edge closeness centrality(connectedness)
centrality = nx.degree_centrality(nx.line_graph(box_graph))
nx.set_edge_attributes(box_graph, centrality, 'centrality')
# Extract nodes and edges to geopandas from graph
            try:
                edges = ox.graph_to_gdfs(box_graph, nodes=False)
            except Exception as e:
                print('{} at grid {}, skip grid'.format(e, i+1))
                exception_grids.append(i+1)
                continue
# Select only the important variables
cols = ['highway','cycleway', 'surface', 'maxspeed', 'length', 'lanes', 'oneway',
'width', 'centrality', 'geometry']
            try:
                df = edges.loc[:, cols]
            except KeyError as e:
                print('{} at grid {}, skip grid'.format(e, i+1))
                exception_grids.append(i+1)
                continue
# Set appropriate data types
df['maxspeed'] = pd.to_numeric(
df['maxspeed'], errors='coerce', downcast='integer')
df['lanes'] = pd.to_numeric(
df['lanes'], errors='coerce', downcast='integer')
df['width'] = pd.to_numeric(
df['width'], errors='coerce', downcast='unsigned')
df['highway'] = df['highway'].astype(str)
df['surface'] = df['surface'].astype(str)
df['oneway'] = df['oneway'].astype(int)
df['cycleway'] = df['cycleway'].astype(str)
# Dataframe cleaning and preprocessing
# highway column
df['highway'] = df['highway'].str.replace(r'[^\w\s-]', '', regex = True)
highway_cols = (pd.DataFrame(df.highway.str.split(' ', expand = True)))
highway_map = ({'service': 6, 'None': np.nan, 'residential': 8, 'unclassified': 7, 'footway': 7, 'track': 5, 'tertiary_link':6,
'tertiary': 6, 'living_street': 9, 'path': 5, 'pedestrian': 7, 'secondary': 5, 'secondary_link':5,
'primary': 2, 'steps': 2, 'cycleway': 10, 'rest_area': 5, 'primary_link': 2, 'ferry': 1,
'construction': 2, 'byway': 8, 'bridleway': 6, 'trunk': 2, 'trunk_link': 2, 'motorway': 1, 'motorway_link': 1})
for column in highway_cols:
highway_cols[column] = highway_cols[column].map(highway_map)
highway_cols['mean'] = np.nanmean(highway_cols, axis=1)
df['highway'] = round(highway_cols['mean'])
#cycleway column
df['cycleway'] = df['cycleway'].str.replace(r'[^\w\s-]', '', regex = True)
cycleway_cols = (pd.DataFrame(df.cycleway.str.split(' ', expand = True)))
cycleway_map = ({'opposite':9, 'lane':9, 'share_busway':8, 'shared_lane':8,'segregated':10,
'no':1, 'opposite_lane':9, 'crossing':10, 'track':10, 'designated':10,
'opposite_share_busway':8, 'seperate':10, 'shoulder':8})
for column in cycleway_cols:
cycleway_cols[column] = cycleway_cols[column].map(cycleway_map)
cycleway_cols['mean'] = np.nanmean(cycleway_cols, axis=1)
df['cycleway'] = round(cycleway_cols['mean'])
# surface column
df['surface'] = df['surface'].str.replace(r'[^\w\s-]', '', regex = True) #''
surface_cols = (pd.DataFrame(df.surface.str.split(' ', expand = True)))
surface_map = ({'asphalt': 10, 'paved': 10, 'cobblestone': 3, 'fine_gravel': 9,
'ground': 6, 'sett': 4, 'gravel': 7, 'metal': 7, 'compacted': 9,
'dirt': 6, 'paving_stones': 7, 'grass_paver': 4, 'unpaved': 7,
'pebblestone': 7, 'concrete': 10, 'grass': 5, 'mud': 2,'sand':5,
'wood':4, 'earth':6, 'woodchips':3, 'snow':2, 'ice':2, 'salt':2})
for column in surface_cols:
surface_cols[column] = surface_cols[column].map(surface_map)
surface_cols['mean'] = np.nanmean(surface_cols, axis=1)
df['surface'] = round(surface_cols['mean'])
# maxspeed column
df.loc[df['maxspeed'] > 110, 'maxspeed'] = 110
df.loc[df['maxspeed'] < 20, 'maxspeed'] = 20
df['maxspeed'] = round(df['maxspeed'], -1)
maxspeed_map = ({20: 10, 30: 9, 40: 8, 50: 7, 60: 6,
70: 5, 80: 4, 90: 3, 100: 2, 110: 1})
df['maxspeed'] = df['maxspeed'].map(maxspeed_map)
# lanes column
df.loc[df['lanes'] > 8, 'lanes'] = 8
lanes_map = {1: 10, 2: 9, 3: 5, 4: 5, 5: 3, 6: 3, 7: 2, 8: 1}
df['lanes'] = df['lanes'].map(lanes_map)
# oneway column
oneway_map = {0: 5, 1: 10, -1:5}
df['oneway'] = df['oneway'].map(oneway_map)
# width column
df.loc[df['width'] < 2, 'width'] = 1
df.loc[df['width'] > 6, 'width'] = 6
df['width'] = round(df['width'])
width_map = ({1: 1, 2: 2, 3: 5, 4: 7, 5: 9, 6: 10})
df['width'] = df['width'].map(width_map)
            # normalize centrality column (between 0 and 10)
df['centrality'] =((df['centrality'] - np.min(df['centrality'])) / (np.max(df['centrality']) - np.min(df['centrality']))) * 10
#Switch to new df for calculation
d_frame = df.copy(deep =True)
# Multiply variables by weights
d_frame['cycleway'] = d_frame['cycleway'] * 0.208074534
d_frame['surface'] = d_frame['surface'] * 0.108695652
d_frame['highway'] = d_frame['highway'] * 0.167701863
d_frame['maxspeed'] = d_frame['maxspeed'] * 0.189440994
d_frame['lanes'] = d_frame['lanes'] * 0.108695652
d_frame['centrality'] = d_frame['centrality'] * 0.071428571
d_frame['width'] = d_frame['width'] * 0.086956522
d_frame['oneway'] = d_frame['oneway'] * 0.059006211
d_frame['index'] = (np.nanmean(d_frame[['cycleway','highway', 'surface', 'maxspeed', 'lanes', 'width', 'oneway',
'centrality']], axis=1,dtype='float64')) * 80
d_frame['grid_index'] = np.average(d_frame['index'],weights=d_frame['length'])
dflist.append(d_frame)
dfs.append(df)
        # Final statistics index of the city, in a dictionary
        df_indexes = pd.concat(dflist)
        # use the concatenated per-grid dataframe for the data=True return below
        d_frame = df_indexes
        result = ({'place': place_name,
                   'average_index': np.average(df_indexes['index'], weights=df_indexes['length']),
                   'max_index': df_indexes['index'].max(),
                   'min_index': df_indexes['index'].min(),
                   'std_index': df_indexes['index'].std(),
                   'grids': len(grid_list),
                   'nsegments': len(df_indexes),
                   'unused_grids': len(exception_grids)})
    if not data:
        return result
    else:
        return d_frame, result
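# A minimal sketch of running the module directly. Note that this downloads
# OSM data over the network; the place name is only an example.
if __name__ == '__main__':
    scores = bikeability('Freiburg, Germany', scale='city')
    print(scores)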
|
import math
import torch
from torch.optim.optimizer import Optimizer
from torch.optim.sgd import SGD
import numpy as np
class Neumann(Optimizer):
    """
    Implements the Neumann optimizer, which implicitly approximates the inverse
    Hessian with a truncated Neumann series (after Krishnan et al., "Neumann
    Optimizer: A Practical Optimization Algorithm for Deep Neural Networks").
    Runs plain SGD for the first `sgd_steps` iterations, then maintains a
    per-parameter Neumann iterate that is reset every K steps.
    """
    def __init__(self, params, lr=1e-3, eps=1e-8, alpha=1e-7, beta=1e-5, gamma=0.9, momentum=1, sgd_steps=5, K=10):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 1 >= momentum:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        self.iter = 0
        params = list(params)  # materialize so the generator is not consumed before super().__init__
        param_count = np.sum([np.prod(p.size()) for p in params])
        defaults = dict(lr=lr, eps=eps, alpha=alpha,
                        beta=beta*param_count, gamma=gamma,
                        sgd_steps=sgd_steps, momentum=momentum, K=K
                        )
        super(Neumann, self).__init__(params, defaults)
def step(self, closure=None):
"""
Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
self.iter += 1
loss = None
        if closure is not None:  # standard PyTorch optimizer API: the closure re-evaluates the model and returns the loss
            loss = closure()
for group in self.param_groups:
sgd_steps = group['sgd_steps']
alpha = group['alpha']
beta = group['beta']
gamma = group['gamma']
K = group['K']
momentum = group['momentum']
mu = momentum*(1 - (1/(1+self.iter)))
if mu >= 0.9:
mu = 0.9
elif mu <= 0.5:
mu = 0.5
            eta = group['lr'] / self.iter  # learning rate decays with the iteration count
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['m'] = torch.zeros_like(p.data).float()
state['d'] = torch.zeros_like(p.data).float()
# state['moving_avg'] = p.data
                if self.iter <= sgd_steps:
                    p.data.add_(grad, alpha=-group['lr'])  # plain SGD during the warm-up steps
                    continue
state['step'] += 1
# Reset neumann iterate
if self.iter%K == 0:
state['m'] = grad.mul(-eta)
## changed
                else:
                    ## Compute update d_t (the moving-average correction term
                    ## from the paper is disabled in this implementation)
                    state['d'] = grad
## Update Neumann iterate
(state['m'].mul_(mu)).sub_( state['d'].mul(eta) )
## Update Weights
p.data.add_((state['m'].mul(mu)).sub( state['d'].mul(eta)))
## Update Moving Average
# state['moving_avg'] = p.data.add( (state['moving_avg'].sub(p.data)).mul(gamma) )
# print(p.data)
## changed
if self.iter%K == 0:
group['K'] = group['K']*2
# return loss
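# A minimal smoke test on a toy regression problem. This is an illustrative
# sketch, not part of the original module.
if __name__ == "__main__":
    model = torch.nn.Linear(4, 1)
    opt = Neumann(model.parameters(), lr=1e-3, sgd_steps=5, K=10)
    x, y = torch.randn(32, 4), torch.randn(32, 1)
    for _ in range(20):
        opt.zero_grad()
        loss = torch.nn.functional.mse_loss(model(x), y)
        loss.backward()
        opt.step()
    print("final loss:", loss.item())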
|
<gh_stars>0
import nipype.pipeline.engine as pe
from nipype.interfaces import ants
from nipype.interfaces import fsl
import nipype.interfaces.io as nio
import numpy as np
import os
project_folder = '/home/gdholla1/projects/bias'
workflow = pe.Workflow(name='register_epi_to_struct_ants')
workflow.base_dir = os.path.join(project_folder, 'workflow_folders')
templates = {'mean_epi':os.path.join(project_folder, 'data', 'processed', 'feat_preprocess', 'mean', '_subject_id_{subject_id}', '_fwhm_0.0', 'sub-{subject_id}_task-randomdotmotion_run-01_bold_unwarped_st_dtype_mcf_mask_gms_mean.nii.gz'),
't1_weighted':os.path.join(project_folder, 'data', 'raw', 'sub-{subject_id}', 'anat', 'sub-{subject_id}_T1w.nii.gz')}
selector = pe.Node(nio.SelectFiles(templates), name='selector')
subject_ids = ['%02d' % i for i in np.arange(1, 20)]
selector.iterables = [('subject_id', subject_ids)]
reg = pe.Node(ants.Registration(), name='antsRegister')
reg.inputs.transforms = ['Rigid', 'Affine']
reg.inputs.transform_parameters = [(0.1,), (0.1,)]
reg.inputs.number_of_iterations = [[1000,500,250,100]]*2
reg.inputs.dimension = 3
reg.inputs.write_composite_transform = True
reg.inputs.collapse_output_transforms = True
reg.inputs.metric = ['MI']*2
reg.inputs.metric_weight = [1]*2 # Default (value ignored currently by ANTs)
reg.inputs.radius_or_number_of_bins = [32]*2
reg.inputs.sampling_strategy = ['Regular']*2
reg.inputs.sampling_percentage = [0.25]*2
reg.inputs.convergence_threshold = [1.e-8]*2
reg.inputs.convergence_window_size = [10]*2
reg.inputs.smoothing_sigmas = [[3,2,1,0]]*2
reg.inputs.sigma_units = ['mm']*2
reg.inputs.shrink_factors = [[8,4,2,1]]*2
reg.inputs.use_estimate_learning_rate_once = [True]*2  # one entry per transform stage
reg.inputs.use_histogram_matching = [False]*2 # This is the default
reg.inputs.initial_moving_transform_com = True
reg.inputs.output_warped_image = True
reg.inputs.winsorize_lower_quantile = 0.01
reg.inputs.winsorize_upper_quantile = 0.99
workflow.connect(selector, 'mean_epi', reg, 'moving_image')
workflow.connect(selector, 't1_weighted', reg, 'fixed_image')
ds = pe.Node(nio.DataSink(), name='datasink')
#ds.inputs.base_directory = '../../data/derivatives/registration/epi2t1weighted'
ds.inputs.base_directory = os.path.join(project_folder, 'data', 'derivatives', 'registration', 'epi2t1weighted')
mni_reg = pe.Node(ants.Registration(args='--float',
collapse_output_transforms=True,
initial_moving_transform_com=True,
num_threads=1,
output_inverse_warped_image=True,
output_warped_image=True,
sigma_units=['vox']*3,
transforms=['Rigid', 'Affine', 'SyN'],
terminal_output='file',
winsorize_lower_quantile=0.005,
winsorize_upper_quantile=0.995,
convergence_threshold=[1e-06],
convergence_window_size=[10],
metric=['MI', 'MI', 'CC'],
metric_weight=[1.0]*3,
number_of_iterations=[[1000, 500, 250, 100],
[1000, 500, 250, 100],
[100, 70, 50, 20]],
radius_or_number_of_bins=[32, 32, 4],
sampling_percentage=[0.25, 0.25, 1],
sampling_strategy=['Regular',
'Regular',
'None'],
shrink_factors=[[8, 4, 2, 1]]*3,
smoothing_sigmas=[[3, 2, 1, 0]]*3,
transform_parameters=[(0.1,),
(0.1,),
(0.1, 3.0, 0.0)],
use_histogram_matching=True,
write_composite_transform=True),
name='mni_reg')
mni_reg.inputs.fixed_image = fsl.Info.standard_image('MNI152_T1_1mm_brain.nii.gz')
workflow.connect(selector, 't1_weighted', mni_reg, 'moving_image')
workflow.connect(reg, 'composite_transform', ds, 'epi2structmat_ants')
workflow.connect(reg, 'inverse_composite_transform', ds, 'struct2epimat_ants')
workflow.connect(reg, 'warped_image', ds, 'epi_in_struct_ants')
workflow.connect(mni_reg, 'composite_transform', ds, 'struct2mnimat_ants')
workflow.connect(mni_reg, 'inverse_composite_transform', ds, 'mni2structmat_ants')
workflow.connect(mni_reg, 'warped_image', ds, 'struct_in_mni_ants')
workflow.run(plugin='MultiProc', plugin_args={'n_procs':8})
|
<filename>pressurecooker/images.py<gh_stars>1-10
import math
import tempfile
import numpy as np
import os
import wave
import subprocess
import sys
import matplotlib
import zipfile
import ebooklib
import ebooklib.epub
from io import BytesIO
# Set the backend to avoid platform-specific differences in MPLBACKEND
matplotlib.use("PS")
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.backends.backend_agg import FigureCanvasAgg
from pdf2image import convert_from_path
from PIL import Image, ImageOps
from le_utils.constants import file_formats
from .thumbscropping import scale_and_crop
# SMARTCROP UTILS
################################################################################
THUMBNAIL_SIZE = (400, 225) # 16:9 aspect ratio
def scale_and_crop_thumbnail(image, size=THUMBNAIL_SIZE, crop="smart", **kwargs):
"""
Scale and crop the PIL Image ``image`` to maximum dimensions of ``size``.
By default, ``crop`` is set to "smart" which will crop the image down to size
based on the entropy content of the pixels. The other options are:
* Use ``crop="0,0"`` to crop from the left and top edges
* Use ``crop=",0"`` to crop from the top edge.
Optional keyword arguments:
* ``zoom=X``: crop outer X% before starting
* ``target``: recenter here before cropping (default center ``(50, 50)``)
See the ``scale_and_crop`` docs in ``thumbscropping.py`` for more details.
"""
return scale_and_crop(image, size, crop=crop, upscale=True, **kwargs)
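# A small usage illustration (assumed inputs): smart-crop a PIL image down to
# the default 16:9 thumbnail size.
#   from PIL import Image
#   img = Image.new("RGB", (1000, 1000), "navy")
#   thumb = scale_and_crop_thumbnail(img)            # smart crop to 400x225
#   top = scale_and_crop_thumbnail(img, crop=",0")   # crop from the top edge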
# THUMBNAILS FOR CONTENT KINDS
################################################################################
def create_image_from_epub(epubfile, fpath_out, crop=None):
"""
Generate a thumbnail image from `epubfile` and save it to `fpath_out`.
Raises ThumbnailGenerationError if thumbnail extraction fails.
"""
try:
book = ebooklib.epub.read_epub(epubfile)
# 1. try to get cover image from book metadata (content.opf)
cover_item = None
covers = book.get_metadata('http://www.idpf.org/2007/opf', 'cover')
if covers:
cover_tuple = covers[0] # ~= (None, {'name':'cover', 'content':'item1'})
cover_item_id = cover_tuple[1]['content']
for item in book.items:
if item.id == cover_item_id:
cover_item = item
if cover_item:
image_data = BytesIO(cover_item.get_content())
else:
# 2. fallback to get first image in the ePub file
images = list(book.get_items_of_type(ebooklib.ITEM_IMAGE))
if not images:
raise ThumbnailGenerationError("ePub file {} contains no images.".format(epubfile))
# TODO: get largest image of the bunch
image_data = BytesIO(images[0].get_content())
# Save image_data to fpath_out
im = Image.open(image_data)
im = scale_and_crop_thumbnail(im, crop=crop)
im.save(fpath_out)
except Exception as e:
raise ThumbnailGenerationError("Fail on ePub {} {}".format(epubfile, e))
def create_image_from_zip(htmlfile, fpath_out, crop="smart"):
"""
Create an image from the html5 zip at htmlfile and write result to fpath_out.
Raises ThumbnailGenerationError if thumbnail extraction fails.
"""
biggest_name = None
size = 0
try:
with zipfile.ZipFile(htmlfile, 'r') as zf:
# get the biggest (most pixels) image in the zip
image_exts = ['png', 'PNG', 'jpeg', 'JPEG', 'jpg', 'JPG']
for filename in zf.namelist():
_, dotext = os.path.splitext(filename)
ext = dotext[1:]
if ext in image_exts:
with zf.open(filename) as fhandle:
image_data = fhandle.read()
with BytesIO(image_data) as bhandle:
img = Image.open(bhandle)
img_size = img.size[0] * img.size[1]
if img_size > size:
biggest_name = filename
size = img_size
if biggest_name is None:
raise ThumbnailGenerationError("HTML5 zip file {} contains no images.".format(htmlfile))
with zf.open(biggest_name) as fhandle:
image_data = fhandle.read()
with BytesIO(image_data) as bhandle:
img = Image.open(bhandle)
img = scale_and_crop_thumbnail(img, crop=crop)
img.save(fpath_out)
except Exception as e:
raise ThumbnailGenerationError("Fail on zip {} {}".format(htmlfile, e))
def create_image_from_pdf_page(fpath_in, fpath_out, page_number=0, crop=None):
"""
Create an image from the pdf at fpath_in and write result to fpath_out.
"""
try:
assert fpath_in.endswith('pdf'), "File must be in pdf format"
pages = convert_from_path(fpath_in, 500, first_page=page_number, last_page=page_number+1)
page = pages[0]
# resize
page = scale_and_crop_thumbnail(page, zoom=10, crop=crop)
page.save(fpath_out, 'PNG')
except Exception as e:
raise ThumbnailGenerationError("Fail on PDF {} {}".format(fpath_in, e))
def create_waveform_image(fpath_in, fpath_out, max_num_of_points=None, colormap_options=None):
"""
Create a waveform image from audio file at fpath_in and write to fpath_out.
Colormap info: http://matplotlib.org/examples/color/colormaps_reference.html
"""
colormap_options = colormap_options or {}
cmap_name = colormap_options.get('name') or 'cool'
vmin = colormap_options.get('vmin') or 0
vmax = colormap_options.get('vmax') or 1
color = colormap_options.get('color') or 'w'
tempwav_fh, tempwav_name = tempfile.mkstemp(suffix=".wav")
os.close(tempwav_fh) # close the file handle so ffmpeg can write to the file
try:
ffmpeg_cmd = ['ffmpeg', '-y', '-loglevel', 'panic', '-i', fpath_in]
# The below settings apply to the WebM encoder, which doesn't seem to be
# built by Homebrew on Mac, so we apply them conditionally
if not sys.platform.startswith('darwin'):
ffmpeg_cmd.extend(['-cpu-used', '-16'])
ffmpeg_cmd += [tempwav_name]
result = subprocess.check_output(ffmpeg_cmd)
spf = wave.open(tempwav_name, 'r')
# Extract raw audio from wav file
signal = spf.readframes(-1)
spf.close()
signal = np.frombuffer(signal, np.int16)
# Get subarray from middle
length = len(signal)
count = max_num_of_points or length
subsignals = signal[int((length-count)/2):int((length+count)/2)]
# Set up max and min values for axes
X = [[.6, .6], [.7, .7]]
xmin, xmax = xlim = 0, count
max_y_axis = max(-min(subsignals), max(subsignals))
ymin, ymax = ylim = -max_y_axis, max_y_axis
# Set up canvas according to user settings
(xsize, ysize) = (THUMBNAIL_SIZE[0]/100.0, THUMBNAIL_SIZE[1]/100.0)
figure = Figure(figsize=(xsize, ysize), dpi=100)
canvas = FigureCanvasAgg(figure)
ax = figure.add_subplot(111, xlim=xlim, ylim=ylim, autoscale_on=False, frameon=False)
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.set_xticks([])
ax.set_yticks([])
cmap = plt.get_cmap(cmap_name)
cmap = LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=vmin, b=vmax),
cmap(np.linspace(vmin, vmax, 100))
)
ax.imshow(X, interpolation='bicubic', cmap=cmap, extent=(xmin, xmax, ymin, ymax), alpha=1)
# Plot points
ax.plot(np.arange(count), subsignals, color)
ax.set_aspect("auto")
canvas.print_figure(fpath_out)
    except Exception as e:  # includes subprocess.CalledProcessError
raise ThumbnailGenerationError("Failed file {} {}".format(fpath_in, e))
finally:
os.remove(tempwav_name)
# TILED THUMBNAILS FOR TOPIC NODES (FOLDERS)
################################################################################
def create_tiled_image(source_images, fpath_out):
"""
Create a 16:9 tiled image from list of image paths provided in source_images
and write result to fpath_out.
"""
try:
sizes = {1:1, 4:2, 9:3, 16:4, 25:5, 36:6, 49:7}
assert len(source_images) in sizes.keys(), "Number of images must be a perfect square <= 49"
root = sizes[len(source_images)]
images = list(map(Image.open, source_images))
new_im = Image.new('RGBA', THUMBNAIL_SIZE)
offset = (int(float(THUMBNAIL_SIZE[0]) / float(root)),
int(float(THUMBNAIL_SIZE[1]) / float(root)) )
index = 0
for y_index in range(root):
for x_index in range(root):
im = scale_and_crop_thumbnail(images[index], size=offset)
new_im.paste(im, (int(offset[0]*x_index), int(offset[1]*y_index)))
index = index + 1
new_im.save(fpath_out)
except Exception as e:
raise ThumbnailGenerationError("Failed due to {}".format(e))
def convert_image(filename, dest_dir=None, size=None, format='PNG'):
"""
Converts an image to a specified output format. The converted image will have the same
file basename as filename, but with the extension of the converted format.
:param filename: Filename of image to covert.
:param dest_dir: Destination directory for image, if None will save to same directory as filename.
:param size: Tuple of size of new image, if None, image is not resized.
:param format: File extension of format to convert to (e.g. PNG, JPG), Defaults to PNG.
:returns: Path to converted file.
"""
assert os.path.exists(filename), "Image file not found: {}".format(os.path.abspath(filename))
if not dest_dir:
dest_dir = os.path.dirname(os.path.abspath(filename))
dest_filename_base = os.path.basename(filename)
base, ext = os.path.splitext(dest_filename_base)
new_filename = base + ".{}".format(format.lower())
dest_filename = os.path.join(dest_dir, new_filename)
img = Image.open(filename)
dest_img = img.convert("RGB")
    # resize image to the requested dimensions
if size:
dest_img = dest_img.resize(size, Image.ANTIALIAS)
dest_img.save(dest_filename)
return dest_filename
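# Example usage (hypothetical file name): convert a JPEG to a 200x200 PNG
# saved next to the original.
#   png_path = convert_image("thumb.jpg", size=(200, 200), format="PNG")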
# EXCEPTIONS
################################################################################
class ThumbnailGenerationError(Exception):
"""
Custom error returned when thumbnail extraction process fails.
"""
pass
|
""" Standalone webinterface for Openstack Swift. """
# -*- coding: utf-8 -*-
#pylint:disable=E1101
from swiftclient import client
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from django.contrib import messages
from django.conf import settings
from django.http import JsonResponse
from django.utils.translation import ugettext as _
from swiftbrowser.forms import CreateContainerForm, UpdateACLForm
from swiftbrowser.utils import *
import swiftbrowser.views.objects  # referenced by name in containerview below
@session_valid
def containerview(request):
""" Returns a list of all containers in current account. """
# Users with no role will not be able to list containers.
if request.session.get('norole'):
# Redirect them to the container that is their username.
return redirect(swiftbrowser.views.objects.objectview,
request.session.get('user'))
storage_url = request.session.get('storage_url', '')
auth_token = request.session.get('auth_token', '')
try:
account_stat, containers = client.get_account(storage_url, auth_token)
except client.ClientException:
return redirect(login)
account_stat = replace_hyphens(account_stat)
account = storage_url.split('/')[-1]
return render_to_response('containerview.html', {
'account': account,
'account_stat': account_stat,
'containers': containers,
'session': request.session,
}, context_instance=RequestContext(request))
@session_valid
def create_container(request):
""" Creates a container (empty object of type application/directory) """
storage_url = request.session.get('storage_url', '')
auth_token = request.session.get('auth_token', '')
headers = {
'X-Container-Meta-Access-Control-Expose-Headers':
'Access-Control-Allow-Origin',
'X-Container-Meta-Access-Control-Allow-Origin': settings.BASE_URL
}
form = CreateContainerForm(request.POST or None)
if form.is_valid():
container = form.cleaned_data['containername']
#Check container does not already exist
try:
client.get_container(storage_url, auth_token, container)
            messages.add_message(
                request,
                messages.ERROR,
                _("Container {0} already exists.").format(container))
        except client.ClientException:
try:
client.put_container(
storage_url, auth_token, container, headers)
messages.add_message(request, messages.INFO,
_("Container created."))
except client.ClientException:
messages.add_message(
request, messages.ERROR, _("Access denied."))
return redirect(containerview)
return render_to_response(
'create_container.html',
{'session': request.session},
context_instance=RequestContext(request))
@session_valid
def delete_container(request, container):
""" Deletes a container """
storage_url = request.session.get('storage_url', '')
auth_token = request.session.get('auth_token', '')
try:
_m, objects = client.get_container(storage_url, auth_token, container)
for obj in objects:
client.delete_object(storage_url, auth_token,
container, obj['name'])
client.delete_container(storage_url, auth_token, container)
messages.add_message(request, messages.INFO, _("Container deleted."))
except client.ClientException:
messages.add_message(request, messages.ERROR, _("Access denied."))
return redirect(containerview)
@ajax_session_valid
def get_acls(request, container):
""" Read and return the Read and Write ACL of the given container. """
storage_url = request.session.get('storage_url', '')
auth_token = request.session.get('auth_token', '')
cont = client.head_container(storage_url, auth_token, container)
readers = split_acl(cont.get('x-container-read', ''))
writers = split_acl(cont.get('x-container-write', ''))
return JsonResponse({
"read_acl": readers,
"write_acl": writers,
})
@ajax_session_valid
def set_acls(request, container):
"""For the given container, set the ACLs. """
form = UpdateACLForm(request.POST)
    if form.is_valid():
read_acl = form.cleaned_data['read_acl']
write_acl = form.cleaned_data['write_acl']
else:
return JsonResponse({'error': 'invalid form'})
storage_url = request.session.get('storage_url', '')
auth_token = request.session.get('auth_token', '')
headers = {'X-Container-Read': read_acl,
'X-Container-Write': write_acl}
try:
client.post_container(storage_url, auth_token,
container, headers)
return JsonResponse({
"success": "Successfully updated ACL.",
"read_acl": read_acl,
"write_acl": write_acl
})
except client.ClientException:
return JsonResponse({'error': 'Error updating ACL.'})
|
<gh_stars>0
# Takes RAW arrays and returns calculated OD for given shot
# along with the best fit (between gaussian and TF) for ROI.
from __future__ import division
from time import time
from scipy.ndimage import *
from mpl_toolkits.axes_grid1 import make_axes_locatable
import os
import pandas as pd
import numpy as np
import numexpr as ne
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import h5py
from lyse import *
import fit_table
from common.OD_handler import ODShot, ODslice
from common.traces import bimodal1D
from analysislib.common.get_raw_images import get_raw_images
# Parameters
pixel_size = 5.6e-6/3.44  # Camera pixel size divided by magnification factor
# 5.6e-6/5.33 for z in situ # Yuchen and Paco: 08/19/2016
#5.6e-6/3.44 for z TOF # Yuchen and Paco: 08/19/2016
#5.6e-6/2.72 for x-in situ # Paco: 05/06/2016
sigma0 = 3*(780.24e-9)**2/(2*np.pi) # Atomic cross section (resonant absorption imaging)
save = True
fit_slices = True
# Time stamp
print '\nRunning %s' % os.path.basename(__file__)
t = time()
# Run method
run = Run(path)
# Methods
def print_time(text):
print 't = %6.3f : %s' % ((time()-t), text)
def raw_to_OD(fpath):
reconstruction_group = 'reconstruct_images'
atoms, probe, bckg = get_raw_images(fpath)
rchi2_item = (reconstruction_group, 'reconstructed_probe_rchi2')
df = data(fpath)
if rchi2_item in df and not np.isnan(df[rchi2_item]):
with h5py.File(fpath) as f:
if reconstruction_group in f['results']:
probe = run.get_result_array('reconstruct_images', 'reconstructed_probe')
bckg = run.get_result_array('reconstruct_images', 'reconstructed_background')
div = np.ma.masked_invalid((atoms - bckg)/(probe - bckg))
div = np.ma.masked_less_equal(div, 0.)
Isat = 127. # Paco: 05/12/2017 **only for insitu **
another_term = 1.*(probe-atoms)/(Isat)
alpha = 0.92 # Paco: 05/16/2017 **only for insitu **
calculated_OD = np.array(-alpha*np.log(div) + another_term)
return np.matrix(calculated_OD)
# Main
try:
with h5py.File(path) as h5_file:
if '/data' in h5_file:
print_time('Calculating OD...')
# Get OD, ROI, BCK
_OD_ = raw_to_OD(path)
OD = ODShot(_OD_)
F, mF, _ROI_, BCK_a = OD.get_ROI(sniff=False, get_background=False)
_, _, ROIcoords, _ = np.load(r'C:\labscript_suite\userlib\analysislib\paco_analysis\ROI_temp.npy')
point1, point2 = ROIcoords
x1, y1 = point1
x2, y2 = point2
ROI = ODShot(np.matrix(np.array(_ROI_)**(3)))
BCK = np.nanmean(BCK_a)*np.ones(_ROI_.shape)
if True:
N = (np.sum((_ROI_-BCK)/sigma0)*pixel_size**2)
else:
N = np.nan
if save:
run.save_result( 'pkOD', (np.nanmax(_ROI_.astype(np.float16))))
run.save_result(('N_(' + str(F) +',' +str(mF)+')'), N)
run.save_result('y_COM', np.abs(ROI.COM_2D(0, 0)[0]))
# Slices
print_time('Slice...')
#xcolOD, x_ax = OD.slice_by_segment_OD(coord_a=np.array([221, 75]), coord_b=np.array([236, 600]))
xcolOD, x_ax = np.mean(np.array(_OD_[213:217, 100:570]), axis=0), np.linspace(0, 470, 470)
ycolOD, y_ax = OD.slice_by_segment_OD(coord_a=np.array([40, 354]), coord_b=np.array([430, 354]))
y_ax=y_ax[::-1] # Reverse to match gravity
# Fits
if fit_slices:
_xslice_, _yslice_ = ODslice(slice_OD=xcolOD, slice_axis=x_ax), ODslice(slice_OD=ycolOD, slice_axis=y_ax)
run.save_result('SNR', (np.nanmean(xcolOD[410:440])/np.std(xcolOD[290:320])))
try:
x_gauss_pars, x_dense_gauss, x_gaussian_fit =_xslice_.fit_gauss()
x_tf_pars, x_dense_tf, x_TF_fit = _xslice_.fit_pure_thomas_fermi()
x_bimodal_pars, x_dense_bimodal, x_bimodal_fit =_xslice_.fit_bimodal()
fit_x_success = True
print 'x slice fit'
fit_table.get_params(x_bimodal_pars)
if save:
run.save_result('x_gauss_width', np.abs(x_gauss_pars[2]*pixel_size/(1e-6)))
run.save_result('x_gauss_amp', np.abs(x_gauss_pars[0]-x_gauss_pars[3]))
run.save_result('x_gauss_center', x_gauss_pars[1]*pixel_size/1e-6)
thermal = bimodal1D(x_ax, x_bimodal_pars[0], 0., x_bimodal_pars[2], x_bimodal_pars[3], 0., x_bimodal_pars[5])
fraction = (np.sum(xcolOD) - np.sum(thermal))/np.sum(xcolOD)
run.save_result('x_condensate_fraction', fraction)
run.save_result('temperature', (1.44e-25*(x_bimodal_pars[3]*pixel_size)**2)/(2*1.38e-23*(24.72e-3**2)))
except Exception as e:
fit_x_success = False
print 'Fit of x slice unsuccessful, %s' %e
try:
y_gauss_pars, y_dense_gauss, y_gaussian_fit = _yslice_.fit_gauss()
y_tf_pars, x_dense_tf, y_TF_fit = _yslice_.fit_pure_thomas_fermi()
y_bimodal_pars, y_dense_bimodal, y_bimodal_fit =_yslice_.fit_bimodal()
fit_y_success = True
print 'y slice fit'
fit_table.get_params(y_gauss_pars)
if save:
run.save_result('y_gauss_center', np.abs(216-y_gauss_pars[1]*pixel_size/1e-6))
except Exception as e:
fit_y_success = False
print 'Fit of y slice unsuccessful, %s' %e
if save:
run.save_result('integrated_linOD', _xslice_.integrate())
else:
fit_x_success, fit_y_success = False, False
# Display OD, slices and N
figOD = plt.figure(figsize=(8, 5), frameon=False)
gs = gridspec.GridSpec(2, 2, width_ratios=[1,2], height_ratios=[4,1])
plt.subplot(gs[2])
number_display = r'N = %d' % N
plt.text(0.4, 0.6, number_display, ha='center', va='top', fontsize=18)
plt.gca().axison = False
plt.subplot(gs[1])
im0= plt.imshow(np.array(_OD_)**(3/3), vmin= -0.35, vmax =0.5, cmap='viridis', aspect='equal', interpolation='none')
#plt.axvline(349, color='r', linewidth=2.5)
#plt.axvline(x2, color='r', linewidth=0.5)
#plt.axhline(np.abs(OD.COM_2D(0, 0)[0]), color='r', linewidth=1.5)
#plt.axhline(203, color='r', linewidth=2.5)
grid_divider = make_axes_locatable(plt.gca())
cax = grid_divider.append_axes("right", "5%", pad="3%")
plt.colorbar(im0, cax=cax)
plt.title('OD')
# X slice
plt.subplot(gs[3])
plt.step(x_ax*pixel_size/1e-6, xcolOD, 'k', linewidth=0.5)
if fit_x_success:
#pass
plt.plot(x_dense_bimodal*pixel_size/1e-6, 0*x_bimodal_fit, 'r')
plt.plot(x_dense_gauss*pixel_size/1e-6, x_gaussian_fit, 'b--')
plt.xlabel('$x \,[\mu m]$', fontsize=15)
plt.ylabel('OD')
plt.title('x_slice')
#plt.xlim(np.amin(x_ax), np.amax(x_ax))
# Y slice
plt.subplot(gs[0])
plt.step(ycolOD, y_ax*pixel_size/1e-6, 'k', linewidth=0.5)
if fit_y_success:
plt.plot(y_gaussian_fit, y_dense_gauss*pixel_size/1e-6, 'r')
plt.xlabel('OD')
plt.ylabel('$y \, [\mu m]$', fontsize=15)
plt.title('y_slice')
#plt.ylim(np.amin(y_ax), np.amax(y_ax))
plt.tight_layout()
plt.show()
else:
print_time('Unsuccessful...')
raise Exception( 'No image found in file...' )
print '\n ********** Successful **********\n\n'
except Exception as e:
    print '%s %s' % (e, os.path.basename(path))
    print '\n ********** Not Successful **********\n\n'