hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c30738c8a1ba6427c0cd62fe0952b1badd4101e | 25,230 | py | Python | ivadomed/training.py | vs74/ivadomed | c3b5a21bbe4907853a330bd18d0dbb048439111d | [
"MIT"
] | null | null | null | ivadomed/training.py | vs74/ivadomed | c3b5a21bbe4907853a330bd18d0dbb048439111d | [
"MIT"
] | null | null | null | ivadomed/training.py | vs74/ivadomed | c3b5a21bbe4907853a330bd18d0dbb048439111d | [
"MIT"
] | null | null | null | import copy
import datetime
import logging
import os
import random
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from ivadomed import losses as imed_losses
from ivadomed import mixup as imed_mixup
from ivadomed import metrics as imed_metrics
from ivadomed import models as imed_models
from ivadomed import utils as imed_utils
from ivadomed import visualize as imed_visualize
from ivadomed.loader import utils as imed_loader_utils
cudnn.benchmark = True
logger = logging.getLogger(__name__)
def train(model_params, dataset_train, dataset_val, training_params, log_directory, device,
          cuda_available=True, metric_fns=None, n_gif=0, resume_training=False, debugging=False):
    """Main command to train the network.

    Args:
        model_params (dict): Model's parameters.
        dataset_train (imed_loader): Training dataset.
        dataset_val (imed_loader): Validation dataset.
        training_params (dict): Training parameters (loss, scheduler, batch size, mixup,
            transfer learning, early stopping, etc.).
        log_directory (str): Folder where log files, best and final models are saved.
        device (str): Indicates the CPU or GPU ID.
        cuda_available (bool): If True, CUDA is available.
        metric_fns (list): List of metrics, see :mod:`ivadomed.metrics`.
        n_gif (int): Generates a GIF during training if larger than zero, one frame per epoch for a given slice. The
            parameter indicates the number of 2D slices used to generate GIFs, one GIF per slice. A GIF shows
            predictions of a given slice from the validation sub-dataset. They are saved within the log directory.
        resume_training (bool): Load a saved model ("checkpoint.pth.tar" in the log_directory) for resume
            training. This training state is saved everytime a new best model is saved in the log
            directory.
        debugging (bool): If True, extended verbosity and intermediate outputs.

    Returns:
        float, float, float, float: best_training_dice, best_training_loss, best_validation_dice,
            best_validation_loss.
    """
    # Write the metrics, images, etc to TensorBoard format
    writer = SummaryWriter(log_dir=log_directory)

    # BALANCE SAMPLES AND PYTORCH LOADER
    conditions = all([training_params["balance_samples"], model_params["name"] != "HeMIS"])
    sampler_train, shuffle_train = get_sampler(dataset_train, conditions)

    train_loader = DataLoader(dataset_train, batch_size=training_params["batch_size"],
                              shuffle=shuffle_train, pin_memory=True, sampler=sampler_train,
                              collate_fn=imed_loader_utils.imed_collate,
                              num_workers=0)

    gif_dict = {"image_path": [], "slice_id": [], "gif": []}
    if dataset_val:
        sampler_val, shuffle_val = get_sampler(dataset_val, conditions)
        val_loader = DataLoader(dataset_val, batch_size=training_params["batch_size"],
                                shuffle=shuffle_val, pin_memory=True, sampler=sampler_val,
                                collate_fn=imed_loader_utils.imed_collate,
                                num_workers=0)

        # Init GIF
        if n_gif > 0:
            indexes_gif = random.sample(range(len(dataset_val)), n_gif)
            for i_gif in range(n_gif):
                random_metadata = dict(dataset_val[indexes_gif[i_gif]]["input_metadata"][0])
                gif_dict["image_path"].append(random_metadata['input_filenames'])
                gif_dict["slice_id"].append(random_metadata['slice_index'])
                gif_obj = imed_utils.AnimatedGif(size=dataset_val[indexes_gif[i_gif]]["input"].numpy()[0].shape)
                gif_dict["gif"].append(copy.copy(gif_obj))

    # GET MODEL
    if training_params["transfer_learning"]["retrain_model"]:
        # Fix: the "{}" placeholder was previously never filled (printed a literal "{}").
        print("\nLoading pretrained model's weights: {}.".format(
            training_params["transfer_learning"]["retrain_model"]))
        print("\tFreezing the {}% first layers.".format(
            100 - training_params["transfer_learning"]['retrain_fraction'] * 100.))
        old_model_path = training_params["transfer_learning"]["retrain_model"]
        fraction = training_params["transfer_learning"]['retrain_fraction']
        if 'reset' in training_params["transfer_learning"]:
            reset = training_params["transfer_learning"]['reset']
        else:
            reset = True
        # Freeze first layers and reset last layers
        model = imed_models.set_model_for_retrain(old_model_path, retrain_fraction=fraction, map_location=device,
                                                  reset=reset)
    else:
        print("\nInitialising model's weights from scratch.")
        model_class = getattr(imed_models, model_params["name"])
        model = model_class(**model_params)
    if cuda_available:
        model.cuda()

    num_epochs = training_params["training_time"]["num_epochs"]

    # OPTIMIZER
    initial_lr = training_params["scheduler"]["initial_lr"]
    # filter out the parameters you are going to fine-tuning
    params_to_opt = filter(lambda p: p.requires_grad, model.parameters())
    # Using Adam
    optimizer = optim.Adam(params_to_opt, lr=initial_lr)
    scheduler, step_scheduler_batch = get_scheduler(copy.copy(training_params["scheduler"]["lr_scheduler"]), optimizer,
                                                    num_epochs)
    print("\nScheduler parameters: {}".format(training_params["scheduler"]["lr_scheduler"]))

    # Create dict containing gammas and betas after each FiLM layer.
    if 'film_layers' in model_params and any(model_params['film_layers']):
        gammas_dict = {i: [] for i in range(1, 2 * model_params["depth"] + 3)}
        betas_dict = {i: [] for i in range(1, 2 * model_params["depth"] + 3)}
        contrast_list = []

    # Resume
    start_epoch = 1
    resume_path = os.path.join(log_directory, "checkpoint.pth.tar")
    if resume_training:
        model, optimizer, gif_dict, start_epoch, val_loss_total_avg, scheduler, patience_count = load_checkpoint(
            model=model,
            optimizer=optimizer,
            gif_dict=gif_dict,
            scheduler=scheduler,
            fname=resume_path)
        # Individually transfer the optimizer parts
        # TODO: check if following lines are needed
        for state in optimizer.state.values():
            for k, v in state.items():
                if torch.is_tensor(v):
                    state[k] = v.to(device)

    # LOSS
    print("\nSelected Loss: {}".format(training_params["loss"]["name"]))
    print("\twith the parameters: {}".format(
        [training_params["loss"][k] for k in training_params["loss"] if k != "name"]))
    loss_fct = get_loss_function(copy.copy(training_params["loss"]))
    loss_dice_fct = imed_losses.DiceLoss()  # For comparison when another loss is used

    # INIT TRAINING VARIABLES
    best_training_dice, best_training_loss = float("inf"), float("inf")
    best_validation_loss, best_validation_dice = float("inf"), float("inf")
    patience_count = 0
    begin_time = time.time()

    # EPOCH LOOP
    for epoch in tqdm(range(num_epochs), desc="Training", initial=start_epoch):
        epoch = epoch + start_epoch
        start_time = time.time()

        lr = scheduler.get_last_lr()[0]
        writer.add_scalar('learning_rate', lr, epoch)

        # Training loop -----------------------------------------------------------
        model.train()
        train_loss_total, train_dice_loss_total = 0.0, 0.0
        num_steps = 0
        for i, batch in enumerate(train_loader):
            # GET SAMPLES
            if model_params["name"] == "HeMISUnet":
                input_samples = imed_utils.cuda(imed_utils.unstack_tensors(batch["input"]), cuda_available)
            else:
                input_samples = imed_utils.cuda(batch["input"], cuda_available)
            gt_samples = imed_utils.cuda(batch["gt"], cuda_available, non_blocking=True)

            # MIXUP
            if training_params["mixup_alpha"]:
                input_samples, gt_samples = imed_mixup.mixup(input_samples, gt_samples, training_params["mixup_alpha"],
                                                             debugging and epoch == 1, log_directory)

            # RUN MODEL
            if model_params["name"] == "HeMISUnet" or \
                    ('film_layers' in model_params and any(model_params['film_layers'])):
                metadata = get_metadata(batch["input_metadata"], model_params)
                preds = model(input_samples, metadata)
            else:
                preds = model(input_samples)

            # LOSS
            loss = loss_fct(preds, gt_samples)
            train_loss_total += loss.item()
            train_dice_loss_total += loss_dice_fct(preds, gt_samples).item()

            # UPDATE OPTIMIZER
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if step_scheduler_batch:
                scheduler.step()
            num_steps += 1

            if i == 0 and debugging:
                imed_visualize.save_tensorboard_img(writer, epoch, "Train", input_samples, gt_samples, preds,
                                                    is_three_dim=not model_params["is_2d"])

        if not step_scheduler_batch:
            scheduler.step()

        # TRAINING LOSS
        train_loss_total_avg = train_loss_total / num_steps
        msg = "Epoch {} training loss: {:.4f}.".format(epoch, train_loss_total_avg)
        train_dice_loss_total_avg = train_dice_loss_total / num_steps
        if training_params["loss"]["name"] != "DiceLoss":
            msg += "\tDice training loss: {:.4f}.".format(train_dice_loss_total_avg)
        tqdm.write(msg)

        # CURRICULUM LEARNING
        if model_params["name"] == "HeMISUnet":
            # Increase the probability of a missing modality
            model_params["missing_probability"] **= model_params["missing_probability_growth"]
            dataset_train.update(p=model_params["missing_probability"])

        # Validation loop -----------------------------------------------------
        model.eval()
        val_loss_total, val_dice_loss_total = 0.0, 0.0
        num_steps = 0
        metric_mgr = imed_metrics.MetricManager(metric_fns)
        if dataset_val:
            for i, batch in enumerate(val_loader):
                with torch.no_grad():
                    # GET SAMPLES
                    if model_params["name"] == "HeMISUnet":
                        input_samples = imed_utils.cuda(imed_utils.unstack_tensors(batch["input"]), cuda_available)
                    else:
                        input_samples = imed_utils.cuda(batch["input"], cuda_available)
                    gt_samples = imed_utils.cuda(batch["gt"], cuda_available, non_blocking=True)

                    # RUN MODEL
                    if model_params["name"] == "HeMISUnet" or \
                            ('film_layers' in model_params and any(model_params['film_layers'])):
                        metadata = get_metadata(batch["input_metadata"], model_params)
                        preds = model(input_samples, metadata)
                    else:
                        preds = model(input_samples)

                    # LOSS
                    loss = loss_fct(preds, gt_samples)
                    val_loss_total += loss.item()
                    val_dice_loss_total += loss_dice_fct(preds, gt_samples).item()

                    # Add frame to GIF
                    for i_ in range(len(input_samples)):
                        im, pr, met = input_samples[i_].cpu().numpy()[0], preds[i_].cpu().numpy()[0], \
                                      batch["input_metadata"][i_][0]
                        for i_gif in range(n_gif):
                            if gif_dict["image_path"][i_gif] == met.__getitem__('input_filenames') and \
                                    gif_dict["slice_id"][i_gif] == met.__getitem__('slice_index'):
                                overlap = imed_visualize.overlap_im_seg(im, pr)
                                gif_dict["gif"][i_gif].add(overlap, label=str(epoch))

                num_steps += 1

                # METRICS COMPUTATION
                gt_npy = gt_samples.cpu().numpy()
                preds_npy = preds.data.cpu().numpy()
                metric_mgr(preds_npy, gt_npy)

                if i == 0 and debugging:
                    imed_visualize.save_tensorboard_img(writer, epoch, "Validation", input_samples, gt_samples, preds,
                                                        is_three_dim=not model_params['is_2d'])

                if 'film_layers' in model_params and any(model_params['film_layers']) and debugging and \
                        epoch == num_epochs and i < int(len(dataset_val) / training_params["batch_size"]) + 1:
                    # Store the values of gammas and betas after the last epoch for each batch
                    gammas_dict, betas_dict, contrast_list = store_film_params(gammas_dict, betas_dict, contrast_list,
                                                                               batch['input_metadata'], model,
                                                                               model_params["film_layers"],
                                                                               model_params["depth"])

            # METRICS COMPUTATION FOR CURRENT EPOCH
            val_loss_total_avg_old = val_loss_total_avg if epoch > 1 else None
            metrics_dict = metric_mgr.get_results()
            metric_mgr.reset()
            writer.add_scalars('Validation/Metrics', metrics_dict, epoch)
            val_loss_total_avg = val_loss_total / num_steps
            writer.add_scalars('losses', {
                'train_loss': train_loss_total_avg,
                'val_loss': val_loss_total_avg,
            }, epoch)
            msg = "Epoch {} validation loss: {:.4f}.".format(epoch, val_loss_total_avg)
            val_dice_loss_total_avg = val_dice_loss_total / num_steps
            if training_params["loss"]["name"] != "DiceLoss":
                msg += "\tDice validation loss: {:.4f}.".format(val_dice_loss_total_avg)
            tqdm.write(msg)
            end_time = time.time()
            total_time = end_time - start_time
            tqdm.write("Epoch {} took {:.2f} seconds.".format(epoch, total_time))

            # UPDATE BEST RESULTS
            if val_loss_total_avg < best_validation_loss:
                # Save checkpoint
                state = {'epoch': epoch + 1,
                         'state_dict': model.state_dict(),
                         'optimizer': optimizer.state_dict(),
                         'gif_dict': gif_dict,
                         'scheduler': scheduler,
                         'patience_count': patience_count,
                         'validation_loss': val_loss_total_avg}
                torch.save(state, resume_path)

                # Save best model file
                model_path = os.path.join(log_directory, "best_model.pt")
                torch.save(model, model_path)

                # Update best scores
                best_validation_loss, best_training_loss = val_loss_total_avg, train_loss_total_avg
                best_validation_dice, best_training_dice = val_dice_loss_total_avg, train_dice_loss_total_avg

            # EARLY STOPPING
            if epoch > 1:
                val_diff = (val_loss_total_avg_old - val_loss_total_avg) * 100 / abs(val_loss_total_avg)
                if val_diff < training_params["training_time"]["early_stopping_epsilon"]:
                    patience_count += 1
                if patience_count >= training_params["training_time"]["early_stopping_patience"]:
                    print("Stopping training due to {} epochs without improvements".format(patience_count))
                    break

    # Save final model
    final_model_path = os.path.join(log_directory, "final_model.pt")
    torch.save(model, final_model_path)
    if 'film_layers' in model_params and any(model_params['film_layers']) and debugging:
        save_film_params(gammas_dict, betas_dict, contrast_list, model_params["depth"], log_directory)

    # Save best model in log directory
    if os.path.isfile(resume_path):
        state = torch.load(resume_path)
        model_path = os.path.join(log_directory, "best_model.pt")
        model.load_state_dict(state['state_dict'])
        torch.save(model, model_path)
        # Save best model as ONNX in the model directory
        try:
            # Convert best model to ONNX and save it in model directory
            best_model_path = os.path.join(log_directory, model_params["folder_name"],
                                           model_params["folder_name"] + ".onnx")
            imed_utils.save_onnx_model(model, input_samples, best_model_path)
        except Exception:
            # Fix: was a bare "except:" which also swallowed KeyboardInterrupt/SystemExit.
            # ONNX export may fail for unsupported ops; fall back to a plain ".pt" save.
            best_model_path = os.path.join(log_directory, model_params["folder_name"],
                                           model_params["folder_name"] + ".pt")
            torch.save(model, best_model_path)
            logger.warning("Failed to save the model as '.onnx', saved it as '.pt': {}".format(best_model_path))

    # Save GIFs
    gif_folder = os.path.join(log_directory, "gifs")
    if n_gif > 0 and not os.path.isdir(gif_folder):
        os.makedirs(gif_folder)
    for i_gif in range(n_gif):
        fname_out = gif_dict["image_path"][i_gif].split('/')[-3] + "__"
        fname_out += gif_dict["image_path"][i_gif].split('/')[-1].split(".nii.gz")[0].split(
            gif_dict["image_path"][i_gif].split('/')[-3] + "_")[1] + "__"
        fname_out += str(gif_dict["slice_id"][i_gif]) + ".gif"
        path_gif_out = os.path.join(gif_folder, fname_out)
        gif_dict["gif"][i_gif].save(path_gif_out)

    writer.close()
    final_time = time.time()
    duration_time = final_time - begin_time
    print('begin ' + time.strftime('%H:%M:%S', time.localtime(begin_time)) + "| End " +
          time.strftime('%H:%M:%S', time.localtime(final_time)) +
          "| duration " + str(datetime.timedelta(seconds=duration_time)))

    return best_training_dice, best_training_loss, best_validation_dice, best_validation_loss
def get_sampler(ds, balance_bool):
    """Build the (sampler, shuffle) pair for a DataLoader.

    Args:
        ds (BidsDataset): BidsDataset object.
        balance_bool (bool): If True, a sampler is generated that balance positive and negative samples.

    Returns:
        If balance_bool is True: Returns BalancedSampler, Bool: Sampler and boolean for shuffling (set to False).
        Otherwise: Returns None and True.
    """
    if not balance_bool:
        # No balancing requested: let the DataLoader shuffle on its own.
        return None, True
    # A custom sampler and shuffle=True are mutually exclusive in PyTorch.
    return imed_loader_utils.BalancedSampler(ds), False
def get_scheduler(params, optimizer, num_epochs=0):
    """Get scheduler.

    Args:
        params (dict): scheduler parameters, see `PyTorch documentation <https://pytorch.org/docs/stable/optim.html>`__.
            Must contain a "name" key; the remaining entries are forwarded to the scheduler
            constructor. The dict is left unmodified.
        optimizer (torch optim): Optimizer whose learning rate will be scheduled.
        num_epochs (int): number of epochs.

    Returns:
        torch.optim, bool, which indicates if the scheduler is updated for each batch (True), or for each epoch (False).

    Raises:
        ValueError: If the scheduler name is not supported.
    """
    step_scheduler_batch = False
    # Work on a shallow copy: the previous implementation deleted "name" from the
    # caller's dict in place, which made the function unsafe to call twice with
    # the same config and forced callers to pre-copy their dicts.
    params = dict(params)
    scheduler_name = params.pop("name")
    if scheduler_name == "CosineAnnealingLR":
        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, num_epochs)
    elif scheduler_name == "CosineAnnealingWarmRestarts":
        scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, **params)
    elif scheduler_name == "CyclicLR":
        # CyclicLR adjusts the learning rate after every batch, not every epoch.
        scheduler = optim.lr_scheduler.CyclicLR(optimizer, **params, mode="triangular2", cycle_momentum=False)
        step_scheduler_batch = True
    else:
        raise ValueError(
            "{} is an unknown LR Scheduler name, please choose between 'CosineAnnealingLR', "
            "'CosineAnnealingWarmRestarts', or 'CyclicLR'".format(scheduler_name))
    return scheduler, step_scheduler_batch
def get_loss_function(params):
    """Get Loss function.

    Args:
        params (dict): See :mod:`ivadomed.losses`. Must contain a "name" key; the remaining
            entries are forwarded to the loss constructor. The dict is left unmodified.

    Returns:
        imed_losses object.

    Raises:
        ValueError: If the requested loss is not implemented.
    """
    # Work on a shallow copy: the previous implementation deleted "name" from the
    # caller's dict in place, which made the function unsafe to call twice with
    # the same config and forced callers to pre-copy their dicts.
    params = dict(params)
    loss_name = params.pop("name")
    # Check if implemented
    loss_function_available = ["DiceLoss", "FocalLoss", "GeneralizedDiceLoss", "FocalDiceLoss", "MultiClassDiceLoss",
                               "BinaryCrossEntropyLoss", "TverskyLoss", "FocalTverskyLoss", "AdapWingLoss", "L2loss",
                               "LossCombination"]
    if loss_name not in loss_function_available:
        raise ValueError("Unknown Loss function: {}, please choose between {}".format(loss_name, loss_function_available))
    loss_class = getattr(imed_losses, loss_name)
    loss_fct = loss_class(**params)
    return loss_fct
def get_metadata(metadata, model_params):
    """Extract the model-specific metadata for one batch.

    Args:
        metadata (batch): Batch metadata; one list of sample-metadata dicts per sample.
        model_params (dict): Model configuration; "name" selects the extraction mode.

    Returns:
        If FiLMedUnet, Returns a list of metadata, that have been transformed by the One Hot Encoder.
        If HeMISUnet, Returns a numpy array where each row represents a sample and each column represents a contrast.
    """
    if model_params["name"] == "HeMISUnet":
        # One row per sample, one column per contrast (1 = modality present).
        return np.array([sample[0]["missing_mod"] for sample in metadata])
    # FiLM path: one-hot encode each sample's film_input.
    encoder = model_params["film_onehotencoder"]
    return [encoder.transform([sample[0]['film_input']]).tolist()[0] for sample in metadata]
def store_film_params(gammas, betas, contrasts, metadata, model, film_layers, depth):
    """Accumulate the current FiLM gammas/betas and batch contrasts.

    Args:
        gammas (dict): Maps FiLM layer index (1-based) to a list of gamma arrays.
        betas (dict): Maps FiLM layer index (1-based) to a list of beta arrays.
        contrasts (list): list of the batch sample's contrasts (eg T2w, T1w)
        metadata (list): Batch metadata (first element holds one dict per sample).
        model (nn.Module): Model containing FiLM layers.
        film_layers (list): Truthy flags selecting which FiLM layers are active.
        depth (int): Depth of the U-Net encoder/decoder.

    Returns:
        dict, dict, list: gammas, betas, contrasts
    """
    contrasts.append([sample['contrast'] for sample in metadata[0]])
    # Fill the lists of gammas and betas for every active FiLM layer.
    for idx, active in enumerate(film_layers):
        if not active:
            continue
        if idx < depth:
            # Encoder (down) path.
            layer = model.encoder.down_path[idx * 3 + 1]
        elif idx == depth:
            # Bottleneck.
            layer = model.encoder.film_bottom
        elif idx == depth * 2 + 1:
            # Final FiLM layer of the decoder.
            layer = model.decoder.last_film
        else:
            # Decoder (up) path.
            layer = model.decoder.up_path[(idx - depth - 1) * 2 + 1]
        gammas[idx + 1].append(layer.gammas[:, :, 0, 0].cpu().numpy())
        betas[idx + 1].append(layer.betas[:, :, 0, 0].cpu().numpy())
    return gammas, betas, contrasts
def save_film_params(gammas, betas, contrasts, depth, ofolder):
    """Save FiLM params as npy files.

    These parameters can be further used for visualisation purposes. They are saved in the `ofolder` with `.npy` format.

    Args:
        gammas (dict): Maps FiLM layer index (1-based) to a list of gamma arrays.
        betas (dict): Maps FiLM layer index (1-based) to a list of beta arrays.
        contrasts (list): list of the batch sample's contrasts (eg T2w, T1w)
        depth (int): Depth of the U-Net; there are 2 * depth + 2 FiLM layers.
        ofolder (str): Output folder.
    """
    # One gamma/beta file per FiLM layer (indices 1 .. 2*depth+2).
    for layer_idx in range(1, 2 * depth + 3):
        np.save(os.path.join(ofolder, "gamma_layer_{}.npy".format(layer_idx)), np.array(gammas[layer_idx]))
        np.save(os.path.join(ofolder, "beta_layer_{}.npy".format(layer_idx)), np.array(betas[layer_idx]))
    # Save the contrasts of all batch images alongside the parameters.
    np.save(os.path.join(ofolder, "contrast_image.npy"), np.array(contrasts))
def load_checkpoint(model, optimizer, gif_dict, scheduler, fname):
    """Load checkpoint.

    This function check if a checkpoint is available. If so, it updates the state of the input objects.
    If no checkpoint is found (or it cannot be read), the inputs are returned unchanged together
    with default start values.

    Args:
        model (nn.Module): Init model.
        optimizer (torch.optim): Model's optimizer.
        gif_dict (dict): Dictionary containing a GIF of the training.
        scheduler (_LRScheduler): Learning rate scheduler.
        fname (str): Checkpoint filename.

    Return:
        nn.Module, torch, dict, int, float, _LRScheduler, int
    """
    start_epoch = 1
    validation_loss = 0
    patience_count = 0
    try:
        print("\nLoading checkpoint: {}".format(fname))
        checkpoint = torch.load(fname)
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        validation_loss = checkpoint['validation_loss']
        scheduler = checkpoint['scheduler']
        gif_dict = checkpoint['gif_dict']
        patience_count = checkpoint['patience_count']
        print("... Resume training from epoch #{}".format(start_epoch))
    except FileNotFoundError:
        logger.warning("\nNo checkpoint found at: {}".format(fname))
    except Exception as e:
        # Fix: was a bare "except:" which reported any corrupt/incompatible checkpoint
        # as "no checkpoint found" and also swallowed KeyboardInterrupt/SystemExit.
        logger.warning("\nFailed to load checkpoint {}: {}".format(fname, e))
    return model, optimizer, gif_dict, start_epoch, validation_loss, scheduler, patience_count
| 45.29623 | 122 | 0.615696 | import copy
import datetime
import logging
import os
import random
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from ivadomed import losses as imed_losses
from ivadomed import mixup as imed_mixup
from ivadomed import metrics as imed_metrics
from ivadomed import models as imed_models
from ivadomed import utils as imed_utils
from ivadomed import visualize as imed_visualize
from ivadomed.loader import utils as imed_loader_utils
cudnn.benchmark = True
logger = logging.getLogger(__name__)
def train(model_params, dataset_train, dataset_val, training_params, log_directory, device,
cuda_available=True, metric_fns=None, n_gif=0, resume_training=False, debugging=False):
writer = SummaryWriter(log_dir=log_directory)
conditions = all([training_params["balance_samples"], model_params["name"] != "HeMIS"])
sampler_train, shuffle_train = get_sampler(dataset_train, conditions)
train_loader = DataLoader(dataset_train, batch_size=training_params["batch_size"],
shuffle=shuffle_train, pin_memory=True, sampler=sampler_train,
collate_fn=imed_loader_utils.imed_collate,
num_workers=0)
gif_dict = {"image_path": [], "slice_id": [], "gif": []}
if dataset_val:
sampler_val, shuffle_val = get_sampler(dataset_val, conditions)
val_loader = DataLoader(dataset_val, batch_size=training_params["batch_size"],
shuffle=shuffle_val, pin_memory=True, sampler=sampler_val,
collate_fn=imed_loader_utils.imed_collate,
num_workers=0)
if n_gif > 0:
indexes_gif = random.sample(range(len(dataset_val)), n_gif)
for i_gif in range(n_gif):
random_metadata = dict(dataset_val[indexes_gif[i_gif]]["input_metadata"][0])
gif_dict["image_path"].append(random_metadata['input_filenames'])
gif_dict["slice_id"].append(random_metadata['slice_index'])
gif_obj = imed_utils.AnimatedGif(size=dataset_val[indexes_gif[i_gif]]["input"].numpy()[0].shape)
gif_dict["gif"].append(copy.copy(gif_obj))
if training_params["transfer_learning"]["retrain_model"]:
print("\nLoading pretrained model's weights: {}.")
print("\tFreezing the {}% first layers.".format(
100 - training_params["transfer_learning"]['retrain_fraction'] * 100.))
old_model_path = training_params["transfer_learning"]["retrain_model"]
fraction = training_params["transfer_learning"]['retrain_fraction']
if 'reset' in training_params["transfer_learning"]:
reset = training_params["transfer_learning"]['reset']
else :
reset = True
# Freeze first layers and reset last layers
model = imed_models.set_model_for_retrain(old_model_path, retrain_fraction=fraction, map_location=device,
reset=reset)
else:
print("\nInitialising model's weights from scratch.")
model_class = getattr(imed_models, model_params["name"])
model = model_class(**model_params)
if cuda_available:
model.cuda()
num_epochs = training_params["training_time"]["num_epochs"]
initial_lr = training_params["scheduler"]["initial_lr"]
params_to_opt = filter(lambda p: p.requires_grad, model.parameters())
optimizer = optim.Adam(params_to_opt, lr=initial_lr)
scheduler, step_scheduler_batch = get_scheduler(copy.copy(training_params["scheduler"]["lr_scheduler"]), optimizer,
num_epochs)
print("\nScheduler parameters: {}".format(training_params["scheduler"]["lr_scheduler"]))
if 'film_layers' in model_params and any(model_params['film_layers']):
gammas_dict = {i: [] for i in range(1, 2 * model_params["depth"] + 3)}
betas_dict = {i: [] for i in range(1, 2 * model_params["depth"] + 3)}
contrast_list = []
start_epoch = 1
resume_path = os.path.join(log_directory, "checkpoint.pth.tar")
if resume_training:
model, optimizer, gif_dict, start_epoch, val_loss_total_avg, scheduler, patience_count = load_checkpoint(
model=model,
optimizer=optimizer,
gif_dict=gif_dict,
scheduler=scheduler,
fname=resume_path)
for state in optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.to(device)
print("\nSelected Loss: {}".format(training_params["loss"]["name"]))
print("\twith the parameters: {}".format(
[training_params["loss"][k] for k in training_params["loss"] if k != "name"]))
loss_fct = get_loss_function(copy.copy(training_params["loss"]))
loss_dice_fct = imed_losses.DiceLoss()
best_training_dice, best_training_loss = float("inf"), float("inf")
best_validation_loss, best_validation_dice = float("inf"), float("inf")
patience_count = 0
begin_time = time.time()
for epoch in tqdm(range(num_epochs), desc="Training", initial=start_epoch):
epoch = epoch + start_epoch
start_time = time.time()
lr = scheduler.get_last_lr()[0]
writer.add_scalar('learning_rate', lr, epoch)
model.train()
train_loss_total, train_dice_loss_total = 0.0, 0.0
num_steps = 0
for i, batch in enumerate(train_loader):
if model_params["name"] == "HeMISUnet":
input_samples = imed_utils.cuda(imed_utils.unstack_tensors(batch["input"]), cuda_available)
else:
input_samples = imed_utils.cuda(batch["input"], cuda_available)
gt_samples = imed_utils.cuda(batch["gt"], cuda_available, non_blocking=True)
if training_params["mixup_alpha"]:
input_samples, gt_samples = imed_mixup.mixup(input_samples, gt_samples, training_params["mixup_alpha"],
debugging and epoch == 1, log_directory)
if model_params["name"] == "HeMISUnet" or \
('film_layers' in model_params and any(model_params['film_layers'])):
metadata = get_metadata(batch["input_metadata"], model_params)
preds = model(input_samples, metadata)
else:
preds = model(input_samples)
loss = loss_fct(preds, gt_samples)
train_loss_total += loss.item()
train_dice_loss_total += loss_dice_fct(preds, gt_samples).item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
if step_scheduler_batch:
scheduler.step()
num_steps += 1
if i == 0 and debugging:
imed_visualize.save_tensorboard_img(writer, epoch, "Train", input_samples, gt_samples, preds,
is_three_dim=not model_params["is_2d"])
if not step_scheduler_batch:
scheduler.step()
train_loss_total_avg = train_loss_total / num_steps
msg = "Epoch {} training loss: {:.4f}.".format(epoch, train_loss_total_avg)
train_dice_loss_total_avg = train_dice_loss_total / num_steps
if training_params["loss"]["name"] != "DiceLoss":
msg += "\tDice training loss: {:.4f}.".format(train_dice_loss_total_avg)
tqdm.write(msg)
if model_params["name"] == "HeMISUnet":
model_params["missing_probability"] **= model_params["missing_probability_growth"]
dataset_train.update(p=model_params["missing_probability"])
model.eval()
val_loss_total, val_dice_loss_total = 0.0, 0.0
num_steps = 0
metric_mgr = imed_metrics.MetricManager(metric_fns)
if dataset_val:
for i, batch in enumerate(val_loader):
with torch.no_grad():
if model_params["name"] == "HeMISUnet":
input_samples = imed_utils.cuda(imed_utils.unstack_tensors(batch["input"]), cuda_available)
else:
input_samples = imed_utils.cuda(batch["input"], cuda_available)
gt_samples = imed_utils.cuda(batch["gt"], cuda_available, non_blocking=True)
if model_params["name"] == "HeMISUnet" or \
('film_layers' in model_params and any(model_params['film_layers'])):
metadata = get_metadata(batch["input_metadata"], model_params)
preds = model(input_samples, metadata)
else:
preds = model(input_samples)
loss = loss_fct(preds, gt_samples)
val_loss_total += loss.item()
val_dice_loss_total += loss_dice_fct(preds, gt_samples).item()
for i_ in range(len(input_samples)):
im, pr, met = input_samples[i_].cpu().numpy()[0], preds[i_].cpu().numpy()[0], \
batch["input_metadata"][i_][0]
for i_gif in range(n_gif):
if gif_dict["image_path"][i_gif] == met.__getitem__('input_filenames') and \
gif_dict["slice_id"][i_gif] == met.__getitem__('slice_index'):
overlap = imed_visualize.overlap_im_seg(im, pr)
gif_dict["gif"][i_gif].add(overlap, label=str(epoch))
num_steps += 1
gt_npy = gt_samples.cpu().numpy()
preds_npy = preds.data.cpu().numpy()
metric_mgr(preds_npy, gt_npy)
if i == 0 and debugging:
imed_visualize.save_tensorboard_img(writer, epoch, "Validation", input_samples, gt_samples, preds,
is_three_dim=not model_params['is_2d'])
if 'film_layers' in model_params and any(model_params['film_layers']) and debugging and \
epoch == num_epochs and i < int(len(dataset_val) / training_params["batch_size"]) + 1:
gammas_dict, betas_dict, contrast_list = store_film_params(gammas_dict, betas_dict, contrast_list,
batch['input_metadata'], model,
model_params["film_layers"],
model_params["depth"])
val_loss_total_avg_old = val_loss_total_avg if epoch > 1 else None
metrics_dict = metric_mgr.get_results()
metric_mgr.reset()
writer.add_scalars('Validation/Metrics', metrics_dict, epoch)
val_loss_total_avg = val_loss_total / num_steps
writer.add_scalars('losses', {
'train_loss': train_loss_total_avg,
'val_loss': val_loss_total_avg,
}, epoch)
msg = "Epoch {} validation loss: {:.4f}.".format(epoch, val_loss_total_avg)
val_dice_loss_total_avg = val_dice_loss_total / num_steps
if training_params["loss"]["name"] != "DiceLoss":
msg += "\tDice validation loss: {:.4f}.".format(val_dice_loss_total_avg)
tqdm.write(msg)
end_time = time.time()
total_time = end_time - start_time
tqdm.write("Epoch {} took {:.2f} seconds.".format(epoch, total_time))
if val_loss_total_avg < best_validation_loss:
state = {'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'gif_dict': gif_dict,
'scheduler': scheduler,
'patience_count': patience_count,
'validation_loss': val_loss_total_avg}
torch.save(state, resume_path)
model_path = os.path.join(log_directory, "best_model.pt")
torch.save(model, model_path)
best_validation_loss, best_training_loss = val_loss_total_avg, train_loss_total_avg
best_validation_dice, best_training_dice = val_dice_loss_total_avg, train_dice_loss_total_avg
if epoch > 1:
val_diff = (val_loss_total_avg_old - val_loss_total_avg) * 100 / abs(val_loss_total_avg)
if val_diff < training_params["training_time"]["early_stopping_epsilon"]:
patience_count += 1
if patience_count >= training_params["training_time"]["early_stopping_patience"]:
print("Stopping training due to {} epochs without improvements".format(patience_count))
break
final_model_path = os.path.join(log_directory, "final_model.pt")
torch.save(model, final_model_path)
if 'film_layers' in model_params and any(model_params['film_layers']) and debugging:
save_film_params(gammas_dict, betas_dict, contrast_list, model_params["depth"], log_directory)
if os.path.isfile(resume_path):
state = torch.load(resume_path)
model_path = os.path.join(log_directory, "best_model.pt")
model.load_state_dict(state['state_dict'])
torch.save(model, model_path)
try:
best_model_path = os.path.join(log_directory, model_params["folder_name"],
model_params["folder_name"] + ".onnx")
imed_utils.save_onnx_model(model, input_samples, best_model_path)
except:
best_model_path = os.path.join(log_directory, model_params["folder_name"],
model_params["folder_name"] + ".pt")
torch.save(model, best_model_path)
logger.warning("Failed to save the model as '.onnx', saved it as '.pt': {}".format(best_model_path))
gif_folder = os.path.join(log_directory, "gifs")
if n_gif > 0 and not os.path.isdir(gif_folder):
os.makedirs(gif_folder)
for i_gif in range(n_gif):
fname_out = gif_dict["image_path"][i_gif].split('/')[-3] + "__"
fname_out += gif_dict["image_path"][i_gif].split('/')[-1].split(".nii.gz")[0].split(
gif_dict["image_path"][i_gif].split('/')[-3] + "_")[1] + "__"
fname_out += str(gif_dict["slice_id"][i_gif]) + ".gif"
path_gif_out = os.path.join(gif_folder, fname_out)
gif_dict["gif"][i_gif].save(path_gif_out)
writer.close()
final_time = time.time()
duration_time = final_time - begin_time
print('begin ' + time.strftime('%H:%M:%S', time.localtime(begin_time)) + "| End " +
time.strftime('%H:%M:%S', time.localtime(final_time)) +
"| duration " + str(datetime.timedelta(seconds=duration_time)))
return best_training_dice, best_training_loss, best_validation_dice, best_validation_loss
def get_sampler(ds, balance_bool):
if balance_bool:
return imed_loader_utils.BalancedSampler(ds), False
else:
return None, True
def get_scheduler(params, optimizer, num_epochs=0):
step_scheduler_batch = False
scheduler_name = params["name"]
del params["name"]
if scheduler_name == "CosineAnnealingLR":
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, num_epochs)
elif scheduler_name == "CosineAnnealingWarmRestarts":
scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, **params)
elif scheduler_name == "CyclicLR":
scheduler = optim.lr_scheduler.CyclicLR(optimizer, **params, mode="triangular2", cycle_momentum=False)
step_scheduler_batch = True
else:
raise ValueError(
"{} is an unknown LR Scheduler name, please choose between 'CosineAnnealingLR', "
"'CosineAnnealingWarmRestarts', or 'CyclicLR'".format(scheduler_name))
return scheduler, step_scheduler_batch
def get_loss_function(params):
loss_name = params["name"]
del params["name"]
loss_function_available = ["DiceLoss", "FocalLoss", "GeneralizedDiceLoss", "FocalDiceLoss", "MultiClassDiceLoss",
"BinaryCrossEntropyLoss", "TverskyLoss", "FocalTverskyLoss", "AdapWingLoss", "L2loss",
"LossCombination"]
if loss_name not in loss_function_available:
raise ValueError("Unknown Loss function: {}, please choose between {}".format(loss_name, loss_function_available))
loss_class = getattr(imed_losses, loss_name)
loss_fct = loss_class(**params)
return loss_fct
def get_metadata(metadata, model_params):
if model_params["name"] == "HeMISUnet":
return np.array([m[0]["missing_mod"] for m in metadata])
else:
return [model_params["film_onehotencoder"].transform([metadata[k][0]['film_input']]).tolist()[0]
for k in range(len(metadata))]
def store_film_params(gammas, betas, contrasts, metadata, model, film_layers, depth):
new_contrast = [metadata[0][k]['contrast'] for k in range(len(metadata[0]))]
contrasts.append(new_contrast)
for idx in [i for i, x in enumerate(film_layers) if x]:
if idx < depth:
layer_cur = model.encoder.down_path[idx * 3 + 1]
elif idx == depth:
layer_cur = model.encoder.film_bottom
elif idx == depth * 2 + 1:
layer_cur = model.decoder.last_film
else:
layer_cur = model.decoder.up_path[(idx - depth - 1) * 2 + 1]
gammas[idx + 1].append(layer_cur.gammas[:, :, 0, 0].cpu().numpy())
betas[idx + 1].append(layer_cur.betas[:, :, 0, 0].cpu().numpy())
return gammas, betas, contrasts
def save_film_params(gammas, betas, contrasts, depth, ofolder):
gammas_dict = {i: np.array(gammas[i]) for i in range(1, 2 * depth + 3)}
betas_dict = {i: np.array(betas[i]) for i in range(1, 2 * depth + 3)}
for i in range(1, 2 * depth + 3):
gamma_layer_path = os.path.join(ofolder, "gamma_layer_{}.npy".format(i))
np.save(gamma_layer_path, gammas_dict[i])
beta_layer_path = os.path.join(ofolder, "beta_layer_{}.npy".format(i))
np.save(beta_layer_path, betas_dict[i])
contrast_images = np.array(contrasts)
contrast_path = os.path.join(ofolder, "contrast_image.npy")
np.save(contrast_path, contrast_images)
def load_checkpoint(model, optimizer, gif_dict, scheduler, fname):
start_epoch = 1
validation_loss = 0
patience_count = 0
try:
print("\nLoading checkpoint: {}".format(fname))
checkpoint = torch.load(fname)
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
validation_loss = checkpoint['validation_loss']
scheduler = checkpoint['scheduler']
gif_dict = checkpoint['gif_dict']
patience_count = checkpoint['patience_count']
print("... Resume training from epoch #{}".format(start_epoch))
except:
logger.warning("\nNo checkpoint found at: {}".format(fname))
return model, optimizer, gif_dict, start_epoch, validation_loss, scheduler, patience_count
| true | true |
1c3073db02788b6fee392aaaf1064cf444cb1052 | 916 | py | Python | tests/conftest.py | imfht/sec-flask-cookiecutter | 81410cc72cc401cd1bd4698c5958f3ce0c5d581a | [
"MIT"
] | null | null | null | tests/conftest.py | imfht/sec-flask-cookiecutter | 81410cc72cc401cd1bd4698c5958f3ce0c5d581a | [
"MIT"
] | 5 | 2021-03-09T12:28:12.000Z | 2022-02-26T15:30:29.000Z | tests/conftest.py | jnoble/my_flask_app | 6d9681b04aa08b26b3d4577cca6311aa79673d79 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Defines fixtures available to all tests."""
import pytest
from webtest import TestApp
from my_flask_app.app import create_app
from my_flask_app.database import db as _db
from .factories import UserFactory
@pytest.fixture
def app():
"""Create application for the tests."""
_app = create_app("tests.settings")
ctx = _app.test_request_context()
ctx.push()
yield _app
ctx.pop()
@pytest.fixture
def testapp(app):
"""Create Webtest app."""
return TestApp(app)
@pytest.fixture
def db(app):
"""Create database for the tests."""
_db.app = app
with app.app_context():
_db.create_all()
yield _db
# Explicitly close DB connection
_db.session.close()
_db.drop_all()
@pytest.fixture
def user(db):
"""Create user for the tests."""
user = UserFactory(password="myprecious")
db.session.commit()
return user
| 17.960784 | 46 | 0.667031 |
import pytest
from webtest import TestApp
from my_flask_app.app import create_app
from my_flask_app.database import db as _db
from .factories import UserFactory
@pytest.fixture
def app():
_app = create_app("tests.settings")
ctx = _app.test_request_context()
ctx.push()
yield _app
ctx.pop()
@pytest.fixture
def testapp(app):
return TestApp(app)
@pytest.fixture
def db(app):
_db.app = app
with app.app_context():
_db.create_all()
yield _db
_db.session.close()
_db.drop_all()
@pytest.fixture
def user(db):
user = UserFactory(password="myprecious")
db.session.commit()
return user
| true | true |
1c307427210ec8a88e7edf75867cc0afb7b49e39 | 693 | py | Python | experimental/Python/src/Intersection/Carla/ROS/carla_intersection/src/launch_parameters.py | soerendomroes/lingua-franca | 9e5e5a16f013f6ea9760cc65f850986319dc8917 | [
"BSD-2-Clause"
] | null | null | null | experimental/Python/src/Intersection/Carla/ROS/carla_intersection/src/launch_parameters.py | soerendomroes/lingua-franca | 9e5e5a16f013f6ea9760cc65f850986319dc8917 | [
"BSD-2-Clause"
] | null | null | null | experimental/Python/src/Intersection/Carla/ROS/carla_intersection/src/launch_parameters.py | soerendomroes/lingua-franca | 9e5e5a16f013f6ea9760cc65f850986319dc8917 | [
"BSD-2-Clause"
] | null | null | null | SPAWN_POINTS = [
# x y z yaw
[-122.0, 39.6, 0.3, -90.0],
[-177.77, 6.48, 0.3, 0.0],
[-132.77, -40, 0.3, 90.0],
[-80.77, -4.5, 0.3, 180.0]
]
INITIAL_POSITIONS = [
# x y z
[0.000038, -0.000674, 2.794825], # /|\
[-0.000501, -0.001084, 2.794891], # ->
[-0.000060, -0.001510, 2.794854], # \|/
[0.000367, -0.001185, 2.794846] # <-
]
INITIAL_VELOCITIES = [
# x y z
[ 0.0, -8.0, 0.0],
[ 8.0, 0.0, 0.0],
[ 0.0, 8.0, 0.0],
[-8.0, 0.0, 0.0]
]
INTERSECTION_WIDTH = 40
NOMINAL_SPEED_IN_INTERSECTION = 14.0
INTERSECTION_POSITION = [-0.000007632,-0.001124366,2.792485] | 23.896552 | 60 | 0.454545 | SPAWN_POINTS = [
[-122.0, 39.6, 0.3, -90.0],
[-177.77, 6.48, 0.3, 0.0],
[-132.77, -40, 0.3, 90.0],
[-80.77, -4.5, 0.3, 180.0]
]
INITIAL_POSITIONS = [
[0.000038, -0.000674, 2.794825],
[-0.000501, -0.001084, 2.794891],
[-0.000060, -0.001510, 2.794854],
[0.000367, -0.001185, 2.794846]
]
INITIAL_VELOCITIES = [
[ 0.0, -8.0, 0.0],
[ 8.0, 0.0, 0.0],
[ 0.0, 8.0, 0.0],
[-8.0, 0.0, 0.0]
]
INTERSECTION_WIDTH = 40
NOMINAL_SPEED_IN_INTERSECTION = 14.0
INTERSECTION_POSITION = [-0.000007632,-0.001124366,2.792485] | true | true |
1c3074614147e0039a1a48614dff9fe9dd4a7c69 | 1,919 | py | Python | tests/plugins/test_render.py | illikainen/luoda | eb9788b64aa18df7424e28ec902972fcbaa20620 | [
"BSD-2-Clause"
] | null | null | null | tests/plugins/test_render.py | illikainen/luoda | eb9788b64aa18df7424e28ec902972fcbaa20620 | [
"BSD-2-Clause"
] | null | null | null | tests/plugins/test_render.py | illikainen/luoda | eb9788b64aa18df7424e28ec902972fcbaa20620 | [
"BSD-2-Clause"
] | null | null | null | # pylint: disable=W0621
#
# Copyright (c) 2019, Hans Jerry Illikainen <hji@dyntopia.com>
#
# SPDX-License-Identifier: BSD-2-Clause
from datetime import date
from pathlib import Path
from time import time
from jinja2 import FileSystemLoader
from jinja2.exceptions import SecurityError, TemplateError
from pytest import raises
from luoda.item import Item
from luoda.plugins.render import Sandbox, _strftime, available, run
from ..fixtures import tmpdir # pylint: disable=W0611
def test_sandbox(tmpdir: Path) -> None:
(tmpdir / "template").write_text("{{ func() }}")
loader = FileSystemLoader(".")
env = Sandbox(loader=loader)
template = env.get_template("template")
with raises(SecurityError):
template.render(func=lambda: 123)
def test_available() -> None:
assert available()
def test_render(tmpdir: Path) -> None:
(tmpdir / "template").write_text("x {{ item.content }} y")
item = Item(content="abc", path=tmpdir / "foo", template="template")
config = {"build": {"template-dir": "."}}
new_item = run(item, items=[], config=config)
assert new_item.content == "x abc y"
assert new_item != item
def test_no_template(tmpdir: Path) -> None:
item = Item(content="abc", path=tmpdir / "foo")
config = {"build": {"template-dir": "."}}
assert run(item, items=[], config=config) == item
def test_template_not_found(tmpdir: Path) -> None:
item = Item(content="abc", path=tmpdir / "foo", template="template")
config = {"build": {"template-dir": "."}}
assert run(item, items=[], config=config) == item
def test_strftime() -> None:
with raises(TemplateError):
_strftime("0") # type: ignore
with raises(TemplateError):
_strftime({}) # type: ignore
with raises(TemplateError):
_strftime(0, {}) # type: ignore
ts = time()
assert _strftime(ts, "%Y") == str(date.fromtimestamp(ts).year)
| 26.287671 | 72 | 0.663366 |
from datetime import date
from pathlib import Path
from time import time
from jinja2 import FileSystemLoader
from jinja2.exceptions import SecurityError, TemplateError
from pytest import raises
from luoda.item import Item
from luoda.plugins.render import Sandbox, _strftime, available, run
from ..fixtures import tmpdir
def test_sandbox(tmpdir: Path) -> None:
(tmpdir / "template").write_text("{{ func() }}")
loader = FileSystemLoader(".")
env = Sandbox(loader=loader)
template = env.get_template("template")
with raises(SecurityError):
template.render(func=lambda: 123)
def test_available() -> None:
assert available()
def test_render(tmpdir: Path) -> None:
(tmpdir / "template").write_text("x {{ item.content }} y")
item = Item(content="abc", path=tmpdir / "foo", template="template")
config = {"build": {"template-dir": "."}}
new_item = run(item, items=[], config=config)
assert new_item.content == "x abc y"
assert new_item != item
def test_no_template(tmpdir: Path) -> None:
item = Item(content="abc", path=tmpdir / "foo")
config = {"build": {"template-dir": "."}}
assert run(item, items=[], config=config) == item
def test_template_not_found(tmpdir: Path) -> None:
item = Item(content="abc", path=tmpdir / "foo", template="template")
config = {"build": {"template-dir": "."}}
assert run(item, items=[], config=config) == item
def test_strftime() -> None:
with raises(TemplateError):
_strftime("0")
with raises(TemplateError):
_strftime({})
with raises(TemplateError):
_strftime(0, {})
ts = time()
assert _strftime(ts, "%Y") == str(date.fromtimestamp(ts).year)
| true | true |
1c3076036b9cb826399283de80e1d2e33c1bd6c2 | 1,067 | py | Python | app/core/models.py | burakkirlaroglu/recipe-app-api | 0953921bce860502746af4447fb90136e7070faf | [
"MIT"
] | null | null | null | app/core/models.py | burakkirlaroglu/recipe-app-api | 0953921bce860502746af4447fb90136e7070faf | [
"MIT"
] | null | null | null | app/core/models.py | burakkirlaroglu/recipe-app-api | 0953921bce860502746af4447fb90136e7070faf | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import (AbstractBaseUser, BaseUserManager,
PermissionsMixin)
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
if not email:
raise ValueError('Users must have an email address !!')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
user = self.create_user(email=email, password=password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
| 30.485714 | 76 | 0.673852 | from django.db import models
from django.contrib.auth.models import (AbstractBaseUser, BaseUserManager,
PermissionsMixin)
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
if not email:
raise ValueError('Users must have an email address !!')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
user = self.create_user(email=email, password=password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
| true | true |
1c30768285fc10fefd2eec1b486d15b6c493688a | 11,078 | py | Python | pytorch_lightning/trainer/configuration_validator.py | kazhang/pytorch-lightning | 54e95d3e2896bfe092d5ee18ede2c153c200c266 | [
"Apache-2.0"
] | null | null | null | pytorch_lightning/trainer/configuration_validator.py | kazhang/pytorch-lightning | 54e95d3e2896bfe092d5ee18ede2c153c200c266 | [
"Apache-2.0"
] | null | null | null | pytorch_lightning/trainer/configuration_validator.py | kazhang/pytorch-lightning | 54e95d3e2896bfe092d5ee18ede2c153c200c266 | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from pytorch_lightning.trainer.states import TrainerFn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature
from pytorch_lightning.utilities.warnings import rank_zero_deprecation, rank_zero_warn
class ConfigValidator:
def __init__(self, trainer: "pl.Trainer") -> None:
self.trainer = trainer
def verify_loop_configurations(self, model: "pl.LightningModule") -> None:
r"""
Checks that the model is configured correctly before the run is started.
Args:
model: The model to check the configuration.
"""
if self.trainer.state.fn in (TrainerFn.FITTING, TrainerFn.TUNING):
self.__verify_train_loop_configuration(model)
self.__verify_eval_loop_configuration(model, "val")
self.__verify_manual_optimization_support(model)
self.__check_training_step_requires_dataloader_iter(model)
elif self.trainer.state.fn == TrainerFn.VALIDATING:
self.__verify_eval_loop_configuration(model, "val")
elif self.trainer.state.fn == TrainerFn.TESTING:
self.__verify_eval_loop_configuration(model, "test")
elif self.trainer.state.fn == TrainerFn.PREDICTING:
self.__verify_predict_loop_configuration(model)
self.__verify_dp_batch_transfer_support(model)
# TODO: Delete _check_on_keyboard_interrupt in v1.7
self._check_on_keyboard_interrupt()
def __verify_train_loop_configuration(self, model: "pl.LightningModule") -> None:
# -----------------------------------
# verify model has a training step
# -----------------------------------
has_training_step = is_overridden("training_step", model)
if not has_training_step:
raise MisconfigurationException(
"No `training_step()` method defined. Lightning `Trainer` expects as minimum a"
" `training_step()`, `train_dataloader()` and `configure_optimizers()` to be defined."
)
# -----------------------------------
# verify model has a train dataloader
# -----------------------------------
has_train_dataloader = is_overridden("train_dataloader", model)
if not has_train_dataloader:
raise MisconfigurationException(
"No `train_dataloader()` method defined. Lightning `Trainer` expects as minimum a"
" `training_step()`, `train_dataloader()` and `configure_optimizers()` to be defined."
)
# -----------------------------------
# verify model has optimizer
# -----------------------------------
has_optimizers = is_overridden("configure_optimizers", model)
if not has_optimizers:
raise MisconfigurationException(
"No `configure_optimizers()` method defined. Lightning `Trainer` expects as minimum a"
" `training_step()`, `train_dataloader()` and `configure_optimizers()` to be defined."
)
# ----------------------------------------------
# verify model does not have
# - on_train_dataloader
# - on_val_dataloader
# ----------------------------------------------
has_on_train_dataloader = is_overridden("on_train_dataloader", model)
if has_on_train_dataloader:
rank_zero_deprecation(
"Method `on_train_dataloader` in DataHooks is deprecated and will be removed in v1.7.0."
" Please use `train_dataloader()` directly."
)
has_on_val_dataloader = is_overridden("on_val_dataloader", model)
if has_on_val_dataloader:
rank_zero_deprecation(
"Method `on_val_dataloader` in DataHooks is deprecated and will be removed in v1.7.0."
" Please use `val_dataloader()` directly."
)
trainer = self.trainer
trainer.overriden_optimizer_step = is_overridden("optimizer_step", model)
trainer.overriden_optimizer_zero_grad = is_overridden("optimizer_zero_grad", model)
automatic_optimization = model.automatic_optimization
going_to_accumulate_grad_batches = trainer.accumulation_scheduler.going_to_accumulate_grad_batches()
has_overriden_optimization_functions = trainer.overriden_optimizer_step or trainer.overriden_optimizer_zero_grad
if has_overriden_optimization_functions and going_to_accumulate_grad_batches and automatic_optimization:
rank_zero_warn(
"When using `Trainer(accumulate_grad_batches != 1)` and overriding"
"`LightningModule.optimizer_{step,zero_grad}`, the hooks will not be called on every batch"
"(rather, they are called on every optimization step)."
)
def __verify_eval_loop_configuration(self, model: "pl.LightningModule", stage: str) -> None:
loader_name = f"{stage}_dataloader"
step_name = "validation_step" if stage == "val" else "test_step"
has_loader = is_overridden(loader_name, model)
has_step = is_overridden(step_name, model)
if has_loader and not has_step:
rank_zero_warn(f"you passed in a {loader_name} but have no {step_name}. Skipping {stage} loop")
if has_step and not has_loader:
rank_zero_warn(f"you defined a {step_name} but have no {loader_name}. Skipping {stage} loop")
# ----------------------------------------------
# verify model does not have
# - on_val_dataloader
# - on_test_dataloader
# ----------------------------------------------
has_on_val_dataloader = is_overridden("on_val_dataloader", model)
if has_on_val_dataloader:
rank_zero_deprecation(
"Method `on_val_dataloader` in DataHooks is deprecated and will be removed in v1.7.0."
" Please use `val_dataloader()` directly."
)
has_on_test_dataloader = is_overridden("on_test_dataloader", model)
if has_on_test_dataloader:
rank_zero_deprecation(
"Method `on_test_dataloader` in DataHooks is deprecated and will be removed in v1.7.0."
" Please use `test_dataloader()` directly."
)
def __verify_predict_loop_configuration(self, model: "pl.LightningModule") -> None:
has_predict_dataloader = is_overridden("predict_dataloader", model)
if not has_predict_dataloader:
raise MisconfigurationException("Dataloader not found for `Trainer.predict`")
# ----------------------------------------------
# verify model does not have
# - on_predict_dataloader
# ----------------------------------------------
has_on_predict_dataloader = is_overridden("on_predict_dataloader", model)
if has_on_predict_dataloader:
rank_zero_deprecation(
"Method `on_predict_dataloader` in DataHooks is deprecated and will be removed in v1.7.0."
" Please use `predict_dataloader()` directly."
)
def __verify_dp_batch_transfer_support(self, model: "pl.LightningModule") -> None:
"""Raise Misconfiguration exception since these hooks are not supported in DP mode."""
# TODO: Remove this blocker once batch transfer to device is integrated in Lightning for DP mode.
batch_transfer_hooks = ("on_before_batch_transfer", "transfer_batch_to_device", "on_after_batch_transfer")
for hook in batch_transfer_hooks:
if self.trainer.accelerator_connector.use_dp and is_overridden(hook, model):
raise MisconfigurationException(f"Overriding `{hook}` is not supported in DP mode.")
def __verify_manual_optimization_support(self, model: "pl.LightningModule") -> None:
if model.automatic_optimization:
return
if self.trainer.gradient_clip_val > 0:
raise MisconfigurationException(
"Automatic gradient clipping is not supported for manual optimization."
f" Remove `Trainer(gradient_clip_val={self.trainer.gradient_clip_val})`"
" or switch to automatic optimization."
)
if self.trainer.accumulate_grad_batches != 1:
raise MisconfigurationException(
"Automatic gradient accumulation is not supported for manual optimization."
f" Remove `Trainer(accumulate_grad_batches={self.trainer.accumulate_grad_batches})`"
" or switch to automatic optimization."
)
def __check_training_step_requires_dataloader_iter(self, model: "pl.LightningModule"):
"""Check if the current `training_step` is requesting `dataloader_iter`."""
training_step_fx = getattr(model, "training_step")
if is_param_in_hook_signature(training_step_fx, "dataloader_iter", explicit=True):
if is_overridden("on_train_batch_start", model):
raise MisconfigurationException(
"The model hook `on_train_batch_start` is not compatible with "
"taking a `dataloader_iter` argument in your `training_step`."
)
if is_overridden("on_train_batch_end", model):
raise MisconfigurationException(
"The model hook `on_train_batch_end` is not compatible with "
"taking a `dataloader_iter` argument in your `training_step`."
)
if model.truncated_bptt_steps > 0:
raise MisconfigurationException(
"The model taking a `dataloader_iter` argument in your `training_step` "
"is incompatible with `truncated_bptt_steps > 0`."
)
def _check_on_keyboard_interrupt(self) -> None:
"""Checks if on_keyboard_interrupt is overriden and sends a deprecation warning."""
for callback in self.trainer.callbacks:
if is_overridden(method_name="on_keyboard_interrupt", instance=callback):
rank_zero_deprecation(
"The `on_keyboard_interrupt` callback hook was deprecated in v1.5 and will be removed in v1.7."
" Please use the `on_exception` callback hook instead."
)
| 51.525581 | 120 | 0.639827 |
import pytorch_lightning as pl
from pytorch_lightning.trainer.states import TrainerFn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature
from pytorch_lightning.utilities.warnings import rank_zero_deprecation, rank_zero_warn
class ConfigValidator:
def __init__(self, trainer: "pl.Trainer") -> None:
self.trainer = trainer
def verify_loop_configurations(self, model: "pl.LightningModule") -> None:
if self.trainer.state.fn in (TrainerFn.FITTING, TrainerFn.TUNING):
self.__verify_train_loop_configuration(model)
self.__verify_eval_loop_configuration(model, "val")
self.__verify_manual_optimization_support(model)
self.__check_training_step_requires_dataloader_iter(model)
elif self.trainer.state.fn == TrainerFn.VALIDATING:
self.__verify_eval_loop_configuration(model, "val")
elif self.trainer.state.fn == TrainerFn.TESTING:
self.__verify_eval_loop_configuration(model, "test")
elif self.trainer.state.fn == TrainerFn.PREDICTING:
self.__verify_predict_loop_configuration(model)
self.__verify_dp_batch_transfer_support(model)
self._check_on_keyboard_interrupt()
def __verify_train_loop_configuration(self, model: "pl.LightningModule") -> None:
has_training_step = is_overridden("training_step", model)
if not has_training_step:
raise MisconfigurationException(
"No `training_step()` method defined. Lightning `Trainer` expects as minimum a"
" `training_step()`, `train_dataloader()` and `configure_optimizers()` to be defined."
)
has_train_dataloader = is_overridden("train_dataloader", model)
if not has_train_dataloader:
raise MisconfigurationException(
"No `train_dataloader()` method defined. Lightning `Trainer` expects as minimum a"
" `training_step()`, `train_dataloader()` and `configure_optimizers()` to be defined."
)
has_optimizers = is_overridden("configure_optimizers", model)
if not has_optimizers:
raise MisconfigurationException(
"No `configure_optimizers()` method defined. Lightning `Trainer` expects as minimum a"
" `training_step()`, `train_dataloader()` and `configure_optimizers()` to be defined."
)
has_on_train_dataloader = is_overridden("on_train_dataloader", model)
if has_on_train_dataloader:
rank_zero_deprecation(
"Method `on_train_dataloader` in DataHooks is deprecated and will be removed in v1.7.0."
" Please use `train_dataloader()` directly."
)
has_on_val_dataloader = is_overridden("on_val_dataloader", model)
if has_on_val_dataloader:
rank_zero_deprecation(
"Method `on_val_dataloader` in DataHooks is deprecated and will be removed in v1.7.0."
" Please use `val_dataloader()` directly."
)
trainer = self.trainer
trainer.overriden_optimizer_step = is_overridden("optimizer_step", model)
trainer.overriden_optimizer_zero_grad = is_overridden("optimizer_zero_grad", model)
automatic_optimization = model.automatic_optimization
going_to_accumulate_grad_batches = trainer.accumulation_scheduler.going_to_accumulate_grad_batches()
has_overriden_optimization_functions = trainer.overriden_optimizer_step or trainer.overriden_optimizer_zero_grad
if has_overriden_optimization_functions and going_to_accumulate_grad_batches and automatic_optimization:
rank_zero_warn(
"When using `Trainer(accumulate_grad_batches != 1)` and overriding"
"`LightningModule.optimizer_{step,zero_grad}`, the hooks will not be called on every batch"
"(rather, they are called on every optimization step)."
)
def __verify_eval_loop_configuration(self, model: "pl.LightningModule", stage: str) -> None:
loader_name = f"{stage}_dataloader"
step_name = "validation_step" if stage == "val" else "test_step"
has_loader = is_overridden(loader_name, model)
has_step = is_overridden(step_name, model)
if has_loader and not has_step:
rank_zero_warn(f"you passed in a {loader_name} but have no {step_name}. Skipping {stage} loop")
if has_step and not has_loader:
rank_zero_warn(f"you defined a {step_name} but have no {loader_name}. Skipping {stage} loop")
has_on_val_dataloader = is_overridden("on_val_dataloader", model)
if has_on_val_dataloader:
rank_zero_deprecation(
"Method `on_val_dataloader` in DataHooks is deprecated and will be removed in v1.7.0."
" Please use `val_dataloader()` directly."
)
has_on_test_dataloader = is_overridden("on_test_dataloader", model)
if has_on_test_dataloader:
rank_zero_deprecation(
"Method `on_test_dataloader` in DataHooks is deprecated and will be removed in v1.7.0."
" Please use `test_dataloader()` directly."
)
def __verify_predict_loop_configuration(self, model: "pl.LightningModule") -> None:
has_predict_dataloader = is_overridden("predict_dataloader", model)
if not has_predict_dataloader:
raise MisconfigurationException("Dataloader not found for `Trainer.predict`")
has_on_predict_dataloader = is_overridden("on_predict_dataloader", model)
if has_on_predict_dataloader:
rank_zero_deprecation(
"Method `on_predict_dataloader` in DataHooks is deprecated and will be removed in v1.7.0."
" Please use `predict_dataloader()` directly."
)
def __verify_dp_batch_transfer_support(self, model: "pl.LightningModule") -> None:
batch_transfer_hooks = ("on_before_batch_transfer", "transfer_batch_to_device", "on_after_batch_transfer")
for hook in batch_transfer_hooks:
if self.trainer.accelerator_connector.use_dp and is_overridden(hook, model):
raise MisconfigurationException(f"Overriding `{hook}` is not supported in DP mode.")
def __verify_manual_optimization_support(self, model: "pl.LightningModule") -> None:
if model.automatic_optimization:
return
if self.trainer.gradient_clip_val > 0:
raise MisconfigurationException(
"Automatic gradient clipping is not supported for manual optimization."
f" Remove `Trainer(gradient_clip_val={self.trainer.gradient_clip_val})`"
" or switch to automatic optimization."
)
if self.trainer.accumulate_grad_batches != 1:
raise MisconfigurationException(
"Automatic gradient accumulation is not supported for manual optimization."
f" Remove `Trainer(accumulate_grad_batches={self.trainer.accumulate_grad_batches})`"
" or switch to automatic optimization."
)
def __check_training_step_requires_dataloader_iter(self, model: "pl.LightningModule"):
training_step_fx = getattr(model, "training_step")
if is_param_in_hook_signature(training_step_fx, "dataloader_iter", explicit=True):
if is_overridden("on_train_batch_start", model):
raise MisconfigurationException(
"The model hook `on_train_batch_start` is not compatible with "
"taking a `dataloader_iter` argument in your `training_step`."
)
if is_overridden("on_train_batch_end", model):
raise MisconfigurationException(
"The model hook `on_train_batch_end` is not compatible with "
"taking a `dataloader_iter` argument in your `training_step`."
)
if model.truncated_bptt_steps > 0:
raise MisconfigurationException(
"The model taking a `dataloader_iter` argument in your `training_step` "
"is incompatible with `truncated_bptt_steps > 0`."
)
def _check_on_keyboard_interrupt(self) -> None:
for callback in self.trainer.callbacks:
if is_overridden(method_name="on_keyboard_interrupt", instance=callback):
rank_zero_deprecation(
"The `on_keyboard_interrupt` callback hook was deprecated in v1.5 and will be removed in v1.7."
" Please use the `on_exception` callback hook instead."
)
| true | true |
1c3076bcbd064f30b55915f2787a6ddae3356262 | 12 | py | Python | lona/default_routes.py | sobolevn/lona | 14af03bb8607f553ccfaf8e86b748a76b359518d | [
"MIT"
] | null | null | null | lona/default_routes.py | sobolevn/lona | 14af03bb8607f553ccfaf8e86b748a76b359518d | [
"MIT"
] | null | null | null | lona/default_routes.py | sobolevn/lona | 14af03bb8607f553ccfaf8e86b748a76b359518d | [
"MIT"
] | null | null | null | routes = []
| 6 | 11 | 0.5 | routes = []
| true | true |
1c3077632495fee023345b216ceb0915ef87302e | 10,261 | py | Python | tests/test_exceptions.py | adriangb/di | f277bb7189c8e8bde41170afb3181e6600b06be8 | [
"MIT"
] | 57 | 2021-09-28T00:48:08.000Z | 2022-03-16T16:50:39.000Z | tests/test_exceptions.py | adriangb/di | f277bb7189c8e8bde41170afb3181e6600b06be8 | [
"MIT"
] | 59 | 2021-09-25T00:06:22.000Z | 2022-03-31T15:49:36.000Z | tests/test_exceptions.py | adriangb/di | f277bb7189c8e8bde41170afb3181e6600b06be8 | [
"MIT"
] | 3 | 2021-12-31T10:03:03.000Z | 2021-12-31T16:07:54.000Z | from dataclasses import dataclass, field
from typing import AsyncGenerator, Dict, Generator
import pytest
from di.container import Container, bind_by_type
from di.dependant import Dependant, Marker
from di.executors import AsyncExecutor, SyncExecutor
from di.typing import Annotated
@dataclass
class Recorder:
caught: Dict[str, bool] = field(default_factory=dict)
class MyException(Exception):
    """Exception deliberately raised by the test collectors and caught by dependencies."""

    ...
def dep1(rec: Recorder) -> Generator[None, None, None]:
    """Generator dependency that records (and swallows) a MyException raised downstream."""
    try:
        yield
    except MyException:
        rec.caught.update(dep1=True)
def dep2(rec: Recorder) -> Generator[None, None, None]:
    """Generator dependency that records (and swallows) a MyException raised downstream."""
    try:
        yield
    except MyException:
        rec.caught.update(dep2=True)
async def async_dep1(rec: Recorder) -> AsyncGenerator[None, None]:
    """Async generator dependency that records (and swallows) a downstream MyException."""
    try:
        yield
    except MyException:
        rec.caught.update(async_dep1=True)
async def async_dep2(rec: Recorder) -> AsyncGenerator[None, None]:
    """Record (and swallow) a downstream MyException under the "async_dep2" key.

    Bug fix: a copy-paste error recorded the catch under "async_dep1", making
    the two async dependencies indistinguishable in the concurrent tests.
    """
    try:
        yield
    except MyException:
        rec.caught["async_dep2"] = True
def test_dependency_can_catch_exception_single_sync() -> None:
    """A sync generator dependency observes the exception raised by its dependent."""
    recorder = Recorder()

    def collector(one: Annotated[None, Marker(dep1)]) -> None:
        raise MyException

    container = Container()
    container.bind(bind_by_type(Dependant(lambda: recorder), Recorder))
    solved = container.solve(Dependant(collector), scopes=[None])
    with container.enter_scope(None) as state:
        container.execute_sync(solved, executor=SyncExecutor(), state=state)
    assert recorder.caught == {"dep1": True}
@pytest.mark.anyio
async def test_dependency_can_catch_exception_single_async() -> None:
    """An async generator dependency observes the exception raised by its dependent."""
    recorder = Recorder()

    def collector(one: Annotated[None, Marker(async_dep1)]) -> None:
        raise MyException

    container = Container()
    container.bind(bind_by_type(Dependant(lambda: recorder), Recorder))
    async with container.enter_scope(None) as state:
        await container.execute_async(
            container.solve(Dependant(collector), scopes=[None]),
            executor=AsyncExecutor(),
            state=state,
        )
    assert recorder.caught == {"async_dep1": True}
def test_dependency_can_catch_exception_concurrent_sync() -> None:
    """With two sync dependencies, exactly one catches (and swallows) the exception.

    Whichever generator is resumed first swallows MyException, so the other
    never sees it; there is no ordering promise, so both outcomes are valid.
    """
    recorder = Recorder()

    def collector(
        one: Annotated[None, Marker(dep1)], two: Annotated[None, Marker(dep2)]
    ) -> None:
        raise MyException

    container = Container()
    container.bind(bind_by_type(Dependant(lambda: recorder), Recorder))
    solved = container.solve(Dependant(collector), scopes=[None])
    with container.enter_scope(None) as state:
        container.execute_sync(solved, executor=SyncExecutor(), state=state)
    assert recorder.caught in ({"dep1": True}, {"dep2": True})
@pytest.mark.anyio
async def test_dependency_can_catch_exception_concurrent_async() -> None:
    """With two async dependencies, exactly one catches (and swallows) the exception.

    Whichever generator is resumed first swallows MyException, so the other
    never sees it; there is no ordering promise, so both outcomes are valid.
    """
    recorder = Recorder()

    def collector(
        one: Annotated[None, Marker(async_dep1)],
        two: Annotated[None, Marker(async_dep2)],
    ) -> None:
        raise MyException

    container = Container()
    container.bind(bind_by_type(Dependant(lambda: recorder), Recorder))
    solved = container.solve(Dependant(collector), scopes=[None])
    async with container.enter_scope(None) as state:
        await container.execute_async(solved, executor=AsyncExecutor(), state=state)
    assert recorder.caught in ({"async_dep1": True}, {"async_dep2": True})
@pytest.mark.anyio
async def test_dependency_can_catch_exception_concurrent_mixed() -> None:
    """Mixed sync/async dependencies: exactly one catches (and swallows) the exception.

    Whichever generator is resumed first swallows MyException, so the other
    never sees it; there is no ordering promise, so both outcomes are valid.
    """
    recorder = Recorder()

    def collector(
        one: Annotated[None, Marker(async_dep1)],
        two: Annotated[None, Marker(dep2)],
    ) -> None:
        raise MyException

    container = Container()
    container.bind(bind_by_type(Dependant(lambda: recorder), Recorder))
    async with container.enter_scope(None) as state:
        await container.execute_async(
            container.solve(Dependant(collector), scopes=[None]),
            executor=AsyncExecutor(),
            state=state,
        )
    assert recorder.caught in ({"async_dep1": True}, {"dep2": True})
def dep1_reraise(rec: Recorder) -> Generator[None, None, None]:
    """Like ``dep1`` but re-raises the MyException after recording it."""
    try:
        yield
    except MyException:
        rec.caught.update(dep1_reraise=True)
        raise
def dep2_reraise(rec: Recorder) -> Generator[None, None, None]:
    """Like ``dep2`` but re-raises the MyException after recording it."""
    try:
        yield
    except MyException:
        rec.caught.update(dep2_reraise=True)
        raise
async def async_dep1_reraise(rec: Recorder) -> AsyncGenerator[None, None]:
    """Like ``async_dep1`` but re-raises the MyException after recording it."""
    try:
        yield
    except MyException:
        rec.caught.update(async_dep1_reraise=True)
        raise
async def async_dep2_reraise(rec: Recorder) -> AsyncGenerator[None, None]:
    """Like ``async_dep2`` but re-raises the MyException after recording it."""
    try:
        yield
    except MyException:
        rec.caught.update(async_dep2_reraise=True)
        raise
def test_dependency_can_catch_exception_single_sync_reraise() -> None:
    """A re-raising sync dependency records the exception and still propagates it."""
    recorder = Recorder()

    def collector(one: Annotated[None, Marker(dep1_reraise)]) -> None:
        raise MyException

    container = Container()
    container.bind(bind_by_type(Dependant(lambda: recorder), Recorder))
    solved = container.solve(Dependant(collector), scopes=[None])
    try:
        with container.enter_scope(None) as state:
            container.execute_sync(solved, executor=SyncExecutor(), state=state)
    except MyException:
        pass
    else:
        raise AssertionError(
            "MyException should have been re-raised"
        )  # pragma: no cover
    assert recorder.caught == {"dep1_reraise": True}
@pytest.mark.anyio
async def test_dependency_can_catch_exception_single_async_reraise() -> None:
    """A re-raising async dependency records the exception and still propagates it."""
    recorder = Recorder()

    def collector(one: Annotated[None, Marker(async_dep1_reraise)]) -> None:
        raise MyException

    container = Container()
    container.bind(bind_by_type(Dependant(lambda: recorder), Recorder))
    solved = container.solve(Dependant(collector), scopes=[None])
    try:
        async with container.enter_scope(None) as state:
            await container.execute_async(solved, executor=AsyncExecutor(), state=state)
    except MyException:
        pass
    else:
        raise AssertionError(
            "MyException should have been re-raised"
        )  # pragma: no cover
    assert recorder.caught == {"async_dep1_reraise": True}
def test_dependency_can_catch_exception_concurrent_sync_reraise() -> None:
    """Both re-raising sync dependencies observe the exception, since each re-raises."""
    recorder = Recorder()

    def collector(
        one: Annotated[None, Marker(dep1_reraise)],
        two: Annotated[None, Marker(dep2_reraise)],
    ) -> None:
        raise MyException

    container = Container()
    container.bind(bind_by_type(Dependant(lambda: recorder), Recorder))
    solved = container.solve(Dependant(collector), scopes=[None])
    try:
        with container.enter_scope(None) as state:
            container.execute_sync(solved, executor=SyncExecutor(), state=state)
    except MyException:
        pass
    else:
        raise AssertionError(
            "MyException should have been re-raised"
        )  # pragma: no cover
    assert recorder.caught == {"dep1_reraise": True, "dep2_reraise": True}
@pytest.mark.anyio
async def test_dependency_can_catch_exception_concurrent_async_reraise() -> None:
    """Both re-raising async dependencies observe the exception, since each re-raises."""
    recorder = Recorder()

    def collector(
        one: Annotated[None, Marker(async_dep1_reraise)],
        two: Annotated[None, Marker(async_dep2_reraise)],
    ) -> None:
        raise MyException

    container = Container()
    container.bind(bind_by_type(Dependant(lambda: recorder), Recorder))
    solved = container.solve(Dependant(collector), scopes=[None])
    try:
        async with container.enter_scope(None) as state:
            await container.execute_async(solved, executor=AsyncExecutor(), state=state)
    except MyException:
        pass
    else:
        raise AssertionError(
            "MyException should have been re-raised"
        )  # pragma: no cover
    assert recorder.caught == {"async_dep1_reraise": True, "async_dep2_reraise": True}
@pytest.mark.anyio
async def test_dependency_can_catch_exception_concurrent_mixed_reraise() -> None:
    """Mixed re-raising dependencies both observe the exception, since each re-raises."""
    recorder = Recorder()

    def collector(
        one: Annotated[None, Marker(async_dep1_reraise)],
        two: Annotated[None, Marker(dep2_reraise)],
    ) -> None:
        raise MyException

    container = Container()
    container.bind(bind_by_type(Dependant(lambda: recorder), Recorder))
    solved = container.solve(Dependant(collector), scopes=[None])
    try:
        async with container.enter_scope(None) as state:
            await container.execute_async(solved, executor=AsyncExecutor(), state=state)
    except MyException:
        pass
    else:
        raise AssertionError(
            "MyException should have been re-raised"
        )  # pragma: no cover
    assert recorder.caught == {"async_dep1_reraise": True, "dep2_reraise": True}
def test_deep_reraise() -> None:
    """An exception raised at the root propagates through a re-raising parent to the leaf."""

    def leaf() -> Generator[None, None, None]:
        try:
            yield
        except MyException:
            pass
        else:
            raise AssertionError("Exception did not propagate")  # pragma: no cover

    def parent(child: Annotated[None, Marker(leaf)]) -> Generator[None, None, None]:
        try:
            yield
        except MyException:
            raise

    def root(child: Annotated[None, Marker(parent)]) -> None:
        raise MyException

    container = Container()
    solved = container.solve(Dependant(root), scopes=[None])
    with container.enter_scope(None) as state:
        container.execute_sync(solved, executor=SyncExecutor(), state=state)
| 30.629851 | 84 | 0.639801 | from dataclasses import dataclass, field
from typing import AsyncGenerator, Dict, Generator
import pytest
from di.container import Container, bind_by_type
from di.dependant import Dependant, Marker
from di.executors import AsyncExecutor, SyncExecutor
from di.typing import Annotated
@dataclass
class Recorder:
caught: Dict[str, bool] = field(default_factory=dict)
class MyException(Exception):
...
def dep1(rec: Recorder) -> Generator[None, None, None]:
try:
yield
except MyException:
rec.caught["dep1"] = True
def dep2(rec: Recorder) -> Generator[None, None, None]:
try:
yield
except MyException:
rec.caught["dep2"] = True
async def async_dep1(rec: Recorder) -> AsyncGenerator[None, None]:
try:
yield
except MyException:
rec.caught["async_dep1"] = True
async def async_dep2(rec: Recorder) -> AsyncGenerator[None, None]:
    """Record (and swallow) a downstream MyException under the "async_dep2" key.

    Bug fix: a copy-paste error recorded the catch under "async_dep1", making
    the two async dependencies indistinguishable.
    """
    try:
        yield
    except MyException:
        rec.caught["async_dep2"] = True
def test_dependency_can_catch_exception_single_sync() -> None:
def collector(one: Annotated[None, Marker(dep1)]) -> None:
raise MyException
container = Container()
rec = Recorder()
container.bind(bind_by_type(Dependant(lambda: rec), Recorder))
with container.enter_scope(None) as state:
container.execute_sync(
container.solve(Dependant(collector), scopes=[None]),
executor=SyncExecutor(),
state=state,
)
assert rec.caught == {"dep1": True}
@pytest.mark.anyio
async def test_dependency_can_catch_exception_single_async() -> None:
def collector(one: Annotated[None, Marker(async_dep1)]) -> None:
raise MyException
container = Container()
rec = Recorder()
container.bind(bind_by_type(Dependant(lambda: rec), Recorder))
solved = container.solve(Dependant(collector), scopes=[None])
async with container.enter_scope(None) as state:
await container.execute_async(
solved,
executor=AsyncExecutor(),
state=state,
)
assert rec.caught == {"async_dep1": True}
def test_dependency_can_catch_exception_concurrent_sync() -> None:
def collector(
one: Annotated[None, Marker(dep1)], two: Annotated[None, Marker(dep2)]
) -> None:
raise MyException
container = Container()
rec = Recorder()
container.bind(bind_by_type(Dependant(lambda: rec), Recorder))
with container.enter_scope(None) as state:
container.execute_sync(
container.solve(Dependant(collector), scopes=[None]),
executor=SyncExecutor(),
state=state,
)
assert rec.caught == {"dep1": True} or rec.caught == {"dep2": True}
@pytest.mark.anyio
async def test_dependency_can_catch_exception_concurrent_async() -> None:
def collector(
one: Annotated[None, Marker(async_dep1)],
two: Annotated[None, Marker(async_dep2)],
) -> None:
raise MyException
container = Container()
rec = Recorder()
container.bind(bind_by_type(Dependant(lambda: rec), Recorder))
solved = container.solve(Dependant(collector), scopes=[None])
async with container.enter_scope(None) as state:
await container.execute_async(
solved,
executor=AsyncExecutor(),
state=state,
)
assert rec.caught == {"async_dep1": True} or rec.caught == {"async_dep2": True}
@pytest.mark.anyio
async def test_dependency_can_catch_exception_concurrent_mixed() -> None:
def collector(
one: Annotated[None, Marker(async_dep1)],
two: Annotated[None, Marker(dep2)],
) -> None:
raise MyException
container = Container()
rec = Recorder()
container.bind(bind_by_type(Dependant(lambda: rec), Recorder))
async with container.enter_scope(None) as state:
await container.execute_async(
container.solve(Dependant(collector), scopes=[None]),
executor=AsyncExecutor(),
state=state,
)
assert rec.caught == {"async_dep1": True} or rec.caught == {"dep2": True}
def dep1_reraise(rec: Recorder) -> Generator[None, None, None]:
try:
yield
except MyException:
rec.caught["dep1_reraise"] = True
raise
def dep2_reraise(rec: Recorder) -> Generator[None, None, None]:
try:
yield
except MyException:
rec.caught["dep2_reraise"] = True
raise
async def async_dep1_reraise(rec: Recorder) -> AsyncGenerator[None, None]:
try:
yield
except MyException:
rec.caught["async_dep1_reraise"] = True
raise
async def async_dep2_reraise(rec: Recorder) -> AsyncGenerator[None, None]:
try:
yield
except MyException:
rec.caught["async_dep2_reraise"] = True
raise
def test_dependency_can_catch_exception_single_sync_reraise() -> None:
def collector(one: Annotated[None, Marker(dep1_reraise)]) -> None:
raise MyException
container = Container()
rec = Recorder()
container.bind(bind_by_type(Dependant(lambda: rec), Recorder))
try:
with container.enter_scope(None) as state:
container.execute_sync(
container.solve(Dependant(collector), scopes=[None]),
executor=SyncExecutor(),
state=state,
)
except MyException:
pass
else:
raise AssertionError(
"MyException should have been re-raised"
)
assert rec.caught == {"dep1_reraise": True}
@pytest.mark.anyio
async def test_dependency_can_catch_exception_single_async_reraise() -> None:
def collector(one: Annotated[None, Marker(async_dep1_reraise)]) -> None:
raise MyException
container = Container()
rec = Recorder()
container.bind(bind_by_type(Dependant(lambda: rec), Recorder))
try:
async with container.enter_scope(None) as state:
await container.execute_async(
container.solve(Dependant(collector), scopes=[None]),
executor=AsyncExecutor(),
state=state,
)
except MyException:
pass
else:
raise AssertionError(
"MyException should have been re-raised"
)
assert rec.caught == {"async_dep1_reraise": True}
def test_dependency_can_catch_exception_concurrent_sync_reraise() -> None:
def collector(
one: Annotated[None, Marker(dep1_reraise)],
two: Annotated[None, Marker(dep2_reraise)],
) -> None:
raise MyException
container = Container()
rec = Recorder()
container.bind(bind_by_type(Dependant(lambda: rec), Recorder))
try:
with container.enter_scope(None) as state:
container.execute_sync(
container.solve(Dependant(collector), scopes=[None]),
executor=SyncExecutor(),
state=state,
)
except MyException:
pass
else:
raise AssertionError(
"MyException should have been re-raised"
)
assert rec.caught == {"dep1_reraise": True, "dep2_reraise": True}
@pytest.mark.anyio
async def test_dependency_can_catch_exception_concurrent_async_reraise() -> None:
def collector(
one: Annotated[None, Marker(async_dep1_reraise)],
two: Annotated[None, Marker(async_dep2_reraise)],
) -> None:
raise MyException
container = Container()
rec = Recorder()
container.bind(bind_by_type(Dependant(lambda: rec), Recorder))
try:
async with container.enter_scope(None) as state:
await container.execute_async(
container.solve(Dependant(collector), scopes=[None]),
executor=AsyncExecutor(),
state=state,
)
except MyException:
pass
else:
raise AssertionError(
"MyException should have been re-raised"
)
assert rec.caught == {"async_dep1_reraise": True, "async_dep2_reraise": True}
@pytest.mark.anyio
async def test_dependency_can_catch_exception_concurrent_mixed_reraise() -> None:
def collector(
one: Annotated[None, Marker(async_dep1_reraise)],
two: Annotated[None, Marker(dep2_reraise)],
) -> None:
raise MyException
container = Container()
rec = Recorder()
container.bind(bind_by_type(Dependant(lambda: rec), Recorder))
try:
async with container.enter_scope(None) as state:
await container.execute_async(
container.solve(Dependant(collector), scopes=[None]),
executor=AsyncExecutor(),
state=state,
)
except MyException:
pass
else:
raise AssertionError(
"MyException should have been re-raised"
)
assert rec.caught == {"async_dep1_reraise": True, "dep2_reraise": True}
def test_deep_reraise() -> None:
def leaf() -> Generator[None, None, None]:
try:
yield
except MyException:
pass
else:
raise AssertionError("Exception did not propagate")
def parent(child: Annotated[None, Marker(leaf)]) -> Generator[None, None, None]:
try:
yield
except MyException:
raise
def root(child: Annotated[None, Marker(parent)]) -> None:
raise MyException
container = Container()
with container.enter_scope(None) as state:
container.execute_sync(
container.solve(Dependant(root), scopes=[None]),
executor=SyncExecutor(),
state=state,
)
| true | true |
1c3079030e036d60b7d7ba161c6b01ad3c4257bc | 517 | py | Python | PaddleCode.py | DavidGoedicke/Keybyfoot | 071a198cc8b7c288c663684661910856bee9554f | [
"MIT"
] | null | null | null | PaddleCode.py | DavidGoedicke/Keybyfoot | 071a198cc8b7c288c663684661910856bee9554f | [
"MIT"
] | null | null | null | PaddleCode.py | DavidGoedicke/Keybyfoot | 071a198cc8b7c288c663684661910856bee9554f | [
"MIT"
] | null | null | null | # Write your code here :-)
from adafruit_circuitplayground.express import cpx
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keycode import Keycode
kbd = Keyboard()
while True:
if cpx.button_a:
kbd.send(Keycode.SHIFT, Keycode.F1) # Type capital 'A'
while cpx.button_a: # Wait for button to be released
pass
if cpx.button_b:
kbd.send(Keycode.CONTROL, Keycode.F2) # control-X key
while cpx.button_b: # Wait for button to be released
pass | 30.411765 | 63 | 0.68472 |
from adafruit_circuitplayground.express import cpx
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keycode import Keycode
kbd = Keyboard()
while True:
if cpx.button_a:
kbd.send(Keycode.SHIFT, Keycode.F1)
while cpx.button_a:
pass
if cpx.button_b:
kbd.send(Keycode.CONTROL, Keycode.F2)
while cpx.button_b:
pass | true | true |
1c3079c602eecd54e71a2cec8693fdc41f994ccb | 10,128 | py | Python | river/naive_bayes/multinomial.py | mathco-wf/river | c6ff38fa4ce4843ede1cba77248e0370a67a36f6 | [
"BSD-3-Clause"
] | 4 | 2019-04-09T16:51:08.000Z | 2021-07-26T15:43:30.000Z | river/naive_bayes/multinomial.py | mathco-wf/river | c6ff38fa4ce4843ede1cba77248e0370a67a36f6 | [
"BSD-3-Clause"
] | null | null | null | river/naive_bayes/multinomial.py | mathco-wf/river | c6ff38fa4ce4843ede1cba77248e0370a67a36f6 | [
"BSD-3-Clause"
] | 1 | 2021-10-19T03:16:07.000Z | 2021-10-19T03:16:07.000Z | import collections
import math
import numpy as np
import pandas as pd
from scipy import sparse
from river.base import tags
from . import base
__all__ = ["MultinomialNB"]
class MultinomialNB(base.BaseNB):
"""Naive Bayes classifier for multinomial models.
Multinomial Naive Bayes model learns from occurrences between features such as word counts
and discrete classes. The input vector must contain positive values, such as
counts or TF-IDF values.
Parameters
----------
alpha
Additive (Laplace/Lidstone) smoothing parameter (use 0 for no smoothing).
Attributes
----------
class_dist : proba.Multinomial
Class prior probability distribution.
feature_counts : collections.defaultdict
Total frequencies per feature and class.
class_totals : collections.Counter
Total frequencies per class.
Examples
--------
>>> import math
>>> from river import compose
>>> from river import feature_extraction
>>> from river import naive_bayes
>>> docs = [
... ('Chinese Beijing Chinese', 'yes'),
... ('Chinese Chinese Shanghai', 'yes'),
... ('Chinese Macao', 'yes'),
... ('Tokyo Japan Chinese', 'no')
... ]
>>> model = compose.Pipeline(
... ('tokenize', feature_extraction.BagOfWords(lowercase=False)),
... ('nb', naive_bayes.MultinomialNB(alpha=1))
... )
>>> for sentence, label in docs:
... model = model.learn_one(sentence, label)
>>> model['nb'].p_class('yes')
0.75
>>> model['nb'].p_class('no')
0.25
>>> cp = model['nb'].p_feature_given_class
>>> cp('Chinese', 'yes') == (5 + 1) / (8 + 6)
True
>>> cp('Tokyo', 'yes') == (0 + 1) / (8 + 6)
True
>>> cp('Japan', 'yes') == (0 + 1) / (8 + 6)
True
>>> cp('Chinese', 'no') == (1 + 1) / (3 + 6)
True
>>> cp('Tokyo', 'no') == (1 + 1) / (3 + 6)
True
>>> cp('Japan', 'no') == (1 + 1) / (3 + 6)
True
>>> new_text = 'Chinese Chinese Chinese Tokyo Japan'
>>> tokens = model['tokenize'].transform_one(new_text)
>>> jlh = model['nb'].joint_log_likelihood(tokens)
>>> math.exp(jlh['yes'])
0.000301
>>> math.exp(jlh['no'])
0.000135
>>> model.predict_one(new_text)
'yes'
>>> new_unseen_text = 'Taiwanese Taipei'
>>> tokens = model['tokenize'].transform_one(new_unseen_text)
>>> # P(Taiwanese|yes)
>>> # = (N_Taiwanese_yes + 1) / (N_yes + N_terms)
>>> cp('Taiwanese', 'yes') == cp('Taipei', 'yes') == (0 + 1) / (8 + 6)
True
>>> cp('Taiwanese', 'no') == cp('Taipei', 'no') == (0 + 1) / (3 + 6)
True
>>> # P(yes|Taiwanese Taipei)
>>> # ∝ P(Taiwanese|yes) * P(Taipei|yes) * P(yes)
>>> posterior_yes_given_new_text = (0 + 1) / (8 + 6) * (0 + 1) / (8 + 6) * 0.75
>>> jlh = model['nb'].joint_log_likelihood(tokens)
>>> jlh['yes'] == math.log(posterior_yes_given_new_text)
True
>>> model.predict_one(new_unseen_text)
'yes'
You can train the model and make predictions in mini-batch mode using the class methods `learn_many` and `predict_many`.
>>> import pandas as pd
>>> docs = [
... ('Chinese Beijing Chinese', 'yes'),
... ('Chinese Chinese Shanghai', 'yes'),
... ('Chinese Macao', 'yes'),
... ('Tokyo Japan Chinese', 'no')
... ]
>>> docs = pd.DataFrame(docs, columns = ['docs', 'y'])
>>> X, y = docs['docs'], docs['y']
>>> model = compose.Pipeline(
... ('tokenize', feature_extraction.BagOfWords(lowercase=False)),
... ('nb', naive_bayes.MultinomialNB(alpha=1))
... )
>>> model = model.learn_many(X, y)
>>> model['nb'].p_class('yes')
0.75
>>> model['nb'].p_class('no')
0.25
>>> cp = model['nb'].p_feature_given_class
>>> cp('Chinese', 'yes') == (5 + 1) / (8 + 6)
True
>>> cp('Tokyo', 'yes') == (0 + 1) / (8 + 6)
True
>>> cp('Japan', 'yes') == (0 + 1) / (8 + 6)
True
>>> cp('Chinese', 'no') == (1 + 1) / (3 + 6)
True
>>> cp('Tokyo', 'no') == (1 + 1) / (3 + 6)
True
>>> cp('Japan', 'no') == (1 + 1) / (3 + 6)
True
>>> unseen_data = pd.Series(
... ['Taiwanese Taipei', 'Chinese Shanghai'], name = 'docs', index = ['river', 'rocks'])
>>> model.predict_proba_many(unseen_data)
no yes
river 0.446469 0.553531
rocks 0.118501 0.881499
>>> model.predict_many(unseen_data)
river yes
rocks yes
dtype: object
References
----------
[^1]: [Naive Bayes text classification](https://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html)
"""
def __init__(self, alpha=1.0):
self.alpha = alpha
self.class_counts = collections.Counter()
self.feature_counts = collections.defaultdict(collections.Counter)
self.class_totals = collections.Counter()
def _more_tags(self):
return {tags.POSITIVE_INPUT}
def learn_one(self, x, y):
"""Updates the model with a single observation.
Parameters
----------
x
Dictionary of term frequencies.
y
Target class.
Returns
--------
self
"""
self.class_counts.update((y,))
for f, frequency in x.items():
self.feature_counts[f].update({y: frequency})
self.class_totals.update({y: frequency})
return self
@property
def classes_(self):
return list(self.class_counts.keys())
@property
def n_terms(self):
return len(self.feature_counts)
def p_feature_given_class(self, f, c):
num = self.feature_counts.get(f, {}).get(c, 0.0) + self.alpha
den = self.class_totals[c] + self.alpha * self.n_terms
return num / den
def p_class(self, c) -> float:
return self.class_counts[c] / sum(self.class_counts.values())
def p_class_many(self) -> pd.DataFrame:
return base.from_dict(self.class_counts).T[self.class_counts] / sum(
self.class_counts.values()
)
def joint_log_likelihood(self, x):
"""Computes the joint log likelihood of input features.
Parameters
----------
x
Dictionary of term frequencies.
Returns
--------
Mapping between classes and joint log likelihood.
"""
return {
c: math.log(self.p_class(c))
+ sum(
frequency * math.log(self.p_feature_given_class(f, c))
for f, frequency in x.items()
)
for c in self.classes_
}
def learn_many(self, X: pd.DataFrame, y: pd.Series):
"""Updates the model with a term-frequency or TF-IDF pandas dataframe.
Parameters
----------
X
Term-frequency or TF-IDF pandas dataframe.
y
Target classes.
Returns
--------
self
"""
y = base.one_hot_encode(y)
columns, classes = X.columns, y.columns
y = sparse.csc_matrix(y.sparse.to_coo()).T
self.class_counts.update(
{c: count.item() for c, count in zip(classes, y.sum(axis=1))}
)
if hasattr(X, "sparse"):
X = sparse.csr_matrix(X.sparse.to_coo())
fc = y @ X
self.class_totals.update(
{c: count.item() for c, count in zip(classes, fc.sum(axis=1))}
)
# Update feature counts by slicing the sparse matrix per column.
# Each column correspond to a class.
for c, i in zip(classes, range(fc.shape[0])):
counts = {
c: {columns[f]: count for f, count in zip(fc[i].indices, fc[i].data)}
}
# Transform {classe_i: {token_1: f_1, ... token_n: f_n}} into:
# [{token_1: {classe_i: f_1}},.. {token_n: {class_i: f_n}}]
for dict_count in [
{token: {c: f} for token, f in frequencies.items()}
for c, frequencies in counts.items()
]:
for f, count in dict_count.items():
self.feature_counts[f].update(count)
return self
def _feature_log_prob(
self, columns: list, known: list, unknown: list
) -> pd.DataFrame:
"""Compute log probabilities of input features.
Parameters
----------
columns
List of input features.
known
List of input features that are part of the vocabulary.
unknown
List of input features that are not part the vocabulary.
Returns
--------
Log probabilities of input features.
"""
smooth_fc = np.log(
base.from_dict(self.feature_counts).fillna(0).T[known] + self.alpha
)
smooth_fc[unknown] = np.log(self.alpha)
smooth_cc = np.log(
base.from_dict(self.class_totals) + self.alpha * self.n_terms
)
return smooth_fc.subtract(smooth_cc.values, axis="rows")[columns].T
def joint_log_likelihood_many(self, X: pd.DataFrame) -> pd.DataFrame:
"""Computes the joint log likelihood of input features.
Parameters
----------
X
Term-frequency or TF-IDF pandas dataframe.
Returns
--------
Input samples joint log likelihood.
"""
index, columns = X.index, X.columns
known, unknown = [], []
if not self.class_counts or not self.feature_counts:
return pd.DataFrame(index=index)
for f in columns:
if f in self.feature_counts:
known.append(f)
else:
unknown.append(f)
if hasattr(X, "sparse"):
X = sparse.csr_matrix(X.sparse.to_coo())
return pd.DataFrame(
X @ self._feature_log_prob(columns=columns, known=known, unknown=unknown)
+ np.log(self.p_class_many()).values,
index=index,
columns=self.class_totals.keys(),
)
| 27.225806 | 133 | 0.545616 | import collections
import math
import numpy as np
import pandas as pd
from scipy import sparse
from river.base import tags
from . import base
__all__ = ["MultinomialNB"]
class MultinomialNB(base.BaseNB):
def __init__(self, alpha=1.0):
self.alpha = alpha
self.class_counts = collections.Counter()
self.feature_counts = collections.defaultdict(collections.Counter)
self.class_totals = collections.Counter()
def _more_tags(self):
return {tags.POSITIVE_INPUT}
def learn_one(self, x, y):
self.class_counts.update((y,))
for f, frequency in x.items():
self.feature_counts[f].update({y: frequency})
self.class_totals.update({y: frequency})
return self
@property
def classes_(self):
return list(self.class_counts.keys())
@property
def n_terms(self):
return len(self.feature_counts)
def p_feature_given_class(self, f, c):
num = self.feature_counts.get(f, {}).get(c, 0.0) + self.alpha
den = self.class_totals[c] + self.alpha * self.n_terms
return num / den
def p_class(self, c) -> float:
return self.class_counts[c] / sum(self.class_counts.values())
def p_class_many(self) -> pd.DataFrame:
return base.from_dict(self.class_counts).T[self.class_counts] / sum(
self.class_counts.values()
)
def joint_log_likelihood(self, x):
return {
c: math.log(self.p_class(c))
+ sum(
frequency * math.log(self.p_feature_given_class(f, c))
for f, frequency in x.items()
)
for c in self.classes_
}
def learn_many(self, X: pd.DataFrame, y: pd.Series):
y = base.one_hot_encode(y)
columns, classes = X.columns, y.columns
y = sparse.csc_matrix(y.sparse.to_coo()).T
self.class_counts.update(
{c: count.item() for c, count in zip(classes, y.sum(axis=1))}
)
if hasattr(X, "sparse"):
X = sparse.csr_matrix(X.sparse.to_coo())
fc = y @ X
self.class_totals.update(
{c: count.item() for c, count in zip(classes, fc.sum(axis=1))}
)
for c, i in zip(classes, range(fc.shape[0])):
counts = {
c: {columns[f]: count for f, count in zip(fc[i].indices, fc[i].data)}
}
for dict_count in [
{token: {c: f} for token, f in frequencies.items()}
for c, frequencies in counts.items()
]:
for f, count in dict_count.items():
self.feature_counts[f].update(count)
return self
def _feature_log_prob(
self, columns: list, known: list, unknown: list
) -> pd.DataFrame:
smooth_fc = np.log(
base.from_dict(self.feature_counts).fillna(0).T[known] + self.alpha
)
smooth_fc[unknown] = np.log(self.alpha)
smooth_cc = np.log(
base.from_dict(self.class_totals) + self.alpha * self.n_terms
)
return smooth_fc.subtract(smooth_cc.values, axis="rows")[columns].T
def joint_log_likelihood_many(self, X: pd.DataFrame) -> pd.DataFrame:
index, columns = X.index, X.columns
known, unknown = [], []
if not self.class_counts or not self.feature_counts:
return pd.DataFrame(index=index)
for f in columns:
if f in self.feature_counts:
known.append(f)
else:
unknown.append(f)
if hasattr(X, "sparse"):
X = sparse.csr_matrix(X.sparse.to_coo())
return pd.DataFrame(
X @ self._feature_log_prob(columns=columns, known=known, unknown=unknown)
+ np.log(self.p_class_many()).values,
index=index,
columns=self.class_totals.keys(),
)
| true | true |
1c307c2ec51e4229f3a13309e2eaebf35e627944 | 12,700 | py | Python | src/utils.py | CSMMLab/neuralEntropyClosures | 5efc5961f2fac36921a749d35f3636c61d1cc873 | [
"MIT"
] | 4 | 2021-09-23T07:21:23.000Z | 2021-12-24T11:35:39.000Z | src/utils.py | ScSteffen/neuralEntropyClosures | 5efc5961f2fac36921a749d35f3636c61d1cc873 | [
"MIT"
] | null | null | null | src/utils.py | ScSteffen/neuralEntropyClosures | 5efc5961f2fac36921a749d35f3636c61d1cc873 | [
"MIT"
] | null | null | null | '''
Accumulation of utility functions
Date: 15.03.2021
Author: Steffen Schotthöfer
'''
import numpy as np
import time
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib import cm
import seaborn as sns
import os
from pathlib import Path
import git
# plt.style.use("kitish")
def finiteDiff(x, y):
    '''
    Backward finite-difference derivative of y with respect to x.

    :param x: 1D array of function arguments (len >= 2, distinct values)
    :param y: 1D array of function values, y = f(x)
    :return: array of df/dx estimates at every point of x
    '''
    grad = np.zeros(x.shape)
    # Backward differences: grad[i] = (y[i] - y[i-1]) / (x[i] - x[i-1]) for i >= 1.
    # Bug fix: the original loop stored (y[i] - y[i-1]) into grad[i + 1], so
    # grad[1] wrapped around to y[-1] and every later entry was shifted by one.
    grad[1:] = np.diff(y) / np.diff(x)
    # No left neighbour at index 0: fall back to the forward difference.
    grad[0] = (y[1] - y[0]) / (x[1] - x[0])
    return grad
def integrate(x, y):
    '''
    Cumulative rectangle-rule integral of y over the span of x.

    :param x: function argument
    :param y: = f(x)
    :return: running integral of y, evaluated with right-endpoint rectangles
    '''
    integral = np.zeros(x.shape)
    # Right-endpoint rectangle rule, accumulated left to right
    # (same summation order as an explicit loop).
    integral[1:] = np.cumsum(np.diff(x) * y[1:])
    return integral
def load_data(filename: str, input_dim: int, selected_cols: tuple = (True, True, True)) -> list:
    '''
    Load training data from the csv file <filename>.

    Expected file layout: an index column, then <input_dim> columns for u,
    <input_dim> columns for alpha and finally one column for h.

    :param filename: path to the csv file
    :param input_dim: length of the moment vectors u and alpha
    :param selected_cols: booleans selecting which of (u, alpha, h) to load
        (default was a mutable list; a tuple avoids the shared-default pitfall
        while staying backward compatible)
    :return: list with one numpy array per selected quantity, in (u, alpha, h) order
    '''
    training_data = []
    print("Loading Data from location: " + filename)
    # Column positions inside the csv file (column 0 is an index column).
    u_cols = list(range(1, input_dim + 1))
    alpha_cols = list(range(input_dim + 1, 2 * input_dim + 1))
    h_col = [2 * input_dim + 1]
    start = time.time()
    if selected_cols[0]:
        training_data.append(pd.read_csv(filename, usecols=u_cols).to_numpy())
    if selected_cols[1]:
        training_data.append(pd.read_csv(filename, usecols=alpha_cols).to_numpy())
    if selected_cols[2]:
        training_data.append(pd.read_csv(filename, usecols=h_col).to_numpy())
    end = time.time()
    print("Data loaded. Elapsed time: " + str(end - start))
    return training_data
def load_density_function(filename: str) -> list:
    '''
    Load a discretized density function from the csv file <filename>.

    Row 0 holds the quadrature points x, row 1 the quadrature weights, and the
    remaining rows the kinetic densities. The first csv column is dropped.

    :return: [x, weights, f_kinetic]
    '''
    print("Loading Data from location: " + filename)
    start = time.time()
    raw = pd.read_csv(filename, header=None).drop(columns=0).to_numpy()
    n_points = raw.shape[1]
    x = raw[0, :].reshape((1, n_points))
    weights = raw[1, :].reshape((1, n_points))
    f_kinetic = raw[2:, :]
    end = time.time()
    print("Data loaded. Elapsed time: " + str(end - start))
    return [x, weights, f_kinetic]
def load_solution(filename: str) -> list:
    """Load a solution comparison from csv file <filename>.

    The csv holds the neural-network solution in the first half of the
    columns and the reference solution in the second half.

    :param filename: path to the csv file
    :return: [u_neural, u_ref]
    """
    print("Loading Data from location: " + filename)
    start = time.time()
    df = pd.read_csv(filename)
    data = df.to_numpy()
    # Integer midpoint of the column range (was an unused float variable).
    half = data.shape[1] // 2
    u_neural = data[:, :half]
    u_ref = data[:, half:]
    end = time.time()
    print("Data loaded. Elapsed time: " + str(end - start))
    return [u_neural, u_ref]
def evaluateModel(model, input):
    """Evaluate *model* on *input* and return its predictions."""
    return model.predict(input)
def evaluateModelDerivative(model, input):
    """Evaluate d(model)/d(input) at *input* via automatic differentiation."""
    x_var = tf.Variable(input)
    with tf.GradientTape() as tape:
        # Inference mode: layers such as Dropout behave as at prediction time.
        preds = model(x_var, training=False)
    return tape.gradient(preds, x_var)
def loadTFModel(filename):
    """Load a saved keras model (.h5 file) from disk and return it."""
    return tf.keras.models.load_model(filename)
def plot_1d(xs, ys, labels=None, name='defaultName', log=True, folder_name="figures", linetypes=None, show_fig=False,
            xlim=None, ylim=None, xlabel=None, ylabel=None, title: str = r"$h^n$ over ${\mathcal{R}^r}$"):
    """Plot one or several 1-D curves and save the figure as a png.

    If xs holds a single array, every column of each entry of ys is drawn
    against it; otherwise xs and ys are paired one-to-one.

    :param xs: list of abscissa arrays (length 1 or len(ys))
    :param ys: list of ordinate arrays
    :param labels: legend labels, one per curve (optional)
    :param log: use a logarithmic y axis
    :return: 0 on success
    """
    plt.clf()
    if not linetypes:
        linetypes = ['-', '--', '-.', ':', ':', '.', ',', 'o', 'v', '^', '<', '>', '1', '2', '3', '4', 's', 'p', '*',
                     'h',
                     'H',
                     '+', 'x', 'D', 'd', '|']
    if labels is not None:
        linetypes = linetypes[0:len(labels)]
    sns.set_theme()
    sns.set_style("white")
    colors = ['k', 'r', 'g', 'b']
    symbol_size = 0.7
    if len(xs) == 1:
        x = xs[0]
        for y, lineType in zip(ys, linetypes):
            for i in range(y.shape[1]):
                # Black markers would have no visible edge; switch to white.
                if colors[i] == 'k' and lineType in ['.', ',', 'o', 'v', '^', '<', '>']:
                    colors[i] = 'w'
                plt.plot(x, y[:, i], colors[i] + lineType, linewidth=symbol_size, markersize=2.5,
                         markeredgewidth=0.5, markeredgecolor='k')
        if labels is not None:  # fixed: compare to None with "is not", not "!="
            plt.legend(labels)
    elif len(xs) != len(ys):  # fixed: "is not" compared int identity, not value
        print("Error: List of x entries must be of same length as y entries")
        exit(1)
    else:
        for x, y, lineType in zip(xs, ys, linetypes):
            plt.plot(x, y, lineType, linewidth=symbol_size)
        plt.legend(labels)
    if log:
        plt.yscale('log')
    if show_fig:
        plt.show()
    if ylim is not None:
        plt.ylim(ylim[0], ylim[1])
    if xlim is not None:
        plt.xlim(xlim[0], xlim[1])
    if xlabel is not None:
        plt.xlabel(xlabel, fontsize=12)
    if ylabel is not None:
        plt.ylabel(ylabel, fontsize=12)
    plt.title(title, fontsize=14)
    plt.savefig(folder_name + "/" + name + ".png", dpi=500)
    print("Figure successfully saved to file: " + str(folder_name + "/" + name + ".png"))
    return 0
def scatter_plot_2d(x_in: np.ndarray, z_in: np.ndarray, lim_x: tuple = (-1, 1), lim_y: tuple = (0, 1),
                    lim_z: tuple = (0, 1), label_x: str = r"$u_1^r$", label_y: str = r"$u_2^r$",
                    title: str = r"$h^n$ over ${\mathcal{R}^r}$", name: str = 'defaultName', log: bool = True,
                    folder_name: str = "figures", show_fig: bool = False, color_map: int = 0):
    """Scatter-plot z_in over the 2-D points x_in = [x1, x2] and save a png.

    :param x_in: (n, 2) array of plot coordinates
    :param z_in: function values, used for the colour scale
    :param log: colour-code on a logarithmic scale
    :return: 0 on success
    """
    c_map = cm.summer if color_map == 1 else cm.hot
    fig = plt.figure(figsize=(5.8, 4.7), dpi=400)
    ax = fig.add_subplot(111)
    scatter_kwargs = dict(s=6, c=z_in, cmap=c_map, vmin=lim_z[0], vmax=lim_z[1])
    if log:
        # Logarithmic colour normalisation for strongly varying data.
        scatter_kwargs['norm'] = colors.LogNorm()
    out = ax.scatter(x_in[:, 0], x_in[:, 1], **scatter_kwargs)
    plt.xlim(lim_x[0], lim_x[1])
    plt.ylim(lim_y[0], lim_y[1])
    ax.set_title(title, fontsize=14)
    ax.set_xlabel(label_x)
    ax.set_ylabel(label_y)
    ax.set_aspect('auto')
    fig.colorbar(out, ax=ax, extend='both')
    if show_fig:
        plt.show()
    plt.savefig(folder_name + "/" + name + ".png", dpi=150)
    return 0
def scatter_plot_2d_N2(x_in: np.ndarray, z_in: np.ndarray, lim_x: tuple = (-1, 1), lim_y: tuple = (0, 1),
                       lim_z: tuple = (0, 1), label_x: str = r"$u_1^r$", label_y: str = r"$u_2^r$",
                       title: str = r"$h^n$ over ${\mathcal{R}^r}$", name: str = 'defaultName', log: bool = True,
                       folder_name: str = "figures", show_fig: bool = False, color_map: int = 0):
    """Scatter-plot z_in over the 2-D points x_in, with the N2 boundary drawn.

    Additionally draws the dashed curves u2 = u1^2 and u2 = 1 over [-1, 1].

    :param x_in: (n, 2) array of plot coordinates
    :param z_in: function values, used for the colour scale
    :param log: colour-code on a logarithmic scale
    :return: 0 on success
    """
    c_map = cm.summer if color_map == 1 else cm.hot
    plt.plot()
    fig = plt.figure(figsize=(5.8, 4.7), dpi=400)
    ax = fig.add_subplot(111)
    # Dashed boundary curves: the parabola below and the constant line above.
    u1 = np.linspace(-1, 1, 100)
    ax.plot(u1, u1 * u1, 'k--')
    ax.plot(u1, np.ones(100), 'k--')
    scatter_kwargs = dict(s=6, c=z_in, cmap=c_map, vmin=lim_z[0], vmax=lim_z[1])
    if log:
        scatter_kwargs['norm'] = colors.LogNorm()
    out = ax.scatter(x_in[:, 0], x_in[:, 1], **scatter_kwargs)
    plt.xlim(lim_x[0], lim_x[1])
    plt.ylim(lim_y[0], lim_y[1])
    ax.set_title(title, fontsize=14)
    ax.set_xlabel(label_x)
    ax.set_ylabel(label_y)
    ax.set_aspect('auto')
    fig.colorbar(out, ax=ax, extend='both')
    if show_fig:
        plt.show()
    plt.savefig(folder_name + "/" + name + ".png", dpi=150)
    return 0
def write_config_file(options, neural_closure_model):
    """Persist the training configuration next to the model.

    Writes (1) a shell runscript that reproduces the training call and
    (2) a csv file listing all chosen options plus the current git
    revision. File names are numbered so existing configs are never
    overwritten.

    :param options: parsed command-line options object
    :param neural_closure_model: model object providing folder_name
    :return: True on success
    """
    # Assemble the reproduction runscript line by line, then join once.
    script_lines = [
        "python callNeuralClosure.py \\",
        "--sampling=" + str(int(options.sampling)) + " \\",
        "--batch=" + str(options.batch) + " \\",
        "--curriculum=" + str(options.curriculum) + " \\",
        "--degree=" + str(options.degree) + " \\",
        "--epoch=" + str(options.epoch) + " \\",
        "--folder=" + str(options.folder) + " \\",
        "--loadModel=" + str(1) + " \\",  # force loading on replay
        "--model=" + str(options.model) + " \\",
        "--normalized=" + str(int(options.normalized)) + " \\",
        "--scaledOutput=" + str(int(options.scaledOutput)) + " \\",
        "--decorrInput=" + str(int(options.decorrInput)) + " \\",
        "--objective=" + str(options.objective) + " \\",
        "--processingmode=" + str(options.processingmode) + " \\",
        "--spatialDimension=" + str(options.spatial_dimension) + " \\",
        "--training=" + str(options.training) + " \\",
        "--verbosity=" + str(options.verbosity) + " \\",
        "--networkwidth=" + str(options.networkwidth) + " \\",
        "--networkdepth=" + str(options.networkdepth),
    ]
    runScript = "\n".join(script_lines)
    # Create the target directory and find an unused runscript file name.
    make_directory(neural_closure_model.folder_name)
    count = 0
    rsFile = neural_closure_model.folder_name + '/runScript_001_'
    while os.path.isfile(rsFile + '.sh'):
        count += 1
        rsFile = neural_closure_model.folder_name + '/runScript_' + str(count).zfill(3) + '_'
    rsFile = rsFile + '.sh'
    print("Writing config to " + rsFile)
    # Context manager closes the file even if the write fails.
    with open(rsFile, "w") as f:
        f.write(runScript)
    # Record the exact code revision used for this training run.
    repo = git.Repo(search_parent_directories=True)
    sha = repo.head.object.hexsha
    print("Current git checkout: " + str(sha))
    # Dump the chosen options to a numbered csv file.
    d = {'git_version': [sha],
         'sampling': [options.sampling],
         'batch': [options.batch],
         'curriculum': [options.curriculum],
         'degree': [options.degree],
         'epoch': [options.epoch],
         'folder': [options.folder],
         'loadModel': [options.loadmodel],
         'model': [options.model],
         'normalized moments': [options.normalized],
         'decorrelate inputs': [options.decorrInput],
         'scaled outputs': [options.scaledOutput],
         'objective': [options.objective],
         'processingmode': [options.processingmode],
         'spatial Dimension': [options.spatial_dimension],
         'verbosity': [options.verbosity],
         'training': [options.training],
         'network width': [options.networkwidth],
         'network depth': [options.networkdepth]}
    count = 0
    cfg_file = neural_closure_model.folder_name + '/config_001_'
    while os.path.isfile(cfg_file + '.csv'):
        count += 1
        cfg_file = neural_closure_model.folder_name + '/config_' + str(count).zfill(3) + '_'
    cfg_file = cfg_file + '.csv'
    pd.DataFrame.from_dict(data=d, orient='index').to_csv(cfg_file, header=False, sep=';')
    return True
def make_directory(path_to_directory):
    """Create *path_to_directory* (including parents) if it is missing.

    Uses exist_ok to avoid the check-then-create race of the original
    os.path.exists guard.

    :param path_to_directory: directory path to create
    :return: 0 on success
    """
    Path(path_to_directory).mkdir(parents=True, exist_ok=True)
    return 0
| 34.048257 | 117 | 0.586299 |
import numpy as np
import time
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib import cm
import seaborn as sns
import os
from pathlib import Path
import git
def finiteDiff(x, y):
    """First derivative of y = f(x) by one-sided finite differences.

    :param x: 1-D grid
    :param y: function values on the grid
    :return: array of dy/dx, same shape as x
    """
    grad = np.zeros(x.shape)
    # Forward difference at the left boundary.
    grad[0] = (y[1] - y[0]) / (x[1] - x[0])
    # Backward differences for the remaining points. The original used the
    # (i, i-1) pair while assigning grad[i + 1]; at i = 0 that read y[-1]
    # (the last element, via wrap-around). Use the (i+1, i) pair instead.
    for i in range(0, x.shape[0] - 1):
        grad[i + 1] = (y[i + 1] - y[i]) / (x[i + 1] - x[i])
    return grad
def integrate(x, y):
    """Cumulative right-endpoint rectangle-rule integral of y = f(x).

    :param x: 1-D function argument
    :param y: = f(x)
    :return: running integral over the span of x, same shape, starting at 0
    """
    integral = np.zeros(x.shape)
    # cumsum adds the step areas in the same left-to-right order as the
    # explicit loop, so the floating-point results are identical.
    integral[1:] = np.cumsum(np.diff(x) * y[1:])
    return integral
def load_data(filename: str, input_dim: int, selected_cols: list = None) -> list:
    """Load training data from csv file <filename>.

    Expected file layout: index column, <input_dim> u-columns,
    <input_dim> alpha-columns, one h-column.

    :param filename: path to the csv file
    :param input_dim: length of the u and alpha vectors
    :param selected_cols: boolean triple [load_u, load_alpha, load_h];
        defaults to loading everything
    :return: training_data = [u, alpha, h] (only the selected entries)
    """
    # None sentinel instead of a mutable default argument.
    if selected_cols is None:
        selected_cols = [True, True, True]
    training_data = []
    print("Loading Data from location: " + filename)
    u_cols = list(range(1, input_dim + 1))
    alpha_cols = list(range(input_dim + 1, 2 * input_dim + 1))
    h_col = [2 * input_dim + 1]
    # Fixed: this version referenced `start` in the elapsed-time print
    # without ever assigning it (NameError); restore the timer start.
    start = time.time()
    if selected_cols[0]:
        df = pd.read_csv(filename, usecols=u_cols)
        training_data.append(df.to_numpy())
    if selected_cols[1]:
        df = pd.read_csv(filename, usecols=alpha_cols)
        training_data.append(df.to_numpy())
    if selected_cols[2]:
        df = pd.read_csv(filename, usecols=h_col)
        training_data.append(df.to_numpy())
    end = time.time()
    print("Data loaded. Elapsed time: " + str(end - start))
    return training_data
def load_density_function(filename: str) -> list:
    """Load a kinetic density dump from csv file <filename>.

    Row 0: quadrature points x, row 1: quadrature weights, remaining rows:
    kinetic densities. The leading index column is discarded.

    :return: [x, weights, f_kinetic] with x and weights shaped (1, n)
    """
    print("Loading Data from location: " + filename)
    t0 = time.time()
    frame = pd.read_csv(filename, header=None)
    frame = frame.drop(frame.columns[0], axis=1)
    values = frame.to_numpy()
    width = len(values[0, :])
    x = values[0, :].reshape((1, width))
    weights = values[1, :].reshape((1, width))
    f_kinetic = values[2:, :]
    print("Data loaded. Elapsed time: " + str(time.time() - t0))
    return [x, weights, f_kinetic]
def load_solution(filename: str) -> list:
    """Load a solution comparison from csv file <filename>.

    The csv holds the neural-network solution in the first half of the
    columns and the reference solution in the second half.

    :param filename: path to the csv file
    :return: [u_neural, u_ref]
    """
    print("Loading Data from location: " + filename)
    start = time.time()
    df = pd.read_csv(filename)
    data = df.to_numpy()
    # Integer midpoint of the column range (was an unused float variable).
    half = data.shape[1] // 2
    u_neural = data[:, :half]
    u_ref = data[:, half:]
    end = time.time()
    print("Data loaded. Elapsed time: " + str(end - start))
    return [u_neural, u_ref]
def evaluateModel(model, input):
    """Return the model's predictions for *input*."""
    return model.predict(input)
def evaluateModelDerivative(model, input):
    """Evaluate model derivatives at *input* using a gradient tape."""
    variable = tf.Variable(input)
    with tf.GradientTape() as tape:
        # training=False: evaluate in inference mode.
        output = model(variable, training=False)
    return tape.gradient(output, variable)
def loadTFModel(filename):
    """Read a keras .h5 model file into memory and return the model."""
    return tf.keras.models.load_model(filename)
def plot_1d(xs, ys, labels=None, name='defaultName', log=True, folder_name="figures", linetypes=None, show_fig=False,
            xlim=None, ylim=None, xlabel=None, ylabel=None, title: str = r"$h^n$ over ${\mathcal{R}^r}$"):
    """Plot one or several 1-D curves and save the figure as a png.

    If xs holds a single array, every column of each entry of ys is drawn
    against it; otherwise xs and ys are paired one-to-one.

    :param xs: list of abscissa arrays (length 1 or len(ys))
    :param ys: list of ordinate arrays
    :param labels: legend labels, one per curve (optional)
    :param log: use a logarithmic y axis
    :return: 0 on success
    """
    plt.clf()
    if not linetypes:
        linetypes = ['-', '--', '-.', ':', ':', '.', ',', 'o', 'v', '^', '<', '>', '1', '2', '3', '4', 's', 'p', '*',
                     'h',
                     'H',
                     '+', 'x', 'D', 'd', '|']
    if labels is not None:
        linetypes = linetypes[0:len(labels)]
    sns.set_theme()
    sns.set_style("white")
    colors = ['k', 'r', 'g', 'b']
    symbol_size = 0.7
    if len(xs) == 1:
        x = xs[0]
        for y, lineType in zip(ys, linetypes):
            for i in range(y.shape[1]):
                # Black markers would have no visible edge; switch to white.
                if colors[i] == 'k' and lineType in ['.', ',', 'o', 'v', '^', '<', '>']:
                    colors[i] = 'w'
                plt.plot(x, y[:, i], colors[i] + lineType, linewidth=symbol_size, markersize=2.5,
                         markeredgewidth=0.5, markeredgecolor='k')
        if labels is not None:  # fixed: compare to None with "is not", not "!="
            plt.legend(labels)
    elif len(xs) != len(ys):  # fixed: "is not" compared int identity, not value
        print("Error: List of x entries must be of same length as y entries")
        exit(1)
    else:
        for x, y, lineType in zip(xs, ys, linetypes):
            plt.plot(x, y, lineType, linewidth=symbol_size)
        plt.legend(labels)
    if log:
        plt.yscale('log')
    if show_fig:
        plt.show()
    if ylim is not None:
        plt.ylim(ylim[0], ylim[1])
    if xlim is not None:
        plt.xlim(xlim[0], xlim[1])
    if xlabel is not None:
        plt.xlabel(xlabel, fontsize=12)
    if ylabel is not None:
        plt.ylabel(ylabel, fontsize=12)
    plt.title(title, fontsize=14)
    plt.savefig(folder_name + "/" + name + ".png", dpi=500)
    print("Figure successfully saved to file: " + str(folder_name + "/" + name + ".png"))
    return 0
def scatter_plot_2d(x_in: np.ndarray, z_in: np.ndarray, lim_x: tuple = (-1, 1), lim_y: tuple = (0, 1),
                    lim_z: tuple = (0, 1), label_x: str = r"$u_1^r$", label_y: str = r"$u_2^r$",
                    title: str = r"$h^n$ over ${\mathcal{R}^r}$", name: str = 'defaultName', log: bool = True,
                    folder_name: str = "figures", show_fig: bool = False, color_map: int = 0):
    """Scatter-plot z_in over the 2-D points x_in and save a png.

    :param x_in: (n, 2) array of plot coordinates
    :param z_in: function values, used for the colour scale
    :param log: colour-code on a logarithmic scale
    :return: 0 on success
    """
    c_map = cm.summer if color_map == 1 else cm.hot
    fig = plt.figure(figsize=(5.8, 4.7), dpi=400)
    ax = fig.add_subplot(111)
    kwargs = dict(s=6, c=z_in, cmap=c_map, vmin=lim_z[0], vmax=lim_z[1])
    if log:
        kwargs['norm'] = colors.LogNorm()
    out = ax.scatter(x_in[:, 0], x_in[:, 1], **kwargs)
    plt.xlim(lim_x[0], lim_x[1])
    plt.ylim(lim_y[0], lim_y[1])
    ax.set_title(title, fontsize=14)
    ax.set_xlabel(label_x)
    ax.set_ylabel(label_y)
    ax.set_aspect('auto')
    fig.colorbar(out, ax=ax, extend='both')
    if show_fig:
        plt.show()
    plt.savefig(folder_name + "/" + name + ".png", dpi=150)
    return 0
def scatter_plot_2d_N2(x_in: np.ndarray, z_in: np.ndarray, lim_x: tuple = (-1, 1), lim_y: tuple = (0, 1),
                       lim_z: tuple = (0, 1), label_x: str = r"$u_1^r$", label_y: str = r"$u_2^r$",
                       title: str = r"$h^n$ over ${\mathcal{R}^r}$", name: str = 'defaultName', log: bool = True,
                       folder_name: str = "figures", show_fig: bool = False, color_map: int = 0):
    """Scatter-plot z_in over the 2-D points x_in, with the N2 boundary drawn.

    Additionally draws the dashed curves u2 = u1^2 and u2 = 1 over [-1, 1].

    :param x_in: (n, 2) array of plot coordinates
    :param z_in: function values, used for the colour scale
    :param log: colour-code on a logarithmic scale
    :return: 0 on success
    """
    c_map = cm.summer if color_map == 1 else cm.hot
    plt.plot()
    fig = plt.figure(figsize=(5.8, 4.7), dpi=400)
    ax = fig.add_subplot(111)
    # Dashed boundary curves: the parabola below and the constant line above.
    u1 = np.linspace(-1, 1, 100)
    ax.plot(u1, u1 * u1, 'k--')
    ax.plot(u1, np.ones(100), 'k--')
    kwargs = dict(s=6, c=z_in, cmap=c_map, vmin=lim_z[0], vmax=lim_z[1])
    if log:
        kwargs['norm'] = colors.LogNorm()
    out = ax.scatter(x_in[:, 0], x_in[:, 1], **kwargs)
    plt.xlim(lim_x[0], lim_x[1])
    plt.ylim(lim_y[0], lim_y[1])
    ax.set_title(title, fontsize=14)
    ax.set_xlabel(label_x)
    ax.set_ylabel(label_y)
    ax.set_aspect('auto')
    fig.colorbar(out, ax=ax, extend='both')
    if show_fig:
        plt.show()
    plt.savefig(folder_name + "/" + name + ".png", dpi=150)
    return 0
def write_config_file(options, neural_closure_model):
    """Persist the training configuration next to the model.

    Writes a shell runscript reproducing the training call and a csv file
    with all chosen options plus the current git revision; file names are
    numbered so existing configs are never overwritten.

    :param options: parsed command-line options object
    :param neural_closure_model: model object providing folder_name
    :return: True on success
    """
    # Assemble the reproduction runscript line by line, then join once.
    script_lines = [
        "python callNeuralClosure.py \\",
        "--sampling=" + str(int(options.sampling)) + " \\",
        "--batch=" + str(options.batch) + " \\",
        "--curriculum=" + str(options.curriculum) + " \\",
        "--degree=" + str(options.degree) + " \\",
        "--epoch=" + str(options.epoch) + " \\",
        "--folder=" + str(options.folder) + " \\",
        "--loadModel=" + str(1) + " \\",  # force loading on replay
        "--model=" + str(options.model) + " \\",
        "--normalized=" + str(int(options.normalized)) + " \\",
        "--scaledOutput=" + str(int(options.scaledOutput)) + " \\",
        "--decorrInput=" + str(int(options.decorrInput)) + " \\",
        "--objective=" + str(options.objective) + " \\",
        "--processingmode=" + str(options.processingmode) + " \\",
        "--spatialDimension=" + str(options.spatial_dimension) + " \\",
        "--training=" + str(options.training) + " \\",
        "--verbosity=" + str(options.verbosity) + " \\",
        "--networkwidth=" + str(options.networkwidth) + " \\",
        "--networkdepth=" + str(options.networkdepth),
    ]
    runScript = "\n".join(script_lines)
    # Create the target directory and find an unused runscript file name.
    make_directory(neural_closure_model.folder_name)
    count = 0
    rsFile = neural_closure_model.folder_name + '/runScript_001_'
    while os.path.isfile(rsFile + '.sh'):
        count += 1
        rsFile = neural_closure_model.folder_name + '/runScript_' + str(count).zfill(3) + '_'
    rsFile = rsFile + '.sh'
    print("Writing config to " + rsFile)
    # Context manager closes the file even if the write fails.
    with open(rsFile, "w") as f:
        f.write(runScript)
    # Record the exact code revision used for this training run.
    repo = git.Repo(search_parent_directories=True)
    sha = repo.head.object.hexsha
    print("Current git checkout: " + str(sha))
    # Dump the chosen options to a numbered csv file.
    d = {'git_version': [sha],
         'sampling': [options.sampling],
         'batch': [options.batch],
         'curriculum': [options.curriculum],
         'degree': [options.degree],
         'epoch': [options.epoch],
         'folder': [options.folder],
         'loadModel': [options.loadmodel],
         'model': [options.model],
         'normalized moments': [options.normalized],
         'decorrelate inputs': [options.decorrInput],
         'scaled outputs': [options.scaledOutput],
         'objective': [options.objective],
         'processingmode': [options.processingmode],
         'spatial Dimension': [options.spatial_dimension],
         'verbosity': [options.verbosity],
         'training': [options.training],
         'network width': [options.networkwidth],
         'network depth': [options.networkdepth]}
    count = 0
    cfg_file = neural_closure_model.folder_name + '/config_001_'
    while os.path.isfile(cfg_file + '.csv'):
        count += 1
        cfg_file = neural_closure_model.folder_name + '/config_' + str(count).zfill(3) + '_'
    cfg_file = cfg_file + '.csv'
    pd.DataFrame.from_dict(data=d, orient='index').to_csv(cfg_file, header=False, sep=';')
    return True
def make_directory(path_to_directory):
    """Create *path_to_directory* (including parents) if it is missing.

    Uses exist_ok to avoid the check-then-create race of the original
    os.path.exists guard.

    :param path_to_directory: directory path to create
    :return: 0 on success
    """
    Path(path_to_directory).mkdir(parents=True, exist_ok=True)
    return 0
| true | true |
1c307c3e462df1c24c517bb60c8034e700396b3c | 4,162 | py | Python | haystack/backends/simple_backend.py | speedplane/django-haystack | 4ace30aea6aa1b1708f79a5a9df20a00fa0b4d96 | [
"BSD-3-Clause"
] | 1 | 2017-10-12T14:25:06.000Z | 2017-10-12T14:25:06.000Z | haystack/backends/simple_backend.py | speedplane/django-haystack | 4ace30aea6aa1b1708f79a5a9df20a00fa0b4d96 | [
"BSD-3-Clause"
] | 1 | 2016-08-03T18:01:43.000Z | 2016-08-03T18:03:00.000Z | haystack/backends/simple_backend.py | speedplane/django-haystack | 4ace30aea6aa1b1708f79a5a9df20a00fa0b4d96 | [
"BSD-3-Clause"
] | null | null | null | """
A very basic, ORM-based backend for simple search during tests.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.db.models import Q
from django.utils import six
from haystack import connections
from haystack.backends import BaseEngine, BaseSearchBackend, BaseSearchQuery, SearchNode, log_query
from haystack.inputs import PythonData
from haystack.models import SearchResult
# Logging setup for this backend. In DEBUG mode, warnings about unsupported
# operations go to stderr; otherwise logging is disabled entirely (logger is
# None, and every call site checks for that before logging).
if settings.DEBUG:
    import logging
    class NullHandler(logging.Handler):
        # Swallows records so "no handlers could be found" warnings are avoided.
        def emit(self, record):
            pass
    ch = logging.StreamHandler()
    ch.setLevel(logging.WARNING)
    ch.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger = logging.getLogger('haystack.simple_backend')
    logger.setLevel(logging.WARNING)
    logger.addHandler(NullHandler())
    logger.addHandler(ch)
else:
    # Logging disabled outside DEBUG; guarded at every call site.
    logger = None
class SimpleSearchBackend(BaseSearchBackend):
    """Pure ORM-based search backend using icontains filters.

    Intended for simple search during tests; update/remove/clear are no-ops
    that only emit a warning when logging is enabled.
    """

    def update(self, indexer, iterable, commit=True):
        if logger is not None:
            logger.warning('update is not implemented in this backend')

    def remove(self, obj, commit=True):
        if logger is not None:
            logger.warning('remove is not implemented in this backend')

    def clear(self, models=None, commit=True):
        # Fixed: mutable default argument (models=[]) replaced by None.
        if logger is not None:
            logger.warning('clear is not implemented in this backend')

    @log_query
    def search(self, query_string, **kwargs):
        """Filter the text fields of every indexed model with icontains.

        NOTE(review): for multi-term queries, qs is reassigned per term so
        only the last term contributes — pre-existing behaviour, kept as-is.
        """
        hits = 0
        results = []
        result_class = SearchResult
        models = connections[self.connection_alias].get_unified_index().get_indexed_models()

        if kwargs.get('result_class'):
            result_class = kwargs['result_class']

        if kwargs.get('models'):
            models = kwargs['models']

        if query_string:
            for model in models:
                if query_string == '*':
                    qs = model.objects.all()
                else:
                    for term in query_string.split():
                        queries = []
                        for field in model._meta.fields:
                            # Skip relations and non-text fields.
                            if hasattr(field, 'related'):
                                continue
                            if not field.get_internal_type() in ('TextField', 'CharField', 'SlugField'):
                                continue
                            queries.append(Q(**{'%s__icontains' % field.name: term}))
                        qs = model.objects.filter(six.moves.reduce(lambda x, y: x | y, queries))
                hits += len(qs)

                for match in qs:
                    match.__dict__.pop('score', None)
                    result = result_class(match._meta.app_label, match._meta.module_name, match.pk, 0, **match.__dict__)
                    # For efficiency: reuse the already-loaded model instance.
                    result._model = match.__class__
                    result._object = match
                    results.append(result)

        return {
            'results': results,
            'hits': hits,
        }

    def prep_value(self, db_field, value):
        # No value transformation needed for ORM-backed search.
        return value

    def more_like_this(self, model_instance, additional_query_string=None,
                       start_offset=0, end_offset=None,
                       limit_to_registered_models=None, result_class=None, **kwargs):
        # More-like-this is not supported here; always an empty result set.
        return {
            'results': [],
            'hits': 0
        }
class SimpleSearchQuery(BaseSearchQuery):
    """Builds the plain-string query consumed by SimpleSearchBackend."""

    def build_query(self):
        # An empty filter tree means "match everything".
        if not self.query_filter:
            return '*'
        return self._build_sub_query(self.query_filter)

    def _build_sub_query(self, search_node):
        terms = []
        for child in search_node.children:
            if isinstance(child, SearchNode):
                # Recurse into nested nodes.
                terms.append(self._build_sub_query(child))
                continue
            value = child[1]
            if not hasattr(value, 'input_type_name'):
                # Wrap raw values so they expose prepare().
                value = PythonData(value)
            terms.append(value.prepare(self))
        return ' '.join(map(six.text_type, terms))
class SimpleEngine(BaseEngine):
    # Wires the simple ORM backend and its query class into haystack.
    backend = SimpleSearchBackend
    query = SimpleSearchQuery
| 31.530303 | 120 | 0.585296 | from __future__ import unicode_literals
from django.conf import settings
from django.db.models import Q
from django.utils import six
from haystack import connections
from haystack.backends import BaseEngine, BaseSearchBackend, BaseSearchQuery, SearchNode, log_query
from haystack.inputs import PythonData
from haystack.models import SearchResult
# Logging setup: warnings go to stderr in DEBUG mode; otherwise logger is
# None and every call site checks for that before logging.
if settings.DEBUG:
    import logging
    class NullHandler(logging.Handler):
        # Discards records to suppress "no handlers could be found" warnings.
        def emit(self, record):
            pass
    ch = logging.StreamHandler()
    ch.setLevel(logging.WARNING)
    ch.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger = logging.getLogger('haystack.simple_backend')
    logger.setLevel(logging.WARNING)
    logger.addHandler(NullHandler())
    logger.addHandler(ch)
else:
    # Logging disabled outside DEBUG.
    logger = None
class SimpleSearchBackend(BaseSearchBackend):
    """Pure ORM-based search backend using icontains filters.

    Intended for simple search during tests; update/remove/clear are no-ops
    that only emit a warning when logging is enabled.
    """

    def update(self, indexer, iterable, commit=True):
        if logger is not None:
            logger.warning('update is not implemented in this backend')

    def remove(self, obj, commit=True):
        if logger is not None:
            logger.warning('remove is not implemented in this backend')

    def clear(self, models=None, commit=True):
        # Fixed: mutable default argument (models=[]) replaced by None.
        if logger is not None:
            logger.warning('clear is not implemented in this backend')

    @log_query
    def search(self, query_string, **kwargs):
        """Filter the text fields of every indexed model with icontains.

        NOTE(review): for multi-term queries, qs is reassigned per term so
        only the last term contributes — pre-existing behaviour, kept as-is.
        """
        hits = 0
        results = []
        result_class = SearchResult
        models = connections[self.connection_alias].get_unified_index().get_indexed_models()

        if kwargs.get('result_class'):
            result_class = kwargs['result_class']

        if kwargs.get('models'):
            models = kwargs['models']

        if query_string:
            for model in models:
                if query_string == '*':
                    qs = model.objects.all()
                else:
                    for term in query_string.split():
                        queries = []
                        for field in model._meta.fields:
                            # Skip relations and non-text fields.
                            if hasattr(field, 'related'):
                                continue
                            if not field.get_internal_type() in ('TextField', 'CharField', 'SlugField'):
                                continue
                            queries.append(Q(**{'%s__icontains' % field.name: term}))
                        qs = model.objects.filter(six.moves.reduce(lambda x, y: x | y, queries))
                hits += len(qs)

                for match in qs:
                    match.__dict__.pop('score', None)
                    result = result_class(match._meta.app_label, match._meta.module_name, match.pk, 0, **match.__dict__)
                    # Reuse the already-loaded model instance for efficiency.
                    result._model = match.__class__
                    result._object = match
                    results.append(result)

        return {
            'results': results,
            'hits': hits,
        }

    def prep_value(self, db_field, value):
        # No value transformation needed for ORM-backed search.
        return value

    def more_like_this(self, model_instance, additional_query_string=None,
                       start_offset=0, end_offset=None,
                       limit_to_registered_models=None, result_class=None, **kwargs):
        # More-like-this is not supported here; always an empty result set.
        return {
            'results': [],
            'hits': 0
        }
class SimpleSearchQuery(BaseSearchQuery):
    """Builds the plain-string query consumed by SimpleSearchBackend."""

    def build_query(self):
        # An empty filter tree means "match everything".
        if not self.query_filter:
            return '*'
        return self._build_sub_query(self.query_filter)

    def _build_sub_query(self, search_node):
        pieces = []
        for child in search_node.children:
            if isinstance(child, SearchNode):
                pieces.append(self._build_sub_query(child))
            else:
                raw = child[1]
                # Wrap raw values so they expose prepare().
                prepared = raw if hasattr(raw, 'input_type_name') else PythonData(raw)
                pieces.append(prepared.prepare(self))
        return ' '.join(map(six.text_type, pieces))
class SimpleEngine(BaseEngine):
    # Wires the simple ORM backend and its query class into haystack.
    backend = SimpleSearchBackend
    query = SimpleSearchQuery
| true | true |
1c307ce12279edfdfc64e8c1d5f17d195f3bab13 | 5,961 | py | Python | tests/test_app.py | lonjoy/webssh-1 | 96d9ae5b4d1654957f155e95c5bf1ad4ac65c734 | [
"MIT"
] | 1 | 2019-08-06T21:06:01.000Z | 2019-08-06T21:06:01.000Z | tests/test_app.py | lonjoy/webssh-1 | 96d9ae5b4d1654957f155e95c5bf1ad4ac65c734 | [
"MIT"
] | null | null | null | tests/test_app.py | lonjoy/webssh-1 | 96d9ae5b4d1654957f155e95c5bf1ad4ac65c734 | [
"MIT"
] | 3 | 2019-08-06T21:06:05.000Z | 2021-03-19T18:05:02.000Z | import json
import webssh.handler as handler
import random
import threading
import tornado.websocket
import tornado.gen
from tornado.testing import AsyncHTTPTestCase
from tornado.options import options
from webssh.main import make_app, make_handlers
from webssh.settings import get_app_settings
from tests.sshserver import run_ssh_server
# Shrink the handler delay so tests run quickly; presumably the worker
# expiry window — the timeout test sleeps past it (confirm in webssh.handler).
handler.DELAY = 0.1
class TestApp(AsyncHTTPTestCase):
    """End-to-end tests against a local ssh server.

    POSTs credentials to obtain a worker id, then talks to the worker over
    the websocket endpoint.
    """

    _is_running = False
    sshserver_port = 2200
    body = u'hostname=127.0.0.1&port={}&username=robey&password=foo'.format(sshserver_port)  # noqa

    def get_app(self):
        loop = self.io_loop
        options.debug = True
        # Exercise both host-key policies across runs.
        options.policy = random.choice(['warning', 'autoadd'])
        options.hostFile = ''
        options.sysHostFile = ''
        app = make_app(make_handlers(loop, options), get_app_settings(options))
        return app

    @classmethod
    def setUpClass(cls):
        # Run the ssh test server in a background thread for the whole class.
        t = threading.Thread(
            target=run_ssh_server, args=(cls.sshserver_port, cls)
        )
        t.daemon = True  # Thread.setDaemon() is deprecated since Python 3.10
        t.start()

    @classmethod
    def tearDownClass(cls):
        # Flips the flag returned by __bool__ below; presumably polled by
        # run_ssh_server as a stop signal — confirm in tests/sshserver.
        cls._is_running = True

    @classmethod
    def __bool__(cls):
        return cls._is_running

    def test_app_with_invalid_form(self):
        response = self.fetch('/')
        self.assertEqual(response.code, 200)

        # Each malformed form field yields a descriptive status message.
        body = u'hostname=&port=&username=&password'
        response = self.fetch('/', method="POST", body=body)
        self.assertIn(b'"status": "Empty hostname"', response.body)

        body = u'hostname=127.0.0.1&port=&username=&password'
        response = self.fetch('/', method="POST", body=body)
        self.assertIn(b'"status": "Empty port"', response.body)

        body = u'hostname=127.0.0.1&port=port&username=&password'
        response = self.fetch('/', method="POST", body=body)
        self.assertIn(b'"status": "Invalid port', response.body)

        body = u'hostname=127.0.0.1&port=70000&username=&password'
        response = self.fetch('/', method="POST", body=body)
        self.assertIn(b'"status": "Invalid port', response.body)

        body = u'hostname=127.0.0.1&port=7000&username=&password'
        response = self.fetch('/', method="POST", body=body)
        self.assertIn(b'"status": "Empty username"', response.body)

    def test_app_with_wrong_credentials(self):
        response = self.fetch('/')
        self.assertEqual(response.code, 200)
        # Appending a character corrupts the password.
        response = self.fetch('/', method="POST", body=self.body + u's')
        self.assertIn(b'Authentication failed.', response.body)

    def test_app_with_correct_credentials(self):
        response = self.fetch('/')
        self.assertEqual(response.code, 200)
        response = self.fetch('/', method="POST", body=self.body)
        worker_id = json.loads(response.body.decode('utf-8'))['id']
        self.assertIsNotNone(worker_id)

    @tornado.testing.gen_test
    def test_app_with_correct_credentials_timeout(self):
        url = self.get_url('/')
        client = self.get_http_client()
        response = yield client.fetch(url)
        self.assertEqual(response.code, 200)

        response = yield client.fetch(url, method="POST", body=self.body)
        worker_id = json.loads(response.body.decode('utf-8'))['id']
        self.assertIsNotNone(worker_id)

        url = url.replace('http', 'ws')
        ws_url = url + 'ws?id=' + worker_id
        # Wait past the worker delay: the connection must then be gone.
        yield tornado.gen.sleep(handler.DELAY + 0.1)
        ws = yield tornado.websocket.websocket_connect(ws_url)
        msg = yield ws.read_message()
        self.assertIsNone(msg)
        ws.close()

    @tornado.testing.gen_test
    def test_app_with_correct_credentials_user_robey(self):
        url = self.get_url('/')
        client = self.get_http_client()
        response = yield client.fetch(url)
        self.assertEqual(response.code, 200)

        response = yield client.fetch(url, method="POST", body=self.body)
        worker_id = json.loads(response.body.decode('utf-8'))['id']
        self.assertIsNotNone(worker_id)

        url = url.replace('http', 'ws')
        ws_url = url + 'ws?id=' + worker_id
        ws = yield tornado.websocket.websocket_connect(ws_url)
        msg = yield ws.read_message()
        self.assertIn(b'Welcome!', msg)
        ws.close()

    @tornado.testing.gen_test
    def test_app_with_correct_credentials_user_bar(self):
        url = self.get_url('/')
        client = self.get_http_client()
        response = yield client.fetch(url)
        self.assertEqual(response.code, 200)

        body = self.body.replace('robey', 'bar')
        response = yield client.fetch(url, method="POST", body=body)
        worker_id = json.loads(response.body.decode('utf-8'))['id']
        self.assertIsNotNone(worker_id)

        url = url.replace('http', 'ws')
        ws_url = url + 'ws?id=' + worker_id
        ws = yield tornado.websocket.websocket_connect(ws_url)
        msg = yield ws.read_message()
        self.assertIn(b'Welcome!', msg)

        # Malformed messages are ignored silently by the worker.
        yield ws.write_message('hello')
        yield ws.write_message('"hello"')
        yield ws.write_message('[hello]')
        yield ws.write_message(json.dumps({'resize': []}))
        yield ws.write_message(json.dumps({'resize': {}}))
        yield ws.write_message(json.dumps({'resize': [100]}))
        yield ws.write_message(json.dumps({'resize': [100]*10}))
        yield ws.write_message(json.dumps({'resize': [-1, -1]}))
        yield ws.write_message(json.dumps({'data': [1]}))
        yield ws.write_message(json.dumps({'data': (1,)}))
        yield ws.write_message(json.dumps({'data': {'a': 2}}))
        yield ws.write_message(json.dumps({'data': 1}))
        yield ws.write_message(json.dumps({'data': 2.1}))
        yield ws.write_message(json.dumps({'key-non-existed': 'hello'}))
        yield ws.write_message(json.dumps({'resize': [79, 23], 'data': 'bye'}))
        msg = yield ws.read_message()
        self.assertEqual(b'bye', msg)
        ws.close()
| 37.490566 | 98 | 0.635464 | import json
import webssh.handler as handler
import random
import threading
import tornado.websocket
import tornado.gen
from tornado.testing import AsyncHTTPTestCase
from tornado.options import options
from webssh.main import make_app, make_handlers
from webssh.settings import get_app_settings
from tests.sshserver import run_ssh_server
# Shrink the handler delay so tests run quickly; presumably the worker
# expiry window — the timeout test sleeps past it (confirm in webssh.handler).
handler.DELAY = 0.1
class TestApp(AsyncHTTPTestCase):
    """End-to-end tests for the webssh tornado application.

    A throwaway SSH server is started once per test class on
    ``sshserver_port``; the tornado app under test then connects to it
    using the form-encoded credentials in ``body``.
    """
    _is_running = False
    sshserver_port = 2200
    body = u'hostname=127.0.0.1&port={}&username=robey&password=foo'.format(sshserver_port)
    def get_app(self):
        """Build the webssh tornado application with debug settings."""
        loop = self.io_loop
        options.debug = True
        # Exercise both host-key policies across test runs.
        options.policy = random.choice(['warning', 'autoadd'])
        options.hostFile = ''
        options.sysHostFile = ''
        app = make_app(make_handlers(loop, options), get_app_settings(options))
        return app
    @classmethod
    def setUpClass(cls):
        """Start the test SSH server in a daemon background thread."""
        t = threading.Thread(
            target=run_ssh_server, args=(cls.sshserver_port, cls)
        )
        # Bug fix: Thread.setDaemon() is deprecated (and removed in
        # Python 3.13); assign the ``daemon`` attribute instead.
        t.daemon = True
        t.start()
    @classmethod
    def tearDownClass(cls):
        """Flip the running flag once all tests in the class are done."""
        cls._is_running = True
    @classmethod
    def __bool__(cls):
        # run_ssh_server was handed the class itself; presumably it uses
        # this truthiness as its shutdown signal -- confirm in sshserver.
        return cls._is_running
    def test_app_with_invalid_form(self):
        """Each malformed form field yields a descriptive error status."""
        response = self.fetch('/')
        self.assertEqual(response.code, 200)
        body = u'hostname=&port=&username=&password'
        response = self.fetch('/', method="POST", body=body)
        self.assertIn(b'"status": "Empty hostname"', response.body)
        body = u'hostname=127.0.0.1&port=&username=&password'
        response = self.fetch('/', method="POST", body=body)
        self.assertIn(b'"status": "Empty port"', response.body)
        body = u'hostname=127.0.0.1&port=port&username=&password'
        response = self.fetch('/', method="POST", body=body)
        self.assertIn(b'"status": "Invalid port', response.body)
        body = u'hostname=127.0.0.1&port=70000&username=&password'
        response = self.fetch('/', method="POST", body=body)
        self.assertIn(b'"status": "Invalid port', response.body)
        body = u'hostname=127.0.0.1&port=7000&username=&password'
        response = self.fetch('/', method="POST", body=body)
        self.assertIn(b'"status": "Empty username"', response.body)
    def test_app_with_wrong_credentials(self):
        """A bad password is rejected with an authentication error."""
        response = self.fetch('/')
        self.assertEqual(response.code, 200)
        response = self.fetch('/', method="POST", body=self.body + u's')
        self.assertIn(b'Authentication failed.', response.body)
    def test_app_with_correct_credentials(self):
        """A valid login returns a JSON payload containing a worker id."""
        response = self.fetch('/')
        self.assertEqual(response.code, 200)
        response = self.fetch('/', method="POST", body=self.body)
        worker_id = json.loads(response.body.decode('utf-8'))['id']
        self.assertIsNotNone(worker_id)
    @tornado.testing.gen_test
    def test_app_with_correct_credentials_timeout(self):
        """A worker not claimed within handler.DELAY is torn down."""
        url = self.get_url('/')
        client = self.get_http_client()
        response = yield client.fetch(url)
        self.assertEqual(response.code, 200)
        response = yield client.fetch(url, method="POST", body=self.body)
        worker_id = json.loads(response.body.decode('utf-8'))['id']
        self.assertIsNotNone(worker_id)
        url = url.replace('http', 'ws')
        ws_url = url + 'ws?id=' + worker_id
        # Wait past the worker's grace period before connecting.
        yield tornado.gen.sleep(handler.DELAY + 0.1)
        ws = yield tornado.websocket.websocket_connect(ws_url)
        msg = yield ws.read_message()
        # The worker is gone, so the websocket is closed immediately.
        self.assertIsNone(msg)
        ws.close()
    @tornado.testing.gen_test
    def test_app_with_correct_credentials_user_robey(self):
        """Connecting promptly as user robey receives the welcome banner."""
        url = self.get_url('/')
        client = self.get_http_client()
        response = yield client.fetch(url)
        self.assertEqual(response.code, 200)
        response = yield client.fetch(url, method="POST", body=self.body)
        worker_id = json.loads(response.body.decode('utf-8'))['id']
        self.assertIsNotNone(worker_id)
        url = url.replace('http', 'ws')
        ws_url = url + 'ws?id=' + worker_id
        ws = yield tornado.websocket.websocket_connect(ws_url)
        msg = yield ws.read_message()
        self.assertIn(b'Welcome!', msg)
        ws.close()
    @tornado.testing.gen_test
    def test_app_with_correct_credentials_user_bar(self):
        """Malformed websocket payloads are ignored; valid data echoes back."""
        url = self.get_url('/')
        client = self.get_http_client()
        response = yield client.fetch(url)
        self.assertEqual(response.code, 200)
        body = self.body.replace('robey', 'bar')
        response = yield client.fetch(url, method="POST", body=body)
        worker_id = json.loads(response.body.decode('utf-8'))['id']
        self.assertIsNotNone(worker_id)
        url = url.replace('http', 'ws')
        ws_url = url + 'ws?id=' + worker_id
        ws = yield tornado.websocket.websocket_connect(ws_url)
        msg = yield ws.read_message()
        self.assertIn(b'Welcome!', msg)
        # Every message below except the last is malformed in some way
        # (not JSON, wrong type, bad resize shape) and must be tolerated.
        yield ws.write_message('hello')
        yield ws.write_message('"hello"')
        yield ws.write_message('[hello]')
        yield ws.write_message(json.dumps({'resize': []}))
        yield ws.write_message(json.dumps({'resize': {}}))
        yield ws.write_message(json.dumps({'resize': [100]}))
        yield ws.write_message(json.dumps({'resize': [100]*10}))
        yield ws.write_message(json.dumps({'resize': [-1, -1]}))
        yield ws.write_message(json.dumps({'data': [1]}))
        yield ws.write_message(json.dumps({'data': (1,)}))
        yield ws.write_message(json.dumps({'data': {'a': 2}}))
        yield ws.write_message(json.dumps({'data': 1}))
        yield ws.write_message(json.dumps({'data': 2.1}))
        yield ws.write_message(json.dumps({'key-non-existed': 'hello'}))
        yield ws.write_message(json.dumps({'resize': [79, 23], 'data': 'bye'}))
        msg = yield ws.read_message()
        self.assertEqual(b'bye', msg)
        ws.close()
| true | true |
1c307d5ea663713b36099465d0bc37a762fec3b4 | 4,978 | py | Python | tests/test_identifiers_org.py | kkaris/bioregistry | e8cdaf8e8c5670873ce10a5a67d7850b76e5eff7 | [
"MIT"
] | null | null | null | tests/test_identifiers_org.py | kkaris/bioregistry | e8cdaf8e8c5670873ce10a5a67d7850b76e5eff7 | [
"MIT"
] | null | null | null | tests/test_identifiers_org.py | kkaris/bioregistry | e8cdaf8e8c5670873ce10a5a67d7850b76e5eff7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Tests for identifiers.org."""
import unittest
from textwrap import dedent, fill
import requests
import bioregistry
from bioregistry import get_identifiers_org_curie, get_identifiers_org_url
from bioregistry.constants import IDOT_BROKEN
from bioregistry.version import VERSION
class TestIdentifiersOrg(unittest.TestCase):
    """Tests for identifiers.org.

    NOTE(review): these tests make live HTTP requests to identifiers.org
    and iterate the whole bioregistry registry, so they are slow and
    network-dependent.
    """
    def setUp(self) -> None:
        """Prepare a session that has a user agent."""
        # Tag every request with the bioregistry version so the remote
        # service can identify this client.
        self.session = requests.Session()
        self.session.headers = {
            'User-Agent': f'bioregistry/{VERSION}',
        }
    def test_get_prefix(self):
        """Test getting identifiers.org prefixes."""
        # Known bioregistry -> MIRIAM prefix remappings.
        for prefix, miriam_prefix in [
            ('ncbitaxon', 'taxonomy'),
            ('eccode', 'ec-code'),
        ]:
            with self.subTest(prefix=prefix):
                self.assertEqual(miriam_prefix, bioregistry.get_identifiers_org_prefix(prefix))
        # Prefixes with no identifiers.org mapping must return None.
        for prefix in ['MONDO']:
            self.assertIsNone(bioregistry.get_identifiers_org_prefix(prefix))
    def test_banana(self):
        """Test that entries curated with a new banana are resolved properly."""
        for prefix, entry in bioregistry.read_registry().items():
            banana = entry.banana
            if banana is None:
                continue
            if prefix in IDOT_BROKEN:
                continue # identifiers.org is broken for these prefixes
            with self.subTest(
                prefix=prefix,
                banana=banana,
                pattern=bioregistry.get_pattern(prefix),
            ):
                identifier = bioregistry.get_example(prefix)
                self.assertIsNotNone(identifier)
                url = bioregistry.resolve_identifier.get_identifiers_org_url(prefix, identifier)
                # A resolvable CURIE redirects (302) to the provider.
                res = self.session.get(url, allow_redirects=False)
                self.assertEqual(302, res.status_code, msg=f'failed with URL: {url}')
    def test_url_auto(self):
        """Test formatting URLs."""
        for prefix, entry in bioregistry.read_registry().items():
            if prefix in IDOT_BROKEN:
                continue
            identifier = bioregistry.get_example(prefix)
            if identifier is None:
                continue
            # NOTE(review): other code in this class reads fields as
            # attributes (entry.banana); this membership test assumes the
            # registry entry type also supports ``in`` -- confirm it
            # defines __contains__ for field names.
            if (
                'example' not in entry
                and 'banana' not in entry
                and 'pattern' not in entry
            ):
                continue
            url = get_identifiers_org_url(prefix, identifier)
            if url is None:
                continue
            with self.subTest(prefix=prefix, identifier=identifier):
                # FIXME
                # The following tests don't work because the CURIE generation often throws away the prefix.
                # miriam_prefix = bioregistry.get_identifiers_org_prefix(prefix)
                # self.assertIsNotNone(miriam_prefix)
                # self.assertTrue(
                #     url.startswith(f'https://identifiers.org/{miriam_prefix}:'),
                #     msg=f"bad prefix for {prefix}. Expected {miriam_prefix} in {url}",
                # )
                res = self.session.get(url, allow_redirects=False)
                self.assertEqual(302, res.status_code, msg='\n' + dedent(f'''\
                Prefix: {prefix}
                Identifier: {identifier}
                URL: {url}
                Text: ''') + fill(res.text, 70, subsequent_indent=' '))
    def test_url(self):
        """Test formatting URLs."""
        # Each tuple: (prefix, raw identifier, expected CURIE, rationale).
        for prefix, identifier, expected, _reason in [
            ('efo', '0000400', 'efo:0000400', 'test simple concatenation'),
            ('chebi', 'CHEBI:1234', 'CHEBI:1234', 'test redundant namespace (standard)'),
            ('chebi', '1234', 'CHEBI:1234', 'test exclusion of redundant namespace (standard)'),
            (
                'mzspec',
                'PXD002255::ES_XP_Ubi_97H_HCD_349:scan:9617:LAEIYVNSSFYK/2',
                'mzspec:PXD002255::ES_XP_Ubi_97H_HCD_349:scan:9617:LAEIYVNSSFYK/2',
                'test simple concatenation with false banana',
            ),
            (
                'mzspec',
                'mzspec:PXD002255::ES_XP_Ubi_97H_HCD_349:scan:9617:LAEIYVNSSFYK/2',
                'mzspec:PXD002255::ES_XP_Ubi_97H_HCD_349:scan:9617:LAEIYVNSSFYK/2',
                'test simple concatenation (redundant) with false banana',
            ),
        ]:
            with self.subTest(p=prefix, i=identifier):
                curie = get_identifiers_org_curie(prefix, identifier)
                self.assertEqual(expected, curie, msg='wrong CURIE')
                url = get_identifiers_org_url(prefix, identifier)
                self.assertEqual(f'https://identifiers.org/{curie}', url, msg='wrong URL')
                # Check that the URL resolves
                res = self.session.get(url, allow_redirects=False)
                self.assertEqual(302, res.status_code, msg=res.reason)
| 41.140496 | 107 | 0.579751 |
import unittest
from textwrap import dedent, fill
import requests
import bioregistry
from bioregistry import get_identifiers_org_curie, get_identifiers_org_url
from bioregistry.constants import IDOT_BROKEN
from bioregistry.version import VERSION
class TestIdentifiersOrg(unittest.TestCase):
def setUp(self) -> None:
self.session = requests.Session()
self.session.headers = {
'User-Agent': f'bioregistry/{VERSION}',
}
def test_get_prefix(self):
for prefix, miriam_prefix in [
('ncbitaxon', 'taxonomy'),
('eccode', 'ec-code'),
]:
with self.subTest(prefix=prefix):
self.assertEqual(miriam_prefix, bioregistry.get_identifiers_org_prefix(prefix))
for prefix in ['MONDO']:
self.assertIsNone(bioregistry.get_identifiers_org_prefix(prefix))
def test_banana(self):
for prefix, entry in bioregistry.read_registry().items():
banana = entry.banana
if banana is None:
continue
if prefix in IDOT_BROKEN:
continue
with self.subTest(
prefix=prefix,
banana=banana,
pattern=bioregistry.get_pattern(prefix),
):
identifier = bioregistry.get_example(prefix)
self.assertIsNotNone(identifier)
url = bioregistry.resolve_identifier.get_identifiers_org_url(prefix, identifier)
res = self.session.get(url, allow_redirects=False)
self.assertEqual(302, res.status_code, msg=f'failed with URL: {url}')
def test_url_auto(self):
for prefix, entry in bioregistry.read_registry().items():
if prefix in IDOT_BROKEN:
continue
identifier = bioregistry.get_example(prefix)
if identifier is None:
continue
if (
'example' not in entry
and 'banana' not in entry
and 'pattern' not in entry
):
continue
url = get_identifiers_org_url(prefix, identifier)
if url is None:
continue
with self.subTest(prefix=prefix, identifier=identifier):
# miriam_prefix = bioregistry.get_identifiers_org_prefix(prefix)
# self.assertIsNotNone(miriam_prefix)
# self.assertTrue(
# url.startswith(f'https://identifiers.org/{miriam_prefix}:'),
# msg=f"bad prefix for {prefix}. Expected {miriam_prefix} in {url}",
# )
res = self.session.get(url, allow_redirects=False)
self.assertEqual(302, res.status_code, msg='\n' + dedent(f'''\
Prefix: {prefix}
Identifier: {identifier}
URL: {url}
Text: ''') + fill(res.text, 70, subsequent_indent=' '))
def test_url(self):
for prefix, identifier, expected, _reason in [
('efo', '0000400', 'efo:0000400', 'test simple concatenation'),
('chebi', 'CHEBI:1234', 'CHEBI:1234', 'test redundant namespace (standard)'),
('chebi', '1234', 'CHEBI:1234', 'test exclusion of redundant namespace (standard)'),
(
'mzspec',
'PXD002255::ES_XP_Ubi_97H_HCD_349:scan:9617:LAEIYVNSSFYK/2',
'mzspec:PXD002255::ES_XP_Ubi_97H_HCD_349:scan:9617:LAEIYVNSSFYK/2',
'test simple concatenation with false banana',
),
(
'mzspec',
'mzspec:PXD002255::ES_XP_Ubi_97H_HCD_349:scan:9617:LAEIYVNSSFYK/2',
'mzspec:PXD002255::ES_XP_Ubi_97H_HCD_349:scan:9617:LAEIYVNSSFYK/2',
'test simple concatenation (redundant) with false banana',
),
]:
with self.subTest(p=prefix, i=identifier):
curie = get_identifiers_org_curie(prefix, identifier)
self.assertEqual(expected, curie, msg='wrong CURIE')
url = get_identifiers_org_url(prefix, identifier)
self.assertEqual(f'https://identifiers.org/{curie}', url, msg='wrong URL')
# Check that the URL resolves
res = self.session.get(url, allow_redirects=False)
self.assertEqual(302, res.status_code, msg=res.reason)
| true | true |
1c307f129b7b9a4f05c6c3ccf4c09c472b5e3e97 | 784 | py | Python | PARCIAL 3/TIENE BIENGO.py | msolivera/Phyton | 1322fa2ff4bb06a17350fefa7e5268c0969e5b53 | [
"bzip2-1.0.6"
] | null | null | null | PARCIAL 3/TIENE BIENGO.py | msolivera/Phyton | 1322fa2ff4bb06a17350fefa7e5268c0969e5b53 | [
"bzip2-1.0.6"
] | null | null | null | PARCIAL 3/TIENE BIENGO.py | msolivera/Phyton | 1322fa2ff4bb06a17350fefa7e5268c0969e5b53 | [
"bzip2-1.0.6"
] | null | null | null | #EJERCICIO 2:
def tiene_bingo (lista_carton,lista_sorteados):
    """Return True if every number on the card was drawn (bingo).

    Bug fix: the original looped over the card but never used the loop
    variable, comparing the two whole lists for equality instead -- so a
    card whose numbers were all drawn in a different order, or among
    extra draws, was never detected.  Bingo only requires that each card
    number appears among the drawn numbers.
    """
    # A set makes each membership check O(1).
    sorteados = set(lista_sorteados)
    return all(numero in sorteados for numero in lista_carton)
#print(tiene_bingo([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],[89,2,3,4,5,6,7,8,9,10,11,12,13,14,15]))
#EJERCICIO 3:
def va_por_uno (lista_carton,lista_sorteados):
    """Return True if exactly one drawn number is absent from the card.

    Behavior is identical to the original; the dead ``contador`` counter
    (computed but never read) has been removed.

    NOTE(review): this counts drawn numbers missing from the card.  If
    the intent of "va por uno" is "one number away from bingo", the
    membership test should be reversed (card numbers missing from the
    draws) -- confirm with the caller.
    """
    faltantes = [numero for numero in lista_sorteados if numero not in lista_carton]
    return len(faltantes) == 1
print(va_por_uno([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],[123,2,3,4,5,6,7,8,9,10,11,12,13,14,15]))
| 23.757576 | 107 | 0.542092 |
def tiene_bingo (lista_carton,lista_sorteados):
for i in lista_carton:
if lista_carton == lista_sorteados:
return True
return False
def va_por_uno (lista_carton,lista_sorteados):
nueva_lista = []
contador = 1
for i in lista_sorteados:
if i not in lista_carton:
nueva_lista.append(i)
contador= contador+1
if len(nueva_lista) == 1 :
return True
else:
return False
print(va_por_uno([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],[123,2,3,4,5,6,7,8,9,10,11,12,13,14,15]))
| true | true |
1c307f7380f8fd71a24f0817b185c3e074fee827 | 788 | py | Python | profiles_api/migrations/0002_profilefeeditem.py | ViswanathToku/profiles-rest-api | d1f803cf4fa0bd1a9256981c6f36b235ff0921ae | [
"MIT"
] | null | null | null | profiles_api/migrations/0002_profilefeeditem.py | ViswanathToku/profiles-rest-api | d1f803cf4fa0bd1a9256981c6f36b235ff0921ae | [
"MIT"
] | null | null | null | profiles_api/migrations/0002_profilefeeditem.py | ViswanathToku/profiles-rest-api | d1f803cf4fa0bd1a9256981c6f36b235ff0921ae | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2020-09-02 07:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the ``ProfileFeedItem`` table (auto-generated by Django 2.2)."""
    dependencies = [
        ('profiles_api', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='ProfileFeedItem',
            fields=[
                # Implicit auto-increment primary key added by Django.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status_text', models.CharField(max_length=255)),
                # Stamped once on insert; never updated afterwards.
                ('created_on', models.DateTimeField(auto_now_add=True)),
                # Deleting the user cascades to their feed items.
                ('user_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 31.52 | 126 | 0.633249 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('profiles_api', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ProfileFeedItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status_text', models.CharField(max_length=255)),
('created_on', models.DateTimeField(auto_now_add=True)),
('user_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true | true |
1c3080268c0c639b8ac8ad3feeff60bd30358ede | 483 | py | Python | webdemo/db/models.py | HandingHu/webdemo | b9fdb67b66da8e59097c962971b32a8d7a3fc471 | [
"Apache-2.0"
] | null | null | null | webdemo/db/models.py | HandingHu/webdemo | b9fdb67b66da8e59097c962971b32a8d7a3fc471 | [
"Apache-2.0"
] | null | null | null | webdemo/db/models.py | HandingHu/webdemo | b9fdb67b66da8e59097c962971b32a8d7a3fc471 | [
"Apache-2.0"
] | null | null | null |
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext import declarative
from sqlalchemy import Index
Base = declarative.declarative_base()
class User(Base):
    """User table"""
    __tablename__ = 'user'
    __table_args__ = (
        # Secondary index for lookups by the external user_id.
        Index('ix_user_user_id', 'user_id'),
    )
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # External identifier; indexed via __table_args__, not unique here.
    user_id = Column(String(255), nullable=False)
    # Display name; must be unique across users.
    name = Column(String(64), nullable=False, unique=True)
    # Optional contact email.
    email = Column(String(255))
| 23 | 58 | 0.693582 |
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext import declarative
from sqlalchemy import Index
Base = declarative.declarative_base()
class User(Base):
__tablename__ = 'user'
__table_args__ = (
Index('ix_user_user_id', 'user_id'),
)
id = Column(Integer, primary_key=True)
user_id = Column(String(255), nullable=False)
name = Column(String(64), nullable=False, unique=True)
email = Column(String(255))
| true | true |
1c3080d4f7687ff106fe73ad8217819e989cb32a | 1,802 | py | Python | baselines/unrel/spatial_features.py | sadjadasghari/SpatialSense | 4cb5ecb4b99dbea76ecb92878cce411e1c5edfcd | [
"BSD-2-Clause"
] | null | null | null | baselines/unrel/spatial_features.py | sadjadasghari/SpatialSense | 4cb5ecb4b99dbea76ecb92878cce411e1c5edfcd | [
"BSD-2-Clause"
] | null | null | null | baselines/unrel/spatial_features.py | sadjadasghari/SpatialSense | 4cb5ecb4b99dbea76ecb92878cce411e1c5edfcd | [
"BSD-2-Clause"
] | null | null | null | import pdb
import json
import pickle
import numpy as np
import math
import random
from sklearn.mixture import GaussianMixture
def raw_spatial_feature(bbox_s, bbox_o):
    """Compute the 6-D raw spatial feature for a (subject, object) box pair.

    Boxes are indexed as [ymin, ymax, xmin, xmax] (inferred from the
    width/height arithmetic below).  Returns a numpy array of:
    normalized center offsets (x, y), relative scale, both aspect
    ratios, and the square root of the intersection-over-union.
    """
    ys_min, ys_max, xs_min, xs_max = bbox_s[0], bbox_s[1], bbox_s[2], bbox_s[3]
    yo_min, yo_max, xo_min, xo_max = bbox_o[0], bbox_o[1], bbox_o[2], bbox_o[3]
    width_s, height_s = xs_max - xs_min, ys_max - ys_min
    width_o, height_o = xo_max - xo_min, yo_max - yo_min
    # Box areas (used both for normalization and for the union below).
    area_s = width_s * height_s
    area_o = width_o * height_o
    # Offset between the two box centers, object minus subject.
    dx = (xo_min + xo_max) / 2. - (xs_min + xs_max) / 2.
    dy = (yo_min + yo_max) / 2. - (ys_min + ys_max) / 2.
    # Intersection rectangle, clamped to zero when the boxes are disjoint.
    inter_w = max(min(xs_max, xo_max) - max(xs_min, xo_min), 0)
    inter_h = max(min(ys_max, yo_max) - max(ys_min, yo_min), 0)
    inter = inter_w * inter_h
    union = area_s + area_o - inter
    return np.asarray([
        dx / math.sqrt(area_s),       # x offset, normalized by subject scale
        dy / math.sqrt(area_s),       # y offset, normalized by subject scale
        math.sqrt(area_o / area_s),   # relative scale of object vs subject
        width_s / height_s,           # subject aspect ratio
        width_o / height_o,           # object aspect ratio
        math.sqrt(inter / union),     # sqrt of IoU
    ])
if __name__ == '__main__':
    # Fit a 400-component Gaussian mixture over the raw spatial features
    # of every annotated (subject, object) box pair, excluding the test
    # split, and persist it for later feature encoding.
    data = json.load(open('../annotations.json'))
    X = []
    for img in data:
        if img['split'] == 'test':
            continue
        for annot in img['annotations']:
            X.append(raw_spatial_feature(annot['subject']['bbox'], annot['object']['bbox']))
    random.shuffle(X)
    X = np.vstack(X)
    gmm = GaussianMixture(400, max_iter=100, verbose=1)
    gmm.fit(X)
    # NOTE: file handles opened here are never closed explicitly;
    # tolerable for a one-shot script, but worth a ``with`` block.
    pickle.dump(gmm, open('gmm.pickle', 'wb'))
| 27.30303 | 97 | 0.554939 | import pdb
import json
import pickle
import numpy as np
import math
import random
from sklearn.mixture import GaussianMixture
def raw_spatial_feature(bbox_s, bbox_o):
w_s = bbox_s[3] - bbox_s[2]
h_s = bbox_s[1] - bbox_s[0]
w_o = bbox_o[3] - bbox_o[2]
h_o = bbox_o[1] - bbox_o[0]
scale_s = w_s * h_s;
scale_o = w_o * h_o;
xc_s = (bbox_s[2] + bbox_s[3]) / 2.
yc_s = (bbox_s[0] + bbox_s[1]) / 2.
xc_o = (bbox_o[2] + bbox_o[3]) / 2.
yc_o = (bbox_o[0] + bbox_o[1]) / 2.
offsetx = xc_o - xc_s
offsety = yc_o - yc_s
aspect_s = w_s / h_s;
aspect_o = w_o / h_o;
boxI_xmin = max(bbox_s[2], bbox_o[2])
boxI_ymin = max(bbox_s[0], bbox_o[0])
boxI_xmax = min(bbox_s[3], bbox_o[3])
boxI_ymax = min(bbox_s[1], bbox_o[1])
wI = max(boxI_xmax - boxI_xmin, 0)
yI = max(boxI_ymax - boxI_ymin, 0)
areaI = wI * yI
areaU = scale_s + scale_o - areaI
feature = np.asarray([offsetx / math.sqrt(scale_s),
offsety / math.sqrt(scale_s),
math.sqrt(scale_o / scale_s),
aspect_s,
aspect_o,
math.sqrt(areaI / areaU)])
return feature
if __name__ == '__main__':
data = json.load(open('../annotations.json'))
X = []
for img in data:
if img['split'] == 'test':
continue
for annot in img['annotations']:
X.append(raw_spatial_feature(annot['subject']['bbox'], annot['object']['bbox']))
random.shuffle(X)
X = np.vstack(X)
gmm = GaussianMixture(400, max_iter=100, verbose=1)
gmm.fit(X)
pickle.dump(gmm, open('gmm.pickle', 'wb'))
| true | true |
1c30813c31cea016c2bfc6735c4a6c49e9435a81 | 82 | py | Python | src/server/roles.py | Duje1/discord-bot | e764fa7f4d8270c67128653fc53ad350d2269739 | [
"MIT"
] | 1 | 2020-10-05T16:56:36.000Z | 2020-10-05T16:56:36.000Z | src/server/roles.py | Duje1/discord-bot | e764fa7f4d8270c67128653fc53ad350d2269739 | [
"MIT"
] | 3 | 2020-10-08T14:46:01.000Z | 2022-03-01T23:36:40.000Z | src/server/roles.py | Duje1/discord-bot | e764fa7f4d8270c67128653fc53ad350d2269739 | [
"MIT"
] | 2 | 2020-10-11T10:01:50.000Z | 2020-10-18T18:12:19.000Z | ADMIN = 762379009672740905
BOT = 762734724204199946
TESTROLE = 763054600063549440
| 20.5 | 29 | 0.853659 | ADMIN = 762379009672740905
BOT = 762734724204199946
TESTROLE = 763054600063549440
| true | true |
1c308275c43b758f63c40d690cd36354a92de7e0 | 1,892 | py | Python | train.py | jakubzadrozny/kmml-data-challenge | 5127fb1df14808fb9b5cda2599b503beba6ccb23 | [
"MIT"
] | null | null | null | train.py | jakubzadrozny/kmml-data-challenge | 5127fb1df14808fb9b5cda2599b503beba6ccb23 | [
"MIT"
] | null | null | null | train.py | jakubzadrozny/kmml-data-challenge | 5127fb1df14808fb9b5cda2599b503beba6ccb23 | [
"MIT"
] | null | null | null | import numpy as np
from data import load_data
from ridge import KRRClassifier
from logistic import KernelLogisticClassifier
from svm import KSVM
from utils import KernelCrossValidation
from kernels import SpectrumKernel, SumKernel, SubstringKernel
SEED = 47
FOLDS = 10
model_path = None # set this to save trained model in a file
results_path = None # set this to save CV results in a .csv file
kernels = [
# Kernel for D0
[
SumKernel([
SubstringKernel(dataset_id=0, k=10, alpha=0.23, normalize="sqrt"),
SpectrumKernel(k=9, normalize="sqrt"),
]),
],
# Kernel for D1
[
SumKernel([
SubstringKernel(dataset_id=1, k=9, alpha=0.27, normalize="sqrt"),
SubstringKernel(dataset_id=1, k=10, alpha=0.27, normalize="sqrt"),
SubstringKernel(dataset_id=1, k=8, alpha=0.23, normalize="sqrt"),
SpectrumKernel(k=8, normalize="sqrt"),
SpectrumKernel(k=6, normalize="sqrt"),
SpectrumKernel(k=5, normalize="sqrt"),
]),
],
# Kernel for D2
[
SumKernel([
SubstringKernel(dataset_id=2, k=7, alpha=0.27, normalize="sqrt"),
SubstringKernel(dataset_id=2, k=8, alpha=0.25, normalize="sqrt"),
SpectrumKernel(k=7, normalize="sqrt"),
SpectrumKernel(k=6, normalize="sqrt"),
]),
],
]
lambdas = [
1e-5,
3e-5,
1e-4,
3e-4,
5e-4,
1e-3,
3e-3,
5e-3,
1e-2,
]
models = [
KRRClassifier,
KernelLogisticClassifier,
KSVM,
]
def select_model(dataset_id):
    """Cross-validate every model/kernel/lambda combination on a dataset.

    ``dataset_id`` indexes both the dataset loaded by ``load_data`` and
    the per-dataset kernel list in the module-level ``kernels``.
    Returns whatever ``KernelCrossValidation.fit`` returns.
    """
    # Fix the RNG so fold splits are reproducible across runs.
    np.random.seed(SEED)
    X, y = load_data(dataset_id)
    cv = KernelCrossValidation(models, kernels[dataset_id], lambdas,
                               folds=FOLDS, model_path=model_path, results_path=results_path)
    return cv.fit(X, y)
if __name__ == '__main__':
select_model(0)
| 24.571429 | 93 | 0.609937 | import numpy as np
from data import load_data
from ridge import KRRClassifier
from logistic import KernelLogisticClassifier
from svm import KSVM
from utils import KernelCrossValidation
from kernels import SpectrumKernel, SumKernel, SubstringKernel
SEED = 47
FOLDS = 10
model_path = None
results_path = None
kernels = [
[
SumKernel([
SubstringKernel(dataset_id=0, k=10, alpha=0.23, normalize="sqrt"),
SpectrumKernel(k=9, normalize="sqrt"),
]),
],
[
SumKernel([
SubstringKernel(dataset_id=1, k=9, alpha=0.27, normalize="sqrt"),
SubstringKernel(dataset_id=1, k=10, alpha=0.27, normalize="sqrt"),
SubstringKernel(dataset_id=1, k=8, alpha=0.23, normalize="sqrt"),
SpectrumKernel(k=8, normalize="sqrt"),
SpectrumKernel(k=6, normalize="sqrt"),
SpectrumKernel(k=5, normalize="sqrt"),
]),
],
[
SumKernel([
SubstringKernel(dataset_id=2, k=7, alpha=0.27, normalize="sqrt"),
SubstringKernel(dataset_id=2, k=8, alpha=0.25, normalize="sqrt"),
SpectrumKernel(k=7, normalize="sqrt"),
SpectrumKernel(k=6, normalize="sqrt"),
]),
],
]
lambdas = [
1e-5,
3e-5,
1e-4,
3e-4,
5e-4,
1e-3,
3e-3,
5e-3,
1e-2,
]
models = [
KRRClassifier,
KernelLogisticClassifier,
KSVM,
]
def select_model(dataset_id):
np.random.seed(SEED)
X, y = load_data(dataset_id)
cv = KernelCrossValidation(models, kernels[dataset_id], lambdas,
folds=FOLDS, model_path=model_path, results_path=results_path)
return cv.fit(X, y)
if __name__ == '__main__':
select_model(0)
| true | true |
1c3082a7b233f9f2ee619318845bb47275ea08c1 | 189 | py | Python | tweets/api/pagination.py | rakibulislam01/Tweetme | ae787b1b6c0303ba8a52a804764e9fc5853b2219 | [
"MIT"
] | null | null | null | tweets/api/pagination.py | rakibulislam01/Tweetme | ae787b1b6c0303ba8a52a804764e9fc5853b2219 | [
"MIT"
] | null | null | null | tweets/api/pagination.py | rakibulislam01/Tweetme | ae787b1b6c0303ba8a52a804764e9fc5853b2219 | [
"MIT"
] | null | null | null | from rest_framework import pagination
class StandardResultPagination(pagination.PageNumberPagination):
page_size = 10
page_size_query_param = 'page_size'
max_page_size = 1000
| 23.625 | 64 | 0.798942 | from rest_framework import pagination
class StandardResultPagination(pagination.PageNumberPagination):
page_size = 10
page_size_query_param = 'page_size'
max_page_size = 1000
| true | true |
1c3083b053c36cb7424857788d7f86f73d8c2312 | 862 | py | Python | azure-mgmt-dns/azure/mgmt/dns/models/ptr_record.py | CharaD7/azure-sdk-for-python | 9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c | [
"MIT"
] | null | null | null | azure-mgmt-dns/azure/mgmt/dns/models/ptr_record.py | CharaD7/azure-sdk-for-python | 9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c | [
"MIT"
] | null | null | null | azure-mgmt-dns/azure/mgmt/dns/models/ptr_record.py | CharaD7/azure-sdk-for-python | 9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PtrRecord(Model):
"""A PTR record.
:param ptrdname: Gets or sets the PTR target domain name for this record
without a terminating dot.
:type ptrdname: str
"""
_attribute_map = {
'ptrdname': {'key': 'ptrdname', 'type': 'str'},
}
def __init__(self, ptrdname=None):
self.ptrdname = ptrdname
| 29.724138 | 76 | 0.569606 |
from msrest.serialization import Model
class PtrRecord(Model):
_attribute_map = {
'ptrdname': {'key': 'ptrdname', 'type': 'str'},
}
def __init__(self, ptrdname=None):
self.ptrdname = ptrdname
| true | true |
1c30848fe8db838bf2ea7ab14ebea0d07ae3d297 | 2,311 | py | Python | setup.py | mark-mishyn/django-axes | dfaf67810abd21a0e76200a4906c1bffdd4fa9c9 | [
"MIT"
] | null | null | null | setup.py | mark-mishyn/django-axes | dfaf67810abd21a0e76200a4906c1bffdd4fa9c9 | [
"MIT"
] | null | null | null | setup.py | mark-mishyn/django-axes | dfaf67810abd21a0e76200a4906c1bffdd4fa9c9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name="django-axes",
description="Keep track of failed login attempts in Django-powered sites.",
long_description="\n".join(
[
open("README.rst", encoding="utf-8").read(),
open("CHANGES.rst", encoding="utf-8").read(),
]
),
keywords="authentication django pci security",
author=", ".join(
[
"Josh VanderLinden",
"Philip Neustrom",
"Michael Blume",
"Alex Clark",
"Camilo Nova",
"Aleksi Hakli",
]
),
author_email="security@jazzband.co",
maintainer="Jazzband",
maintainer_email="security@jazzband.co",
url="https://github.com/jazzband/django-axes",
project_urls={
"Documentation": "https://django-axes.readthedocs.io/",
"Source": "https://github.com/jazzband/django-axes",
"Tracker": "https://github.com/jazzband/django-axes/issues",
},
license="MIT",
package_dir={"axes": "axes"},
use_scm_version=True,
setup_requires=["setuptools_scm"],
python_requires="~=3.6",
install_requires=["django>=1.11", "django-appconf>=1.0.3", "django-ipware>=2.0.2"],
include_package_data=True,
packages=find_packages(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Environment :: Plugins",
"Framework :: Django",
"Framework :: Django :: 1.11",
"Framework :: Django :: 2.2",
"Framework :: Django :: 3.0",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Internet :: Log Analysis",
"Topic :: Security",
"Topic :: System :: Logging",
],
zip_safe=False,
)
| 34.492537 | 87 | 0.581134 |
from setuptools import setup, find_packages
setup(
name="django-axes",
description="Keep track of failed login attempts in Django-powered sites.",
long_description="\n".join(
[
open("README.rst", encoding="utf-8").read(),
open("CHANGES.rst", encoding="utf-8").read(),
]
),
keywords="authentication django pci security",
author=", ".join(
[
"Josh VanderLinden",
"Philip Neustrom",
"Michael Blume",
"Alex Clark",
"Camilo Nova",
"Aleksi Hakli",
]
),
author_email="security@jazzband.co",
maintainer="Jazzband",
maintainer_email="security@jazzband.co",
url="https://github.com/jazzband/django-axes",
project_urls={
"Documentation": "https://django-axes.readthedocs.io/",
"Source": "https://github.com/jazzband/django-axes",
"Tracker": "https://github.com/jazzband/django-axes/issues",
},
license="MIT",
package_dir={"axes": "axes"},
use_scm_version=True,
setup_requires=["setuptools_scm"],
python_requires="~=3.6",
install_requires=["django>=1.11", "django-appconf>=1.0.3", "django-ipware>=2.0.2"],
include_package_data=True,
packages=find_packages(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Environment :: Plugins",
"Framework :: Django",
"Framework :: Django :: 1.11",
"Framework :: Django :: 2.2",
"Framework :: Django :: 3.0",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Internet :: Log Analysis",
"Topic :: Security",
"Topic :: System :: Logging",
],
zip_safe=False,
)
| true | true |
1c3085fd230c87ccfd30d9a8b6765ad6ffd8fc48 | 1,277 | py | Python | Modeling_Scripts/my_utils.py | VolpeUSDOT/gtfs-measures | 0530d3c7193f10d591edd446d7e4985d03a7c48a | [
"CC0-1.0"
] | 3 | 2019-08-29T13:31:14.000Z | 2021-06-18T06:10:06.000Z | Modeling_Scripts/my_utils.py | VolpeUSDOT/gtfs-measures | 0530d3c7193f10d591edd446d7e4985d03a7c48a | [
"CC0-1.0"
] | null | null | null | Modeling_Scripts/my_utils.py | VolpeUSDOT/gtfs-measures | 0530d3c7193f10d591edd446d7e4985d03a7c48a | [
"CC0-1.0"
] | null | null | null | #-------------------------------------------------------------
#
# Utility functions for printing progress and start/end timing.
#
# Stephen Zitzow-Childs
#
# Volpe National Transportation Systems Center, USDOT
#
# Last modified: 6/18/2017
# Note: Newer version of this needs to be loaded and incorporated (7/20/2017)
#-------------------------------------------------------------
import sys
import datetime
def update():
    """Write one progress dot to stdout, flushing so it appears at once."""
    print('.', end='', flush=True)
def final():
    """Terminate the progress-dot line by writing a newline to stdout."""
    print('', flush=True)
def print_start_time():
    """Print a run-start banner and return the timestamp it reports."""
    begun = datetime.datetime.now()
    banner = 'Start at {:%Y-%m-%d %H:%M:%S}'.format(begun)
    print('')
    print(banner)
    print('=====================================================')
    return begun
def print_end_time(start_time):
    """Print a run-end banner with the elapsed time since *start_time*.

    Bug fix: ``/`` is true division in Python 3, so ``hours``/``mins``
    were floats and the ``{:02d}`` format raised ValueError; use floor
    division instead.  Also use ``total_seconds()`` rather than
    ``.seconds`` so runs longer than a day are not silently wrapped.
    """
    end_time = datetime.datetime.now()
    total_run_time = (end_time - start_time)
    # Calculate duration
    elapsed = int(total_run_time.total_seconds())
    hours = elapsed // 3600
    mins = (elapsed % 3600) // 60
    secs = elapsed % 60
    print('End at {:%Y-%m-%d %H:%M:%S} -- Elapsed {:02d}:{:02d}:{:02d}'.format(end_time, hours, mins, secs))
    print('=====================================================')
print('') | 29.022727 | 110 | 0.528583 |
import sys
import datetime
def update():
sys.stdout.write('.')
sys.stdout.flush()
return
def final():
sys.stdout.write('\n')
sys.stdout.flush()
return
def print_start_time():
start_time = datetime.datetime.now()
print('')
print('Start at {:%Y-%m-%d %H:%M:%S}'.format(start_time))
print('=====================================================')
return(start_time)
def print_end_time(start_time):
end_time = datetime.datetime.now()
total_run_time = (end_time - start_time)
hours = total_run_time.seconds / 3600
mins = (total_run_time.seconds % 3600) / 60
secs = (total_run_time.seconds % 3600) % 60
print('End at {:%Y-%m-%d %H:%M:%S} -- Elapsed {:02d}:{:02d}:{:02d}'.format(end_time, hours, mins, secs))
print('=====================================================')
print('') | true | true |
1c308617feebe376013412816298504ee37251c6 | 314 | py | Python | venv/Scripts/Ex 08.py | brunobendel/Exercicios-python-Pycharm | 145ded6cb5533aeef1b89f0bce20f0a90e37216c | [
"MIT"
] | null | null | null | venv/Scripts/Ex 08.py | brunobendel/Exercicios-python-Pycharm | 145ded6cb5533aeef1b89f0bce20f0a90e37216c | [
"MIT"
] | null | null | null | venv/Scripts/Ex 08.py | brunobendel/Exercicios-python-Pycharm | 145ded6cb5533aeef1b89f0bce20f0a90e37216c | [
"MIT"
] | null | null | null | m = float(input('Digite um valor em m: '))
Valor = print("Em mm {:.2f}, em cm {}, em dm {}, em dam {}, em hm {}, em km {}".format((m*1000),(m*100),(m*10),(m/10),(m/100),(m/1000)))
# {:.2f} seguinifica que todo numeoro vai ser representado 2 numeros flutuantes depois da virgula
# 100*10/100= 10 >> 10% de 100 = 10
| 62.8 | 136 | 0.60828 | m = float(input('Digite um valor em m: '))
Valor = print("Em mm {:.2f}, em cm {}, em dm {}, em dam {}, em hm {}, em km {}".format((m*1000),(m*100),(m*10),(m/10),(m/100),(m/1000)))
| true | true |
1c308856f546fd03749470355c1de9d5256d0fed | 259 | py | Python | exception handling in file.py | syed219/letsupgrade-pytho-essentials | 3306fd23e30de3bced2ed5bb285f5399731d64b6 | [
"Apache-2.0"
] | null | null | null | exception handling in file.py | syed219/letsupgrade-pytho-essentials | 3306fd23e30de3bced2ed5bb285f5399731d64b6 | [
"Apache-2.0"
] | null | null | null | exception handling in file.py | syed219/letsupgrade-pytho-essentials | 3306fd23e30de3bced2ed5bb285f5399731d64b6 | [
"Apache-2.0"
] | null | null | null | file=open("Myfile.txt","w")
file.write("Hello")
file.close()
try:
file=open("Myfile.txt","r")
file.write("Hello....!!! This is Aishu")
file.close()
print("success")
except Exception as e:
print(e)
finally:
print("---END---") | 21.583333 | 45 | 0.567568 | file=open("Myfile.txt","w")
file.write("Hello")
file.close()
try:
file=open("Myfile.txt","r")
file.write("Hello....!!! This is Aishu")
file.close()
print("success")
except Exception as e:
print(e)
finally:
print("---END---") | true | true |
1c3088f84b738b7692343342b33b77d35fd52bab | 1,385 | py | Python | bin/translatedarb_to_pojson.py | tundak/kalium_wallet_flutter | 87ab154d748d08bb8f90bb42c0fdebbe193f48d2 | [
"MIT"
] | 656 | 2019-03-09T02:50:38.000Z | 2022-03-29T14:00:04.000Z | bin/translatedarb_to_pojson.py | tundak/kalium_wallet_flutter | 87ab154d748d08bb8f90bb42c0fdebbe193f48d2 | [
"MIT"
] | 145 | 2019-03-08T18:19:09.000Z | 2022-03-23T15:59:34.000Z | bin/translatedarb_to_pojson.py | tundak/kalium_wallet_flutter | 87ab154d748d08bb8f90bb42c0fdebbe193f48d2 | [
"MIT"
] | 260 | 2019-03-09T23:37:56.000Z | 2022-03-30T02:38:26.000Z | #!/usr/bin/env python
import argparse
import json
import sys
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', help='Path to arb file', required=True)
options = parser.parse_args()
if not options.file.endswith("arb"):
print(f"Can't process {options.file}, file must be a .arb")
parser.print_help()
sys.exit(0)
out_file = options.file.replace(".arb", ".json")
ret = []
with open('lib/l10n/intl_messages.arb') as base_file:
base_data = json.load(base_file)
with open('lib/l10n/intl_messages.json') as json_file:
json_data = json.load(json_file)
with open(options.file) as arb_file:
data = json.load(arb_file)
obj = {}
for key, value in data.items():
if key.startswith("@"):
continue
obj['term'] = base_data[key].replace("\n", "<newline>")
obj['definition'] = value.replace("\n", "<newline>")
for i in json_data:
if i['term'] == obj['term']:
obj['context'] = i['context']
obj['term_plural'] = ""
obj['reference'] = key
obj['comment'] = ""
ret.append(obj)
obj = {}
with open(out_file, 'w') as outf:
json.dump(ret, outf, indent=4, ensure_ascii=False)
print(f"Wrote {out_file}")
| 34.625 | 75 | 0.550903 |
import argparse
import json
import sys
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', help='Path to arb file', required=True)
options = parser.parse_args()
if not options.file.endswith("arb"):
print(f"Can't process {options.file}, file must be a .arb")
parser.print_help()
sys.exit(0)
out_file = options.file.replace(".arb", ".json")
ret = []
with open('lib/l10n/intl_messages.arb') as base_file:
base_data = json.load(base_file)
with open('lib/l10n/intl_messages.json') as json_file:
json_data = json.load(json_file)
with open(options.file) as arb_file:
data = json.load(arb_file)
obj = {}
for key, value in data.items():
if key.startswith("@"):
continue
obj['term'] = base_data[key].replace("\n", "<newline>")
obj['definition'] = value.replace("\n", "<newline>")
for i in json_data:
if i['term'] == obj['term']:
obj['context'] = i['context']
obj['term_plural'] = ""
obj['reference'] = key
obj['comment'] = ""
ret.append(obj)
obj = {}
with open(out_file, 'w') as outf:
json.dump(ret, outf, indent=4, ensure_ascii=False)
print(f"Wrote {out_file}")
| true | true |
1c30897b295b6b42f3872aeec1b687ceea9beb6b | 925 | py | Python | image_vision/extensions/visualizers/base.py | IvanKosik/ImageVision | 038b2b3948a16adc4c2abb3bc8c1c32f62aa4319 | [
"BSD-3-Clause"
] | null | null | null | image_vision/extensions/visualizers/base.py | IvanKosik/ImageVision | 038b2b3948a16adc4c2abb3bc8c1c32f62aa4319 | [
"BSD-3-Clause"
] | null | null | null | image_vision/extensions/visualizers/base.py | IvanKosik/ImageVision | 038b2b3948a16adc4c2abb3bc8c1c32f62aa4319 | [
"BSD-3-Clause"
] | null | null | null | from core import Data
from extensions.mdi.windows import DataViewerSubWindow
from PyQt5.QtCore import QObject, pyqtSignal
import abc
class DataVisualizerMeta(abc.ABCMeta, type(QObject)):
_DATA_TYPES = ()
@property
def data_types(cls) -> tuple:
return cls._DATA_TYPES
class DataVisualizer(QObject, metaclass=DataVisualizerMeta):
#% _DATA_TYPES = ()
data_visualized = pyqtSignal(DataViewerSubWindow)
def __init__(self, mdi_area):
super().__init__()
self.mdi_area = mdi_area
@property
def data_types(self):
return type(self).data_types
def visualize_data(self, data: Data):
data_viewer_sub_window = self._visualize_data(data)
self.data_visualized.emit(data_viewer_sub_window)
return data_viewer_sub_window
@abc.abstractmethod
def _visualize_data(self, data: Data):
...
| 23.717949 | 61 | 0.675676 | from core import Data
from extensions.mdi.windows import DataViewerSubWindow
from PyQt5.QtCore import QObject, pyqtSignal
import abc
class DataVisualizerMeta(abc.ABCMeta, type(QObject)):
_DATA_TYPES = ()
@property
def data_types(cls) -> tuple:
return cls._DATA_TYPES
class DataVisualizer(QObject, metaclass=DataVisualizerMeta):
data_visualized = pyqtSignal(DataViewerSubWindow)
def __init__(self, mdi_area):
super().__init__()
self.mdi_area = mdi_area
@property
def data_types(self):
return type(self).data_types
def visualize_data(self, data: Data):
data_viewer_sub_window = self._visualize_data(data)
self.data_visualized.emit(data_viewer_sub_window)
return data_viewer_sub_window
@abc.abstractmethod
def _visualize_data(self, data: Data):
...
| true | true |
1c3089b3a8364d43bf3095d1e1152f085836cfaf | 741 | py | Python | imapclient/test/test_version.py | maxiimou/imapclient | 755936fb2ac4a3da9f898e504cd1a8f4b5da9b84 | [
"BSD-3-Clause"
] | 1 | 2017-05-12T23:54:10.000Z | 2017-05-12T23:54:10.000Z | imapclient/test/test_version.py | maxiimou/imapclient | 755936fb2ac4a3da9f898e504cd1a8f4b5da9b84 | [
"BSD-3-Clause"
] | 2 | 2019-05-01T08:41:02.000Z | 2020-01-03T21:54:51.000Z | imapclient/test/test_version.py | maxiimou/imapclient | 755936fb2ac4a3da9f898e504cd1a8f4b5da9b84 | [
"BSD-3-Clause"
] | 5 | 2015-12-03T03:17:52.000Z | 2021-01-31T13:10:25.000Z | from __future__ import unicode_literals
from imapclient import _imapclient_version_string
from imapclient.test.util import unittest
class TestVersionString(unittest.TestCase):
def test_dot_oh(self):
self.assertEqual(_imapclient_version_string((1, 0, 0, 'final')), '1.0')
def test_minor(self):
self.assertEqual(_imapclient_version_string((2, 1, 0, 'final')), '2.1')
def test_point_release(self):
self.assertEqual(_imapclient_version_string((1, 2, 3, 'final')), '1.2.3')
def test_alpha(self):
self.assertEqual(_imapclient_version_string((2, 1, 0, 'alpha')), '2.1-alpha')
def test_beta_point(self):
self.assertEqual(_imapclient_version_string((2, 1, 3, 'beta')), '2.1.3-beta')
| 33.681818 | 85 | 0.701754 | from __future__ import unicode_literals
from imapclient import _imapclient_version_string
from imapclient.test.util import unittest
class TestVersionString(unittest.TestCase):
def test_dot_oh(self):
self.assertEqual(_imapclient_version_string((1, 0, 0, 'final')), '1.0')
def test_minor(self):
self.assertEqual(_imapclient_version_string((2, 1, 0, 'final')), '2.1')
def test_point_release(self):
self.assertEqual(_imapclient_version_string((1, 2, 3, 'final')), '1.2.3')
def test_alpha(self):
self.assertEqual(_imapclient_version_string((2, 1, 0, 'alpha')), '2.1-alpha')
def test_beta_point(self):
self.assertEqual(_imapclient_version_string((2, 1, 3, 'beta')), '2.1.3-beta')
| true | true |
1c308a7fbdb910ad969a4423381b4d5c606d7b13 | 7,370 | py | Python | examples/pybullet/gym/pybullet_envs/prediction/pybullet_sim_gym_env.py | foolyc/bullet3 | f4f5f70886e8d85bb5c000fe0c443fbf958f45d8 | [
"Zlib"
] | 158 | 2016-11-17T19:37:51.000Z | 2022-03-21T19:57:55.000Z | examples/pybullet/gym/pybullet_envs/prediction/pybullet_sim_gym_env.py | foolyc/bullet3 | f4f5f70886e8d85bb5c000fe0c443fbf958f45d8 | [
"Zlib"
] | 94 | 2016-11-18T09:55:57.000Z | 2021-01-14T08:50:40.000Z | examples/pybullet/gym/pybullet_envs/prediction/pybullet_sim_gym_env.py | foolyc/bullet3 | f4f5f70886e8d85bb5c000fe0c443fbf958f45d8 | [
"Zlib"
] | 51 | 2017-05-24T10:20:25.000Z | 2022-03-17T15:07:02.000Z | """This file implements the gym environment of example PyBullet simulation.
"""
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import math
import time
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import pybullet
from pybullet_envs.bullet import bullet_client
from pybullet_envs.prediction import boxstack_pybullet_sim
import os
import pybullet_data
from pkg_resources import parse_version
class PyBulletSimGymEnv(gym.Env):
"""The gym environment to run pybullet simulations.
"""
metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 50}
def __init__(self,
pybullet_sim_factory=boxstack_pybullet_sim,
render=True,
render_sleep=False,
debug_visualization=True,
hard_reset=False,
render_width=240,
render_height=240,
action_repeat=1,
time_step=1. / 240.,
num_bullet_solver_iterations=50,
urdf_root=pybullet_data.getDataPath()):
"""Initialize the gym environment.
Args:
urdf_root: The path to the urdf data folder.
"""
self._pybullet_sim_factory = pybullet_sim_factory
self._time_step = time_step
self._urdf_root = urdf_root
self._observation = []
self._action_repeat = action_repeat
self._num_bullet_solver_iterations = num_bullet_solver_iterations
self._env_step_counter = 0
self._is_render = render
self._debug_visualization = debug_visualization
self._render_sleep = render_sleep
self._render_width = render_width
self._render_height = render_height
self._cam_dist = .3
self._cam_yaw = 50
self._cam_pitch = -35
self._hard_reset = True
self._last_frame_time = 0.0
optionstring = '--width={} --height={}'.format(render_width, render_height)
print("urdf_root=" + self._urdf_root)
if self._is_render:
self._pybullet_client = bullet_client.BulletClient(connection_mode=pybullet.GUI,
options=optionstring)
else:
self._pybullet_client = bullet_client.BulletClient()
if (debug_visualization == False):
self._pybullet_client.configureDebugVisualizer(flag=self._pybullet_client.COV_ENABLE_GUI,
enable=0)
self._pybullet_client.configureDebugVisualizer(
flag=self._pybullet_client.COV_ENABLE_RGB_BUFFER_PREVIEW, enable=0)
self._pybullet_client.configureDebugVisualizer(
flag=self._pybullet_client.COV_ENABLE_DEPTH_BUFFER_PREVIEW, enable=0)
self._pybullet_client.configureDebugVisualizer(
flag=self._pybullet_client.COV_ENABLE_SEGMENTATION_MARK_PREVIEW, enable=0)
self._pybullet_client.setAdditionalSearchPath(urdf_root)
self.seed()
self.reset()
observation_high = (self._example_sim.GetObservationUpperBound())
observation_low = (self._example_sim.GetObservationLowerBound())
action_dim = self._example_sim.GetActionDimension()
self._action_bound = 1
action_high = np.array([self._action_bound] * action_dim)
self.action_space = spaces.Box(-action_high, action_high)
self.observation_space = spaces.Box(observation_low, observation_high)
self.viewer = None
self._hard_reset = hard_reset # This assignment need to be after reset()
def configure(self, args):
self._args = args
def reset(self):
if self._hard_reset:
self._pybullet_client.resetSimulation()
self._pybullet_client.setPhysicsEngineParameter(
numSolverIterations=int(self._num_bullet_solver_iterations))
self._pybullet_client.setTimeStep(self._time_step)
self._example_sim = self._pybullet_sim_factory.CreateSim(
pybullet_client=self._pybullet_client,
urdf_root=self._urdf_root,
time_step=self._time_step)
else:
self._example_sim.Reset(reload_urdf=False)
self._env_step_counter = 0
#self._pybullet_client.resetDebugVisualizerCamera(
# self._cam_dist, self._cam_yaw, self._cam_pitch, [0, 0, 0])
return self._get_observation()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
"""Step forward the simulation, given the action.
Args:
action: the predicted state
Returns:
observations: The actual state.
reward: The reward for how well the prediction matches the actual state.
done: Whether the episode has ended.
info: A dictionary that stores diagnostic information.
Raises:
ValueError: The action dimension is not the same as the number of motors.
ValueError: The magnitude of actions is out of bounds.
"""
if self._render_sleep:
# Sleep, otherwise the computation takes less time than real time,
# which will make the visualization like a fast-forward video.
time_spent = time.time() - self._last_frame_time
self._last_frame_time = time.time()
time_to_sleep = self._action_repeat * self._time_step - time_spent
if time_to_sleep > 0:
time.sleep(time_to_sleep)
#base_pos = self.minitaur.GetBasePosition()
#self._pybullet_client.resetDebugVisualizerCamera(
# self._cam_dist, self._cam_yaw, self._cam_pitch, base_pos)
for _ in range(self._action_repeat):
self._example_sim.ApplyAction(action)
self._pybullet_client.stepSimulation()
self._env_step_counter += 1
reward = self._reward()
done = self._termination()
return np.array(self._get_observation()), reward, done, {}
def render(self, mode="rgb_array", close=False):
if mode != "rgb_array":
return np.array([])
base_pos = [0, 0, 0]
view_matrix = self._pybullet_client.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=base_pos,
distance=self._cam_dist,
yaw=self._cam_yaw,
pitch=self._cam_pitch,
roll=0,
upAxisIndex=2)
proj_matrix = self._pybullet_client.computeProjectionMatrixFOV(
fov=60, aspect=float(self._render_width) / self._render_width, nearVal=0.01, farVal=100.0)
proj_matrix = [
1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0000200271606445, -1.0, 0.0, 0.0,
-0.02000020071864128, 0.0
]
(_, _, px, _, _) = self._pybullet_client.getCameraImage(
width=self._render_width,
height=self._render_height,
viewMatrix=view_matrix,
projectionMatrix=proj_matrix,
renderer=pybullet.ER_BULLET_HARDWARE_OPENGL) #ER_TINY_RENDERER)
rgb_array = np.array(px, dtype=np.uint8)
rgb_array = np.reshape(rgb_array, (self._render_height, self._render_width, 4))
rgb_array = rgb_array[:, :, :3]
return rgb_array
def _termination(self):
terminate = self._example_sim.Termination()
return terminate
def _reward(self):
reward = 0
return reward
def _get_observation(self):
self._observation = self._example_sim.GetObservation()
return self._observation
if parse_version(gym.__version__) < parse_version('0.9.6'):
_render = render
_reset = reset
_seed = seed
_step = step
| 33.963134 | 98 | 0.695794 |
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import math
import time
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import pybullet
from pybullet_envs.bullet import bullet_client
from pybullet_envs.prediction import boxstack_pybullet_sim
import os
import pybullet_data
from pkg_resources import parse_version
class PyBulletSimGymEnv(gym.Env):
metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 50}
def __init__(self,
pybullet_sim_factory=boxstack_pybullet_sim,
render=True,
render_sleep=False,
debug_visualization=True,
hard_reset=False,
render_width=240,
render_height=240,
action_repeat=1,
time_step=1. / 240.,
num_bullet_solver_iterations=50,
urdf_root=pybullet_data.getDataPath()):
self._pybullet_sim_factory = pybullet_sim_factory
self._time_step = time_step
self._urdf_root = urdf_root
self._observation = []
self._action_repeat = action_repeat
self._num_bullet_solver_iterations = num_bullet_solver_iterations
self._env_step_counter = 0
self._is_render = render
self._debug_visualization = debug_visualization
self._render_sleep = render_sleep
self._render_width = render_width
self._render_height = render_height
self._cam_dist = .3
self._cam_yaw = 50
self._cam_pitch = -35
self._hard_reset = True
self._last_frame_time = 0.0
optionstring = '--width={} --height={}'.format(render_width, render_height)
print("urdf_root=" + self._urdf_root)
if self._is_render:
self._pybullet_client = bullet_client.BulletClient(connection_mode=pybullet.GUI,
options=optionstring)
else:
self._pybullet_client = bullet_client.BulletClient()
if (debug_visualization == False):
self._pybullet_client.configureDebugVisualizer(flag=self._pybullet_client.COV_ENABLE_GUI,
enable=0)
self._pybullet_client.configureDebugVisualizer(
flag=self._pybullet_client.COV_ENABLE_RGB_BUFFER_PREVIEW, enable=0)
self._pybullet_client.configureDebugVisualizer(
flag=self._pybullet_client.COV_ENABLE_DEPTH_BUFFER_PREVIEW, enable=0)
self._pybullet_client.configureDebugVisualizer(
flag=self._pybullet_client.COV_ENABLE_SEGMENTATION_MARK_PREVIEW, enable=0)
self._pybullet_client.setAdditionalSearchPath(urdf_root)
self.seed()
self.reset()
observation_high = (self._example_sim.GetObservationUpperBound())
observation_low = (self._example_sim.GetObservationLowerBound())
action_dim = self._example_sim.GetActionDimension()
self._action_bound = 1
action_high = np.array([self._action_bound] * action_dim)
self.action_space = spaces.Box(-action_high, action_high)
self.observation_space = spaces.Box(observation_low, observation_high)
self.viewer = None
self._hard_reset = hard_reset
def configure(self, args):
self._args = args
def reset(self):
if self._hard_reset:
self._pybullet_client.resetSimulation()
self._pybullet_client.setPhysicsEngineParameter(
numSolverIterations=int(self._num_bullet_solver_iterations))
self._pybullet_client.setTimeStep(self._time_step)
self._example_sim = self._pybullet_sim_factory.CreateSim(
pybullet_client=self._pybullet_client,
urdf_root=self._urdf_root,
time_step=self._time_step)
else:
self._example_sim.Reset(reload_urdf=False)
self._env_step_counter = 0
return self._get_observation()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
if self._render_sleep:
time_spent = time.time() - self._last_frame_time
self._last_frame_time = time.time()
time_to_sleep = self._action_repeat * self._time_step - time_spent
if time_to_sleep > 0:
time.sleep(time_to_sleep)
for _ in range(self._action_repeat):
self._example_sim.ApplyAction(action)
self._pybullet_client.stepSimulation()
self._env_step_counter += 1
reward = self._reward()
done = self._termination()
return np.array(self._get_observation()), reward, done, {}
def render(self, mode="rgb_array", close=False):
if mode != "rgb_array":
return np.array([])
base_pos = [0, 0, 0]
view_matrix = self._pybullet_client.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=base_pos,
distance=self._cam_dist,
yaw=self._cam_yaw,
pitch=self._cam_pitch,
roll=0,
upAxisIndex=2)
proj_matrix = self._pybullet_client.computeProjectionMatrixFOV(
fov=60, aspect=float(self._render_width) / self._render_width, nearVal=0.01, farVal=100.0)
proj_matrix = [
1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0000200271606445, -1.0, 0.0, 0.0,
-0.02000020071864128, 0.0
]
(_, _, px, _, _) = self._pybullet_client.getCameraImage(
width=self._render_width,
height=self._render_height,
viewMatrix=view_matrix,
projectionMatrix=proj_matrix,
renderer=pybullet.ER_BULLET_HARDWARE_OPENGL)
rgb_array = np.array(px, dtype=np.uint8)
rgb_array = np.reshape(rgb_array, (self._render_height, self._render_width, 4))
rgb_array = rgb_array[:, :, :3]
return rgb_array
def _termination(self):
terminate = self._example_sim.Termination()
return terminate
def _reward(self):
reward = 0
return reward
def _get_observation(self):
self._observation = self._example_sim.GetObservation()
return self._observation
if parse_version(gym.__version__) < parse_version('0.9.6'):
_render = render
_reset = reset
_seed = seed
_step = step
| true | true |
1c308b87287ef457b1cf66d441ccf8f1b70d1b0b | 497 | py | Python | dna_features_viewer/BiopythonTranslator/BlackBoxlessLabelTranslator.py | jbloom/DnaFeaturesViewer | 4d6f31d7d8e5942e75d22915ce803ddc574c9d7e | [
"MIT"
] | null | null | null | dna_features_viewer/BiopythonTranslator/BlackBoxlessLabelTranslator.py | jbloom/DnaFeaturesViewer | 4d6f31d7d8e5942e75d22915ce803ddc574c9d7e | [
"MIT"
] | null | null | null | dna_features_viewer/BiopythonTranslator/BlackBoxlessLabelTranslator.py | jbloom/DnaFeaturesViewer | 4d6f31d7d8e5942e75d22915ce803ddc574c9d7e | [
"MIT"
] | null | null | null | from .BiopythonTranslator import BiopythonTranslator
class BlackBoxlessLabelTranslator(BiopythonTranslator):
"""Translates Biopython records into GraphicRecords where annotations
appear black on a white background with no box. Which can be cleaner."""
def compute_feature_box_linewidth(self, feature):
"""Return 0 as this translator doesn't show a box."""
return 0
def compute_feature_box_color(self, feature):
"""Return white."""
return "white" | 38.230769 | 77 | 0.72837 | from .BiopythonTranslator import BiopythonTranslator
class BlackBoxlessLabelTranslator(BiopythonTranslator):
def compute_feature_box_linewidth(self, feature):
return 0
def compute_feature_box_color(self, feature):
return "white" | true | true |
1c308bffa6643041581c3879d2aa9d8b6b93c638 | 121 | py | Python | islandora7_rest/__init__.py | shorock/islandora7-rest | 1dc9d3132f82671efe544ca4a6cd3b72b580e917 | [
"BSD-3-Clause"
] | 3 | 2019-11-01T18:52:14.000Z | 2020-10-06T19:42:55.000Z | islandora7_rest/__init__.py | shorock/islandora7-rest | 1dc9d3132f82671efe544ca4a6cd3b72b580e917 | [
"BSD-3-Clause"
] | null | null | null | islandora7_rest/__init__.py | shorock/islandora7-rest | 1dc9d3132f82671efe544ca4a6cd3b72b580e917 | [
"BSD-3-Clause"
] | 1 | 2019-07-14T21:57:44.000Z | 2019-07-14T21:57:44.000Z | # islandora7_rest/__init__.py
# Copyright (c) 2019 The University of Kansas
from .IslandoraClient import IslandoraClient | 30.25 | 45 | 0.826446 |
from .IslandoraClient import IslandoraClient | true | true |
1c308c83b6ea9a33366dd805fb9691f3a9a11c0d | 12,959 | py | Python | test/functional/dbcrash.py | zentoshi/zentoshi | 99ef65d56ac5d702556f2b718298c34f07168498 | [
"MIT"
] | 4 | 2019-12-08T19:54:02.000Z | 2020-02-04T20:11:14.000Z | test/functional/dbcrash.py | zentoshi/zentoshi | 99ef65d56ac5d702556f2b718298c34f07168498 | [
"MIT"
] | 1 | 2019-11-10T14:06:54.000Z | 2019-11-10T14:06:54.000Z | test/functional/dbcrash.py | zentoshi/zentoshi | 99ef65d56ac5d702556f2b718298c34f07168498 | [
"MIT"
] | 13 | 2019-11-09T17:09:35.000Z | 2021-12-21T07:07:23.000Z | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test recovery from a crash during chainstate writing."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
import random
try:
import http.client as httplib
except ImportError:
import httplib
import errno
'''
Test structure:
- 4 nodes
* node0, node1, and node2 will have different dbcrash ratios, and different
dbcache sizes
* node3 will be a regular node, with no crashing.
* The nodes will not connect to each other.
- use default test framework starting chain. initialize starting_tip_height to
tip height.
- Main loop:
* generate lots of transactions on node3, enough to fill up a block.
* uniformly randomly pick a tip height from starting_tip_height to
tip_height; with probability 1/(height_difference+4), invalidate this block.
* mine enough blocks to overtake tip_height at start of loop.
* for each node in [node0,node1,node2]:
- for each mined block:
* submit block to node
* if node crashed on/after submitting:
- restart until recovery succeeds
- check that utxo matches node3 using gettxoutsetinfo
'''
class ChainstateWriteCrashTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = False
# Set -maxmempool=0 to turn off mempool memory sharing with dbcache
# Set -rpcservertimeout=900 to reduce socket disconnects in this
# long-running test
self.base_args = ["-limitdescendantsize=0", "-maxmempool=0", "-rpcservertimeout=900"]
# Set different crash ratios and cache sizes. Note that not all of
# -dbcache goes to pcoinsTip.
self.node0_args = ["-dbcrashratio=8", "-dbcache=4", "-dbbatchsize=200000"] + self.base_args
self.node1_args = ["-dbcrashratio=16", "-dbcache=8", "-dbbatchsize=200000"] + self.base_args
self.node2_args = ["-dbcrashratio=24", "-dbcache=16", "-dbbatchsize=200000"] + self.base_args
# Node3 is a normal node with default args, except will mine full blocks
self.node3_args = ["-blockmaxweight=4000000"]
self.extra_args = [self.node0_args, self.node1_args, self.node2_args, self.node3_args]
# We'll track some test coverage statistics
self.restart_counts = [0, 0, 0] # Track the restarts for nodes 0-2
self.crashed_on_restart = 0 # Track count of crashes during recovery
def setup_network(self):
# Need a bit of extra time for the nodes to start up for this test
self.add_nodes(self.num_nodes, extra_args=self.extra_args, timewait=90)
self.start_nodes()
# Leave them unconnected, we'll use submitblock directly in this test
# Starts up a given node id, waits for the tip to reach the given block
# hash, and calculates the utxo hash. Exceptions on startup should
# indicate node crash (due to -dbcrashratio), in which case we try again.
# Give up after 60 seconds.
# Returns the utxo hash of the given node.
def restart_node(self, node_index, expected_tip):
time_start = time.time()
while time.time() - time_start < 120:
try:
# Any of these RPC calls could throw due to node crash
self.start_node(node_index)
self.nodes[node_index].waitforblock(expected_tip)
utxo_hash = self.nodes[node_index].gettxoutsetinfo()['hash_serialized_2']
return utxo_hash
except:
# An exception here should mean the node is about to crash.
# If zenxd exits, then try again. wait_for_node_exit()
# should raise an exception if zenxd doesn't exit.
self.wait_for_node_exit(node_index, timeout=10)
self.crashed_on_restart += 1
time.sleep(1)
# If we got here, zenxd isn't coming back up on restart. Could be a
# bug in zenxd, or we've gotten unlucky with our dbcrash ratio --
# perhaps we generated a test case that blew up our cache?
# TODO: If this happens a lot, we should try to restart without -dbcrashratio
# and make sure that recovery happens.
raise AssertionError("Unable to successfully restart node %d in allotted time", node_index)
# Try submitting a block to the given node.
# Catch any exceptions that indicate the node has crashed.
# Returns true if the block was submitted successfully; false otherwise.
def submit_block_catch_error(self, node_index, block):
try:
self.nodes[node_index].submitblock(block)
return True
except (httplib.CannotSendRequest, httplib.RemoteDisconnected) as e:
self.log.debug("node %d submitblock raised exception: %s", node_index, e)
return False
except OSError as e:
self.log.debug("node %d submitblock raised OSError exception: errno=%s", node_index, e.errno)
if e.errno in [errno.EPIPE, errno.ECONNREFUSED, errno.ECONNRESET]:
# The node has likely crashed
return False
else:
# Unexpected exception, raise
raise
# Use submitblock to sync node3's chain with the other nodes
# If submitblock fails, restart the node and get the new utxo hash.
def sync_node3blocks(self, block_hashes):
# If any nodes crash while updating, we'll compare utxo hashes to
# ensure recovery was successful.
node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2']
# Retrieve all the blocks from node3
blocks = []
for block_hash in block_hashes:
blocks.append([block_hash, self.nodes[3].getblock(block_hash, 0)])
# Deliver each block to each other node
for i in range(3):
nodei_utxo_hash = None
self.log.debug("Syncing blocks to node %d", i)
for (block_hash, block) in blocks:
# Get the block from node3, and submit to node_i
self.log.debug("submitting block %s", block_hash)
if not self.submit_block_catch_error(i, block):
# TODO: more carefully check that the crash is due to -dbcrashratio
# (change the exit code perhaps, and check that here?)
self.wait_for_node_exit(i, timeout=30)
self.log.debug("Restarting node %d after block hash %s", i, block_hash)
nodei_utxo_hash = self.restart_node(i, block_hash)
assert nodei_utxo_hash is not None
self.restart_counts[i] += 1
else:
# Clear it out after successful submitblock calls -- the cached
# utxo hash will no longer be correct
nodei_utxo_hash = None
# Check that the utxo hash matches node3's utxo set
# NOTE: we only check the utxo set if we had to restart the node
# after the last block submitted:
# - checking the utxo hash causes a cache flush, which we don't
# want to do every time; so
# - we only update the utxo cache after a node restart, since flushing
# the cache is a no-op at that point
if nodei_utxo_hash is not None:
self.log.debug("Checking txoutsetinfo matches for node %d", i)
assert_equal(nodei_utxo_hash, node3_utxo_hash)
# Verify that the utxo hash of each node matches node3.
# Restart any nodes that crash while querying.
def verify_utxo_hash(self):
node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2']
self.log.info("Verifying utxo hash matches for all nodes")
for i in range(3):
try:
nodei_utxo_hash = self.nodes[i].gettxoutsetinfo()['hash_serialized_2']
except OSError:
# probably a crash on db flushing
nodei_utxo_hash = self.restart_node(i, self.nodes[3].getbestblockhash())
assert_equal(nodei_utxo_hash, node3_utxo_hash)
def generate_small_transactions(self, node, count, utxo_list):
FEE = 1000 # TODO: replace this with node relay fee based calculation
num_transactions = 0
random.shuffle(utxo_list)
while len(utxo_list) >= 2 and num_transactions < count:
tx = CTransaction()
input_amount = 0
for i in range(2):
utxo = utxo_list.pop()
tx.vin.append(CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout'])))
input_amount += int(utxo['amount']*COIN)
output_amount = (input_amount - FEE)//3
if output_amount <= 0:
# Sanity check -- if we chose inputs that are too small, skip
continue
for i in range(3):
tx.vout.append(CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey'])))
# Sign and send the transaction to get into the mempool
tx_signed_hex = node.signrawtransaction(ToHex(tx))['hex']
node.sendrawtransaction(tx_signed_hex)
num_transactions += 1
    def run_test(self):
        """Main test: repeatedly generate transactions and blocks while nodes
        0-2 randomly crash during utxo-db flushes (via -dbcrashratio), then
        check that every node recovers to the same utxo set as node3."""
        # Start by creating a lot of utxos on node3
        initial_height = self.nodes[3].getblockcount()
        utxo_list = create_confirmed_utxos(self.nodes[3].getnetworkinfo()['relayfee'], self.nodes[3], 5000)
        self.log.info("Prepped %d utxo entries", len(utxo_list))
        # Sync these blocks with the other nodes
        block_hashes_to_sync = []
        for height in range(initial_height+1, self.nodes[3].getblockcount()+1):
            block_hashes_to_sync.append(self.nodes[3].getblockhash(height))
        self.log.debug("Syncing %d blocks with other nodes", len(block_hashes_to_sync))
        # Syncing the blocks could cause nodes to crash, so the test begins here.
        self.sync_node3blocks(block_hashes_to_sync)
        starting_tip_height = self.nodes[3].getblockcount()
        # Main test loop:
        # each time through the loop, generate a bunch of transactions,
        # and then either mine a single new block on the tip, or some-sized reorg.
        for i in range(40):
            self.log.info("Iteration %d, generating 2500 transactions %s", i, self.restart_counts)
            # Generate a bunch of small-ish transactions
            self.generate_small_transactions(self.nodes[3], 2500, utxo_list)
            # Pick a random block between current tip, and starting tip
            current_height = self.nodes[3].getblockcount()
            # TODO: re-enable this when ReplayBlocks is fixed to support evodb and additional indexes
            # random_height = random.randint(starting_tip_height, current_height)
            # self.log.debug("At height %d, considering height %d", current_height, random_height)
            # if random_height > starting_tip_height:
            #     # Randomly reorg from this point with some probability (1/4 for
            #     # tip, 1/5 for tip-1, ...)
            #     if random.random() < 1.0/(current_height + 4 - random_height):
            #         self.log.debug("Invalidating block at height %d", random_height)
            #         self.nodes[3].invalidateblock(self.nodes[3].getblockhash(random_height))
            # Now generate new blocks until we pass the old tip height
            self.log.debug("Mining longer tip")
            block_hashes = self.nodes[3].generate(current_height+1-self.nodes[3].getblockcount())
            self.log.debug("Syncing %d new blocks...", len(block_hashes))
            self.sync_node3blocks(block_hashes)
            # Refresh the spendable set from node3's wallet for the next iteration.
            utxo_list = self.nodes[3].listunspent()
            self.log.debug("Node3 utxo count: %d", len(utxo_list))
        # Check that the utxo hashes agree with node3
        # Useful side effect: each utxo cache gets flushed here, so that we
        # won't get crashes on shutdown at the end of the test.
        self.verify_utxo_hash()
        # Check the test coverage
        self.log.info("Restarted nodes: %s; crashes on restart: %d", self.restart_counts, self.crashed_on_restart)
        # If no nodes were restarted, we didn't test anything.
        assert self.restart_counts != [0, 0, 0]
        # Make sure we tested the case of crash-during-recovery.
        assert self.crashed_on_restart > 0
        # Warn if any of the nodes escaped restart.
        for i in range(3):
            if self.restart_counts[i] == 0:
                self.log.warn("Node %d never crashed during utxo flush!", i)
# Script entry point: run the crash test via the test framework's main().
if __name__ == "__main__":
    ChainstateWriteCrashTest().main()
| 47.819188 | 114 | 0.643491 |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
import random
try:
import http.client as httplib
except ImportError:
import httplib
import errno
class ChainstateWriteCrashTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = False
self.base_args = ["-limitdescendantsize=0", "-maxmempool=0", "-rpcservertimeout=900"]
self.node0_args = ["-dbcrashratio=8", "-dbcache=4", "-dbbatchsize=200000"] + self.base_args
self.node1_args = ["-dbcrashratio=16", "-dbcache=8", "-dbbatchsize=200000"] + self.base_args
self.node2_args = ["-dbcrashratio=24", "-dbcache=16", "-dbbatchsize=200000"] + self.base_args
self.node3_args = ["-blockmaxweight=4000000"]
self.extra_args = [self.node0_args, self.node1_args, self.node2_args, self.node3_args]
self.restart_counts = [0, 0, 0] # Track the restarts for nodes 0-2
self.crashed_on_restart = 0 # Track count of crashes during recovery
def setup_network(self):
# Need a bit of extra time for the nodes to start up for this test
self.add_nodes(self.num_nodes, extra_args=self.extra_args, timewait=90)
self.start_nodes()
# Leave them unconnected, we'll use submitblock directly in this test
def restart_node(self, node_index, expected_tip):
time_start = time.time()
while time.time() - time_start < 120:
try:
self.start_node(node_index)
self.nodes[node_index].waitforblock(expected_tip)
utxo_hash = self.nodes[node_index].gettxoutsetinfo()['hash_serialized_2']
return utxo_hash
except:
self.wait_for_node_exit(node_index, timeout=10)
self.crashed_on_restart += 1
time.sleep(1)
# If we got here, zenxd isn't coming back up on restart. Could be a
# perhaps we generated a test case that blew up our cache?
# TODO: If this happens a lot, we should try to restart without -dbcrashratio
# and make sure that recovery happens.
raise AssertionError("Unable to successfully restart node %d in allotted time", node_index)
# Try submitting a block to the given node.
# Catch any exceptions that indicate the node has crashed.
# Returns true if the block was submitted successfully; false otherwise.
def submit_block_catch_error(self, node_index, block):
try:
self.nodes[node_index].submitblock(block)
return True
except (httplib.CannotSendRequest, httplib.RemoteDisconnected) as e:
self.log.debug("node %d submitblock raised exception: %s", node_index, e)
return False
except OSError as e:
self.log.debug("node %d submitblock raised OSError exception: errno=%s", node_index, e.errno)
if e.errno in [errno.EPIPE, errno.ECONNREFUSED, errno.ECONNRESET]:
# The node has likely crashed
return False
else:
# Unexpected exception, raise
raise
# Use submitblock to sync node3's chain with the other nodes
def sync_node3blocks(self, block_hashes):
# ensure recovery was successful.
node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2']
# Retrieve all the blocks from node3
blocks = []
for block_hash in block_hashes:
blocks.append([block_hash, self.nodes[3].getblock(block_hash, 0)])
# Deliver each block to each other node
for i in range(3):
nodei_utxo_hash = None
self.log.debug("Syncing blocks to node %d", i)
for (block_hash, block) in blocks:
# Get the block from node3, and submit to node_i
self.log.debug("submitting block %s", block_hash)
if not self.submit_block_catch_error(i, block):
# TODO: more carefully check that the crash is due to -dbcrashratio
# (change the exit code perhaps, and check that here?)
self.wait_for_node_exit(i, timeout=30)
self.log.debug("Restarting node %d after block hash %s", i, block_hash)
nodei_utxo_hash = self.restart_node(i, block_hash)
assert nodei_utxo_hash is not None
self.restart_counts[i] += 1
else:
# Clear it out after successful submitblock calls -- the cached
# utxo hash will no longer be correct
nodei_utxo_hash = None
# Check that the utxo hash matches node3's utxo set
# want to do every time; so
# - we only update the utxo cache after a node restart, since flushing
# the cache is a no-op at that point
if nodei_utxo_hash is not None:
self.log.debug("Checking txoutsetinfo matches for node %d", i)
assert_equal(nodei_utxo_hash, node3_utxo_hash)
# Verify that the utxo hash of each node matches node3.
# Restart any nodes that crash while querying.
def verify_utxo_hash(self):
node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2']
self.log.info("Verifying utxo hash matches for all nodes")
for i in range(3):
try:
nodei_utxo_hash = self.nodes[i].gettxoutsetinfo()['hash_serialized_2']
except OSError:
# probably a crash on db flushing
nodei_utxo_hash = self.restart_node(i, self.nodes[3].getbestblockhash())
assert_equal(nodei_utxo_hash, node3_utxo_hash)
def generate_small_transactions(self, node, count, utxo_list):
FEE = 1000 # TODO: replace this with node relay fee based calculation
num_transactions = 0
random.shuffle(utxo_list)
while len(utxo_list) >= 2 and num_transactions < count:
tx = CTransaction()
input_amount = 0
for i in range(2):
utxo = utxo_list.pop()
tx.vin.append(CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout'])))
input_amount += int(utxo['amount']*COIN)
output_amount = (input_amount - FEE)//3
if output_amount <= 0:
# Sanity check -- if we chose inputs that are too small, skip
continue
for i in range(3):
tx.vout.append(CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey'])))
# Sign and send the transaction to get into the mempool
tx_signed_hex = node.signrawtransaction(ToHex(tx))['hex']
node.sendrawtransaction(tx_signed_hex)
num_transactions += 1
def run_test(self):
# Start by creating a lot of utxos on node3
initial_height = self.nodes[3].getblockcount()
utxo_list = create_confirmed_utxos(self.nodes[3].getnetworkinfo()['relayfee'], self.nodes[3], 5000)
self.log.info("Prepped %d utxo entries", len(utxo_list))
# Sync these blocks with the other nodes
block_hashes_to_sync = []
for height in range(initial_height+1, self.nodes[3].getblockcount()+1):
block_hashes_to_sync.append(self.nodes[3].getblockhash(height))
self.log.debug("Syncing %d blocks with other nodes", len(block_hashes_to_sync))
# Syncing the blocks could cause nodes to crash, so the test begins here.
self.sync_node3blocks(block_hashes_to_sync)
starting_tip_height = self.nodes[3].getblockcount()
# Main test loop:
# each time through the loop, generate a bunch of transactions,
# and then either mine a single new block on the tip, or some-sized reorg.
for i in range(40):
self.log.info("Iteration %d, generating 2500 transactions %s", i, self.restart_counts)
# Generate a bunch of small-ish transactions
self.generate_small_transactions(self.nodes[3], 2500, utxo_list)
# Pick a random block between current tip, and starting tip
current_height = self.nodes[3].getblockcount()
# TODO: re-enable this when ReplayBlocks is fixed to support evodb and additional indexes
# random_height = random.randint(starting_tip_height, current_height)
# self.log.debug("At height %d, considering height %d", current_height, random_height)
# if random_height > starting_tip_height:
# # Randomly reorg from this point with some probability (1/4 for
# # tip, 1/5 for tip-1, ...)
# if random.random() < 1.0/(current_height + 4 - random_height):
# self.log.debug("Invalidating block at height %d", random_height)
# self.nodes[3].invalidateblock(self.nodes[3].getblockhash(random_height))
# Now generate new blocks until we pass the old tip height
self.log.debug("Mining longer tip")
block_hashes = self.nodes[3].generate(current_height+1-self.nodes[3].getblockcount())
self.log.debug("Syncing %d new blocks...", len(block_hashes))
self.sync_node3blocks(block_hashes)
utxo_list = self.nodes[3].listunspent()
self.log.debug("Node3 utxo count: %d", len(utxo_list))
# Check that the utxo hashes agree with node3
# Useful side effect: each utxo cache gets flushed here, so that we
# won't get crashes on shutdown at the end of the test.
self.verify_utxo_hash()
self.log.info("Restarted nodes: %s; crashes on restart: %d", self.restart_counts, self.crashed_on_restart)
assert self.restart_counts != [0, 0, 0]
# Make sure we tested the case of crash-during-recovery.
assert self.crashed_on_restart > 0
# Warn if any of the nodes escaped restart.
for i in range(3):
if self.restart_counts[i] == 0:
self.log.warn("Node %d never crashed during utxo flush!", i)
if __name__ == "__main__":
ChainstateWriteCrashTest().main()
| true | true |
1c308df7c40a004d46d64fd7a1620aaab5c9e207 | 1,295 | py | Python | src/server.py | Azkel/Distributed-Matrix-Multiplication-In-Python | 4b1edab32dcdf4ea1801e1e688d4c734cd57cb22 | [
"MIT"
] | null | null | null | src/server.py | Azkel/Distributed-Matrix-Multiplication-In-Python | 4b1edab32dcdf4ea1801e1e688d4c734cd57cb22 | [
"MIT"
] | null | null | null | src/server.py | Azkel/Distributed-Matrix-Multiplication-In-Python | 4b1edab32dcdf4ea1801e1e688d4c734cd57cb22 | [
"MIT"
] | 3 | 2020-06-09T17:58:57.000Z | 2021-09-04T14:27:37.000Z | _author__ = 'Michal Smyk'
import numpy.matlib
import time
import Pyro4
import sys
class MatrixProcessing:
    """Remote worker object holding two operand matrices and an accumulator.

    ``multiply`` adds ``matrix_a * matrix_b`` into ``matrix``, so partial
    products computed over several calls are summed up in the accumulator.
    """

    def __init__(self):
        # Accumulated result (C) and the two current operands (A, B).
        self.matrix = None
        self.matrix_a = None
        self.matrix_b = None

    def set_matrix_a(self, new_matrix_a):
        self.matrix_a = new_matrix_a

    def set_matrix_b(self, new_matrix_b):
        self.matrix_b = new_matrix_b

    def get_matrix_a(self):
        return self.matrix_a

    def get_matrix_b(self):
        return self.matrix_b

    def print_matrix(self):
        print(self.matrix)

    def get_c_matrix(self):
        return self.matrix

    def clear_c_matrix(self):
        self.matrix = None

    def multiply(self):
        # Accumulate A @ B into the result matrix.
        partial = numpy.matlib.dot(self.matrix_a, self.matrix_b)
        if self.matrix is None:
            self.matrix = partial
        else:
            self.matrix = self.matrix + partial
# Usage: server.py <host> <port>
# Starts a Pyro4 daemon exposing a MatrixProcessing instance as "matrix".
if __name__ == '__main__':
    # NOTE(review): the pickle serializer deserializes arbitrary objects --
    # only safe on a trusted network.
    Pyro4.config.SERIALIZER = "pickle"
    port_id = int(sys.argv[2])
    matrix = MatrixProcessing()
    print('System started at port '+str(port_id)+'!')
    # ns=False: register the object directly, without a Pyro name server.
    Pyro4.Daemon.serveSimple(
        {
            matrix: "matrix"
        },
        host = sys.argv[1],
        port = port_id,
        ns = False)
    # Idle loop; serveSimple above runs the request loop.
    while 1:
        time.sleep(0.1)
| 21.949153 | 86 | 0.601544 | _author__ = 'Michal Smyk'
import numpy.matlib
import time
import Pyro4
import sys
class MatrixProcessing:
def __init__(self):
self.matrix = None
self.matrix_a = None
self.matrix_b = None
def set_matrix_a(self, new_matrix_a):
self.matrix_a = new_matrix_a
def set_matrix_b(self, new_matrix_b):
self.matrix_b = new_matrix_b
def get_matrix_a(self):
return self.matrix_a
def get_matrix_b(self):
return self.matrix_b
def print_matrix(self):
print(self.matrix)
def get_c_matrix(self):
return self.matrix
def clear_c_matrix(self):
self.matrix = None
def multiply(self):
if self.matrix is None:
self.matrix = numpy.matlib.dot(self.matrix_a, self.matrix_b)
else:
self.matrix = self.matrix + numpy.matlib.dot(self.matrix_a, self.matrix_b)
if __name__ == '__main__':
Pyro4.config.SERIALIZER = "pickle"
port_id = int(sys.argv[2])
matrix = MatrixProcessing()
print('System started at port '+str(port_id)+'!')
Pyro4.Daemon.serveSimple(
{
matrix: "matrix"
},
host = sys.argv[1],
port = port_id,
ns = False)
while 1:
time.sleep(0.1)
| true | true |
1c308eddea6c9cef23f7490ddb06787dcca40fb5 | 3,384 | py | Python | unified_planning/plans/plan.py | aiplan4eu/unified-planning | d2fd18baa3a2110595e5dfdc3f55254df72c3016 | [
"Apache-2.0"
] | 9 | 2022-02-18T14:51:58.000Z | 2022-03-31T06:02:43.000Z | unified_planning/plans/plan.py | aiplan4eu/unified-planning | d2fd18baa3a2110595e5dfdc3f55254df72c3016 | [
"Apache-2.0"
] | 37 | 2022-02-01T10:44:38.000Z | 2022-03-31T09:13:42.000Z | unified_planning/plans/plan.py | aiplan4eu/unified-planning | d2fd18baa3a2110595e5dfdc3f55254df72c3016 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 AIPlan4EU project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unified_planning as up
import unified_planning.model
from unified_planning.environment import Environment, get_env
from unified_planning.model import FNode, Action, InstantaneousAction, Expression, Effect
from unified_planning.walkers import Substituter, Simplifier
from typing import Callable, Dict, Optional, Tuple
from enum import Enum, auto
'''This module defines the different plan classes.'''
class ActionInstance:
    '''An action together with the actual parameters it is applied to.

    NOTE: two instances of the same action with the same parameters are
    still considered different, since a plan may contain the same action
    twice.'''
    def __init__(self, action: 'unified_planning.model.Action', params: Tuple['unified_planning.model.FNode', ...] = tuple()):
        assert len(action.parameters) == len(params)
        self._action = action
        self._params = tuple(params)

    def __repr__(self) -> str:
        if not self._params:
            return self._action.name
        arg_list = ', '.join(str(p) for p in self._params)
        return self._action.name + '(' + arg_list + ')'

    @property
    def action(self) -> 'Action':
        '''Returns the action of which this is an instance.'''
        return self._action

    @property
    def actual_parameters(self) -> Tuple['FNode', ...]:
        '''Returns the tuple of actual parameters.'''
        return self._params

    def is_semantically_equivalent(self, oth: 'ActionInstance') -> bool:
        '''Return True iff the two Action Instances have the same semantic.

        NOTE: unlike __eq__, the two instances need not be the same object.'''
        return self.action == oth.action and self._params == oth._params
class PlanKind(Enum):
    """Enumeration of the supported kinds of plans."""
    SEQUENTIAL_PLAN = auto()
    TIME_TRIGGERED_PLAN = auto()
    PARTIAL_ORDER_PLAN = auto()
class Plan:
    '''Base class for all plan representations.'''
    def __init__(self, kind: PlanKind, environment: Optional['Environment'] = None) -> None:
        self._kind = kind
        # get_env falls back to the global environment when None is given.
        self._environment = get_env(environment)

    @property
    def kind(self) -> PlanKind:
        '''Returns the kind of this plan.'''
        return self._kind

    @property
    def environment(self) -> 'Environment':
        '''Returns the environment this plan belongs to.'''
        return self._environment

    def replace_action_instances(self, replace_function: Callable[[ActionInstance], ActionInstance]) -> 'Plan':
        '''Return a new Plan whose ActionInstances are the images of this
        plan's instances under "replace_function".

        Subclasses must implement this method.'''
        raise NotImplementedError
| 35.25 | 126 | 0.6776 |
import unified_planning as up
import unified_planning.model
from unified_planning.environment import Environment, get_env
from unified_planning.model import FNode, Action, InstantaneousAction, Expression, Effect
from unified_planning.walkers import Substituter, Simplifier
from typing import Callable, Dict, Optional, Tuple
from enum import Enum, auto
class ActionInstance:
def __init__(self, action: 'unified_planning.model.Action', params: Tuple['unified_planning.model.FNode', ...] = tuple()):
assert len(action.parameters) == len(params)
self._action = action
self._params = tuple(params)
def __repr__(self) -> str:
s = []
if len(self._params) > 0:
s.append('(')
first = True
for p in self._params:
if not first:
s.append(', ')
s.append(str(p))
first = False
s.append(')')
return self._action.name + ''.join(s)
@property
def action(self) -> 'Action':
return self._action
@property
def actual_parameters(self) -> Tuple['FNode', ...]:
return self._params
def is_semantically_equivalent(self, oth: 'ActionInstance') -> bool:
return self.action == oth.action and self._params == oth._params
class PlanKind(Enum):
SEQUENTIAL_PLAN = auto()
TIME_TRIGGERED_PLAN = auto()
PARTIAL_ORDER_PLAN = auto()
class Plan:
def __init__(self, kind: PlanKind, environment: Optional['Environment'] = None) -> None:
self._kind = kind
self._environment = get_env(environment)
@property
def environment(self) -> 'Environment':
return self._environment
@property
def kind(self) -> PlanKind:
return self._kind
def replace_action_instances(self, replace_function: Callable[[ActionInstance], ActionInstance]) -> 'Plan':
raise NotImplementedError
| true | true |
1c3090983794c2122f4c62e96544d9601689b8e5 | 2,428 | py | Python | lib/coloraide/color/gamut/fit_lch_chroma.py | adaminfinitum/ColorHelper | d6ab02ccff01dd1e3a01dbc186b5ba3ff1fcca47 | [
"MIT"
] | 253 | 2015-03-04T06:48:43.000Z | 2022-03-25T14:22:17.000Z | lib/coloraide/color/gamut/fit_lch_chroma.py | adaminfinitum/ColorHelper | d6ab02ccff01dd1e3a01dbc186b5ba3ff1fcca47 | [
"MIT"
] | 197 | 2015-03-04T21:40:47.000Z | 2022-03-25T17:04:36.000Z | lib/coloraide/color/gamut/fit_lch_chroma.py | adaminfinitum/ColorHelper | d6ab02ccff01dd1e3a01dbc186b5ba3ff1fcca47 | [
"MIT"
] | 32 | 2015-03-21T03:28:01.000Z | 2021-09-06T07:20:51.000Z | """Fit by compressing chroma in Lch."""
from ..gamut import Fit
EPSILON = 0.001
class LchChroma(Fit):
    """Lch chroma gamut mapping class."""
    @staticmethod
    def name():
        """Get plugin name."""
        return "lch-chroma"
    @staticmethod
    def fit(color):
        """
        Gamut mapping via chroma Lch.
        Algorithm originally came from https://colorjs.io/docs/gamut-mapping.html.
        Some things have been optimized and fixed though to better perform as intended.
        The idea is to hold hue and lightness constant and decrease chroma until
        color comes under gamut.
        We'll use a binary search and at after each stage, we will clip the color
        and compare the distance of the two colors (clipped and current color via binary search).
        If the distance is less than two, we can return the clipped color.
        ---
        Original Authors: Lea Verou, Chris Lilley
        License: MIT (As noted in https://github.com/LeaVerou/color.js/blob/master/package.json)
        """
        space = color.space()
        # If flooring chroma doesn't work, just clip the floored color
        # because there is no optimal compression.
        floor = color.clone().set('lch.chroma', 0)
        if not floor.in_gamut(tolerance=0):
            return floor.clip().coords()
        # If we are already below the JND, just clip as we will gain no
        # noticeable difference moving forward.
        # (A delta E 2000 of 2 is used throughout as the "just noticeable"
        # threshold.)
        clipped = color.clip()
        if color.delta_e(clipped, method="2000") < 2:
            return clipped.coords()
        # Convert to CIELCH and set our boundaries
        mapcolor = color.convert("lch")
        low = 0.0
        high = mapcolor.chroma
        # Adjust chroma (using binary search).
        # This helps preserve the other attributes of the color.
        # Each time we compare the compressed color to its clipped form
        # to see how close we are. A delta less than 2 is our target.
        while (high - low) > EPSILON:
            delta = mapcolor.delta_e(
                mapcolor.clip(space),
                method="2000"
            )
            if (delta - 2) < EPSILON:
                # Close enough to the clipped color: chroma may grow again.
                low = mapcolor.chroma
            else:
                # Still too far from gamut: shrink chroma further.
                high = mapcolor.chroma
            mapcolor.chroma = (high + low) * 0.5
        # Update and clip off noise
        return color.update(mapcolor).clip(space, in_place=True).coords()
| 32.810811 | 97 | 0.605848 | from ..gamut import Fit
EPSILON = 0.001
class LchChroma(Fit):
@staticmethod
def name():
return "lch-chroma"
@staticmethod
def fit(color):
space = color.space()
# because there is no optimal compression.
floor = color.clone().set('lch.chroma', 0)
if not floor.in_gamut(tolerance=0):
return floor.clip().coords()
# If we are already below the JND, just clip as we will gain no
# noticeable difference moving forward.
clipped = color.clip()
if color.delta_e(clipped, method="2000") < 2:
return clipped.coords()
# Convert to CIELCH and set our boundaries
mapcolor = color.convert("lch")
low = 0.0
high = mapcolor.chroma
# Adjust chroma (using binary search).
# This helps preserve the other attributes of the color.
# Each time we compare the compressed color to it's clipped form
while (high - low) > EPSILON:
delta = mapcolor.delta_e(
mapcolor.clip(space),
method="2000"
)
if (delta - 2) < EPSILON:
low = mapcolor.chroma
else:
high = mapcolor.chroma
mapcolor.chroma = (high + low) * 0.5
return color.update(mapcolor).clip(space, in_place=True).coords()
| true | true |
1c3091f391a26f143d0600d84110df9d81a0ac47 | 127 | py | Python | xlib/api/win32/winmm/winmm.py | jkennedyvz/DeepFaceLive | 274c20808da089eb7fc0fc0e8abe649379a29ffe | [
"MIT"
] | null | null | null | xlib/api/win32/winmm/winmm.py | jkennedyvz/DeepFaceLive | 274c20808da089eb7fc0fc0e8abe649379a29ffe | [
"MIT"
] | null | null | null | xlib/api/win32/winmm/winmm.py | jkennedyvz/DeepFaceLive | 274c20808da089eb7fc0fc0e8abe649379a29ffe | [
"MIT"
] | null | null | null | from ..wintypes import DWORD, MMRESULT, dll_import
# winmm.timeBeginPeriod: request a minimum timer resolution for the process.
# NOTE(review): Win32 requires a matching timeEndPeriod call -- confirm callers do.
@dll_import('Winmm')
def timeBeginPeriod(uPeriod : DWORD) -> MMRESULT: ...
@dll_import('Winmm')
def timeBeginPeriod(uPeriod : DWORD) -> MMRESULT: ... | true | true |
1c30938bc735f9a5f84678013884e0afa93553ea | 51,188 | py | Python | src/sage/rings/polynomial/skew_polynomial_ring.py | mkoeppe/sage-1 | 249fc903897809e1eb081fbacb94741e01b37e73 | [
"BSL-1.0"
] | null | null | null | src/sage/rings/polynomial/skew_polynomial_ring.py | mkoeppe/sage-1 | 249fc903897809e1eb081fbacb94741e01b37e73 | [
"BSL-1.0"
] | null | null | null | src/sage/rings/polynomial/skew_polynomial_ring.py | mkoeppe/sage-1 | 249fc903897809e1eb081fbacb94741e01b37e73 | [
"BSL-1.0"
] | null | null | null | r"""
Skew Univariate Polynomial Rings
This module provides the :class:`~sage.rings.polynomial.skew_polynomial_ring.SkewPolynomialRing`
which constructs a general dense skew univariate polynomials over commutative base rings with
automorphisms over the base rings. This is the set of formal polynomials where the coefficients
are written on the left of the variable of the skew polynomial ring. The modified multiplication
operation over elements of the base ring is extended to all elements of the skew poynomial ring
by associativity and distributivity.
This module also provides :class:`~sage.rings.polynomial.skew_polynomial_ring.SkewPolynomialRing_finite_order`
which is a specialized class for skew polynomial rings over fields equipped with an automorphism of
finite order. It inherits from
:class:`~sage.rings.polynomial.skew_polynomial_ring.SkewPolynomialRing` but contains more
methods and provides better algorithms.
AUTHOR:
- Xavier Caruso (2012-06-29): initial version
- Arpit Merchant (2016-08-04): improved docstrings, fixed doctests
and refactored classes and methods
- Johan Rosenkilde (2016-08-03): changes for bug fixes, docstring and
doctest errors
"""
# ***************************************************************************
# Copyright (C) 2012 Xavier Caruso <xavier.caruso@normalesup.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ***************************************************************************
import sage
from sage.structure.richcmp import op_EQ
from sage.misc.prandom import randint
from sage.misc.cachefunc import cached_method
from sage.rings.infinity import Infinity
from sage.structure.category_object import normalize_names
from sage.structure.unique_representation import UniqueRepresentation
from sage.rings.ring import Algebra, Field
from sage.rings.integer import Integer
from sage.categories.commutative_rings import CommutativeRings
from sage.categories.algebras import Algebras
from sage.categories.fields import Fields
from sage.categories.morphism import Morphism, IdentityMorphism
from sage.rings.morphism import RingHomomorphism
from sage.categories.homset import Hom
from sage.categories.map import Section
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.rings.polynomial.skew_polynomial_element import SkewPolynomialBaseringInjection
WORKING_CENTER_MAX_TRIES = 1000
# Helper functions
def _base_ring_to_fraction_field(S):
    """
    Return the unique skew polynomial ring over the fraction field of
    ``S.base_ring()`` which has ``S`` as a sub-ring (internal method).

    INPUT:

    - ``S`` -- a skew polynomial ring.

    OUTPUT:

    - ``Q`` -- the skew polynomial ring over the fraction field of
      ``S.base_ring``.

    EXAMPLES::

        sage: from sage.rings.polynomial.skew_polynomial_ring import _base_ring_to_fraction_field
        sage: R.<t> = ZZ[]
        sage: sigma = R.hom([t+1])
        sage: S.<x> = R['x', sigma]
        sage: _base_ring_to_fraction_field(S)
        Skew Polynomial Ring in x over Fraction Field of Univariate Polynomial Ring in t over Integer Ring twisted by t |--> t + 1
    """
    base = S.base_ring()
    if isinstance(base, Field):
        # Already over a field: nothing to extend.
        return S
    frac = base.fraction_field()
    twist = S.twist_map()
    # Lift the twist map to the fraction field, generator by generator.
    lifted_twist = frac.hom([frac(twist(g)) for g in base.gens()])
    return frac[S.variable_name(), lifted_twist]
def _minimal_vanishing_polynomial(R, eval_pts):
    """
    Return the minimal vanishing polynomial (internal function).
    See the documentation for
    :meth:`SkewPolynomialRing.minimal_vanishing_polynomial` for a description.
    INPUT:
    - ``R`` -- A skew polynomial ring over a field.
    - ``eval_pts`` -- a list of evaluation points
    OUTPUT:
    The minimal vanishing polynomial.
    EXAMPLES::
        sage: from sage.rings.polynomial.skew_polynomial_ring import _minimal_vanishing_polynomial
        sage: k.<t> = GF(5^3)
        sage: Frob = k.frobenius_endomorphism()
        sage: S.<x> = k['x',Frob]
        sage: eval_pts = [1, t, t^2]
        sage: b = _minimal_vanishing_polynomial(S, eval_pts); b
        doctest:...: FutureWarning: This class/method/function is marked as experimental. It, its functionality or its interface might change without a formal deprecation.
        See http://trac.sagemath.org/13215 for details.
        x^3 + 4
    """
    l = len(eval_pts)
    if l == 0:
        # No constraints: the unit polynomial vanishes vacuously.
        return R.one()
    elif l == 1:
        e = eval_pts[0]
        if e.is_zero():
            # Every polynomial vanishes at 0.
            return R.one()
        else:
            # x - twist(e)/e vanishes at e under (operator) evaluation.
            return R.gen() - R.twist_map()(e)/e
    else:
        # Divide and conquer: vanish on the first half (M_A), push the
        # second half through M_A, and vanish on the moved points.
        t = l//2
        A = eval_pts[:t]
        B = eval_pts[t:]
        M_A = _minimal_vanishing_polynomial(R, A)
        B_moved = M_A.multi_point_evaluation(B)
        M_at_B_moved = _minimal_vanishing_polynomial(R, B_moved)
        # NOTE: multiplication is noncommutative; M_A must be applied first,
        # hence the right factor.
        return M_at_B_moved * M_A
def _lagrange_polynomial(R, eval_pts, values):
    """
    Return the Lagrange polynomial of the given points if it exists.
    Otherwise return an unspecified polynomial (internal method).
    See the documentation for
    :meth:`SkewPolynomialRing.lagrange_polynomial` for a description
    of Lagrange polynomial.
    INPUT:
    - ``R`` -- a skew polynomial ring over a field
    - ``eval_pts`` -- list of evaluation points
    - ``values`` -- list of values that the Lagrange polynomial takes
      at the respective ``eval_pts``
    OUTPUT:
    - the Lagrange polynomial.
    EXAMPLES::
        sage: from sage.rings.polynomial.skew_polynomial_ring import _lagrange_polynomial
        sage: k.<t> = GF(5^3)
        sage: Frob = k.frobenius_endomorphism()
        sage: S.<x> = k['x',Frob]
        sage: eval_pts = [ t , t^2 ]
        sage: values = [ 3*t^2 + 4*t + 4 , 4*t ]
        sage: d = _lagrange_polynomial(S, eval_pts, values); d
        x + t
        sage: d.multi_point_evaluation(eval_pts) == values
        True
    The following restrictions are impossible to satisfy because the evaluation
    points are linearly dependent over the fixed field of the twist map, and the
    corresponding values do not match::
        sage: eval_pts = [ t, 2*t ]
        sage: values = [ 1, 3 ]
        sage: _lagrange_polynomial(S, eval_pts, values)
        Traceback (most recent call last):
        ...
        ValueError: the given evaluation points are linearly dependent over the fixed field of the twist map,
        so a Lagrange polynomial could not be determined (and might not exist).
    """
    l = len(eval_pts)
    if l == 1:
        if eval_pts[0].is_zero():
            # This is due to linear dependence among the eval_pts.
            raise ValueError("the given evaluation points are linearly dependent over the fixed field of the twist map, so a Lagrange polynomial could not be determined (and might not exist).")
        # Base case: the constant c with c * e = value, i.e. value/e.
        return (values[0]/eval_pts[0])*R.one()
    else:
        # Divide and conquer: M_A (resp. M_B) vanishes on the first (resp.
        # second) half, so I_1 * M_B only affects the first half's points
        # and I_2 * M_A only the second half's.
        t = l//2
        A = eval_pts[:t]
        B = eval_pts[t:]
        M_A = _minimal_vanishing_polynomial(R, A)
        M_B = _minimal_vanishing_polynomial(R, B)
        A_ = M_B.multi_point_evaluation(A)
        B_ = M_A.multi_point_evaluation(B)
        I_1 = _lagrange_polynomial(R, A_, values[:t])
        I_2 = _lagrange_polynomial(R, B_, values[t:])
        return I_1 * M_B + I_2 * M_A
# Generic implementation of skew polynomial rings
#################################################
class SkewPolynomialRing(Algebra, UniqueRepresentation):
    r"""
    The ring of skew polynomials `R[X, \sigma]` over a base ring `R`
    twisted by a ring automorphism `\sigma`.

    Elements are the usual polynomials in `X` over `R`, but the
    multiplication is twisted by the rule `X a = \sigma(a) X`.
    We refer to [Ore1933]_ for more material on skew polynomials.

    INPUT:

    - ``base_ring`` -- a commutative ring

    - ``twist_map`` -- an automorphism of the base ring

    - ``names`` -- a string or a list of strings with a single entry

    - ``sparse`` -- a boolean (default: ``False``); currently not supported

    .. NOTE::

        Derivations, sparse skew polynomials and multivariate skew
        polynomials are not implemented.

    There is exactly one skew polynomial ring for each triple
    (base ring, twisting map, variable name): the parent is unique
    (``UniqueRepresentation``) and its variable name is immutable.

    .. SEEALSO::

        - :class:`sage.rings.polynomial.skew_polynomial_element.SkewPolynomial`
    """
    Element = sage.rings.polynomial.skew_polynomial_element.SkewPolynomial_generic_dense

    @staticmethod
    def __classcall_private__(cls, base_ring, twist_map=None, names=None, sparse=False):
        r"""
        Validate and normalize the arguments, then dispatch to the
        appropriate implementation class.

        When the base ring is a field and the twisting morphism has
        finite order, the specialized class
        ``SkewPolynomialRing_finite_order`` is used instead of the
        generic one.
        """
        if base_ring not in CommutativeRings():
            raise TypeError('base_ring must be a commutative ring')
        if twist_map is None:
            # No twist given: fall back to the (commutative) identity twist
            twist_map = IdentityMorphism(base_ring)
        else:
            if (not isinstance(twist_map, Morphism)
                    or twist_map.domain() is not base_ring
                    or twist_map.codomain() is not base_ring):
                raise TypeError("the twist map must be a ring automorphism of base_ring (=%s)" % base_ring)
        if sparse:
            raise NotImplementedError("sparse skew polynomial rings are not implemented")
        if names is None:
            raise TypeError("you must specify the name of the variable")
        try:
            names = normalize_names(1, names)[0]
        except IndexError:
            raise NotImplementedError("multivariate skew polynomials rings not supported")
        # If the twisting morphism has finite order, use the dedicated
        # class (it knows how to build the center of the ring).
        if base_ring in Fields():
            try:
                order = twist_map.order()
                if order is not Infinity:
                    from sage.rings.polynomial.skew_polynomial_ring import SkewPolynomialRing_finite_order
                    return SkewPolynomialRing_finite_order(base_ring, twist_map, names, sparse)
            except (AttributeError, NotImplementedError):
                # The twist map cannot tell its order; fall through
                pass
        # Fallback to the generic implementation
        return cls.__classcall__(cls, base_ring, twist_map, names, sparse)

    def __init__(self, base_ring, twist_map, name, sparse, category=None):
        r"""
        Initialize ``self``.

        INPUT:

        - ``base_ring`` -- a commutative ring

        - ``twist_map`` -- an automorphism of the base ring

        - ``name`` -- string representing the name of the variable

        - ``sparse`` -- boolean (default: ``False``)

        - ``category`` -- a category or ``None``
        """
        self.__is_sparse = sparse
        self._map = twist_map
        # Cache of iterated powers of the twist map, indexed by exponent
        self._maps = {0: IdentityMorphism(base_ring), 1: self._map}
        category = Algebras(base_ring).or_subcategory(category)
        Algebra.__init__(self, base_ring, names=name, normalize=True, category=category)

    def _element_constructor_(self, a=None, check=True, construct=False, **kwds):
        r"""
        Convert ``a`` into an element of this skew polynomial ring,
        possibly non-canonically.

        INPUT:

        - ``a`` -- (default: ``None``) a list of coefficients, an element
          of this ring or of a ring that maps into it, or a string

        - ``check`` -- boolean (default: ``True``)

        - ``construct`` -- boolean (default: ``False``)

        OUTPUT:

        An element of ``self`` representing ``a``; base ring elements
        become degree-zero skew polynomials.
        """
        C = self.Element
        if isinstance(a, list):
            return C(self, a, check=check, construct=construct)
        if isinstance(a, sage.structure.element.Element):
            P = a.parent()

            def build(check):
                # Build the constant skew polynomial (in ``self``) equal
                # to ``a``.  The zero element is represented by the empty
                # coefficient list.
                if a.is_zero():
                    return C(self, [], check=False, construct=construct)
                return C(self, [a], check=check, construct=construct)

            if P is self:
                return a
            elif P is self.base_ring():
                # BUGFIX: the result of ``build`` was previously discarded
                # (missing ``return``), so these branches were dead code and
                # conversion silently fell through to the generic path below;
                # ``build`` also returned ``P.zero()``, an element with the
                # wrong parent.
                return build(False)
            elif P == self.base_ring() or self.base_ring().has_coerce_map_from(P):
                return build(True)
        try:
            return a._polynomial_(self)
        except AttributeError:
            pass
        if isinstance(a, str):
            # Parse the string in terms of the generator of ``self``
            try:
                from sage.misc.parser import Parser, LookupNameMaker
                R = self.base_ring()
                p = Parser(Integer, R, LookupNameMaker({self.variable_name(): self.gen()}, R))
                return self(p.parse(a))
            except NameError:
                raise TypeError("unable to coerce string")
        return C(self, a, check, construct=construct, **kwds)

    def _coerce_map_from_base_ring(self):
        """
        Return the canonical coercion from the base ring of ``self``
        into ``self`` (mapping a constant to the degree-zero skew
        polynomial).
        """
        return SkewPolynomialBaseringInjection(self.base_ring(), self)

    def _coerce_map_from_(self, P):
        r"""
        Return a coercion map from ``P`` into ``self`` if one exists.

        The rings that canonically coerce into this ring are:

        - this ring itself

        - any ring that canonically coerces into the base ring of this ring

        - skew polynomial rings in the same variable and automorphism over
          any base ring that canonically coerces into the base ring of
          this ring

        INPUT:

        - ``P`` -- a ring
        """
        base_ring = self.base_ring()
        try:
            connecting = base_ring.coerce_map_from(P)
            if connecting is not None:
                # Coerce into the base ring first, then embed constants
                return self.coerce_map_from(base_ring) * connecting
        except TypeError:
            pass
        if isinstance(P, SkewPolynomialRing):
            if self.__is_sparse and not P.is_sparse():
                return False
            if P.variable_name() == self.variable_name():
                return base_ring.has_coerce_map_from(P.base_ring())

    def _repr_(self):
        r"""
        Return a string representation of ``self``.
        """
        s = "Skew Polynomial Ring in %s over %s twisted by %s" % (self.variable_name(),
                                                                  self.base_ring(),
                                                                  self._map._repr_short())
        if self.is_sparse():
            s = "Sparse " + s
        return s

    def _latex_(self):
        r"""
        Return a latex representation of ``self``.
        """
        from sage.misc.latex import latex
        return "%s[%s,%s]" % (latex(self.base_ring()), self.latex_variable_names()[0],
                              latex(self._map))

    def change_var(self, var):
        r"""
        Return the skew polynomial ring in variable ``var`` with the same
        base ring and twist map as ``self``.

        INPUT:

        - ``var`` -- a string representing the name of the new variable
        """
        from sage.rings.polynomial.skew_polynomial_ring import SkewPolynomialRing
        return SkewPolynomialRing(self.base_ring(), self._map, names=var,
                                  sparse=self.__is_sparse)

    def characteristic(self):
        r"""
        Return the characteristic of the base ring of ``self``.
        """
        return self.base_ring().characteristic()

    @cached_method
    def twist_map(self, n=1):
        r"""
        Return the twist map of ``self``, iterated ``n`` times.

        INPUT:

        - ``n`` -- an integer (default: 1); if negative, the inverse of
          the twist map is iterated, when it can be computed

        OUTPUT:

        The ``n``-th iterate of the twisting automorphism.

        Raise ``NotImplementedError`` when ``n`` is negative and the
        twist map cannot be inverted.
        """
        try:
            return self._map ** n
        except TypeError as e:
            if n < 0:
                raise NotImplementedError("inversion of the twist map %s" % self._map)
            else:
                # BUGFIX: the original passed the format string and ``e`` as
                # two separate ValueError arguments, so the cause never
                # appeared in the message; use %-formatting instead.
                raise ValueError("Unexpected error in iterating the twist map: %s" % e)

    @cached_method
    def gen(self, n=0):
        r"""
        Return the indeterminate generator of this skew polynomial ring.

        INPUT:

        - ``n`` -- index of generator to return (default: 0); must be 0,
          the argument exists for compatibility with other polynomial rings
        """
        if n != 0:
            raise IndexError("generator %s not defined" % n)
        return self.Element(self, [0, 1])

    parameter = gen

    def gens_dict(self):
        r"""
        Return a {name: variable} dictionary of the generators of ``self``.
        """
        return dict(zip(self.variable_names(), self.gens()))

    def is_finite(self):
        r"""
        Return ``False`` since skew polynomial rings are not finite
        (unless the base ring is the zero ring).
        """
        R = self.base_ring()
        return R.is_finite() and R.order() == 1

    def is_exact(self):
        r"""
        Return ``True`` if elements of this skew polynomial ring are exact,
        i.e. if and only if elements of the base ring are exact.
        """
        return self.base_ring().is_exact()

    def is_sparse(self):
        r"""
        Return ``True`` if the elements of this polynomial ring are sparsely
        represented.

        .. WARNING::

            Since sparse skew polynomials are not yet implemented, this
            function always returns ``False``.
        """
        return self.__is_sparse

    def ngens(self):
        r"""
        Return the number of generators of this skew polynomial ring,
        which is 1.
        """
        return 1

    def random_element(self, degree=2, monic=False, *args, **kwds):
        r"""
        Return a random skew polynomial in ``self``.

        INPUT:

        - ``degree`` -- (default: 2) an integer, or a pair of integers
          ``(min_degree, max_degree)`` between which (inclusive) a degree
          is drawn uniformly

        - ``monic`` -- (default: ``False``) if ``True``, return a monic
          skew polynomial of exactly the requested degree; otherwise the
          degree may be smaller when the random leading coefficient is zero

        - ``*args, **kwds`` -- passed on to the ``random_element`` method
          of the base ring
        """
        R = self.base_ring()
        if isinstance(degree, (list, tuple)):
            if len(degree) != 2:
                raise ValueError("degree argument must be an integer or a tuple of 2 integers (min_degree, max_degree)")
            if degree[0] > degree[1]:
                raise ValueError("first degree argument must be less or equal to the second")
            degree = randint(*degree)
        if monic:
            # Force the leading coefficient to be one
            return self([R.random_element(*args, **kwds) for _ in range(degree)] + [R.one()])
        else:
            return self([R.random_element(*args, **kwds) for _ in range(degree + 1)])

    def is_commutative(self):
        r"""
        Return ``True`` if this skew polynomial ring is commutative, i.e.
        if the twist map is the identity.
        """
        return self.twist_map().is_identity()

    def minimal_vanishing_polynomial(self, eval_pts):
        """
        Return the minimal-degree, monic skew polynomial which vanishes
        at all the given evaluation points.

        The degree of the vanishing polynomial is at most the length of
        ``eval_pts``; equality holds if and only if the elements of
        ``eval_pts`` are linearly independent over the fixed field of
        ``self.twist_map()``.

        INPUT:

        - ``eval_pts`` -- list of evaluation points
        """
        return _minimal_vanishing_polynomial(_base_ring_to_fraction_field(self), eval_pts)

    def lagrange_polynomial(self, points):
        r"""
        Return the minimal-degree skew polynomial interpolating the given
        points.

        Given `n` pairs `(x_1, y_1), ..., (x_n, y_n)` of elements of the
        base ring, compute a skew polynomial `p(x)` such that
        `p(x_i) = y_i` for each `i`, under the condition that the `x_i`
        are linearly independent over the fixed field of
        ``self.twist_map()`` (otherwise a ``ValueError`` is raised, even
        when an interpolating polynomial might exist).

        INPUT:

        - ``points`` -- a list of pairs ``(x_i, y_i)`` of elements of the
          base ring of ``self``; the `x_i` must be distinct
        """
        l = len(points)
        if not all(len(pair) == 2 for pair in points):
            raise TypeError("supplied points must be pairs of elements of base ring")
        eval_pts = [x for (x, _) in points]
        values = [y for (_, y) in points]
        if l > len(set(eval_pts)):
            raise TypeError("the evaluation points must be distinct")
        zero_i = [i for i in range(l) if eval_pts[i].is_zero()]
        if zero_i and not values[zero_i[0]].is_zero():
            raise TypeError("a skew polynomial always evaluates to 0 at 0, but a non-zero value was requested.")
        return _lagrange_polynomial(_base_ring_to_fraction_field(self), eval_pts, values)
# Special classes for twisting morphisms with finite order
##########################################################
class SectionSkewPolynomialCenterInjection(Section):
    r"""
    Section (partial inverse) of the canonical injection of the center
    of a skew polynomial ring into that ring.

    The injection sends the central variable `z` to `x^r`, where `r` is
    the order of the twisting morphism; this section maps a central skew
    polynomial back to the corresponding polynomial over the fixed
    field, and raises an error on non-central input.
    """

    def _call_(self, x):
        r"""
        Return ``x`` viewed as an element of the center.

        Raise a ``ValueError`` when ``x`` is not central, i.e. when a
        coefficient in a degree not divisible by the order of the twist
        map is nonzero.
        """
        order = self.inverse()._order
        section = self.inverse()._embed.section()
        coefficients = []
        for position, coeff in enumerate(x.list()):
            if position % order == 0:
                # Only degrees divisible by ``order`` carry central data
                coefficients.append(section(coeff))
            elif not coeff.is_zero():
                raise ValueError("%s is not in the center" % x)
        return self.codomain()(coefficients)

    def _richcmp_(self, right, op):
        r"""
        Compare this morphism with ``right``.

        Two sections are considered equal when their domains and
        codomains are identical parents; other comparisons are left
        unimplemented.
        """
        if op != op_EQ:
            return NotImplemented
        return (self.domain() is right.domain()
                and self.codomain() is right.codomain())
class SkewPolynomialCenterInjection(RingHomomorphism):
    r"""
    Canonical injection of the center of a skew polynomial ring into
    that ring, sending the central variable `z` to `x^r` where `r` is
    the order of the twisting morphism.
    """

    def __init__(self, domain, codomain, embed, order):
        r"""
        Initialize this morphism.

        INPUT:

        - ``domain`` -- the center (a univariate polynomial ring over
          the fixed field of the twist map)

        - ``codomain`` -- the skew polynomial ring

        - ``embed`` -- the embedding of the fixed field into the base
          ring of ``codomain``

        - ``order`` -- the order of the twisting morphism
        """
        RingHomomorphism.__init__(self, Hom(domain, codomain))
        self._embed = embed
        self._order = order
        self._codomain = codomain
        self._section = SectionSkewPolynomialCenterInjection(self)

    def _repr_(self):
        r"""
        Return a string representation of this morphism.
        """
        return "Embedding of the center of %s into this ring" % self._codomain

    def _call_(self, x):
        r"""
        Return the image of ``x`` under this morphism: the coefficient
        of `z^i` becomes the coefficient of `x^{i r}`, with zeros in
        between.
        """
        base = self._codomain.base_ring()
        padding = [base(0)] * (self._order - 1)
        coefficients = []
        for coeff in x.list():
            coefficients.append(self._embed(coeff))
            coefficients.extend(padding)
        return self._codomain(coefficients)

    def _richcmp_(self, right, op):
        r"""
        Compare this morphism with ``right``.

        Equality holds when the domains and codomains are identical
        parents; other comparisons are left unimplemented.
        """
        if op != op_EQ:
            return NotImplemented
        return (self.domain() is right.domain()
                and self.codomain() is right.codomain())

    def section(self):
        r"""
        Return the section (partial inverse) of this morphism.
        """
        return self._section
class SkewPolynomialRing_finite_order(SkewPolynomialRing):
    """
    A specialized class for skew polynomial rings whose twisting
    morphism has finite order (for instance, skew polynomial rings
    over finite fields).

    If `F` denotes the subring fixed by the twisting morphism `\\sigma`
    and `r` the order of `\\sigma`, such a ring has a large center,
    namely `F[x^r]`; this class constructs that center and registers
    coercion and conversion maps between it and the ring.

    .. SEEALSO::

        :meth:`sage.rings.polynomial.skew_polynomial_ring_constructor.SkewPolynomialRing`
        :class:`sage.rings.polynomial.skew_polynomial_ring.SkewPolynomialRing`
        :mod:`sage.rings.polynomial.skew_polynomial_finite_order`
    """
    import sage.rings.polynomial.skew_polynomial_finite_order
    Element = sage.rings.polynomial.skew_polynomial_finite_order.SkewPolynomial_finite_order_dense

    def __init__(self, base_ring, twist_map, name, sparse, category=None):
        r"""
        Initialize this skew polynomial ring and construct its center.

        INPUT:

        - ``base_ring`` -- a commutative ring

        - ``twist_map`` -- an automorphism of finite order of the base ring

        - ``name`` -- name of the variable

        - ``sparse`` -- boolean

        - ``category`` -- a category or ``None``
        """
        SkewPolynomialRing.__init__(self, base_ring, twist_map, name, sparse, category)
        self._order = twist_map.order()
        self._constants, self._embed_constants = twist_map.fixed_field()
        # Build the center, retrying with fresh variable names ("z0_",
        # "z1_", ...) whenever the current candidate clashes with an
        # already-registered coercion.
        self._center = {}
        self._center_variable_name = 'z'
        for attempt in range(WORKING_CENTER_MAX_TRIES):
            try:
                self._working_center = self.center()
                self._center_variable_name = None
                break
            except ValueError:
                self._center_variable_name = "z%s_" % attempt
        if self._center_variable_name is not None:
            raise NotImplementedError("unable to create the center")

    def center(self, name=None, names=None, default=False):
        r"""
        Return the center of this skew polynomial ring.

        .. NOTE::

            If `F` denotes the subring fixed by the twist map `\sigma`
            and `r` the order of `\sigma`, the center of `K[x, \sigma]`
            is `F[x^r]`, a univariate polynomial ring over `F`.

        INPUT:

        - ``name`` -- a string or ``None`` (default: ``None``); the name
          for the central variable (representing `x^r`)

        - ``names`` -- alternative way to pass the name (at most one of
          ``name`` and ``names`` may be given)

        - ``default`` -- a boolean (default: ``False``); if ``True``,
          set the default variable name for the center to ``name``

        Centers are cached per variable name; a coercion from the center
        into this ring is registered, together with a conversion map in
        the reverse direction.  The first name ever used becomes the
        default for subsequent calls (unless overridden with
        ``default=True``).
        """
        if name is not None and names is not None:
            raise ValueError
        if names is None:
            if name is None:
                name = self._center_variable_name
            if name is None:
                name = 'z'
            names = (name,)
        names = normalize_names(1, names)
        name = names[0]
        if name in self._center:
            Z = self._center[name]
        else:
            Z = PolynomialRing(self._constants, names)
            iota = SkewPolynomialCenterInjection(Z, self, self._embed_constants, self._order)
            try:
                # Registering twice would raise inside the coercion model;
                # fail early with a clearer message instead.
                assert not self.has_coerce_map_from(Z)
                self.register_coercion(iota)
                Z.register_conversion(iota.section())
            except AssertionError:
                raise ValueError("creation of coercion map fails; consider using another variable name")
            self._center[name] = Z
        if default or (self._center_variable_name is None):
            self._center_variable_name = name
        return Z
| 34.609872 | 193 | 0.569762 |
import sage
from sage.structure.richcmp import op_EQ
from sage.misc.prandom import randint
from sage.misc.cachefunc import cached_method
from sage.rings.infinity import Infinity
from sage.structure.category_object import normalize_names
from sage.structure.unique_representation import UniqueRepresentation
from sage.rings.ring import Algebra, Field
from sage.rings.integer import Integer
from sage.categories.commutative_rings import CommutativeRings
from sage.categories.algebras import Algebras
from sage.categories.fields import Fields
from sage.categories.morphism import Morphism, IdentityMorphism
from sage.rings.morphism import RingHomomorphism
from sage.categories.homset import Hom
from sage.categories.map import Section
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.rings.polynomial.skew_polynomial_element import SkewPolynomialBaseringInjection
WORKING_CENTER_MAX_TRIES = 1000
def _base_ring_to_fraction_field(S):
    """Return a skew polynomial ring over the fraction field of the base ring of ``S``.

    If the base ring is already a field, ``S`` itself is returned.
    Otherwise the twist map is transported to the fraction field and a
    new skew polynomial ring (same variable name) is built over it.
    """
    R = S.base_ring()
    if isinstance(R, Field):
        return S
    else:
        Q = R.fraction_field()
        gens = R.gens()
        sigmaS = S.twist_map()
        # Lift the twist map of S to an endomorphism of the fraction field
        # by mapping each base-ring generator through the original map.
        sigmaQ = Q.hom([ Q(sigmaS(g)) for g in gens ])
        return Q[S.variable_name(), sigmaQ]
def _minimal_vanishing_polynomial(R, eval_pts):
    """Return the minimal vanishing skew polynomial for ``eval_pts``.

    Divide-and-conquer: combine the minimal polynomial of the first
    half of the points with that of the second half after the second
    half has been evaluated through the first polynomial.
    """
    l = len(eval_pts)
    if l == 0:
        # No constraints: the unit polynomial suffices.
        return R.one()
    elif l == 1:
        e = eval_pts[0]
        if e.is_zero():
            return R.one()
        else:
            # Degree-one polynomial built from the twisted quotient sigma(e)/e.
            return R.gen() - R.twist_map()(e)/e
    else:
        t = l//2
        A = eval_pts[:t]
        B = eval_pts[t:]
        M_A = _minimal_vanishing_polynomial(R, A)
        # Push the second half of the points through M_A before recursing.
        B_moved = M_A.multi_point_evaluation(B)
        M_at_B_moved = _minimal_vanishing_polynomial(R, B_moved)
        return M_at_B_moved * M_A
def _lagrange_polynomial(R, eval_pts, values):
    """Return the skew polynomial interpolating ``values`` at ``eval_pts``.

    Divide-and-conquer analogue of Lagrange interpolation: each half is
    interpolated against its points moved through the other half's
    minimal vanishing polynomial, then the two pieces are recombined.

    Raises ValueError when the evaluation points are linearly dependent
    over the fixed field of the twist map.
    """
    l = len(eval_pts)
    if l == 1:
        if eval_pts[0].is_zero():
            # A zero evaluation point gives no usable constraint.
            raise ValueError("the given evaluation points are linearly dependent over the fixed field of the twist map, so a Lagrange polynomial could not be determined (and might not exist).")
        return (values[0]/eval_pts[0])*R.one()
    else:
        t = l//2
        A = eval_pts[:t]
        B = eval_pts[t:]
        M_A = _minimal_vanishing_polynomial(R, A)
        M_B = _minimal_vanishing_polynomial(R, B)
        # Move each half of the points through the other half's vanishing polynomial.
        A_ = M_B.multi_point_evaluation(A)
        B_ = M_A.multi_point_evaluation(B)
        I_1 = _lagrange_polynomial(R, A_, values[:t])
        I_2 = _lagrange_polynomial(R, B_, values[t:])
        return I_1 * M_B + I_2 * M_A
try:
order = twist_map.order()
if order is not Infinity:
from sage.rings.polynomial.skew_polynomial_ring import SkewPolynomialRing_finite_order
return SkewPolynomialRing_finite_order(base_ring, twist_map, names, sparse)
except (AttributeError, NotImplementedError):
pass
return cls.__classcall__(cls, base_ring, twist_map, names, sparse)
    def __init__(self, base_ring, twist_map, name, sparse, category=None):
        """Initialize a skew polynomial ring over ``base_ring`` twisted by ``twist_map``."""
        self.__is_sparse = sparse
        self._map = twist_map
        # NOTE(review): _maps looks like a cache of iterated twist maps,
        # but the visible twist_map() relies on @cached_method instead --
        # confirm _maps is still read anywhere.
        self._maps = {0: IdentityMorphism(base_ring), 1: self._map}
        category = Algebras(base_ring).or_subcategory(category)
        Algebra.__init__(self, base_ring, names=name, normalize=True, category=category)
    def _element_constructor_(self, a=None, check=True, construct=False, **kwds):
        """Convert ``a`` (list, element, string, ...) into an element of this ring."""
        C = self.Element
        if isinstance(a, list):
            # A list is taken as the coefficient list.
            return C(self, a, check=check, construct=construct)
        if isinstance(a, sage.structure.element.Element):
            P = a.parent()
            def build(check):
                if a.is_zero():
                    return P.zero()
                else:
                    return C(self, [a], check=check, construct=construct)
            if P is self:
                return a
            elif P is self.base_ring():
                # NOTE(review): the result of build() is discarded here, so
                # this branch falls through to the final constructor call --
                # confirm a ``return`` was not intended.
                build(False)
            elif P == self.base_ring() or self.base_ring().has_coerce_map_from(P):
                # NOTE(review): same as above, result discarded.
                build(True)
        try:
            # Let objects that know how to convert themselves do so.
            return a._polynomial_(self)
        except AttributeError:
            pass
        if isinstance(a, str):
            # Parse a textual polynomial in this ring's variable.
            try:
                from sage.misc.parser import Parser, LookupNameMaker
                R = self.base_ring()
                p = Parser(Integer, R, LookupNameMaker({self.variable_name(): self.gen()}, R))
                return self(p.parse(a))
            except NameError:
                raise TypeError("unable to coerce string")
        return C(self, a, check, construct=construct, **kwds)
    def _coerce_map_from_base_ring(self):
        """Return the canonical embedding of the base ring into this ring."""
        return SkewPolynomialBaseringInjection(self.base_ring(), self)
    def _coerce_map_from_(self, P):
        """Return a coercion from ``P`` into this ring, if one exists.

        Coercion goes either through the base ring, or directly from
        another skew polynomial ring with the same variable name whose
        base ring coerces into ours.  A dense ring does not coerce into
        a sparse one.
        """
        base_ring = self.base_ring()
        try:
            # Compose P -> base_ring -> self when the base ring accepts P.
            connecting = base_ring.coerce_map_from(P)
            if connecting is not None:
                return self.coerce_map_from(base_ring) * connecting
        except TypeError:
            pass
        if isinstance(P, SkewPolynomialRing):
            if self.__is_sparse and not P.is_sparse():
                return False
            if P.variable_name() == self.variable_name():
                return base_ring.has_coerce_map_from(P.base_ring())
def _repr_(self):
s = "Skew Polynomial Ring in %s over %s twisted by %s" % (self.variable_name(),
self.base_ring(),
self._map._repr_short())
if self.is_sparse():
s = "Sparse " + s
return s
def _latex_(self):
from sage.misc.latex import latex
return "%s[%s,%s]" % (latex(self.base_ring()), self.latex_variable_names()[0],
latex(self._map))
def change_var(self, var):
from sage.rings.polynomial.skew_polynomial_ring import SkewPolynomialRing
return SkewPolynomialRing(self.base_ring(), self._map, names=var,
sparse=self.__is_sparse)
def characteristic(self):
return self.base_ring().characteristic()
@cached_method
def twist_map(self, n=1):
try:
return self._map ** n
except TypeError as e:
if n < 0:
raise NotImplementedError("inversion of the twist map %s" % self._map)
else:
raise ValueError("Unexpected error in iterating the twist map: %s", e)
    @cached_method
    def gen(self, n=0):
        """Return the generator (the variable) of this univariate ring.

        Only index 0 is valid.
        """
        if n != 0:
            raise IndexError("generator %s not defined" % n)
        # The generator is the monomial x, i.e. coefficient list [0, 1].
        return self.Element(self, [0,1])
    # ``parameter`` is an alias of ``gen``.
    parameter = gen
def gens_dict(self):
return dict(zip(self.variable_names(), self.gens()))
def is_finite(self):
R = self.base_ring()
return R.is_finite() and R.order() == 1
def is_exact(self):
return self.base_ring().is_exact()
def is_sparse(self):
return self.__is_sparse
def ngens(self):
return 1
    def random_element(self, degree=2, monic=False, *args, **kwds):
        """Return a random skew polynomial.

        ``degree`` may be an int or a (min_degree, max_degree) pair, in
        which case the degree is drawn from that range.  With ``monic``
        the leading coefficient is one and the degree is exactly
        ``degree``; otherwise all degree+1 coefficients are random, so
        the actual degree can be lower when the leading draw is zero.
        Extra arguments are forwarded to the base ring's random_element.
        """
        R = self.base_ring()
        if isinstance(degree, (list, tuple)):
            if len(degree) != 2:
                raise ValueError("degree argument must be an integer or a tuple of 2 integers (min_degree, max_degree)")
            if degree[0] > degree[1]:
                raise ValueError("first degree argument must be less or equal to the second")
            degree = randint(*degree)
        if monic:
            return self([R.random_element(*args, **kwds) for _ in range(degree)] + [R.one()])
        else:
            return self([R.random_element(*args, **kwds) for _ in range(degree+1)])
def is_commutative(self):
return self.twist_map().is_identity()
def minimal_vanishing_polynomial(self, eval_pts):
return _minimal_vanishing_polynomial(_base_ring_to_fraction_field(self), eval_pts)
    def lagrange_polynomial(self, points):
        """Return the skew polynomial interpolating the given ``points``.

        ``points`` is a sequence of (evaluation point, value) pairs.
        The evaluation points must be pairwise distinct, and any value
        attached to the zero point must itself be zero.
        """
        l = len(points)
        if not all( len(pair) == 2 for pair in points ):
            raise TypeError("supplied points must be pairs of elements of base ring")
        eval_pts = [ x for (x,_) in points ]
        values = [ y for (_,y) in points ]
        if l > len(set(eval_pts)):
            raise TypeError("the evaluation points must be distinct")
        # Interpolating through zero is only consistent with the value zero.
        zero_i = [ i for i in range(l) if eval_pts[i].is_zero() ]
        if zero_i and not values[zero_i[0]].is_zero():
            raise TypeError("a skew polynomial always evaluates to 0 at 0, but a non-zero value was requested.")
        return _lagrange_polynomial(_base_ring_to_fraction_field(self), eval_pts, values)
e_order(SkewPolynomialRing):
import sage.rings.polynomial.skew_polynomial_finite_order
Element = sage.rings.polynomial.skew_polynomial_finite_order.SkewPolynomial_finite_order_dense
    def __init__(self, base_ring, twist_map, name, sparse, category=None):
        """Initialize a skew polynomial ring whose twist map has finite order.

        Precomputes the order of the twist map, its fixed field (with
        the embedding into the base ring), and a working center.  Raises
        NotImplementedError when no usable central variable name is
        found within WORKING_CENTER_MAX_TRIES attempts.
        """
        SkewPolynomialRing.__init__(self, base_ring, twist_map, name, sparse, category)
        self._order = twist_map.order()
        (self._constants, self._embed_constants) = twist_map.fixed_field()
        # Cache of created centers, keyed by central variable name.
        self._center = { }
        self._center_variable_name = 'z'
        # Try successive fallback names (z, then z0_, z1_, ...) until the
        # center can be created without a coercion/name clash.
        for i in range(WORKING_CENTER_MAX_TRIES):
            try:
                self._working_center = self.center()
                self._center_variable_name = None
                break
            except ValueError:
                self._center_variable_name = "z%s_" % i
        if self._center_variable_name is not None:
            raise NotImplementedError("unable to create the center")
def center(self, name=None, names=None, default=False):
if name is not None and names is not None:
raise ValueError
if names is None:
if name is None:
name = self._center_variable_name
if name is None:
name = 'z'
names = (name,)
names = normalize_names(1, names)
name = names[0]
if name in self._center:
center = self._center[name]
else:
center = PolynomialRing(self._constants, names)
embed = SkewPolynomialCenterInjection(center, self, self._embed_constants, self._order)
try:
assert not self.has_coerce_map_from(center)
self.register_coercion(embed)
center.register_conversion(embed.section())
except AssertionError:
raise ValueError("creation of coercion map fails; consider using another variable name")
self._center[name] = center
if default or (self._center_variable_name is None):
self._center_variable_name = name
return center
| true | true |
1c309391d4048d53049c80e82239e05e732f278d | 7,540 | py | Python | CreateTestbeds.py | itheodoridis/itheodoridis-createpyatstestbedsfromPrimeInfrastructure | a157f6fac40d6079a0399fdba01f8706db74e746 | [
"MIT"
] | 1 | 2020-06-01T11:15:58.000Z | 2020-06-01T11:15:58.000Z | CreateTestbeds.py | itheodoridis/itheodoridis-createpyatstestbedsfromPrimeInfrastructure | a157f6fac40d6079a0399fdba01f8706db74e746 | [
"MIT"
] | null | null | null | CreateTestbeds.py | itheodoridis/itheodoridis-createpyatstestbedsfromPrimeInfrastructure | a157f6fac40d6079a0399fdba01f8706db74e746 | [
"MIT"
] | 1 | 2020-07-04T12:54:54.000Z | 2020-07-04T12:54:54.000Z | """
Create testbeds for Cisco PyATS from device groups (e.g. switches) managed by Cisco Prime Infrastructure. The code in this repo is the result of joint work by Ioannis Theodoridis and Katerina Dardoufa (https://github.com/kdardoufa), engineers, co-workers, and friends. The original code, which contains the idea of querying an Enterprise's Prime Infrastructure server for active network devices so that the device list and attributes can be used for various purposes, lives in two repositories created by Katerina Dardoufa:
https://github.com/kdardoufa/DeviceInfo_from_PI
https://github.com/kdardoufa/CollectIP
The code in this repo creates Cisco PyATS testbeds for active network devices, one per location. It is limited to switches as the device type, but it can easily be adjusted to include other types. Compared to the initial code by Katerina Dardoufa, it queries the PI server for the full device list directly instead of going through the device groups, as the main goal is speed.
Indeed, the code should complete a full run in a few seconds for a few hundred devices.
"""
import requests
import json
import logging
from requests.auth import HTTPBasicAuth
import time
import yaml
from credentials import dev_username, dev_password, dev_enablepass
from primeapidata import PI_ADDRESS, USERNAME, PASSWORD
# this line is used to get rid of the fact that your server probably has a self signed certificate that you can't verify in code.
requests.packages.urllib3.disable_warnings()
# this line as well as the other logging commands create logs to help you verify what the code did. They are not necessary and can be removed from the code (don't forget to remove all instances)
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p',
filename='CreateTestbeds.log', level=logging.INFO)
# this function gets the list of devices from prime infrastructure as a list of dictionaries.
def getSimpleDevicesList():
    """Fetch reachable, non-NX-OS switches from Prime Infrastructure.

    Returns a list of device dictionaries (the ``devicesDTO`` payloads).
    Raises requests.HTTPError on an unsuccessful HTTP response.
    """
    DeviceList = []
    logging.info(" - Getting all devices url list")
    url = "https://"+PI_ADDRESS+"/webacs/api/v4/data/Devices.json?.full=true&.maxResults=1000"
    # NOTE: verify=False skips TLS validation because the PI server uses a
    # self-signed certificate.
    response = requests.get(url, auth=HTTPBasicAuth(USERNAME, PASSWORD), verify=False, timeout=60)
    # Fail fast on HTTP errors instead of crashing later on a bad payload.
    response.raise_for_status()
    r_json = response.json()
    for entity in r_json['queryResponse']['entity']:
        device = entity["devicesDTO"]
        # Keep only reachable switches and exclude NX-OS platforms.
        if (device["reachability"] == "REACHABLE") and (
                device["productFamily"] == "Switches and Hubs") and ("nx-os" not in device["softwareType"].lower()):
            DeviceList.append(device)
    logging.info(" - Got all devices in list of dictionaries")
    return (DeviceList)
# this creates a list of the separate locations defined in prime infrastructure for the devices already in the list of devices.
def getLocationsList(DevList):
    """Return the unique (stripped) device locations, in first-seen order."""
    logging.info(" - Getting all different locations")
    unique_locations = []
    for dev in DevList:
        loc = dev["location"].strip()
        if loc in unique_locations:
            continue
        unique_locations.append(loc)
        logging.info(f" - Appended location {loc}")
    return unique_locations
# this functions creates the testbeds in files per location (one testbed for each location defined in the previous function).
def createTestbeds(DevList, LocList):
    """Write one PyATS testbed YAML file (<location>.yaml) per location.

    DevList: device dictionaries as returned by getSimpleDevicesList().
    LocList: unique location names as returned by getLocationsList().

    Old platforms (2950/3550/3750) are reached over telnet; everything
    else over ssh with legacy key-exchange/cipher options enabled.
    """
    Devs_per_loc = dict()
    for location in LocList:
        Devs_per_loc[location] = []
    # Decide protocol/port and normalize the device name for each device.
    for device in DevList:
        location = device["location"].strip()
        if ("2950" in device["deviceType"]) or ("3550" in device["deviceType"]) or ("3750" in device["deviceType"]):
            deviceProtocol = "telnet"
            devicePort = "23"
            # NOTE(review): the telnet branch keeps the full device name while
            # the ssh branch strips the domain suffix -- confirm this asymmetry.
            deviceName = device["deviceName"]
        else:
            deviceProtocol = "ssh"
            devicePort = "22"
            # Strip the domain suffix, otherwise PyATS does not recognize the hostname.
            divdevname = device["deviceName"].split(".")
            deviceName = divdevname[0]
        deviceIpAddress = device["ipAddress"]
        deviceOS = device["softwareType"].lower()
        devdict = {
            "deviceName": deviceName,
            "os": deviceOS,
            "type": "switch",
            "ip": deviceIpAddress,
            "protocol": deviceProtocol,
            "port": devicePort
        }
        Devs_per_loc[location].append(devdict)
    logging.info(" - Creating Testbeds")
    for location in LocList:
        # Header block with the default and enable credentials.
        initial_string = (f"testbed:\n"
                          f"  name: {location}\n"
                          f"  credentials:\n"
                          f"    default:\n"
                          f"      username: {dev_username}\n"
                          f"      password: {dev_password}\n"
                          f"    enable:\n"
                          f"      password: {dev_enablepass}\n")
        testbed_filename = location + ".yaml"
        with open(testbed_filename, 'w') as writer:
            writer.write(initial_string)
            writer.write("\ndevices:\n")
            for device in Devs_per_loc[location]:
                writer.write(" "*2 + device["deviceName"] + ":\n")
                writer.write(" "*4 + "os: " + device["os"] + "\n")
                writer.write(" "*4 + "type: " + device["type"] + "\n")
                writer.write(" "*4 + "connections:\n")
                writer.write(" "*6 + "console:\n")
                writer.write(" "*8 + "ip: " + device["ip"] + "\n")
                writer.write(" "*8 + "protocol: " + device["protocol"] + "\n")
                writer.write(" "*8 + "port: " + device["port"] + "\n")
                # Old IOS builds only offer legacy kex algorithms and ciphers.
                if device["protocol"] == "ssh":
                    writer.write(" "*8 + "ssh_options: -o KexAlgorithms=+diffie-hellman-group1-sha1 -c aes128-cbc,3des-cbc,aes192-cbc,aes256-cbc\n")
                if device["protocol"] == "telnet":
                    writer.write(" "*8 + "settings:\n")
                    writer.write(" "*10 + "ESCAPE_CHAR_CHATTY_TERM_WAIT: 0.4\n")
                    writer.write(" "*10 + "ESCAPE_CHAR_PROMPT_WAIT: 0.4\n")
                writer.write(" "*4 + "credentials:\n")
                writer.write(" "*6 + "default:\n")
                writer.write(" "*8 + f"username: {dev_username}\n")
                writer.write(" "*8 + f"password: {dev_password}\n")
                writer.write(" "*6 + "enable:\n")
                # BUG FIX: the per-device enable password previously wrote
                # dev_password; use the enable password, as in the header block.
                writer.write(" "*8 + f"password: {dev_enablepass}\n")
    return
# End of function
# Main Function
def main():
    """Entry point: pull devices from PI and emit one testbed per location."""
    devices = getSimpleDevicesList()
    locations = getLocationsList(devices)
    createTestbeds(devices, locations)
    logging.info(" - All testbeds have been created.\nEND")
    return ()
if __name__ == "__main__":
main()
| 52.727273 | 544 | 0.621485 |
import requests
import json
import logging
from requests.auth import HTTPBasicAuth
import time
import yaml
from credentials import dev_username, dev_password, dev_enablepass
from primeapidata import PI_ADDRESS, USERNAME, PASSWORD
requests.packages.urllib3.disable_warnings()
# this line as well as the other logging commands create logs to help you verify what the code did. They are not necessary and can be removed from the code (don't forget to remove all instances)
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p',
filename='CreateTestbeds.log', level=logging.INFO)
def getSimpleDevicesList():
DeviceList = []
logging.info(" - Getting all devices url list")
url = "https://"+PI_ADDRESS+"/webacs/api/v4/data/Devices.json?.full=true&.maxResults=1000"
response = requests.get(url, auth=HTTPBasicAuth(USERNAME, PASSWORD), verify=False)
r_json = response.json()
for entity in r_json['queryResponse']['entity']:
device = entity["devicesDTO"]
if (device["reachability"] == "REACHABLE") and (
device["productFamily"] == "Switches and Hubs") and ("nx-os" not in device["softwareType"].lower()):
DeviceList.append(device)
logging.info(" - Got all devices in list of dictionaries")
return (DeviceList)
def getLocationsList(DevList):
    """Return the unique (stripped) device locations, in first-seen order."""
    logging.info(" - Getting all different locations")
    unique_locations = []
    for dev in DevList:
        loc = dev["location"].strip()
        if loc in unique_locations:
            continue
        unique_locations.append(loc)
        logging.info(f" - Appended location {loc}")
    return unique_locations
def createTestbeds(DevList, LocList):
    """Write one PyATS testbed YAML file (<location>.yaml) per location.

    DevList: device dictionaries as returned by getSimpleDevicesList().
    LocList: unique location names as returned by getLocationsList().

    Old platforms (2950/3550/3750) are reached over telnet; everything
    else over ssh with legacy key-exchange/cipher options enabled.
    """
    Devs_per_loc = dict()
    for location in LocList:
        Devs_per_loc[location] = []
    # Decide protocol/port and normalize the device name for each device.
    for device in DevList:
        location = device["location"].strip()
        if ("2950" in device["deviceType"]) or ("3550" in device["deviceType"]) or ("3750" in device["deviceType"]):
            deviceProtocol = "telnet"
            devicePort = "23"
            # NOTE(review): the telnet branch keeps the full device name while
            # the ssh branch strips the domain suffix -- confirm this asymmetry.
            deviceName = device["deviceName"]
        else:
            deviceProtocol = "ssh"
            devicePort = "22"
            # Strip the domain suffix, otherwise PyATS does not recognize the hostname.
            divdevname = device["deviceName"].split(".")
            deviceName = divdevname[0]
        deviceIpAddress = device["ipAddress"]
        deviceOS = device["softwareType"].lower()
        devdict = {
            "deviceName": deviceName,
            "os": deviceOS,
            "type": "switch",
            "ip": deviceIpAddress,
            "protocol": deviceProtocol,
            "port": devicePort
        }
        Devs_per_loc[location].append(devdict)
    logging.info(" - Creating Testbeds")
    for location in LocList:
        # Header block with the default and enable credentials.
        initial_string = (f"testbed:\n"
                          f"  name: {location}\n"
                          f"  credentials:\n"
                          f"    default:\n"
                          f"      username: {dev_username}\n"
                          f"      password: {dev_password}\n"
                          f"    enable:\n"
                          f"      password: {dev_enablepass}\n")
        testbed_filename = location + ".yaml"
        with open(testbed_filename, 'w') as writer:
            writer.write(initial_string)
            writer.write("\ndevices:\n")
            for device in Devs_per_loc[location]:
                writer.write(" "*2 + device["deviceName"] + ":\n")
                writer.write(" "*4 + "os: " + device["os"] + "\n")
                writer.write(" "*4 + "type: " + device["type"] + "\n")
                writer.write(" "*4 + "connections:\n")
                writer.write(" "*6 + "console:\n")
                writer.write(" "*8 + "ip: " + device["ip"] + "\n")
                writer.write(" "*8 + "protocol: " + device["protocol"] + "\n")
                writer.write(" "*8 + "port: " + device["port"] + "\n")
                # Old IOS builds only offer legacy kex algorithms and ciphers.
                if device["protocol"] == "ssh":
                    writer.write(" "*8 + "ssh_options: -o KexAlgorithms=+diffie-hellman-group1-sha1 -c aes128-cbc,3des-cbc,aes192-cbc,aes256-cbc\n")
                if device["protocol"] == "telnet":
                    writer.write(" "*8 + "settings:\n")
                    writer.write(" "*10 + "ESCAPE_CHAR_CHATTY_TERM_WAIT: 0.4\n")
                    writer.write(" "*10 + "ESCAPE_CHAR_PROMPT_WAIT: 0.4\n")
                writer.write(" "*4 + "credentials:\n")
                writer.write(" "*6 + "default:\n")
                writer.write(" "*8 + f"username: {dev_username}\n")
                writer.write(" "*8 + f"password: {dev_password}\n")
                writer.write(" "*6 + "enable:\n")
                # BUG FIX: the per-device enable password previously wrote
                # dev_password; use the enable password, as in the header block.
                writer.write(" "*8 + f"password: {dev_enablepass}\n")
    return
def main():
    """Entry point: pull devices from PI and emit one testbed per location."""
    devices = getSimpleDevicesList()
    locations = getLocationsList(devices)
    createTestbeds(devices, locations)
    logging.info(" - All testbeds have been created.\nEND")
    return ()
if __name__ == "__main__":
main()
| true | true |
1c3094b8f18a2b6367f245419720577ce0fd6595 | 765 | py | Python | mealpy/swarm_based/__init__.py | lamto20132223/mealpy | b25bd7548299d490cf2f40d3ecfc5dc87cf60994 | [
"MIT"
] | 2 | 2020-06-06T10:28:55.000Z | 2020-06-06T10:30:08.000Z | mealpy/swarm_based/__init__.py | lamto20132223/mealpy | b25bd7548299d490cf2f40d3ecfc5dc87cf60994 | [
"MIT"
] | null | null | null | mealpy/swarm_based/__init__.py | lamto20132223/mealpy | b25bd7548299d490cf2f40d3ecfc5dc87cf60994 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 09:49, 17/03/2020 %
# %
# Email: nguyenthieu2102@gmail.com %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieunguyen5991 %
#-------------------------------------------------------------------------------------------------------%
| 76.5 | 105 | 0.213072 | true | true | |
1c3094c476b34f8f266d4252f343e1581d58d74f | 3,649 | py | Python | package/tests/test_script_downloader.py | Omritk/CustomScript-Shell | 9369eba9738943802e72680d05a4a8b371b3e6fa | [
"Apache-2.0"
] | null | null | null | package/tests/test_script_downloader.py | Omritk/CustomScript-Shell | 9369eba9738943802e72680d05a4a8b371b3e6fa | [
"Apache-2.0"
] | null | null | null | package/tests/test_script_downloader.py | Omritk/CustomScript-Shell | 9369eba9738943802e72680d05a4a8b371b3e6fa | [
"Apache-2.0"
] | null | null | null | from unittest import TestCase
from cloudshell.cm.customscript.domain.script_executor import ExcutorConnectionError
from mock import patch, Mock
import mock
from cloudshell.cm.customscript.customscript_shell import CustomScriptShell
from cloudshell.cm.customscript.domain.reservation_output_writer import ReservationOutputWriter
from cloudshell.cm.customscript.domain.script_configuration import ScriptConfiguration
from cloudshell.cm.customscript.domain.script_file import ScriptFile
from cloudshell.cm.customscript.domain.script_downloader import ScriptDownloader, HttpAuth
from cloudshell.cm.customscript.domain.script_configuration import ScriptRepository
from tests.helpers import mocked_requests_get
from tests.helpers import Any
def print_logs(message):
    # Stand-in for logger.info in the tests: echo messages to stdout.
    print(message)
class TestScriptDownloader(TestCase):
    """Unit tests for ScriptDownloader against public and private repositories."""

    def setUp(self):
        self.logger = Mock()
        self.cancel_sampler = Mock()
        # Patch the logging session context so no real logging session is opened.
        self.logger_patcher = patch('cloudshell.cm.customscript.customscript_shell.LoggingSessionContext')
        self.logger_patcher.start()
        # BUG FIX: stop the patcher after every test; the original never
        # stopped it, leaking the patch into anything run after this class.
        self.addCleanup(self.logger_patcher.stop)
        self.script_repo = ScriptRepository()

    @mock.patch('cloudshell.cm.customscript.domain.script_downloader.requests.get', side_effect=mocked_requests_get)
    def test_download_as_public(self, mock_requests):
        """Public repo: no credentials are needed to download the script."""
        public_repo_url = 'https://raw.repocontentservice.com/SomeUser/SomePublicRepo/master/bashScript.sh'
        self.auth = HttpAuth('', '', '')
        self.logger.info = print_logs
        script_downloader = ScriptDownloader(self.logger, self.cancel_sampler)
        script_file = script_downloader.download(public_repo_url, self.auth)
        # The downloaded file keeps its repository name and content.
        self.assertEqual(script_file.name, "bashScript.sh")
        self.assertEqual(script_file.text, "SomeBashScriptContent")

    @mock.patch('cloudshell.cm.customscript.domain.script_downloader.requests.get', side_effect=mocked_requests_get)
    def test_download_as_private_with_token(self, mocked_requests_get):
        """Private repo: a valid token is enough to download the script."""
        private_repo_url = 'https://raw.repocontentservice.com/SomeUser/SomePrivateTokenRepo/master/bashScript.sh'
        self.auth = HttpAuth('', '', '551e48b030e1a9f334a330121863e48e43f58c55')
        self.logger.info = print_logs
        script_downloader = ScriptDownloader(self.logger, self.cancel_sampler)
        script_file = script_downloader.download(private_repo_url, self.auth)
        self.assertEqual(script_file.name, "bashScript.sh")
        self.assertEqual(script_file.text, "SomeBashScriptContent")

    @mock.patch('cloudshell.cm.customscript.domain.script_downloader.requests.get', side_effect=mocked_requests_get)
    def test_download_as_private_with_credentials_and_failed_token(self, mocked_requests_get):
        """Private repo: user/password fallback when the token is rejected.

        Note: this flow works for generic servers; GitHub itself requires a token.
        """
        private_repo_url = 'https://raw.repocontentservice.com/SomeUser/SomePrivateCredRepo/master/bashScript.sh'
        self.auth = HttpAuth('SomeUser', 'SomePassword', '551e48b030e1a9f334a330121863e48e43f0000')
        self.logger.info = print_logs
        script_downloader = ScriptDownloader(self.logger, self.cancel_sampler)
        script_file = script_downloader.download(private_repo_url, self.auth)
        self.assertEqual(script_file.name, "bashScript.sh")
        self.assertEqual(script_file.text, "SomeBashScriptContent")
| 49.986301 | 129 | 0.763223 | from unittest import TestCase
from cloudshell.cm.customscript.domain.script_executor import ExcutorConnectionError
from mock import patch, Mock
import mock
from cloudshell.cm.customscript.customscript_shell import CustomScriptShell
from cloudshell.cm.customscript.domain.reservation_output_writer import ReservationOutputWriter
from cloudshell.cm.customscript.domain.script_configuration import ScriptConfiguration
from cloudshell.cm.customscript.domain.script_file import ScriptFile
from cloudshell.cm.customscript.domain.script_downloader import ScriptDownloader, HttpAuth
from cloudshell.cm.customscript.domain.script_configuration import ScriptRepository
from tests.helpers import mocked_requests_get
from tests.helpers import Any
def print_logs(message):
    # Stand-in for logger.info in the tests: echo messages to stdout.
    print(message)
class TestScriptDownloader(TestCase):
def setUp(self):
self.logger = Mock()
self.cancel_sampler = Mock()
self.logger_patcher = patch('cloudshell.cm.customscript.customscript_shell.LoggingSessionContext')
self.logger_patcher.start()
self.script_repo = ScriptRepository()
pass
@mock.patch('cloudshell.cm.customscript.domain.script_downloader.requests.get', side_effect=mocked_requests_get)
def test_download_as_public(self, mock_requests):
public_repo_url = 'https://raw.repocontentservice.com/SomeUser/SomePublicRepo/master/bashScript.sh'
self.auth = HttpAuth('','','')
self.logger.info = print_logs
script_downloader = ScriptDownloader(self.logger, self.cancel_sampler)
script_file = script_downloader.download(public_repo_url, self.auth)
self.assertEqual(script_file.name, "bashScript.sh")
self.assertEqual(script_file.text, "SomeBashScriptContent")
@mock.patch('cloudshell.cm.customscript.domain.script_downloader.requests.get', side_effect=mocked_requests_get)
def test_download_as_private_with_token(self, mocked_requests_get):
private_repo_url = 'https://raw.repocontentservice.com/SomeUser/SomePrivateTokenRepo/master/bashScript.sh'
self.auth = HttpAuth('','','551e48b030e1a9f334a330121863e48e43f58c55')
self.logger.info = print_logs
script_downloader = ScriptDownloader(self.logger, self.cancel_sampler)
script_file = script_downloader.download(private_repo_url, self.auth)
self.assertEqual(script_file.name, "bashScript.sh")
self.assertEqual(script_file.text, "SomeBashScriptContent")
@mock.patch('cloudshell.cm.customscript.domain.script_downloader.requests.get', side_effect=mocked_requests_get)
def test_download_as_private_with_credentials_and_failed_token(self, mocked_requests_get):
private_repo_url = 'https://raw.repocontentservice.com/SomeUser/SomePrivateCredRepo/master/bashScript.sh'
self.auth = HttpAuth('SomeUser','SomePassword','551e48b030e1a9f334a330121863e48e43f0000')
self.logger.info = print_logs
script_downloader = ScriptDownloader(self.logger, self.cancel_sampler)
script_file = script_downloader.download(private_repo_url, self.auth)
self.assertEqual(script_file.name, "bashScript.sh")
self.assertEqual(script_file.text, "SomeBashScriptContent")
| true | true |
1c3095385b8b49fb92617e32d3bf7877ae7b5bc6 | 1,966 | py | Python | Coding Theory/Huffman Coding/huffdecode.py | bnjasim/algorithms | 14dac34004118d5ed204bd33b26d5fac0c5f9c5a | [
"MIT"
] | null | null | null | Coding Theory/Huffman Coding/huffdecode.py | bnjasim/algorithms | 14dac34004118d5ed204bd33b26d5fac0c5f9c5a | [
"MIT"
] | null | null | null | Coding Theory/Huffman Coding/huffdecode.py | bnjasim/algorithms | 14dac34004118d5ed204bd33b26d5fac0c5f9c5a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
import sys
if len(sys.argv) < 2:
print("Format: python huffdecode.py binaryfile")
sys.exit(1)
# Decoding: Read the binary file
with open(sys.argv[1], 'rb') as fbin:
encoded_bytes = fbin.read()
if len(encoded_bytes) < 1:
print('Empty file. Exiting!')
sys.exit(0)
# Decoding: first byte is the code_block_offset
code_block_offset = encoded_bytes[0]
# The code block is stored from index 1 to code_block_offset
code_block = encoded_bytes[1: code_block_offset+1].decode('utf-8')
# print(code_block)
# Decoding: Remember the next byte is the offset value of the last byte
# We split the encoded_bytes to get just the code sequence alone
code_bytes = encoded_bytes[code_block_offset+1:]
offset = code_bytes[0]
encoded_text = ''
# Read from the next byte till the second last byte
for ebyte in code_bytes[1:len(code_bytes)-1]:
b = format(ebyte, '08b')
encoded_text += b
# The last byte may not have used full 8 bits; remove un-necessary zeros with the offset value
last_byte = code_bytes[len(code_bytes)-1]
formatter = '0' + str(offset) + 'b'
encoded_text += format(last_byte, formatter)
## Get back the code_map from the code_block (Actually reverse code_map)
code_map = {}
for code in code_block.split('\n'):
if (len(code)>1):
character = code.split(':')[0]
huffcode = code.split(':')[1]
code_map[huffcode] = character
# Decoding: Now decode the retireved binary sequence with the reverse code_map
# This is a greedy algorithm
def decoding(content, _lookup):
    """Greedily decode a Huffman bit string using the reverse code map.

    A code is accepted only when the remaining bits are empty or start
    with another valid code (the original lookahead check).  Raises
    Exception("Decoding error") when no code fits.
    """
    while content:
        chosen = None
        for code in _lookup:
            rest = content[len(code):]
            if content.startswith(code) and (not rest or any(rest.startswith(c) for c in _lookup)):
                chosen = code
                break
        if chosen is None:
            raise Exception("Decoding error")
        yield _lookup[chosen]
        content = content[len(chosen):]
print('The original encoded text is: ')
print(''.join(decoding(encoded_text, code_map)))
| 30.246154 | 146 | 0.700407 |
import sys
if len(sys.argv) < 2:
print("Format: python huffdecode.py binaryfile")
sys.exit(1)
with open(sys.argv[1], 'rb') as fbin:
encoded_bytes = fbin.read()
if len(encoded_bytes) < 1:
print('Empty file. Exiting!')
sys.exit(0)
code_block_offset = encoded_bytes[0]
code_block = encoded_bytes[1: code_block_offset+1].decode('utf-8')
code_bytes = encoded_bytes[code_block_offset+1:]
offset = code_bytes[0]
encoded_text = ''
for ebyte in code_bytes[1:len(code_bytes)-1]:
b = format(ebyte, '08b')
encoded_text += b
last_byte = code_bytes[len(code_bytes)-1]
formatter = '0' + str(offset) + 'b'
encoded_text += format(last_byte, formatter)
:
character = code.split(':')[0]
huffcode = code.split(':')[1]
code_map[huffcode] = character
def decoding(content, _lookup):
    """Greedily decode a Huffman bit string using the reverse code map.

    A code is accepted only when the remaining bits are empty or start
    with another valid code (the original lookahead check).  Raises
    Exception("Decoding error") when no code fits.
    """
    while content:
        chosen = None
        for code in _lookup:
            rest = content[len(code):]
            if content.startswith(code) and (not rest or any(rest.startswith(c) for c in _lookup)):
                chosen = code
                break
        if chosen is None:
            raise Exception("Decoding error")
        yield _lookup[chosen]
        content = content[len(chosen):]
print('The original encoded text is: ')
print(''.join(decoding(encoded_text, code_map)))
| true | true |
1c309551640b18802ccc35cf4c99806932297d4e | 5,897 | py | Python | aoc_2018/day_12/python/subterranean_sustainability.py | girip11/advent_of_code | b58f9a53d219bb3bef284af6b0ca59d2addd57bb | [
"MIT"
] | null | null | null | aoc_2018/day_12/python/subterranean_sustainability.py | girip11/advent_of_code | b58f9a53d219bb3bef284af6b0ca59d2addd57bb | [
"MIT"
] | 1 | 2021-09-29T04:20:48.000Z | 2021-09-29T04:20:48.000Z | aoc_2018/day_12/python/subterranean_sustainability.py | girip11/advent_of_code | b58f9a53d219bb3bef284af6b0ca59d2addd57bb | [
"MIT"
] | null | null | null | import re
import sys
from typing import Iterable, Iterator, List, Set, Tuple
PLANT = "#"
NO_PLANT = "."
def read_initial_state(init_state: str) -> str:
    """Extract the pot pattern from an ``initial state: ...`` input line."""
    cleaned = init_state.replace("initial state:", "")
    return cleaned.strip()
def read_rules(rules: Iterable[str]) -> Iterator[Tuple[str, str]]:
    """Parse growth rules of the form ``LLCRR => N``.

    Yields (pattern, result) pairs; blank or malformed lines are skipped.
    """
    rule_re = re.compile(r"([.|#]{5})\s=>\s([.|#]{1})")
    for line in rules:
        if not line:
            continue
        m = rule_re.match(line)
        if m is None:
            continue
        yield m.group(1), m.group(2)
def get_pot_slices(pots: str) -> Iterator[str]:
    """Yield the 5-pot LLCRR window centred on each pot of ``pots``.

    Pots outside the row are treated as empty (``"."``), per the puzzle:
    "No other pots currently contain plants."

    Fix: the original branch-per-edge slicing produced malformed 4-character
    windows for rows shorter than 4 pots (which can never match a 5-character
    rule).  Padding the row with two virtual empty pots on each side makes
    every window exactly 5 characters for any row length, and is identical to
    the old behaviour for rows of 4+ pots.
    """
    # Two virtual empty pots on each side cover the widest possible window.
    padded = ".." + pots + ".."
    for pot_num in range(len(pots)):
        yield padded[pot_num:pot_num + 5]
def trim_pot_row(current_gen_pots: str, pot_offset: int, threshold: int = 2) -> Tuple[str, int]:
    """Trim empty pots so at most ``threshold`` remain on either end.

    ``pot_offset`` is the pot number of the row's first character; it is
    increased when leading empty pots are removed (trimming the right end
    does not move pot 0, so the offset is untouched there).

    Fix: the original scanned with a bare index walk and raised ``IndexError``
    on a row containing no plant at all; such a row is now returned unchanged.

    Returns
    -------
    Tuple[str, int]
        The trimmed row and the adjusted pot offset.
    """
    first = current_gen_pots.find("#")
    if first == -1:
        # No plants anywhere: nothing meaningful to trim.
        return (current_gen_pots, pot_offset)
    if first > threshold:
        current_gen_pots = current_gen_pots[first - threshold:]
        pot_offset += first - threshold
    last = current_gen_pots.rfind("#")
    trailing = len(current_gen_pots) - 1 - last
    if trailing > threshold:
        current_gen_pots = current_gen_pots[: last + threshold + 1]
    return (current_gen_pots, pot_offset)
def compute_next_generation(
    current_gen_pots: str, plant_in_pot_rules: Set[str], pot_offset: int
) -> Tuple[str, int]:
    """Apply the growth rules once and return the next row of pots.

    ``pot_offset`` is the pot number of ``current_gen_pots[0]``.  It is
    decremented when the row grows to the left and adjusted again by
    ``trim_pot_row`` when leading empty pots are cut off; growing to the
    right never moves pot 0, which is why only the left expansion touches
    the offset.

    Returns
    -------
    Tuple[str, int]
        The next-generation row and its (possibly shifted) pot offset.
    """
    next_gen_pots = []
    # Grow the row to the left when a plant sits in one of the two left-most
    # pots, so edge windows see their real (empty) neighbours.
    if PLANT in [current_gen_pots[0], current_gen_pots[1]]:
        empty_pots_to_pad = NO_PLANT * (2 if current_gen_pots[0] == PLANT else 1)
        current_gen_pots = f"{empty_pots_to_pad}{current_gen_pots}"
        pot_offset -= len(empty_pots_to_pad)
    # Symmetric expansion on the right; no offset adjustment needed (see above).
    if PLANT in [current_gen_pots[-1], current_gen_pots[-2]]:
        empty_pots_to_pad = NO_PLANT * (2 if current_gen_pots[-1] == PLANT else 1)
        current_gen_pots = f"{current_gen_pots}{empty_pots_to_pad}"
    for current_pot_with_neighbors in get_pot_slices(current_gen_pots):
        if current_pot_with_neighbors in plant_in_pot_rules:
            next_gen_pots.append(PLANT)  # a matching rule grows a plant here
        else:
            next_gen_pots.append(NO_PLANT)  # no matching rule: pot stays empty
    # Normalise: keep at most two empty pots on either end.
    current_gen_pots, pot_offset = trim_pot_row("".join(next_gen_pots), pot_offset)
    return (current_gen_pots, pot_offset)
def calc_pot_post_generations(
    generations: int, initial_state: str, plant_in_pot_rules: Set[str]
) -> Tuple[str, int]:
    """Evolve the row of pots for ``generations`` generations.

    The plant pattern eventually stabilises (possibly drifting left or right
    at a constant rate).  As soon as two consecutive generations produce the
    same row, the loop stops and the remaining generations are accounted for
    by extrapolating the per-generation offset drift — this is what makes the
    50-billion-generation part of the puzzle tractable.

    Fix: ``gen`` is now initialised before the loop so that
    ``generations == 0`` returns the initial state instead of raising
    ``NameError`` on the post-loop check.

    Returns
    -------
    Tuple[str, int]
        The final row of pots and the pot number of its first character.
    """
    current_gen_pots = initial_state
    pot_offset = 0
    no_change_gens = 0   # consecutive generations with an identical row
    pot_offset_delta = 0  # offset drift per generation once stable
    gen = 0  # guard: keeps the post-loop check valid when generations == 0
    for gen in range(1, generations + 1):
        next_gen_pots, new_pot_offset = compute_next_generation(
            current_gen_pots, plant_in_pot_rules, pot_offset
        )
        if next_gen_pots == current_gen_pots:
            no_change_gens += 1
        else:
            no_change_gens = 0
            current_gen_pots = next_gen_pots
            # positive delta: the pattern drifts right; negative: left
            pot_offset_delta = new_pot_offset - pot_offset
            pot_offset = new_pot_offset
        if no_change_gens >= 1:
            break
    if gen < generations:
        # Stopped early on a stable pattern: extrapolate the drift for the
        # skipped generations.
        # NOTE(review): this reuses the drift of the last *changing*
        # generation and discards the steady generation's own offset step —
        # kept as-is to preserve the original answers; verify before changing.
        pot_offset += pot_offset_delta * (generations - gen)
    return (current_gen_pots, pot_offset)
def parse_input_data(input_data: List[str]) -> Tuple[str, Set[str]]:
    """Split raw input lines into the initial pot row and the grow-rules.

    Only rules whose outcome is a plant are kept: every pattern absent from
    the returned set implicitly produces an empty pot.
    """
    initial_state = read_initial_state(input_data[0])
    plant_in_pot_rules: Set[str] = {
        pattern
        for pattern, outcome in read_rules(input_data[1:])
        if outcome == PLANT
    }
    return (initial_state, plant_in_pot_rules)
def subterranean_sustainability(
    initial_state: str, plant_in_pot_rules: Set[str], generations: int
) -> int:
    """Return the sum of the pot numbers that hold a plant after
    ``generations`` generations (the puzzle's answer)."""
    final_pots, offset = calc_pot_post_generations(
        generations, initial_state, plant_in_pot_rules
    )
    total = 0
    for index, pot in enumerate(final_pots):
        if pot == PLANT:
            total += index + offset
    return total
def main(_: List[str]) -> None:
    """Read the puzzle input from stdin and print the part-1/part-2 answers.

    The argv parameter is accepted for symmetry with the entry point but is
    deliberately unused — the input arrives on stdin.
    """
    input_data = sys.stdin.readlines()
    initial_state, plant_in_pot_rules = parse_input_data(input_data)
    # part 1: 20 generations; part 2: 50 billion generations
    for gen in [20, 50000000000]:
        pot_number_sum = subterranean_sustainability(initial_state, plant_in_pot_rules, gen)
        print(f"Sum after {gen} generations: {pot_number_sum}")
if __name__ == "__main__":
    main(sys.argv)
| 32.761111 | 96 | 0.652196 | import re
import sys
from typing import Iterable, Iterator, List, Set, Tuple
PLANT = "#"
NO_PLANT = "."
def read_initial_state(init_state: str) -> str:
return init_state.replace("initial state:", "").strip()
def read_rules(rules: Iterable[str]) -> Iterator[Tuple[str, str]]:
pattern: re.Pattern = re.compile(r"([.|#]{5})\s=>\s([.|#]{1})")
for rule in rules:
if len(rule) > 0:
match = pattern.match(rule)
if match is not None:
llcrr, pot_status = match.groups()
yield (llcrr, pot_status)
def get_pot_slices(pots: str) -> Iterator[str]:
last_pot = len(pots)
for pot_num in range(0, last_pot):
if pot_num in [0, 1]:
yield ".."[pot_num:] + pots[: pot_num + 3]
elif pot_num in [last_pot - 2, last_pot - 1]:
yield pots[pot_num - 2 :] + ".."[last_pot - pot_num - 1 :]
else:
yield pots[pot_num - 2 : pot_num + 3]
def trim_pot_row(current_gen_pots: str, pot_offset: int, threshold: int = 2) -> Tuple[str, int]:
i = 0
while current_gen_pots[i] == NO_PLANT:
i += 1
if i > threshold:
current_gen_pots = current_gen_pots[i - threshold :]
pot_offset += i - threshold
i = len(current_gen_pots) - 1
while current_gen_pots[i] == NO_PLANT:
i -= 1
if len(current_gen_pots) - 1 - i > threshold:
current_gen_pots = current_gen_pots[: i + threshold + 1]
return (current_gen_pots, pot_offset)
def compute_next_generation(
current_gen_pots: str, plant_in_pot_rules: Set[str], pot_offset: int
) -> Tuple[str, int]:
next_gen_pots = []
if PLANT in [current_gen_pots[0], current_gen_pots[1]]:
empty_pots_to_pad = NO_PLANT * (2 if current_gen_pots[0] == PLANT else 1)
current_gen_pots = f"{empty_pots_to_pad}{current_gen_pots}"
pot_offset -= len(empty_pots_to_pad)
if PLANT in [current_gen_pots[-1], current_gen_pots[-2]]:
empty_pots_to_pad = NO_PLANT * (2 if current_gen_pots[-1] == PLANT else 1)
current_gen_pots = f"{current_gen_pots}{empty_pots_to_pad}"
for current_pot_with_neighbors in get_pot_slices(current_gen_pots):
if current_pot_with_neighbors in plant_in_pot_rules:
next_gen_pots.append(PLANT)
else:
next_gen_pots.append(NO_PLANT)
current_gen_pots, pot_offset = trim_pot_row("".join(next_gen_pots), pot_offset)
return (current_gen_pots, pot_offset)
def calc_pot_post_generations(
generations: int, initial_state: str, plant_in_pot_rules: Set[str]
) -> Tuple[str, int]:
current_gen_pots = initial_state
pot_offset = 0
no_change_gens = 0
pot_offset_delta = 0
for gen in range(1, generations + 1):
next_gen_pots, new_pot_offset = compute_next_generation(
current_gen_pots, plant_in_pot_rules, pot_offset
)
if next_gen_pots == current_gen_pots:
no_change_gens += 1
else:
no_change_gens = 0
current_gen_pots = next_gen_pots
pot_offset_delta = new_pot_offset - pot_offset
pot_offset = new_pot_offset
if no_change_gens >= 1:
break
if gen < generations:
pot_offset += pot_offset_delta * (generations - gen)
return (current_gen_pots, pot_offset)
def parse_input_data(input_data: List[str]) -> Tuple[str, Set[str]]:
initial_state = read_initial_state(input_data[0])
plant_in_pot_rules: Set[str] = set()
for llcrr, pot_status in read_rules(input_data[1:]):
if pot_status == PLANT:
plant_in_pot_rules.add(llcrr)
return (initial_state, plant_in_pot_rules)
def subterranean_sustainability(
initial_state: str, plant_in_pot_rules: Set[str], generations: int
) -> int:
current_gen_pots, pot_offset = calc_pot_post_generations(
generations, initial_state, plant_in_pot_rules
)
return sum((i + pot_offset) for i, pot in enumerate(current_gen_pots) if pot == PLANT)
def main(_: List[str]) -> None:
input_data = sys.stdin.readlines()
initial_state, plant_in_pot_rules = parse_input_data(input_data)
for gen in [20, 50000000000]:
pot_number_sum = subterranean_sustainability(initial_state, plant_in_pot_rules, gen)
print(f"Sum after {gen} generations: {pot_number_sum}")
if __name__ == "__main__":
main(sys.argv)
| true | true |
1c30957dda167a64596c82f721902da9f1f0b665 | 2,765 | py | Python | ooobuild/lo/xml/dom/dom_exception.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/xml/dom/dom_exception.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/xml/dom/dom_exception.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Exception Class
# this is a auto generated file generated by Cheetah
# Namespace: com.sun.star.xml.dom
# Libre Office Version: 7.3
from ooo.oenv.env_const import UNO_NONE
import typing
from ...uno.exception import Exception as Exception_85530a09
from ...uno.x_interface import XInterface as XInterface_8f010a43
from .dom_exception_type import DOMExceptionType as DOMExceptionType_5c70df8
class DOMException(Exception_85530a09):
    """
    Exception Class

    Encapsulates the details of an XML parse error or warning.

    See Also:
        `API DOMException <https://api.libreoffice.org/docs/idl/ref/exceptioncom_1_1sun_1_1star_1_1xml_1_1dom_1_1DOMException.html>`_
    """

    __ooo_ns__: str = 'com.sun.star.xml.dom'
    __ooo_full_ns__: str = 'com.sun.star.xml.dom.DOMException'
    __ooo_type_name__: str = 'exception'
    __pyunointerface__: str = 'com.sun.star.xml.dom.DOMException'
    __pyunostruct__: str = 'com.sun.star.xml.dom.DOMException'

    typeName: str = 'com.sun.star.xml.dom.DOMException'
    """Literal Constant ``com.sun.star.xml.dom.DOMException``"""

    def __init__(self, Message: typing.Optional[str] = '', Context: typing.Optional[XInterface_8f010a43] = None, Code: typing.Optional[DOMExceptionType_5c70df8] = DOMExceptionType_5c70df8.DOMSTRING_SIZE_ERR) -> None:
        """
        Constructor

        Arguments:
            Message (str, optional): Message value.
            Context (XInterface, optional): Context value.
            Code (DOMExceptionType, optional): Code value.
        """
        self._init(Message=Message, Context=Context, Code=Code)

    def _init(self, **kwargs) -> None:
        # ``Code`` is stored on this class; every remaining keyword is
        # delegated to the parent exception's initialiser.
        self._code = kwargs["Code"]
        parent_kwargs = {key: value for key, value in kwargs.items() if key != 'Code'}
        super()._init(**parent_kwargs)

    @property
    def Code(self) -> DOMExceptionType_5c70df8:
        """The DOM error code describing the kind of failure."""
        return self._code

    @Code.setter
    def Code(self, value: DOMExceptionType_5c70df8) -> None:
        self._code = value
__all__ = ['DOMException']
| 33.313253 | 216 | 0.680651 |
from ooo.oenv.env_const import UNO_NONE
import typing
from ...uno.exception import Exception as Exception_85530a09
from ...uno.x_interface import XInterface as XInterface_8f010a43
from .dom_exception_type import DOMExceptionType as DOMExceptionType_5c70df8
class DOMException(Exception_85530a09):
__ooo_ns__: str = 'com.sun.star.xml.dom'
__ooo_full_ns__: str = 'com.sun.star.xml.dom.DOMException'
__ooo_type_name__: str = 'exception'
__pyunointerface__: str = 'com.sun.star.xml.dom.DOMException'
__pyunostruct__: str = 'com.sun.star.xml.dom.DOMException'
typeName: str = 'com.sun.star.xml.dom.DOMException'
def __init__(self, Message: typing.Optional[str] = '', Context: typing.Optional[XInterface_8f010a43] = None, Code: typing.Optional[DOMExceptionType_5c70df8] = DOMExceptionType_5c70df8.DOMSTRING_SIZE_ERR) -> None:
kargs = {
"Message": Message,
"Context": Context,
"Code": Code,
}
self._init(**kargs)
def _init(self, **kwargs) -> None:
self._code = kwargs["Code"]
inst_keys = ('Code',)
kargs = kwargs.copy()
for key in inst_keys:
del kargs[key]
super()._init(**kargs)
@property
def Code(self) -> DOMExceptionType_5c70df8:
return self._code
@Code.setter
def Code(self, value: DOMExceptionType_5c70df8) -> None:
self._code = value
__all__ = ['DOMException']
| true | true |
1c3096042871a4a99b8f7ca6d1a29ce31831da69 | 6,318 | py | Python | mbeddr2C_MM/transformation_from_eclipse/Hlayer4rule2.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 3 | 2017-06-02T19:26:27.000Z | 2021-06-14T04:25:45.000Z | mbeddr2C_MM/transformation_from_eclipse/Hlayer4rule2.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 8 | 2016-08-24T07:04:07.000Z | 2017-05-26T16:22:47.000Z | mbeddr2C_MM/transformation_from_eclipse/Hlayer4rule2.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 1 | 2019-10-31T06:00:23.000Z | 2019-10-31T06:00:23.000Z | from core.himesis import Himesis
import uuid
class Hlayer4rule2(Himesis):

    def __init__(self):
        """Build the himesis graph representing the DSLTrans rule layer4rule2."""
        # Mark the instance as an already-compiled rule graph.
        self.is_compiled = True
        super(Hlayer4rule2, self).__init__(name='Hlayer4rule2', num_nodes=0, edges=[])
        # Graph-level attributes.
        self["mm__"] = ['HimesisMM']
        self["name"] = """layer4rule2"""
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS, 'layer4rule2')
        # One (mm__, attr1) pair per vertex, in vertex-index order; ``None``
        # means the vertex carries no ``attr1`` attribute.  For association
        # vertices (directLink_*) ``attr1`` is the association name.
        node_specs = [
            ('MatchModel', None),             # 0: the single match model
            ('ApplyModel', None),             # 1: the apply model
            ('paired_with', None),            # 2: links match to apply model
            ('ImplementationModule', '+'),    # 3: match class
            ('match_contains', None),         # 4
            ('TestCase', '+'),                # 5: match class
            ('match_contains', None),         # 6
            ('ImplementationModule', '1'),    # 7: apply class
            ('apply_contains', None),         # 8
            ('Function', '1'),                # 9: apply class
            ('apply_contains', None),         # 10
            ('VoidType', '1'),                # 11: apply class
            ('apply_contains', None),         # 12
            ('StatementList', '1'),           # 13: apply class
            ('apply_contains', None),         # 14
            ('directLink_S', 'contents'),     # 15: match association
            ('directLink_T', 'contents'),     # 16: apply association
            ('directLink_T', 'type'),         # 17: apply association
            ('directLink_T', 'body'),         # 18: apply association
            ('backward_link', None),          # 19: traceability link
        ]
        for index, (mm_type, attr1) in enumerate(node_specs):
            self.add_node()
            self.vs[index]["mm__"] = mm_type
            if attr1 is not None:
                self.vs[index]["attr1"] = attr1
        # Edges wiring containment, associations and the backward link.
        self.add_edges([
            (0, 4), (4, 3),     # matchmodel contains ImplementationModule(3)
            (0, 6), (6, 5),     # matchmodel contains TestCase(5)
            (1, 8), (8, 7),     # applymodel contains ImplementationModule(7)
            (1, 10), (10, 9),   # applymodel contains Function(9)
            (1, 12), (12, 11),  # applymodel contains VoidType(11)
            (1, 14), (14, 13),  # applymodel contains StatementList(13)
            (3, 15), (15, 5),   # match: ImplementationModule --contents--> TestCase
            (7, 16), (16, 9),   # apply: ImplementationModule --contents--> Function
            (9, 17), (17, 11),  # apply: Function --type--> VoidType
            (9, 18), (18, 13),  # apply: Function --body--> StatementList
            (7, 19), (19, 3),   # backward link apply(7) -> match(3)
            (0, 2), (2, 1)      # matchmodel -paired_with-> applymodel
        ])
        # Attribute equations evaluated when the rule is applied.
        self["equations"] = [((7,'__ApplyAttribute'),('constant','ImplementationModule')), ((9,'name'),('concat',((3,'name'),('concat',(('constant','_'),(5,'name')))))), ((13,'__ApplyAttribute'),('constant','TestCaseFunctionStatements')), ]
| 42.402685 | 241 | 0.56062 | from core.himesis import Himesis
import uuid
class Hlayer4rule2(Himesis):
def __init__(self):
self.is_compiled = True
super(Hlayer4rule2, self).__init__(name='Hlayer4rule2', num_nodes=0, edges=[])
self["mm__"] = ['HimesisMM']
self["name"] = """layer4rule2"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'layer4rule2')
self.add_node()
self.vs[0]["mm__"] = """MatchModel"""
self.add_node()
self.vs[1]["mm__"] = """ApplyModel"""
self.add_node()
self.vs[2]["mm__"] = """paired_with"""
self.add_node()
self.vs[3]["mm__"] = """ImplementationModule"""
self.vs[3]["attr1"] = """+"""
self.add_node()
self.vs[4]["mm__"] = """match_contains"""
self.add_node()
self.vs[5]["mm__"] = """TestCase"""
self.vs[5]["attr1"] = """+"""
self.add_node()
self.vs[6]["mm__"] = """match_contains"""
self.add_node()
self.vs[7]["mm__"] = """ImplementationModule"""
self.vs[7]["attr1"] = """1"""
self.add_node()
self.vs[8]["mm__"] = """apply_contains"""
self.add_node()
self.vs[9]["mm__"] = """Function"""
self.vs[9]["attr1"] = """1"""
self.add_node()
self.vs[10]["mm__"] = """apply_contains"""
self.add_node()
self.vs[11]["mm__"] = """VoidType"""
self.vs[11]["attr1"] = """1"""
self.add_node()
self.vs[12]["mm__"] = """apply_contains"""
self.add_node()
self.vs[13]["mm__"] = """StatementList"""
self.vs[13]["attr1"] = """1"""
self.add_node()
self.vs[14]["mm__"] = """apply_contains"""
self.add_node()
self.vs[15]["attr1"] = """contents"""
self.vs[15]["mm__"] = """directLink_S"""
self.add_node()
self.vs[16]["attr1"] = """contents"""
self.vs[16]["mm__"] = """directLink_T"""
self.add_node()
self.vs[17]["attr1"] = """type"""
self.vs[17]["mm__"] = """directLink_T"""
self.add_node()
self.vs[18]["attr1"] = """body"""
self.vs[18]["mm__"] = """directLink_T"""
self.add_node()
self.vs[19]["mm__"] = """backward_link"""
self.add_edges([
(0,4),
(4,3),
(0,6),
(6,5),
(1,8),
(8,7),
(1,10),
(10,9),
(1,12),
(12,11),
(1,14),
(14,13),
(3,15),
(15,5),
(7,16),
(16,9),
(9,17),
(17,11),
(9,18),
(18,13),
(7,19),
(19,3),
(0,2),
(2,1)
])
self["equations"] = [((7,'__ApplyAttribute'),('constant','ImplementationModule')), ((9,'name'),('concat',((3,'name'),('concat',(('constant','_'),(5,'name')))))), ((13,'__ApplyAttribute'),('constant','TestCaseFunctionStatements')), ]
| true | true |
1c30964180cacfd3241c892dc582011aba093b85 | 629 | py | Python | tests/test_helpers.py | elifesciences/elife-cleaner | 886be5e0b5317729490a55a8661c1e00e5fc7226 | [
"MIT"
] | null | null | null | tests/test_helpers.py | elifesciences/elife-cleaner | 886be5e0b5317729490a55a8661c1e00e5fc7226 | [
"MIT"
] | 2 | 2021-02-25T01:48:00.000Z | 2021-05-27T01:21:40.000Z | tests/test_helpers.py | elifesciences/elife-cleaner | 886be5e0b5317729490a55a8661c1e00e5fc7226 | [
"MIT"
] | null | null | null | import os
import unittest
from pathlib import Path
from tests.helpers import delete_files_in_folder
class TestHelpers(unittest.TestCase):
    """Exercise the shared helpers used by the test-suite itself."""

    def setUp(self):
        self.temp_dir = "tests/tmp"

    def test_delete_files_in_folder(self):
        """delete_files_in_folder empties the tests temp directory,
        keeping only the filtered-out names."""
        file_path = os.path.join(self.temp_dir, "test_file.txt")
        dir_path = os.path.join(self.temp_dir, "test_folder")
        # populate the temp dir with one file and one sub-folder
        Path(file_path).touch()
        os.mkdir(dir_path)
        delete_files_in_folder(self.temp_dir, filter_out=[".keepme"])
        # only the filtered-out entry should survive
        self.assertEqual(len(os.listdir(self.temp_dir)), 1)
| 33.105263 | 69 | 0.707472 | import os
import unittest
from pathlib import Path
from tests.helpers import delete_files_in_folder
class TestHelpers(unittest.TestCase):
def setUp(self):
self.temp_dir = "tests/tmp"
def test_delete_files_in_folder(self):
file_name = os.path.join(self.temp_dir, "test_file.txt")
folder_name = os.path.join(self.temp_dir, "test_folder")
os.mkdir(folder_name)
Path(file_name).touch()
delete_files_in_folder(self.temp_dir, filter_out=[".keepme"])
self.assertEqual(len(os.listdir(self.temp_dir)), 1)
| true | true |
1c309673b40576448e9b9454ccb79860c08ec8ba | 204 | py | Python | examples/preforked_worker.py | keakon/delayed | 1b7a90a7c579e77e8ac0d9e77c839334ae20944a | [
"MIT"
] | 4 | 2019-06-03T10:33:01.000Z | 2021-03-02T09:32:08.000Z | examples/preforked_worker.py | keakon/delayed | 1b7a90a7c579e77e8ac0d9e77c839334ae20944a | [
"MIT"
] | null | null | null | examples/preforked_worker.py | keakon/delayed | 1b7a90a7c579e77e8ac0d9e77c839334ae20944a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from delayed.logger import setup_logger
from delayed.worker import PreforkedWorker
from .client import queue
# Configure the delayed package's logging before the worker produces output.
setup_logger()
# Run a preforked worker against the queue defined in .client.  run()
# presumably blocks processing tasks until the worker is stopped — confirm
# against the delayed.worker documentation.
worker = PreforkedWorker(queue=queue)
worker.run()
| 15.692308 | 42 | 0.764706 |
from delayed.logger import setup_logger
from delayed.worker import PreforkedWorker
from .client import queue
setup_logger()
worker = PreforkedWorker(queue=queue)
worker.run()
| true | true |
1c3096c81131b37833d931187b739c047f52dafe | 1,209 | py | Python | BinaryToDecimal.py | ritik1234/Python | d8041001d646467c176f911d4b992edb347b43a2 | [
"MIT"
] | 1,340 | 2015-09-10T19:08:01.000Z | 2022-03-31T01:31:29.000Z | BinaryToDecimal.py | ritik1234/Python | d8041001d646467c176f911d4b992edb347b43a2 | [
"MIT"
] | 33 | 2017-01-18T14:31:27.000Z | 2022-03-23T19:57:37.000Z | BinaryToDecimal.py | ritik1234/Python | d8041001d646467c176f911d4b992edb347b43a2 | [
"MIT"
] | 1,382 | 2015-07-15T16:37:38.000Z | 2022-03-30T12:46:34.000Z | # Python: Binary to Decimal Conversion
# binToDec and decToBin functions are rendered obsolete by the universal convert function
def binToDec(binNum):
    """Convert an int whose decimal digits are binary digits (e.g. ``101011``)
    into its decimal value (43)."""
    decNum = 0
    power = 0
    while binNum > 0:
        binNum, digit = divmod(binNum, 10)  # peel off the lowest bit-digit
        decNum += digit * 2 ** power
        power += 1
    return decNum
def decToBin(decNum):
    """Convert a decimal value into an int whose decimal digits are its
    binary digits (e.g. ``5`` -> ``101``)."""
    binNum = 0
    place = 1  # 10 ** position of the next bit-digit
    while decNum > 0:
        decNum, bit = divmod(decNum, 2)
        binNum += bit * place
        place *= 10
    return binNum
def convert(fromNum, fromBase, toBase):
    """Re-encode the value ``fromNum`` digit by digit.

    Despite the parameter names: ``toBase`` is the base whose digits are
    extracted from ``fromNum``'s value, and ``fromBase`` is the radix used to
    pack those digits into the returned int.  With ``fromBase=10`` the result
    reads as the base-``toBase`` numeral of ``fromNum`` (e.g.
    ``convert(127, 10, 8) == 177`` since 127 is 0o177).
    """
    toNum = 0
    place = 1  # fromBase ** position of the next digit
    while fromNum > 0:
        fromNum, digit = divmod(fromNum, toBase)
        toNum += digit * place
        place *= fromBase
    return toNum
# Demo — binToDec/decToBin are superseded by the universal convert() above.
# print (str(binToDec(101011)))
# print (str(decToBin(128)))
print (str(convert(127, 10, 8))) # 127 (base 10) -> 177, its base-8 digits packed as a decimal int
print (str(convert(101001, 2, 2)))  # fromBase == toBase makes convert the identity
| 34.542857 | 96 | 0.709677 |
def binToDec(binNum):
decNum = 0
power = 0
while binNum > 0:
decNum += 2 ** power * (binNum % 10)
binNum //= 10
power += 1
return decNum
def decToBin(decNum):
binNum = 0
power = 0
while decNum > 0:
binNum += 10 ** power * (decNum % 2)
decNum //= 2
power += 1
return binNum
def convert(fromNum, fromBase, toBase):
toNum = 0
power = 0
while fromNum > 0:
toNum += fromBase ** power * (fromNum % toBase)
fromNum //= toBase
power += 1
return toNum
print (str(convert(127, 10, 8)))
print (str(convert(101001, 2, 2)))
| true | true |
1c309732b86a01cf020b7f1662df64cf1dd0b845 | 6,494 | py | Python | pkgs/conda-4.0.5-py27_0/lib/python2.7/site-packages/conda/cli/conda_argparse.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | pkgs/conda-4.0.5-py27_0/lib/python2.7/site-packages/conda/cli/conda_argparse.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | pkgs/conda-4.0.5-py27_0/lib/python2.7/site-packages/conda/cli/conda_argparse.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
from __future__ import print_function, division, absolute_import
import sys
import argparse
import os
import subprocess
from difflib import get_close_matches
from conda.cli.find_commands import find_commands, find_executable
from conda.cli import common
build_commands = {'build', 'index', 'skeleton', 'package', 'metapackage',
'pipbuild', 'develop', 'convert'}
_ARGCOMPLETE_DEBUG = False
def debug_argcomplete(msg):
    """Write *msg* to a hard-coded terminal when debug tracing is enabled.

    To debug this, replace ttys001 with the fd of the terminal you are using
    (use the `tty` command to find this), and set _ARGCOMPLETE_DEBUG above
    to True. You can also `export _ARC_DEBUG=1` in the shell you are using
    to print debug messages from argcomplete.
    """
    if _ARGCOMPLETE_DEBUG:
        # Fix: the original leaked the file handle on every call; the with
        # block closes (and thereby flushes) it.
        with open('/dev/ttys001', 'w') as f:
            f.write("\n%s\n" % msg)
# ``argcomplete`` is optional: probe for a usable installation and fall back
# to ``None`` so later code can simply test ``if argcomplete:``.
try:
    import argcomplete
    argcomplete.CompletionFinder
except (ImportError, AttributeError):
    # On Python 3.3, argcomplete can be an empty namespace package when
    # we are in the conda-recipes directory, which imports but lacks
    # CompletionFinder — treat that the same as "not installed".
    argcomplete = None
if argcomplete:
    class CondaSubprocessCompletionFinder(argcomplete.CompletionFinder):
        """CompletionFinder that delegates completion of external sub-commands
        (``conda foo`` backed by a ``conda-foo`` executable) to that
        executable's own argcomplete hook via a subprocess."""

        def __call__(self, argument_parser, **kwargs):
            # Stock argcomplete behaviour, used whenever delegation does not apply.
            call_super = lambda: super(CondaSubprocessCompletionFinder, self).__call__(argument_parser, **kwargs)
            debug_argcomplete("Working")
            if argument_parser.prog != 'conda':
                debug_argcomplete("Argument parser is not conda")
                return call_super()
            environ = os.environ.copy()
            if 'COMP_LINE' not in environ:
                # Not invoked by the shell's completion machinery.
                debug_argcomplete("COMP_LINE not in environ")
                return call_super()
            subcommands = find_commands()
            for subcommand in subcommands:
                if 'conda %s' % subcommand in environ['COMP_LINE']:
                    # Rewrite "conda foo ..." to "conda-foo ..." and let the
                    # external executable complete its own arguments.
                    environ['COMP_LINE'] = environ['COMP_LINE'].replace('conda %s'
                        % subcommand, 'conda-%s' % subcommand)
                    debug_argcomplete("Using subprocess")
                    debug_argcomplete(sys.argv)
                    import pprint
                    debug_argcomplete(pprint.pformat(environ))
                    args = [find_executable('conda-%s' % subcommand)]
                    debug_argcomplete(args)
                    p = subprocess.Popen(args, env=environ, close_fds=False)
                    p.communicate()
                    sys.exit()
            else:
                # for/else: no external sub-command matched the command line.
                debug_argcomplete("Not using subprocess")
                debug_argcomplete(sys.argv)
                debug_argcomplete(argument_parser)
                return call_super()
class ArgumentParser(argparse.ArgumentParser):
    """argparse.ArgumentParser specialised for the ``conda`` entry point.

    Adds conda's custom ``--help`` handling and, in :meth:`error`, dispatches
    unknown sub-commands (``conda foo``) to external ``conda-foo`` executables.
    """

    def __init__(self, *args, **kwargs):
        # Default to a formatter that preserves newlines in descriptions.
        if not kwargs.get('formatter_class'):
            kwargs['formatter_class'] = argparse.RawDescriptionHelpFormatter
        # Suppress argparse's built-in --help so conda can install its own,
        # unless the caller configured help explicitly.
        if 'add_help' not in kwargs:
            add_custom_help = True
            kwargs['add_help'] = False
        else:
            add_custom_help = False
        super(ArgumentParser, self).__init__(*args, **kwargs)
        if add_custom_help:
            common.add_parser_help(self)
        if self.description:
            self.description += "\n\nOptions:\n"

    def _get_action_from_name(self, name):
        """Given a name, get the Action instance registered with this parser.
        If only it were made available in the ArgumentError object. It is
        passed as it's first arg...
        """
        container = self._actions
        if name is None:
            return None
        for action in container:
            if '/'.join(action.option_strings) == name:
                return action
            elif action.metavar == name:
                return action
            elif action.dest == name:
                return action

    def error(self, message):
        """On an unknown sub-command, try to exec an external ``conda-<cmd>``;
        otherwise fall back to argparse's normal error handling."""
        import re
        import subprocess
        from conda.cli.find_commands import find_executable
        exc = sys.exc_info()[1]
        if exc:
            # this is incredibly lame, but argparse stupidly does not expose
            # reasonable hooks for customizing error handling
            if hasattr(exc, 'argument_name'):
                argument = self._get_action_from_name(exc.argument_name)
            else:
                argument = None
            if argument and argument.dest == "cmd":
                # argparse.ArgumentError keeps its text in ``.message``;
                # pull the offending sub-command name out of it.
                m = re.compile(r"invalid choice: '([\w\-]+)'").match(exc.message)
                if m:
                    cmd = m.group(1)
                    executable = find_executable('conda-' + cmd)
                    if not executable:
                        if cmd in build_commands:
                            sys.exit("""\
Error: You need to install conda-build in order to use the 'conda %s'
command.
""" % cmd)
                        else:
                            message = "Error: Could not locate 'conda-%s'" % cmd
                            conda_commands = set(find_commands())
                            # Suggest near-miss spellings from every known source.
                            close = get_close_matches(cmd,
                                set(argument.choices.keys()) | build_commands | conda_commands)
                            if close:
                                message += '\n\nDid you mean one of these?\n'
                                for s in close:
                                    message += '    %s' % s
                            sys.exit(message)
                    # Hand the rest of the command line over to the external
                    # sub-command and exit with its return code.
                    args = [find_executable('conda-' + cmd)]
                    args.extend(sys.argv[2:])
                    p = subprocess.Popen(args)
                    try:
                        p.communicate()
                    except KeyboardInterrupt:
                        p.wait()
                    finally:
                        sys.exit(p.returncode)
        super(ArgumentParser, self).error(message)

    def print_help(self):
        super(ArgumentParser, self).print_help()
        # Bare ``conda`` / ``conda help``: additionally list external sub-commands.
        if self.prog == 'conda' and sys.argv[1:] in ([], ['help'], ['-h'], ['--help']):
            from conda.cli.find_commands import help
            help()

    def parse_args(self, *args, **kwargs):
        # Register tab-completion (with subprocess dispatch for external
        # sub-commands) before delegating to argparse.
        if argcomplete:
            CondaSubprocessCompletionFinder()(self)
        return super(ArgumentParser, self).parse_args(*args, **kwargs)
| 38.886228 | 113 | 0.564213 |
from __future__ import print_function, division, absolute_import
import sys
import argparse
import os
import subprocess
from difflib import get_close_matches
from conda.cli.find_commands import find_commands, find_executable
from conda.cli import common
build_commands = {'build', 'index', 'skeleton', 'package', 'metapackage',
'pipbuild', 'develop', 'convert'}
_ARGCOMPLETE_DEBUG = False
def debug_argcomplete(msg):
if _ARGCOMPLETE_DEBUG:
f = open('/dev/ttys001', 'w')
f.write("\n%s\n" % msg)
f.flush()
# argcomplete is optional: probe for a usable version (one that exposes
# CompletionFinder) and degrade gracefully when it is missing or too old.
try:
    import argcomplete
    argcomplete.CompletionFinder
except (ImportError, AttributeError):
    argcomplete = None

if argcomplete:
    class CondaSubprocessCompletionFinder(argcomplete.CompletionFinder):
        """Completion finder that delegates ``conda <subcommand>`` completion
        to the external ``conda-<subcommand>`` executable.
        """

        def __call__(self, argument_parser, **kwargs):
            # Deferred super() invocation used by the fall-through paths.
            call_super = lambda: super(CondaSubprocessCompletionFinder, self).__call__(argument_parser, **kwargs)
            debug_argcomplete("Working")
            if argument_parser.prog != 'conda':
                debug_argcomplete("Argument parser is not conda")
                return call_super()
            environ = os.environ.copy()
            if 'COMP_LINE' not in environ:
                # Not invoked by shell completion; use normal completion.
                debug_argcomplete("COMP_LINE not in environ")
                return call_super()
            subcommands = find_commands()
            for subcommand in subcommands:
                if 'conda %s' % subcommand in environ['COMP_LINE']:
                    # Rewrite the completion line so the external binary
                    # completes its own options (conda X -> conda-X).
                    environ['COMP_LINE'] = environ['COMP_LINE'].replace('conda %s'
                        % subcommand, 'conda-%s' % subcommand)
                    debug_argcomplete("Using subprocess")
                    debug_argcomplete(sys.argv)
                    import pprint
                    debug_argcomplete(pprint.pformat(environ))
                    args = [find_executable('conda-%s' % subcommand)]
                    debug_argcomplete(args)
                    # close_fds=False so the completion fds stay inherited.
                    p = subprocess.Popen(args, env=environ, close_fds=False)
                    p.communicate()
                    sys.exit()
            else:
                # for/else: no external subcommand matched the line.
                debug_argcomplete("Not using subprocess")
                debug_argcomplete(sys.argv)
                debug_argcomplete(argument_parser)
                return call_super()
class ArgumentParser(argparse.ArgumentParser):
    """argparse.ArgumentParser customized for the conda CLI.

    Adds conda's custom help handling, dispatches unknown subcommands to
    external ``conda-<cmd>`` executables, and wires in tab completion.
    """

    def __init__(self, *args, **kwargs):
        if not kwargs.get('formatter_class'):
            kwargs['formatter_class'] = argparse.RawDescriptionHelpFormatter
        if 'add_help' not in kwargs:
            # Suppress argparse's built-in -h so conda can add its own.
            add_custom_help = True
            kwargs['add_help'] = False
        else:
            add_custom_help = False
        super(ArgumentParser, self).__init__(*args, **kwargs)
        if add_custom_help:
            common.add_parser_help(self)
        if self.description:
            self.description += "\n\nOptions:\n"

    def _get_action_from_name(self, name):
        """Look up a registered action by option string, metavar, or dest.

        Returns None when *name* is None or no action matches.
        """
        container = self._actions
        if name is None:
            return None
        for action in container:
            if '/'.join(action.option_strings) == name:
                return action
            elif action.metavar == name:
                return action
            elif action.dest == name:
                return action

    def error(self, message):
        """On an unknown subcommand, exec the external conda-<cmd> binary.

        Falls back to a "did you mean" suggestion list, then to the
        standard argparse error (which exits).
        """
        import re
        import subprocess
        from conda.cli.find_commands import find_executable
        # argparse raises ArgumentError internally before calling error();
        # inspect it to find which argument failed.
        exc = sys.exc_info()[1]
        if exc:
            if hasattr(exc, 'argument_name'):
                argument = self._get_action_from_name(exc.argument_name)
            else:
                argument = None
            if argument and argument.dest == "cmd":
                # NOTE(review): exc.message is a Python-2-era attribute;
                # on Python 3 this would need str(exc) — confirm target
                # interpreter before reuse.
                m = re.compile(r"invalid choice: '([\w\-]+)'").match(exc.message)
                if m:
                    cmd = m.group(1)
                    executable = find_executable('conda-' + cmd)
                    if not executable:
                        if cmd in build_commands:
                            sys.exit("""\
Error: You need to install conda-build in order to use the 'conda %s'
command.
""" % cmd)
                        else:
                            message = "Error: Could not locate 'conda-%s'" % cmd
                            conda_commands = set(find_commands())
                            close = get_close_matches(cmd,
                                set(argument.choices.keys()) | build_commands | conda_commands)
                            if close:
                                message += '\n\nDid you mean one of these?\n'
                                for s in close:
                                    message += '    %s' % s
                            sys.exit(message)
                    # Hand the remaining argv to the external subcommand.
                    args = [find_executable('conda-' + cmd)]
                    args.extend(sys.argv[2:])
                    p = subprocess.Popen(args)
                    try:
                        p.communicate()
                    except KeyboardInterrupt:
                        p.wait()
                    finally:
                        # Propagate the child's exit status.
                        sys.exit(p.returncode)
        super(ArgumentParser, self).error(message)

    def print_help(self):
        """Print standard help; for bare ``conda``/``conda help`` also list
        the externally discovered ``conda-*`` subcommands."""
        super(ArgumentParser, self).print_help()
        if self.prog == 'conda' and sys.argv[1:] in ([], ['help'], ['-h'], ['--help']):
            from conda.cli.find_commands import help
            help()

    def parse_args(self, *args, **kwargs):
        """Enable tab completion (if argcomplete is present) before parsing."""
        if argcomplete:
            CondaSubprocessCompletionFinder()(self)
        return super(ArgumentParser, self).parse_args(*args, **kwargs)
| true | true |
1c30976f129d557c66c7e40fbd0edd5a264b59f8 | 1,508 | py | Python | handlers/package_info_handler.py | m-j/ziprepo-server | 35c1f40c3ba5489fb8731e8d66b301333dc9f8b0 | [
"MIT"
] | 1 | 2020-08-08T01:52:57.000Z | 2020-08-08T01:52:57.000Z | handlers/package_info_handler.py | m-j/ziprepo-server | 35c1f40c3ba5489fb8731e8d66b301333dc9f8b0 | [
"MIT"
] | null | null | null | handlers/package_info_handler.py | m-j/ziprepo-server | 35c1f40c3ba5489fb8731e8d66b301333dc9f8b0 | [
"MIT"
] | 2 | 2021-09-06T09:41:41.000Z | 2022-03-27T19:51:32.000Z | from tornado.web import RequestHandler
from errors.errors import PackageDoesntExistError
from handlers.handler_utils import wrap_in_envelope
from handlers.zippero_base_handler import ZipperoBaseHandler
from package_management.package_manager import PackageManager
from package_management.utils import package_link
from security.privilege_validator import PrivilegeValidator
class PackageInfoHandler(ZipperoBaseHandler):
    """Read-only endpoint serving metadata for a single package.

    Responds with the package's info dict, augmented with a download
    link per version, wrapped in the standard response envelope.
    """

    _privilege_validator: PrivilegeValidator
    _package_manager: PackageManager

    def initialize(self, package_manager, privilege_validator: PrivilegeValidator):
        # Tornado invokes initialize() with the kwargs supplied at URL-spec
        # registration; stash the collaborators for the request methods.
        self._privilege_validator = privilege_validator
        self._package_manager = package_manager

    async def get(self, package_name):
        """Return info for *package_name*; raise when the package is unknown."""
        self._privilege_validator.assure_readonly_access(self.request)
        info = self._package_manager.query(name=package_name)
        if info is None:
            raise PackageDoesntExistError(package_name, 'any')
        self.finish(wrap_in_envelope(self.make_package_info_dict(info)))

    def make_package_info_dict(self, package_info):
        """Serialize *package_info* and attach a download link per version."""
        result = package_info.as_dict()
        host = self.request.headers.get('Host')
        links = {}
        for version in package_info.versions:
            links[version] = package_link(self.request.protocol, host,
                                          package_info.name, version)
        result['links'] = links
        return result
| 37.7 | 120 | 0.761273 | from tornado.web import RequestHandler
from errors.errors import PackageDoesntExistError
from handlers.handler_utils import wrap_in_envelope
from handlers.zippero_base_handler import ZipperoBaseHandler
from package_management.package_manager import PackageManager
from package_management.utils import package_link
from security.privilege_validator import PrivilegeValidator
class PackageInfoHandler(ZipperoBaseHandler):
_privilege_validator: PrivilegeValidator
_package_manager: PackageManager
def initialize(self, package_manager, privilege_validator: PrivilegeValidator):
self._package_manager = package_manager
self._privilege_validator = privilege_validator
async def get(self, package_name):
self._privilege_validator.assure_readonly_access(self.request)
package_info = self._package_manager.query(name=package_name)
if package_info is not None:
package_info_dict = self.make_package_info_dict(package_info)
self.finish(wrap_in_envelope(package_info_dict))
else:
raise PackageDoesntExistError(package_name, 'any')
def make_package_info_dict(self, package_info):
package_info_dict = package_info.as_dict()
host = self.request.headers.get('Host')
package_info_dict['links'] = {version: package_link(self.request.protocol, host, package_info.name, version) for
version in package_info.versions}
return package_info_dict
| true | true |
1c309785797e8ad05429ef332e41130f7ffa1021 | 4,203 | py | Python | tests/test_plotting.py | sciris/sciris | a52a7a0d4bf2c3de7dde1dbca07c40341f9a18b0 | [
"MIT"
] | 5 | 2018-10-01T09:36:31.000Z | 2021-08-03T14:34:48.000Z | tests/test_plotting.py | sciris/sciris | a52a7a0d4bf2c3de7dde1dbca07c40341f9a18b0 | [
"MIT"
] | 194 | 2018-09-21T06:21:07.000Z | 2022-03-26T11:22:24.000Z | tests/test_plotting.py | sciris/sciris | a52a7a0d4bf2c3de7dde1dbca07c40341f9a18b0 | [
"MIT"
] | 5 | 2020-03-12T23:06:39.000Z | 2020-09-30T10:38:17.000Z | """
Test color and plotting functions -- warning, opens up many windows!
"""
import os
import numpy as np
import pylab as pl
import sciris as sc
# Allow an external driver to predefine ``doplot`` before running this file
# (at module scope locals() is globals()); default to plotting otherwise.
if 'doplot' not in locals():
    doplot = True
def test_colors(doplot=doplot):
    """Round-trip sciris color conversions: hue shift, hex<->RGB, RGB<->HSV."""
    sc.heading('Testing colors')
    out = sc.objdict()

    print('Testing shifthue')
    out.hue = sc.shifthue(colors=[(1, 0, 0), (0, 1, 0)], hueshift=0.5)

    print('Testing hex2rgb and rgb2hex')
    hexcolor = '#87bc26'
    out.rgb = sc.hex2rgb(hexcolor)
    out.hx = sc.rgb2hex(out.rgb)
    assert out.hx == hexcolor  # hex -> RGB -> hex must round-trip exactly

    print('Testing rgb2hsv and hsv2rgb')
    rgb_in = np.array([0.53, 0.74, 0.15])
    out.hsv = sc.rgb2hsv(rgb_in)
    out.rgb2 = sc.hsv2rgb(out.hsv)
    assert np.all(np.isclose(rgb_in, out.rgb2))  # RGB -> HSV -> RGB round-trip

    return out
def test_colormaps(doplot=doplot):
    """Exercise sciris colormap helpers (vectocolor, arraycolors, gridcolors)."""
    sc.heading('Testing colormaps')
    o = sc.objdict()

    print('Testing vectocolor')
    x = np.random.rand(10)
    o.veccolors = sc.vectocolor(x, cmap='turbo')

    print('Testing arraycolors')
    n = 1000
    ncols = 5
    arr = pl.rand(n,ncols)
    # Offset each column so the color bands are visually separated.
    for c in range(ncols):
        arr[:,c] += c
    x = pl.rand(n)
    y = pl.rand(n)
    colors = sc.arraycolors(arr)
    if doplot:
        pl.figure(figsize=(20,16))
        for c in range(ncols):
            pl.scatter(x+c, y, s=50, c=colors[:,c])
    o.arraycolors = colors

    print('Testing gridcolors')
    o.gridcolors = sc.gridcolors(ncolors=8, demo=doplot)
    # 28 colors forces the non-default (generated) color path.
    sc.gridcolors(ncolors=28, demo=doplot)
    print('\n8 colors:', o.gridcolors)

    print('Testing colormapdemo')
    if doplot:
        sc.colormapdemo('parula', doshow=False)

    return o
def test_3d(doplot=doplot):
    """Smoke-test sciris 3D plotting (fig3d, surf3d, bar3d)."""
    sc.heading('Testing 3D')
    o = sc.objdict()

    print('Testing surf3d')
    if doplot:
        o.fig = sc.fig3d()

    # NOTE(review): duplicated message — this one likely meant 'Testing fig3d'
    # above; left unchanged since print text is observable behavior.
    print('Testing surf3d')
    data = pl.randn(50,50)
    smoothdata = sc.smooth(data,20)
    if doplot:
        sc.surf3d(smoothdata)

    print('Testing bar3d')
    data = pl.rand(20,20)
    smoothdata = sc.smooth(data)
    if doplot:
        sc.bar3d(smoothdata)

    return o
def test_other(doplot=doplot):
    """Exercise assorted sciris plotting utilities (axes, ticks, legends, dates)."""
    sc.heading('Testing other')
    o = sc.objdict()

    data = np.random.rand(10)*1e4
    # Values unused below; called for coverage of the row/col heuristic.
    nrows,ncols = sc.get_rows_cols(100, ratio=0.5) # Returns 8,13 since rows are prioritized
    if doplot:
        sc.emptyfig()
        o.fig = pl.figure()
        pl.subplot(2,1,1)
        pl.plot(data)
        sc.boxoff()
        sc.setxlim()
        sc.setylim()
        sc.commaticks()

        pl.subplot(2,1,2)
        pl.plot(data)
        sc.SIticks()
        pl.title('SI ticks')

        # maximize() depends on the GUI backend; tolerate failure headless.
        try:
            sc.maximize()
        except Exception as E:
            print(f'sc.maximize() failed with {str(E)}:')
            print(sc.traceback())
            print('↑↑↑ Ignoring since sc.maximize() unlikely to work via e.g. automated testing')

        # Test legends
        pl.figure()
        pl.plot([1,4,3], label='A')
        pl.plot([5,7,8], label='B')
        pl.plot([2,5,2], label='C')
        sc.orderlegend(reverse=True) # Legend order C, B, A
        sc.orderlegend([1,0,2], frameon=False) # Legend order B, A, C with no frame
        sc.separatelegend()

        # Test date formatter
        pl.figure()
        pl.plot(np.arange(365), pl.rand(365))
        sc.dateformatter('2021-01-01')

    return o
def test_saving(doplot=doplot):
    """Round-trip figure save/load and movie export, cleaning up the files."""
    sc.heading('Testing saving')
    o = sc.objdict()

    filename = 'testfig.fig'
    moviename = 'testmovie.gif'

    if doplot:
        print('Testing save figs')
        o.fig = pl.figure()
        pl.plot(pl.rand(10))
        sc.savefigs(o.fig, filetype='fig', filename=filename)
        sc.loadfig(filename)

        print('Testing save movie')
        frames = [pl.plot(pl.cumsum(pl.randn(100))) for i in range(3)] # Create frames
        sc.savemovie(frames, moviename) # Save movie as medium-quality gif

        # Remove the artifacts written to the working directory.
        os.remove(filename)
        os.remove(moviename)

    return o
#%% Run as a script
if __name__ == '__main__':
    # Run every test with plotting forced on, timing the whole suite.
    sc.tic()

    doplot = True

    colors = test_colors(doplot)
    colormaps = test_colormaps(doplot)
    threed = test_3d(doplot)
    other = test_other(doplot)
    saved = test_saving(doplot)

    if doplot:
        pl.show()

    sc.toc()
    print('Done.')
| 22.475936 | 97 | 0.583155 |
import os
import numpy as np
import pylab as pl
import sciris as sc
if 'doplot' not in locals():
doplot = True
def test_colors(doplot=doplot):
sc.heading('Testing colors')
o = sc.objdict()
print('Testing shifthue')
o.hue = sc.shifthue(colors=[(1,0,0),(0,1,0)], hueshift=0.5)
print('Testing hex2rgb and rgb2hex')
hx = '#87bc26'
o.rgb = sc.hex2rgb(hx)
o.hx = sc.rgb2hex(o.rgb)
assert o.hx == hx
print('Testing rgb2hsv and hsv2rgb')
rgb = np.array([0.53, 0.74, 0.15])
o.hsv = sc.rgb2hsv(rgb)
o.rgb2 = sc.hsv2rgb(o.hsv)
assert np.all(np.isclose(rgb, o.rgb2))
return o
def test_colormaps(doplot=doplot):
sc.heading('Testing colormaps')
o = sc.objdict()
print('Testing vectocolor')
x = np.random.rand(10)
o.veccolors = sc.vectocolor(x, cmap='turbo')
print('Testing arraycolors')
n = 1000
ncols = 5
arr = pl.rand(n,ncols)
for c in range(ncols):
arr[:,c] += c
x = pl.rand(n)
y = pl.rand(n)
colors = sc.arraycolors(arr)
if doplot:
pl.figure(figsize=(20,16))
for c in range(ncols):
pl.scatter(x+c, y, s=50, c=colors[:,c])
o.arraycolors = colors
print('Testing gridcolors')
o.gridcolors = sc.gridcolors(ncolors=8, demo=doplot)
sc.gridcolors(ncolors=28, demo=doplot)
print('\n8 colors:', o.gridcolors)
print('Testing colormapdemo')
if doplot:
sc.colormapdemo('parula', doshow=False)
return o
def test_3d(doplot=doplot):
sc.heading('Testing 3D')
o = sc.objdict()
print('Testing surf3d')
if doplot:
o.fig = sc.fig3d()
print('Testing surf3d')
data = pl.randn(50,50)
smoothdata = sc.smooth(data,20)
if doplot:
sc.surf3d(smoothdata)
print('Testing bar3d')
data = pl.rand(20,20)
smoothdata = sc.smooth(data)
if doplot:
sc.bar3d(smoothdata)
return o
def test_other(doplot=doplot):
sc.heading('Testing other')
o = sc.objdict()
data = np.random.rand(10)*1e4
nrows,ncols = sc.get_rows_cols(100, ratio=0.5)
if doplot:
sc.emptyfig()
o.fig = pl.figure()
pl.subplot(2,1,1)
pl.plot(data)
sc.boxoff()
sc.setxlim()
sc.setylim()
sc.commaticks()
pl.subplot(2,1,2)
pl.plot(data)
sc.SIticks()
pl.title('SI ticks')
try:
sc.maximize()
except Exception as E:
print(f'sc.maximize() failed with {str(E)}:')
print(sc.traceback())
print('↑↑↑ Ignoring since sc.maximize() unlikely to work via e.g. automated testing')
pl.figure()
pl.plot([1,4,3], label='A')
pl.plot([5,7,8], label='B')
pl.plot([2,5,2], label='C')
sc.orderlegend(reverse=True)
sc.orderlegend([1,0,2], frameon=False)
sc.separatelegend()
pl.figure()
pl.plot(np.arange(365), pl.rand(365))
sc.dateformatter('2021-01-01')
return o
def test_saving(doplot=doplot):
sc.heading('Testing saving')
o = sc.objdict()
filename = 'testfig.fig'
moviename = 'testmovie.gif'
if doplot:
print('Testing save figs')
o.fig = pl.figure()
pl.plot(pl.rand(10))
sc.savefigs(o.fig, filetype='fig', filename=filename)
sc.loadfig(filename)
print('Testing save movie')
frames = [pl.plot(pl.cumsum(pl.randn(100))) for i in range(3)]
sc.savemovie(frames, moviename)
os.remove(filename)
os.remove(moviename)
return o
if __name__ == '__main__':
sc.tic()
doplot = True
colors = test_colors(doplot)
colormaps = test_colormaps(doplot)
threed = test_3d(doplot)
other = test_other(doplot)
saved = test_saving(doplot)
if doplot:
pl.show()
sc.toc()
print('Done.')
| true | true |
1c30984db275b3d716ea21898e90ceeb85e57cde | 1,588 | py | Python | urls.py | hishamkaram/data_manager | c3430cb7132b67ef2e8635df7f70334167a78f0e | [
"MIT"
] | null | null | null | urls.py | hishamkaram/data_manager | c3430cb7132b67ef2e8635df7f70334167a78f0e | [
"MIT"
] | 5 | 2020-07-16T18:54:21.000Z | 2022-03-15T19:07:22.000Z | urls.py | hishamkaram/data_manager | c3430cb7132b67ef2e8635df7f70334167a78f0e | [
"MIT"
] | 1 | 2020-11-13T14:13:13.000Z | 2020-11-13T14:13:13.000Z | # -*- coding: utf-8 -*-
from django.urls import include, re_path
from tastypie.api import Api
from . import APP_NAME
from .rest import GpkgUploadResource, ManagerDownloadResource
from .views import (UploadView, compare_to_geonode_layer, deleteUpload,
download_layers, get_compatible_layers, publish_layer,
reload_layer)
# Tastypie REST API exposing the upload/download resources under
# /api/gpkg_api/.
api = Api(api_name='gpkg_api')
api.register(GpkgUploadResource())
api.register(ManagerDownloadResource())

# URL routes for the data-manager app.  Captured groups (presumably:
# upload_id = numeric upload pk, layername = layer in the uploaded
# GeoPackage, glayername = target GeoNode layer — confirm in views).
urlpatterns = [
    re_path(r'^upload/', UploadView.as_view(), name="geopackage_upload"),
    re_path(r'^$', UploadView.as_view(), name="%s.index" % (APP_NAME)),
    re_path(r'^publish/(?P<upload_id>[\d]+)/(?P<layername>[^/]*)$',
            publish_layer,
            name="geopackage_publish"),
    # Variant allowing an explicit name for the published layer.
    re_path(r'^publish/(?P<upload_id>[\d]+)/(?P<layername>[^/]*)/(?P<publish_name>[^/]*)$',
            publish_layer,
            name="geopackage_publish_name"),
    re_path(r'^compare_schema/(?P<upload_id>[\d]+)/(?P<layername>[^/]*)/(?P<glayername>[^/]*)$',
            compare_to_geonode_layer,
            name="compare_schema"),
    re_path(r'^reload_layer/(?P<upload_id>[\d]+)/(?P<layername>[^/]*)/(?P<glayername>[^/]*)$',
            reload_layer,
            name="reload_layer"),
    re_path(r'^compatible_layers/(?P<upload_id>[\d]+)/(?P<layername>[^/]*)/$',
            get_compatible_layers,
            name="compatible_layers"),
    re_path(r'^delete/(?P<upload_id>[\d]+)/$',
            deleteUpload,
            name="geopackage_delete"),
    re_path(r'^download$', download_layers, name="geopackage_download"),
    re_path(r'^api/', include(api.urls)),
]
| 42.918919 | 96 | 0.639169 |
from django.urls import include, re_path
from tastypie.api import Api
from . import APP_NAME
from .rest import GpkgUploadResource, ManagerDownloadResource
from .views import (UploadView, compare_to_geonode_layer, deleteUpload,
download_layers, get_compatible_layers, publish_layer,
reload_layer)
api = Api(api_name='gpkg_api')
api.register(GpkgUploadResource())
api.register(ManagerDownloadResource())
urlpatterns = [
re_path(r'^upload/', UploadView.as_view(), name="geopackage_upload"),
re_path(r'^$', UploadView.as_view(), name="%s.index" % (APP_NAME)),
re_path(r'^publish/(?P<upload_id>[\d]+)/(?P<layername>[^/]*)$',
publish_layer,
name="geopackage_publish"),
re_path(r'^publish/(?P<upload_id>[\d]+)/(?P<layername>[^/]*)/(?P<publish_name>[^/]*)$',
publish_layer,
name="geopackage_publish_name"),
re_path(r'^compare_schema/(?P<upload_id>[\d]+)/(?P<layername>[^/]*)/(?P<glayername>[^/]*)$',
compare_to_geonode_layer,
name="compare_schema"),
re_path(r'^reload_layer/(?P<upload_id>[\d]+)/(?P<layername>[^/]*)/(?P<glayername>[^/]*)$',
reload_layer,
name="reload_layer"),
re_path(r'^compatible_layers/(?P<upload_id>[\d]+)/(?P<layername>[^/]*)/$',
get_compatible_layers,
name="compatible_layers"),
re_path(r'^delete/(?P<upload_id>[\d]+)/$',
deleteUpload,
name="geopackage_delete"),
re_path(r'^download$', download_layers, name="geopackage_download"),
re_path(r'^api/', include(api.urls)),
]
| true | true |
1c3099a8ee190b17fb946370adda20d68fdd911b | 2,318 | py | Python | cudarray/extra/array.py | gorenje/cudarray | a6d287fe371a93bcce2d3767925a5ea4e0a82e1a | [
"MIT"
] | 228 | 2015-01-03T17:31:54.000Z | 2021-01-21T22:23:17.000Z | cudarray/extra/array.py | maxosprojects/cudarray | a2cffbb1434db9a7e6ed83211300d23d47630d2e | [
"MIT"
] | 77 | 2015-01-03T20:23:19.000Z | 2022-03-23T11:56:27.000Z | cudarray/extra/array.py | maxosprojects/cudarray | a2cffbb1434db9a7e6ed83211300d23d47630d2e | [
"MIT"
] | 74 | 2015-01-06T17:07:06.000Z | 2021-05-14T12:41:05.000Z | import cudarray as ca
from ..wrap import array_ops
from ..helpers import prod
def concatenate(a, b, axis=0, out=None):
    """Join arrays *a* and *b* along *axis*, writing into *out*.

    Because the backing C kernel only handles rank-3 data, arbitrary-rank
    inputs are mapped onto an equivalent (pre-axis, axis, post-axis) 3-D
    view before dispatch.  Returns *out* (allocated if not supplied).
    """
    ndim = a.ndim
    a_shp = a.shape
    b_shp = b.shape
    # Output shape: same as `a` except the concatenation axis grows.
    d_concat = a_shp[axis] + b_shp[axis]
    out_shp = a_shp[:axis] + (d_concat,) + a_shp[axis+1:]
    if out is None:
        out = ca.empty(out_shp, dtype=a.dtype)
    else:
        if out.shape != out_shp:
            raise ValueError('shape mismatch')
    # Extents of the two inputs along the concatenation axis.
    da = a_shp[axis]
    db = b_shp[axis]
    if ndim < 3:
        # Pad with trailing singleton dimensions up to rank 3.
        a_shp = a_shp + (1,)*(3-ndim)
        b_shp = b_shp + (1,)*(3-ndim)
    elif ndim > 3:
        # Collapse the dimensions before/after the axis into single dims;
        # `axis` is remapped to its position in the collapsed view.
        if axis == 0:
            a_shp = a_shp[axis], prod(a_shp[1:]), 1
            b_shp = b_shp[axis], prod(b_shp[1:]), 1
        elif axis + 1 == ndim:
            a_shp = 1, prod(a_shp[:axis]), a_shp[axis]
            b_shp = 1, prod(b_shp[:axis]), b_shp[axis]
            axis = 2
        else:
            a_shp = prod(a_shp[:axis]), a_shp[axis], prod(a_shp[axis+1:])
            b_shp = prod(b_shp[:axis]), b_shp[axis], prod(b_shp[axis+1:])
            axis = 1
    # Collapsed output dimensions handed to the kernel.
    d0, d1, d2 = a_shp[:axis] + (d_concat,) + a_shp[axis+1:]
    array_ops._concatenate(a._data, b._data, axis, d0, d1, d2, da, db,
                           out._data)
    return out
def split(arr, a_size, axis=0, out_a=None, out_b=None):
    """Split *arr* along *axis* into a first part of size *a_size* and the rest.

    Mirrors concatenate(): arbitrary-rank input is collapsed onto a 3-D
    view for the C kernel.  Returns ``(out_a, out_b)``.
    """
    shp = arr.shape
    ndim = arr.ndim
    # Sizes of the two output pieces along the split axis.
    da = a_size
    db = shp[axis]-a_size
    out_a_shp = shp[:axis] + (da,) + shp[axis+1:]
    out_b_shp = shp[:axis] + (db,) + shp[axis+1:]
    if out_a is None:
        out_a = ca.empty(out_a_shp, dtype=arr.dtype)
    else:
        if out_a.shape != out_a_shp:
            raise ValueError('shape mismatch')
    if out_b is None:
        out_b = ca.empty(out_b_shp, dtype=arr.dtype)
    else:
        if out_b.shape != out_b_shp:
            raise ValueError('shape mismatch')
    if ndim < 3:
        # Pad with trailing singleton dimensions up to rank 3.
        shp = shp + (1,)*(3-ndim)
    elif ndim > 3:
        # Collapse pre-/post-axis dimensions; remap `axis` accordingly.
        if axis == 0:
            shp = shp[axis], prod(shp[1:]), 1
        elif axis + 1 == ndim:
            shp = 1, prod(shp[:axis]), shp[axis]
            axis = 2
        else:
            shp = prod(shp[:axis]), shp[axis], prod(shp[axis+1:])
            axis = 1
    d0, d1, d2 = shp
    array_ops._split(arr._data, axis, d0, d1, d2, da, db, out_a._data,
                     out_b._data)
    return out_a, out_b
| 30.103896 | 73 | 0.523296 | import cudarray as ca
from ..wrap import array_ops
from ..helpers import prod
def concatenate(a, b, axis=0, out=None):
ndim = a.ndim
a_shp = a.shape
b_shp = b.shape
d_concat = a_shp[axis] + b_shp[axis]
out_shp = a_shp[:axis] + (d_concat,) + a_shp[axis+1:]
if out is None:
out = ca.empty(out_shp, dtype=a.dtype)
else:
if out.shape != out_shp:
raise ValueError('shape mismatch')
da = a_shp[axis]
db = b_shp[axis]
if ndim < 3:
a_shp = a_shp + (1,)*(3-ndim)
b_shp = b_shp + (1,)*(3-ndim)
elif ndim > 3:
if axis == 0:
a_shp = a_shp[axis], prod(a_shp[1:]), 1
b_shp = b_shp[axis], prod(b_shp[1:]), 1
elif axis + 1 == ndim:
a_shp = 1, prod(a_shp[:axis]), a_shp[axis]
b_shp = 1, prod(b_shp[:axis]), b_shp[axis]
axis = 2
else:
a_shp = prod(a_shp[:axis]), a_shp[axis], prod(a_shp[axis+1:])
b_shp = prod(b_shp[:axis]), b_shp[axis], prod(b_shp[axis+1:])
axis = 1
d0, d1, d2 = a_shp[:axis] + (d_concat,) + a_shp[axis+1:]
array_ops._concatenate(a._data, b._data, axis, d0, d1, d2, da, db,
out._data)
return out
def split(arr, a_size, axis=0, out_a=None, out_b=None):
shp = arr.shape
ndim = arr.ndim
da = a_size
db = shp[axis]-a_size
out_a_shp = shp[:axis] + (da,) + shp[axis+1:]
out_b_shp = shp[:axis] + (db,) + shp[axis+1:]
if out_a is None:
out_a = ca.empty(out_a_shp, dtype=arr.dtype)
else:
if out_a.shape != out_a_shp:
raise ValueError('shape mismatch')
if out_b is None:
out_b = ca.empty(out_b_shp, dtype=arr.dtype)
else:
if out_b.shape != out_b_shp:
raise ValueError('shape mismatch')
if ndim < 3:
shp = shp + (1,)*(3-ndim)
elif ndim > 3:
if axis == 0:
shp = shp[axis], prod(shp[1:]), 1
elif axis + 1 == ndim:
shp = 1, prod(shp[:axis]), shp[axis]
axis = 2
else:
shp = prod(shp[:axis]), shp[axis], prod(shp[axis+1:])
axis = 1
d0, d1, d2 = shp
array_ops._split(arr._data, axis, d0, d1, d2, da, db, out_a._data,
out_b._data)
return out_a, out_b
| true | true |
1c309b05898511f695f8311094b09136ee0e435d | 1,765 | py | Python | src/data_processing/OLD/create_lung_segmented_same_spacing_data.py | DataForces/CV_LUNA | adc76fdc580807742fee4c6453c728a2d6d76ed3 | [
"BSD-2-Clause"
] | 207 | 2016-07-25T07:27:11.000Z | 2022-03-25T06:20:56.000Z | src/data_processing/OLD/create_lung_segmented_same_spacing_data.py | DataForces/CV_LUNA | adc76fdc580807742fee4c6453c728a2d6d76ed3 | [
"BSD-2-Clause"
] | 20 | 2016-12-07T02:36:14.000Z | 2020-06-05T03:05:14.000Z | src/data_processing/OLD/create_lung_segmented_same_spacing_data.py | DataForces/CV_LUNA | adc76fdc580807742fee4c6453c728a2d6d76ed3 | [
"BSD-2-Clause"
] | 83 | 2016-08-27T01:35:23.000Z | 2020-09-21T15:12:06.000Z | import glob
import numpy as np
import os
import SimpleITK as sitk
import skimage.transform
import scipy.ndimage
from joblib import Parallel, delayed
# Target isotropic voxel spacing in mm (z, y, x order after load_itk), and
# the output folder name that encodes it.
RESIZE_SPACING = [1, 1, 1]
SAVE_FOLDER = '1_1_1mm'
def load_itk(filename):
    """Load an ITK image; return (voxel array, origin, spacing) in z,y,x order."""
    itkimage = sitk.ReadImage(filename)
    numpyImage = sitk.GetArrayFromImage(itkimage)
    # SimpleITK reports origin/spacing as (x, y, z); reverse to match the
    # (z, y, x) axis order of the numpy array.
    numpyOrigin = np.array(list(reversed(itkimage.GetOrigin())))
    numpySpacing = np.array(list(reversed(itkimage.GetSpacing())))
    return numpyImage, numpyOrigin, numpySpacing
def save_itk(image, origin, spacing, filename):
    """Write *image* to *filename* as a compressed ITK image.

    *origin* and *spacing* must already be in SimpleITK's (x, y, z) order
    (reshape_image reverses them before calling this).
    """
    itkimage = sitk.GetImageFromArray(image, isVector=False)
    itkimage.SetSpacing(spacing)
    itkimage.SetOrigin(origin)
    # Final True enables compression.
    sitk.WriteImage(itkimage, filename, True)
def reshape_image(imageDir, subsetDir):
    """Mask the lungs in a CT scan and resample it to RESIZE_SPACING.

    Skips scans whose output file already exists.  Writes the result to
    the same path with 'original' replaced by SAVE_FOLDER.
    """
    if os.path.isfile(imageDir.replace('original',SAVE_FOLDER)) == False:
        img, origin, spacing = load_itk(imageDir)
        # Matching lung mask lives under data\lung_masks (Windows paths;
        # '\l' is not an escape so the backslash survives literally —
        # NOTE(review): fragile, confirm on the deployment platform).
        mask, _, _ = load_itk(imageDir.replace('{}'.format(subsetDir),'data\lung_masks'))
        mask[mask >0] = 1
        # Zero out everything outside the lungs.
        img *= mask
        resize_factor = spacing / RESIZE_SPACING
        new_real_shape = img.shape * resize_factor
        new_shape = np.round(new_real_shape)
        # Recompute the exact spacing implied by the rounded shape.
        real_resize = new_shape / img.shape
        new_spacing = spacing / real_resize
        img = scipy.ndimage.interpolation.zoom(img, real_resize)
        # Back to SimpleITK's (x, y, z) order for saving.
        origin = origin[::-1]
        new_spacing = new_spacing[::-1]
        save_itk(img,origin,new_spacing,imageDir.replace('original',SAVE_FOLDER))
if __name__ == "__main__":
for subset in range(10):
subsetDir = 'data\\original\\subset{}'.format(subset)
imageNames = glob.glob("{}/*.mhd".format(subsetDir))
Parallel(n_jobs=4)(delayed(reshape_image)(imageDir,subsetDir) for imageDir in imageNames) | 36.020408 | 97 | 0.695184 | import glob
import numpy as np
import os
import SimpleITK as sitk
import skimage.transform
import scipy.ndimage
from joblib import Parallel, delayed
RESIZE_SPACING = [1, 1, 1]
SAVE_FOLDER = '1_1_1mm'
def load_itk(filename):
itkimage = sitk.ReadImage(filename)
numpyImage = sitk.GetArrayFromImage(itkimage)
numpyOrigin = np.array(list(reversed(itkimage.GetOrigin())))
numpySpacing = np.array(list(reversed(itkimage.GetSpacing())))
return numpyImage, numpyOrigin, numpySpacing
def save_itk(image, origin, spacing, filename):
itkimage = sitk.GetImageFromArray(image, isVector=False)
itkimage.SetSpacing(spacing)
itkimage.SetOrigin(origin)
sitk.WriteImage(itkimage, filename, True)
def reshape_image(imageDir, subsetDir):
if os.path.isfile(imageDir.replace('original',SAVE_FOLDER)) == False:
img, origin, spacing = load_itk(imageDir)
mask, _, _ = load_itk(imageDir.replace('{}'.format(subsetDir),'data\lung_masks'))
mask[mask >0] = 1
img *= mask
resize_factor = spacing / RESIZE_SPACING
new_real_shape = img.shape * resize_factor
new_shape = np.round(new_real_shape)
real_resize = new_shape / img.shape
new_spacing = spacing / real_resize
img = scipy.ndimage.interpolation.zoom(img, real_resize)
origin = origin[::-1]
new_spacing = new_spacing[::-1]
save_itk(img,origin,new_spacing,imageDir.replace('original',SAVE_FOLDER))
if __name__ == "__main__":
for subset in range(10):
subsetDir = 'data\\original\\subset{}'.format(subset)
imageNames = glob.glob("{}/*.mhd".format(subsetDir))
Parallel(n_jobs=4)(delayed(reshape_image)(imageDir,subsetDir) for imageDir in imageNames) | true | true |
1c309b46bb2ad825ca24438b2fe20a68a15dd03d | 192 | py | Python | seldump/__main__.py | dvarrazzo/pg_seldump | 43110c6073d2d21b7e836cb08e91fb27edb4075f | [
"BSD-3-Clause"
] | 4 | 2020-04-14T12:57:47.000Z | 2021-10-30T11:11:49.000Z | seldump/__main__.py | dvarrazzo/pg_seldump | 43110c6073d2d21b7e836cb08e91fb27edb4075f | [
"BSD-3-Clause"
] | 6 | 2020-02-19T17:06:30.000Z | 2021-06-22T11:55:51.000Z | seldump/__main__.py | dvarrazzo/pg_seldump | 43110c6073d2d21b7e836cb08e91fb27edb4075f | [
"BSD-3-Clause"
] | 1 | 2021-03-12T21:47:29.000Z | 2021-03-12T21:47:29.000Z | #!/usr/bin/env python3
"""
Package entry point (can be executed with python -m seldump)
This file is part of pg_seldump.
"""
from .cli import script
if __name__ == "__main__":
script()
| 16 | 60 | 0.692708 |
from .cli import script
if __name__ == "__main__":
script()
| true | true |
1c309bc23728ebc4192c53a8057fee494abca4ce | 127 | py | Python | src/napari_cryofibsem_monitor/_tests/test_function.py | jojoelfe/napari-cryofibsem-monitor | d315391a07f1ffc3bdfa0135f0dcd9b734235495 | [
"MIT"
] | null | null | null | src/napari_cryofibsem_monitor/_tests/test_function.py | jojoelfe/napari-cryofibsem-monitor | d315391a07f1ffc3bdfa0135f0dcd9b734235495 | [
"MIT"
] | null | null | null | src/napari_cryofibsem_monitor/_tests/test_function.py | jojoelfe/napari-cryofibsem-monitor | d315391a07f1ffc3bdfa0135f0dcd9b734235495 | [
"MIT"
] | null | null | null | # from napari_cryofibsem_monitor import threshold, image_arithmetic
# add your tests here...
def test_something():
    """Placeholder so the test suite collects at least one item."""
    pass
| 15.875 | 67 | 0.76378 |
def test_something():
pass
| true | true |
1c309bcea6e7e4e6909ae85f188153eb14d9b7b8 | 193 | py | Python | abc/abc130/abc130b.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | 1 | 2019-08-21T00:49:34.000Z | 2019-08-21T00:49:34.000Z | abc/abc130/abc130b.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | null | null | null | abc/abc130/abc130b.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | null | null | null | N, X = map(int, input().split())
# ABC130 B: count bounce positions (including the start at 0) whose
# coordinate does not exceed X.  N and X are read on the previous line.
jumps = list(map(int, input().split()))

position = 0
count = 1  # the starting position 0 always qualifies
for i in range(N):
    position += jumps[i]
    if position > X:
        break
    count += 1
print(count)
| 14.846154 | 35 | 0.487047 | N, X = map(int, input().split())
L = list(map(int, input().split()))
d = 0
result = 1
for i in range(N):
d += L[i]
if d <= X:
result += 1
else:
break
print(result)
| true | true |
1c309ca6369d5e2c28af11709b877d6c487f42d9 | 4,882 | py | Python | InternalNode.py | matthew99carroll/VoronoiDiagram | 354a6ff08bb0858972b542b4b90973ce31996978 | [
"MIT"
] | null | null | null | InternalNode.py | matthew99carroll/VoronoiDiagram | 354a6ff08bb0858972b542b4b90973ce31996978 | [
"MIT"
] | null | null | null | InternalNode.py | matthew99carroll/VoronoiDiagram | 354a6ff08bb0858972b542b4b90973ce31996978 | [
"MIT"
] | null | null | null | import math
from Breakpoint import Breakpoint
from Node import Node
class InternalNode(Node):
    """Interior node of the beach-line tree in Fortune's algorithm.

    Represents the breakpoint between the parabolic arcs of two site
    events (``site1`` to the left of ``site2``).  The ``left``/``right``
    children are assigned by the tree-building code; only ``parent`` and
    the two sites are set here.
    """

    def __init__(self, site1, site2, parent):
        self.parent = parent
        self.site1 = site1
        self.site2 = site2

    def IsLeaf(self):
        """Internal nodes are never leaves."""
        return False

    def SiteToString(self):
        # str() so non-string site objects don't raise TypeError on
        # concatenation (the original concatenated the objects directly).
        return "S" + str(self.site1) + " " + str(self.site2)

    def ToString(self):
        """Human-readable dump of this node and its immediate children."""
        if self.left is None:
            leftSide = "Left:null"
        elif self.left.IsLeaf():
            leftSide = "Left:" + str(self.left)
        else:
            leftSide = "Left:" + self.left.SiteToString()
        if self.right is None:
            rightSide = "Right:null"
        elif self.right.IsLeaf():
            rightSide = "Right:" + str(self.right)
        else:
            rightSide = "Right:" + self.right.SiteToString()
        return self.SiteToString() + " " + leftSide + " " + rightSide

    def Traverse(self, x, y):
        """Descend toward the child covering *x* with the sweep line at *y*."""
        # Bug fix: the original called self.ComputeBreakPointAt (capital P),
        # a method that does not exist; the defined one is ComputeBreakpointAt.
        breakPointX = self.ComputeBreakpointAt(y)
        if x > breakPointX:
            return self.right
        else:
            return self.left

    def Contains(self, siteEvent):
        """True if *siteEvent* is one of this breakpoint's two sites."""
        return self.site1 is siteEvent or self.site2 is siteEvent

    def OtherChild(self, child):
        """Return the sibling of *child*, or None if *child* is not a child."""
        if self.left is child:
            return self.right
        elif self.right is child:
            return self.left
        else:
            return None

    def OtherSiteEvent(self, siteEvent):
        """Return the breakpoint's other site, or None if unrelated."""
        if siteEvent is self.site1:
            return self.site2
        elif siteEvent is self.site2:
            return self.site1
        else:
            return None

    def ReplaceSite(self, siteEvent, siteEventOther):
        """Swap *siteEvent* for *siteEventOther*; return True on success.

        Note: the original file defined this as a second method named
        ``Replace``, which Python silently discarded (the later child-node
        ``Replace`` shadowed it), so it was dead code.  It is exposed here
        under a distinct name; runtime behavior of ``Replace`` is unchanged.
        """
        if self.site1 is siteEvent:
            self.site1 = siteEventOther
            return True
        elif self.site2 is siteEvent:
            self.site2 = siteEventOther
            return True
        else:
            return False

    def Replace(self, node, nodeOther):
        """Replace child *node* with *nodeOther* (re-parenting it); True on success."""
        if self.left is node:
            self.left = nodeOther
            nodeOther.parent = self
            return True
        elif self.right is node:
            self.right = nodeOther
            nodeOther.parent = self
            return True
        else:
            return False

    def IsBreakpointBetween(self, site1, site2):
        """True if this is exactly the (site1, site2) breakpoint, in that order."""
        return self.site1 is site1 and self.site2 is site2

    def ComputeBreakpointUsingCircleTechnique(self, y):
        """Deprecated: breakpoint via the circle tangent to the sweep line.

        Only yields a single breakpoint (and assumes the perpendicular
        bisector is not vertical); superseded by ComputeBreakpointAt.
        """
        # x of the center of the circle through both sites, tangent to the
        # sweep line at height y (circle equation with both sites substituted).
        x = ((self.site2.x * self.site2.x) + (self.site2.y * self.site2.y) - (self.site1.x * self.site1.x) - (
            self.site1.y * self.site1.y) + 2 * (self.site1.y) * y - 2 * (self.site2.y) * y) / (
            2 * (self.site2.x - self.site1.x))
        # Use the other site if needed to avoid a zero slope denominator.
        site = self.site1
        if self.site1.x == x:
            site = self.site2
        mx = (site.x + x) / 2
        my = (site.y + y) / 2
        slope = (site.y - y) / (site.x - x)
        inverseSlope = -1 / slope
        c = my - inverseSlope * mx
        # The perpendicular bisector of a chord passes through the center.
        centerY = inverseSlope * x + c
        return Breakpoint(x, centerY)

    def ComputeBreakpointAt(self, y):
        """Return the x-coordinate of this breakpoint with the sweep line at *y*.

        Intersects the two parabolas (focus = site, directrix = sweep line);
        of the two quadratic roots, the correct one is selected by the
        left/right order of the sites.
        """
        # Parabola of site1 in standard ax^2 + bx + c form.
        d = 2 * (self.site1.y - y)
        a1 = 1 / d
        b1 = -2 * self.site1.x / d
        c1 = y + d / 4 + self.site1.x * self.site1.x / d

        # Parabola of site2.
        d = 2 * (self.site2.y - y)
        a2 = 1 / d
        b2 = -2 * self.site2.x / d
        c2 = y + d / 4 + self.site2.x * self.site2.x / d

        # Their intersection is the root of the difference quadratic.
        a = a1 - a2
        b = b1 - b2
        c = c1 - c2

        discriminant = b * b - 4 * a * c
        x1 = (-b + math.sqrt(discriminant)) / (2 * a)
        x2 = (-b - math.sqrt(discriminant)) / (2 * a)
        # Bug fix: the original used math.min/math.max, which do not exist
        # (min and max are builtins), raising AttributeError at runtime.
        if self.site1.x <= self.site2.x:
            return min(x1, x2)
        else:
            return max(x1, x2)
| 32.765101 | 127 | 0.573535 | import math
from Breakpoint import Breakpoint
from Node import Node
class InternalNode(Node):
def __init__(self, site1, site2, parent):
self.parent = parent
self.site1 = site1
self.site2 = site2
def IsLeaf(self):
return False
def SiteToString(self):
return "S" + self.site1 + " " + self.site2
def ToString(self):
if self.left is None:
leftSide = "Left:null"
elif self.left.IsLeaf():
leftSide = "Left:" + self.left
else:
leftSide = "Left:" + self.left.SiteToString()
if self.right is None:
rightSide = "Right:null"
elif self.right.IsLeaf():
rightSide = "Right:" + self.right
else:
rightSide = "Right:" + self.right.SiteToString()
return self.SiteToString() + " " + leftSide + " " + rightSide
def Traverse(self, x, y):
breakPointX = self.ComputeBreakPointAt(y)
if x > breakPointX:
return self.right
else:
return self.left
def Contains(self, siteEvent):
return self.site1 is siteEvent or self.site2 is siteEvent
def OtherChild(self, child):
if self.left is child:
return self.right
elif self.right is child:
return self.left
else:
return None
def OtherSiteEvent(self, siteEvent):
if siteEvent is self.site1:
return self.site2
elif siteEvent is self.site2:
return self.site1
else:
return None
def Replace(self, siteEvent, siteEventOther):
if self.site1 is siteEvent:
self.site1 = siteEventOther
return True
elif self.site2 is siteEvent:
self.site2 = siteEventOther
return True
else:
return False
def Replace(self, node, nodeOther):
if self.left is node:
self.left = nodeOther
nodeOther.parent = self
return True
elif self.right is node:
self.right = nodeOther
nodeOther.parent = self
return True
else:
return False
def IsBreakpointBetween(self, site1, site2):
return self.site1 is site1 and self.site2 is site2
def ComputeBreakpointUsingCircleTechnique(self, y):
x = ((self.site2.x * self.site2.x) + (self.site2.y * self.site2.y) - (self.site1.x * self.site1.x) - (
self.site1.y * self.site1.y) + 2 * (self.site1.y) * y - 2 * (self.site2.y) * y) / (
2 * (self.site2.x - self.site1.x))
site = self.site1
if self.site1.x == x:
site = self.site2
mx = (site.x + x) / 2
my = (site.y + y) / 2
slope = (site.y - y) / (site.x - x)
inverseSlope = -1 / slope
c = my - inverseSlope * mx
centerY = inverseSlope * x + c
return Breakpoint(x, centerY)
def ComputeBreakpointAt(self, y):
d = 2 * (self.site1.y - y)
a1 = 1 / d
b1 = -2 * self.site1.x / d
c1 = y + d / 4 + self.site1.x * self.site1.x / d
d = 2 * (self.site2.y - y)
a2 = 1 / d
b2 = -2 * self.site2.x / d
c2 = y + d / 4 + self.site2.x * self.site2.x / d
a = a1 - a2
b = b1 - b2
c = c1 - c2
discremenant = b * b - 4 * a * c
x1 = (-b + math.sqrt(discremenant)) / (2 * a)
x2 = (-b - math.sqrt(discremenant)) / (2 * a)
if self.site1.x <= self.site2.x:
return math.min(x1, x2)
else:
return math.max(x1, x2)
| true | true |
1c309d8c5efe4639a8e02dcf6aefdaa31f8bb0ed | 11,913 | py | Python | rllib/policy/policy_map.py | willfrey/ray | 288a81b42ef0186ab4db33b30191614a7bdb69f6 | [
"Apache-2.0"
] | 1 | 2019-06-19T02:23:43.000Z | 2019-06-19T02:23:43.000Z | rllib/policy/policy_map.py | willfrey/ray | 288a81b42ef0186ab4db33b30191614a7bdb69f6 | [
"Apache-2.0"
] | 73 | 2021-09-25T07:11:39.000Z | 2022-03-26T07:10:59.000Z | rllib/policy/policy_map.py | willfrey/ray | 288a81b42ef0186ab4db33b30191614a7bdb69f6 | [
"Apache-2.0"
] | 1 | 2019-09-24T16:24:49.000Z | 2019-09-24T16:24:49.000Z | from collections import deque
import gym
import os
import pickle
import threading
from typing import Callable, Dict, Optional, Set, Type, TYPE_CHECKING
from ray.rllib.policy.policy import PolicySpec
from ray.rllib.utils.annotations import override, PublicAPI
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.tf_utils import get_tf_eager_cls_if_necessary
from ray.rllib.utils.threading import with_lock
from ray.rllib.utils.typing import PartialTrainerConfigDict, PolicyID, TrainerConfigDict
from ray.tune.utils.util import merge_dicts
if TYPE_CHECKING:
from ray.rllib.policy.policy import Policy
tf1, tf, tfv = try_import_tf()
@PublicAPI
class PolicyMap(dict):
"""Maps policy IDs to Policy objects.
Thereby, keeps n policies in memory and - when capacity is reached -
writes the least recently used to disk. This allows adding 100s of
policies to a Trainer for league-based setups w/o running out of memory.
"""
def __init__(
self,
worker_index: int,
num_workers: int,
capacity: Optional[int] = None,
path: Optional[str] = None,
policy_config: Optional[TrainerConfigDict] = None,
session_creator: Optional[Callable[[], "tf1.Session"]] = None,
seed: Optional[int] = None,
):
"""Initializes a PolicyMap instance.
Args:
worker_index: The worker index of the RolloutWorker this map
resides in.
num_workers: The total number of remote workers in the
WorkerSet to which this map's RolloutWorker belongs to.
capacity: The maximum number of policies to hold in memory.
The least used ones are written to disk/S3 and retrieved
when needed.
path: The path to store the policy pickle files to. Files
will have the name: [policy_id].[worker idx].policy.pkl.
policy_config: The Trainer's base config dict.
session_creator: An optional
tf1.Session creation callable.
seed: An optional seed (used to seed tf policies).
"""
super().__init__()
self.worker_index = worker_index
self.num_workers = num_workers
self.session_creator = session_creator
self.seed = seed
# The file extension for stashed policies (that are no longer available
# in-memory but can be reinstated any time from storage).
self.extension = f".{self.worker_index}.policy.pkl"
# Dictionary of keys that may be looked up (cached or not).
self.valid_keys: Set[str] = set()
# The actual cache with the in-memory policy objects.
self.cache: Dict[str, Policy] = {}
# The doubly-linked list holding the currently in-memory objects.
self.deque = deque(maxlen=capacity or 10)
# The file path where to store overflowing policies.
self.path = path or "."
# The core config to use. Each single policy's config override is
# added on top of this.
self.policy_config: TrainerConfigDict = policy_config or {}
# The orig classes/obs+act spaces, and config overrides of the
# Policies.
self.policy_specs: Dict[PolicyID, PolicySpec] = {}
# Lock used for locking some methods on the object-level.
# This prevents possible race conditions when accessing the map
# and the underlying structures, like self.deque and others.
self._lock = threading.RLock()
def create_policy(
self,
policy_id: PolicyID,
policy_cls: Type["Policy"],
observation_space: gym.Space,
action_space: gym.Space,
config_override: PartialTrainerConfigDict,
merged_config: TrainerConfigDict,
) -> None:
"""Creates a new policy and stores it to the cache.
Args:
policy_id: The policy ID. This is the key under which
the created policy will be stored in this map.
policy_cls: The (original) policy class to use.
This may still be altered in case tf-eager (and tracing)
is used.
observation_space: The observation space of the
policy.
action_space: The action space of the policy.
config_override: The config override
dict for this policy. This is the partial dict provided by
the user.
merged_config: The entire config (merged
default config + `config_override`).
"""
framework = merged_config.get("framework", "tf")
class_ = get_tf_eager_cls_if_necessary(policy_cls, merged_config)
# Tf.
if framework in ["tf2", "tf", "tfe"]:
var_scope = policy_id + (
("_wk" + str(self.worker_index)) if self.worker_index else ""
)
# For tf static graph, build every policy in its own graph
# and create a new session for it.
if framework == "tf":
with tf1.Graph().as_default():
if self.session_creator:
sess = self.session_creator()
else:
sess = tf1.Session(
config=tf1.ConfigProto(
gpu_options=tf1.GPUOptions(allow_growth=True)
)
)
with sess.as_default():
# Set graph-level seed.
if self.seed is not None:
tf1.set_random_seed(self.seed)
with tf1.variable_scope(var_scope):
self[policy_id] = class_(
observation_space, action_space, merged_config
)
# For tf-eager: no graph, no session.
else:
with tf1.variable_scope(var_scope):
self[policy_id] = class_(
observation_space, action_space, merged_config
)
# Non-tf: No graph, no session.
else:
class_ = policy_cls
self[policy_id] = class_(observation_space, action_space, merged_config)
# Store spec (class, obs-space, act-space, and config overrides) such
# that the map will be able to reproduce on-the-fly added policies
# from disk.
self.policy_specs[policy_id] = PolicySpec(
policy_class=policy_cls,
observation_space=observation_space,
action_space=action_space,
config=config_override,
)
@with_lock
@override(dict)
def __getitem__(self, item):
# Never seen this key -> Error.
if item not in self.valid_keys:
raise KeyError(f"PolicyID '{item}' not found in this PolicyMap!")
# Item already in cache -> Rearrange deque (least recently used) and
# return.
if item in self.cache:
self.deque.remove(item)
self.deque.append(item)
# Item not currently in cache -> Get from disk and - if at capacity -
# remove leftmost one.
else:
self._read_from_disk(policy_id=item)
return self.cache[item]
@with_lock
@override(dict)
def __setitem__(self, key, value):
# Item already in cache -> Rearrange deque (least recently used).
if key in self.cache:
self.deque.remove(key)
self.deque.append(key)
self.cache[key] = value
# Item not currently in cache -> store new value and - if at capacity -
# remove leftmost one.
else:
# Cache at capacity -> Drop leftmost item.
if len(self.deque) == self.deque.maxlen:
self._stash_to_disk()
self.deque.append(key)
self.cache[key] = value
self.valid_keys.add(key)
@with_lock
@override(dict)
def __delitem__(self, key):
# Make key invalid.
self.valid_keys.remove(key)
# Remove policy from memory if currently cached.
if key in self.cache:
policy = self.cache[key]
self._close_session(policy)
del self.cache[key]
# Remove file associated with the policy, if it exists.
filename = self.path + "/" + key + self.extension
if os.path.isfile(filename):
os.remove(filename)
@override(dict)
def __iter__(self):
return iter(self.keys())
@override(dict)
def items(self):
"""Iterates over all policies, even the stashed-to-disk ones."""
def gen():
for key in self.valid_keys:
yield (key, self[key])
return gen()
@override(dict)
def keys(self):
self._lock.acquire()
ks = list(self.valid_keys)
self._lock.release()
def gen():
for key in ks:
yield key
return gen()
@override(dict)
def values(self):
self._lock.acquire()
vs = [self[k] for k in self.valid_keys]
self._lock.release()
def gen():
for value in vs:
yield value
return gen()
@with_lock
@override(dict)
def update(self, __m, **kwargs):
for k, v in __m.items():
self[k] = v
for k, v in kwargs.items():
self[k] = v
@with_lock
@override(dict)
def get(self, key):
if key not in self.valid_keys:
return None
return self[key]
@with_lock
@override(dict)
def __len__(self):
"""Returns number of all policies, including the stashed-to-disk ones."""
return len(self.valid_keys)
@with_lock
@override(dict)
def __contains__(self, item):
return item in self.valid_keys
def _stash_to_disk(self):
"""Writes the least-recently used policy to disk and rearranges cache.
Also closes the session - if applicable - of the stashed policy.
"""
# Get least recently used policy (all the way on the left in deque).
delkey = self.deque.popleft()
policy = self.cache[delkey]
# Get its state for writing to disk.
policy_state = policy.get_state()
# Closes policy's tf session, if any.
self._close_session(policy)
# Remove from memory. This will clear the tf Graph as well.
del self.cache[delkey]
# Write state to disk.
with open(self.path + "/" + delkey + self.extension, "wb") as f:
pickle.dump(policy_state, file=f)
def _read_from_disk(self, policy_id):
"""Reads a policy ID from disk and re-adds it to the cache."""
# Make sure this policy ID is not in the cache right now.
assert policy_id not in self.cache
# Read policy state from disk.
with open(self.path + "/" + policy_id + self.extension, "rb") as f:
policy_state = pickle.load(f)
# Get class and config override.
merged_conf = merge_dicts(
self.policy_config, self.policy_specs[policy_id].config
)
# Create policy object (from its spec: cls, obs-space, act-space,
# config).
self.create_policy(
policy_id,
self.policy_specs[policy_id].policy_class,
self.policy_specs[policy_id].observation_space,
self.policy_specs[policy_id].action_space,
self.policy_specs[policy_id].config,
merged_conf,
)
# Restore policy's state.
policy = self[policy_id]
policy.set_state(policy_state)
def _close_session(self, policy):
sess = policy.get_session()
# Closes the tf session, if any.
if sess is not None:
sess.close()
| 35.990937 | 88 | 0.589944 | from collections import deque
import gym
import os
import pickle
import threading
from typing import Callable, Dict, Optional, Set, Type, TYPE_CHECKING
from ray.rllib.policy.policy import PolicySpec
from ray.rllib.utils.annotations import override, PublicAPI
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.tf_utils import get_tf_eager_cls_if_necessary
from ray.rllib.utils.threading import with_lock
from ray.rllib.utils.typing import PartialTrainerConfigDict, PolicyID, TrainerConfigDict
from ray.tune.utils.util import merge_dicts
if TYPE_CHECKING:
from ray.rllib.policy.policy import Policy
tf1, tf, tfv = try_import_tf()
@PublicAPI
class PolicyMap(dict):
def __init__(
self,
worker_index: int,
num_workers: int,
capacity: Optional[int] = None,
path: Optional[str] = None,
policy_config: Optional[TrainerConfigDict] = None,
session_creator: Optional[Callable[[], "tf1.Session"]] = None,
seed: Optional[int] = None,
):
super().__init__()
self.worker_index = worker_index
self.num_workers = num_workers
self.session_creator = session_creator
self.seed = seed
self.extension = f".{self.worker_index}.policy.pkl"
self.valid_keys: Set[str] = set()
self.cache: Dict[str, Policy] = {}
self.deque = deque(maxlen=capacity or 10)
self.path = path or "."
# added on top of this.
self.policy_config: TrainerConfigDict = policy_config or {}
# The orig classes/obs+act spaces, and config overrides of the
# Policies.
self.policy_specs: Dict[PolicyID, PolicySpec] = {}
# Lock used for locking some methods on the object-level.
# This prevents possible race conditions when accessing the map
# and the underlying structures, like self.deque and others.
self._lock = threading.RLock()
def create_policy(
self,
policy_id: PolicyID,
policy_cls: Type["Policy"],
observation_space: gym.Space,
action_space: gym.Space,
config_override: PartialTrainerConfigDict,
merged_config: TrainerConfigDict,
) -> None:
framework = merged_config.get("framework", "tf")
class_ = get_tf_eager_cls_if_necessary(policy_cls, merged_config)
# Tf.
if framework in ["tf2", "tf", "tfe"]:
var_scope = policy_id + (
("_wk" + str(self.worker_index)) if self.worker_index else ""
)
# For tf static graph, build every policy in its own graph
# and create a new session for it.
if framework == "tf":
with tf1.Graph().as_default():
if self.session_creator:
sess = self.session_creator()
else:
sess = tf1.Session(
config=tf1.ConfigProto(
gpu_options=tf1.GPUOptions(allow_growth=True)
)
)
with sess.as_default():
# Set graph-level seed.
if self.seed is not None:
tf1.set_random_seed(self.seed)
with tf1.variable_scope(var_scope):
self[policy_id] = class_(
observation_space, action_space, merged_config
)
# For tf-eager: no graph, no session.
else:
with tf1.variable_scope(var_scope):
self[policy_id] = class_(
observation_space, action_space, merged_config
)
# Non-tf: No graph, no session.
else:
class_ = policy_cls
self[policy_id] = class_(observation_space, action_space, merged_config)
# Store spec (class, obs-space, act-space, and config overrides) such
# that the map will be able to reproduce on-the-fly added policies
# from disk.
self.policy_specs[policy_id] = PolicySpec(
policy_class=policy_cls,
observation_space=observation_space,
action_space=action_space,
config=config_override,
)
@with_lock
@override(dict)
def __getitem__(self, item):
# Never seen this key -> Error.
if item not in self.valid_keys:
raise KeyError(f"PolicyID '{item}' not found in this PolicyMap!")
# Item already in cache -> Rearrange deque (least recently used) and
# return.
if item in self.cache:
self.deque.remove(item)
self.deque.append(item)
# Item not currently in cache -> Get from disk and - if at capacity -
# remove leftmost one.
else:
self._read_from_disk(policy_id=item)
return self.cache[item]
@with_lock
@override(dict)
def __setitem__(self, key, value):
# Item already in cache -> Rearrange deque (least recently used).
if key in self.cache:
self.deque.remove(key)
self.deque.append(key)
self.cache[key] = value
# Item not currently in cache -> store new value and - if at capacity -
# remove leftmost one.
else:
# Cache at capacity -> Drop leftmost item.
if len(self.deque) == self.deque.maxlen:
self._stash_to_disk()
self.deque.append(key)
self.cache[key] = value
self.valid_keys.add(key)
@with_lock
@override(dict)
def __delitem__(self, key):
# Make key invalid.
self.valid_keys.remove(key)
# Remove policy from memory if currently cached.
if key in self.cache:
policy = self.cache[key]
self._close_session(policy)
del self.cache[key]
# Remove file associated with the policy, if it exists.
filename = self.path + "/" + key + self.extension
if os.path.isfile(filename):
os.remove(filename)
@override(dict)
def __iter__(self):
return iter(self.keys())
@override(dict)
def items(self):
def gen():
for key in self.valid_keys:
yield (key, self[key])
return gen()
@override(dict)
def keys(self):
self._lock.acquire()
ks = list(self.valid_keys)
self._lock.release()
def gen():
for key in ks:
yield key
return gen()
@override(dict)
def values(self):
self._lock.acquire()
vs = [self[k] for k in self.valid_keys]
self._lock.release()
def gen():
for value in vs:
yield value
return gen()
@with_lock
@override(dict)
def update(self, __m, **kwargs):
for k, v in __m.items():
self[k] = v
for k, v in kwargs.items():
self[k] = v
@with_lock
@override(dict)
def get(self, key):
if key not in self.valid_keys:
return None
return self[key]
@with_lock
@override(dict)
def __len__(self):
return len(self.valid_keys)
@with_lock
@override(dict)
def __contains__(self, item):
return item in self.valid_keys
def _stash_to_disk(self):
# Get least recently used policy (all the way on the left in deque).
delkey = self.deque.popleft()
policy = self.cache[delkey]
# Get its state for writing to disk.
policy_state = policy.get_state()
# Closes policy's tf session, if any.
self._close_session(policy)
del self.cache[delkey]
with open(self.path + "/" + delkey + self.extension, "wb") as f:
pickle.dump(policy_state, file=f)
def _read_from_disk(self, policy_id):
assert policy_id not in self.cache
with open(self.path + "/" + policy_id + self.extension, "rb") as f:
policy_state = pickle.load(f)
merged_conf = merge_dicts(
self.policy_config, self.policy_specs[policy_id].config
)
self.create_policy(
policy_id,
self.policy_specs[policy_id].policy_class,
self.policy_specs[policy_id].observation_space,
self.policy_specs[policy_id].action_space,
self.policy_specs[policy_id].config,
merged_conf,
)
policy = self[policy_id]
policy.set_state(policy_state)
def _close_session(self, policy):
sess = policy.get_session()
# Closes the tf session, if any.
if sess is not None:
sess.close()
| true | true |
1c309e29fa3d3e00f8540843a8b0261a9e64bf26 | 92 | py | Python | code/abc126_a_01.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | 3 | 2019-08-16T16:55:48.000Z | 2021-04-11T10:21:40.000Z | code/abc126_a_01.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | null | null | null | code/abc126_a_01.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | null | null | null | n,k=map(int,input().split())
s=input()
print(s[:k-1].upper()+s[k-1:k].lower()+s[k:].upper()) | 30.666667 | 53 | 0.576087 | n,k=map(int,input().split())
s=input()
print(s[:k-1].upper()+s[k-1:k].lower()+s[k:].upper()) | true | true |
1c30a205b90f870420979848869282a055b6dbee | 8,421 | py | Python | appengine/findit/model/wf_swarming_task.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | 2 | 2021-04-13T21:22:18.000Z | 2021-09-07T02:11:57.000Z | appengine/findit/model/wf_swarming_task.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | 21 | 2020-09-06T02:41:05.000Z | 2022-03-02T04:40:01.000Z | appengine/findit/model/wf_swarming_task.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
from google.appengine.ext import ndb
from libs import time_util
from model.base_build_model import BaseBuildModel
from model.base_swarming_task import BaseSwarmingTask
class _ResultCount(ndb.Model):
"""Represent one result status and the count."""
status = ndb.StringProperty(indexed=False)
count = ndb.IntegerProperty(indexed=False)
class _ClassifiedTestResult(ndb.Model):
"""Represents classified result of one test."""
test_name = ndb.StringProperty(indexed=False)
# Total runs of the test in a rerun.
total_run = ndb.IntegerProperty(indexed=False)
# Number of runs with expected result.
num_expected_results = ndb.IntegerProperty(indexed=False)
# Number of runs with unexpected result.
num_unexpected_results = ndb.IntegerProperty(indexed=False)
# All the passing status and their counts.
passes = ndb.LocalStructuredProperty(
_ResultCount, repeated=True, compressed=True)
# All the failing status and their counts.
failures = ndb.LocalStructuredProperty(
_ResultCount, repeated=True, compressed=True)
# All the skipping status and their counts.
skips = ndb.LocalStructuredProperty(
_ResultCount, repeated=True, compressed=True)
# All the unknown status and their counts.
unknowns = ndb.LocalStructuredProperty(
_ResultCount, repeated=True, compressed=True)
# All the unknown status and their counts.
notruns = ndb.LocalStructuredProperty(
_ResultCount, repeated=True, compressed=True)
@staticmethod
def _GetResultList(results):
return [
_ResultCount(status=status, count=count)
for status, count in results.iteritems()
]
@classmethod
def FromClassifiedTestResultObject(cls, test_name, classified_results):
result = cls()
result.test_name = test_name
result.total_run = classified_results.total_run
result.num_expected_results = classified_results.num_expected_results
result.num_unexpected_results = classified_results.num_unexpected_results
result.passes = cls._GetResultList(classified_results.results.passes)
result.failures = cls._GetResultList(classified_results.results.failures)
result.skips = cls._GetResultList(classified_results.results.skips)
result.unknowns = cls._GetResultList(classified_results.results.unknowns)
result.notruns = cls._GetResultList(classified_results.results.notruns)
return result
class WfSwarmingTask(BaseBuildModel, BaseSwarmingTask):
"""Represents a swarming task for a failed step.
'Wf' is short for waterfall.
"""
def _GetClassifiedTestsFromLegacyTestStatuses(self):
"""Classifies tests into lists of reliable and flaky tests from
legacy test statuses.
example legacy test statuses:
{
'test1': {
'total_run': 2,
'SUCCESS': 2
},
'test2': {
'total_run': 4,
'SUCCESS': 2,
'FAILURE': 2
},
'test3': {
'total_run': 6,
'FAILURE': 6
},
'test4': {
'total_run': 6,
'SKIPPED': 6
},
'test5': {
'total_run': 6,
'UNKNOWN': 6
}
}
example classified tests:
{
'flaky_tests': ['test1', 'test2'],
'reliable_tests': ['test3', 'test4'],
'unknown_tests': ['test5']
}
"""
tests = defaultdict(list)
for test_name, test_statuses in self.tests_statuses.iteritems():
if test_statuses.get('SUCCESS'): # Test passed for some runs, flaky.
tests['flaky_tests'].append(test_name)
elif test_statuses.get('UNKNOWN'):
tests['unknown_tests'].append(test_name)
else:
# Here we consider a 'non-flaky' test to be 'reliable'.
# If the test is 'SKIPPED', there should be failure in its dependency,
# considers it to be failed as well.
# TODO(chanli): Check more test statuses.
tests['reliable_tests'].append(test_name)
return tests
@property
def classified_tests(self):
"""Classifies tests into lists of reliable and flaky tests.
The swarming task is for deflake purpose, meaning Findit runs the task on
failed tests that it finds on waterfall.
So the classification should be:
* Flaky failure: Any test run succeeded or resulted in an expected status.
* Unknown failure: Test is not flaky, and any test run ended with an
unknown status.
* Reliable failure: All test runs failed or skipped unexpectedly.
example classified tests:
{
'flaky_tests': ['test1'],
'reliable_tests': ['test3'],
'unknown_tests': ['test2']
}
"""
if not self.classified_test_results:
return self._GetClassifiedTestsFromLegacyTestStatuses()
tests = defaultdict(list)
for classified_test_result in self.classified_test_results:
test_name = classified_test_result.test_name
if (classified_test_result.num_expected_results > 0 or
classified_test_result.passes):
# There are expected or successful runs for a test that failed on
# waterfall, classifies the test as a flake.
tests['flaky_tests'].append(test_name)
elif classified_test_result.unknowns or classified_test_result.notruns:
tests['unknown_tests'].append(test_name)
else:
# Here we consider a 'non-flaky' test to be 'reliable'.
# If the test has skipping results, there should be failure in its
# dependency, considers it to be failed as well.
tests['reliable_tests'].append(test_name)
return tests
@property
def reliable_tests(self):
return self.classified_tests.get('reliable_tests', [])
@property
def flaky_tests(self):
return self.classified_tests.get('flaky_tests', [])
@property
def reproducible_flaky_tests(self):
tests = []
if not self.classified_test_results:
# For Legacy data.
for test_name, test_statuses in self.tests_statuses.iteritems():
if (test_statuses.get('SUCCESS') and
test_statuses['SUCCESS'] < test_statuses['total_run']):
# Test has passed and not passed runs, confirmed to be flaky.
tests.append(test_name)
return tests
for classified_test_result in self.classified_test_results:
test_name = classified_test_result.test_name
if (classified_test_result.num_expected_results > 0 and
classified_test_result.num_unexpected_results > 0):
# Test has expected and unexpected runs, confirmed to be flaky.
tests.append(test_name)
return tests
@ndb.ComputedProperty
def step_name(self):
return self.key.pairs()[1][1]
@staticmethod
def _CreateKey(master_name, builder_name, build_number,
step_name): # pragma: no cover
build_key = BaseBuildModel.CreateBuildKey(master_name, builder_name,
build_number)
return ndb.Key('WfBuild', build_key, 'WfSwarmingTask', step_name)
@staticmethod
def Create(master_name, builder_name, build_number,
step_name): # pragma: no cover
task = WfSwarmingTask(
key=WfSwarmingTask._CreateKey(master_name, builder_name, build_number,
step_name))
task.parameters = task.parameters or {}
task.tests_statuses = task.tests_statuses or {}
task.requested_time = time_util.GetUTCNow()
return task
@staticmethod
def Get(master_name, builder_name, build_number,
step_name): # pragma: no cover
return WfSwarmingTask._CreateKey(master_name, builder_name, build_number,
step_name).get()
@staticmethod
def GetClassifiedTestResults(results):
"""Gets classified test results and populates data to
_ClassifiedTestResults.
Args:
results(ClassifiedTestResults): A plain dict-like object for classified
test results.
"""
return [
_ClassifiedTestResult.FromClassifiedTestResultObject(test_name, result)
for test_name, result in results.iteritems()
]
# Classified test results.
classified_test_results = ndb.LocalStructuredProperty(
_ClassifiedTestResult, repeated=True, compressed=True)
| 35.23431 | 80 | 0.687329 |
from collections import defaultdict
from google.appengine.ext import ndb
from libs import time_util
from model.base_build_model import BaseBuildModel
from model.base_swarming_task import BaseSwarmingTask
class _ResultCount(ndb.Model):
status = ndb.StringProperty(indexed=False)
count = ndb.IntegerProperty(indexed=False)
class _ClassifiedTestResult(ndb.Model):
test_name = ndb.StringProperty(indexed=False)
total_run = ndb.IntegerProperty(indexed=False)
num_expected_results = ndb.IntegerProperty(indexed=False)
num_unexpected_results = ndb.IntegerProperty(indexed=False)
passes = ndb.LocalStructuredProperty(
_ResultCount, repeated=True, compressed=True)
failures = ndb.LocalStructuredProperty(
_ResultCount, repeated=True, compressed=True)
skips = ndb.LocalStructuredProperty(
_ResultCount, repeated=True, compressed=True)
unknowns = ndb.LocalStructuredProperty(
_ResultCount, repeated=True, compressed=True)
notruns = ndb.LocalStructuredProperty(
_ResultCount, repeated=True, compressed=True)
@staticmethod
def _GetResultList(results):
return [
_ResultCount(status=status, count=count)
for status, count in results.iteritems()
]
@classmethod
def FromClassifiedTestResultObject(cls, test_name, classified_results):
result = cls()
result.test_name = test_name
result.total_run = classified_results.total_run
result.num_expected_results = classified_results.num_expected_results
result.num_unexpected_results = classified_results.num_unexpected_results
result.passes = cls._GetResultList(classified_results.results.passes)
result.failures = cls._GetResultList(classified_results.results.failures)
result.skips = cls._GetResultList(classified_results.results.skips)
result.unknowns = cls._GetResultList(classified_results.results.unknowns)
result.notruns = cls._GetResultList(classified_results.results.notruns)
return result
class WfSwarmingTask(BaseBuildModel, BaseSwarmingTask):
def _GetClassifiedTestsFromLegacyTestStatuses(self):
tests = defaultdict(list)
for test_name, test_statuses in self.tests_statuses.iteritems():
if test_statuses.get('SUCCESS'):
tests['flaky_tests'].append(test_name)
elif test_statuses.get('UNKNOWN'):
tests['unknown_tests'].append(test_name)
else:
tests['reliable_tests'].append(test_name)
return tests
@property
def classified_tests(self):
if not self.classified_test_results:
return self._GetClassifiedTestsFromLegacyTestStatuses()
tests = defaultdict(list)
for classified_test_result in self.classified_test_results:
test_name = classified_test_result.test_name
if (classified_test_result.num_expected_results > 0 or
classified_test_result.passes):
tests['flaky_tests'].append(test_name)
elif classified_test_result.unknowns or classified_test_result.notruns:
tests['unknown_tests'].append(test_name)
else:
tests['reliable_tests'].append(test_name)
return tests
@property
def reliable_tests(self):
return self.classified_tests.get('reliable_tests', [])
@property
def flaky_tests(self):
return self.classified_tests.get('flaky_tests', [])
@property
def reproducible_flaky_tests(self):
tests = []
if not self.classified_test_results:
for test_name, test_statuses in self.tests_statuses.iteritems():
if (test_statuses.get('SUCCESS') and
test_statuses['SUCCESS'] < test_statuses['total_run']):
tests.append(test_name)
return tests
for classified_test_result in self.classified_test_results:
test_name = classified_test_result.test_name
if (classified_test_result.num_expected_results > 0 and
classified_test_result.num_unexpected_results > 0):
tests.append(test_name)
return tests
@ndb.ComputedProperty
def step_name(self):
return self.key.pairs()[1][1]
@staticmethod
def _CreateKey(master_name, builder_name, build_number,
step_name):
build_key = BaseBuildModel.CreateBuildKey(master_name, builder_name,
build_number)
return ndb.Key('WfBuild', build_key, 'WfSwarmingTask', step_name)
@staticmethod
def Create(master_name, builder_name, build_number,
step_name):
task = WfSwarmingTask(
key=WfSwarmingTask._CreateKey(master_name, builder_name, build_number,
step_name))
task.parameters = task.parameters or {}
task.tests_statuses = task.tests_statuses or {}
task.requested_time = time_util.GetUTCNow()
return task
@staticmethod
def Get(master_name, builder_name, build_number,
step_name):
return WfSwarmingTask._CreateKey(master_name, builder_name, build_number,
step_name).get()
@staticmethod
def GetClassifiedTestResults(results):
return [
_ClassifiedTestResult.FromClassifiedTestResultObject(test_name, result)
for test_name, result in results.iteritems()
]
classified_test_results = ndb.LocalStructuredProperty(
_ClassifiedTestResult, repeated=True, compressed=True)
| true | true |
1c30a43320ab19b30bab87b06cd9e0b5a541adf3 | 541 | py | Python | rofi_menu.py | Marble879/Rofi-Network-Manager | 637cc6aa8b8a5d6981a36806a94a56614a25211c | [
"MIT"
] | 17 | 2021-12-22T12:34:56.000Z | 2021-12-30T10:58:40.000Z | rofi_menu.py | Marble879/Rofi-Network-Manager | 637cc6aa8b8a5d6981a36806a94a56614a25211c | [
"MIT"
] | null | null | null | rofi_menu.py | Marble879/Rofi-Network-Manager | 637cc6aa8b8a5d6981a36806a94a56614a25211c | [
"MIT"
] | null | null | null | from rofi import Rofi
import os
import subprocess
def get_wifi_networks():
    """Return the output of `nmcli device wifi list` as a list of text lines.

    Raises subprocess.CalledProcessError if nmcli exits non-zero, and
    FileNotFoundError if nmcli is not installed.
    """
    raw_output = subprocess.check_output(['nmcli', 'device', 'wifi', 'list'])
    # One entry per output line; the final entry is '' because the output
    # ends with a newline (split('\n') keeps that trailing empty string).
    return raw_output.decode("utf-8").split('\n')
def quick_options_test():
    """Show a sample Rofi selection menu and return the (index, key) chosen.

    :returns: tuple (index, key) as produced by Rofi.select — the index of
        the selected option and the key the user pressed.
    """
    options = ['Red', 'Green', 'Blue', 'White', 'Silver', 'Black', 'Other']
    # Bug fix: `rofi` was referenced as a global that is never defined
    # anywhere in this module (only the Rofi class is imported), so calling
    # this function raised NameError. Instantiate the client locally.
    rofi = Rofi()
    index, key = rofi.select('What colour car do you drive?', options)
    return index, key
if __name__ == '__main__':
    # Smoke test: query nmcli for visible wifi networks (result is discarded).
    get_wifi_networks()
import os
import subprocess
def get_wifi_networks():
wifi_devices_raw = subprocess.check_output(['nmcli', 'device', 'wifi', 'list'])
wifi_devices_decoded = wifi_devices_raw.decode("utf-8")
wifi_list = wifi_devices_decoded.split('\n')
return wifi_list
def quick_options_test():
options = ['Red', 'Green', 'Blue', 'White', 'Silver', 'Black', 'Other']
index, key = rofi.select('What colour car do you drive?', options)
return index, key
if __name__ == '__main__':
get_wifi_networks() | true | true |
1c30a4cfe5d3b3fce69fc274e2f7316b95ffb1f0 | 20,223 | py | Python | dqn2.py | tierriminator/GraphQSat | 9a356438d1dc68f28a14e71e3f5bd306bd8ce877 | [
"Apache-2.0"
] | null | null | null | dqn2.py | tierriminator/GraphQSat | 9a356438d1dc68f28a14e71e3f5bd306bd8ce877 | [
"Apache-2.0"
] | null | null | null | dqn2.py | tierriminator/GraphQSat | 9a356438d1dc68f28a14e71e3f5bd306bd8ce877 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019-2020 Nvidia Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import os
from collections import deque, defaultdict
import pickle
import copy
import yaml
import time
from gqsat.utils import build_argparser, evaluate, make_env
from gqsat.models import EncoderCoreDecoder, SatModel
from gqsat.agents import GraphAgent, MiniSATAgent
from gqsat.learners import GraphLearner
from gqsat.buffer import ReplayGraphBuffer
from tensorboardX import SummaryWriter
def save_training_state(
    model,
    learner,
    episodes_done,
    transitions_seen,
    best_eval_so_far,
    args,
    in_eval_mode=False,
):
    """Checkpoint the full training state into args.logdir.

    Persists three artifacts so an interrupted run can later be resumed by
    DQN._init_from_status:
      * model_<step>.chkp -- the network weights (torch state dict),
      * buffer.pkl        -- the pickled experience-replay buffer,
      * status.yaml       -- counters, paths, optimizer/scheduler state.

    :returns: path of the written status.yaml file.
    """
    # save the model
    model_path = os.path.join(args.logdir, f"model_{learner.step_ctr}.chkp")
    torch.save(model.state_dict(), model_path)
    # save the experience replay
    buffer_path = os.path.join(args.logdir, "buffer.pkl")
    with open(buffer_path, "wb") as f:
        pickle.dump(learner.buffer, f)
    # save important parameters
    # NOTE: classes (optimizer_class/scheduler_class) are stored so resume can
    # re-instantiate them before loading the corresponding state dicts.
    train_status = {
        "step_ctr": learner.step_ctr,
        "latest_model_name": model_path,
        "buffer_path": buffer_path,
        "args": args,
        "episodes_done": episodes_done,
        "logdir": args.logdir,
        "transitions_seen": transitions_seen,
        "optimizer_state_dict": learner.optimizer.state_dict(),
        "optimizer_class": type(learner.optimizer),
        "best_eval_so_far": best_eval_so_far,
        "scheduler_class": type(learner.lr_scheduler),
        "scheduler_state_dict": learner.lr_scheduler.state_dict(),
        "in_eval_mode": in_eval_mode,
    }
    status_path = os.path.join(args.logdir, "status.yaml")
    with open(status_path, "w") as f:
        yaml.dump(train_status, f, default_flow_style=False)
    return status_path
def get_annealed_eps(n_trans, args):
    """Linearly anneal the exploration epsilon.

    Epsilon is held at args.eps_init for the first args.init_exploration_steps
    transitions, clamped to args.eps_final once n_trans exceeds
    args.eps_decay_steps, and decays linearly in between.

    :param n_trans: number of environment transitions seen so far.
    :param args: namespace with eps_init, eps_final, init_exploration_steps
        and eps_decay_steps.
    :returns: the epsilon to use for the current step.
    """
    warmup = args.init_exploration_steps
    if n_trans < warmup:
        return args.eps_init
    if n_trans > args.eps_decay_steps:
        return args.eps_final
    assert n_trans >= warmup
    # Fraction of the decay schedule already consumed (measured past warmup).
    decayed_frac = (n_trans - warmup) / args.eps_decay_steps
    return args.eps_final + (args.eps_init - args.eps_final) * (1 - decayed_frac)
def arg2activation(activ_str):
    """Map an activation name to the corresponding torch.nn class.

    :param activ_str: one of "relu", "tanh", "leaky_relu".
    :returns: the activation class (not an instance).
    :raises ValueError: for any other name.
    """
    table = {
        "relu": torch.nn.ReLU,
        "tanh": torch.nn.Tanh,
        "leaky_relu": torch.nn.LeakyReLU,
    }
    if activ_str not in table:
        raise ValueError("Unknown activation function")
    return table[activ_str]
class DQN(object):
    """
    DQN object for setting up env, agent, learner
    Training happens in train() function
    For evaluation there are two modes:
    (1) runtime evaluation for the problems in eval_problems_paths happens in eval_runtime()
    (2) Q-value evaluation for the problems from directory happens in eval_q_from_file()
    (3) Q-value evaluation for the given graph happens in eval_q_from_graph
    """
    def __init__(self, args, train_status=None, eval=False):
        # NOTE(review): the `eval` parameter shadows the builtin eval(); it is
        # only a flag selecting evaluation-mode initialization.
        self.writer = SummaryWriter()
        self.env = None
        # Dispatch: resume training / eval-only / fresh training run.
        if train_status is not None:
            if not eval:
                self._init_from_status(args, train_status)
            else:
                self._init_for_eval(args, train_status)
        else:
            self._init_from_scratch(args)
        print(args.__str__())
    def _init_from_status(self, args, train_status):
        """
        Initialization for training from previously incomplete run
        :param args: arguments for training
        :param train_status: train status from status.yaml file from the previous run
        :returns: different self.object to be used in train() function
        """
        self.eval_resume_signal = train_status["in_eval_mode"]
        # load the model
        net = SatModel.load_from_yaml(os.path.join(args.logdir, "model.yaml")).to(
            args.device
        )
        net.load_state_dict(torch.load(train_status["latest_model_name"]))
        # target network starts as an exact copy of the online network
        target_net = SatModel.load_from_yaml(
            os.path.join(args.logdir, "model.yaml")
        ).to(args.device)
        target_net.load_state_dict(net.state_dict())
        # load the buffer
        if train_status["buffer_path"] is not None:
            with open(train_status["buffer_path"], "rb") as f:
                self.buffer = pickle.load(f)
        else:
            self.buffer = None
        self.learner = GraphLearner(net, target_net, self.buffer, args)
        self.learner.step_ctr = train_status["step_ctr"]
        # re-instantiate optimizer/scheduler from their saved classes before
        # restoring their state dicts (see save_training_state)
        self.learner.optimizer = train_status["optimizer_class"](
            net.parameters(), lr=args.lr
        )
        self.learner.optimizer.load_state_dict(train_status["optimizer_state_dict"])
        self.learner.lr_scheduler = train_status["scheduler_class"](
            self.learner.optimizer, args.lr_scheduler_frequency, args.lr_scheduler_gamma
        )
        self.learner.lr_scheduler.load_state_dict(train_status["scheduler_state_dict"])
        # load misc training status params
        self.n_trans = train_status["transitions_seen"]
        self.ep = train_status["episodes_done"]
        self.env = make_env(args.train_problems_paths, args, test_mode=False)
        self.agent = GraphAgent(net, args)
        self.best_eval_so_far = train_status["best_eval_so_far"]
        self.args = args
    def _init_from_scratch(self, args):
        """
        Initialization for training from scratch
        :param args: arguments for training
        :returns: different self.object to be used in train() function
        """
        # training mode, learning from scratch or continuing learning from some previously trained model
        args.logdir = self.writer.logdir
        model_save_path = os.path.join(args.logdir, "model.yaml")
        # -1 marks "no evaluation done yet" for each eval problem set
        self.best_eval_so_far = (
            {args.eval_problems_paths: -1}
            if not args.eval_separately_on_each
            else {k: -1 for k in args.eval_problems_paths.split(":")}
        )
        self.env = make_env(args.train_problems_paths, args, test_mode=False)
        if args.model_dir is not None:
            # load an existing model and continue training
            net = SatModel.load_from_yaml(
                os.path.join(args.model_dir, "model.yaml")
            ).to(args.device)
            net.load_state_dict(
                torch.load(os.path.join(args.model_dir, args.model_checkpoint))
            )
        else:
            # learning from scratch
            net = EncoderCoreDecoder(
                (self.env.vertex_in_size, self.env.edge_in_size, self.env.global_in_size),
                core_out_dims=(
                    args.core_v_out_size,
                    args.core_e_out_size,
                    args.core_e_out_size,
                ),
                out_dims=(2, None, None),
                core_steps=args.core_steps,
                dec_out_dims=(
                    args.decoder_v_out_size,
                    args.decoder_e_out_size,
                    args.decoder_e_out_size,
                ),
                encoder_out_dims=(
                    args.encoder_v_out_size,
                    args.encoder_e_out_size,
                    args.encoder_e_out_size,
                ),
                save_name=model_save_path,
                e2v_agg=args.e2v_aggregator,
                n_hidden=args.n_hidden,
                hidden_size=args.hidden_size,
                activation=arg2activation(args.activation),
                independent_block_layers=args.independent_block_layers,
            ).to(args.device)
        print(str(net))
        target_net = copy.deepcopy(net)
        self.buffer = ReplayGraphBuffer(args, args.buffer_size)
        self.agent = GraphAgent(net, args)
        self.n_trans = 0
        self.ep = 0
        self.learner = GraphLearner(net, target_net, self.buffer, args)
        self.eval_resume_signal = False
        self.args = args
    def _init_for_eval(self, args, train_status):
        """
        Initialization for evaluating on problems from a given directory
        :param args: arguments for evaluation
        :param train_status: training status from status.yaml file from the run
        """
        eval_args = copy.deepcopy(args)
        args = train_status["args"]
        # use same args used for training and overwrite them with those asked for eval
        for k, v in vars(eval_args).items():
            setattr(args, k, v)
        args.device = (
            torch.device("cpu")
            if args.no_cuda or not torch.cuda.is_available()
            else torch.device("cuda")
        )
        net = SatModel.load_from_yaml(os.path.join(args.model_dir, "model.yaml")).to(
            args.device
        )
        # modify core steps for the eval as requested
        if args.core_steps != -1:
            # -1 if use the same as for training
            net.steps = args.core_steps
        net.load_state_dict(
            torch.load(os.path.join(args.model_dir, args.model_checkpoint)), strict=False
        )
        self.agent = GraphAgent(net, args)
        self.agent.net.eval()
        self.args = args
    def set_problems(self, adj_mat_list):
        # Replace the environment with one built from in-memory adjacency matrices.
        self.env = make_env(None, self.args, adj_mat_list)
    def train(self):
        """
        training happens here.
        """
        # One iteration of the outer loop = one full episode; learner updates
        # happen every args.step_freq transitions once the buffer is warm.
        while self.learner.step_ctr < self.args.batch_updates:
            ret = 0
            r = 0
            obs = self.env.reset(self.args.train_time_max_decisions_allowed)
            done = self.env.isSolved
            if self.args.history_len > 1:
                raise NotImplementedError(
                    "History len greater than one is not implemented for graph nets."
                )
            hist_buffer = deque(maxlen=self.args.history_len)
            for _ in range(self.args.history_len):
                hist_buffer.append(obs)
            ep_step = 0
            save_flag = False
            while not done:
                annealed_eps = get_annealed_eps(self.n_trans, self.args)
                action = self.agent.act(hist_buffer, eps=annealed_eps)
                next_obs, r, done, _ = self.env.step(action)
                self.buffer.add_transition(obs, action, r, done)
                obs = next_obs
                hist_buffer.append(obs)
                ret += r
                if (not self.n_trans % self.args.step_freq) and (
                    self.buffer.ctr > max(self.args.init_exploration_steps, self.args.bsize + 1)
                    or self.buffer.full
                ):
                    step_info = self.learner.step()
                    if annealed_eps is not None:
                        step_info["annealed_eps"] = annealed_eps
                    # we increment the step_ctr in the learner.step(), that's why we need to do -1 in tensorboarding
                    # we do not need to do -1 in checking for frequency since 0 has already passed
                    if not self.learner.step_ctr % self.args.save_freq:
                        # save the exact model you evaluated and make another save after the episode ends
                        # to have proper transitions in the replay buffer to pickle
                        status_path = save_training_state(
                            self.agent.net, #TODO : It was only net (but this should also be correct)
                            self.learner,
                            self.ep - 1,
                            self.n_trans,
                            self.best_eval_so_far,
                            self.args,
                            in_eval_mode=self.eval_resume_signal,
                        )
                        save_flag = True
                    if (
                        self.args.env_name == "sat-v0" and not self.learner.step_ctr % self.args.eval_freq
                    ) or self.eval_resume_signal:
                        _, _, scores, _, self.eval_resume_signal = evaluate(
                            self.agent, self.args, include_train_set=False
                        )
                        for sc_key, sc_val in scores.items():
                            # list can be empty if we hit the time limit for eval
                            if len(sc_val) > 0:
                                res_vals = [el for el in sc_val.values()]
                                median_score = np.nanmedian(res_vals)
                                if (
                                    self.best_eval_so_far[sc_key] < median_score
                                    or self.best_eval_so_far[sc_key] == -1
                                ):
                                    self.best_eval_so_far[sc_key] = median_score
                                self.writer.add_scalar(
                                    f"data/median relative score: {sc_key}",
                                    np.nanmedian(res_vals),
                                    self.learner.step_ctr - 1,
                                )
                                self.writer.add_scalar(
                                    f"data/mean relative score: {sc_key}",
                                    np.nanmean(res_vals),
                                    self.learner.step_ctr - 1,
                                )
                                self.writer.add_scalar(
                                    f"data/max relative score: {sc_key}",
                                    np.nanmax(res_vals),
                                    self.learner.step_ctr - 1,
                                )
                    for k, v in self.best_eval_so_far.items():
                        self.writer.add_scalar(k, v, self.learner.step_ctr - 1)
                    for k, v in step_info.items():
                        self.writer.add_scalar(k, v, self.learner.step_ctr - 1)
                    self.writer.add_scalar("data/num_episodes", self.ep, self.learner.step_ctr - 1)
                self.n_trans += 1
                ep_step += 1
            # per-episode tensorboard logging
            self.writer.add_scalar("data/ep_return", ret, self.learner.step_ctr - 1)
            self.writer.add_scalar("data/ep_steps", self.env.step_ctr, self.learner.step_ctr - 1)
            self.writer.add_scalar("data/ep_last_reward", r, self.learner.step_ctr - 1)
            print(f"Episode {self.ep + 1}: Return {ret}.")
            self.ep += 1
            if save_flag:
                # second save at episode end so buffer.pkl contains complete episodes
                status_path = save_training_state(
                    self.agent.net, #TODO: Is agent net the same as net?
                    self.learner,
                    self.ep - 1,
                    self.n_trans,
                    self.best_eval_so_far,
                    self.args,
                    in_eval_mode=self.eval_resume_signal,
                )
                save_flag = False
    def eval_runtime(self):
        """
        Evaluation on different problem sets to compare performance of RL solver.
        This function will directly use function available in gqsat/utils.py
        :param args: arguments for evaluation
        """
        st_time = time.time()
        _, _, scores, eval_metadata, _ = evaluate(self.agent, self.args)
        end_time = time.time()
        print(
            f"Evaluation is over. It took {end_time - st_time} seconds for the whole procedure"
        )
        # with open("../eval_results.pkl", "wb") as f:
        # pickle.dump(scores, f)
        for pset, pset_res in scores.items():
            res_list = [el for el in pset_res.values()]
            print(f"Results for {pset}")
            print(
                f"median_relative_score: {np.nanmedian(res_list)}, mean_relative_score: {np.mean(res_list)}"
            )
    def eval_q_for_agent_from_graph(self, adj_mat, use_minisat = False):
        """
        evaluate runtime for a given adj mat for the minisat or GQSat agent
        :param adj_mat: adjacency matrix for the problem
        :param use_minisat: uses minisat agent if true, else self.agent from the solver object
        """
        agent = MiniSATAgent() if use_minisat else self.agent
        env = make_env(None, self.args, [adj_mat])
        obs = env.reset(self.args.train_time_max_decisions_allowed)
        done = env.isSolved
        if done:
            return 0
        # accumulate (negative) reward, i.e. the episode return, as the score
        q = 0
        with torch.no_grad():
            while not done:
                obs, r, done, _ = env.step(agent.act([obs]))
                q += r
        return q
    def eval_q_from_file(self, eval_problems_paths=None, agg="sum"):
        """
        Q-value evaluation of problems in eval_problems_paths.
        If eval_problems_paths is None, evaluation will happen in args.eval_problems_paths
        :param eval_problems_paths: dir(s) where problems are saved for evaluation
        :param agg: aggregation of q-values for a graph (either "sum" or "mean")
        :returns res_q: Dict of Dicts where structure of dict is as follows
        res_q[eval_problem_path][problem_filename] = QValue
        """
        # if eval problems are not provided q value evaluation happens for the
        # problem sets in self.args.eval_problems_paths
        if not eval_problems_paths:
            eval_problems_paths = self.args.eval_problems_paths
        problem_sets = (
            [eval_problems_paths]
            if not self.args.eval_separately_on_each
            else [k for k in self.args.eval_problems_paths.split(":")]
        )
        res_q = defaultdict(dict)
        for pset in problem_sets:
            eval_env = make_env(pset, self.args, test_mode=True)
            q_scores = {}
            pr = 0
            with torch.no_grad():
                # test_to counts down remaining problems; pr == 0 forces at least one pass
                while eval_env.test_to != 0 or pr == 0:
                    obs = eval_env.reset(
                        max_decisions_cap=self.args.test_time_max_decisions_allowed
                    )
                    # TODO: This is broken since eval_q_from_graph is different now
                    q = self.eval_q_from_graph([obs], agg)
                    q_scores[eval_env.curr_problem] = q
                    pr += 1
            res_q[pset] = q_scores
        return res_q
    def eval_q_from_graph(self, adj_mat, agg="max", use_minisat=False):
        """
        Evaluation of q-value from the graph structure. This function directly calls forward pass for the agent.
        :param hist_buffer: list of size 1 with all elements for graph (vertex_data, edge_data, connectivity, global_data)
        :param agg: aggregation of q-values for a graph (either "sum" or "mean")
        :param use_minisat: Whether a run of minisat should be used to calculate the reward.
        :returns q: q-value for a given graph
        """
        env = make_env(None, self.args, [adj_mat])
        obs = env.reset(self.args.train_time_max_decisions_allowed)
        if env.isSolved:
            return 0
        if use_minisat:
            # run the minisat agent to calculate the number of branches
            agent = MiniSATAgent()
            done = env.isSolved
            q = 0
            while not done:
                obs, r, done, _ = env.step(agent.act(obs))
                q += r
            return q
        # single forward pass; aggregate the per-action Q values into a scalar
        q = self.agent.forward([obs])
        if agg == "sum":
            q = q.max(1).values.sum().cpu().item()
        elif agg == "mean":
            q = q.max(1).values.mean().cpu().item()
        elif agg == "max":
            q = q.flatten().max().cpu().item()
        elif agg == "expectation":
            # softmax-weighted expectation over all flattened Q values
            flat_q = q.flatten()
            q = torch.sum(torch.softmax(flat_q, dim=0) * flat_q).cpu().item()
        else:
            raise ValueError(f"agg {agg} is not recognized")
        return q
| 38.593511 | 122 | 0.571775 |
import numpy as np
import torch
import os
from collections import deque, defaultdict
import pickle
import copy
import yaml
import time
from gqsat.utils import build_argparser, evaluate, make_env
from gqsat.models import EncoderCoreDecoder, SatModel
from gqsat.agents import GraphAgent, MiniSATAgent
from gqsat.learners import GraphLearner
from gqsat.buffer import ReplayGraphBuffer
from tensorboardX import SummaryWriter
def save_training_state(
model,
learner,
episodes_done,
transitions_seen,
best_eval_so_far,
args,
in_eval_mode=False,
):
model_path = os.path.join(args.logdir, f"model_{learner.step_ctr}.chkp")
torch.save(model.state_dict(), model_path)
buffer_path = os.path.join(args.logdir, "buffer.pkl")
with open(buffer_path, "wb") as f:
pickle.dump(learner.buffer, f)
train_status = {
"step_ctr": learner.step_ctr,
"latest_model_name": model_path,
"buffer_path": buffer_path,
"args": args,
"episodes_done": episodes_done,
"logdir": args.logdir,
"transitions_seen": transitions_seen,
"optimizer_state_dict": learner.optimizer.state_dict(),
"optimizer_class": type(learner.optimizer),
"best_eval_so_far": best_eval_so_far,
"scheduler_class": type(learner.lr_scheduler),
"scheduler_state_dict": learner.lr_scheduler.state_dict(),
"in_eval_mode": in_eval_mode,
}
status_path = os.path.join(args.logdir, "status.yaml")
with open(status_path, "w") as f:
yaml.dump(train_status, f, default_flow_style=False)
return status_path
def get_annealed_eps(n_trans, args):
if n_trans < args.init_exploration_steps:
return args.eps_init
if n_trans > args.eps_decay_steps:
return args.eps_final
else:
assert n_trans - args.init_exploration_steps >= 0
return (args.eps_init - args.eps_final) * (
1 - (n_trans - args.init_exploration_steps) / args.eps_decay_steps
) + args.eps_final
def arg2activation(activ_str):
if activ_str == "relu":
return torch.nn.ReLU
elif activ_str == "tanh":
return torch.nn.Tanh
elif activ_str == "leaky_relu":
return torch.nn.LeakyReLU
else:
raise ValueError("Unknown activation function")
class DQN(object):
def __init__(self, args, train_status=None, eval=False):
self.writer = SummaryWriter()
self.env = None
if train_status is not None:
if not eval:
self._init_from_status(args, train_status)
else:
self._init_for_eval(args, train_status)
else:
self._init_from_scratch(args)
print(args.__str__())
def _init_from_status(self, args, train_status):
self.eval_resume_signal = train_status["in_eval_mode"]
net = SatModel.load_from_yaml(os.path.join(args.logdir, "model.yaml")).to(
args.device
)
net.load_state_dict(torch.load(train_status["latest_model_name"]))
target_net = SatModel.load_from_yaml(
os.path.join(args.logdir, "model.yaml")
).to(args.device)
target_net.load_state_dict(net.state_dict())
if train_status["buffer_path"] is not None:
with open(train_status["buffer_path"], "rb") as f:
self.buffer = pickle.load(f)
else:
self.buffer = None
self.learner = GraphLearner(net, target_net, self.buffer, args)
self.learner.step_ctr = train_status["step_ctr"]
self.learner.optimizer = train_status["optimizer_class"](
net.parameters(), lr=args.lr
)
self.learner.optimizer.load_state_dict(train_status["optimizer_state_dict"])
self.learner.lr_scheduler = train_status["scheduler_class"](
self.learner.optimizer, args.lr_scheduler_frequency, args.lr_scheduler_gamma
)
self.learner.lr_scheduler.load_state_dict(train_status["scheduler_state_dict"])
self.n_trans = train_status["transitions_seen"]
self.ep = train_status["episodes_done"]
self.env = make_env(args.train_problems_paths, args, test_mode=False)
self.agent = GraphAgent(net, args)
self.best_eval_so_far = train_status["best_eval_so_far"]
self.args = args
def _init_from_scratch(self, args):
args.logdir = self.writer.logdir
model_save_path = os.path.join(args.logdir, "model.yaml")
self.best_eval_so_far = (
{args.eval_problems_paths: -1}
if not args.eval_separately_on_each
else {k: -1 for k in args.eval_problems_paths.split(":")}
)
self.env = make_env(args.train_problems_paths, args, test_mode=False)
if args.model_dir is not None:
net = SatModel.load_from_yaml(
os.path.join(args.model_dir, "model.yaml")
).to(args.device)
net.load_state_dict(
torch.load(os.path.join(args.model_dir, args.model_checkpoint))
)
else:
net = EncoderCoreDecoder(
(self.env.vertex_in_size, self.env.edge_in_size, self.env.global_in_size),
core_out_dims=(
args.core_v_out_size,
args.core_e_out_size,
args.core_e_out_size,
),
out_dims=(2, None, None),
core_steps=args.core_steps,
dec_out_dims=(
args.decoder_v_out_size,
args.decoder_e_out_size,
args.decoder_e_out_size,
),
encoder_out_dims=(
args.encoder_v_out_size,
args.encoder_e_out_size,
args.encoder_e_out_size,
),
save_name=model_save_path,
e2v_agg=args.e2v_aggregator,
n_hidden=args.n_hidden,
hidden_size=args.hidden_size,
activation=arg2activation(args.activation),
independent_block_layers=args.independent_block_layers,
).to(args.device)
print(str(net))
target_net = copy.deepcopy(net)
self.buffer = ReplayGraphBuffer(args, args.buffer_size)
self.agent = GraphAgent(net, args)
self.n_trans = 0
self.ep = 0
self.learner = GraphLearner(net, target_net, self.buffer, args)
self.eval_resume_signal = False
self.args = args
def _init_for_eval(self, args, train_status):
eval_args = copy.deepcopy(args)
args = train_status["args"]
for k, v in vars(eval_args).items():
setattr(args, k, v)
args.device = (
torch.device("cpu")
if args.no_cuda or not torch.cuda.is_available()
else torch.device("cuda")
)
net = SatModel.load_from_yaml(os.path.join(args.model_dir, "model.yaml")).to(
args.device
)
if args.core_steps != -1:
net.steps = args.core_steps
net.load_state_dict(
torch.load(os.path.join(args.model_dir, args.model_checkpoint)), strict=False
)
self.agent = GraphAgent(net, args)
self.agent.net.eval()
self.args = args
def set_problems(self, adj_mat_list):
self.env = make_env(None, self.args, adj_mat_list)
def train(self):
while self.learner.step_ctr < self.args.batch_updates:
ret = 0
r = 0
obs = self.env.reset(self.args.train_time_max_decisions_allowed)
done = self.env.isSolved
if self.args.history_len > 1:
raise NotImplementedError(
"History len greater than one is not implemented for graph nets."
)
hist_buffer = deque(maxlen=self.args.history_len)
for _ in range(self.args.history_len):
hist_buffer.append(obs)
ep_step = 0
save_flag = False
while not done:
annealed_eps = get_annealed_eps(self.n_trans, self.args)
action = self.agent.act(hist_buffer, eps=annealed_eps)
next_obs, r, done, _ = self.env.step(action)
self.buffer.add_transition(obs, action, r, done)
obs = next_obs
hist_buffer.append(obs)
ret += r
if (not self.n_trans % self.args.step_freq) and (
self.buffer.ctr > max(self.args.init_exploration_steps, self.args.bsize + 1)
or self.buffer.full
):
step_info = self.learner.step()
if annealed_eps is not None:
step_info["annealed_eps"] = annealed_eps
# we do not need to do -1 in checking for frequency since 0 has already passed
if not self.learner.step_ctr % self.args.save_freq:
# save the exact model you evaluated and make another save after the episode ends
# to have proper transitions in the replay buffer to pickle
status_path = save_training_state(
self.agent.net, #TODO : It was only net (but this should also be correct)
self.learner,
self.ep - 1,
self.n_trans,
self.best_eval_so_far,
self.args,
in_eval_mode=self.eval_resume_signal,
)
save_flag = True
if (
self.args.env_name == "sat-v0" and not self.learner.step_ctr % self.args.eval_freq
) or self.eval_resume_signal:
_, _, scores, _, self.eval_resume_signal = evaluate(
self.agent, self.args, include_train_set=False
)
for sc_key, sc_val in scores.items():
# list can be empty if we hit the time limit for eval
if len(sc_val) > 0:
res_vals = [el for el in sc_val.values()]
median_score = np.nanmedian(res_vals)
if (
self.best_eval_so_far[sc_key] < median_score
or self.best_eval_so_far[sc_key] == -1
):
self.best_eval_so_far[sc_key] = median_score
self.writer.add_scalar(
f"data/median relative score: {sc_key}",
np.nanmedian(res_vals),
self.learner.step_ctr - 1,
)
self.writer.add_scalar(
f"data/mean relative score: {sc_key}",
np.nanmean(res_vals),
self.learner.step_ctr - 1,
)
self.writer.add_scalar(
f"data/max relative score: {sc_key}",
np.nanmax(res_vals),
self.learner.step_ctr - 1,
)
for k, v in self.best_eval_so_far.items():
self.writer.add_scalar(k, v, self.learner.step_ctr - 1)
for k, v in step_info.items():
self.writer.add_scalar(k, v, self.learner.step_ctr - 1)
self.writer.add_scalar("data/num_episodes", self.ep, self.learner.step_ctr - 1)
self.n_trans += 1
ep_step += 1
self.writer.add_scalar("data/ep_return", ret, self.learner.step_ctr - 1)
self.writer.add_scalar("data/ep_steps", self.env.step_ctr, self.learner.step_ctr - 1)
self.writer.add_scalar("data/ep_last_reward", r, self.learner.step_ctr - 1)
print(f"Episode {self.ep + 1}: Return {ret}.")
self.ep += 1
if save_flag:
status_path = save_training_state(
self.agent.net, #TODO: Is agent net the same as net?
self.learner,
self.ep - 1,
self.n_trans,
self.best_eval_so_far,
self.args,
in_eval_mode=self.eval_resume_signal,
)
save_flag = False
def eval_runtime(self):
st_time = time.time()
_, _, scores, eval_metadata, _ = evaluate(self.agent, self.args)
end_time = time.time()
print(
f"Evaluation is over. It took {end_time - st_time} seconds for the whole procedure"
)
# with open("../eval_results.pkl", "wb") as f:
# pickle.dump(scores, f)
for pset, pset_res in scores.items():
res_list = [el for el in pset_res.values()]
print(f"Results for {pset}")
print(
f"median_relative_score: {np.nanmedian(res_list)}, mean_relative_score: {np.mean(res_list)}"
)
def eval_q_for_agent_from_graph(self, adj_mat, use_minisat = False):
agent = MiniSATAgent() if use_minisat else self.agent
env = make_env(None, self.args, [adj_mat])
obs = env.reset(self.args.train_time_max_decisions_allowed)
done = env.isSolved
if done:
return 0
q = 0
with torch.no_grad():
while not done:
obs, r, done, _ = env.step(agent.act([obs]))
q += r
return q
def eval_q_from_file(self, eval_problems_paths=None, agg="sum"):
# if eval problems are not provided q value evaluation happens for the
# problem sets in self.args.eval_problems_paths
if not eval_problems_paths:
eval_problems_paths = self.args.eval_problems_paths
problem_sets = (
[eval_problems_paths]
if not self.args.eval_separately_on_each
else [k for k in self.args.eval_problems_paths.split(":")]
)
res_q = defaultdict(dict)
for pset in problem_sets:
eval_env = make_env(pset, self.args, test_mode=True)
q_scores = {}
pr = 0
with torch.no_grad():
while eval_env.test_to != 0 or pr == 0:
obs = eval_env.reset(
max_decisions_cap=self.args.test_time_max_decisions_allowed
)
# TODO: This is broken since eval_q_from_graph is different now
q = self.eval_q_from_graph([obs], agg)
q_scores[eval_env.curr_problem] = q
pr += 1
res_q[pset] = q_scores
return res_q
def eval_q_from_graph(self, adj_mat, agg="max", use_minisat=False):
env = make_env(None, self.args, [adj_mat])
obs = env.reset(self.args.train_time_max_decisions_allowed)
if env.isSolved:
return 0
if use_minisat:
# run the minisat agent to calculate the number of branches
agent = MiniSATAgent()
done = env.isSolved
q = 0
while not done:
obs, r, done, _ = env.step(agent.act(obs))
q += r
return q
q = self.agent.forward([obs])
if agg == "sum":
q = q.max(1).values.sum().cpu().item()
elif agg == "mean":
q = q.max(1).values.mean().cpu().item()
elif agg == "max":
q = q.flatten().max().cpu().item()
elif agg == "expectation":
flat_q = q.flatten()
q = torch.sum(torch.softmax(flat_q, dim=0) * flat_q).cpu().item()
else:
raise ValueError(f"agg {agg} is not recognized")
return q
| true | true |
1c30a4fe46f2ff75000dd34e34ae2c1263e1323e | 3,053 | py | Python | lib/webtest/compat.py | zenlambda/aeta | 3781ac916be069a1d01eaa8b2a42375b689a82fe | [
"Apache-2.0"
] | 1 | 2015-07-22T15:58:06.000Z | 2015-07-22T15:58:06.000Z | lib/webtest/compat.py | agostodev/agar | 66b7937a35ae93717d5e9683c7dc7c80c4bcc5d6 | [
"MIT"
] | null | null | null | lib/webtest/compat.py | agostodev/agar | 66b7937a35ae93717d5e9683c7dc7c80c4bcc5d6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import sys
# Single import point for names whose location/semantics differ between
# Python 2 and Python 3. The rest of the package imports from this module.
if sys.version_info[0] > 2:
    PY3 = True
    string_types = (str,)
    text_type = str
    binary_type = bytes
    from json import loads
    from json import dumps
    from io import StringIO
    from io import BytesIO
    from urllib.parse import urlencode
    from urllib.parse import splittype
    from urllib.parse import splithost
    import urllib.parse as urlparse
    from http.client import HTTPConnection
    from http.client import CannotSendRequest
    from http.server import HTTPServer
    from http.server import SimpleHTTPRequestHandler
    from http.cookies import SimpleCookie, CookieError
    # NOTE(review): _quote is a private http.cookies API -- may break across
    # Python versions.
    from http.cookies import _quote as cookie_quote
    def to_bytes(s):
        # bytes pass through; text is encoded 1:1 via latin1
        if isinstance(s, bytes):
            return s
        return s.encode('latin1')
    def to_string(s):
        # str passes through; bytes are decoded 1:1 via latin1
        if isinstance(s, str):
            return s
        return str(s, 'latin1')
    def join_bytes(sep, l):
        # join mixed str/bytes items as bytes
        l = [to_bytes(e) for e in l]
        return to_bytes(sep).join(l)
else:
    PY3 = False
    string_types = basestring
    text_type = unicode
    binary_type = str
    from urllib import splittype
    from urllib import splithost
    from urllib import urlencode
    from httplib import HTTPConnection
    from httplib import CannotSendRequest
    from BaseHTTPServer import HTTPServer
    from SimpleHTTPServer import SimpleHTTPRequestHandler
    from Cookie import SimpleCookie, CookieError
    from Cookie import _quote as cookie_quote
    # prefer the C implementation of StringIO when available
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
    BytesIO = StringIO
    import urlparse
    # json is stdlib from 2.6; fall back to simplejson, else disable
    try:
        from json import loads
        from json import dumps
    except ImportError:
        try:
            from simplejson import loads
            from simplejson import dumps
        except ImportError:
            loads = None
            dumps = None
    def to_bytes(s):
        # on py2 bytes is str, so plain str() suffices
        return str(s)
    def to_string(s):
        return str(s)
    def join_bytes(sep, l):
        l = [e for e in l]
        return sep.join(l)
def print_stderr(value):
    """Write *value* to sys.stderr, working on both Python 2 and Python 3."""
    if PY3:
        # `print(value, file=sys.stderr)` would be a parse error for the py2
        # compiler in this dual-version file, so it is hidden inside exec().
        exec('print(value, file=sys.stderr)')
    else:
        if isinstance(value, text_type):
            # not really clean but this must *never* fail
            try:
                value = value.encode('utf-8')
            except:
                value = repr(value)
        sys.stderr.write(value)
# The builtin next() only exists from Python 2.6; shim it for older runtimes.
try:
    next = next
except NameError:
    # python < 2.6
    def next(iterator):
        return iterator.next()
# OrderedDict is stdlib only from 2.7/3.1; plain dict is an (unordered) fallback.
try:
    from collections import OrderedDict
except ImportError:
    OrderedDict = dict
# Prefer stdlib unittest (2.7+ has skipIf); fall back to the unittest2
# backport, and finally to a minimal local skipIf stand-in.
try:
    from unittest import TestCase
    from unittest import skipIf
except ImportError:
    try:
        from unittest2 import TestCase
        from unittest2 import skipIf
    except ImportError:
        from unittest import TestCase
        def skipIf(condition, message):
            # NOTE(review): when `condition` is true this returns None instead
            # of a decorator, so @skipIf(True, ...) replaces the decorated test
            # with None rather than marking it skipped -- confirm intended.
            if condition:
                return None
            def wrapper(func):
                return func
            return wrapper
| 25.655462 | 57 | 0.633803 |
import sys
if sys.version_info[0] > 2:
PY3 = True
string_types = (str,)
text_type = str
binary_type = bytes
from json import loads
from json import dumps
from io import StringIO
from io import BytesIO
from urllib.parse import urlencode
from urllib.parse import splittype
from urllib.parse import splithost
import urllib.parse as urlparse
from http.client import HTTPConnection
from http.client import CannotSendRequest
from http.server import HTTPServer
from http.server import SimpleHTTPRequestHandler
from http.cookies import SimpleCookie, CookieError
from http.cookies import _quote as cookie_quote
def to_bytes(s):
if isinstance(s, bytes):
return s
return s.encode('latin1')
def to_string(s):
if isinstance(s, str):
return s
return str(s, 'latin1')
def join_bytes(sep, l):
l = [to_bytes(e) for e in l]
return to_bytes(sep).join(l)
else:
PY3 = False
string_types = basestring
text_type = unicode
binary_type = str
from urllib import splittype
from urllib import splithost
from urllib import urlencode
from httplib import HTTPConnection
from httplib import CannotSendRequest
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
from Cookie import SimpleCookie, CookieError
from Cookie import _quote as cookie_quote
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
BytesIO = StringIO
import urlparse
try:
from json import loads
from json import dumps
except ImportError:
try:
from simplejson import loads
from simplejson import dumps
except ImportError:
loads = None
dumps = None
def to_bytes(s):
return str(s)
def to_string(s):
return str(s)
def join_bytes(sep, l):
l = [e for e in l]
return sep.join(l)
def print_stderr(value):
if PY3:
exec('print(value, file=sys.stderr)')
else:
if isinstance(value, text_type):
try:
value = value.encode('utf-8')
except:
value = repr(value)
sys.stderr.write(value)
try:
next = next
except NameError:
def next(iterator):
return iterator.next()
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict
try:
from unittest import TestCase
from unittest import skipIf
except ImportError:
try:
from unittest2 import TestCase
from unittest2 import skipIf
except ImportError:
from unittest import TestCase
def skipIf(condition, message):
if condition:
return None
def wrapper(func):
return func
return wrapper
| true | true |
1c30a558c1dc64b95621d88202a6ded61091aebf | 14,428 | py | Python | py/test/selenium/webdriver/common/alerts_tests.py | worldofwonders/selenium | f76d614534456f008eaf2b9f7bb32a4b4ab624a7 | [
"Apache-2.0"
] | null | null | null | py/test/selenium/webdriver/common/alerts_tests.py | worldofwonders/selenium | f76d614534456f008eaf2b9f7bb32a4b4ab624a7 | [
"Apache-2.0"
] | null | null | null | py/test/selenium/webdriver/common/alerts_tests.py | worldofwonders/selenium | f76d614534456f008eaf2b9f7bb32a4b4ab624a7 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import pytest
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import (
InvalidElementStateException,
NoAlertPresentException,
TimeoutException,
UnexpectedAlertPresentException,
WebDriverException)
class TestAlerts(object):
    """End-to-end tests for WebDriver handling of JavaScript alerts/prompts.

    Each test drives a fixture page (``alerts.html`` and friends), waits for
    the alert via ``_waitForAlert`` and then accepts/dismisses/types into it,
    asserting the resulting page state.  The ``xfail_*`` markers document
    known driver bugs and link to the corresponding upstream issues.
    """
    def testShouldBeAbleToOverrideTheWindowAlertMethod(self, driver, pages):
        pages.load("alerts.html")
        driver.execute_script(
            "window.alert = function(msg) { document.getElementById('text').innerHTML = msg; }")
        driver.find_element(by=By.ID, value="alert").click()
        try:
            assert driver.find_element_by_id('text').text == "cheese"
        except Exception as e:
            # if we're here, likely the alert is displayed
            # not dismissing it will affect other tests
            try:
                self._waitForAlert(driver).dismiss()
            except Exception:
                pass
            raise e
    @pytest.mark.xfail_phantomjs(
        reason='https://github.com/detro/ghostdriver/issues/20',
        raises=WebDriverException)
    def testShouldAllowUsersToAcceptAnAlertManually(self, driver, pages):
        pages.load("alerts.html")
        driver.find_element(by=By.ID, value="alert").click()
        alert = self._waitForAlert(driver)
        alert.accept()
        # If we can perform any action, we're good to go
        assert "Testing Alerts" == driver.title
    @pytest.mark.xfail_phantomjs(
        reason='https://github.com/detro/ghostdriver/issues/20',
        raises=WebDriverException)
    def testShouldAllowUsersToAcceptAnAlertWithNoTextManually(self, driver, pages):
        pages.load("alerts.html")
        driver.find_element(By.ID, "empty-alert").click()
        alert = self._waitForAlert(driver)
        alert.accept()
        # If we can perform any action, we're good to go
        assert "Testing Alerts" == driver.title
    @pytest.mark.xfail_phantomjs(
        reason='https://github.com/detro/ghostdriver/issues/20',
        raises=WebDriverException)
    def testShouldGetTextOfAlertOpenedInSetTimeout(self, driver, pages):
        pages.load("alerts.html")
        driver.find_element_by_id("slow-alert").click()
        # DO NOT WAIT OR SLEEP HERE
        # This is a regression test for a bug where only the first switchTo call would throw,
        # and only if it happens before the alert actually loads.
        alert = self._waitForAlert(driver)
        try:
            assert "Slow" == alert.text
        finally:
            alert.accept()
    @pytest.mark.xfail_chrome(
        condition=sys.platform == 'darwin',
        reason='https://bugs.chromium.org/p/chromedriver/issues/detail?id=26',
        run=False)
    @pytest.mark.xfail_phantomjs(
        reason='https://github.com/detro/ghostdriver/issues/20',
        raises=WebDriverException)
    def testShouldAllowUsersToDismissAnAlertManually(self, driver, pages):
        pages.load("alerts.html")
        driver.find_element(by=By.ID, value="alert").click()
        alert = self._waitForAlert(driver)
        alert.dismiss()
        # If we can perform any action, we're good to go
        assert "Testing Alerts" == driver.title
    @pytest.mark.xfail_phantomjs(
        reason='https://github.com/detro/ghostdriver/issues/20',
        raises=WebDriverException)
    def testShouldAllowAUserToAcceptAPrompt(self, driver, pages):
        pages.load("alerts.html")
        driver.find_element(by=By.ID, value="prompt").click()
        alert = self._waitForAlert(driver)
        alert.accept()
        # If we can perform any action, we're good to go
        assert "Testing Alerts" == driver.title
    @pytest.mark.xfail_phantomjs(
        reason='https://github.com/detro/ghostdriver/issues/20',
        raises=WebDriverException)
    def testShouldAllowAUserToDismissAPrompt(self, driver, pages):
        pages.load("alerts.html")
        driver.find_element(by=By.ID, value="prompt").click()
        alert = self._waitForAlert(driver)
        alert.dismiss()
        # If we can perform any action, we're good to go
        assert "Testing Alerts" == driver.title
    @pytest.mark.xfail_chrome(
        reason='https://bugs.chromium.org/p/chromedriver/issues/detail?id=1500')
    @pytest.mark.xfail_phantomjs(
        reason='https://github.com/detro/ghostdriver/issues/20',
        raises=WebDriverException)
    def testShouldAllowAUserToSetTheValueOfAPrompt(self, driver, pages):
        pages.load("alerts.html")
        driver.find_element(by=By.ID, value="prompt").click()
        alert = self._waitForAlert(driver)
        alert.send_keys("cheese")
        alert.accept()
        result = driver.find_element(by=By.ID, value="text").text
        assert "cheese" == result
    @pytest.mark.xfail_chrome(
        reason='https://bugs.chromium.org/p/chromedriver/issues/detail?id=1353')
    @pytest.mark.xfail_phantomjs(
        reason='https://github.com/detro/ghostdriver/issues/20',
        raises=WebDriverException)
    def testSettingTheValueOfAnAlertThrows(self, driver, pages):
        pages.load("alerts.html")
        driver.find_element(By.ID, "alert").click()
        alert = self._waitForAlert(driver)
        # plain alerts (unlike prompts) have no text box, so typing must raise
        with pytest.raises(InvalidElementStateException):
            alert.send_keys("cheese")
        alert.accept()
    @pytest.mark.xfail_chrome(
        condition=sys.platform == 'darwin',
        reason='https://bugs.chromium.org/p/chromedriver/issues/detail?id=26',
        run=False)
    @pytest.mark.xfail_phantomjs(
        reason='https://github.com/detro/ghostdriver/issues/20',
        raises=WebDriverException)
    def testAlertShouldNotAllowAdditionalCommandsIfDimissed(self, driver, pages):
        # NOTE(review): "Dimissed" is a typo for "Dismissed"; a correctly
        # spelled sibling below covers the accept() case, so this name does
        # not collide -- confirm before renaming (it changes the test ID).
        pages.load("alerts.html")
        driver.find_element(By.ID, "alert").click()
        alert = self._waitForAlert(driver)
        alert.dismiss()
        with pytest.raises(NoAlertPresentException):
            alert.text
    @pytest.mark.xfail_phantomjs(
        reason='https://github.com/detro/ghostdriver/issues/20',
        raises=WebDriverException)
    @pytest.mark.xfail_marionette(reason="https://bugzilla.mozilla.org/show_bug.cgi?id=1314462")
    def testShouldAllowUsersToAcceptAnAlertInAFrame(self, driver, pages):
        pages.load("alerts.html")
        driver.switch_to.frame(driver.find_element(By.NAME, "iframeWithAlert"))
        driver.find_element_by_id("alertInFrame").click()
        alert = self._waitForAlert(driver)
        alert.accept()
        assert "Testing Alerts" == driver.title
    @pytest.mark.xfail_marionette(
        reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1279211',
        raises=TimeoutException)
    @pytest.mark.xfail_phantomjs(
        reason='https://github.com/detro/ghostdriver/issues/20',
        raises=WebDriverException)
    def testShouldAllowUsersToAcceptAnAlertInANestedFrame(self, driver, pages):
        pages.load("alerts.html")
        driver.switch_to.frame(driver.find_element(By.NAME, "iframeWithIframe"))
        driver.switch_to.frame(driver.find_element(By.NAME, "iframeWithAlert"))
        driver.find_element_by_id("alertInFrame").click()
        alert = self._waitForAlert(driver)
        alert.accept()
        assert "Testing Alerts" == driver.title
    def testShouldThrowAnExceptionIfAnAlertHasNotBeenDealtWithAndDismissTheAlert(self):
        # intentionally empty placeholder -- see TODO below
        pass
        # //TODO(David) Complete this test
    @pytest.mark.xfail_phantomjs(
        reason='https://github.com/detro/ghostdriver/issues/20',
        raises=WebDriverException)
    def testPromptShouldUseDefaultValueIfNoKeysSent(self, driver, pages):
        pages.load("alerts.html")
        driver.find_element(By.ID, "prompt-with-default").click()
        alert = self._waitForAlert(driver)
        alert.accept()
        txt = driver.find_element(By.ID, "text").text
        assert "This is a default value" == txt
    @pytest.mark.xfail_phantomjs(
        reason='https://github.com/detro/ghostdriver/issues/20',
        raises=WebDriverException)
    def testPromptShouldHaveNullValueIfDismissed(self, driver, pages):
        pages.load("alerts.html")
        driver.find_element(By.ID, "prompt-with-default").click()
        alert = self._waitForAlert(driver)
        alert.dismiss()
        # the page renders the JS null returned by a dismissed prompt
        assert "null" == driver.find_element(By.ID, "text").text
    @pytest.mark.xfail_marionette(
        reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1279211')
    @pytest.mark.xfail_phantomjs(
        reason='https://github.com/detro/ghostdriver/issues/20',
        raises=WebDriverException)
    @pytest.mark.xfail_chrome(reason="Intermittent on Travis")
    def testHandlesTwoAlertsFromOneInteraction(self, driver, pages):
        pages.load("alerts.html")
        driver.find_element(By.ID, "double-prompt").click()
        alert1 = self._waitForAlert(driver)
        alert1.send_keys("brie")
        alert1.accept()
        alert2 = self._waitForAlert(driver)
        alert2.send_keys("cheddar")
        alert2.accept()
        assert driver.find_element(By.ID, "text1").text == "brie"
        assert driver.find_element(By.ID, "text2").text == "cheddar"
    @pytest.mark.xfail_phantomjs(
        reason='https://github.com/detro/ghostdriver/issues/20',
        raises=WebDriverException)
    def testShouldHandleAlertOnPageLoad(self, driver, pages):
        pages.load("alerts.html")
        driver.find_element(By.ID, "open-page-with-onload-alert").click()
        alert = self._waitForAlert(driver)
        value = alert.text
        alert.accept()
        assert "onload" == value
    @pytest.mark.xfail_phantomjs(
        reason='https://github.com/detro/ghostdriver/issues/20',
        raises=WebDriverException)
    def testShouldHandleAlertOnPageLoadUsingGet(self, driver, pages):
        pages.load("pageWithOnLoad.html")
        alert = self._waitForAlert(driver)
        value = alert.text
        alert.accept()
        assert "onload" == value
        WebDriverWait(driver, 3).until(EC.text_to_be_present_in_element((By.TAG_NAME, "p"), "Page with onload event handler"))
    @pytest.mark.xfail_phantomjs(
        reason='https://github.com/detro/ghostdriver/issues/20',
        raises=WebDriverException)
    def testShouldHandleAlertOnPageBeforeUnload(self, driver, pages):
        pages.load("pageWithOnBeforeUnloadMessage.html")
        element = driver.find_element(By.ID, "navigate")
        element.click()
        alert = self._waitForAlert(driver)
        # dismissing the onbeforeunload dialog must cancel the navigation
        alert.dismiss()
        assert "pageWithOnBeforeUnloadMessage.html" in driver.current_url
        element.click()
        alert = self._waitForAlert(driver)
        alert.accept()
        WebDriverWait(driver, 3).until(EC.title_is("Testing Alerts"))
    @pytest.mark.xfail_phantomjs(
        reason='https://github.com/detro/ghostdriver/issues/20',
        raises=WebDriverException)
    def _testShouldHandleAlertOnPageBeforeUnloadAtQuit(self, driver, pages):
        # leading underscore keeps this out of pytest collection until the
        # session-restart capability mentioned below exists
        # TODO: Add the ability to get a new session
        pages.load("pageWithOnBeforeUnloadMessage.html")
        element = driver.find_element(By.ID, "navigate")
        element.click()
        self._waitForAlert(driver)
        driver.quit()
    @pytest.mark.xfail_phantomjs(
        reason='https://github.com/detro/ghostdriver/issues/20',
        raises=WebDriverException)
    def testShouldAllowTheUserToGetTheTextOfAnAlert(self, driver, pages):
        pages.load("alerts.html")
        driver.find_element(by=By.ID, value="alert").click()
        alert = self._waitForAlert(driver)
        value = alert.text
        alert.accept()
        assert "cheese" == value
    @pytest.mark.xfail_chrome(
        reason='https://bugs.chromium.org/p/chromedriver/issues/detail?id=1500')
    @pytest.mark.xfail_phantomjs(
        reason='https://github.com/detro/ghostdriver/issues/20',
        raises=WebDriverException)
    def testShouldAllowTheUserToGetTheTextOfAPrompt(self, driver, pages):
        pages.load("alerts.html")
        driver.find_element(By.ID, "prompt").click()
        alert = self._waitForAlert(driver)
        value = alert.text
        alert.accept()
        assert "Enter something" == value
    @pytest.mark.xfail_phantomjs(
        reason='https://github.com/detro/ghostdriver/issues/20',
        raises=WebDriverException)
    def testAlertShouldNotAllowAdditionalCommandsIfDismissed(self, driver, pages):
        pages.load("alerts.html")
        driver.find_element(By.ID, "alert").click()
        alert = self._waitForAlert(driver)
        alert.accept()
        with pytest.raises(NoAlertPresentException):
            alert.text
    @pytest.mark.xfail_chrome(
        reason='https://bugs.chromium.org/p/chromedriver/issues/detail?id=1537')
    @pytest.mark.xfail_marionette(
        reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1279211')
    @pytest.mark.xfail_phantomjs(
        reason='https://github.com/detro/ghostdriver/issues/20',
        raises=WebDriverException)
    def testUnexpectedAlertPresentExceptionContainsAlertText(self, driver, pages):
        pages.load("alerts.html")
        driver.find_element(by=By.ID, value="alert").click()
        alert = self._waitForAlert(driver)
        value = alert.text
        # navigating away while the alert is still open must raise and carry
        # the alert's text on the exception
        with pytest.raises(UnexpectedAlertPresentException) as e:
            pages.load("simpleTest.html")
        assert value == e.value.alert_text
        assert "Alert Text: {}".format(value) in str(e)
    def _waitForAlert(self, driver):
        """Block up to three seconds for an alert to appear and return it."""
        return WebDriverWait(driver, 3).until(EC.alert_is_present())
| 38.889488 | 126 | 0.67667 |
import sys
import pytest
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import (
InvalidElementStateException,
NoAlertPresentException,
TimeoutException,
UnexpectedAlertPresentException,
WebDriverException)
class TestAlerts(object):
def testShouldBeAbleToOverrideTheWindowAlertMethod(self, driver, pages):
pages.load("alerts.html")
driver.execute_script(
"window.alert = function(msg) { document.getElementById('text').innerHTML = msg; }")
driver.find_element(by=By.ID, value="alert").click()
try:
assert driver.find_element_by_id('text').text == "cheese"
except Exception as e:
# not dismissing it will affect other tests
try:
self._waitForAlert(driver).dismiss()
except Exception:
pass
raise e
@pytest.mark.xfail_phantomjs(
reason='https://github.com/detro/ghostdriver/issues/20',
raises=WebDriverException)
def testShouldAllowUsersToAcceptAnAlertManually(self, driver, pages):
pages.load("alerts.html")
driver.find_element(by=By.ID, value="alert").click()
alert = self._waitForAlert(driver)
alert.accept()
# If we can perform any action, we're good to go
assert "Testing Alerts" == driver.title
@pytest.mark.xfail_phantomjs(
reason='https://github.com/detro/ghostdriver/issues/20',
raises=WebDriverException)
def testShouldAllowUsersToAcceptAnAlertWithNoTextManually(self, driver, pages):
pages.load("alerts.html")
driver.find_element(By.ID, "empty-alert").click()
alert = self._waitForAlert(driver)
alert.accept()
assert "Testing Alerts" == driver.title
@pytest.mark.xfail_phantomjs(
reason='https://github.com/detro/ghostdriver/issues/20',
raises=WebDriverException)
def testShouldGetTextOfAlertOpenedInSetTimeout(self, driver, pages):
pages.load("alerts.html")
driver.find_element_by_id("slow-alert").click()
# DO NOT WAIT OR SLEEP HERE
# This is a regression test for a bug where only the first switchTo call would throw,
# and only if it happens before the alert actually loads.
alert = self._waitForAlert(driver)
try:
assert "Slow" == alert.text
finally:
alert.accept()
@pytest.mark.xfail_chrome(
condition=sys.platform == 'darwin',
reason='https://bugs.chromium.org/p/chromedriver/issues/detail?id=26',
run=False)
@pytest.mark.xfail_phantomjs(
reason='https://github.com/detro/ghostdriver/issues/20',
raises=WebDriverException)
def testShouldAllowUsersToDismissAnAlertManually(self, driver, pages):
pages.load("alerts.html")
driver.find_element(by=By.ID, value="alert").click()
alert = self._waitForAlert(driver)
alert.dismiss()
# If we can perform any action, we're good to go
assert "Testing Alerts" == driver.title
@pytest.mark.xfail_phantomjs(
reason='https://github.com/detro/ghostdriver/issues/20',
raises=WebDriverException)
def testShouldAllowAUserToAcceptAPrompt(self, driver, pages):
pages.load("alerts.html")
driver.find_element(by=By.ID, value="prompt").click()
alert = self._waitForAlert(driver)
alert.accept()
assert "Testing Alerts" == driver.title
@pytest.mark.xfail_phantomjs(
reason='https://github.com/detro/ghostdriver/issues/20',
raises=WebDriverException)
def testShouldAllowAUserToDismissAPrompt(self, driver, pages):
pages.load("alerts.html")
driver.find_element(by=By.ID, value="prompt").click()
alert = self._waitForAlert(driver)
alert.dismiss()
# If we can perform any action, we're good to go
assert "Testing Alerts" == driver.title
@pytest.mark.xfail_chrome(
reason='https://bugs.chromium.org/p/chromedriver/issues/detail?id=1500')
@pytest.mark.xfail_phantomjs(
reason='https://github.com/detro/ghostdriver/issues/20',
raises=WebDriverException)
def testShouldAllowAUserToSetTheValueOfAPrompt(self, driver, pages):
pages.load("alerts.html")
driver.find_element(by=By.ID, value="prompt").click()
alert = self._waitForAlert(driver)
alert.send_keys("cheese")
alert.accept()
result = driver.find_element(by=By.ID, value="text").text
assert "cheese" == result
@pytest.mark.xfail_chrome(
reason='https://bugs.chromium.org/p/chromedriver/issues/detail?id=1353')
@pytest.mark.xfail_phantomjs(
reason='https://github.com/detro/ghostdriver/issues/20',
raises=WebDriverException)
def testSettingTheValueOfAnAlertThrows(self, driver, pages):
pages.load("alerts.html")
driver.find_element(By.ID, "alert").click()
alert = self._waitForAlert(driver)
with pytest.raises(InvalidElementStateException):
alert.send_keys("cheese")
alert.accept()
@pytest.mark.xfail_chrome(
condition=sys.platform == 'darwin',
reason='https://bugs.chromium.org/p/chromedriver/issues/detail?id=26',
run=False)
@pytest.mark.xfail_phantomjs(
reason='https://github.com/detro/ghostdriver/issues/20',
raises=WebDriverException)
def testAlertShouldNotAllowAdditionalCommandsIfDimissed(self, driver, pages):
pages.load("alerts.html")
driver.find_element(By.ID, "alert").click()
alert = self._waitForAlert(driver)
alert.dismiss()
with pytest.raises(NoAlertPresentException):
alert.text
@pytest.mark.xfail_phantomjs(
reason='https://github.com/detro/ghostdriver/issues/20',
raises=WebDriverException)
@pytest.mark.xfail_marionette(reason="https://bugzilla.mozilla.org/show_bug.cgi?id=1314462")
def testShouldAllowUsersToAcceptAnAlertInAFrame(self, driver, pages):
pages.load("alerts.html")
driver.switch_to.frame(driver.find_element(By.NAME, "iframeWithAlert"))
driver.find_element_by_id("alertInFrame").click()
alert = self._waitForAlert(driver)
alert.accept()
assert "Testing Alerts" == driver.title
@pytest.mark.xfail_marionette(
reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1279211',
raises=TimeoutException)
@pytest.mark.xfail_phantomjs(
reason='https://github.com/detro/ghostdriver/issues/20',
raises=WebDriverException)
def testShouldAllowUsersToAcceptAnAlertInANestedFrame(self, driver, pages):
pages.load("alerts.html")
driver.switch_to.frame(driver.find_element(By.NAME, "iframeWithIframe"))
driver.switch_to.frame(driver.find_element(By.NAME, "iframeWithAlert"))
driver.find_element_by_id("alertInFrame").click()
alert = self._waitForAlert(driver)
alert.accept()
assert "Testing Alerts" == driver.title
def testShouldThrowAnExceptionIfAnAlertHasNotBeenDealtWithAndDismissTheAlert(self):
pass
@pytest.mark.xfail_phantomjs(
reason='https://github.com/detro/ghostdriver/issues/20',
raises=WebDriverException)
def testPromptShouldUseDefaultValueIfNoKeysSent(self, driver, pages):
pages.load("alerts.html")
driver.find_element(By.ID, "prompt-with-default").click()
alert = self._waitForAlert(driver)
alert.accept()
txt = driver.find_element(By.ID, "text").text
assert "This is a default value" == txt
@pytest.mark.xfail_phantomjs(
reason='https://github.com/detro/ghostdriver/issues/20',
raises=WebDriverException)
def testPromptShouldHaveNullValueIfDismissed(self, driver, pages):
pages.load("alerts.html")
driver.find_element(By.ID, "prompt-with-default").click()
alert = self._waitForAlert(driver)
alert.dismiss()
assert "null" == driver.find_element(By.ID, "text").text
@pytest.mark.xfail_marionette(
reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1279211')
@pytest.mark.xfail_phantomjs(
reason='https://github.com/detro/ghostdriver/issues/20',
raises=WebDriverException)
@pytest.mark.xfail_chrome(reason="Intermittent on Travis")
def testHandlesTwoAlertsFromOneInteraction(self, driver, pages):
pages.load("alerts.html")
driver.find_element(By.ID, "double-prompt").click()
alert1 = self._waitForAlert(driver)
alert1.send_keys("brie")
alert1.accept()
alert2 = self._waitForAlert(driver)
alert2.send_keys("cheddar")
alert2.accept()
assert driver.find_element(By.ID, "text1").text == "brie"
assert driver.find_element(By.ID, "text2").text == "cheddar"
@pytest.mark.xfail_phantomjs(
reason='https://github.com/detro/ghostdriver/issues/20',
raises=WebDriverException)
def testShouldHandleAlertOnPageLoad(self, driver, pages):
pages.load("alerts.html")
driver.find_element(By.ID, "open-page-with-onload-alert").click()
alert = self._waitForAlert(driver)
value = alert.text
alert.accept()
assert "onload" == value
@pytest.mark.xfail_phantomjs(
reason='https://github.com/detro/ghostdriver/issues/20',
raises=WebDriverException)
def testShouldHandleAlertOnPageLoadUsingGet(self, driver, pages):
pages.load("pageWithOnLoad.html")
alert = self._waitForAlert(driver)
value = alert.text
alert.accept()
assert "onload" == value
WebDriverWait(driver, 3).until(EC.text_to_be_present_in_element((By.TAG_NAME, "p"), "Page with onload event handler"))
@pytest.mark.xfail_phantomjs(
reason='https://github.com/detro/ghostdriver/issues/20',
raises=WebDriverException)
def testShouldHandleAlertOnPageBeforeUnload(self, driver, pages):
pages.load("pageWithOnBeforeUnloadMessage.html")
element = driver.find_element(By.ID, "navigate")
element.click()
alert = self._waitForAlert(driver)
alert.dismiss()
assert "pageWithOnBeforeUnloadMessage.html" in driver.current_url
element.click()
alert = self._waitForAlert(driver)
alert.accept()
WebDriverWait(driver, 3).until(EC.title_is("Testing Alerts"))
@pytest.mark.xfail_phantomjs(
reason='https://github.com/detro/ghostdriver/issues/20',
raises=WebDriverException)
def _testShouldHandleAlertOnPageBeforeUnloadAtQuit(self, driver, pages):
pages.load("pageWithOnBeforeUnloadMessage.html")
element = driver.find_element(By.ID, "navigate")
element.click()
self._waitForAlert(driver)
driver.quit()
@pytest.mark.xfail_phantomjs(
reason='https://github.com/detro/ghostdriver/issues/20',
raises=WebDriverException)
def testShouldAllowTheUserToGetTheTextOfAnAlert(self, driver, pages):
pages.load("alerts.html")
driver.find_element(by=By.ID, value="alert").click()
alert = self._waitForAlert(driver)
value = alert.text
alert.accept()
assert "cheese" == value
@pytest.mark.xfail_chrome(
reason='https://bugs.chromium.org/p/chromedriver/issues/detail?id=1500')
@pytest.mark.xfail_phantomjs(
reason='https://github.com/detro/ghostdriver/issues/20',
raises=WebDriverException)
def testShouldAllowTheUserToGetTheTextOfAPrompt(self, driver, pages):
pages.load("alerts.html")
driver.find_element(By.ID, "prompt").click()
alert = self._waitForAlert(driver)
value = alert.text
alert.accept()
assert "Enter something" == value
@pytest.mark.xfail_phantomjs(
reason='https://github.com/detro/ghostdriver/issues/20',
raises=WebDriverException)
def testAlertShouldNotAllowAdditionalCommandsIfDismissed(self, driver, pages):
pages.load("alerts.html")
driver.find_element(By.ID, "alert").click()
alert = self._waitForAlert(driver)
alert.accept()
with pytest.raises(NoAlertPresentException):
alert.text
@pytest.mark.xfail_chrome(
reason='https://bugs.chromium.org/p/chromedriver/issues/detail?id=1537')
@pytest.mark.xfail_marionette(
reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1279211')
@pytest.mark.xfail_phantomjs(
reason='https://github.com/detro/ghostdriver/issues/20',
raises=WebDriverException)
def testUnexpectedAlertPresentExceptionContainsAlertText(self, driver, pages):
pages.load("alerts.html")
driver.find_element(by=By.ID, value="alert").click()
alert = self._waitForAlert(driver)
value = alert.text
with pytest.raises(UnexpectedAlertPresentException) as e:
pages.load("simpleTest.html")
assert value == e.value.alert_text
assert "Alert Text: {}".format(value) in str(e)
def _waitForAlert(self, driver):
return WebDriverWait(driver, 3).until(EC.alert_is_present())
| true | true |
1c30a56a1bb8eb9a1423caa15dc205466ded19e1 | 1,017 | py | Python | mantrap_evaluation/scenarios/custom/surrounding.py | simon-schaefer/mantrap | 9a2b3f32a0005cc0cb79bb78924f09da5a94587d | [
"MIT"
] | 7 | 2020-05-11T18:13:27.000Z | 2022-03-09T02:52:48.000Z | mantrap_evaluation/scenarios/custom/surrounding.py | StanfordASL/mantrap | 9a2b3f32a0005cc0cb79bb78924f09da5a94587d | [
"MIT"
] | null | null | null | mantrap_evaluation/scenarios/custom/surrounding.py | StanfordASL/mantrap | 9a2b3f32a0005cc0cb79bb78924f09da5a94587d | [
"MIT"
] | 3 | 2020-12-09T00:03:26.000Z | 2022-03-03T10:39:03.000Z | import typing
import mantrap
import torch
import mantrap_evaluation.scenarios.api
def custom_surrounding(env_type: mantrap.environment.base.GraphBasedEnvironment.__class__, **env_kwargs
                       ) -> typing.Tuple[mantrap.environment.base.GraphBasedEnvironment,
                                         torch.Tensor,
                                         typing.Union[typing.Dict[str, torch.Tensor], None]]:
    """Build the "surrounding" evaluation scenario.

    A single pedestrian rests just off the origin while the robot must travel
    from the far left to the far right, i.e. it has to go around the agent.

    Returns the initialised environment, the robot's goal position and ``None``
    (no ground-truth ado trajectories for this synthetic scenario).
    """
    # robot initial state (x, y, vx, vy) and goal position
    robot_start = torch.tensor([-6, 0, 0, 0])
    robot_goal = torch.tensor([6, 0])
    # single pedestrian resting near the origin; history is one state row
    pedestrian_history = torch.tensor([0, 0.1, 0, 0, 0]).unsqueeze(dim=0)
    pedestrian_goal = torch.tensor([0, 0.1])

    env = mantrap_evaluation.scenarios.api.create_environment(
        config_name="custom_surrounding",
        env_type=env_type,
        ado_histories=[pedestrian_history],
        ego_state=robot_start,
        ado_goals=[pedestrian_goal],
        **env_kwargs
    )
    return env, robot_goal, None
| 33.9 | 103 | 0.633235 | import typing
import mantrap
import torch
import mantrap_evaluation.scenarios.api
def custom_surrounding(env_type: mantrap.environment.base.GraphBasedEnvironment.__class__, **env_kwargs
) -> typing.Tuple[mantrap.environment.base.GraphBasedEnvironment,
torch.Tensor,
typing.Union[typing.Dict[str, torch.Tensor], None]]:
ego_state = torch.tensor([-6, 0, 0, 0])
ego_goal = torch.tensor([6, 0])
ado_histories = torch.tensor([0, 0.1, 0, 0, 0]).unsqueeze(dim=0)
ado_goals = torch.tensor([0, 0.1])
return mantrap_evaluation.scenarios.api.create_environment(
config_name="custom_surrounding",
env_type=env_type,
ado_histories=[ado_histories],
ego_state=ego_state,
ado_goals=[ado_goals],
**env_kwargs
), ego_goal, None
| true | true |
1c30aa0c9fc62c15a39061bebfd6add5bbd695ab | 3,425 | py | Python | clients/python/generated/swaggyjenkins/models/link.py | PankTrue/swaggy-jenkins | aca35a7cca6e1fcc08bd399e05148942ac2f514b | [
"MIT"
] | 23 | 2017-08-01T12:25:26.000Z | 2022-01-25T03:44:11.000Z | clients/python/generated/swaggyjenkins/models/link.py | PankTrue/swaggy-jenkins | aca35a7cca6e1fcc08bd399e05148942ac2f514b | [
"MIT"
] | 35 | 2017-06-14T03:28:15.000Z | 2022-02-14T10:25:54.000Z | clients/python/generated/swaggyjenkins/models/link.py | PankTrue/swaggy-jenkins | aca35a7cca6e1fcc08bd399e05148942ac2f514b | [
"MIT"
] | 11 | 2017-08-31T19:00:20.000Z | 2021-12-19T12:04:12.000Z | # coding: utf-8
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
OpenAPI spec version: 1.1.1
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class Link(object):
    """Auto-generated OpenAPI model for a hypermedia link.

    Follows the generated-model contract: attribute types are declared in
    ``openapi_types``, JSON field names in ``attribute_map``, and the usual
    ``to_dict`` / ``to_str`` / equality helpers are provided.
    """

    # attribute name -> attribute type
    openapi_types = {
        '_class': 'str',
        'href': 'str'
    }

    # attribute name -> json key in the API definition
    attribute_map = {
        '_class': '_class',
        'href': 'href'
    }

    def __init__(self, _class=None, href=None):  # noqa: E501
        """Create a Link, assigning only the fields that were supplied."""
        self.__class = None
        self._href = None
        self.discriminator = None
        for name, supplied in (('_class', _class), ('href', href)):
            if supplied is not None:
                # goes through the property setters, same as direct assignment
                setattr(self, name, supplied)

    @property
    def _class(self):
        """str: the ``_class`` field of this Link."""
        return self.__class

    @_class.setter
    def _class(self, _class):
        """Set the ``_class`` field of this Link."""
        self.__class = _class

    @property
    def href(self):
        """str: the ``href`` field of this Link."""
        return self._href

    @href.setter
    def href(self, href):
        """Set the ``href`` field of this Link."""
        self._href = href

    def to_dict(self):
        """Return the model's properties as a dict, converting nested models."""
        def serialize(value):
            # nested generated models expose to_dict(); containers are
            # converted element-wise, every other value passes through
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value

        return {name: serialize(getattr(self, name))
                for name in self.openapi_types}

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two Links are equal when both are Links with identical attributes."""
        return isinstance(other, Link) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| 24.464286 | 85 | 0.533431 |
import pprint
import re
import six
class Link(object):
    """OpenAPI-generated model describing a link (`_class`, `href`).

    Generated-code conventions are kept: ``openapi_types`` maps attribute
    names to their declared types, ``attribute_map`` maps attributes to
    their JSON keys, and values flow through the property setters.
    """
    openapi_types = {
        '_class': 'str',
        'href': 'str'
    }
    attribute_map = {
        '_class': '_class',
        'href': 'href'
    }
    def __init__(self, _class=None, href=None):
        # Backing fields; only assigned through the property setters below.
        self.__class = None
        self._href = None
        self.discriminator = None
        if _class is not None:
            self._class = _class
        if href is not None:
            self.href = href
    @property
    def _class(self):
        """The `_class` attribute of this Link."""
        return self.__class
    @_class.setter
    def _class(self, _class):
        self.__class = _class
    @property
    def href(self):
        """The `href` attribute of this Link."""
        return self._href
    @href.setter
    def href(self, href):
        self._href = href
    def to_dict(self):
        """Return this model's attributes as a plain dict, recursing into
        nested models, lists and dicts that expose ``to_dict``."""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    entry.to_dict() if hasattr(entry, "to_dict") else entry
                    for entry in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Pretty-printed string form of ``to_dict()``."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """Used by ``print`` and ``pprint``."""
        return self.to_str()
    def __eq__(self, other):
        """Equal iff ``other`` is a Link with identical instance state."""
        if isinstance(other, Link):
            return self.__dict__ == other.__dict__
        return False
    def __ne__(self, other):
        """Logical inverse of ``__eq__``."""
        return not self == other
| true | true |
1c30aa80c53775faa6404ce0a3b6f030e89215b5 | 1,420 | py | Python | monk/tf_keras_1/training/callbacks.py | kshitij12345/monk_v1 | 9e2ccdd51f3c1335ed732cca5cc5fb7daea66139 | [
"Apache-2.0"
] | 2 | 2020-09-16T06:05:50.000Z | 2021-04-07T12:05:20.000Z | monk/tf_keras_1/training/callbacks.py | kshitij12345/monk_v1 | 9e2ccdd51f3c1335ed732cca5cc5fb7daea66139 | [
"Apache-2.0"
] | null | null | null | monk/tf_keras_1/training/callbacks.py | kshitij12345/monk_v1 | 9e2ccdd51f3c1335ed732cca5cc5fb7daea66139 | [
"Apache-2.0"
] | 1 | 2020-10-07T12:57:44.000Z | 2020-10-07T12:57:44.000Z | from tf_keras_1.training.imports import *
from system.imports import *
class TimeHistory(krc.Callback):
    """Keras callback that records per-epoch wall-clock training time.

    Elapsed seconds for each epoch are appended to ``self.times`` and,
    when ``log_dir`` is given, written line-by-line to ``<log_dir>times.txt``.
    NOTE(review): ``log_dir`` is concatenated verbatim, so it must already
    end with a path separator — confirm callers always pass a trailing "/".
    """
    def __init__(self, log_dir=None):
        super().__init__()
        if log_dir:
            self.log_file = log_dir + "times.txt"
            # Append mode so repeated runs accumulate into the same log file.
            self.f = open(self.log_file, 'a')
        else:
            self.log_file = None
    def on_train_begin(self, logs=None):
        # Reset the per-epoch duration record at the start of training.
        self.times = []
    def on_train_end(self, logs=None):
        if self.log_file:
            self.f.close()
    def on_epoch_begin(self, epoch, logs=None):
        self.epoch_time_start = time.time()
    def on_epoch_end(self, epoch, logs=None):
        # Fix: compute the duration once so the value stored in self.times
        # and the value written to the log agree. (The original sampled
        # time.time() twice and logged a slightly different number than it
        # recorded.)
        elapsed = time.time() - self.epoch_time_start
        self.times.append(elapsed)
        if self.log_file:
            self.f.write(str(elapsed) + "\n")
class MemoryHistory(krc.Callback):
    """Keras callback that tracks peak memory usage on GPU 0.

    ``max_gpu_usage`` holds the largest GPUtil ``memoryUsed`` reading
    observed at any epoch boundary (units per GPUtil — presumably MB;
    confirm against GPUtil documentation).
    """
    def __init__(self):
        super().__init__()
        # Running maximum of the observed GPU memory usage.
        self.max_gpu_usage = 0
    def on_train_begin(self, logs={}):
        return
    def on_train_end(self, logs={}):
        return
    def on_epoch_begin(self, batch, logs={}):
        return
    def on_epoch_end(self, batch, logs={}):
        # Local import: GPUtil is only required once an epoch completes.
        import GPUtil
        detected_gpus = GPUtil.getGPUs()
        if len(detected_gpus) > 0:
            current_usage = int(detected_gpus[0].memoryUsed)
            if current_usage > self.max_gpu_usage:
                self.max_gpu_usage = current_usage
return | 26.792453 | 74 | 0.569014 | from tf_keras_1.training.imports import *
from system.imports import *
class TimeHistory(krc.Callback):
    """Keras callback that records per-epoch wall-clock training time.

    Elapsed seconds are appended to ``self.times`` and, when ``log_dir``
    is given, written line-by-line to ``<log_dir>times.txt``.
    NOTE(review): ``log_dir`` is concatenated verbatim, so it presumably
    must end with a path separator — confirm against callers.
    """
    def __init__(self, log_dir=None):
        """log_dir: prefix for the times log; None disables file logging."""
        super().__init__()
        if(log_dir):
            self.log_file = log_dir + "times.txt";
            # Append mode: successive runs accumulate into the same file.
            self.f = open(self.log_file, 'a');
        else:
            self.log_file=None
    def on_train_begin(self, logs={}):
        # Reset the per-epoch duration record at the start of training.
        self.times = [];
    def on_train_end(self, logs={}):
        if(self.log_file):
            self.f.close();
    def on_epoch_begin(self, epoch, logs={}):
        self.epoch_time_start = time.time()
    def on_epoch_end(self, epoch, logs={}):
        # NOTE(review): time.time() is sampled twice here, so the value
        # appended to self.times differs slightly from the value logged.
        self.times.append(time.time() - self.epoch_time_start)
        if(self.log_file):
            self.f.write(str(time.time() - self.epoch_time_start) + "\n");
class MemoryHistory(krc.Callback):
    """Keras callback tracking peak GPU-0 memory usage across epochs.

    ``max_gpu_usage`` holds the largest GPUtil ``memoryUsed`` reading
    observed at any epoch end (units per GPUtil — presumably MB; confirm).
    """
    def __init__(self):
        super().__init__()
        # Running maximum of the observed GPU memory usage.
        self.max_gpu_usage=0;
    def on_train_begin(self, logs={}):
        return
    def on_train_end(self, logs={}):
        return
    def on_epoch_begin(self, batch, logs={}):
        return
    def on_epoch_end(self, batch, logs={}):
        # Local import: GPUtil is only required once an epoch completes.
        import GPUtil
        GPUs = GPUtil.getGPUs()
        if(len(GPUs) > 0):
            gpuMemoryUsed = GPUs[0].memoryUsed
            if(self.max_gpu_usage < int(gpuMemoryUsed)):
                self.max_gpu_usage = int(gpuMemoryUsed);
        return
1c30ab68eda7401a845ee15168466fe52d026520 | 341 | py | Python | other/dingding/dingtalk/api/rest/OapiRhinoMosSpaceWorkerCheckOutRequest.py | hth945/pytest | 83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc | [
"Apache-2.0"
] | null | null | null | other/dingding/dingtalk/api/rest/OapiRhinoMosSpaceWorkerCheckOutRequest.py | hth945/pytest | 83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc | [
"Apache-2.0"
] | null | null | null | other/dingding/dingtalk/api/rest/OapiRhinoMosSpaceWorkerCheckOutRequest.py | hth945/pytest | 83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc | [
"Apache-2.0"
] | null | null | null | '''
Created by auto_sdk on 2020.03.07
'''
from dingtalk.api.base import RestApi
class OapiRhinoMosSpaceWorkerCheckOutRequest(RestApi):
	"""DingTalk OpenAPI request wrapper (auto-SDK style) for the
	rhino MOS space worker check-out endpoint."""
	def __init__(self, url=None):
		RestApi.__init__(self, url)
		# Request payload object; populated by the caller before execution.
		self.request = None
	def getHttpMethod(self):
		"""HTTP verb used for this API call."""
		return 'POST'
	def getapiname(self):
		"""Dotted API name used by the DingTalk gateway for routing."""
		return 'dingtalk.oapi.rhino.mos.space.worker.check.out'
| 22.733333 | 57 | 0.762463 | from dingtalk.api.base import RestApi
class OapiRhinoMosSpaceWorkerCheckOutRequest(RestApi):
	"""Auto-generated DingTalk OpenAPI request: rhino MOS space worker check-out."""
	def __init__(self,url=None):
		RestApi.__init__(self,url)
		# Request payload; set by the caller before the API is executed.
		self.request = None
	def getHttpMethod(self):
		"""HTTP verb for this endpoint."""
		return 'POST'
	def getapiname(self):
		"""Dotted API name used by the DingTalk gateway for routing."""
		return 'dingtalk.oapi.rhino.mos.space.worker.check.out'
| true | true |
1c30abf6f715a8a96fc50d02a1bfc55be0a33371 | 26,197 | py | Python | deepspeed/runtime/pipe/module.py | olegklimov/DeepSpeed | cd7967d6b5465e0f1e52d1e111b98a04720e8f37 | [
"MIT"
] | 6,728 | 2020-02-07T23:53:18.000Z | 2022-03-31T20:02:53.000Z | deepspeed/runtime/pipe/module.py | jfc4050/DeepSpeed | 80e263c571599a93f412c850f0ff7a44a9051a90 | [
"MIT"
] | 1,104 | 2020-02-08T00:26:15.000Z | 2022-03-31T22:33:56.000Z | deepspeed/runtime/pipe/module.py | jfc4050/DeepSpeed | 80e263c571599a93f412c850f0ff7a44a9051a90 | [
"MIT"
] | 801 | 2020-02-10T15:33:42.000Z | 2022-03-29T16:32:33.000Z | import os
import glob
import enum
import re as regex
from collections import defaultdict
from functools import partial
import torch
import torch.nn as nn
import torch.distributed as dist
from deepspeed.utils import logger
from .. import utils as ds_utils
from ..activation_checkpointing import checkpointing
from .topology import PipeDataParallelTopology, PipelineParallelGrid
from deepspeed.runtime.state_dict_factory import SDLoaderFactory
class PipelineError(Exception):
    """Raised when deepspeed.PipelineModule is configured or used incorrectly."""
class LayerSpec:
    """Building block for specifying pipeline-parallel modules.

    A LayerSpec records a layer's class plus the positional/keyword
    arguments needed to construct it, so instantiation can be deferred
    until the owning pipeline stage is known. For example,

    .. code-block:: python

        nn.Sequence(
            torch.nn.Linear(self.in_dim, self.hidden_dim, bias=False),
            torch.nn.Linear(self.hidden_hidden, self.out_dim)
        )

    becomes

    .. code-block:: python

        layer_specs = [
            LayerSpec(torch.nn.Linear, self.in_dim, self.hidden_dim, bias=False),
            LayerSpec(torch.nn.Linear, self.hidden_hidden, self.out_dim)]
        ]
    """
    def __init__(self, typename, *module_args, **module_kwargs):
        self.typename = typename
        self.module_args = module_args
        self.module_kwargs = module_kwargs
        if not issubclass(typename, nn.Module):
            raise RuntimeError('LayerSpec only supports torch.nn.Module types.')
        # Remember which rank created the spec (used only in log messages);
        # -1 means torch.distributed was not initialized yet.
        self.global_rank = dist.get_rank() if dist.is_initialized() else -1
    def __repr__(self):
        return ds_utils.call_to_str(self.typename.__name__,
                                    self.module_args,
                                    self.module_kwargs)
    def build(self, log=False):
        """Instantiate and return the stored layer class."""
        if log:
            logger.info(f'RANK={self.global_rank} building {repr(self)}')
        return self.typename(*self.module_args, **self.module_kwargs)
class TiedLayerSpec(LayerSpec):
    """LayerSpec for a layer whose weights are shared (tied) across
    pipeline stages: every spec with the same ``key`` resolves to one
    module instance inside a PipelineModule."""
    def __init__(self,
                 key,
                 typename,
                 *module_args,
                 forward_fn=None,
                 tied_weight_attr='weight',
                 **module_kwargs):
        super().__init__(typename, *module_args, **module_kwargs)
        self.key = key
        # Optional custom forward, invoked as forward_fn(module, inputs).
        self.forward_fn = forward_fn
        # Attribute name of the tied parameter on the module (e.g. 'weight').
        self.tied_weight_attr = tied_weight_attr
class PipelineModule(nn.Module):
    """Express a model as a flat sequence of layers for pipeline-parallel
    execution. See ``__init__`` for the full contract.
    """
    def __init__(self,
                 layers,
                 num_stages=None,
                 topology=None,
                 loss_fn=None,
                 seed_layers=False,
                 seed_fn=None,
                 base_seed=1234,
                 partition_method='parameters',
                 activation_checkpoint_interval=0,
                 activation_checkpoint_func=checkpointing.checkpoint,
                 checkpointable_layers=None):
        """Modules to be parallelized with pipeline parallelism.

        The key constraint that enables pipeline parallelism is the
        representation of the forward pass as a sequence of layers
        and the enforcement of a simple interface between them. The
        forward pass is implicitly defined by the module ``layers``. The key
        assumption is that the output of each layer can be directly fed as
        input to the next, like a ``torch.nn.Sequence``. The forward pass is
        implicitly:

        .. code-block:: python

            def forward(self, inputs):
                x = inputs
                for layer in self.layers:
                    x = layer(x)
                return x

        .. note::
            Pipeline parallelism is not compatible with ZeRO-2 and ZeRO-3.

        Args:
            layers (Iterable): A sequence of layers defining pipeline structure. Can be a ``torch.nn.Sequential`` module.
            num_stages (int, optional): The degree of pipeline parallelism. If not specified, ``topology`` must be provided.
            topology (``deepseed.pipe.ProcessTopology``, optional): Defines the axes of parallelism axes for training. Must be provided if ``num_stages`` is ``None``.
            loss_fn (callable, optional): Loss is computed ``loss = loss_fn(outputs, label)``
            seed_layers (bool, optional): Use a different seed for each layer. Defaults to False.
            seed_fn (callable, optional): Custom seeding function; defaults to ds_utils.set_random_seed.
            base_seed (int, optional): Starting seed for layer seeding. Defaults to 1234.
            partition_method (str, optional): 'parameters', 'uniform', or 'type:<regex>'. Defaults to 'parameters'.
            activation_checkpoint_interval (int, optional): The granularity activation checkpointing in terms of number of layers. 0 disables activation checkpointing.
            activation_checkpoint_func (callable, optional): The function to use for activation checkpointing. Defaults to ``deepspeed.checkpointing.checkpoint``.
            checkpointable_layers (list, optional): Class names eligible for activation checkpointing. Defaults to None (no extra filtering).
        """
        super().__init__()
        if num_stages is None and topology is None:
            raise RuntimeError('must provide num_stages or topology')
        self.micro_offset = 0
        self.loss_fn = loss_fn
        self.checkpointable_layers = checkpointable_layers
        if checkpointable_layers is not None:
            assert isinstance(checkpointable_layers, list), "param `checkpointable_layers` must be type of list."
        self.seed_layers = seed_layers
        self.seed_fn = seed_fn
        self.base_seed = base_seed
        if dist.get_rank() == 0:
            try:
                seed_str = self.seed_fn.__name__
            except AttributeError:
                seed_str = None
            print(
                f'SEED_LAYERS={self.seed_layers} BASE_SEED={self.base_seed} SEED_FN={seed_str}'
            )
        # Setup world info
        self.world_group = dist.new_group(ranks=range(dist.get_world_size()))
        self.global_rank = dist.get_rank(group=self.world_group)
        self.world_size = dist.get_world_size(group=self.world_group)
        # NOTE(review): if LOCAL_RANK is unset, os.environ.get returns None and
        # int(None) raises TypeError before the assert below ever fires.
        self.local_rank = int(os.environ.get("LOCAL_RANK", None))
        assert self.local_rank != None
        if topology:
            self._topo = topology
            self.num_stages = self._topo.get_dim('pipe')
        else:
            self.num_stages = num_stages
            if topology is None:
                if self.world_size % self.num_stages != 0:
                    raise RuntimeError(
                        f'num_stages ({self.num_stages}) must divide distributed world size ({self.world_size})'
                    )
                # Default: pure pipeline x data parallelism, no model parallel axis.
                dp = self.world_size // num_stages
                topology = PipeDataParallelTopology(num_pp=num_stages, num_dp=dp)
            self._topo = topology
        # Construct communicators for pipeline topology
        self._grid = PipelineParallelGrid(process_group=self.world_group,
                                          topology=self._topo)
        self.stage_id = self._topo.get_coord(self.global_rank).pipe
        # Initialize partition information
        self._layer_specs = list(layers)
        self._num_layers = len(self._layer_specs)
        self._local_start = 0
        self._local_stop = None
        self._partition_layers(method=partition_method)
        self.forward_funcs = []
        self.tied_modules = nn.ModuleDict()
        self.tied_weight_attrs = {}
        # Offset the random seed by the stage ID.
        #newseed = torch.cuda.initial_seed() + self._grid.get_stage_id()
        #ds_utils.set_random_seed(newseed)
        #with torch.random.fork_rng(devices=[torch.cuda.current_device()]):
        self._build()
        self.to(f'cuda:{self.local_rank}')
        self.tied_comms = self._index_tied_modules()
        self._synchronize_tied_weights()
        self.activation_checkpoint_interval = activation_checkpoint_interval
        self.activation_checkpoint_func = activation_checkpoint_func
    def _build(self):
        """Allocate this stage's slice of layers (``_local_start`` to
        ``_local_stop``) and populate ``forward_funcs``/``tied_modules``."""
        specs = self._layer_specs
        for local_idx, layer in enumerate(specs[self._local_start:self._local_stop]):
            layer_idx = local_idx + self._local_start
            if self.seed_layers:
                if self.seed_fn:
                    self.seed_fn(self.base_seed + layer_idx)
                else:
                    ds_utils.set_random_seed(self.base_seed + layer_idx)
            # Recursively build PipelineModule objects
            if isinstance(layer, PipelineModule):
                raise NotImplementedError('RECURSIVE BUILD NOT YET IMPLEMENTED')
            # LayerSpec objects contain an nn.Module that should be allocated now.
            elif isinstance(layer, nn.Module):
                name = str(layer_idx)
                self.forward_funcs.append(layer)
                self.add_module(name, layer)
            # TiedLayerSpec objects contain an nn.Module that should be allocated now.
            elif isinstance(layer, TiedLayerSpec):
                # Build and register the module if we haven't seen it before.
                if layer.key not in self.tied_modules:
                    self.tied_modules[layer.key] = layer.build()
                    self.tied_weight_attrs[layer.key] = layer.tied_weight_attr
                if layer.forward_fn is None:
                    # Just use forward()
                    self.forward_funcs.append(self.tied_modules[layer.key])
                else:
                    # User specified fn with args (module, input)
                    self.forward_funcs.append(
                        partial(layer.forward_fn,
                                self.tied_modules[layer.key]))
            # LayerSpec objects contain an nn.Module that should be allocated now.
            elif isinstance(layer, LayerSpec):
                module = layer.build()
                name = str(layer_idx)
                self.forward_funcs.append(module)
                self.add_module(name, module)
            # Last option: layer may be a functional (e.g., lambda). We do nothing in
            # that case and just use it in forward()
            else:
                self.forward_funcs.append(layer)
        # All pipeline parameters should be considered as model parallel in the context
        # of our FP16 optimizer
        for p in self.parameters():
            p.ds_pipe_replicated = False
    def _count_layer_params(self):
        """Count the trainable parameters in individual layers.

        This routine will only build one layer at a time.

        Returns:
            A list of the number of parameters in each layer.
        """
        param_counts = [0] * len(self._layer_specs)
        for idx, layer in enumerate(self._layer_specs):
            if isinstance(layer, LayerSpec):
                # Build a throwaway instance just to count its parameters.
                l = layer.build()
                params = filter(lambda p: p.requires_grad, l.parameters())
                param_counts[idx] = sum(p.numel() for p in params)
            elif isinstance(layer, nn.Module):
                params = filter(lambda p: p.requires_grad, layer.parameters())
                param_counts[idx] = sum(p.numel() for p in params)
        return param_counts
    def _find_layer_type(self, layername):
        """Return the indices of layer specs whose class name matches the
        (case-insensitive) regex ``layername``; raises if none match."""
        idxs = []
        typeregex = regex.compile(layername, regex.IGNORECASE)
        for idx, layer in enumerate(self._layer_specs):
            name = None
            if isinstance(layer, LayerSpec):
                name = layer.typename.__name__
            elif isinstance(layer, nn.Module):
                name = layer.__class__.__name__
            else:
                try:
                    name = layer.__name__
                except AttributeError:
                    continue
            if typeregex.search(name):
                idxs.append(idx)
        if len(idxs) == 0:
            raise RuntimeError(
                f"Partitioning '{layername}' found no valid layers to partition.")
        return idxs
    def forward(self, forward_input):
        """Run this stage's layers over ``forward_input``, applying activation
        checkpointing every ``activation_checkpoint_interval`` layers (0 runs
        all layers without checkpointing)."""
        # We need to offset the seed by the microbatch ID. Save it in a local var to
        # ensure it is preserved in the closure. Otherwise checkpointed forward funcs
        # will see a different offset.
        self.micro_offset += 1
        def exec_range_func(start, end):
            ''' Helper function to be used with checkpoint()
            Adapted from torch.utils.checkpoint:checkpoint_sequential()
            '''
            local_micro_offset = self.micro_offset + 1
            def exec_func(*inputs):
                # Single tensor inputs need to be unwrapped
                if len(inputs) == 1:
                    inputs = inputs[0]
                for idx, layer in enumerate(self.forward_funcs[start:end]):
                    self.curr_layer = idx + self._local_start
                    if self.seed_layers:
                        new_seed = (self.base_seed *
                                    local_micro_offset) + self.curr_layer
                        if self.seed_fn:
                            self.seed_fn(new_seed)
                        else:
                            ds_utils.set_random_seed(new_seed)
                    inputs = layer(inputs)
                return inputs
            return exec_func
        if self.activation_checkpoint_interval == 0:
            func = exec_range_func(0, len(self.forward_funcs))
            x = func(forward_input)
        else:
            num_layers = len(self.forward_funcs)
            x = forward_input
            for start_idx in range(0, num_layers, self.activation_checkpoint_interval):
                end_idx = min(start_idx + self.activation_checkpoint_interval,
                              num_layers)
                funcs = self.forward_funcs[start_idx:end_idx]
                # Since we either pass tensors or tuples of tensors without unpacking, we
                # need to be careful not to double-wrap tensors with tuple.
                if not isinstance(x, tuple):
                    x = (x, )
                if self._is_checkpointable(funcs):
                    x = self.activation_checkpoint_func(
                        exec_range_func(start_idx,
                                        end_idx),
                        *x)
                else:
                    x = exec_range_func(start_idx, end_idx)(*x)
        return x
    def _partition_layers(self, method='uniform'):
        """Compute ``self.parts`` (stage boundary indices) with the chosen
        partitioning method, then set this process's local layer bounds."""
        num_stages = self._topo.get_dim('pipe')
        stage_id = self._topo.get_coord(self.global_rank).pipe
        if self.global_rank == 0:
            logger.info(f'Partitioning pipeline stages with method {method}')
        method = method.lower()
        # Each stage gets a simple uniform number of layers.
        if method == 'uniform':
            num_layers = len(self._layer_specs)
            self.parts = ds_utils.partition_uniform(num_items=num_layers,
                                                    num_parts=num_stages)
        elif method == 'parameters':
            param_counts = self._count_layer_params()
            self.parts = ds_utils.partition_balanced(weights=param_counts,
                                                     num_parts=num_stages)
        elif method.startswith('type:'):
            layertype = method.split(':')[1]
            binary_weights = [0] * len(self._layer_specs)
            for idx in self._find_layer_type(layertype):
                binary_weights[idx] = 1
            # NOTE(review): this is a for/else — the else clause always runs
            # here because the loop body contains no break.
            else:
                self.parts = ds_utils.partition_balanced(weights=binary_weights,
                                                         num_parts=num_stages)
        elif method == 'profile':
            raise NotImplementedError(f'Partitioning method {method} not implemented.')
        else:
            raise NotImplementedError(f'Partitioning method {method} not implemented.')
        # Print some information on the partitioning.
        if self.global_rank == 0:
            for stage in range(num_stages):
                start = self.parts[stage]
                stop = self.parts[stage + 1]
                print(f'stage={stage} layers={stop - start}')
                for idx, layer in enumerate(self._layer_specs[start:stop]):
                    name = str(layer)
                    if isinstance(layer, LayerSpec):
                        name = layer.typename.__name__
                    if isinstance(layer, nn.Module):
                        name = layer.__class__.__name__
                    else:
                        try:
                            name = layer.__name__
                        except AttributeError:
                            pass
                    print(f'    {idx+start:2d}: {name}')
            if self.loss_fn:
                try:
                    print(f'  loss: {self.loss_fn.__name__}')
                except AttributeError:
                    print(f'  loss: {self.loss_fn.__class__.__name__}')
        self._set_bounds(start=self.parts[stage_id], stop=self.parts[stage_id + 1])
    def allreduce_tied_weight_gradients(self):
        '''All reduce the gradients of the tied weights between tied stages'''
        for key, comm in self.tied_comms.items():
            weight = getattr(self.tied_modules[key], comm['weight_attr'])
            dist.all_reduce(weight.grad, group=comm['group'])
    def _synchronize_tied_weights(self):
        # Broadcast each tied weight from its lowest-ranked owner so all
        # replicas start from identical values.
        for key, comm in self.tied_comms.items():
            dist.broadcast(
                getattr(comm['module'],
                        comm['weight_attr']),
                src=min(comm['ranks']),
                group=comm['group'],
            )
    def _index_tied_modules(self):
        ''' Build communication structures for tied modules. '''
        tied_comms = {}
        if self._topo.get_dim('pipe') == 1:
            return tied_comms
        specs = self._layer_specs
        tie_keys = set(s.key for s in specs if isinstance(s, TiedLayerSpec))
        for key in tie_keys:
            # Find the layers that the tied module appears in
            tied_layers = []
            for idx, layer in enumerate(specs):
                if isinstance(layer, TiedLayerSpec) and layer.key == key:
                    tied_layers.append(idx)
            # Find all stages with this tied module
            # TODO: Would be nice to remove the nested data/model parallelism loops and
            # TODO: instead generalize in some way, since we really just care about the
            # TODO: stage that owns the tied layer. Then loop over each (dp, mp, ...)
            # TODO: fiber to generate process groups.
            tied_stages = set(self.stage_owner(idx) for idx in tied_layers)
            for dp in range(self._grid.data_parallel_size):
                for mp in range(self._grid.get_slice_parallel_world_size()):
                    tied_ranks = []
                    for s in sorted(tied_stages):
                        if self._grid.get_slice_parallel_world_size() > 1:
                            tied_ranks.append(
                                self._grid.stage_to_global(stage_id=s,
                                                           data=dp,
                                                           model=mp))
                        else:
                            tied_ranks.append(
                                self._grid.stage_to_global(stage_id=s,
                                                           data=dp))
                    # NOTE: new_group must be called collectively on every rank,
                    # even those not in the group.
                    group = dist.new_group(ranks=tied_ranks)
                    # Record this tied module if we own a local copy of it.
                    if self.global_rank in tied_ranks:
                        assert key in self.tied_modules
                        if key in self.tied_modules:
                            tied_comms[key] = {
                                'ranks': tied_ranks,
                                'group': group,
                                'weight_attr': self.tied_weight_attrs[key],
                                'module': self.tied_modules[key],
                            }
                            # Only count the tied module once in the eyes of the FP16 optimizer
                            if self.global_rank != tied_ranks[0]:
                                for p in self.tied_modules[key].parameters():
                                    p.ds_pipe_replicated = True
        '''
        if len(tied_comms) > 0:
            print(f'RANK={self.global_rank} tied_comms={tied_comms}')
        '''
        return tied_comms
    def partitions(self):
        """Return the stage boundary indices computed by _partition_layers."""
        return self.parts
    def stage_owner(self, layer_idx):
        """Return the pipeline stage that owns global layer ``layer_idx``."""
        assert 0 <= layer_idx < self._num_layers
        for stage in range(self._topo.get_dim('pipe')):
            if self.parts[stage] <= layer_idx < self.parts[stage + 1]:
                return stage
        raise RuntimeError(f'Layer {layer_idx} not owned? parts={self.parts}')
    def _set_bounds(self, start=None, stop=None):
        """Manually define the range of layers that will be built on this process.

        These boundaries are treated as list slices and so start is inclusive and stop is
        exclusive. The default of None for both results in all layers being built
        locally.
        """
        self._local_start = start
        self._local_stop = stop
    def set_checkpoint_interval(self, interval):
        # NOTE(review): this sets self.checkpoint_interval, while __init__ and
        # forward() read self.activation_checkpoint_interval — confirm which
        # attribute callers expect before relying on this setter.
        assert interval >= 0
        self.checkpoint_interval = interval
    def topology(self):
        """ ProcessTopology object to query process mappings. """
        return self._topo
    def mpu(self):
        """Return the PipelineParallelGrid acting as the model-parallel unit."""
        return self._grid
    def num_pipeline_stages(self):
        """Return the pipeline-parallel degree (size of the 'pipe' axis)."""
        return self._topo.get_dim('pipe')
    def ckpt_prefix(self, checkpoints_path, tag):
        """Build a prefix for all checkpoint files written by this module. """
        # All checkpoint files start with this
        rank_name = 'module'
        # Data parallelism is omitted from the naming convention because we are agnostic
        # to this in the checkpoint.
        omit_dims = frozenset(['data'])
        axes = [a for a in self._grid._topo.get_axis_names() if a not in omit_dims]
        for dim in axes:
            rank = getattr(self._grid._topo.get_coord(rank=self.global_rank), dim)
            rank_name += f'-{dim}_{rank:02d}'
        ckpt_name = os.path.join(checkpoints_path, str(tag), rank_name)
        return ckpt_name
    def ckpt_layer_path(self, ckpt_dir, local_layer_idx):
        """Customize a prefix for a specific pipeline module layer. """
        idx = local_layer_idx + self._local_start
        layer_ckpt_path = os.path.join(ckpt_dir, f'layer_{idx:02d}')
        rank_repr = self._grid._topo.get_rank_repr(rank=self.global_rank)
        if rank_repr != '':
            layer_ckpt_path += f'-{rank_repr}'
        layer_ckpt_path += '-model_states.pt'
        return layer_ckpt_path
    def ckpt_layer_path_list(self, ckpt_dir, local_layer_idx):
        """Get all ckpt file list for a specific pipeline module layer. """
        idx = local_layer_idx + self._local_start
        layer_ckpt_path = os.path.join(ckpt_dir, f'layer_{idx:02d}-')
        layer_ckpt_path += "*model_states.pt"
        ckpt_files = glob.glob(layer_ckpt_path)
        ckpt_files.sort()
        return ckpt_files
    def save_state_dict(self, save_dir):
        """Write one checkpoint file per locally-owned layer under save_dir.
        Only data-parallel rank 0 writes; other ranks return immediately."""
        if self._grid.data_parallel_id != 0:
            return
        os.makedirs(save_dir, exist_ok=True)
        layer_offset = self._local_start
        for idx, layer in enumerate(self.forward_funcs):
            model_ckpt_path = self.ckpt_layer_path(save_dir, idx)
            if not hasattr(layer, 'state_dict'):
                continue
            # We pass cloned tensors to torch.save() to avoid checkpoint bloat which occurs because torch.save()
            # saves the underlying storage rather than the slice of the storage corresponding to individual tensors.
            # This is a problem in DeepSpeed because we often allocate tensors using slices of large flattened buffers.
            # Tensor cloning helps to avoid this problem because the storage of cloned tensors are closer to the true size.
            # It is expected that the garbage collector will reclaim the cloned tensor storage to avoid memory bloat.
            # See https://pytorch.org/docs/stable/notes/serialization.html#preserve-storage-sharing
            orig_state_dict = layer.state_dict()
            final_state_dict = type(orig_state_dict)(
                {k: v.clone()
                 for k,
                 v in orig_state_dict.items()})
            torch.save(final_state_dict, model_ckpt_path)
    def load_state_dir(self, load_dir, strict=True):
        """Load each locally-owned layer's checkpoint from load_dir, then
        re-broadcast tied weights so replicas stay consistent."""
        for idx, layer in enumerate(self.forward_funcs):
            # Functions, etc. will not have state_dicts
            if not hasattr(layer, 'load_state_dict'):
                continue
            # get all checkpoint files for the layer.
            model_ckpt_list = self.ckpt_layer_path_list(load_dir, idx)
            mp_rank = self._grid.get_slice_parallel_rank()
            mp_world_size = self._grid.get_slice_parallel_world_size()
            sd_loader = SDLoaderFactory.get_sd_loader(model_ckpt_list, version=2.0)
            load_path, checkpoint, _ = sd_loader.load(mp_world_size, mp_rank, module_key=None, is_pipe_parallel=True)
            layer.load_state_dict(checkpoint)
            # if self._grid.data_parallel_id == 0:
            #     logger.info(
            #         f'RANK={self.global_rank} Loaded layer={idx+self._local_start} file={load_path}'
            #     )
        self._synchronize_tied_weights()
    def _is_checkpointable(self, funcs):
        """Return True if every function in ``funcs`` may be activation-checkpointed."""
        # This is an unfortunate hack related to torch and deepspeed activation checkpoint implementations.
        # Some layers like torch.nn.Embedding will not receive grads if checkpointed, which breaks things.
        # I presume it's related to the discrete inputs that cannot require_grad? Need to revisit.
        if self.__class__.__name__ in ('GPTModelPipe', 'GPT2ModelPipe'):
            return all('ParallelTransformerLayerPipe' in f.__class__.__name__
                       for f in funcs)
        if self.checkpointable_layers is not None:
            return all(f.__class__.__name__ in self.checkpointable_layers for f in funcs)
        params = [f.parameters() for f in funcs if isinstance(f, torch.nn.Module)]
        return any(len(list(p)) > 0 for p in params)
| 42.596748 | 167 | 0.586594 | import os
import glob
import enum
import re as regex
from collections import defaultdict
from functools import partial
import torch
import torch.nn as nn
import torch.distributed as dist
from deepspeed.utils import logger
from .. import utils as ds_utils
from ..activation_checkpointing import checkpointing
from .topology import PipeDataParallelTopology, PipelineParallelGrid
from deepspeed.runtime.state_dict_factory import SDLoaderFactory
class PipelineError(Exception):
    """Errors related to the use of deepspeed.PipelineModule."""
class LayerSpec:
    """Stores a layer class and its constructor arguments so the layer can
    be instantiated later, once its pipeline stage is known."""
    def __init__(self, typename, *module_args, **module_kwargs):
        self.typename = typename
        self.module_args = module_args
        self.module_kwargs = module_kwargs
        if not issubclass(typename, nn.Module):
            raise RuntimeError('LayerSpec only supports torch.nn.Module types.')
        # Record the creating rank for log messages; -1 when
        # torch.distributed has not been initialized.
        if dist.is_initialized():
            self.global_rank = dist.get_rank()
        else:
            self.global_rank = -1
    def __repr__(self):
        return ds_utils.call_to_str(self.typename.__name__,
                                    self.module_args,
                                    self.module_kwargs)
    def build(self, log=False):
        """Instantiate and return the stored layer class."""
        if log:
            logger.info(f'RANK={self.global_rank} building {repr(self)}')
        return self.typename(*self.module_args, **self.module_kwargs)
class TiedLayerSpec(LayerSpec):
    """LayerSpec variant for weight-tied layers: all specs sharing ``key``
    resolve to a single module instance within a PipelineModule."""
    def __init__(self,
                 key,
                 typename,
                 *module_args,
                 forward_fn=None,
                 tied_weight_attr='weight',
                 **module_kwargs):
        super().__init__(typename, *module_args, **module_kwargs)
        self.key = key
        # Optional custom forward, invoked as forward_fn(module, inputs).
        self.forward_fn = forward_fn
        # Attribute name of the tied parameter on the module (e.g. 'weight').
        self.tied_weight_attr = tied_weight_attr
class PipelineModule(nn.Module):
def __init__(self,
layers,
num_stages=None,
topology=None,
loss_fn=None,
seed_layers=False,
seed_fn=None,
base_seed=1234,
partition_method='parameters',
activation_checkpoint_interval=0,
activation_checkpoint_func=checkpointing.checkpoint,
checkpointable_layers=None):
super().__init__()
if num_stages is None and topology is None:
raise RuntimeError('must provide num_stages or topology')
self.micro_offset = 0
self.loss_fn = loss_fn
self.checkpointable_layers = checkpointable_layers
if checkpointable_layers is not None:
assert isinstance(checkpointable_layers, list), "param `checkpointable_layers` must be type of list."
self.seed_layers = seed_layers
self.seed_fn = seed_fn
self.base_seed = base_seed
if dist.get_rank() == 0:
try:
seed_str = self.seed_fn.__name__
except AttributeError:
seed_str = None
print(
f'SEED_LAYERS={self.seed_layers} BASE_SEED={self.base_seed} SEED_FN={seed_str}'
)
self.world_group = dist.new_group(ranks=range(dist.get_world_size()))
self.global_rank = dist.get_rank(group=self.world_group)
self.world_size = dist.get_world_size(group=self.world_group)
self.local_rank = int(os.environ.get("LOCAL_RANK", None))
assert self.local_rank != None
if topology:
self._topo = topology
self.num_stages = self._topo.get_dim('pipe')
else:
self.num_stages = num_stages
if topology is None:
if self.world_size % self.num_stages != 0:
raise RuntimeError(
f'num_stages ({self.num_stages}) must divide distributed world size ({self.world_size})'
)
dp = self.world_size // num_stages
topology = PipeDataParallelTopology(num_pp=num_stages, num_dp=dp)
self._topo = topology
self._grid = PipelineParallelGrid(process_group=self.world_group,
topology=self._topo)
self.stage_id = self._topo.get_coord(self.global_rank).pipe
self._layer_specs = list(layers)
self._num_layers = len(self._layer_specs)
self._local_start = 0
self._local_stop = None
self._partition_layers(method=partition_method)
self.forward_funcs = []
self.tied_modules = nn.ModuleDict()
self.tied_weight_attrs = {}
self._build()
self.to(f'cuda:{self.local_rank}')
self.tied_comms = self._index_tied_modules()
self._synchronize_tied_weights()
self.activation_checkpoint_interval = activation_checkpoint_interval
self.activation_checkpoint_func = activation_checkpoint_func
def _build(self):
specs = self._layer_specs
for local_idx, layer in enumerate(specs[self._local_start:self._local_stop]):
layer_idx = local_idx + self._local_start
if self.seed_layers:
if self.seed_fn:
self.seed_fn(self.base_seed + layer_idx)
else:
ds_utils.set_random_seed(self.base_seed + layer_idx)
if isinstance(layer, PipelineModule):
raise NotImplementedError('RECURSIVE BUILD NOT YET IMPLEMENTED')
elif isinstance(layer, nn.Module):
name = str(layer_idx)
self.forward_funcs.append(layer)
self.add_module(name, layer)
elif isinstance(layer, TiedLayerSpec):
if layer.key not in self.tied_modules:
self.tied_modules[layer.key] = layer.build()
self.tied_weight_attrs[layer.key] = layer.tied_weight_attr
if layer.forward_fn is None:
# Just use forward()
self.forward_funcs.append(self.tied_modules[layer.key])
else:
# User specified fn with args (module, input)
self.forward_funcs.append(
partial(layer.forward_fn,
self.tied_modules[layer.key]))
# LayerSpec objects contain an nn.Module that should be allocated now.
elif isinstance(layer, LayerSpec):
module = layer.build()
name = str(layer_idx)
self.forward_funcs.append(module)
self.add_module(name, module)
# Last option: layer may be a functional (e.g., lambda). We do nothing in
# that case and just use it in forward()
else:
self.forward_funcs.append(layer)
# All pipeline parameters should be considered as model parallel in the context
# of our FP16 optimizer
for p in self.parameters():
p.ds_pipe_replicated = False
def _count_layer_params(self):
param_counts = [0] * len(self._layer_specs)
for idx, layer in enumerate(self._layer_specs):
if isinstance(layer, LayerSpec):
l = layer.build()
params = filter(lambda p: p.requires_grad, l.parameters())
param_counts[idx] = sum(p.numel() for p in params)
elif isinstance(layer, nn.Module):
params = filter(lambda p: p.requires_grad, layer.parameters())
param_counts[idx] = sum(p.numel() for p in params)
return param_counts
def _find_layer_type(self, layername):
idxs = []
typeregex = regex.compile(layername, regex.IGNORECASE)
for idx, layer in enumerate(self._layer_specs):
name = None
if isinstance(layer, LayerSpec):
name = layer.typename.__name__
elif isinstance(layer, nn.Module):
name = layer.__class__.__name__
else:
try:
name = layer.__name__
except AttributeError:
continue
if typeregex.search(name):
idxs.append(idx)
if len(idxs) == 0:
raise RuntimeError(
f"Partitioning '{layername}' found no valid layers to partition.")
return idxs
def forward(self, forward_input):
# We need to offset the seed by the microbatch ID. Save it in a local var to
# ensure it is preserved in the closure. Otherwise checkpointed forward funcs
# will see a different offset.
self.micro_offset += 1
def exec_range_func(start, end):
local_micro_offset = self.micro_offset + 1
def exec_func(*inputs):
# Single tensor inputs need to be unwrapped
if len(inputs) == 1:
inputs = inputs[0]
for idx, layer in enumerate(self.forward_funcs[start:end]):
self.curr_layer = idx + self._local_start
if self.seed_layers:
new_seed = (self.base_seed *
local_micro_offset) + self.curr_layer
if self.seed_fn:
self.seed_fn(new_seed)
else:
ds_utils.set_random_seed(new_seed)
inputs = layer(inputs)
return inputs
return exec_func
if self.activation_checkpoint_interval == 0:
func = exec_range_func(0, len(self.forward_funcs))
x = func(forward_input)
else:
num_layers = len(self.forward_funcs)
x = forward_input
for start_idx in range(0, num_layers, self.activation_checkpoint_interval):
end_idx = min(start_idx + self.activation_checkpoint_interval,
num_layers)
funcs = self.forward_funcs[start_idx:end_idx]
# Since we either pass tensors or tuples of tensors without unpacking, we
# need to be careful not to double-wrap tensors with tuple.
if not isinstance(x, tuple):
x = (x, )
if self._is_checkpointable(funcs):
x = self.activation_checkpoint_func(
exec_range_func(start_idx,
end_idx),
*x)
else:
x = exec_range_func(start_idx, end_idx)(*x)
return x
def _partition_layers(self, method='uniform'):
num_stages = self._topo.get_dim('pipe')
stage_id = self._topo.get_coord(self.global_rank).pipe
if self.global_rank == 0:
logger.info(f'Partitioning pipeline stages with method {method}')
method = method.lower()
# Each stage gets a simple uniform number of layers.
if method == 'uniform':
num_layers = len(self._layer_specs)
self.parts = ds_utils.partition_uniform(num_items=num_layers,
num_parts=num_stages)
elif method == 'parameters':
param_counts = self._count_layer_params()
self.parts = ds_utils.partition_balanced(weights=param_counts,
num_parts=num_stages)
elif method.startswith('type:'):
layertype = method.split(':')[1]
binary_weights = [0] * len(self._layer_specs)
for idx in self._find_layer_type(layertype):
binary_weights[idx] = 1
else:
self.parts = ds_utils.partition_balanced(weights=binary_weights,
num_parts=num_stages)
elif method == 'profile':
raise NotImplementedError(f'Partitioning method {method} not implemented.')
else:
raise NotImplementedError(f'Partitioning method {method} not implemented.')
# Print some information on the partitioning.
if self.global_rank == 0:
for stage in range(num_stages):
start = self.parts[stage]
stop = self.parts[stage + 1]
print(f'stage={stage} layers={stop - start}')
for idx, layer in enumerate(self._layer_specs[start:stop]):
name = str(layer)
if isinstance(layer, LayerSpec):
name = layer.typename.__name__
if isinstance(layer, nn.Module):
name = layer.__class__.__name__
else:
try:
name = layer.__name__
except AttributeError:
pass
print(f' {idx+start:2d}: {name}')
if self.loss_fn:
try:
print(f' loss: {self.loss_fn.__name__}')
except AttributeError:
print(f' loss: {self.loss_fn.__class__.__name__}')
self._set_bounds(start=self.parts[stage_id], stop=self.parts[stage_id + 1])
    def allreduce_tied_weight_gradients(self):
        """All-reduce the gradients of tied weights across the ranks that replicate them."""
        for key, comm in self.tied_comms.items():
            # comm['weight_attr'] names the attribute holding the shared tensor.
            weight = getattr(self.tied_modules[key], comm['weight_attr'])
            dist.all_reduce(weight.grad, group=comm['group'])
    def _synchronize_tied_weights(self):
        """Broadcast each tied weight from its lowest owning rank so replicas start identical."""
        for key, comm in self.tied_comms.items():
            dist.broadcast(
                getattr(comm['module'],
                        comm['weight_attr']),
                src=min(comm['ranks']),
                group=comm['group'],
            )
    def _index_tied_modules(self):
        """Build communication groups for modules tied (shared) across pipeline stages.

        Returns a dict mapping each tie key to
        ``{'ranks', 'group', 'weight_attr', 'module'}`` for every tied module
        this rank holds a replica of. Every rank must execute the same
        ``dist.new_group`` calls collectively, even for groups it is not in.
        """
        tied_comms = {}
        if self._topo.get_dim('pipe') == 1:
            # Single stage: nothing can be tied across stages.
            return tied_comms
        specs = self._layer_specs
        tie_keys = set(s.key for s in specs if isinstance(s, TiedLayerSpec))
        for key in tie_keys:
            # Find the layers that the tied module appears in
            tied_layers = []
            for idx, layer in enumerate(specs):
                if isinstance(layer, TiedLayerSpec) and layer.key == key:
                    tied_layers.append(idx)
            # Find all stages with this tied module
            # TODO: Would be nice to remove the nested data/model parallelism loops and
            # TODO: instead generalize in some way, since we really just care about the
            # TODO: stage that owns the tied layer. Then loop over each (dp, mp, ...)
            # TODO: fiber to generate process groups.
            tied_stages = set(self.stage_owner(idx) for idx in tied_layers)
            for dp in range(self._grid.data_parallel_size):
                for mp in range(self._grid.get_slice_parallel_world_size()):
                    tied_ranks = []
                    for s in sorted(tied_stages):
                        if self._grid.get_slice_parallel_world_size() > 1:
                            tied_ranks.append(
                                self._grid.stage_to_global(stage_id=s,
                                                           data=dp,
                                                           model=mp))
                        else:
                            tied_ranks.append(
                                self._grid.stage_to_global(stage_id=s,
                                                           data=dp))
                    # Collective call: executed on every rank regardless of membership.
                    group = dist.new_group(ranks=tied_ranks)
                    # Record this tied module if we own a local copy of it.
                    if self.global_rank in tied_ranks:
                        assert key in self.tied_modules
                        if key in self.tied_modules:
                            tied_comms[key] = {
                                'ranks': tied_ranks,
                                'group': group,
                                'weight_attr': self.tied_weight_attrs[key],
                                'module': self.tied_modules[key],
                            }
                            # Only count the tied module once in the eyes of the FP16 optimizer
                            if self.global_rank != tied_ranks[0]:
                                for p in self.tied_modules[key].parameters():
                                    p.ds_pipe_replicated = True
        return tied_comms
    def partitions(self):
        """Return the stage boundary list ``parts`` (length == num_stages + 1)."""
        return self.parts
def stage_owner(self, layer_idx):
assert 0 <= layer_idx < self._num_layers
for stage in range(self._topo.get_dim('pipe')):
if self.parts[stage] <= layer_idx < self.parts[stage + 1]:
return stage
raise RuntimeError(f'Layer {layer_idx} not owned? parts={self.parts}')
    def _set_bounds(self, start=None, stop=None):
        """Record the half-open range [start, stop) of layers owned by this stage."""
        self._local_start = start
        self._local_stop = stop
def set_checkpoint_interval(self, interval):
assert interval >= 0
self.checkpoint_interval = interval
    def topology(self):
        """Return the process topology describing the pipe/data/model axes."""
        return self._topo
    def mpu(self):
        """Return the model-parallel grid object (MPU interface)."""
        return self._grid
    def num_pipeline_stages(self):
        """Return the number of pipeline-parallel stages."""
        return self._topo.get_dim('pipe')
def ckpt_prefix(self, checkpoints_path, tag):
# All checkpoint files start with this
rank_name = 'module'
# Data parallelism is omitted from the naming convention because we are agnostic
# to this in the checkpoint.
omit_dims = frozenset(['data'])
axes = [a for a in self._grid._topo.get_axis_names() if a not in omit_dims]
for dim in axes:
rank = getattr(self._grid._topo.get_coord(rank=self.global_rank), dim)
rank_name += f'-{dim}_{rank:02d}'
ckpt_name = os.path.join(checkpoints_path, str(tag), rank_name)
return ckpt_name
def ckpt_layer_path(self, ckpt_dir, local_layer_idx):
idx = local_layer_idx + self._local_start
layer_ckpt_path = os.path.join(ckpt_dir, f'layer_{idx:02d}')
rank_repr = self._grid._topo.get_rank_repr(rank=self.global_rank)
if rank_repr != '':
layer_ckpt_path += f'-{rank_repr}'
layer_ckpt_path += '-model_states.pt'
return layer_ckpt_path
def ckpt_layer_path_list(self, ckpt_dir, local_layer_idx):
idx = local_layer_idx + self._local_start
layer_ckpt_path = os.path.join(ckpt_dir, f'layer_{idx:02d}-')
layer_ckpt_path += "*model_states.pt"
ckpt_files = glob.glob(layer_ckpt_path)
ckpt_files.sort()
return ckpt_files
    def save_state_dict(self, save_dir):
        """Save each locally-owned layer's state dict as a separate file under *save_dir*.

        Only data-parallel rank 0 writes; replicas hold identical weights.
        """
        if self._grid.data_parallel_id != 0:
            return
        os.makedirs(save_dir, exist_ok=True)
        # NOTE(review): layer_offset is unused; the global index is applied
        # inside ckpt_layer_path instead.
        layer_offset = self._local_start
        for idx, layer in enumerate(self.forward_funcs):
            model_ckpt_path = self.ckpt_layer_path(save_dir, idx)
            if not hasattr(layer, 'state_dict'):
                # Plain functions / lambdas in the pipeline carry no weights.
                continue
            # We pass cloned tensors to torch.save() to avoid checkpoint bloat which occurs because torch.save()
            # saves the underlying storage rather than the slice of the storage corresponding to individual tensors.
            # This is a problem in DeepSpeed because we often allocate tensors using slices of large flattened buffers.
            # Tensor cloning helps to avoid this problem because the storage of cloned tensors are closer to the true size.
            # It is expected that the garbage collector will reclaim the cloned tensor storage to avoid memory bloat.
            # See https://pytorch.org/docs/stable/notes/serialization.html#preserve-storage-sharing
            orig_state_dict = layer.state_dict()
            final_state_dict = type(orig_state_dict)(
                {k: v.clone()
                 for k,
                 v in orig_state_dict.items()})
            torch.save(final_state_dict, model_ckpt_path)
    def load_state_dir(self, load_dir, strict=True):
        """Load per-layer checkpoint files from *load_dir* into the local layers.

        Uses the SD loader to merge/split model-parallel shards as needed, then
        re-broadcasts tied weights so stages stay consistent.
        """
        for idx, layer in enumerate(self.forward_funcs):
            # Functions, etc. will not have state_dicts
            if not hasattr(layer, 'load_state_dict'):
                continue
            # get all checkpoint files for the layer.
            model_ckpt_list = self.ckpt_layer_path_list(load_dir, idx)
            mp_rank = self._grid.get_slice_parallel_rank()
            mp_world_size = self._grid.get_slice_parallel_world_size()
            sd_loader = SDLoaderFactory.get_sd_loader(model_ckpt_list, version=2.0)
            load_path, checkpoint, _ = sd_loader.load(mp_world_size, mp_rank, module_key=None, is_pipe_parallel=True)
            layer.load_state_dict(checkpoint)
            # if self._grid.data_parallel_id == 0:
            #     logger.info(
            #         f'RANK={self.global_rank} Loaded layer={idx+self._local_start} file={load_path}'
            #     )
        self._synchronize_tied_weights()
def _is_checkpointable(self, funcs):
# This is an unfortunate hack related to torch and deepspeed activation checkpoint implementations.
# Some layers like torch.nn.Embedding will not receive grads if checkpointed, which breaks things.
# I presume it's related to the discrete inputs that cannot require_grad? Need to revisit.
if self.__class__.__name__ in ('GPTModelPipe', 'GPT2ModelPipe'):
return all('ParallelTransformerLayerPipe' in f.__class__.__name__
for f in funcs)
if self.checkpointable_layers is not None:
return all(f.__class__.__name__ in self.checkpointable_layers for f in funcs)
params = [f.parameters() for f in funcs if isinstance(f, torch.nn.Module)]
return any(len(list(p)) > 0 for p in params)
| true | true |
1c30ad6bcb7e56cf3e7b21d1b72d25952278c347 | 11,404 | py | Python | python_developer_tools/cv/bases/attentions/SimAM-master/mmdetection/mmdet/models/backbones/resnet_simam.py | carlsummer/python_developer_tools | a8c4365b7cc601cda55648cdfd8c0cb1faae132f | [
"Apache-2.0"
] | 32 | 2021-06-21T04:49:48.000Z | 2022-03-29T05:46:59.000Z | python_developer_tools/cv/bases/attentions/SimAM-master/mmdetection/mmdet/models/backbones/resnet_simam.py | HonestyBrave/python_developer_tools | fc0dcf5c4ef088e2e535206dc82f09bbfd01f280 | [
"Apache-2.0"
] | 1 | 2021-11-12T03:45:55.000Z | 2021-11-12T03:45:55.000Z | python_developer_tools/cv/bases/attentions/SimAM-master/mmdetection/mmdet/models/backbones/resnet_simam.py | HonestyBrave/python_developer_tools | fc0dcf5c4ef088e2e535206dc82f09bbfd01f280 | [
"Apache-2.0"
] | 10 | 2021-06-03T08:05:05.000Z | 2021-12-13T03:10:42.000Z | import torch.nn as nn
import functools
from mmcv.cnn import constant_init, kaiming_init
from mmcv.runner import load_checkpoint
from mmdet.utils import get_root_logger
from torch.nn.modules.batchnorm import _BatchNorm
from ..builder import BACKBONES
from .attentions import simam_module
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Build a bias-free 3x3 convolution whose padding equals the dilation."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """Build a bias-free 1x1 (pointwise) convolution."""
    return nn.Conv2d(in_planes,
                     out_planes,
                     kernel_size=1,
                     stride=stride,
                     bias=False)
class BasicBlock(nn.Module):
    """ResNet basic residual block (two 3x3 convs), optionally wrapped with an
    attention module such as SimAM."""

    expansion = 1

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 groups=1,
                 base_width=64,
                 dilation=1,
                 norm_layer=None,
                 attention_module=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (and downsample, if any) perform the spatial downsampling.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride
        if attention_module is not None:
            # functools.partial wraps the real class; unwrap to query its name.
            target = (attention_module.func
                      if type(attention_module) == functools.partial
                      else attention_module)
            if target.get_module_name() == "simam":
                # SimAM is inserted right after conv2, before its BN.
                self.conv2 = nn.Sequential(self.conv2, attention_module(planes))
            else:
                # Other attention variants act after the last BN.
                self.bn2 = nn.Sequential(self.bn2, attention_module(planes))

    def forward(self, x):
        # Identity (or projected) shortcut.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y = y + shortcut
        return self.relu(y)
class Bottleneck(nn.Module):
    """ResNet bottleneck residual block (1x1 -> 3x3 -> 1x1 convs), optionally
    wrapped with an attention module such as SimAM."""

    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 groups=1,
                 base_width=64,
                 dilation=1,
                 norm_layer=None,
                 attention_module=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Width of the middle 3x3 conv, scaled by base_width and groups.
        width = int(planes * (base_width / 64.)) * groups
        # conv2 (and downsample, if any) perform the spatial downsampling.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        if attention_module is not None:
            # functools.partial wraps the real class; unwrap to query its name.
            target = (attention_module.func
                      if type(attention_module) == functools.partial
                      else attention_module)
            if target.get_module_name() == "simam":
                # SimAM is inserted right after conv2, before its BN.
                self.conv2 = nn.Sequential(self.conv2, attention_module(width))
            else:
                # Other attention variants act after the last BN.
                self.bn3 = nn.Sequential(self.bn3,
                                         attention_module(planes * self.expansion))

    def forward(self, x):
        # Identity (or projected) shortcut.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        return self.relu(y + shortcut)
@BACKBONES.register_module()
class ResNetAM(nn.Module):
    """ResNet backbone for mmdetection with optional attention modules (e.g. SimAM).

    ``forward`` returns the feature maps of the four residual stages as a
    tuple, as expected by FPN-style necks.
    """
    # depth -> (block class, number of blocks per stage)
    arch_settings = {
        18: (BasicBlock, (2, 2, 2, 2)),
        34: (BasicBlock, (3, 4, 6, 3)),
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }
    def __init__(self,
                 depth,
                 groups=1,
                 width_per_group=64,
                 replace_stride_with_dilation=None,
                 norm_layer=None,
                 norm_eval=True,
                 frozen_stages=-1,
                 attention_type="none",
                 attention_param=None,
                 zero_init_residual=False):
        super(ResNetAM, self).__init__()
        if depth not in self.arch_settings:
            raise KeyError(f'invalid depth {depth} for resnet')
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        self.norm_eval = norm_eval          # keep BN in eval mode during training
        self.frozen_stages = frozen_stages  # -1 disables freezing
        self.zero_init_residual = zero_init_residual
        block, stage_blocks = self.arch_settings[depth]
        if attention_type == "simam":
            # Pre-bind the SimAM hyper-parameter; blocks instantiate per-width.
            attention_module = functools.partial(simam_module, e_lambda=attention_param)
        else:
            attention_module = None
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool (overall stride 4).
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu1 = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, stage_blocks[0],
                                       attention_module=attention_module)
        self.layer2 = self._make_layer(block, 128, stage_blocks[1], stride=2,
                                       dilate=replace_stride_with_dilation[0],
                                       attention_module=attention_module)
        self.layer3 = self._make_layer(block, 256, stage_blocks[2], stride=2,
                                       dilate=replace_stride_with_dilation[1],
                                       attention_module=attention_module)
        self.layer4 = self._make_layer(block, 512, stage_blocks[3], stride=2,
                                       dilate=replace_stride_with_dilation[2],
                                       attention_module=attention_module)
        # Classification head; dropped again in init_weights (detection backbone).
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, 1000)
    def _make_layer(self, block, planes, blocks, stride=1, dilate=False, attention_module=None):
        """Build one residual stage of *blocks* blocks; only the first may downsample."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade stride for dilation to keep spatial resolution.
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut when shape changes.
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer, attention_module))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer, attention_module=attention_module))
        return nn.Sequential(*layers)
    def _freeze_stages(self):
        """Freeze the stem and the first ``frozen_stages`` residual stages."""
        if self.frozen_stages >= 0:
            self.bn1.eval()
            for m in [self.conv1, self.bn1]:
                for param in m.parameters():
                    param.requires_grad = False
            for i in range(1, self.frozen_stages + 1):
                m = getattr(self, f'layer{i}')
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False
    def init_weights(self, pretrained=None):
        """Initialize the weights in backbone.
        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        # Drop the classification head -- the detection backbone never uses it
        # (also avoids loading/saving its weights with the checkpoint).
        self.fc = None
        self.avgpool = None
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
            if self.zero_init_residual:
                # Zero the last BN of each block so it starts as identity.
                # NOTE(review): blocks define bn2/bn3, not norm2/norm3 -- confirm
                # these attribute names against the block classes.
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')
    def forward(self, x):
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu1(x)
        x = self.maxpool(x)
        outs = []
        x = self.layer1(x)
        outs.append(x)
        x = self.layer2(x)
        outs.append(x)
        x = self.layer3(x)
        outs.append(x)
        x = self.layer4(x)
        outs.append(x)
        # One feature map per stage, for FPN-style consumers.
        return tuple(outs)
    def train(self, mode=True):
        """Convert the model into training mode while keep normalization layer
        freezed."""
        super(ResNetAM, self).train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                # trick: eval have effect on BatchNorm only
                if isinstance(m, _BatchNorm):
                    m.eval()
import functools
from mmcv.cnn import constant_init, kaiming_init
from mmcv.runner import load_checkpoint
from mmdet.utils import get_root_logger
from torch.nn.modules.batchnorm import _BatchNorm
from ..builder import BACKBONES
from .attentions import simam_module
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Build a bias-free 3x3 convolution whose padding equals the dilation."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """Build a bias-free 1x1 (pointwise) convolution."""
    return nn.Conv2d(in_planes,
                     out_planes,
                     kernel_size=1,
                     stride=stride,
                     bias=False)
class BasicBlock(nn.Module):
    """ResNet basic residual block (two 3x3 convs), optionally wrapped with an
    attention module such as SimAM."""

    expansion = 1

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 groups=1,
                 base_width=64,
                 dilation=1,
                 norm_layer=None,
                 attention_module=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (and downsample, if any) perform the spatial downsampling.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride
        if attention_module is not None:
            # functools.partial wraps the real class; unwrap to query its name.
            target = (attention_module.func
                      if type(attention_module) == functools.partial
                      else attention_module)
            if target.get_module_name() == "simam":
                # SimAM is inserted right after conv2, before its BN.
                self.conv2 = nn.Sequential(self.conv2, attention_module(planes))
            else:
                # Other attention variants act after the last BN.
                self.bn2 = nn.Sequential(self.bn2, attention_module(planes))

    def forward(self, x):
        # Identity (or projected) shortcut.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y = y + shortcut
        return self.relu(y)
class Bottleneck(nn.Module):
    """ResNet bottleneck residual block (1x1 -> 3x3 -> 1x1 convs), optionally
    wrapped with an attention module such as SimAM."""

    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 groups=1,
                 base_width=64,
                 dilation=1,
                 norm_layer=None,
                 attention_module=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Width of the middle 3x3 conv, scaled by base_width and groups.
        width = int(planes * (base_width / 64.)) * groups
        # conv2 (and downsample, if any) perform the spatial downsampling.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        if attention_module is not None:
            # functools.partial wraps the real class; unwrap to query its name.
            target = (attention_module.func
                      if type(attention_module) == functools.partial
                      else attention_module)
            if target.get_module_name() == "simam":
                # SimAM is inserted right after conv2, before its BN.
                self.conv2 = nn.Sequential(self.conv2, attention_module(width))
            else:
                # Other attention variants act after the last BN.
                self.bn3 = nn.Sequential(self.bn3,
                                         attention_module(planes * self.expansion))

    def forward(self, x):
        # Identity (or projected) shortcut.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        return self.relu(y + shortcut)
@BACKBONES.register_module()
class ResNetAM(nn.Module):
    """ResNet backbone for mmdetection with optional attention modules (e.g. SimAM).

    ``forward`` returns the feature maps of the four residual stages as a
    tuple, as expected by FPN-style necks.
    """
    # depth -> (block class, number of blocks per stage)
    arch_settings = {
        18: (BasicBlock, (2, 2, 2, 2)),
        34: (BasicBlock, (3, 4, 6, 3)),
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }
    def __init__(self,
                 depth,
                 groups=1,
                 width_per_group=64,
                 replace_stride_with_dilation=None,
                 norm_layer=None,
                 norm_eval=True,
                 frozen_stages=-1,
                 attention_type="none",
                 attention_param=None,
                 zero_init_residual=False):
        super(ResNetAM, self).__init__()
        if depth not in self.arch_settings:
            raise KeyError(f'invalid depth {depth} for resnet')
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        self.norm_eval = norm_eval          # keep BN in eval mode during training
        self.frozen_stages = frozen_stages  # -1 disables freezing
        self.zero_init_residual = zero_init_residual
        block, stage_blocks = self.arch_settings[depth]
        if attention_type == "simam":
            # Pre-bind the SimAM hyper-parameter; blocks instantiate per-width.
            attention_module = functools.partial(simam_module, e_lambda=attention_param)
        else:
            attention_module = None
        if replace_stride_with_dilation is None:
            # Each element says whether that stage trades its stride-2 for dilation.
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool (overall stride 4).
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu1 = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, stage_blocks[0],
                                       attention_module=attention_module)
        self.layer2 = self._make_layer(block, 128, stage_blocks[1], stride=2,
                                       dilate=replace_stride_with_dilation[0],
                                       attention_module=attention_module)
        self.layer3 = self._make_layer(block, 256, stage_blocks[2], stride=2,
                                       dilate=replace_stride_with_dilation[1],
                                       attention_module=attention_module)
        self.layer4 = self._make_layer(block, 512, stage_blocks[3], stride=2,
                                       dilate=replace_stride_with_dilation[2],
                                       attention_module=attention_module)
        # Classification head; dropped again in init_weights (detection backbone).
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, 1000)
    def _make_layer(self, block, planes, blocks, stride=1, dilate=False, attention_module=None):
        """Build one residual stage of *blocks* blocks; only the first may downsample."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade stride for dilation to keep spatial resolution.
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut when shape changes.
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer, attention_module))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer, attention_module=attention_module))
        return nn.Sequential(*layers)
    def _freeze_stages(self):
        """Freeze the stem and the first ``frozen_stages`` residual stages."""
        if self.frozen_stages >= 0:
            self.bn1.eval()
            for m in [self.conv1, self.bn1]:
                for param in m.parameters():
                    param.requires_grad = False
            for i in range(1, self.frozen_stages + 1):
                m = getattr(self, f'layer{i}')
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False
    def init_weights(self, pretrained=None):
        """Initialize backbone weights, optionally from a checkpoint path."""
        # Drop the classification head -- the detection backbone never uses it.
        self.fc = None
        self.avgpool = None
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
            if self.zero_init_residual:
                # Zero the last BN of each block so it starts as identity.
                # NOTE(review): blocks define bn2/bn3, not norm2/norm3 -- confirm
                # these attribute names against the block classes.
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')
    def forward(self, x):
        """Run the stem and four stages; return one feature map per stage."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu1(x)
        x = self.maxpool(x)
        outs = []
        x = self.layer1(x)
        outs.append(x)
        x = self.layer2(x)
        outs.append(x)
        x = self.layer3(x)
        outs.append(x)
        x = self.layer4(x)
        outs.append(x)
        return tuple(outs)
    def train(self, mode=True):
        """Enter train mode but keep frozen stages and (optionally) BN in eval."""
        super(ResNetAM, self).train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                # eval() only changes behavior for normalization layers here.
                if isinstance(m, _BatchNorm):
                    m.eval()
1c30af1115be6d8b463a13f730dad74ecb43e278 | 11,483 | py | Python | script/main.py | zementalist/Face-Landmarks-Detection-Highly-Improved | 2dea823e2b496153fc3431007a1d4c3e3ea74c42 | [
"Apache-2.0"
] | null | null | null | script/main.py | zementalist/Face-Landmarks-Detection-Highly-Improved | 2dea823e2b496153fc3431007a1d4c3e3ea74c42 | [
"Apache-2.0"
] | null | null | null | script/main.py | zementalist/Face-Landmarks-Detection-Highly-Improved | 2dea823e2b496153fc3431007a1d4c3e3ea74c42 | [
"Apache-2.0"
] | null | null | null | import cv2
import dlib
from imutils import face_utils
import numpy as np
from scipy.spatial import Delaunay
import matplotlib.pyplot as plt
from matplotlib import transforms
import os
import math
from geometry import slope
def load_images_from_folder(folder):
    """Read every file in *folder* as a grayscale image.

    Returns:
        (images, filenames): parallel lists; files OpenCV cannot decode
        are silently skipped.
    """
    images, filenames = [], []
    for filename in os.listdir(folder):
        image = cv2.imread(os.path.join(folder, filename), cv2.IMREAD_GRAYSCALE)
        if image is None:
            # Not an image (or unreadable) -- skip it.
            continue
        images.append(image)
        filenames.append(filename)
    return images, filenames
def getAllowedColorRange(avgSkinColor):
    """Map an average grayscale skin intensity to the (low, high) band of
    intensities still considered skin.

    Darker skins get a wider upward margin; very bright skins get a narrow
    band (near the 255 ceiling). Guard-clause form of the original tone ladder.
    """
    if avgSkinColor < 100:          # dark skin
        return (avgSkinColor - 35, avgSkinColor + 50)
    if avgSkinColor <= 130:         # somewhat dark skin
        return (avgSkinColor - 30, avgSkinColor + 30)
    if avgSkinColor <= 160:         # normal, tending dark
        return (avgSkinColor - 40, avgSkinColor + 40)
    if avgSkinColor < 180:          # normal skin color
        return (avgSkinColor - 50, avgSkinColor + 50)
    if avgSkinColor < 210:          # normal, tending white
        return (avgSkinColor - 50, avgSkinColor + 30)
    if avgSkinColor < 230:          # white skin
        return (avgSkinColor - 40, avgSkinColor + 20)
    return (avgSkinColor - 30, avgSkinColor + 15)   # abnormally bright
def moveUp(grayscale_image, point, avgSkinColor, foreheadHeight):
    # Function to move landmarks points, based on skincolor
    """Slide a landmark upwards while it still lies on skin-coloured pixels.

    Args:
        grayscale_image: 2-D grayscale array, indexed as ``item(y, x)``.
        point: mutable ``[x, y]`` array; modified in place and returned.
        avgSkinColor: average grayscale skin intensity used to derive the band.
        foreheadHeight: pixel height of the forehead crop; caps the travel.
    Returns:
        The (mutated) point.
    """
    # Get color range & current color where the point is located in image
    steps = 5                        # pixels moved per iteration
    portionOfOriginalPointY = 0.275  # fallback offset, as a fraction of the original y
    originalPoint = np.copy(point)
    colorRange = getAllowedColorRange(avgSkinColor)
    currentPixelColor = grayscale_image.item(point[1],point[0])
    # move the landmark point up until a strong change of color happen (outside color range)
    while currentPixelColor > colorRange[0] and currentPixelColor < colorRange[1]:
        # If point is going out of image boundary
        if point[1] < 0:
            # Get back to original point location, with a little bit higher
            point[1] = originalPoint[1] - (originalPoint[1] * portionOfOriginalPointY)
            break
        # move up (N steps) pixels & get the color
        point[1] = point[1] - steps
        currentPixelColor = grayscale_image.item(point[1],point[0])
        # if the pixel is moved too high than expected (3/4 forehead height): keep close to original
        if abs( originalPoint[1] - point[1] ) > ( foreheadHeight * 0.75 ):
            point[1] = originalPoint[1] - (originalPoint[1] * portionOfOriginalPointY)
            # NOTE(review): the loop condition is re-evaluated with the colour read
            # *before* this reset; if the reset position is still on skin the loop
            # can oscillate between the reset point and one step above it -- confirm
            # termination on uniformly skin-coloured columns.
    return point
def clearForehead(forehead, avgSkinColor):
    # Function to detect if the forehead is clear or covered with hair (it corrupts the enhancement of landmarks points)
    """Return True when the forehead crop is mostly bare skin.

    Args:
        forehead: 2-D grayscale numpy array of the forehead region.
        avgSkinColor: average grayscale skin intensity.
    Returns:
        bool: True when at least 85% of the pixels fall inside the allowed
        skin-colour band; False otherwise, including an empty crop.
    """
    clarityThreshold = 85  # percent of pixels that must look like skin
    colorRange = getAllowedColorRange(avgSkinColor)
    # Bug fix: the percentage computation was guarded by a bare ``except:``
    # that swallowed *every* exception (even KeyboardInterrupt). The only
    # expected failure is dividing by a zero-sized crop, so test explicitly.
    if forehead.size == 0:
        return False
    # Check if most of the forehead is the same as skin color
    regionOK = np.logical_and(forehead > colorRange[0], forehead < colorRange[1])
    percentage = (np.count_nonzero(regionOK) / forehead.size) * 100
    return percentage >= clarityThreshold
def facial_landmarks(image, eyeOnlyMode=False, allowEnhancement=False):
    # Function to perform facial landmark detection on the whole face
    # Use dlib 68 & 81 to predict landmarks points coordinates
    """Detect 81 facial landmarks for the first face found in *image*.

    Args:
        image: BGR or grayscale image.
        eyeOnlyMode: if True, return only the two inner-eye points
            (used by align_face for rotation).
        allowEnhancement: if True, run the heuristic forehead-point
            refinement (skin-colour based); otherwise apply only the simple
            fixed offsets.
    Returns:
        np.ndarray of landmark points, or None when no face is detected.
    """
    detector = dlib.get_frontal_face_detector()
    predictor68 = dlib.shape_predictor('../shape_predictor_68_face_landmarks.dat')
    predictor81 = dlib.shape_predictor('../shape_predictor_81_face_landmarks.dat')
    # Grayscale image
    try:
        grayscale_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    except:
        # Already single-channel; cvtColor rejects it.
        grayscale_image = image
    # array of rectangles surrounding faces detected
    rectangles = detector(grayscale_image, 1)
    # If at least one face is detected
    if len(rectangles) > 0:
        # Get 68 landmark points
        faceLandmarks = predictor68(grayscale_image, rectangles[0])
        faceLandmarks = face_utils.shape_to_np(faceLandmarks)
        if eyeOnlyMode:
            # Return eye points to perform a calculated rotation
            return np.array([faceLandmarks[39], faceLandmarks[42]])
        # Get 81 landmark points
        foreheadLandmarks = predictor81(grayscale_image, rectangles[0])
        foreheadLandmarks = face_utils.shape_to_np(foreheadLandmarks)
        # Get 68 point from -68- predictor (higher accuracy) + forehead from -81- predictor
        fullFacePoints = np.concatenate((faceLandmarks, foreheadLandmarks[68:]))
        # Get forehead region & height to perform simple improvement
        x,y,x2,y2 = (fullFacePoints[69,0]-10, fullFacePoints[68,1], fullFacePoints[80,0]+10, fullFacePoints[23, 1])
        foreheadRegion = grayscale_image[y:y2,x:x2]
        foreheadHeight = foreheadRegion.shape[0]
        if allowEnhancement:
            # Perform progressive quality improvement
            # Get nose region to get average skin color
            x,y,x2,y2 = (fullFacePoints[28,0]-5, fullFacePoints[28,1], fullFacePoints[28,0]+5, fullFacePoints[30,1])
            noseRegion = grayscale_image[y:y2, x:x2]
            avgSkinColor = np.average(noseRegion[:,:])
            # Check if forehead is clear -> perform heuristic based enhancement
            forehead_is_clear = clearForehead(foreheadRegion, avgSkinColor)
            originalPoints = fullFacePoints[[69,70,71,73,80]]
            if forehead_is_clear:
                avgSkinColor = np.average(foreheadRegion)
                # Modify some points for more accuracy
                # Point[68] will be center between lower-lip & chin
                distance = int((fullFacePoints[8,1]-fullFacePoints[57,1]) / 2)
                fullFacePoints[68] = np.array([fullFacePoints[8,0], fullFacePoints[8,1]-distance])
                # Enhance points locations
                enhancedPoints = np.array([moveUp(grayscale_image, orgPoint, avgSkinColor, foreheadHeight) for orgPoint in originalPoints])
                # Assign original points to enhanced points (some maybe the same)
                fullFacePoints[[69,70,71,73,80]] = enhancedPoints
                # Adjust points to fix any corruptions
                fullFacePoints[[69,70,71,73,80]] = adjustPoints(enhancedPoints, fullFacePoints[76], fullFacePoints[79])
                #Prepare point[72] for center of forehead
                distance = (fullFacePoints[22,0] - fullFacePoints[21,0]) / 2
                distanceY = (fullFacePoints[21,1] - fullFacePoints[71,1]) / 2
                fullFacePoints[72] = np.array([fullFacePoints[21,0] + distance, fullFacePoints[21,1]-distanceY])
                # Point[74] sometimes have a fixed corruption, this line helps :)
                fullFacePoints[74,0] -= foreheadHeight * 0.1 # Arbitery heurestic
            else:
                # If forehead isn't clear -> fix points with very simple heuristics
                fullFacePoints[70,1] -= foreheadHeight * 0.2
                fullFacePoints[71,1] -= foreheadHeight * 0.3
                fullFacePoints[80,1] -= foreheadHeight * 0.2
        else:
            # If Enhancement is False -> do the simple enhancement, better quality + low performance :)
            fullFacePoints[70,1] -= foreheadHeight * 0.2
            fullFacePoints[71,1] -= foreheadHeight * 0.3
            fullFacePoints[80,1] -= foreheadHeight * 0.2
            pass
        return fullFacePoints
    # No faces found
    else:
        return None
def adjustPoints(points, leftSidePoint, rightSidePoint):
    # Function to adjust landmarks points of the forehead & fix corruptions of improvement
    # Use shape_predictor_81 as a reference for points indexes:
    # points = [69,70,71,73,80]
    # LeftSidePoint = 76 | rightSidePoint = 79
    # NOTE: `points` is a numpy array and is modified in place (and also returned).
    slopes = []
    slopeThreshold = 0.4  # slope > 0.4 = corruption -> fix
    totalSlopeThreshold = 1  # sum of slopes > 1 = corruption -> fix
    leftPoint = points[0]
    rightPoint = points[3]
    criticalLeftPoint = points[1]
    criticalRightPoint = points[4]
    # if any point is higher than a (accurate located point) -> fix
    # (image y grows downwards, so "higher" means a smaller y value)
    if leftPoint[1] < criticalLeftPoint[1]:
        points[0, 1] = np.average([criticalLeftPoint[1], leftSidePoint[1]])
    if rightPoint[1] < criticalRightPoint[1]:
        points[3, 1] = np.average([criticalRightPoint[1], rightSidePoint[1]])
    # Collect some slopes of the usually corrupted points
    # (`slope` comes from the project-local `geometry` module; presumably the
    # third argument requests an absolute/normalized slope -- TODO confirm)
    slopes.append(slope(points[1], points[2], True))
    slopes.append(slope(points[2], points[4], True))
    # Calculate slope differences & sum
    difference = abs(np.diff(slopes))
    _sum = np.sum(slopes)
    # If calculation results (either) too high = corruption -> fix
    if difference > slopeThreshold:
        # Only the steeper side is considered corrupted; clamp it down to the
        # lower (larger-y) of its neighbours.
        issueIndex = np.argmax(slopes)
        if issueIndex == 0:
            points[1, 1] = max(points[4, 1], points[2, 1])
        else:
            points[4, 1] = max(points[1, 1], points[2, 1])
    if _sum > totalSlopeThreshold:
        # Severe corruption: smooth all three top points by averaging their
        # neighbours (note each average uses the already-updated values).
        points[1, 1] = np.average(points[[4, 2], 1])
        points[4, 1] = np.average(points[[1, 2], 1])
        points[2, 1] = np.average(points[[4, 1], 1])
    return points
def align_face(image, eyePoints):
    """Rotate *image* around its center so that the eye line becomes horizontal.

    Args:
        image: Input image as a numpy array (H x W [x C]).
        eyePoints: Sequence of two (x, y) eye coordinates: [left eye, right eye].

    Returns:
        The rotated image, same size as the input.
    """
    # Get left eye & right eye coordinates
    leftEyeX, leftEyeY = eyePoints[0]
    rightEyeX, rightEyeY = eyePoints[1]
    # atan2 avoids the ZeroDivisionError the plain atan(dy/dx) formula hits
    # when both eyes share the same x coordinate; folding the result back
    # into atan's (-90, 90] range reproduces the original angle for every
    # non-degenerate eye pair.
    angle = math.degrees(math.atan2(leftEyeY - rightEyeY, leftEyeX - rightEyeX))
    if angle > 90:
        angle -= 180
    elif angle < -90:
        angle += 180
    # Rotate about the image center using a rotation matrix
    origin_point = tuple(np.array(image.shape[1::-1]) / 2)
    rot_mat = cv2.getRotationMatrix2D(origin_point, angle, 1.0)
    result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
    return result
def drawPoints(image, points, pointColor=(255,255,255), pointThickness=None):
    """Return a copy of *image* with every (x, y) landmark drawn as a dot."""
    if pointThickness is None:
        # Scale the dot size with the image width (7 px per 1200 px of width).
        pointThickness = round((7 / 1200) * image.shape[1])
    canvas = image.copy()
    for (x, y) in points:
        canvas = cv2.circle(canvas, (x, y), radius=0, color=pointColor, thickness=pointThickness)
    return canvas
def main():
    """Detect, enhance and display facial landmarks for every image in '.'."""
    # Create the preview window at a fixed screen position.
    winname = "Test"
    cv2.namedWindow(winname)
    cv2.moveWindow(winname, 40, 30)
    # Load every readable image in the current folder (names kept for reference).
    images, filesnames = load_images_from_folder('.')
    for originalImage in images:
        # Show the raw image first; any key advances.
        cv2.imshow(winname, originalImage)
        cv2.waitKey(0)
        eyePoints = facial_landmarks(originalImage, eyeOnlyMode=True)
        if eyePoints is not None:
            # Align the face, re-detect with enhancement, and overlay the points.
            image = align_face(originalImage, eyePoints)
            improved_landmarks = facial_landmarks(image, allowEnhancement=True)
            image = drawPoints(image, improved_landmarks)
            cv2.imshow(winname, image)
            cv2.waitKey(0)
main()
| 40.15035 | 139 | 0.645476 | import cv2
import dlib
from imutils import face_utils
import numpy as np
from scipy.spatial import Delaunay
import matplotlib.pyplot as plt
from matplotlib import transforms
import os
import math
from geometry import slope
def load_images_from_folder(folder):
images = []
filenames = []
for filename in os.listdir(folder):
img = cv2.imread(os.path.join(folder, filename), cv2.IMREAD_GRAYSCALE)
if img is not None:
images.append(img)
filenames.append(filename)
return images, filenames
def getAllowedColorRange(avgSkinColor):
if (avgSkinColor < 100):
colorRange = (avgSkinColor-35, avgSkinColor+50)
elif(avgSkinColor <= 130):
colorRange = (avgSkinColor-30, avgSkinColor+30)
elif(avgSkinColor <= 160):
colorRange = (avgSkinColor-40, avgSkinColor+40)
elif(avgSkinColor < 180):
colorRange = (avgSkinColor-50, avgSkinColor+50)
elif(avgSkinColor < 210):
colorRange = (avgSkinColor-50, avgSkinColor+30)
elif (avgSkinColor < 230):
colorRange = (avgSkinColor-40, avgSkinColor+20)
else:
colorRange = (avgSkinColor-30, avgSkinColor+15)
return colorRange
def moveUp(grayscale_image, point, avgSkinColor, foreheadHeight):
steps = 5
portionOfOriginalPointY = 0.275
originalPoint = np.copy(point)
colorRange = getAllowedColorRange(avgSkinColor)
currentPixelColor = grayscale_image.item(point[1],point[0])
while currentPixelColor > colorRange[0] and currentPixelColor < colorRange[1]:
if point[1] < 0:
point[1] = originalPoint[1] - (originalPoint[1] * portionOfOriginalPointY)
break
point[1] = point[1] - steps
currentPixelColor = grayscale_image.item(point[1],point[0])
if abs( originalPoint[1] - point[1] ) > ( foreheadHeight * 0.75 ):
point[1] = originalPoint[1] - (originalPoint[1] * portionOfOriginalPointY)
return point
def clearForehead(forehead, avgSkinColor):
clarityThreshold = 85
colorRange = getAllowedColorRange(avgSkinColor)
regionOK = np.logical_and(forehead > colorRange[0] , forehead < colorRange[1])
try:
percentage = (np.count_nonzero(regionOK) / forehead.size) * 100
except:
return False
isClear = True if percentage >= clarityThreshold else False
return isClear
def facial_landmarks(image, eyeOnlyMode=False, allowEnhancement=False):
detector = dlib.get_frontal_face_detector()
predictor68 = dlib.shape_predictor('../shape_predictor_68_face_landmarks.dat')
predictor81 = dlib.shape_predictor('../shape_predictor_81_face_landmarks.dat')
try:
grayscale_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
except:
grayscale_image = image
rectangles = detector(grayscale_image, 1)
if len(rectangles) > 0:
faceLandmarks = predictor68(grayscale_image, rectangles[0])
faceLandmarks = face_utils.shape_to_np(faceLandmarks)
if eyeOnlyMode:
return np.array([faceLandmarks[39], faceLandmarks[42]])
foreheadLandmarks = predictor81(grayscale_image, rectangles[0])
foreheadLandmarks = face_utils.shape_to_np(foreheadLandmarks)
fullFacePoints = np.concatenate((faceLandmarks, foreheadLandmarks[68:]))
x,y,x2,y2 = (fullFacePoints[69,0]-10, fullFacePoints[68,1], fullFacePoints[80,0]+10, fullFacePoints[23, 1])
foreheadRegion = grayscale_image[y:y2,x:x2]
foreheadHeight = foreheadRegion.shape[0]
if allowEnhancement:
x,y,x2,y2 = (fullFacePoints[28,0]-5, fullFacePoints[28,1], fullFacePoints[28,0]+5, fullFacePoints[30,1])
noseRegion = grayscale_image[y:y2, x:x2]
avgSkinColor = np.average(noseRegion[:,:])
forehead_is_clear = clearForehead(foreheadRegion, avgSkinColor)
originalPoints = fullFacePoints[[69,70,71,73,80]]
if forehead_is_clear:
avgSkinColor = np.average(foreheadRegion)
distance = int((fullFacePoints[8,1]-fullFacePoints[57,1]) / 2)
fullFacePoints[68] = np.array([fullFacePoints[8,0], fullFacePoints[8,1]-distance])
enhancedPoints = np.array([moveUp(grayscale_image, orgPoint, avgSkinColor, foreheadHeight) for orgPoint in originalPoints])
fullFacePoints[[69,70,71,73,80]] = enhancedPoints
fullFacePoints[[69,70,71,73,80]] = adjustPoints(enhancedPoints, fullFacePoints[76], fullFacePoints[79])
distance = (fullFacePoints[22,0] - fullFacePoints[21,0]) / 2
distanceY = (fullFacePoints[21,1] - fullFacePoints[71,1]) / 2
fullFacePoints[72] = np.array([fullFacePoints[21,0] + distance, fullFacePoints[21,1]-distanceY])
fullFacePoints[74,0] -= foreheadHeight * 0.1
else:
fullFacePoints[70,1] -= foreheadHeight * 0.2
fullFacePoints[71,1] -= foreheadHeight * 0.3
fullFacePoints[80,1] -= foreheadHeight * 0.2
else:
# If Enhancement is False -> do the simple enhancement, better quality + low performance :)
fullFacePoints[70,1] -= foreheadHeight * 0.2
fullFacePoints[71,1] -= foreheadHeight * 0.3
fullFacePoints[80,1] -= foreheadHeight * 0.2
pass
return fullFacePoints
# No faces found
else:
return None
def adjustPoints(points, leftSidePoint, rightSidePoint):
# Function to adjust landmarks points of the forehead & fix corruptions of improvement
# Use shape_predictor_81 as a reference for points indexes:
# points = [69,70,71,73,80]
# LeftSidePoint = 76 | rightSidePoint = 79
slopes = []
slopeThreshold = 0.4 # slope > 0.4 = corruption -> fix
totalSlopeThreshold = 1 # sum of slopes > 1 = corruption -> fix
leftPoint = points[0]
rightPoint = points[3]
criticalLeftPoint = points[1]
criticalRightPoint = points[4]
# if any point is higher than a (accurate located point) -> fix
if leftPoint[1] < criticalLeftPoint[1] :
points[0,1] = np.average([criticalLeftPoint[1], leftSidePoint[1]])
if rightPoint[1] < criticalRightPoint[1]:
points[3,1] = np.average([criticalRightPoint[1], rightSidePoint[1]])
# Collect some slopes of the usually corrupted points
slopes.append(slope(points[1], points[2], True))
slopes.append(slope(points[2], points[4], True))
# Calculate slope differences & sum
difference = abs(np.diff(slopes))
_sum = np.sum(slopes)
# If calculation results (either) too high = corruption -> fix
if difference > slopeThreshold:
issueIndex = np.argmax(slopes)
if issueIndex == 0:
points[1,1] = max(points[4,1], points[2,1])
else:
points[4,1] = max(points[1,1], points[2,1])
if _sum > totalSlopeThreshold:
points[1,1] = np.average(points[[4,2], 1])
points[4,1] = np.average(points[[1,2], 1])
points[2,1] = np.average(points[[4,1], 1])
return points
def align_face(image, eyePoints):
# Function to rotate image to align the face
# Get left eye & right eye coordinates
leftEyeX,leftEyeY = eyePoints[0]
rightEyeX, rightEyeY = eyePoints[1]
# Calculate angle of rotation & origin point
angle = math.atan( (leftEyeY - rightEyeY) / (leftEyeX - rightEyeX) ) * (180/math.pi)
origin_point = tuple(np.array(image.shape[1::-1]) / 2)
# Rotate using rotation matrix
rot_mat = cv2.getRotationMatrix2D(origin_point, angle, 1.0)
result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
return result
def drawPoints(image, points, pointColor=(255,255,255), pointThickness=None):
# Function to draw points on facial features
if pointThickness is None:
pointThickness = round((7/1200) * image.shape[1])
imgcopy = image.copy()
for i in points:
x,y = i
imgcopy = cv2.circle(imgcopy, (x,y), radius=0, color=pointColor, thickness=pointThickness)
return imgcopy
def main():
# Move opencv window
winname = "Test"
cv2.namedWindow(winname)
cv2.moveWindow(winname, 40,30)
# Capture all images in current folder & their names
images, filesnames = load_images_from_folder('.')
# Detect & Visualize each image
for i in range(0,len(images)):
originalImage = images[i]
cv2.imshow(winname, originalImage)
cv2.waitKey(0)
eyePoints = facial_landmarks(originalImage, eyeOnlyMode=True)
if eyePoints is not None:
image = align_face(originalImage, eyePoints)
improved_landmarks = facial_landmarks(image, allowEnhancement=True)
image = drawPoints(image, improved_landmarks)
cv2.imshow(winname, image)
cv2.waitKey(0)
#cv2.imwrite(filesnames[i].replace('sample', 'after-output'), image)
main()
| true | true |
1c30af43c7f998ad0e53dfd81ea41da828f39911 | 7,130 | py | Python | code/driver/servo_manager.py | eulerlab/spectral-scanner | 3da291c16f408ce46d009ae63257606df4ed3230 | [
"MIT"
] | 1 | 2020-12-05T16:59:10.000Z | 2020-12-05T16:59:10.000Z | code/driver/servo_manager.py | eulerlab/spectral-scanner | 3da291c16f408ce46d009ae63257606df4ed3230 | [
"MIT"
] | null | null | null | code/driver/servo_manager.py | eulerlab/spectral-scanner | 3da291c16f408ce46d009ae63257606df4ed3230 | [
"MIT"
] | null | null | null | # ----------------------------------------------------------------------------
# servo_manager.py
# Class to manage and control a number of servos
#
# The MIT License (MIT)
# Copyright (c) 2020 Thomas Euler
# 2020-01-03, v1
# 2020-08-02, v1.1 ulab
# ----------------------------------------------------------------------------
import time
from machine import Timer
import ulab as np
from micropython import alloc_emergency_exception_buf
alloc_emergency_exception_buf(100)
__version__ = "0.1.1.0"
RATE_MS = const(20)
_STEP_ARRAY_MAX = const(500)
# ----------------------------------------------------------------------------
class ServoManager(object):
    """Class to manage and control a number of servos"""
    # Channel role tags (MicroPython `const` for compile-time folding)
    TYPE_NONE = const(0)
    TYPE_HORIZONTAL = const(1)
    TYPE_VERTICAL = const(2)
    TYPE_SENSOR = const(3)

    def __init__(self, n, max_steps=_STEP_ARRAY_MAX, verbose=False):
        """ Initialises the management structures
        """
        self._isVerbose = verbose
        self._isMoving = False
        self._nChan = max(1, n)
        self._Servos = [None]*n  # Servo objects
        self._servo_type = np.array([0]*n, dtype=np.uint8)  # Servo type
        self._servo_number = np.array([-1]*n, dtype=np.int8)  # Servo number
        self._servoPos = np.array([0]*n)  # Servo pos [us]
        self._SIDList = np.array([-1]*n, dtype=np.int8)  # Servos to move next
        self._targetPosList = np.array([0]*n)  # Target pos [us]
        self._currPosList = np.array([-1]*n)  # Current pos [us]
        self._stepSizeList = np.array([0]*n)  # .. step sizes [us]
        # One per-servo step table for non-linear (paraboloid) trajectories
        self._stepLists = []
        for i in range(n):
            self._stepLists.append(np.array([0]*max_steps))
        self._nToMove = 0  # # of servos to move
        self._dt_ms = 0  # Time period [ms]
        self._nSteps = 0  # # of steps to move
        # period=-1 creates the hardware timer without a running callback
        self._Timer = Timer(0)
        self._Timer.init(period=-1)

    def add_servo(self, i, servoObj, pos=0):
        """ Add at the entry `i` of the servo list the servo object, which has to
            define the following functions:
            - `write_us(t_us)`
            - `angle_in_us(value=None)`
            - `off()`
            - `deinit()`
        """
        if i in range(self._nChan):
            self._Servos[i] = servoObj
            # Cache the servo's current position in microseconds
            self._servoPos[i] = servoObj.angle_in_us()
            self._servo_number[i] = i
            if self._isVerbose:
                print("Add servo #{0:-2.0f}, at {1} us"
                      .format(i, int(self._servoPos[i])))

    def set_servo_type(self, i, type):
        """ Change servo type (see `TYPE_xxx`)
        """
        if i in range(self._nChan) and self._Servos[i] is not None:
            self._servo_type[i] = type

    def turn_all_off(self, deinit=False):
        """ Turn all servos off
        """
        for servo in self._Servos:
            if not servo is None:
                servo.off()
                if deinit:
                    servo.deinit()

    def deinit(self):
        """ Clean up
        """
        self._Timer.deinit()
        self.turn_all_off(deinit=True)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Disabled timing wrapper kept for benchmarking reference
    '''
    @timed_function
    def move_timed(self, servos, pos, dt_ms=0, lin_vel=True):
        self.move(servos, pos, dt_ms)
    '''

    @micropython.native
    def move(self, servos, pos, dt_ms=0, lin_vel=True):
        """ Move the servos in the list to the positions given in `pos`.
            If `dt_ms` > 0, then it will be attempted that all servos reach the
            position at the same time (that is after `dt_ms` ms)
        """
        if self._isMoving:
            # Stop ongoing move
            # ...
            self._Timer.init(period=-1)
            self._isMoving = False
        # Prepare new move
        n = 0
        nSteps = dt_ms /RATE_MS
        if nSteps > _STEP_ARRAY_MAX:
            # Too many steps for a paraboloid trajectory
            lin_vel = True
            print("WARNING: {0} is too many steps; going linear".format(int(nSteps)))
        for iS, SID in enumerate(servos):
            if not self._Servos[SID]:
                # Skip channels with no servo attached
                continue
            self._SIDList[n] = SID
            self._targetPosList[n] = self._Servos[SID].angle_in_us(pos[iS])
            if nSteps > 0:
                # A time period is given, therefore calculate the step sizes for this
                # servo's move, with ...
                p = self._servoPos[SID]
                dp = self._targetPosList[n] -p
                #print("dp=", dp)
                if lin_vel:
                    # ... linear velocity
                    s = int(dp /nSteps)
                    self._currPosList[n] = p +s
                    self._stepSizeList[n] = s
                else:
                    # ... paraboloid trajectory: per-step increments follow an
                    # inverted parabola scaled so they sum to `dp`
                    p_n = nSteps -1
                    p_n2 = p_n /2
                    p_peak = p_n2**2
                    p_func = [-(j +1 -p_n2)**2 +p_peak for j in range(int(nSteps))]
                    p_scal = dp /sum(p_func)
                    # NOTE(review): `p_n` is a float here (`nSteps` comes from a
                    # float division), so `range(p_n -1)` looks like it would
                    # raise TypeError -- confirm on the target firmware.
                    for iv in range(p_n -1):
                        self._stepLists[n][iv] = int(p_func[iv] *p_scal)
                    self._stepSizeList[n] = 0
                    #print(dp, nSteps, p_n, p_scal, sum(self._stepLists[iS]))
                    #print(self._stepLists[iS])
            else:
                # Move directly, therefore update already the final position
                # NOTE(review): indexes `_targetPosList` with `iS`, not `n`;
                # these differ when channels were skipped above -- verify.
                self._servoPos[SID] = self._targetPosList[iS]
            n += 1
        self._nToMove = n
        self._dt_ms = dt_ms
        self._nSteps = int(nSteps) -1
        # Initiate move
        if dt_ms == 0:
            # Just move them w/o considering timing
            for iS in range(n):
                p = int(self._targetPosList[iS])
                self._Servos[self._SIDList[iS]].write_us(p)
        else:
            # Setup timer to keep moving them in the requested time
            self._Timer.init(mode=Timer.PERIODIC, period=RATE_MS, callback=self._cb)
            self._isMoving = True

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def _cb(self, value):
        # Timer callback: advance each moving servo by one step every RATE_MS
        if self._isMoving:
            # Update every servo in the list
            for iS in range(self._nToMove):
                SID = self._SIDList[iS]
                if self._nSteps > 0:
                    # Move is ongoing, update servo position ...
                    p = int(self._currPosList[iS])
                    self._Servos[SID].write_us(p)
                    if self._stepSizeList[iS] == 0:
                        # Paraboloid trajectory
                        self._currPosList[iS] += self._stepLists[iS][self._nSteps]
                        #print("para", self._nSteps, p, self._stepLists[iS][self._nSteps])
                    else:
                        # Linear trajectory
                        self._currPosList[iS] += self._stepSizeList[iS]
                        #print("lin ", self._nSteps, p, self._stepSizeList[iS])
                else:
                    # Move has ended, therefore set servo to the target position
                    tp = int(self._targetPosList[iS])
                    self._servoPos[SID] = tp
                    self._Servos[SID].write_us(tp)
            if self._nSteps > 0:
                self._nSteps -= 1
                #print("curr", self._currPosList)
            else:
                # Move is done
                self._isMoving = False
                #print("targ", self._targetPosList)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    @property
    def is_moving(self):
        """ Returns True if a move is still ongoing
        """
        return self._isMoving
# ----------------------------------------------------------------------------
| 35.472637 | 79 | 0.534362 |
import time
from machine import Timer
import ulab as np
from micropython import alloc_emergency_exception_buf
alloc_emergency_exception_buf(100)
__version__ = "0.1.1.0"
RATE_MS = const(20)
_STEP_ARRAY_MAX = const(500)
class ServoManager(object):
TYPE_NONE = const(0)
TYPE_HORIZONTAL = const(1)
TYPE_VERTICAL = const(2)
TYPE_SENSOR = const(3)
def __init__(self, n, max_steps=_STEP_ARRAY_MAX, verbose=False):
self._isVerbose = verbose
self._isMoving = False
self._nChan = max(1, n)
self._Servos = [None]*n
self._servo_type = np.array([0]*n, dtype=np.uint8)
self._servo_number = np.array([-1]*n, dtype=np.int8)
self._servoPos = np.array([0]*n)
self._SIDList = np.array([-1]*n, dtype=np.int8)
self._targetPosList = np.array([0]*n)
self._currPosList = np.array([-1]*n)
self._stepSizeList = np.array([0]*n)
self._stepLists = []
for i in range(n):
self._stepLists.append(np.array([0]*max_steps))
self._nToMove = 0 = 0
self._nSteps = 0 = Timer(0)
self._Timer.init(period=-1)
def add_servo(self, i, servoObj, pos=0):
if i in range(self._nChan):
self._Servos[i] = servoObj
self._servoPos[i] = servoObj.angle_in_us()
self._servo_number[i] = i
if self._isVerbose:
print("Add servo #{0:-2.0f}, at {1} us"
.format(i, int(self._servoPos[i])))
def set_servo_type(self, i, type):
if i in range(self._nChan) and self._Servos[i] is not None:
self._servo_type[i] = type
def turn_all_off(self, deinit=False):
for servo in self._Servos:
if not servo is None:
servo.off()
if deinit:
servo.deinit()
def deinit(self):
self._Timer.deinit()
self.turn_all_off(deinit=True)
@micropython.native
def move(self, servos, pos, dt_ms=0, lin_vel=True):
if self._isMoving:
self._Timer.init(period=-1)
self._isMoving = False
n = 0
nSteps = dt_ms /RATE_MS
if nSteps > _STEP_ARRAY_MAX:
lin_vel = True
print("WARNING: {0} is too many steps; going linear".format(int(nSteps)))
for iS, SID in enumerate(servos):
if not self._Servos[SID]:
continue
self._SIDList[n] = SID
self._targetPosList[n] = self._Servos[SID].angle_in_us(pos[iS])
if nSteps > 0:
p = self._servoPos[SID]
dp = self._targetPosList[n] -p
#print("dp=", dp)
if lin_vel:
# ... linear velocity
s = int(dp /nSteps)
self._currPosList[n] = p +s
self._stepSizeList[n] = s
else:
# ... paraboloid trajectory
p_n = nSteps -1
p_n2 = p_n /2
p_peak = p_n2**2
p_func = [-(j +1 -p_n2)**2 +p_peak for j in range(int(nSteps))]
p_scal = dp /sum(p_func)
for iv in range(p_n -1):
self._stepLists[n][iv] = int(p_func[iv] *p_scal)
self._stepSizeList[n] = 0
#print(dp, nSteps, p_n, p_scal, sum(self._stepLists[iS]))
#print(self._stepLists[iS])
else:
# Move directly, therefore update already the final position
self._servoPos[SID] = self._targetPosList[iS]
n += 1
self._nToMove = n
self._dt_ms = dt_ms
self._nSteps = int(nSteps) -1
# Initiate move
if dt_ms == 0:
# Just move them w/o considering timing
for iS in range(n):
p = int(self._targetPosList[iS])
self._Servos[self._SIDList[iS]].write_us(p)
else:
# Setup timer to keep moving them in the requested time
self._Timer.init(mode=Timer.PERIODIC, period=RATE_MS, callback=self._cb)
self._isMoving = True
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def _cb(self, value):
if self._isMoving:
# Update every servo in the list
for iS in range(self._nToMove):
SID = self._SIDList[iS]
if self._nSteps > 0:
# Move is ongoing, update servo position ...
p = int(self._currPosList[iS])
self._Servos[SID].write_us(p)
if self._stepSizeList[iS] == 0:
# Paraboloid trajectory
self._currPosList[iS] += self._stepLists[iS][self._nSteps]
#print("para", self._nSteps, p, self._stepLists[iS][self._nSteps])
else:
# Linear trajectory
self._currPosList[iS] += self._stepSizeList[iS]
#print("lin ", self._nSteps, p, self._stepSizeList[iS])
else:
# Move has ended, therefore set servo to the target position
tp = int(self._targetPosList[iS])
self._servoPos[SID] = tp
self._Servos[SID].write_us(tp)
if self._nSteps > 0:
self._nSteps -= 1
#print("curr", self._currPosList)
else:
# Move is done
self._isMoving = False
#print("targ", self._targetPosList)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@property
def is_moving(self):
return self._isMoving
# ----------------------------------------------------------------------------
| true | true |
1c30af73b4c1fe1408985171d2a518da80cf23fa | 17,036 | py | Python | cogdl/models/emb/gatne.py | xssstory/cogdl | ae8de495c365993f19f04774f083960fd282c2a3 | [
"MIT"
] | 1 | 2020-07-20T07:14:50.000Z | 2020-07-20T07:14:50.000Z | cogdl/models/emb/gatne.py | xssstory/cogdl | ae8de495c365993f19f04774f083960fd282c2a3 | [
"MIT"
] | null | null | null | cogdl/models/emb/gatne.py | xssstory/cogdl | ae8de495c365993f19f04774f083960fd282c2a3 | [
"MIT"
] | 1 | 2021-06-17T02:44:09.000Z | 2021-06-17T02:44:09.000Z | import numpy as np
import networkx as nx
from collections import defaultdict
from gensim.models.keyedvectors import Vocab
from six import iteritems
import random
import math
import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from .. import BaseModel, register_model
@register_model("gatne")
class GATNE(BaseModel):
    r"""The GATNE model from the `"Representation Learning for Attributed Multiplex Heterogeneous Network"
    <https://dl.acm.org/doi/10.1145/3292500.3330964>`_ paper

    Args:
        walk_length (int) : The walk length.
        walk_num (int) : The number of walks to sample for each node.
        window_size (int) : The actual context size which is considered in language model.
        worker (int) : The number of workers for word2vec.
        epoch (int) : The number of training epochs.
        batch_size (int) : The size of each training batch.
        edge_dim (int) : Number of edge embedding dimensions.
        att_dim (int) : Number of attention dimensions.
        negative_samples (int) : Negative samples for optimization.
        neighbor_samples (int) : Neighbor samples for aggregation
        schema (str) : The metapath schema used in model. Metapaths are splited with ",",
        while each node type are connected with "-" in each metapath. For example:"0-1-0,0-1-2-1-0"
    """

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('--walk-length', type=int, default=10,
                            help='Length of walk per source. Default is 10.')
        parser.add_argument('--walk-num', type=int, default=10,
                            help='Number of walks per source. Default is 10.')
        parser.add_argument('--window-size', type=int, default=5,
                            help='Window size of skip-gram model. Default is 5.')
        parser.add_argument('--worker', type=int, default=10,
                            help='Number of parallel workers. Default is 10.')
        parser.add_argument('--epoch', type=int, default=20,
                            help='Number of epoch. Default is 20.')
        parser.add_argument('--batch-size', type=int, default=256,
                            help='Number of batch_size. Default is 256.')
        parser.add_argument('--edge-dim', type=int, default=10,
                            help='Number of edge embedding dimensions. Default is 10.')
        parser.add_argument('--att-dim', type=int, default=20,
                            help='Number of attention dimensions. Default is 20.')
        parser.add_argument('--negative-samples', type=int, default=5,
                            help='Negative samples for optimization. Default is 5.')
        parser.add_argument('--neighbor-samples', type=int, default=10,
                            help='Neighbor samples for aggregation. Default is 10.')
        parser.add_argument('--schema', type=str, default=None,
                            help="Input schema for metapath random walk.")
        # fmt: on

    @classmethod
    def build_model_from_args(cls, args):
        """Construct a GATNE instance from parsed command-line arguments."""
        return cls(
            args.hidden_size,
            args.walk_length,
            args.walk_num,
            args.window_size,
            args.worker,
            args.epoch,
            args.batch_size,
            args.edge_dim,
            args.att_dim,
            args.negative_samples,
            args.neighbor_samples,
            args.schema,
        )

    def __init__(
        self,
        dimension,
        walk_length,
        walk_num,
        window_size,
        worker,
        epoch,
        batch_size,
        edge_dim,
        att_dim,
        negative_samples,
        neighbor_samples,
        schema,
    ):
        super(GATNE, self).__init__()
        self.embedding_size = dimension
        self.walk_length = walk_length
        self.walk_num = walk_num
        self.window_size = window_size
        self.worker = worker
        self.epochs = epoch
        self.batch_size = batch_size
        self.embedding_u_size = edge_dim
        self.dim_att = att_dim
        self.num_sampled = negative_samples
        self.neighbor_samples = neighbor_samples
        self.schema = schema
        self.multiplicity = True
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def train(self, network_data):
        """Train GATNE on `network_data` (mapping edge type -> edge list).

        Returns a nested dict: edge type -> node -> numpy embedding vector.

        NOTE(review): `train` shadows `torch.nn.Module.train(mode)` if
        `BaseModel` derives from `nn.Module` -- confirm this is intentional.
        """
        # `generate_walks` is defined elsewhere in this module (outside this
        # class); it samples (meta-path constrained) random walks per layer.
        all_walks = generate_walks(network_data, self.walk_num, self.walk_length, schema=self.schema)
        vocab, index2word = generate_vocab(all_walks)
        train_pairs = generate_pairs(all_walks, vocab)

        edge_types = list(network_data.keys())

        num_nodes = len(index2word)
        edge_type_count = len(edge_types)
        epochs = self.epochs
        batch_size = self.batch_size
        embedding_size = self.embedding_size
        embedding_u_size = self.embedding_u_size
        num_sampled = self.num_sampled
        dim_att = self.dim_att
        neighbor_samples = self.neighbor_samples

        # Build a fixed-size neighbor table: neighbors[node][edge_type] holds
        # exactly `neighbor_samples` neighbor ids (padded with self-loops or
        # up/down-sampled at random).
        neighbors = [[[] for __ in range(edge_type_count)] for _ in range(num_nodes)]
        for r in range(edge_type_count):
            g = network_data[edge_types[r]]
            for (x, y) in g:
                ix = vocab[x].index
                iy = vocab[y].index
                neighbors[ix][r].append(iy)
                neighbors[iy][r].append(ix)
            for i in range(num_nodes):
                if len(neighbors[i][r]) == 0:
                    # Isolated in this layer: use the node itself as neighbor
                    neighbors[i][r] = [i] * neighbor_samples
                elif len(neighbors[i][r]) < neighbor_samples:
                    # Up-sample with replacement to reach the fixed size
                    neighbors[i][r].extend(
                        list(
                            np.random.choice(
                                neighbors[i][r],
                                size=neighbor_samples - len(neighbors[i][r]),
                            )
                        )
                    )
                elif len(neighbors[i][r]) > neighbor_samples:
                    # Down-sample to the fixed size
                    neighbors[i][r] = list(
                        np.random.choice(neighbors[i][r], size=neighbor_samples)
                    )

        model = GATNEModel(
            num_nodes, embedding_size, embedding_u_size, edge_type_count, dim_att
        )
        nsloss = NSLoss(num_nodes, num_sampled, embedding_size)

        model.to(self.device)
        nsloss.to(self.device)

        # Jointly optimise model parameters and the NS-loss output embeddings
        optimizer = torch.optim.Adam(
            [{"params": model.parameters()}, {"params": nsloss.parameters()}], lr=1e-4
        )

        for epoch in range(epochs):
            random.shuffle(train_pairs)
            batches = get_batches(train_pairs, neighbors, batch_size)

            data_iter = tqdm.tqdm(
                batches,
                desc="epoch %d" % (epoch),
                total=(len(train_pairs) + (batch_size - 1)) // batch_size,
                bar_format="{l_bar}{r_bar}",
            )
            avg_loss = 0.0

            for i, data in enumerate(data_iter):
                optimizer.zero_grad()
                # data = (centers, contexts, edge types, neighbor tables)
                embs = model(
                    data[0].to(self.device),
                    data[2].to(self.device),
                    data[3].to(self.device),
                )
                loss = nsloss(data[0].to(self.device), embs, data[1].to(self.device))
                loss.backward()
                optimizer.step()

                avg_loss += loss.item()

                if i % 5000 == 0:
                    post_fix = {
                        "epoch": epoch,
                        "iter": i,
                        "avg_loss": avg_loss / (i + 1),
                        "loss": loss.item(),
                    }
                    data_iter.write(str(post_fix))

        # Export per-edge-type embeddings for every node
        final_model = dict(zip(edge_types, [dict() for _ in range(edge_type_count)]))
        for i in range(num_nodes):
            train_inputs = torch.tensor([i for _ in range(edge_type_count)]).to(
                self.device
            )
            train_types = torch.tensor(list(range(edge_type_count))).to(self.device)
            node_neigh = torch.tensor(
                [neighbors[i] for _ in range(edge_type_count)]
            ).to(self.device)
            node_emb = model(train_inputs, train_types, node_neigh)
            for j in range(edge_type_count):
                final_model[edge_types[j]][index2word[i]] = (
                    node_emb[j].cpu().detach().numpy()
                )
        return final_model
class GATNEModel(nn.Module):
    """Torch module computing edge-type-specific GATNE node embeddings.

    Each node owns a base embedding plus one `embedding_u_size`-dim edge
    embedding per edge type; an attention over the sum-aggregated neighbor
    edge embeddings yields the final per-type embedding.
    """

    def __init__(
        self, num_nodes, embedding_size, embedding_u_size, edge_type_count, dim_a
    ):
        super(GATNEModel, self).__init__()
        self.num_nodes = num_nodes
        self.embedding_size = embedding_size
        self.embedding_u_size = embedding_u_size
        self.edge_type_count = edge_type_count
        self.dim_a = dim_a

        # Base embeddings b_i and per-edge-type edge embeddings u_{i,r}
        self.node_embeddings = Parameter(torch.FloatTensor(num_nodes, embedding_size))
        self.node_type_embeddings = Parameter(
            torch.FloatTensor(num_nodes, edge_type_count, embedding_u_size)
        )
        # Per-edge-type projection M_r and the two attention weight tensors
        self.trans_weights = Parameter(
            torch.FloatTensor(edge_type_count, embedding_u_size, embedding_size)
        )
        self.trans_weights_s1 = Parameter(
            torch.FloatTensor(edge_type_count, embedding_u_size, dim_a)
        )
        self.trans_weights_s2 = Parameter(torch.FloatTensor(edge_type_count, dim_a, 1))

        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init for embeddings; scaled normal init for transforms."""
        self.node_embeddings.data.uniform_(-1.0, 1.0)
        self.node_type_embeddings.data.uniform_(-1.0, 1.0)
        self.trans_weights.data.normal_(std=1.0 / math.sqrt(self.embedding_size))
        self.trans_weights_s1.data.normal_(std=1.0 / math.sqrt(self.embedding_size))
        self.trans_weights_s2.data.normal_(std=1.0 / math.sqrt(self.embedding_size))

    def forward(self, train_inputs, train_types, node_neigh):
        """Return L2-normalized embeddings for `train_inputs` under `train_types`.

        Args:
            train_inputs: node id per batch entry.
            train_types: edge-type id per batch entry.
            node_neigh: per-entry neighbor id table, indexed [entry, type, sample].
        """
        node_embed = self.node_embeddings[train_inputs]
        node_embed_neighbors = self.node_type_embeddings[node_neigh]
        # For each edge type i, take the type-i edge embeddings of the type-i
        # neighbor list, then sum-aggregate over the neighbor samples.
        node_embed_tmp = torch.cat(
            [
                node_embed_neighbors[:, i, :, i, :].unsqueeze(1)
                for i in range(self.edge_type_count)
            ],
            dim=1,
        )
        node_type_embed = torch.sum(node_embed_tmp, dim=2)

        trans_w = self.trans_weights[train_types]
        trans_w_s1 = self.trans_weights_s1[train_types]
        trans_w_s2 = self.trans_weights_s2[train_types]

        # NOTE(review): `F.tanh` and `F.softmax` without an explicit `dim`
        # are deprecated torch APIs; confirm the intended softmax axis
        # before migrating to `torch.tanh` / `softmax(dim=...)`.
        attention = F.softmax(
            torch.matmul(
                F.tanh(torch.matmul(node_type_embed, trans_w_s1)), trans_w_s2
            ).squeeze()
        ).unsqueeze(1)
        node_type_embed = torch.matmul(attention, node_type_embed)
        # Final embedding: base + attended edge embedding projected by M_r
        node_embed = node_embed + torch.matmul(node_type_embed, trans_w).squeeze()
        last_node_embed = F.normalize(node_embed, dim=1)

        return last_node_embed
class NSLoss(nn.Module):
    """Negative-sampling (skip-gram) loss with log-uniform negative sampling.

    Holds the output ("context") embedding table and draws negatives from a
    log-uniform distribution over node indices; assumes node index equals
    frequency rank (the vocabulary is sorted by descending count).
    """

    def __init__(self, num_nodes, num_sampled, embedding_size):
        super(NSLoss, self).__init__()
        self.num_nodes = num_nodes
        self.num_sampled = num_sampled
        self.embedding_size = embedding_size
        self.weights = Parameter(torch.FloatTensor(num_nodes, embedding_size))
        # Zipfian sampling weights: P(k) ~ log(k + 2) - log(k + 1)
        self.sample_weights = F.normalize(
            torch.Tensor(
                [
                    (math.log(k + 2) - math.log(k + 1)) / math.log(num_nodes + 1)
                    for k in range(num_nodes)
                ]
            ),
            dim=0,
        )

        self.reset_parameters()

    def reset_parameters(self):
        self.weights.data.normal_(std=1.0 / math.sqrt(self.embedding_size))

    def forward(self, input, embs, label):
        """Return the mean negative-sampling loss for one batch.

        Args:
            input: (n,) center-node ids (only the batch size is used).
            embs: (n, embedding_size) center embeddings from the model.
            label: (n,) positive context-node ids.
        """
        n = input.shape[0]
        # Positive part: log sigma(e_i . w_label)
        log_target = torch.log(
            torch.sigmoid(torch.sum(torch.mul(embs, self.weights[label]), 1))
        )
        # Draw `num_sampled` negatives per batch entry (with replacement)
        negs = torch.multinomial(
            self.sample_weights, self.num_sampled * n, replacement=True
        ).view(n, self.num_sampled)
        noise = torch.neg(self.weights[negs])
        # Negative part: sum_k log sigma(-e_i . w_neg_k)
        sum_log_sampled = torch.sum(
            torch.log(torch.sigmoid(torch.bmm(noise, embs.unsqueeze(2)))), 1
        ).squeeze()
        loss = log_target + sum_log_sampled

        # Maximising the log-likelihood == minimising its negative mean
        return -loss.sum() / n
class RWGraph:
    """(Meta-path constrained) random-walk generator over a graph.

    Args:
        nx_G: Graph whose adjacency is indexable as ``G[node]`` and whose
            node set is iterable via ``G.nodes()`` (e.g. a networkx graph).
        node_type: Optional mapping from node to its type label; required
            when walks are constrained by a meta-path ``schema``.
    """

    def __init__(self, nx_G, node_type=None):
        self.G = nx_G
        self.node_type = node_type

    def walk(self, walk_length, start, schema=None):
        """Simulate one random walk of at most *walk_length* nodes from *start*.

        When *schema* is given (e.g. ``"0-1-0"``), each step may only visit
        neighbors whose node type matches the next entry of the meta-path.
        """
        G = self.G
        rand = random.Random()

        if schema:
            schema_items = schema.split("-")
            # A valid meta-path starts and ends on the same node type.
            assert schema_items[0] == schema_items[-1]

        walk = [start]
        while len(walk) < walk_length:
            cur = walk[-1]
            candidates = []
            for node in G[cur].keys():
                # `is None` (idiom fix): identity check instead of `== None`
                if (
                    schema is None
                    or self.node_type[node]
                    == schema_items[len(walk) % (len(schema_items) - 1)]
                ):
                    candidates.append(node)
            if candidates:
                walk.append(rand.choice(candidates))
            else:
                # Dead end: no neighbor satisfies the schema constraint.
                break
        return walk

    def simulate_walks(self, num_walks, walk_length, schema=None):
        """Run *num_walks* walks starting from every node; return the list of walks."""
        G = self.G
        walks = []
        nodes = list(G.nodes())

        if schema is not None:
            schema_list = schema.split(",")
        for walk_iter in range(num_walks):
            random.shuffle(nodes)
            for node in nodes:
                if schema is None:
                    walks.append(self.walk(walk_length=walk_length, start=node))
                else:
                    # Only start a schema-constrained walk from nodes whose
                    # type matches the meta-path's leading type.
                    for schema_iter in schema_list:
                        if schema_iter.split("-")[0] == self.node_type[node]:
                            walks.append(
                                self.walk(
                                    walk_length=walk_length,
                                    start=node,
                                    schema=schema_iter,
                                )
                            )
        return walks
def get_G_from_edges(edges):
    """Build an undirected NetworkX graph from a directed edge list.

    Parallel (repeated) edges are collapsed into a single edge whose
    ``weight`` attribute is the multiplicity. Node ids are coerced to int.
    """
    # Count multiplicity with tuple keys instead of the fragile
    # "a_b" string round-trip (str -> split -> int) of the old code.
    edge_counts = {}
    for edge in edges:
        key = (int(edge[0]), int(edge[1]))
        edge_counts[key] = edge_counts.get(key, 0) + 1
    tmp_G = nx.Graph()
    for (x, y), weight in edge_counts.items():
        # add_edge accepts attributes directly; later (y, x) entries
        # overwrite the weight, matching the original insertion-order logic.
        tmp_G.add_edge(x, y, weight=weight)
    return tmp_G
def generate_pairs(all_walks, vocab, window_size=5):
    """Turn per-layer random walks into skip-gram training triples.

    Returns (center index, context index, layer id) tuples for every pair
    of walk positions within half the window size of each other.
    """
    half_window = window_size // 2
    pairs = []
    for layer_id, walks in enumerate(all_walks):
        for walk in walks:
            for pos, word in enumerate(walk):
                center = vocab[word].index
                for offset in range(1, half_window + 1):
                    if pos - offset >= 0:
                        pairs.append((center, vocab[walk[pos - offset]].index, layer_id))
                    if pos + offset < len(walk):
                        pairs.append((center, vocab[walk[pos + offset]].index, layer_id))
    return pairs
def generate_vocab(all_walks):
    """Build a skip-gram vocabulary from per-layer walks.

    Returns (vocab, index2word) where ``vocab`` maps word -> gensim Vocab
    (count + final index) and ``index2word`` is ordered by descending count.
    """
    index2word = []
    raw_vocab = defaultdict(int)
    for walks in all_walks:
        for walk in walks:
            for word in walk:
                raw_vocab[word] += 1
    vocab = {}
    # Plain dict.items() replaces the six.iteritems py2 shim; identical
    # behavior on Python 3.
    for word, v in raw_vocab.items():
        vocab[word] = Vocab(count=v, index=len(index2word))
        index2word.append(word)
    # Stable sort: ties keep first-seen order. Reassign indices by rank so
    # index 0 is the most frequent word (required by the sampling table).
    index2word.sort(key=lambda word: vocab[word].count, reverse=True)
    for i, word in enumerate(index2word):
        vocab[word].index = i
    return vocab, index2word
def get_batches(pairs, neighbors, batch_size):
    """Yield (x, y, type, neighborhood) tensor batches from training pairs.

    The last batch may be smaller than ``batch_size``.
    """
    total = len(pairs)
    n_batches = (total + batch_size - 1) // batch_size
    for batch_idx in range(n_batches):
        begin = batch_idx * batch_size
        chunk = pairs[begin:begin + batch_size]
        x = [pair[0] for pair in chunk]
        y = [pair[1] for pair in chunk]
        t = [pair[2] for pair in chunk]
        neigh = [neighbors[pair[0]] for pair in chunk]
        yield torch.tensor(x), torch.tensor(y), torch.tensor(t), torch.tensor(neigh)
def generate_walks(network_data, num_walks, walk_length, schema=None):
    """Run random walks on every layer of a multiplex network.

    ``network_data`` maps layer id -> edge list; returns one list of walks
    per layer, in the dict's iteration order.

    TODO: when ``schema`` is supplied, node types must be loaded (e.g. from
    node_type.txt) and passed into RWGraph — until then schema-constrained
    walks will fail inside RWGraph.walk. The old dead ``node_type = None``
    assignment (never used) has been removed.
    """
    all_walks = []
    for layer_id in network_data:
        tmp_data = network_data[layer_id]
        # Random-walk each layer independently.
        layer_walker = RWGraph(get_G_from_edges(tmp_data))
        layer_walks = layer_walker.simulate_walks(num_walks, walk_length, schema=schema)
        all_walks.append(layer_walks)
    return all_walks
| 36.169851 | 106 | 0.560343 | import numpy as np
import networkx as nx
from collections import defaultdict
from gensim.models.keyedvectors import Vocab
from six import iteritems
import random
import math
import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from .. import BaseModel, register_model
@register_model("gatne")
class GATNE(BaseModel):
    """GATNE: multiplex heterogeneous network embedding.

    Learns one embedding per node per edge type by running random walks on
    each layer of the network and optimizing a skip-gram objective with
    negative sampling (NSLoss) over a shared base embedding combined with
    per-edge-type attention (GATNEModel).
    """

    @staticmethod
    def add_args(parser):
        """Register model-specific hyper-parameters on an argparse parser."""
        parser.add_argument('--walk-length', type=int, default=10,
                            help='Length of walk per source. Default is 10.')
        parser.add_argument('--walk-num', type=int, default=10,
                            help='Number of walks per source. Default is 10.')
        parser.add_argument('--window-size', type=int, default=5,
                            help='Window size of skip-gram model. Default is 5.')
        parser.add_argument('--worker', type=int, default=10,
                            help='Number of parallel workers. Default is 10.')
        parser.add_argument('--epoch', type=int, default=20,
                            help='Number of epoch. Default is 20.')
        parser.add_argument('--batch-size', type=int, default=256,
                            help='Number of batch_size. Default is 256.')
        parser.add_argument('--edge-dim', type=int, default=10,
                            help='Number of edge embedding dimensions. Default is 10.')
        parser.add_argument('--att-dim', type=int, default=20,
                            help='Number of attention dimensions. Default is 20.')
        parser.add_argument('--negative-samples', type=int, default=5,
                            help='Negative samples for optimization. Default is 5.')
        parser.add_argument('--neighbor-samples', type=int, default=10,
                            help='Neighbor samples for aggregation. Default is 10.')
        parser.add_argument('--schema', type=str, default=None,
                            help="Input schema for metapath random walk.")

    @classmethod
    def build_model_from_args(cls, args):
        """Alternate constructor: build a GATNE model from parsed CLI args."""
        return cls(
            args.hidden_size,
            args.walk_length,
            args.walk_num,
            args.window_size,
            args.worker,
            args.epoch,
            args.batch_size,
            args.edge_dim,
            args.att_dim,
            args.negative_samples,
            args.neighbor_samples,
            args.schema,
        )

    def __init__(
        self,
        dimension,
        walk_length,
        walk_num,
        window_size,
        worker,
        epoch,
        batch_size,
        edge_dim,
        att_dim,
        negative_samples,
        neighbor_samples,
        schema,
    ):
        """Store hyper-parameters; parameters are allocated lazily in train()."""
        super(GATNE, self).__init__()
        self.embedding_size = dimension
        self.walk_length = walk_length
        self.walk_num = walk_num
        self.window_size = window_size
        self.worker = worker
        self.epochs = epoch
        self.batch_size = batch_size
        self.embedding_u_size = edge_dim
        self.dim_att = att_dim
        self.num_sampled = negative_samples
        self.neighbor_samples = neighbor_samples
        self.schema = schema
        self.multiplicity = True
        # Train on GPU when available, otherwise fall back to CPU.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def train(self, network_data):
        """Train the embeddings.

        ``network_data`` maps each edge type to its edge list. Returns a
        nested dict {edge_type: {node: numpy embedding}}.
        """
        # 1) Random walks per layer -> vocabulary -> skip-gram pairs.
        all_walks = generate_walks(network_data, self.walk_num, self.walk_length, schema=self.schema)
        vocab, index2word = generate_vocab(all_walks)
        train_pairs = generate_pairs(all_walks, vocab)
        edge_types = list(network_data.keys())
        num_nodes = len(index2word)
        edge_type_count = len(edge_types)
        epochs = self.epochs
        batch_size = self.batch_size
        embedding_size = self.embedding_size
        embedding_u_size = self.embedding_u_size
        num_sampled = self.num_sampled
        dim_att = self.dim_att
        neighbor_samples = self.neighbor_samples
        # 2) Fixed-size neighbor lists per (node, edge type): pad by
        # resampling (or with self-loops when there are no neighbors),
        # truncate by random subsampling.
        neighbors = [[[] for __ in range(edge_type_count)] for _ in range(num_nodes)]
        for r in range(edge_type_count):
            g = network_data[edge_types[r]]
            for (x, y) in g:
                ix = vocab[x].index
                iy = vocab[y].index
                neighbors[ix][r].append(iy)
                neighbors[iy][r].append(ix)
            for i in range(num_nodes):
                if len(neighbors[i][r]) == 0:
                    # Isolated under this edge type: use the node itself.
                    neighbors[i][r] = [i] * neighbor_samples
                elif len(neighbors[i][r]) < neighbor_samples:
                    neighbors[i][r].extend(
                        list(
                            np.random.choice(
                                neighbors[i][r],
                                size=neighbor_samples - len(neighbors[i][r]),
                            )
                        )
                    )
                elif len(neighbors[i][r]) > neighbor_samples:
                    neighbors[i][r] = list(
                        np.random.choice(neighbors[i][r], size=neighbor_samples)
                    )
        model = GATNEModel(
            num_nodes, embedding_size, embedding_u_size, edge_type_count, dim_att
        )
        nsloss = NSLoss(num_nodes, num_sampled, embedding_size)
        model.to(self.device)
        nsloss.to(self.device)
        # Both the encoder and the loss's context table are trained jointly.
        optimizer = torch.optim.Adam(
            [{"params": model.parameters()}, {"params": nsloss.parameters()}], lr=1e-4
        )
        # 3) Skip-gram training loop with negative sampling.
        for epoch in range(epochs):
            random.shuffle(train_pairs)
            batches = get_batches(train_pairs, neighbors, batch_size)
            data_iter = tqdm.tqdm(
                batches,
                desc="epoch %d" % (epoch),
                total=(len(train_pairs) + (batch_size - 1)) // batch_size,
                bar_format="{l_bar}{r_bar}",
            )
            avg_loss = 0.0
            for i, data in enumerate(data_iter):
                optimizer.zero_grad()
                # data = (center, context, edge type, neighborhood) tensors.
                embs = model(
                    data[0].to(self.device),
                    data[2].to(self.device),
                    data[3].to(self.device),
                )
                loss = nsloss(data[0].to(self.device), embs, data[1].to(self.device))
                loss.backward()
                optimizer.step()
                avg_loss += loss.item()
                if i % 5000 == 0:
                    post_fix = {
                        "epoch": epoch,
                        "iter": i,
                        "avg_loss": avg_loss / (i + 1),
                        "loss": loss.item(),
                    }
                    data_iter.write(str(post_fix))
        # 4) Export: run every node through the model once per edge type.
        final_model = dict(zip(edge_types, [dict() for _ in range(edge_type_count)]))
        for i in range(num_nodes):
            train_inputs = torch.tensor([i for _ in range(edge_type_count)]).to(
                self.device
            )
            train_types = torch.tensor(list(range(edge_type_count))).to(self.device)
            node_neigh = torch.tensor(
                [neighbors[i] for _ in range(edge_type_count)]
            ).to(self.device)
            node_emb = model(train_inputs, train_types, node_neigh)
            for j in range(edge_type_count):
                final_model[edge_types[j]][index2word[i]] = (
                    node_emb[j].cpu().detach().numpy()
                )
        return final_model
class GATNEModel(nn.Module):
    """Encoder: shared base node embedding plus attention-weighted,
    edge-type-specific neighborhood embeddings projected back into the
    base space."""

    def __init__(
        self, num_nodes, embedding_size, embedding_u_size, edge_type_count, dim_a
    ):
        super(GATNEModel, self).__init__()
        self.num_nodes = num_nodes
        self.embedding_size = embedding_size
        self.embedding_u_size = embedding_u_size
        self.edge_type_count = edge_type_count
        self.dim_a = dim_a
        # Base embedding shared across all edge types.
        self.node_embeddings = Parameter(torch.FloatTensor(num_nodes, embedding_size))
        # Per-node, per-edge-type ("u") embedding.
        self.node_type_embeddings = Parameter(
            torch.FloatTensor(num_nodes, edge_type_count, embedding_u_size)
        )
        # Projects an aggregated edge-type embedding into the base space.
        self.trans_weights = Parameter(
            torch.FloatTensor(edge_type_count, embedding_u_size, embedding_size)
        )
        # Two-layer attention MLP (tanh hidden layer of width dim_a).
        self.trans_weights_s1 = Parameter(
            torch.FloatTensor(edge_type_count, embedding_u_size, dim_a)
        )
        self.trans_weights_s2 = Parameter(torch.FloatTensor(edge_type_count, dim_a, 1))
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init for embedding tables, scaled normal for projections."""
        self.node_embeddings.data.uniform_(-1.0, 1.0)
        self.node_type_embeddings.data.uniform_(-1.0, 1.0)
        self.trans_weights.data.normal_(std=1.0 / math.sqrt(self.embedding_size))
        self.trans_weights_s1.data.normal_(std=1.0 / math.sqrt(self.embedding_size))
        self.trans_weights_s2.data.normal_(std=1.0 / math.sqrt(self.embedding_size))

    def forward(self, train_inputs, train_types, node_neigh):
        """Return L2-normalized embeddings for ``train_inputs`` under the
        edge types ``train_types``.

        # assumes node_neigh is (batch, edge_type_count, neighbor_samples)
        # — TODO confirm against GATNE.train, which builds it that way.
        """
        node_embed = self.node_embeddings[train_inputs]
        node_embed_neighbors = self.node_type_embeddings[node_neigh]
        # For edge type i, pick the type-i embedding of the neighbors that
        # were sampled for type i (the double index [:, i, :, i, :]).
        node_embed_tmp = torch.cat(
            [
                node_embed_neighbors[:, i, :, i, :].unsqueeze(1)
                for i in range(self.edge_type_count)
            ],
            dim=1,
        )
        # Aggregate each neighborhood by summation.
        node_type_embed = torch.sum(node_embed_tmp, dim=2)
        trans_w = self.trans_weights[train_types]
        trans_w_s1 = self.trans_weights_s1[train_types]
        trans_w_s2 = self.trans_weights_s2[train_types]
        # NOTE(review): F.tanh is deprecated (torch.tanh is the modern
        # spelling) and F.softmax is called without an explicit dim, which
        # relies on the legacy implicit-dim behavior — confirm before
        # changing, as an explicit dim could alter results.
        attention = F.softmax(
            torch.matmul(
                F.tanh(torch.matmul(node_type_embed, trans_w_s1)), trans_w_s2
            ).squeeze()
        ).unsqueeze(1)
        # Attention-weighted mix of the per-type embeddings, projected into
        # the base space and added to the base embedding.
        node_type_embed = torch.matmul(attention, node_type_embed)
        node_embed = node_embed + torch.matmul(node_type_embed, trans_w).squeeze()
        last_node_embed = F.normalize(node_embed, dim=1)
        return last_node_embed
class NSLoss(nn.Module):
def __init__(self, num_nodes, num_sampled, embedding_size):
super(NSLoss, self).__init__()
self.num_nodes = num_nodes
self.num_sampled = num_sampled
self.embedding_size = embedding_size
self.weights = Parameter(torch.FloatTensor(num_nodes, embedding_size))
self.sample_weights = F.normalize(
torch.Tensor(
[
(math.log(k + 2) - math.log(k + 1)) / math.log(num_nodes + 1)
for k in range(num_nodes)
]
),
dim=0,
)
self.reset_parameters()
def reset_parameters(self):
self.weights.data.normal_(std=1.0 / math.sqrt(self.embedding_size))
def forward(self, input, embs, label):
n = input.shape[0]
log_target = torch.log(
torch.sigmoid(torch.sum(torch.mul(embs, self.weights[label]), 1))
)
negs = torch.multinomial(
self.sample_weights, self.num_sampled * n, replacement=True
).view(n, self.num_sampled)
noise = torch.neg(self.weights[negs])
sum_log_sampled = torch.sum(
torch.log(torch.sigmoid(torch.bmm(noise, embs.unsqueeze(2)))), 1
).squeeze()
loss = log_target + sum_log_sampled
return -loss.sum() / n
class RWGraph:
    """Random-walk helper over a NetworkX-style graph; ``node_type`` is only
    needed for schema (meta-path) constrained walks."""

    def __init__(self, nx_G, node_type=None):
        self.G = nx_G
        self.node_type = node_type

    def walk(self, walk_length, start, schema=None):
        """Simulate a single walk of at most ``walk_length`` nodes."""
        graph = self.G
        rng = random.Random()
        if schema:
            schema_items = schema.split("-")
            # Meta-path schemas must be cyclic (first type == last type).
            assert schema_items[0] == schema_items[-1]
        path = [start]
        while len(path) < walk_length:
            current = path[-1]
            if schema is None:
                options = list(graph[current].keys())
            else:
                wanted = schema_items[len(path) % (len(schema_items) - 1)]
                options = [
                    nbr for nbr in graph[current].keys()
                    if self.node_type[nbr] == wanted
                ]
            if not options:
                break
            path.append(rng.choice(options))
        return path

    def simulate_walks(self, num_walks, walk_length, schema=None):
        """Run ``num_walks`` rounds of walks from every node (shuffled)."""
        nodes = list(self.G.nodes())
        schema_list = schema.split(",") if schema is not None else []
        walks = []
        for _ in range(num_walks):
            random.shuffle(nodes)
            for node in nodes:
                if schema is None:
                    walks.append(self.walk(walk_length=walk_length, start=node))
                    continue
                for meta_path in schema_list:
                    if meta_path.split("-")[0] == self.node_type[node]:
                        walks.append(
                            self.walk(
                                walk_length=walk_length,
                                start=node,
                                schema=meta_path,
                            )
                        )
        return walks
def get_G_from_edges(edges):
    """Collapse a directed edge list into an undirected NetworkX graph whose
    edge ``weight`` is the multiplicity of the (directed) edge."""
    multiplicity = defaultdict(int)
    for endpoints in edges:
        multiplicity[str(endpoints[0]) + "_" + str(endpoints[1])] += 1
    graph = nx.Graph()
    for key, count in multiplicity.items():
        parts = key.split("_")
        u = int(parts[0])
        v = int(parts[1])
        graph.add_edge(u, v)
        graph[u][v]["weight"] = count
    return graph
def generate_pairs(all_walks, vocab, window_size=5):
    """Build skip-gram (center, context, layer) index triples from the
    per-layer random walks."""
    half_window = window_size // 2
    training_pairs = []
    for layer_id, layer_walks in enumerate(all_walks):
        for walk in layer_walks:
            walk_len = len(walk)
            for pos in range(walk_len):
                center_idx = vocab[walk[pos]].index
                for dist in range(1, half_window + 1):
                    if pos - dist >= 0:
                        training_pairs.append(
                            (center_idx, vocab[walk[pos - dist]].index, layer_id)
                        )
                    if pos + dist < walk_len:
                        training_pairs.append(
                            (center_idx, vocab[walk[pos + dist]].index, layer_id)
                        )
    return training_pairs
def generate_vocab(all_walks):
    """Count word occurrences over all walks and return (vocab, index2word)
    with indices assigned by descending frequency."""
    word_counts = defaultdict(int)
    for layer_walks in all_walks:
        for walk in layer_walks:
            for node in walk:
                word_counts[node] += 1
    index2word = []
    vocab = {}
    for node, count in word_counts.items():
        vocab[node] = Vocab(count=count, index=len(index2word))
        index2word.append(node)
    index2word.sort(key=lambda node: vocab[node].count, reverse=True)
    for rank, node in enumerate(index2word):
        vocab[node].index = rank
    return vocab, index2word
def get_batches(pairs, neighbors, batch_size):
    """Yield (x, y, type, neighborhood) tensor batches; the final batch may
    be short."""
    for start in range(0, len(pairs), batch_size):
        batch = pairs[start:start + batch_size]
        xs = [p[0] for p in batch]
        ys = [p[1] for p in batch]
        types = [p[2] for p in batch]
        neighs = [neighbors[p[0]] for p in batch]
        yield torch.tensor(xs), torch.tensor(ys), torch.tensor(types), torch.tensor(neighs)
def generate_walks(network_data, num_walks, walk_length, schema=None):
    """Random-walk every layer of the multiplex network and return one list
    of walks per layer (in dict iteration order)."""
    per_layer_walks = []
    for layer_id, layer_edges in network_data.items():
        walker = RWGraph(get_G_from_edges(layer_edges))
        per_layer_walks.append(
            walker.simulate_walks(num_walks, walk_length, schema=schema)
        )
    return per_layer_walks
| true | true |
1c30afb5fc363275db4e5235e079d59e4a8af4aa | 1,074 | py | Python | app/tests/conftest.py | philipph77/fifa_tournament_companion | e6d83405273170dbd1a653af2f179c1895bfecdf | [
"CC0-1.0"
] | null | null | null | app/tests/conftest.py | philipph77/fifa_tournament_companion | e6d83405273170dbd1a653af2f179c1895bfecdf | [
"CC0-1.0"
] | null | null | null | app/tests/conftest.py | philipph77/fifa_tournament_companion | e6d83405273170dbd1a653af2f179c1895bfecdf | [
"CC0-1.0"
] | null | null | null | import os
import tempfile
import pytest
from ftc import create_app
from ftc.db import get_db, init_db
with open(os.path.join(os.path.dirname(__file__), 'data.sql'), 'rb') as f:
_data_sql = f.read().decode('utf8')
@pytest.fixture
def app():
db_fd, db_path = tempfile.mkstemp()
app = create_app({
'TESTING': True,
'DATABASE': db_path,
})
with app.app_context():
init_db()
get_db().executescript(_data_sql)
yield app
os.close(db_fd)
os.unlink(db_path)
@pytest.fixture
def client(app):
    """Flask test client for issuing requests without a running server."""
    return app.test_client()
@pytest.fixture
def runner(app):
    """CLI runner for invoking the app's Click commands in tests."""
    return app.test_cli_runner()
class AuthActions(object):
    """Convenience wrapper around the test client for auth flows."""

    def __init__(self, client):
        self._client = client

    def login(self, username='johndoe77', password='test'):
        """POST credentials to the login endpoint; returns the response."""
        payload = {'username': username, 'password': password}
        return self._client.post('/auth/login', data=payload)

    def logout(self):
        """Hit the logout endpoint; returns the response."""
        return self._client.get('/auth/logout')
@pytest.fixture
def auth(client):
return AuthActions(client) | 19.178571 | 74 | 0.638734 | import os
import tempfile
import pytest
from ftc import create_app
from ftc.db import get_db, init_db
with open(os.path.join(os.path.dirname(__file__), 'data.sql'), 'rb') as f:
_data_sql = f.read().decode('utf8')
@pytest.fixture
def app():
db_fd, db_path = tempfile.mkstemp()
app = create_app({
'TESTING': True,
'DATABASE': db_path,
})
with app.app_context():
init_db()
get_db().executescript(_data_sql)
yield app
os.close(db_fd)
os.unlink(db_path)
@pytest.fixture
def client(app):
    """A test client bound to the `app` fixture."""
    return app.test_client()
@pytest.fixture
def runner(app):
    """A Click CLI runner bound to the `app` fixture."""
    return app.test_cli_runner()
class AuthActions(object):
    """Helper that performs login/logout requests through the test client."""

    def __init__(self, client):
        self._client = client

    def login(self, username='johndoe77', password='test'):
        """Log in via POST /auth/login and return the client response."""
        credentials = dict(username=username, password=password)
        return self._client.post('/auth/login', data=credentials)

    def logout(self):
        """Log out via GET /auth/logout and return the client response."""
        return self._client.get('/auth/logout')
@pytest.fixture
def auth(client):
return AuthActions(client) | true | true |
1c30afdc9023b5d18c837fda9f7c402b8d18b3cb | 8,786 | py | Python | ixnetwork_restpy/testplatform/sessions/ixnetwork/globals/protocolstack/fcfportglobals/fcfportglobals.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | ixnetwork_restpy/testplatform/sessions/ixnetwork/globals/protocolstack/fcfportglobals/fcfportglobals.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | ixnetwork_restpy/testplatform/sessions/ixnetwork/globals/protocolstack/fcfportglobals/fcfportglobals.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class FcFportGlobals(Base):
    """StackManager Fc F_Port global settings.

    Encapsulates a user-managed list of fcFportGlobals resources: create
    with add(), retrieve with find(), delete with remove().
    """
    __slots__ = ()
    _SDM_NAME = 'fcFportGlobals'
    # Python attribute name -> server-side (SDM/REST) attribute name.
    _SDM_ATT_MAP = {
        'AcceptPartialConfig': 'acceptPartialConfig',
        'MaxPacketsPerSecond': 'maxPacketsPerSecond',
        'MaxRetries': 'maxRetries',
        'ObjectId': 'objectId',
        'RetryInterval': 'retryInterval',
    }

    def __init__(self, parent):
        super(FcFportGlobals, self).__init__(parent)

    @property
    def AcceptPartialConfig(self):
        """bool: When True, negotiation reports success if at least one
        session is established; when False, only if all sessions are."""
        return self._get_attribute(self._SDM_ATT_MAP['AcceptPartialConfig'])
    @AcceptPartialConfig.setter
    def AcceptPartialConfig(self, value):
        self._set_attribute(self._SDM_ATT_MAP['AcceptPartialConfig'], value)

    @property
    def MaxPacketsPerSecond(self):
        """number: Maximum number of requests transmitted per second."""
        return self._get_attribute(self._SDM_ATT_MAP['MaxPacketsPerSecond'])
    @MaxPacketsPerSecond.setter
    def MaxPacketsPerSecond(self, value):
        self._set_attribute(self._SDM_ATT_MAP['MaxPacketsPerSecond'], value)

    @property
    def MaxRetries(self):
        """number: Request retries per negotiation stage on timeout/error."""
        return self._get_attribute(self._SDM_ATT_MAP['MaxRetries'])
    @MaxRetries.setter
    def MaxRetries(self, value):
        self._set_attribute(self._SDM_ATT_MAP['MaxRetries'], value)

    @property
    def ObjectId(self):
        """str: Unique identifier for this object (read-only)."""
        return self._get_attribute(self._SDM_ATT_MAP['ObjectId'])

    @property
    def RetryInterval(self):
        """number: Seconds to wait for a response before re-sending."""
        return self._get_attribute(self._SDM_ATT_MAP['RetryInterval'])
    @RetryInterval.setter
    def RetryInterval(self, value):
        self._set_attribute(self._SDM_ATT_MAP['RetryInterval'], value)

    # NOTE: update/add/find pass locals() through _map_locals, so their
    # parameter names must match the _SDM_ATT_MAP keys exactly and no
    # extra local variables may be introduced before the call.
    def update(self, AcceptPartialConfig=None, MaxPacketsPerSecond=None, MaxRetries=None, RetryInterval=None):
        """Update this fcFportGlobals resource on the server.

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))

    def add(self, AcceptPartialConfig=None, MaxPacketsPerSecond=None, MaxRetries=None, RetryInterval=None):
        """Create a new fcFportGlobals resource on the server and add it to
        this container.

        Returns
        -------
        - self: with the newly added resources available via iteration/index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))

    def remove(self):
        """Delete all contained fcFportGlobals resources from the server.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        self._delete()

    def find(self, AcceptPartialConfig=None, MaxPacketsPerSecond=None, MaxRetries=None, ObjectId=None, RetryInterval=None):
        """Find fcFportGlobals resources on the server.

        Named parameters are evaluated server-side as regexes; anchor with
        ^...$ for an exact match. With no parameters, all resources are
        retrieved.

        Returns
        -------
        - self: with matching resources available via iteration/index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))

    def read(self, href):
        """Retrieve a single fcFportGlobals instance identified by ``href``.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
| 46 | 271 | 0.699636 |
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class FcFportGlobals(Base):
    """StackManager Fc F_Port global settings (user-managed resource list:
    add() creates, find() retrieves, remove() deletes)."""
    __slots__ = ()
    _SDM_NAME = 'fcFportGlobals'
    # Maps Python-facing attribute names to REST/SDM attribute names.
    _SDM_ATT_MAP = {
        'AcceptPartialConfig': 'acceptPartialConfig',
        'MaxPacketsPerSecond': 'maxPacketsPerSecond',
        'MaxRetries': 'maxRetries',
        'ObjectId': 'objectId',
        'RetryInterval': 'retryInterval',
    }

    def __init__(self, parent):
        super(FcFportGlobals, self).__init__(parent)

    @property
    def AcceptPartialConfig(self):
        """bool: report success with at least one established session (True)
        or require all sessions to be established (False)."""
        return self._get_attribute(self._SDM_ATT_MAP['AcceptPartialConfig'])
    @AcceptPartialConfig.setter
    def AcceptPartialConfig(self, value):
        self._set_attribute(self._SDM_ATT_MAP['AcceptPartialConfig'], value)

    @property
    def MaxPacketsPerSecond(self):
        """number: maximum requests transmitted per second."""
        return self._get_attribute(self._SDM_ATT_MAP['MaxPacketsPerSecond'])
    @MaxPacketsPerSecond.setter
    def MaxPacketsPerSecond(self, value):
        self._set_attribute(self._SDM_ATT_MAP['MaxPacketsPerSecond'], value)

    @property
    def MaxRetries(self):
        """number: request retries per negotiation stage on timeout/error."""
        return self._get_attribute(self._SDM_ATT_MAP['MaxRetries'])
    @MaxRetries.setter
    def MaxRetries(self, value):
        self._set_attribute(self._SDM_ATT_MAP['MaxRetries'], value)

    @property
    def ObjectId(self):
        """str: unique identifier for this object (read-only)."""
        return self._get_attribute(self._SDM_ATT_MAP['ObjectId'])

    @property
    def RetryInterval(self):
        """number: seconds to wait for a response before re-sending."""
        return self._get_attribute(self._SDM_ATT_MAP['RetryInterval'])
    @RetryInterval.setter
    def RetryInterval(self, value):
        self._set_attribute(self._SDM_ATT_MAP['RetryInterval'], value)

    # The methods below forward locals() into _map_locals, so parameter
    # names must match _SDM_ATT_MAP keys and no extra locals may be added.
    def update(self, AcceptPartialConfig=None, MaxPacketsPerSecond=None, MaxRetries=None, RetryInterval=None):
        """Update this resource on the server; raises ServerError on failure."""
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))

    def add(self, AcceptPartialConfig=None, MaxPacketsPerSecond=None, MaxRetries=None, RetryInterval=None):
        """Create a resource on the server and add it to this container;
        returns self. Raises ServerError on failure."""
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))

    def remove(self):
        """Delete all contained resources from the server. Raises
        NotFoundError or ServerError on failure."""
        self._delete()

    def find(self, AcceptPartialConfig=None, MaxPacketsPerSecond=None, MaxRetries=None, ObjectId=None, RetryInterval=None):
        """Retrieve matching resources (server-side regex match on the named
        parameters; anchor with ^...$ for exact). Returns self."""
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))

    def read(self, href):
        """Retrieve a single instance by href. Raises NotFoundError or
        ServerError on failure."""
        return self._read(href)
| true | true |
1c30b0a5945d99a0d9ec01db7440cfdee3da7965 | 68,626 | py | Python | mock_icd_generator.py | nyorain/vkmockicd | 08a31a1fcd6239bc7f7a81233a9c2827b09e2d5c | [
"Apache-2.0"
] | null | null | null | mock_icd_generator.py | nyorain/vkmockicd | 08a31a1fcd6239bc7f7a81233a9c2827b09e2d5c | [
"Apache-2.0"
] | null | null | null | mock_icd_generator.py | nyorain/vkmockicd | 08a31a1fcd6239bc7f7a81233a9c2827b09e2d5c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3 -i
#
# Copyright (c) 2015-2021 The Khronos Group Inc.
# Copyright (c) 2015-2021 Valve Corporation
# Copyright (c) 2015-2021 LunarG, Inc.
# Copyright (c) 2015-2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Tobin Ehlis <tobine@google.com>
#
# This script generates a Mock ICD that intercepts almost all Vulkan
# functions. That layer is not intended to be useful or even compilable
# in its initial state. Rather it's intended to be a starting point that
# can be copied and customized to assist in creation of a new layer.
import os,re,sys
from generator import *
from common_codegen import *
# Mock header code
HEADER_C_CODE = '''
using mutex_t = std::mutex;
using lock_guard_t = std::lock_guard<mutex_t>;
using unique_lock_t = std::unique_lock<mutex_t>;
static mutex_t global_lock;
static uint64_t global_unique_handle = 1;
static const uint32_t SUPPORTED_LOADER_ICD_INTERFACE_VERSION = 5;
static uint32_t loader_interface_version = 0;
static bool negotiate_loader_icd_interface_called = false;
static void* CreateDispObjHandle() {
auto handle = new VK_LOADER_DATA;
set_loader_magic_value(handle);
return handle;
}
static void DestroyDispObjHandle(void* handle) {
delete reinterpret_cast<VK_LOADER_DATA*>(handle);
}
'''
# Manual code at the top of the cpp source file
SOURCE_CPP_PREFIX = '''
using std::unordered_map;
static constexpr uint32_t icd_physical_device_count = 1;
static constexpr uint32_t kSupportedVulkanAPIVersion = VK_API_VERSION_1_1;
static unordered_map<VkInstance, std::array<VkPhysicalDevice, icd_physical_device_count>> physical_device_map;
// Map device memory handle to any mapped allocations that we'll need to free on unmap
static unordered_map<VkDeviceMemory, std::vector<void*>> mapped_memory_map;
// Map device memory allocation handle to the size
static unordered_map<VkDeviceMemory, VkDeviceSize> allocated_memory_size_map;
static unordered_map<VkDevice, unordered_map<uint32_t, unordered_map<uint32_t, VkQueue>>> queue_map;
static unordered_map<VkDevice, unordered_map<VkBuffer, VkBufferCreateInfo>> buffer_map;
static unordered_map<VkDevice, unordered_map<VkImage, VkDeviceSize>> image_memory_size_map;
static unordered_map<VkCommandPool, std::vector<VkCommandBuffer>> command_pool_buffer_map;
static constexpr uint32_t icd_swapchain_image_count = 1;
static unordered_map<VkSwapchainKHR, VkImage[icd_swapchain_image_count]> swapchain_image_map;
// TODO: Would like to codegen this but limits aren't in XML
static VkPhysicalDeviceLimits SetLimits(VkPhysicalDeviceLimits *limits) {
limits->maxImageDimension1D = 4096;
limits->maxImageDimension2D = 4096;
limits->maxImageDimension3D = 256;
limits->maxImageDimensionCube = 4096;
limits->maxImageArrayLayers = 256;
limits->maxTexelBufferElements = 65536;
limits->maxUniformBufferRange = 16384;
limits->maxStorageBufferRange = 134217728;
limits->maxPushConstantsSize = 128;
limits->maxMemoryAllocationCount = 4096;
limits->maxSamplerAllocationCount = 4000;
limits->bufferImageGranularity = 1;
limits->sparseAddressSpaceSize = 2147483648;
limits->maxBoundDescriptorSets = 4;
limits->maxPerStageDescriptorSamplers = 16;
limits->maxPerStageDescriptorUniformBuffers = 12;
limits->maxPerStageDescriptorStorageBuffers = 4;
limits->maxPerStageDescriptorSampledImages = 16;
limits->maxPerStageDescriptorStorageImages = 4;
limits->maxPerStageDescriptorInputAttachments = 4;
limits->maxPerStageResources = 128;
limits->maxDescriptorSetSamplers = 96;
limits->maxDescriptorSetUniformBuffers = 72;
limits->maxDescriptorSetUniformBuffersDynamic = 8;
limits->maxDescriptorSetStorageBuffers = 24;
limits->maxDescriptorSetStorageBuffersDynamic = 4;
limits->maxDescriptorSetSampledImages = 96;
limits->maxDescriptorSetStorageImages = 24;
limits->maxDescriptorSetInputAttachments = 4;
limits->maxVertexInputAttributes = 16;
limits->maxVertexInputBindings = 16;
limits->maxVertexInputAttributeOffset = 2047;
limits->maxVertexInputBindingStride = 2048;
limits->maxVertexOutputComponents = 64;
limits->maxTessellationGenerationLevel = 64;
limits->maxTessellationPatchSize = 32;
limits->maxTessellationControlPerVertexInputComponents = 64;
limits->maxTessellationControlPerVertexOutputComponents = 64;
limits->maxTessellationControlPerPatchOutputComponents = 120;
limits->maxTessellationControlTotalOutputComponents = 2048;
limits->maxTessellationEvaluationInputComponents = 64;
limits->maxTessellationEvaluationOutputComponents = 64;
limits->maxGeometryShaderInvocations = 32;
limits->maxGeometryInputComponents = 64;
limits->maxGeometryOutputComponents = 64;
limits->maxGeometryOutputVertices = 256;
limits->maxGeometryTotalOutputComponents = 1024;
limits->maxFragmentInputComponents = 64;
limits->maxFragmentOutputAttachments = 4;
limits->maxFragmentDualSrcAttachments = 1;
limits->maxFragmentCombinedOutputResources = 4;
limits->maxComputeSharedMemorySize = 16384;
limits->maxComputeWorkGroupCount[0] = 65535;
limits->maxComputeWorkGroupCount[1] = 65535;
limits->maxComputeWorkGroupCount[2] = 65535;
limits->maxComputeWorkGroupInvocations = 128;
limits->maxComputeWorkGroupSize[0] = 128;
limits->maxComputeWorkGroupSize[1] = 128;
limits->maxComputeWorkGroupSize[2] = 64;
limits->subPixelPrecisionBits = 4;
limits->subTexelPrecisionBits = 4;
limits->mipmapPrecisionBits = 4;
limits->maxDrawIndexedIndexValue = UINT32_MAX;
limits->maxDrawIndirectCount = UINT16_MAX;
limits->maxSamplerLodBias = 2.0f;
limits->maxSamplerAnisotropy = 16;
limits->maxViewports = 16;
limits->maxViewportDimensions[0] = 4096;
limits->maxViewportDimensions[1] = 4096;
limits->viewportBoundsRange[0] = -8192;
limits->viewportBoundsRange[1] = 8191;
limits->viewportSubPixelBits = 0;
limits->minMemoryMapAlignment = 64;
limits->minTexelBufferOffsetAlignment = 16;
limits->minUniformBufferOffsetAlignment = 16;
limits->minStorageBufferOffsetAlignment = 16;
limits->minTexelOffset = -8;
limits->maxTexelOffset = 7;
limits->minTexelGatherOffset = -8;
limits->maxTexelGatherOffset = 7;
limits->minInterpolationOffset = 0.0f;
limits->maxInterpolationOffset = 0.5f;
limits->subPixelInterpolationOffsetBits = 4;
limits->maxFramebufferWidth = 4096;
limits->maxFramebufferHeight = 4096;
limits->maxFramebufferLayers = 256;
limits->framebufferColorSampleCounts = 0x7F;
limits->framebufferDepthSampleCounts = 0x7F;
limits->framebufferStencilSampleCounts = 0x7F;
limits->framebufferNoAttachmentsSampleCounts = 0x7F;
limits->maxColorAttachments = 4;
limits->sampledImageColorSampleCounts = 0x7F;
limits->sampledImageIntegerSampleCounts = 0x7F;
limits->sampledImageDepthSampleCounts = 0x7F;
limits->sampledImageStencilSampleCounts = 0x7F;
limits->storageImageSampleCounts = 0x7F;
limits->maxSampleMaskWords = 1;
limits->timestampComputeAndGraphics = VK_TRUE;
limits->timestampPeriod = 1;
limits->maxClipDistances = 8;
limits->maxCullDistances = 8;
limits->maxCombinedClipAndCullDistances = 8;
limits->discreteQueuePriorities = 2;
limits->pointSizeRange[0] = 1.0f;
limits->pointSizeRange[1] = 64.0f;
limits->lineWidthRange[0] = 1.0f;
limits->lineWidthRange[1] = 8.0f;
limits->pointSizeGranularity = 1.0f;
limits->lineWidthGranularity = 1.0f;
limits->strictLines = VK_TRUE;
limits->standardSampleLocations = VK_TRUE;
limits->optimalBufferCopyOffsetAlignment = 1;
limits->optimalBufferCopyRowPitchAlignment = 1;
limits->nonCoherentAtomSize = 256;
return *limits;
}
void SetBoolArrayTrue(VkBool32* bool_array, uint32_t num_bools)
{
for (uint32_t i = 0; i < num_bools; ++i) {
bool_array[i] = VK_TRUE;
}
}
'''
# Manual code at the end of the cpp source file
SOURCE_CPP_POSTFIX = '''
static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
// TODO: This function should only care about physical device functions and return nullptr for other functions
const auto &item = name_to_funcptr_map.find(funcName);
if (item != name_to_funcptr_map.end()) {
return reinterpret_cast<PFN_vkVoidFunction>(item->second);
}
// Mock should intercept all functions so if we get here just return null
return nullptr;
}
} // namespace vkmock
#if defined(__GNUC__) && __GNUC__ >= 4
#define EXPORT __attribute__((visibility("default")))
#elif defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590)
#define EXPORT __attribute__((visibility("default")))
#elif defined(_WIN32)
#define EXPORT __declspec(dllexport)
#else
#define EXPORT
#endif
extern "C" {
EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(VkInstance instance, const char* pName) {
if (!vkmock::negotiate_loader_icd_interface_called) {
vkmock::loader_interface_version = 1;
}
return vkmock::GetInstanceProcAddr(instance, pName);
}
EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetPhysicalDeviceProcAddr(VkInstance instance, const char* pName) {
return vkmock::GetPhysicalDeviceProcAddr(instance, pName);
}
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t* pSupportedVersion) {
vkmock::negotiate_loader_icd_interface_called = true;
vkmock::loader_interface_version = *pSupportedVersion;
if (*pSupportedVersion > vkmock::SUPPORTED_LOADER_ICD_INTERFACE_VERSION) {
*pSupportedVersion = vkmock::SUPPORTED_LOADER_ICD_INTERFACE_VERSION;
}
return VK_SUCCESS;
}
EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroySurfaceKHR(
VkInstance instance,
VkSurfaceKHR surface,
const VkAllocationCallbacks* pAllocator)
{
vkmock::DestroySurfaceKHR(instance, surface, pAllocator);
}
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceSupportKHR(
VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
VkSurfaceKHR surface,
VkBool32* pSupported)
{
return vkmock::GetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface, pSupported);
}
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
VkSurfaceCapabilitiesKHR* pSurfaceCapabilities)
{
return vkmock::GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities);
}
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormatsKHR(
VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
uint32_t* pSurfaceFormatCount,
VkSurfaceFormatKHR* pSurfaceFormats)
{
return vkmock::GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount, pSurfaceFormats);
}
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModesKHR(
VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
uint32_t* pPresentModeCount,
VkPresentModeKHR* pPresentModes)
{
return vkmock::GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, pPresentModeCount, pPresentModes);
}
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayPlaneSurfaceKHR(
VkInstance instance,
const VkDisplaySurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface)
{
return vkmock::CreateDisplayPlaneSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface);
}
#ifdef VK_USE_PLATFORM_XLIB_KHR
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateXlibSurfaceKHR(
VkInstance instance,
const VkXlibSurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface)
{
return vkmock::CreateXlibSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface);
}
#endif /* VK_USE_PLATFORM_XLIB_KHR */
#ifdef VK_USE_PLATFORM_XCB_KHR
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateXcbSurfaceKHR(
VkInstance instance,
const VkXcbSurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface)
{
return vkmock::CreateXcbSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface);
}
#endif /* VK_USE_PLATFORM_XCB_KHR */
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateWaylandSurfaceKHR(
VkInstance instance,
const VkWaylandSurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface)
{
return vkmock::CreateWaylandSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface);
}
#endif /* VK_USE_PLATFORM_WAYLAND_KHR */
#ifdef VK_USE_PLATFORM_ANDROID_KHR
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateAndroidSurfaceKHR(
VkInstance instance,
const VkAndroidSurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface)
{
return vkmock::CreateAndroidSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface);
}
#endif /* VK_USE_PLATFORM_ANDROID_KHR */
#ifdef VK_USE_PLATFORM_WIN32_KHR
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateWin32SurfaceKHR(
VkInstance instance,
const VkWin32SurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface)
{
return vkmock::CreateWin32SurfaceKHR(instance, pCreateInfo, pAllocator, pSurface);
}
#endif /* VK_USE_PLATFORM_WIN32_KHR */
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupSurfacePresentModesKHR(
VkDevice device,
VkSurfaceKHR surface,
VkDeviceGroupPresentModeFlagsKHR* pModes)
{
return vkmock::GetDeviceGroupSurfacePresentModesKHR(device, surface, pModes);
}
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDevicePresentRectanglesKHR(
VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
uint32_t* pRectCount,
VkRect2D* pRects)
{
return vkmock::GetPhysicalDevicePresentRectanglesKHR(physicalDevice, surface, pRectCount, pRects);
}
#ifdef VK_USE_PLATFORM_VI_NN
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateViSurfaceNN(
VkInstance instance,
const VkViSurfaceCreateInfoNN* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface)
{
return vkmock::CreateViSurfaceNN(instance, pCreateInfo, pAllocator, pSurface);
}
#endif /* VK_USE_PLATFORM_VI_NN */
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2EXT(
VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
VkSurfaceCapabilities2EXT* pSurfaceCapabilities)
{
return vkmock::GetPhysicalDeviceSurfaceCapabilities2EXT(physicalDevice, surface, pSurfaceCapabilities);
}
#ifdef VK_USE_PLATFORM_IOS_MVK
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateIOSSurfaceMVK(
VkInstance instance,
const VkIOSSurfaceCreateInfoMVK* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface)
{
return vkmock::CreateIOSSurfaceMVK(instance, pCreateInfo, pAllocator, pSurface);
}
#endif /* VK_USE_PLATFORM_IOS_MVK */
#ifdef VK_USE_PLATFORM_MACOS_MVK
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateMacOSSurfaceMVK(
VkInstance instance,
const VkMacOSSurfaceCreateInfoMVK* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface)
{
return vkmock::CreateMacOSSurfaceMVK(instance, pCreateInfo, pAllocator, pSurface);
}
#endif /* VK_USE_PLATFORM_MACOS_MVK */
} // end extern "C"
'''
CUSTOM_C_INTERCEPTS = {
'vkCreateInstance': '''
// TODO: If loader ver <=4 ICD must fail with VK_ERROR_INCOMPATIBLE_DRIVER for all vkCreateInstance calls with
// apiVersion set to > Vulkan 1.0 because the loader is still at interface version <= 4. Otherwise, the
// ICD should behave as normal.
if (loader_interface_version <= 4) {
return VK_ERROR_INCOMPATIBLE_DRIVER;
}
*pInstance = (VkInstance)CreateDispObjHandle();
for (auto& physical_device : physical_device_map[*pInstance])
physical_device = (VkPhysicalDevice)CreateDispObjHandle();
// TODO: If emulating specific device caps, will need to add intelligence here
return VK_SUCCESS;
''',
'vkDestroyInstance': '''
if (instance) {
for (const auto physical_device : physical_device_map.at(instance))
DestroyDispObjHandle((void*)physical_device);
physical_device_map.erase(instance);
DestroyDispObjHandle((void*)instance);
}
''',
'vkAllocateCommandBuffers': '''
unique_lock_t lock(global_lock);
for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; ++i) {
pCommandBuffers[i] = (VkCommandBuffer)CreateDispObjHandle();
command_pool_buffer_map[pAllocateInfo->commandPool].push_back(pCommandBuffers[i]);
}
return VK_SUCCESS;
''',
'vkFreeCommandBuffers': '''
unique_lock_t lock(global_lock);
for (auto i = 0u; i < commandBufferCount; ++i) {
if (!pCommandBuffers[i]) {
continue;
}
for (auto& pair : command_pool_buffer_map) {
auto& cbs = pair.second;
auto it = std::find(cbs.begin(), cbs.end(), pCommandBuffers[i]);
if (it != cbs.end()) {
cbs.erase(it);
}
}
DestroyDispObjHandle((void*) pCommandBuffers[i]);
}
''',
'vkDestroyCommandPool': '''
// destroy command buffers for this pool
unique_lock_t lock(global_lock);
auto it = command_pool_buffer_map.find(commandPool);
if (it != command_pool_buffer_map.end()) {
for (auto& cb : it->second) {
DestroyDispObjHandle((void*) cb);
}
command_pool_buffer_map.erase(it);
}
''',
'vkEnumeratePhysicalDevices': '''
VkResult result_code = VK_SUCCESS;
if (pPhysicalDevices) {
const auto return_count = (std::min)(*pPhysicalDeviceCount, icd_physical_device_count);
for (uint32_t i = 0; i < return_count; ++i) pPhysicalDevices[i] = physical_device_map.at(instance)[i];
if (return_count < icd_physical_device_count) result_code = VK_INCOMPLETE;
*pPhysicalDeviceCount = return_count;
} else {
*pPhysicalDeviceCount = icd_physical_device_count;
}
return result_code;
''',
'vkCreateDevice': '''
*pDevice = (VkDevice)CreateDispObjHandle();
// TODO: If emulating specific device caps, will need to add intelligence here
return VK_SUCCESS;
''',
'vkDestroyDevice': '''
unique_lock_t lock(global_lock);
// First destroy sub-device objects
// Destroy Queues
for (auto queue_family_map_pair : queue_map[device]) {
for (auto index_queue_pair : queue_map[device][queue_family_map_pair.first]) {
DestroyDispObjHandle((void*)index_queue_pair.second);
}
}
queue_map.erase(device);
buffer_map.erase(device);
image_memory_size_map.erase(device);
// Now destroy device
DestroyDispObjHandle((void*)device);
// TODO: If emulating specific device caps, will need to add intelligence here
''',
'vkGetDeviceQueue': '''
unique_lock_t lock(global_lock);
auto queue = queue_map[device][queueFamilyIndex][queueIndex];
if (queue) {
*pQueue = queue;
} else {
*pQueue = queue_map[device][queueFamilyIndex][queueIndex] = (VkQueue)CreateDispObjHandle();
}
// TODO: If emulating specific device caps, will need to add intelligence here
return;
''',
'vkGetDeviceQueue2': '''
GetDeviceQueue(device, pQueueInfo->queueFamilyIndex, pQueueInfo->queueIndex, pQueue);
// TODO: Add further support for GetDeviceQueue2 features
''',
'vkEnumerateInstanceLayerProperties': '''
return VK_SUCCESS;
''',
'vkEnumerateInstanceVersion': '''
*pApiVersion = kSupportedVulkanAPIVersion;
return VK_SUCCESS;
''',
'vkEnumerateDeviceLayerProperties': '''
return VK_SUCCESS;
''',
'vkEnumerateInstanceExtensionProperties': '''
// If requesting number of extensions, return that
if (!pLayerName) {
if (!pProperties) {
*pPropertyCount = (uint32_t)instance_extension_map.size();
} else {
uint32_t i = 0;
for (const auto &name_ver_pair : instance_extension_map) {
if (i == *pPropertyCount) {
break;
}
std::strncpy(pProperties[i].extensionName, name_ver_pair.first.c_str(), sizeof(pProperties[i].extensionName));
pProperties[i].extensionName[sizeof(pProperties[i].extensionName) - 1] = 0;
pProperties[i].specVersion = name_ver_pair.second;
++i;
}
if (i != instance_extension_map.size()) {
return VK_INCOMPLETE;
}
}
}
// If requesting extension properties, fill in data struct for number of extensions
return VK_SUCCESS;
''',
'vkEnumerateDeviceExtensionProperties': '''
// If requesting number of extensions, return that
if (!pLayerName) {
if (!pProperties) {
*pPropertyCount = (uint32_t)device_extension_map.size();
} else {
uint32_t i = 0;
for (const auto &name_ver_pair : device_extension_map) {
if (i == *pPropertyCount) {
break;
}
std::strncpy(pProperties[i].extensionName, name_ver_pair.first.c_str(), sizeof(pProperties[i].extensionName));
pProperties[i].extensionName[sizeof(pProperties[i].extensionName) - 1] = 0;
pProperties[i].specVersion = name_ver_pair.second;
++i;
}
if (i != device_extension_map.size()) {
return VK_INCOMPLETE;
}
}
}
// If requesting extension properties, fill in data struct for number of extensions
return VK_SUCCESS;
''',
'vkGetPhysicalDeviceSurfacePresentModesKHR': '''
// Currently always say that all present modes are supported
if (!pPresentModes) {
*pPresentModeCount = 6;
} else {
// Intentionally falling through and just filling however many modes are requested
switch(*pPresentModeCount) {
case 6:
pPresentModes[5] = VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR;
// fall through
case 5:
pPresentModes[4] = VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR;
// fall through
case 4:
pPresentModes[3] = VK_PRESENT_MODE_FIFO_RELAXED_KHR;
// fall through
case 3:
pPresentModes[2] = VK_PRESENT_MODE_FIFO_KHR;
// fall through
case 2:
pPresentModes[1] = VK_PRESENT_MODE_MAILBOX_KHR;
// fall through
default:
pPresentModes[0] = VK_PRESENT_MODE_IMMEDIATE_KHR;
break;
}
}
return VK_SUCCESS;
''',
'vkGetPhysicalDeviceSurfaceFormatsKHR': '''
// Currently always say that RGBA8 & BGRA8 are supported
if (!pSurfaceFormats) {
*pSurfaceFormatCount = 2;
} else {
// Intentionally falling through and just filling however many types are requested
switch(*pSurfaceFormatCount) {
case 2:
pSurfaceFormats[1].format = VK_FORMAT_R8G8B8A8_UNORM;
pSurfaceFormats[1].colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
// fall through
default:
pSurfaceFormats[0].format = VK_FORMAT_B8G8R8A8_UNORM;
pSurfaceFormats[0].colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
break;
}
}
return VK_SUCCESS;
''',
'vkGetPhysicalDeviceSurfaceFormats2KHR': '''
// Currently always say that RGBA8 & BGRA8 are supported
if (!pSurfaceFormats) {
*pSurfaceFormatCount = 2;
} else {
// Intentionally falling through and just filling however many types are requested
switch(*pSurfaceFormatCount) {
case 2:
pSurfaceFormats[1].pNext = nullptr;
pSurfaceFormats[1].surfaceFormat.format = VK_FORMAT_R8G8B8A8_UNORM;
pSurfaceFormats[1].surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
// fall through
default:
pSurfaceFormats[1].pNext = nullptr;
pSurfaceFormats[0].surfaceFormat.format = VK_FORMAT_B8G8R8A8_UNORM;
pSurfaceFormats[0].surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
break;
}
}
return VK_SUCCESS;
''',
'vkGetPhysicalDeviceSurfaceSupportKHR': '''
// Currently say that all surface/queue combos are supported
*pSupported = VK_TRUE;
return VK_SUCCESS;
''',
'vkGetPhysicalDeviceSurfaceCapabilitiesKHR': '''
// In general just say max supported is available for requested surface
pSurfaceCapabilities->minImageCount = 1;
pSurfaceCapabilities->maxImageCount = 0;
pSurfaceCapabilities->currentExtent.width = 0xFFFFFFFF;
pSurfaceCapabilities->currentExtent.height = 0xFFFFFFFF;
pSurfaceCapabilities->minImageExtent.width = 1;
pSurfaceCapabilities->minImageExtent.height = 1;
pSurfaceCapabilities->maxImageExtent.width = 0xFFFF;
pSurfaceCapabilities->maxImageExtent.height = 0xFFFF;
pSurfaceCapabilities->maxImageArrayLayers = 128;
pSurfaceCapabilities->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR |
VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR |
VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR |
VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR |
VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR |
VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR |
VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR |
VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR |
VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR;
pSurfaceCapabilities->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
pSurfaceCapabilities->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR |
VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR |
VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR;
pSurfaceCapabilities->supportedUsageFlags = VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
VK_IMAGE_USAGE_TRANSFER_DST_BIT |
VK_IMAGE_USAGE_SAMPLED_BIT |
VK_IMAGE_USAGE_STORAGE_BIT |
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT |
VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT |
VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
return VK_SUCCESS;
''',
'vkGetPhysicalDeviceSurfaceCapabilities2KHR': '''
GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, pSurfaceInfo->surface, &pSurfaceCapabilities->surfaceCapabilities);
return VK_SUCCESS;
''',
'vkGetInstanceProcAddr': '''
if (!negotiate_loader_icd_interface_called) {
loader_interface_version = 0;
}
const auto &item = name_to_funcptr_map.find(pName);
if (item != name_to_funcptr_map.end()) {
return reinterpret_cast<PFN_vkVoidFunction>(item->second);
}
// Mock should intercept all functions so if we get here just return null
return nullptr;
''',
'vkGetDeviceProcAddr': '''
return GetInstanceProcAddr(nullptr, pName);
''',
'vkGetPhysicalDeviceMemoryProperties': '''
pMemoryProperties->memoryTypeCount = 2;
pMemoryProperties->memoryTypes[0].propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
pMemoryProperties->memoryTypes[0].heapIndex = 0;
pMemoryProperties->memoryTypes[1].propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
pMemoryProperties->memoryTypes[1].heapIndex = 1;
pMemoryProperties->memoryHeapCount = 2;
pMemoryProperties->memoryHeaps[0].flags = 0;
pMemoryProperties->memoryHeaps[0].size = 8000000000;
pMemoryProperties->memoryHeaps[1].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
pMemoryProperties->memoryHeaps[1].size = 8000000000;
''',
'vkGetPhysicalDeviceMemoryProperties2KHR': '''
GetPhysicalDeviceMemoryProperties(physicalDevice, &pMemoryProperties->memoryProperties);
''',
'vkGetPhysicalDeviceQueueFamilyProperties': '''
if (!pQueueFamilyProperties) {
*pQueueFamilyPropertyCount = 1;
} else {
if (*pQueueFamilyPropertyCount) {
pQueueFamilyProperties[0].queueFlags = VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT | VK_QUEUE_SPARSE_BINDING_BIT;
pQueueFamilyProperties[0].queueCount = 1;
pQueueFamilyProperties[0].timestampValidBits = 0;
pQueueFamilyProperties[0].minImageTransferGranularity = {1,1,1};
}
}
''',
'vkGetPhysicalDeviceQueueFamilyProperties2KHR': '''
if (pQueueFamilyPropertyCount && pQueueFamilyProperties) {
GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount, &pQueueFamilyProperties->queueFamilyProperties);
} else {
GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount, nullptr);
}
''',
'vkGetPhysicalDeviceFeatures': '''
uint32_t num_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
VkBool32 *bool_array = &pFeatures->robustBufferAccess;
SetBoolArrayTrue(bool_array, num_bools);
''',
'vkGetPhysicalDeviceFeatures2KHR': '''
GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
uint32_t num_bools = 0; // Count number of VkBool32s in extension structs
VkBool32* feat_bools = nullptr;
const auto *desc_idx_features = lvl_find_in_chain<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>(pFeatures->pNext);
if (desc_idx_features) {
const auto bool_size = sizeof(VkPhysicalDeviceDescriptorIndexingFeaturesEXT) - offsetof(VkPhysicalDeviceDescriptorIndexingFeaturesEXT, shaderInputAttachmentArrayDynamicIndexing);
num_bools = bool_size/sizeof(VkBool32);
feat_bools = (VkBool32*)&desc_idx_features->shaderInputAttachmentArrayDynamicIndexing;
SetBoolArrayTrue(feat_bools, num_bools);
}
const auto *blendop_features = lvl_find_in_chain<VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT>(pFeatures->pNext);
if (blendop_features) {
const auto bool_size = sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT) - offsetof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT, advancedBlendCoherentOperations);
num_bools = bool_size/sizeof(VkBool32);
feat_bools = (VkBool32*)&blendop_features->advancedBlendCoherentOperations;
SetBoolArrayTrue(feat_bools, num_bools);
}
''',
'vkGetPhysicalDeviceFormatProperties': '''
if (VK_FORMAT_UNDEFINED == format) {
*pFormatProperties = { 0x0, 0x0, 0x0 };
} else {
// Default to a color format, skip DS bit
*pFormatProperties = { 0x00FFFDFF, 0x00FFFDFF, 0x00FFFDFF };
switch (format) {
case VK_FORMAT_D16_UNORM:
case VK_FORMAT_X8_D24_UNORM_PACK32:
case VK_FORMAT_D32_SFLOAT:
case VK_FORMAT_S8_UINT:
case VK_FORMAT_D16_UNORM_S8_UINT:
case VK_FORMAT_D24_UNORM_S8_UINT:
case VK_FORMAT_D32_SFLOAT_S8_UINT:
// Don't set color bits for DS formats
*pFormatProperties = { 0x00FFFE7F, 0x00FFFE7F, 0x00FFFE7F };
break;
default:
break;
}
}
''',
'vkGetPhysicalDeviceFormatProperties2KHR': '''
GetPhysicalDeviceFormatProperties(physicalDevice, format, &pFormatProperties->formatProperties);
VkFormatProperties3KHR *props_3 = lvl_find_mod_in_chain<VkFormatProperties3KHR>(pFormatProperties->pNext);
if (props_3) {
props_3->linearTilingFeatures = pFormatProperties->formatProperties.linearTilingFeatures;
props_3->optimalTilingFeatures = pFormatProperties->formatProperties.optimalTilingFeatures;
props_3->bufferFeatures = pFormatProperties->formatProperties.bufferFeatures;
}
''',
'vkGetPhysicalDeviceImageFormatProperties': '''
// A hardcoded unsupported format
if (format == VK_FORMAT_E5B9G9R9_UFLOAT_PACK32) {
return VK_ERROR_FORMAT_NOT_SUPPORTED;
}
// TODO: Just hard-coding some values for now
// TODO: If tiling is linear, limit the mips, levels, & sample count
if (VK_IMAGE_TILING_LINEAR == tiling) {
*pImageFormatProperties = { { 4096, 4096, 256 }, 1, 1, VK_SAMPLE_COUNT_1_BIT, 4294967296 };
} else {
// We hard-code support for all sample counts except 64 bits.
*pImageFormatProperties = { { 4096, 4096, 256 }, 12, 256, 0x7F & ~VK_SAMPLE_COUNT_64_BIT, 4294967296 };
}
return VK_SUCCESS;
''',
'vkGetPhysicalDeviceImageFormatProperties2KHR': '''
GetPhysicalDeviceImageFormatProperties(physicalDevice, pImageFormatInfo->format, pImageFormatInfo->type, pImageFormatInfo->tiling, pImageFormatInfo->usage, pImageFormatInfo->flags, &pImageFormatProperties->imageFormatProperties);
return VK_SUCCESS;
''',
'vkGetPhysicalDeviceProperties': '''
// TODO: Just hard-coding some values for now
pProperties->apiVersion = kSupportedVulkanAPIVersion;
pProperties->driverVersion = 1;
pProperties->vendorID = 0xba5eba11;
pProperties->deviceID = 0xf005ba11;
pProperties->deviceType = VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU;
//std::string devName = "Vulkan Mock Device";
strcpy(pProperties->deviceName, "Vulkan Mock Device");
pProperties->pipelineCacheUUID[0] = 18;
pProperties->limits = SetLimits(&pProperties->limits);
pProperties->sparseProperties = { VK_TRUE, VK_TRUE, VK_TRUE, VK_TRUE, VK_TRUE };
''',
'vkGetPhysicalDeviceProperties2KHR': '''
GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
const auto *desc_idx_props = lvl_find_in_chain<VkPhysicalDeviceDescriptorIndexingPropertiesEXT>(pProperties->pNext);
if (desc_idx_props) {
VkPhysicalDeviceDescriptorIndexingPropertiesEXT* write_props = (VkPhysicalDeviceDescriptorIndexingPropertiesEXT*)desc_idx_props;
write_props->maxUpdateAfterBindDescriptorsInAllPools = 500000;
write_props->shaderUniformBufferArrayNonUniformIndexingNative = false;
write_props->shaderSampledImageArrayNonUniformIndexingNative = false;
write_props->shaderStorageBufferArrayNonUniformIndexingNative = false;
write_props->shaderStorageImageArrayNonUniformIndexingNative = false;
write_props->shaderInputAttachmentArrayNonUniformIndexingNative = false;
write_props->robustBufferAccessUpdateAfterBind = true;
write_props->quadDivergentImplicitLod = true;
write_props->maxPerStageDescriptorUpdateAfterBindSamplers = 500000;
write_props->maxPerStageDescriptorUpdateAfterBindUniformBuffers = 500000;
write_props->maxPerStageDescriptorUpdateAfterBindStorageBuffers = 500000;
write_props->maxPerStageDescriptorUpdateAfterBindSampledImages = 500000;
write_props->maxPerStageDescriptorUpdateAfterBindStorageImages = 500000;
write_props->maxPerStageDescriptorUpdateAfterBindInputAttachments = 500000;
write_props->maxPerStageUpdateAfterBindResources = 500000;
write_props->maxDescriptorSetUpdateAfterBindSamplers = 500000;
write_props->maxDescriptorSetUpdateAfterBindUniformBuffers = 96;
write_props->maxDescriptorSetUpdateAfterBindUniformBuffersDynamic = 8;
write_props->maxDescriptorSetUpdateAfterBindStorageBuffers = 500000;
write_props->maxDescriptorSetUpdateAfterBindStorageBuffersDynamic = 4;
write_props->maxDescriptorSetUpdateAfterBindSampledImages = 500000;
write_props->maxDescriptorSetUpdateAfterBindStorageImages = 500000;
write_props->maxDescriptorSetUpdateAfterBindInputAttachments = 500000;
}
const auto *push_descriptor_props = lvl_find_in_chain<VkPhysicalDevicePushDescriptorPropertiesKHR>(pProperties->pNext);
if (push_descriptor_props) {
VkPhysicalDevicePushDescriptorPropertiesKHR* write_props = (VkPhysicalDevicePushDescriptorPropertiesKHR*)push_descriptor_props;
write_props->maxPushDescriptors = 256;
}
const auto *depth_stencil_resolve_props = lvl_find_in_chain<VkPhysicalDeviceDepthStencilResolvePropertiesKHR>(pProperties->pNext);
if (depth_stencil_resolve_props) {
VkPhysicalDeviceDepthStencilResolvePropertiesKHR* write_props = (VkPhysicalDeviceDepthStencilResolvePropertiesKHR*)depth_stencil_resolve_props;
write_props->supportedDepthResolveModes = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR;
write_props->supportedStencilResolveModes = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR;
}
const auto *fragment_density_map2_props = lvl_find_in_chain<VkPhysicalDeviceFragmentDensityMap2PropertiesEXT>(pProperties->pNext);
if (fragment_density_map2_props) {
VkPhysicalDeviceFragmentDensityMap2PropertiesEXT* write_props = (VkPhysicalDeviceFragmentDensityMap2PropertiesEXT*)fragment_density_map2_props;
write_props->subsampledLoads = VK_FALSE;
write_props->subsampledCoarseReconstructionEarlyAccess = VK_FALSE;
write_props->maxSubsampledArrayLayers = 2;
write_props->maxDescriptorSetSubsampledSamplers = 1;
}
''',
'vkGetPhysicalDeviceExternalSemaphoreProperties':'''
// Hard code support for all handle types and features
pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0x1F;
pExternalSemaphoreProperties->compatibleHandleTypes = 0x1F;
pExternalSemaphoreProperties->externalSemaphoreFeatures = 0x3;
''',
'vkGetPhysicalDeviceExternalSemaphorePropertiesKHR':'''
GetPhysicalDeviceExternalSemaphoreProperties(physicalDevice, pExternalSemaphoreInfo, pExternalSemaphoreProperties);
''',
'vkGetPhysicalDeviceExternalFenceProperties':'''
// Hard-code support for all handle types and features
pExternalFenceProperties->exportFromImportedHandleTypes = 0xF;
pExternalFenceProperties->compatibleHandleTypes = 0xF;
pExternalFenceProperties->externalFenceFeatures = 0x3;
''',
'vkGetPhysicalDeviceExternalFencePropertiesKHR':'''
GetPhysicalDeviceExternalFenceProperties(physicalDevice, pExternalFenceInfo, pExternalFenceProperties);
''',
'vkGetPhysicalDeviceExternalBufferProperties':'''
// Hard-code support for all handle types and features
pExternalBufferProperties->externalMemoryProperties.externalMemoryFeatures = 0x7;
pExternalBufferProperties->externalMemoryProperties.exportFromImportedHandleTypes = 0x1FF;
pExternalBufferProperties->externalMemoryProperties.compatibleHandleTypes = 0x1FF;
''',
'vkGetPhysicalDeviceExternalBufferPropertiesKHR':'''
GetPhysicalDeviceExternalBufferProperties(physicalDevice, pExternalBufferInfo, pExternalBufferProperties);
''',
'vkGetBufferMemoryRequirements': '''
// TODO: Just hard-coding reqs for now
pMemoryRequirements->size = 4096;
pMemoryRequirements->alignment = 1;
pMemoryRequirements->memoryTypeBits = 0xFFFF;
// Return a better size based on the buffer size from the create info.
auto d_iter = buffer_map.find(device);
if (d_iter != buffer_map.end()) {
auto iter = d_iter->second.find(buffer);
if (iter != d_iter->second.end()) {
pMemoryRequirements->size = ((iter->second.size + 4095) / 4096) * 4096;
}
}
''',
'vkGetBufferMemoryRequirements2KHR': '''
GetBufferMemoryRequirements(device, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
''',
'vkGetImageMemoryRequirements': '''
pMemoryRequirements->size = 0;
pMemoryRequirements->alignment = 1;
auto d_iter = image_memory_size_map.find(device);
if(d_iter != image_memory_size_map.end()){
auto iter = d_iter->second.find(image);
if (iter != d_iter->second.end()) {
pMemoryRequirements->size = iter->second;
}
}
// Here we hard-code that the memory type at index 3 doesn't support this image.
pMemoryRequirements->memoryTypeBits = 0xFFFF & ~(0x1 << 3);
''',
'vkGetImageMemoryRequirements2KHR': '''
GetImageMemoryRequirements(device, pInfo->image, &pMemoryRequirements->memoryRequirements);
''',
'vkMapMemory': '''
unique_lock_t lock(global_lock);
if (VK_WHOLE_SIZE == size) {
if (allocated_memory_size_map.count(memory) != 0)
size = allocated_memory_size_map[memory] - offset;
else
size = 0x10000;
}
void* map_addr = malloc((size_t)size);
mapped_memory_map[memory].push_back(map_addr);
*ppData = map_addr;
return VK_SUCCESS;
''',
'vkUnmapMemory': '''
unique_lock_t lock(global_lock);
for (auto map_addr : mapped_memory_map[memory]) {
free(map_addr);
}
mapped_memory_map.erase(memory);
''',
'vkGetImageSubresourceLayout': '''
// Need safe values. Callers are computing memory offsets from pLayout, with no return code to flag failure.
*pLayout = VkSubresourceLayout(); // Default constructor zero values.
''',
'vkCreateSwapchainKHR': '''
unique_lock_t lock(global_lock);
*pSwapchain = (VkSwapchainKHR)global_unique_handle++;
for(uint32_t i = 0; i < icd_swapchain_image_count; ++i){
swapchain_image_map[*pSwapchain][i] = (VkImage)global_unique_handle++;
}
return VK_SUCCESS;
''',
'vkDestroySwapchainKHR': '''
unique_lock_t lock(global_lock);
swapchain_image_map.clear();
''',
'vkGetSwapchainImagesKHR': '''
if (!pSwapchainImages) {
*pSwapchainImageCount = icd_swapchain_image_count;
} else {
unique_lock_t lock(global_lock);
for (uint32_t img_i = 0; img_i < (std::min)(*pSwapchainImageCount, icd_swapchain_image_count); ++img_i){
pSwapchainImages[img_i] = swapchain_image_map.at(swapchain)[img_i];
}
if (*pSwapchainImageCount < icd_swapchain_image_count) return VK_INCOMPLETE;
else if (*pSwapchainImageCount > icd_swapchain_image_count) *pSwapchainImageCount = icd_swapchain_image_count;
}
return VK_SUCCESS;
''',
'vkAcquireNextImageKHR': '''
*pImageIndex = 0;
return VK_SUCCESS;
''',
'vkAcquireNextImage2KHR': '''
*pImageIndex = 0;
return VK_SUCCESS;
''',
'vkCreateBuffer': '''
unique_lock_t lock(global_lock);
*pBuffer = (VkBuffer)global_unique_handle++;
buffer_map[device][*pBuffer] = *pCreateInfo;
return VK_SUCCESS;
''',
'vkDestroyBuffer': '''
unique_lock_t lock(global_lock);
buffer_map[device].erase(buffer);
''',
'vkCreateImage': '''
unique_lock_t lock(global_lock);
*pImage = (VkImage)global_unique_handle++;
// TODO: A pixel size is 32 bytes. This accounts for the largest possible pixel size of any format. It could be changed to more accurate size if need be.
image_memory_size_map[device][*pImage] = pCreateInfo->extent.width * pCreateInfo->extent.height * pCreateInfo->extent.depth *
32 * pCreateInfo->arrayLayers * (pCreateInfo->mipLevels > 1 ? 2 : 1);
// plane count
switch (pCreateInfo->format) {
case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
case VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM:
case VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM:
case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16:
case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16:
case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16:
case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16:
case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16:
case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16:
case VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM:
case VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM:
case VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM:
image_memory_size_map[device][*pImage] *= 3;
break;
case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
case VK_FORMAT_G8_B8R8_2PLANE_422_UNORM:
case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16:
case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16:
case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16:
case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16:
case VK_FORMAT_G16_B16R16_2PLANE_420_UNORM:
case VK_FORMAT_G16_B16R16_2PLANE_422_UNORM:
image_memory_size_map[device][*pImage] *= 2;
break;
default:
break;
}
return VK_SUCCESS;
''',
'vkDestroyImage': '''
unique_lock_t lock(global_lock);
image_memory_size_map[device].erase(image);
''',
}
# MockICDGeneratorOptions - subclass of GeneratorOptions.
#
# Adds options used by MockICDOutputGenerator objects during Mock
# ICD generation.
#
# Additional members
# prefixText - list of strings to prefix generated header with
# (usually a copyright statement + calling convention macros).
# protectFile - True if multiple inclusion protection should be
# generated (based on the filename) around the entire header.
# protectFeature - True if #ifndef..#endif protection should be
# generated around a feature interface in the header file.
# genFuncPointers - True if function pointer typedefs should be
# generated
# protectProto - If conditional protection should be generated
# around prototype declarations, set to either '#ifdef'
# to require opt-in (#ifdef protectProtoStr) or '#ifndef'
# to require opt-out (#ifndef protectProtoStr). Otherwise
# set to None.
# protectProtoStr - #ifdef/#ifndef symbol to use around prototype
# declarations, if protectProto is set
# apicall - string to use for the function declaration prefix,
# such as APICALL on Windows.
# apientry - string to use for the calling convention macro,
# in typedefs, such as APIENTRY.
# apientryp - string to use for the calling convention macro
# in function pointer typedefs, such as APIENTRYP.
# indentFuncProto - True if prototype declarations should put each
# parameter on a separate line
# indentFuncPointer - True if typedefed function pointers should put each
# parameter on a separate line
# alignFuncParam - if nonzero and parameters are being put on a
# separate line, align parameter names at the specified column
class MockICDGeneratorOptions(GeneratorOptions):
    """Options consumed by MockICDOutputGenerator during mock ICD generation.

    Registry-selection options (api name, versions, extensions, sort order)
    are forwarded to GeneratorOptions; the remaining attributes control the
    emitted C code: prefix text, include-guard and per-feature protection,
    calling-convention macros, and prototype indentation/alignment.
    """

    def __init__(self,
                 conventions = None,
                 filename = None,
                 directory = '.',
                 genpath = None,
                 apiname = None,
                 profile = None,
                 versions = '.*',
                 emitversions = '.*',
                 defaultExtensions = None,
                 addExtensions = None,
                 removeExtensions = None,
                 emitExtensions = None,
                 sortProcedure = regSortFeatures,
                 prefixText = "",
                 genFuncPointers = True,
                 protectFile = True,
                 protectFeature = True,
                 protectProto = None,
                 protectProtoStr = None,
                 apicall = '',
                 apientry = '',
                 apientryp = '',
                 indentFuncProto = True,
                 indentFuncPointer = False,
                 alignFuncParam = 0,
                 expandEnumerants = True,
                 helper_file_type = ''):
        # Registry/selection options are handled entirely by the base class.
        super().__init__(conventions = conventions,
                         filename = filename,
                         directory = directory,
                         genpath = genpath,
                         apiname = apiname,
                         profile = profile,
                         versions = versions,
                         emitversions = emitversions,
                         defaultExtensions = defaultExtensions,
                         addExtensions = addExtensions,
                         removeExtensions = removeExtensions,
                         emitExtensions = emitExtensions,
                         sortProcedure = sortProcedure)
        # Text emitted verbatim at the top of the generated file (list of strings).
        self.prefixText = prefixText
        # Include-guard / per-feature #ifdef protection.
        self.protectFile = protectFile
        self.protectFeature = protectFeature
        self.protectProto = protectProto
        self.protectProtoStr = protectProtoStr
        # Calling-convention macros used in declarations and typedefs.
        self.apicall = apicall
        self.apientry = apientry
        self.apientryp = apientryp
        # Function-pointer typedef emission and prototype formatting.
        self.genFuncPointers = genFuncPointers
        self.indentFuncProto = indentFuncProto
        self.indentFuncPointer = indentFuncPointer
        self.alignFuncParam = alignFuncParam
        # NOTE(review): expandEnumerants and helper_file_type are accepted for
        # interface compatibility but are intentionally not stored.
# MockICDOutputGenerator - subclass of OutputGenerator.
# Generates a mock vulkan ICD.
# This is intended to be a minimal replacement for a vulkan device in order
# to enable Vulkan Validation testing.
#
# ---- methods ----
# MockOutputGenerator(errFile, warnFile, diagFile) - args as for
# OutputGenerator. Defines additional internal state.
# ---- methods overriding base class ----
# beginFile(genOpts)
# endFile()
# beginFeature(interface, emit)
# endFeature()
# genType(typeinfo,name)
# genStruct(typeinfo,name)
# genGroup(groupinfo,name)
# genEnum(enuminfo, name)
# genCmd(cmdinfo)
class MockICDOutputGenerator(OutputGenerator):
    """Generate specified API interfaces in a specific style, such as a C header.

    Runs in two passes distinguished by the output filename: a header pass
    (declarations plus the name -> function-pointer intercept map) and a
    source pass (function definitions, either custom bodies taken from
    CUSTOM_C_INTERCEPTS or generated create/destroy stubs).  The result is
    a mock ICD intended as a minimal stand-in for a real Vulkan device when
    testing the validation layers.
    """
    # This is an ordered list of sections in the header file.
    TYPE_SECTIONS = ['include', 'define', 'basetype', 'handle', 'enum',
                     'group', 'bitmask', 'funcpointer', 'struct']
    ALL_SECTIONS = TYPE_SECTIONS + ['command']

    def __init__(self,
                 errFile = sys.stderr,
                 warnFile = sys.stderr,
                 diagFile = sys.stdout):
        OutputGenerator.__init__(self, errFile, warnFile, diagFile)
        # Internal state - accumulators for different inner block text
        self.sections = dict([(section, []) for section in self.ALL_SECTIONS])
        # Entries of the name -> funcptr map, flushed in endFile().
        self.intercepts = []

    # Check if the parameter passed in is a pointer to an array
    def paramIsArray(self, param):
        """Return True if the <param> XML element carries a 'len' attribute."""
        return param.attrib.get('len') is not None

    # Check if the parameter passed in is a pointer
    def paramIsPointer(self, param):
        """Return True if a '*' appears in the declaration text after the <type> tag."""
        ispointer = False
        for elem in param:
            if ((elem.tag != 'type') and (elem.tail is not None)) and '*' in elem.tail:
                ispointer = True
        return ispointer

    # Check if an object is a non-dispatchable handle
    def isHandleTypeNonDispatchable(self, handletype):
        """Return True if *handletype* is declared via VK_DEFINE_NON_DISPATCHABLE_HANDLE."""
        handle = self.registry.tree.find("types/type/[name='" + handletype + "'][@category='handle']")
        if handle is not None and handle.find('type').text == 'VK_DEFINE_NON_DISPATCHABLE_HANDLE':
            return True
        else:
            return False

    # Check if an object is a dispatchable handle
    def isHandleTypeDispatchable(self, handletype):
        """Return True if *handletype* is declared via VK_DEFINE_HANDLE."""
        handle = self.registry.tree.find("types/type/[name='" + handletype + "'][@category='handle']")
        if handle is not None and handle.find('type').text == 'VK_DEFINE_HANDLE':
            return True
        else:
            return False

    def beginFile(self, genOpts):
        """Begin a generation pass.

        Emits the include guard (header pass), user prefix text, the
        per-pass #include set, the vkmock namespace opening, and — on the
        header pass — the instance/device extension name->version maps
        scraped from the registry XML.
        """
        OutputGenerator.beginFile(self, genOpts)
        # C-specific
        #
        # Multiple inclusion protection & C++ namespace.
        # The header pass is detected by the output filename ending in 'h'.
        self.header = False
        if (genOpts.protectFile and self.genOpts.filename and 'h' == self.genOpts.filename[-1]):
            self.header = True
            headerSym = '__' + re.sub(r'\.h', '_h_', os.path.basename(self.genOpts.filename))
            write('#ifndef', headerSym, file=self.outFile)
            write('#define', headerSym, '1', file=self.outFile)
            self.newline()
        #
        # User-supplied prefix text, if any (list of strings)
        if (genOpts.prefixText):
            for s in genOpts.prefixText:
                write(s, file=self.outFile)
        if self.header:
            # we don't want prototypes to avoid linking issues.
            # also important on windows to allow us setting correct dllexport linkage
            write('#define VK_NO_PROTOTYPES', file=self.outFile)
            write('#include <unordered_map>', file=self.outFile)
            write('#include <mutex>', file=self.outFile)
            write('#include <string>', file=self.outFile)
            write('#include <cstring>', file=self.outFile)
            write('#include "vulkan/vk_icd.h"', file=self.outFile)
        else:
            write('#include "mock_icd.h"', file=self.outFile)
            write('#include <stdlib.h>', file=self.outFile)
            write('#include <algorithm>', file=self.outFile)
            write('#include <array>', file=self.outFile)
            write('#include <vector>', file=self.outFile)
            write('#include "vk_typemap_helper.h"', file=self.outFile)
        write('namespace vkmock {', file=self.outFile)
        if self.header:
            self.newline()
            write(HEADER_C_CODE, file=self.outFile)
            # Include all of the extensions in ICD except specific ignored ones
            device_exts = []
            instance_exts = []
            # Ignore extensions that ICDs should not implement or are not safe to report
            ignore_exts = ['VK_EXT_validation_cache', 'VK_KHR_portability_subset']
            for ext in self.registry.tree.findall("extensions/extension"):
                if ext.attrib['supported'] != 'disabled': # Only include enabled extensions
                    if (ext.attrib['name'] not in ignore_exts):
                        # Search for extension version enum
                        for enum in ext.findall('require/enum'):
                            if enum.get('name', '').endswith('_SPEC_VERSION'):
                                ext_version = enum.get('value')
                                if (ext.attrib.get('type') == 'instance'):
                                    instance_exts.append(' {"%s", %s},' % (ext.attrib['name'], ext_version))
                                else:
                                    device_exts.append(' {"%s", %s},' % (ext.attrib['name'], ext_version))
                                break
            write('// Map of instance extension name to version', file=self.outFile)
            write('static const std::unordered_map<std::string, uint32_t> instance_extension_map = {', file=self.outFile)
            write('\n'.join(instance_exts), file=self.outFile)
            write('};', file=self.outFile)
            write('// Map of device extension name to version', file=self.outFile)
            write('static const std::unordered_map<std::string, uint32_t> device_extension_map = {', file=self.outFile)
            write('\n'.join(device_exts), file=self.outFile)
            write('};', file=self.outFile)
        else:
            self.newline()
            write(SOURCE_CPP_PREFIX, file=self.outFile)

    def endFile(self):
        """Finish the pass: flush the intercept map and close guard/namespace
        (header pass), or emit the loader-interface trailer (source pass)."""
        # C-specific
        # Finish C++ namespace and multiple inclusion protection
        self.newline()
        if self.header:
            # record intercepted procedures
            write('// Map of all APIs to be intercepted by this layer', file=self.outFile)
            write('static const std::unordered_map<std::string, void*> name_to_funcptr_map = {', file=self.outFile)
            write('\n'.join(self.intercepts), file=self.outFile)
            write('};\n', file=self.outFile)
            self.newline()
            write('} // namespace vkmock', file=self.outFile)
            self.newline()
            write('#endif', file=self.outFile)
        else: # Loader-layer-interface, need to implement global interface functions
            write(SOURCE_CPP_POSTFIX, file=self.outFile)
        # Finish processing in superclass
        OutputGenerator.endFile(self)

    def beginFeature(self, interface, emit):
        """Reset the per-feature section accumulators and capture the
        feature's platform-protect macro (if any)."""
        #write('// starting beginFeature', file=self.outFile)
        # Start processing in superclass
        OutputGenerator.beginFeature(self, interface, emit)
        self.featureExtraProtect = GetFeatureProtect(interface)
        # C-specific
        # Accumulate includes, defines, types, enums, function pointer typedefs,
        # end function prototypes separately for this feature. They're only
        # printed in endFeature().
        self.sections = dict([(section, []) for section in self.ALL_SECTIONS])
        #write('// ending beginFeature', file=self.outFile)

    def endFeature(self):
        """Write the accumulated sections for this feature, wrapped in the
        feature-name and platform-protect #ifdef guards as configured."""
        # C-specific
        # Actually write the interface to the output file.
        #write('// starting endFeature', file=self.outFile)
        if (self.emit):
            self.newline()
            if (self.genOpts.protectFeature):
                write('#ifndef', self.featureName, file=self.outFile)
            # If type declarations are needed by other features based on
            # this one, it may be necessary to suppress the ExtraProtect,
            # or move it below the 'for section...' loop.
            #write('// endFeature looking at self.featureExtraProtect', file=self.outFile)
            if (self.featureExtraProtect != None):
                write('#ifdef', self.featureExtraProtect, file=self.outFile)
            #write('#define', self.featureName, '1', file=self.outFile)
            for section in self.TYPE_SECTIONS:
                #write('// endFeature writing section'+section, file=self.outFile)
                contents = self.sections[section]
                if contents:
                    write('\n'.join(contents), file=self.outFile)
                    self.newline()
            #write('// endFeature looking at self.sections[command]', file=self.outFile)
            if (self.sections['command']):
                write('\n'.join(self.sections['command']), end=u'', file=self.outFile)
                self.newline()
            if (self.featureExtraProtect != None):
                write('#endif /*', self.featureExtraProtect, '*/', file=self.outFile)
            if (self.genOpts.protectFeature):
                write('#endif /*', self.featureName, '*/', file=self.outFile)
        # Finish processing in superclass
        OutputGenerator.endFeature(self)
        #write('// ending endFeature', file=self.outFile)

    #
    # Append a definition to the specified section
    def appendSection(self, section, text):
        """Accumulate *text* under *section* for emission in endFeature()."""
        # self.sections[section].append('SECTION: ' + section + '\n')
        self.sections[section].append(text)

    #
    # Type generation
    def genType(self, typeinfo, name, alias):
        """No-op: the mock ICD relies on the real Vulkan headers for types."""
        pass

    #
    # Struct (e.g. C "struct" type) generation.
    # This is a special case of the <type> tag where the contents are
    # interpreted as a set of <member> tags instead of freeform C
    # C type declarations. The <member> tags are just like <param>
    # tags - they are a declaration of a struct or union member.
    # Only simple member declarations are supported (no nested
    # structs etc.)
    def genStruct(self, typeinfo, typeName, alias):
        """Emit a C typedef for a struct/union described by <member> tags."""
        OutputGenerator.genStruct(self, typeinfo, typeName, alias)
        body = 'typedef ' + typeinfo.elem.get('category') + ' ' + typeName + ' {\n'
        # paramdecl = self.makeCParamDecl(typeinfo.elem, self.genOpts.alignFuncParam)
        for member in typeinfo.elem.findall('.//member'):
            body += self.makeCParamDecl(member, self.genOpts.alignFuncParam)
            body += ';\n'
        body += '} ' + typeName + ';\n'
        self.appendSection('struct', body)

    #
    # Group (e.g. C "enum" type) generation.
    # These are concatenated together with other types.
    def genGroup(self, groupinfo, groupName, alias):
        """No-op: enum groups come from the real Vulkan headers."""
        pass

    # Enumerant generation
    # <enum> tags may specify their values in several ways, but are usually
    # just integers.
    def genEnum(self, enuminfo, name, alias):
        """No-op: enumerants come from the real Vulkan headers."""
        pass

    #
    # Command generation
    def genCmd(self, cmdinfo, name, alias):
        """Emit the declaration (header pass) or definition (source pass)
        for one Vulkan command and register it in the intercept map.

        Source-pass bodies come from CUSTOM_C_INTERCEPTS when available
        (including via the command's KHR-suffixed sibling); otherwise a
        stub is generated that fabricates handles for Create/Allocate
        calls and returns VK_SUCCESS where a result is expected.
        """
        decls = self.makeCDecls(cmdinfo.elem)
        if self.header: # In the header declare all intercepts
            self.appendSection('command', '')
            self.appendSection('command', 'static %s' % (decls[0]))
            if (self.featureExtraProtect != None):
                self.intercepts += [ '#ifdef %s' % self.featureExtraProtect ]
            self.intercepts += [ ' {"%s", (void*)%s},' % (name,name[2:]) ]
            if (self.featureExtraProtect != None):
                self.intercepts += [ '#endif' ]
            return
        manual_functions = [
            # Include functions here to be intercepted w/ manually implemented function bodies
            'vkGetDeviceProcAddr',
            'vkGetInstanceProcAddr',
            'vkCreateDevice',
            'vkDestroyDevice',
            'vkCreateInstance',
            'vkDestroyInstance',
            'vkFreeCommandBuffers',
            'vkAllocateCommandBuffers',
            'vkDestroyCommandPool',
            #'vkCreateDebugReportCallbackEXT',
            #'vkDestroyDebugReportCallbackEXT',
            'vkEnumerateInstanceLayerProperties',
            'vkEnumerateInstanceVersion',
            'vkEnumerateInstanceExtensionProperties',
            'vkEnumerateDeviceLayerProperties',
            'vkEnumerateDeviceExtensionProperties',
        ]
        if name in manual_functions:
            self.appendSection('command', '')
            if name not in CUSTOM_C_INTERCEPTS:
                self.appendSection('command', '// declare only')
                self.appendSection('command', 'static %s' % (decls[0]))
                self.appendSection('command', '// TODO: Implement custom intercept body')
            else:
                # decls[0][:-1] drops the trailing ';' so a body can follow.
                self.appendSection('command', 'static %s' % (decls[0][:-1]))
                self.appendSection('command', '{\n%s}' % (CUSTOM_C_INTERCEPTS[name]))
            self.intercepts += [ ' {"%s", (void*)%s},' % (name,name[2:]) ]
            return
        # record that the function will be intercepted
        if (self.featureExtraProtect != None):
            self.intercepts += [ '#ifdef %s' % self.featureExtraProtect ]
        self.intercepts += [ ' {"%s", (void*)%s},' % (name,name[2:]) ]
        if (self.featureExtraProtect != None):
            self.intercepts += [ '#endif' ]
        OutputGenerator.genCmd(self, cmdinfo, name, alias)
        #
        self.appendSection('command', '')
        self.appendSection('command', 'static %s' % (decls[0][:-1]))
        if name in CUSTOM_C_INTERCEPTS:
            self.appendSection('command', '{%s}' % (CUSTOM_C_INTERCEPTS[name]))
            return
        # Declare result variable, if any.
        resulttype = cmdinfo.elem.find('proto/type')
        if (resulttype != None and resulttype.text == 'void'):
            resulttype = None
        # if the name w/ KHR postfix is in the CUSTOM_C_INTERCEPTS
        # Call the KHR custom version instead of generating separate code
        khr_name = name + "KHR"
        if khr_name in CUSTOM_C_INTERCEPTS:
            return_string = ''
            if resulttype != None:
                return_string = 'return '
            params = cmdinfo.elem.findall('param/name')
            param_names = []
            for param in params:
                param_names.append(param.text)
            self.appendSection('command', '{\n %s%s(%s);\n}' % (return_string, khr_name[2:], ", ".join(param_names)))
            return
        self.appendSection('command', '{')
        api_function_name = cmdinfo.elem.attrib.get('name')
        # GET THE TYPE OF FUNCTION
        if True in [ftxt in api_function_name for ftxt in ['Create', 'Allocate']]:
            # Get last param — by convention the output handle (or handle array).
            last_param = cmdinfo.elem.findall('param')[-1]
            lp_txt = last_param.find('name').text
            lp_len = None
            if ('len' in last_param.attrib):
                lp_len = last_param.attrib['len']
                # 'len' uses '::' for struct-member paths; C code needs '->'.
                lp_len = lp_len.replace('::', '->')
            lp_type = last_param.find('type').text
            handle_type = 'dispatchable'
            allocator_txt = 'CreateDispObjHandle()';
            if (self.isHandleTypeNonDispatchable(lp_type)):
                handle_type = 'non-' + handle_type
                # Non-dispatchable handles are just unique integers.
                allocator_txt = 'global_unique_handle++';
            # Need to lock in both cases
            self.appendSection('command', ' unique_lock_t lock(global_lock);')
            if (lp_len != None):
                #print("%s last params (%s) has len %s" % (handle_type, lp_txt, lp_len))
                self.appendSection('command', ' for (uint32_t i = 0; i < %s; ++i) {' % (lp_len))
                self.appendSection('command', ' %s[i] = (%s)%s;' % (lp_txt, lp_type, allocator_txt))
                self.appendSection('command', ' }')
            else:
                #print("Single %s last param is '%s' w/ type '%s'" % (handle_type, lp_txt, lp_type))
                if 'AllocateMemory' in api_function_name:
                    # Store allocation size in case it's mapped
                    self.appendSection('command', ' allocated_memory_size_map[(VkDeviceMemory)global_unique_handle] = pAllocateInfo->allocationSize;')
                self.appendSection('command', ' *%s = (%s)%s;' % (lp_txt, lp_type, allocator_txt))
        elif True in [ftxt in api_function_name for ftxt in ['Destroy', 'Free']]:
            self.appendSection('command', '//Destroy object')
            if 'FreeMemory' in api_function_name:
                # Remove from allocation map
                self.appendSection('command', ' allocated_memory_size_map.erase(memory);')
        else:
            self.appendSection('command', '//Not a CREATE or DESTROY function')
        # Return result variable, if any.
        if (resulttype != None):
            if api_function_name == 'vkGetEventStatus':
                # Events are always reported as set so waiters never stall.
                self.appendSection('command', ' return VK_EVENT_SET;')
            else:
                self.appendSection('command', ' return VK_SUCCESS;')
        self.appendSection('command', '}')

    #
    # override makeProtoName to drop the "vk" prefix
    def makeProtoName(self, name, tail):
        """Return the prototype name with the leading 'vk' stripped."""
        return self.genOpts.apientry + name[2:] + tail
| 45.4778 | 233 | 0.668435 |
# can be copied and customized to assist in creation of a new layer.
import os,re,sys
from generator import *
from common_codegen import *
# Mock header code
HEADER_C_CODE = '''
using mutex_t = std::mutex;
using lock_guard_t = std::lock_guard<mutex_t>;
using unique_lock_t = std::unique_lock<mutex_t>;
static mutex_t global_lock;
static uint64_t global_unique_handle = 1;
static const uint32_t SUPPORTED_LOADER_ICD_INTERFACE_VERSION = 5;
static uint32_t loader_interface_version = 0;
static bool negotiate_loader_icd_interface_called = false;
static void* CreateDispObjHandle() {
auto handle = new VK_LOADER_DATA;
set_loader_magic_value(handle);
return handle;
}
static void DestroyDispObjHandle(void* handle) {
delete reinterpret_cast<VK_LOADER_DATA*>(handle);
}
'''
# Manual code at the top of the cpp source file
SOURCE_CPP_PREFIX = '''
using std::unordered_map;
static constexpr uint32_t icd_physical_device_count = 1;
static constexpr uint32_t kSupportedVulkanAPIVersion = VK_API_VERSION_1_1;
static unordered_map<VkInstance, std::array<VkPhysicalDevice, icd_physical_device_count>> physical_device_map;
// Map device memory handle to any mapped allocations that we'll need to free on unmap
static unordered_map<VkDeviceMemory, std::vector<void*>> mapped_memory_map;
// Map device memory allocation handle to the size
static unordered_map<VkDeviceMemory, VkDeviceSize> allocated_memory_size_map;
static unordered_map<VkDevice, unordered_map<uint32_t, unordered_map<uint32_t, VkQueue>>> queue_map;
static unordered_map<VkDevice, unordered_map<VkBuffer, VkBufferCreateInfo>> buffer_map;
static unordered_map<VkDevice, unordered_map<VkImage, VkDeviceSize>> image_memory_size_map;
static unordered_map<VkCommandPool, std::vector<VkCommandBuffer>> command_pool_buffer_map;
static constexpr uint32_t icd_swapchain_image_count = 1;
static unordered_map<VkSwapchainKHR, VkImage[icd_swapchain_image_count]> swapchain_image_map;
// TODO: Would like to codegen this but limits aren't in XML
static VkPhysicalDeviceLimits SetLimits(VkPhysicalDeviceLimits *limits) {
limits->maxImageDimension1D = 4096;
limits->maxImageDimension2D = 4096;
limits->maxImageDimension3D = 256;
limits->maxImageDimensionCube = 4096;
limits->maxImageArrayLayers = 256;
limits->maxTexelBufferElements = 65536;
limits->maxUniformBufferRange = 16384;
limits->maxStorageBufferRange = 134217728;
limits->maxPushConstantsSize = 128;
limits->maxMemoryAllocationCount = 4096;
limits->maxSamplerAllocationCount = 4000;
limits->bufferImageGranularity = 1;
limits->sparseAddressSpaceSize = 2147483648;
limits->maxBoundDescriptorSets = 4;
limits->maxPerStageDescriptorSamplers = 16;
limits->maxPerStageDescriptorUniformBuffers = 12;
limits->maxPerStageDescriptorStorageBuffers = 4;
limits->maxPerStageDescriptorSampledImages = 16;
limits->maxPerStageDescriptorStorageImages = 4;
limits->maxPerStageDescriptorInputAttachments = 4;
limits->maxPerStageResources = 128;
limits->maxDescriptorSetSamplers = 96;
limits->maxDescriptorSetUniformBuffers = 72;
limits->maxDescriptorSetUniformBuffersDynamic = 8;
limits->maxDescriptorSetStorageBuffers = 24;
limits->maxDescriptorSetStorageBuffersDynamic = 4;
limits->maxDescriptorSetSampledImages = 96;
limits->maxDescriptorSetStorageImages = 24;
limits->maxDescriptorSetInputAttachments = 4;
limits->maxVertexInputAttributes = 16;
limits->maxVertexInputBindings = 16;
limits->maxVertexInputAttributeOffset = 2047;
limits->maxVertexInputBindingStride = 2048;
limits->maxVertexOutputComponents = 64;
limits->maxTessellationGenerationLevel = 64;
limits->maxTessellationPatchSize = 32;
limits->maxTessellationControlPerVertexInputComponents = 64;
limits->maxTessellationControlPerVertexOutputComponents = 64;
limits->maxTessellationControlPerPatchOutputComponents = 120;
limits->maxTessellationControlTotalOutputComponents = 2048;
limits->maxTessellationEvaluationInputComponents = 64;
limits->maxTessellationEvaluationOutputComponents = 64;
limits->maxGeometryShaderInvocations = 32;
limits->maxGeometryInputComponents = 64;
limits->maxGeometryOutputComponents = 64;
limits->maxGeometryOutputVertices = 256;
limits->maxGeometryTotalOutputComponents = 1024;
limits->maxFragmentInputComponents = 64;
limits->maxFragmentOutputAttachments = 4;
limits->maxFragmentDualSrcAttachments = 1;
limits->maxFragmentCombinedOutputResources = 4;
limits->maxComputeSharedMemorySize = 16384;
limits->maxComputeWorkGroupCount[0] = 65535;
limits->maxComputeWorkGroupCount[1] = 65535;
limits->maxComputeWorkGroupCount[2] = 65535;
limits->maxComputeWorkGroupInvocations = 128;
limits->maxComputeWorkGroupSize[0] = 128;
limits->maxComputeWorkGroupSize[1] = 128;
limits->maxComputeWorkGroupSize[2] = 64;
limits->subPixelPrecisionBits = 4;
limits->subTexelPrecisionBits = 4;
limits->mipmapPrecisionBits = 4;
limits->maxDrawIndexedIndexValue = UINT32_MAX;
limits->maxDrawIndirectCount = UINT16_MAX;
limits->maxSamplerLodBias = 2.0f;
limits->maxSamplerAnisotropy = 16;
limits->maxViewports = 16;
limits->maxViewportDimensions[0] = 4096;
limits->maxViewportDimensions[1] = 4096;
limits->viewportBoundsRange[0] = -8192;
limits->viewportBoundsRange[1] = 8191;
limits->viewportSubPixelBits = 0;
limits->minMemoryMapAlignment = 64;
limits->minTexelBufferOffsetAlignment = 16;
limits->minUniformBufferOffsetAlignment = 16;
limits->minStorageBufferOffsetAlignment = 16;
limits->minTexelOffset = -8;
limits->maxTexelOffset = 7;
limits->minTexelGatherOffset = -8;
limits->maxTexelGatherOffset = 7;
limits->minInterpolationOffset = 0.0f;
limits->maxInterpolationOffset = 0.5f;
limits->subPixelInterpolationOffsetBits = 4;
limits->maxFramebufferWidth = 4096;
limits->maxFramebufferHeight = 4096;
limits->maxFramebufferLayers = 256;
limits->framebufferColorSampleCounts = 0x7F;
limits->framebufferDepthSampleCounts = 0x7F;
limits->framebufferStencilSampleCounts = 0x7F;
limits->framebufferNoAttachmentsSampleCounts = 0x7F;
limits->maxColorAttachments = 4;
limits->sampledImageColorSampleCounts = 0x7F;
limits->sampledImageIntegerSampleCounts = 0x7F;
limits->sampledImageDepthSampleCounts = 0x7F;
limits->sampledImageStencilSampleCounts = 0x7F;
limits->storageImageSampleCounts = 0x7F;
limits->maxSampleMaskWords = 1;
limits->timestampComputeAndGraphics = VK_TRUE;
limits->timestampPeriod = 1;
limits->maxClipDistances = 8;
limits->maxCullDistances = 8;
limits->maxCombinedClipAndCullDistances = 8;
limits->discreteQueuePriorities = 2;
limits->pointSizeRange[0] = 1.0f;
limits->pointSizeRange[1] = 64.0f;
limits->lineWidthRange[0] = 1.0f;
limits->lineWidthRange[1] = 8.0f;
limits->pointSizeGranularity = 1.0f;
limits->lineWidthGranularity = 1.0f;
limits->strictLines = VK_TRUE;
limits->standardSampleLocations = VK_TRUE;
limits->optimalBufferCopyOffsetAlignment = 1;
limits->optimalBufferCopyRowPitchAlignment = 1;
limits->nonCoherentAtomSize = 256;
return *limits;
}
void SetBoolArrayTrue(VkBool32* bool_array, uint32_t num_bools)
{
for (uint32_t i = 0; i < num_bools; ++i) {
bool_array[i] = VK_TRUE;
}
}
'''
# Manual code at the end of the cpp source file
SOURCE_CPP_POSTFIX = '''
static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
// TODO: This function should only care about physical device functions and return nullptr for other functions
const auto &item = name_to_funcptr_map.find(funcName);
if (item != name_to_funcptr_map.end()) {
return reinterpret_cast<PFN_vkVoidFunction>(item->second);
}
// Mock should intercept all functions so if we get here just return null
return nullptr;
}
} // namespace vkmock
#if defined(__GNUC__) && __GNUC__ >= 4
#define EXPORT __attribute__((visibility("default")))
#elif defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590)
#define EXPORT __attribute__((visibility("default")))
#elif defined(_WIN32)
#define EXPORT __declspec(dllexport)
#else
#define EXPORT
#endif
extern "C" {
EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(VkInstance instance, const char* pName) {
if (!vkmock::negotiate_loader_icd_interface_called) {
vkmock::loader_interface_version = 1;
}
return vkmock::GetInstanceProcAddr(instance, pName);
}
EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetPhysicalDeviceProcAddr(VkInstance instance, const char* pName) {
return vkmock::GetPhysicalDeviceProcAddr(instance, pName);
}
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t* pSupportedVersion) {
vkmock::negotiate_loader_icd_interface_called = true;
vkmock::loader_interface_version = *pSupportedVersion;
if (*pSupportedVersion > vkmock::SUPPORTED_LOADER_ICD_INTERFACE_VERSION) {
*pSupportedVersion = vkmock::SUPPORTED_LOADER_ICD_INTERFACE_VERSION;
}
return VK_SUCCESS;
}
EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroySurfaceKHR(
VkInstance instance,
VkSurfaceKHR surface,
const VkAllocationCallbacks* pAllocator)
{
vkmock::DestroySurfaceKHR(instance, surface, pAllocator);
}
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceSupportKHR(
VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
VkSurfaceKHR surface,
VkBool32* pSupported)
{
return vkmock::GetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface, pSupported);
}
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
VkSurfaceCapabilitiesKHR* pSurfaceCapabilities)
{
return vkmock::GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities);
}
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormatsKHR(
VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
uint32_t* pSurfaceFormatCount,
VkSurfaceFormatKHR* pSurfaceFormats)
{
return vkmock::GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount, pSurfaceFormats);
}
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModesKHR(
VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
uint32_t* pPresentModeCount,
VkPresentModeKHR* pPresentModes)
{
return vkmock::GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, pPresentModeCount, pPresentModes);
}
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayPlaneSurfaceKHR(
VkInstance instance,
const VkDisplaySurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface)
{
return vkmock::CreateDisplayPlaneSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface);
}
#ifdef VK_USE_PLATFORM_XLIB_KHR
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateXlibSurfaceKHR(
VkInstance instance,
const VkXlibSurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface)
{
return vkmock::CreateXlibSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface);
}
#endif /* VK_USE_PLATFORM_XLIB_KHR */
#ifdef VK_USE_PLATFORM_XCB_KHR
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateXcbSurfaceKHR(
VkInstance instance,
const VkXcbSurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface)
{
return vkmock::CreateXcbSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface);
}
#endif /* VK_USE_PLATFORM_XCB_KHR */
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateWaylandSurfaceKHR(
VkInstance instance,
const VkWaylandSurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface)
{
return vkmock::CreateWaylandSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface);
}
#endif /* VK_USE_PLATFORM_WAYLAND_KHR */
#ifdef VK_USE_PLATFORM_ANDROID_KHR
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateAndroidSurfaceKHR(
VkInstance instance,
const VkAndroidSurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface)
{
return vkmock::CreateAndroidSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface);
}
#endif /* VK_USE_PLATFORM_ANDROID_KHR */
#ifdef VK_USE_PLATFORM_WIN32_KHR
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateWin32SurfaceKHR(
VkInstance instance,
const VkWin32SurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface)
{
return vkmock::CreateWin32SurfaceKHR(instance, pCreateInfo, pAllocator, pSurface);
}
#endif /* VK_USE_PLATFORM_WIN32_KHR */
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupSurfacePresentModesKHR(
VkDevice device,
VkSurfaceKHR surface,
VkDeviceGroupPresentModeFlagsKHR* pModes)
{
return vkmock::GetDeviceGroupSurfacePresentModesKHR(device, surface, pModes);
}
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDevicePresentRectanglesKHR(
VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
uint32_t* pRectCount,
VkRect2D* pRects)
{
return vkmock::GetPhysicalDevicePresentRectanglesKHR(physicalDevice, surface, pRectCount, pRects);
}
#ifdef VK_USE_PLATFORM_VI_NN
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateViSurfaceNN(
VkInstance instance,
const VkViSurfaceCreateInfoNN* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface)
{
return vkmock::CreateViSurfaceNN(instance, pCreateInfo, pAllocator, pSurface);
}
#endif /* VK_USE_PLATFORM_VI_NN */
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2EXT(
VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
VkSurfaceCapabilities2EXT* pSurfaceCapabilities)
{
return vkmock::GetPhysicalDeviceSurfaceCapabilities2EXT(physicalDevice, surface, pSurfaceCapabilities);
}
#ifdef VK_USE_PLATFORM_IOS_MVK
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateIOSSurfaceMVK(
VkInstance instance,
const VkIOSSurfaceCreateInfoMVK* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface)
{
return vkmock::CreateIOSSurfaceMVK(instance, pCreateInfo, pAllocator, pSurface);
}
#endif /* VK_USE_PLATFORM_IOS_MVK */
#ifdef VK_USE_PLATFORM_MACOS_MVK
EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateMacOSSurfaceMVK(
VkInstance instance,
const VkMacOSSurfaceCreateInfoMVK* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface)
{
return vkmock::CreateMacOSSurfaceMVK(instance, pCreateInfo, pAllocator, pSurface);
}
#endif /* VK_USE_PLATFORM_MACOS_MVK */
} // end extern "C"
'''
# CUSTOM_C_INTERCEPTS maps Vulkan entry-point names to hand-written C++ bodies
# that replace the auto-generated mock implementation for that entry point.
# Each value is spliced verbatim into the generated mock ICD source.
CUSTOM_C_INTERCEPTS = {
'vkCreateInstance': '''
    // TODO: If loader ver <=4 ICD must fail with VK_ERROR_INCOMPATIBLE_DRIVER for all vkCreateInstance calls with
    //  apiVersion set to > Vulkan 1.0 because the loader is still at interface version <= 4. Otherwise, the
    //  ICD should behave as normal.
    if (loader_interface_version <= 4) {
        return VK_ERROR_INCOMPATIBLE_DRIVER;
    }
    *pInstance = (VkInstance)CreateDispObjHandle();
    for (auto& physical_device : physical_device_map[*pInstance])
        physical_device = (VkPhysicalDevice)CreateDispObjHandle();
    // TODO: If emulating specific device caps, will need to add intelligence here
    return VK_SUCCESS;
''',
'vkDestroyInstance': '''
    if (instance) {
        for (const auto physical_device : physical_device_map.at(instance))
            DestroyDispObjHandle((void*)physical_device);
        physical_device_map.erase(instance);
        DestroyDispObjHandle((void*)instance);
    }
''',
'vkAllocateCommandBuffers': '''
    unique_lock_t lock(global_lock);
    for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; ++i) {
        pCommandBuffers[i] = (VkCommandBuffer)CreateDispObjHandle();
        command_pool_buffer_map[pAllocateInfo->commandPool].push_back(pCommandBuffers[i]);
    }
    return VK_SUCCESS;
''',
'vkFreeCommandBuffers': '''
    unique_lock_t lock(global_lock);
    for (auto i = 0u; i < commandBufferCount; ++i) {
        if (!pCommandBuffers[i]) {
            continue;
        }
        for (auto& pair : command_pool_buffer_map) {
            auto& cbs = pair.second;
            auto it = std::find(cbs.begin(), cbs.end(), pCommandBuffers[i]);
            if (it != cbs.end()) {
                cbs.erase(it);
            }
        }
        DestroyDispObjHandle((void*) pCommandBuffers[i]);
    }
''',
'vkDestroyCommandPool': '''
    // destroy command buffers for this pool
    unique_lock_t lock(global_lock);
    auto it = command_pool_buffer_map.find(commandPool);
    if (it != command_pool_buffer_map.end()) {
        for (auto& cb : it->second) {
            DestroyDispObjHandle((void*) cb);
        }
        command_pool_buffer_map.erase(it);
    }
''',
'vkEnumeratePhysicalDevices': '''
    VkResult result_code = VK_SUCCESS;
    if (pPhysicalDevices) {
        const auto return_count = (std::min)(*pPhysicalDeviceCount, icd_physical_device_count);
        for (uint32_t i = 0; i < return_count; ++i) pPhysicalDevices[i] = physical_device_map.at(instance)[i];
        if (return_count < icd_physical_device_count) result_code = VK_INCOMPLETE;
        *pPhysicalDeviceCount = return_count;
    } else {
        *pPhysicalDeviceCount = icd_physical_device_count;
    }
    return result_code;
''',
'vkCreateDevice': '''
    *pDevice = (VkDevice)CreateDispObjHandle();
    // TODO: If emulating specific device caps, will need to add intelligence here
    return VK_SUCCESS;
''',
'vkDestroyDevice': '''
    unique_lock_t lock(global_lock);
    // First destroy sub-device objects
    // Destroy Queues
    for (auto queue_family_map_pair : queue_map[device]) {
        for (auto index_queue_pair : queue_map[device][queue_family_map_pair.first]) {
            DestroyDispObjHandle((void*)index_queue_pair.second);
        }
    }
    queue_map.erase(device);
    buffer_map.erase(device);
    image_memory_size_map.erase(device);
    // Now destroy device
    DestroyDispObjHandle((void*)device);
    // TODO: If emulating specific device caps, will need to add intelligence here
''',
'vkGetDeviceQueue': '''
    unique_lock_t lock(global_lock);
    auto queue = queue_map[device][queueFamilyIndex][queueIndex];
    if (queue) {
        *pQueue = queue;
    } else {
        *pQueue = queue_map[device][queueFamilyIndex][queueIndex] = (VkQueue)CreateDispObjHandle();
    }
    // TODO: If emulating specific device caps, will need to add intelligence here
    return;
''',
'vkGetDeviceQueue2': '''
    GetDeviceQueue(device, pQueueInfo->queueFamilyIndex, pQueueInfo->queueIndex, pQueue);
    // TODO: Add further support for GetDeviceQueue2 features
''',
'vkEnumerateInstanceLayerProperties': '''
    return VK_SUCCESS;
''',
'vkEnumerateInstanceVersion': '''
    *pApiVersion = kSupportedVulkanAPIVersion;
    return VK_SUCCESS;
''',
'vkEnumerateDeviceLayerProperties': '''
    return VK_SUCCESS;
''',
'vkEnumerateInstanceExtensionProperties': '''
    // If requesting number of extensions, return that
    if (!pLayerName) {
        if (!pProperties) {
            *pPropertyCount = (uint32_t)instance_extension_map.size();
        } else {
            uint32_t i = 0;
            for (const auto &name_ver_pair : instance_extension_map) {
                if (i == *pPropertyCount) {
                    break;
                }
                std::strncpy(pProperties[i].extensionName, name_ver_pair.first.c_str(), sizeof(pProperties[i].extensionName));
                pProperties[i].extensionName[sizeof(pProperties[i].extensionName) - 1] = 0;
                pProperties[i].specVersion = name_ver_pair.second;
                ++i;
            }
            if (i != instance_extension_map.size()) {
                return VK_INCOMPLETE;
            }
        }
    }
    // If requesting extension properties, fill in data struct for number of extensions
    return VK_SUCCESS;
''',
'vkEnumerateDeviceExtensionProperties': '''
    // If requesting number of extensions, return that
    if (!pLayerName) {
        if (!pProperties) {
            *pPropertyCount = (uint32_t)device_extension_map.size();
        } else {
            uint32_t i = 0;
            for (const auto &name_ver_pair : device_extension_map) {
                if (i == *pPropertyCount) {
                    break;
                }
                std::strncpy(pProperties[i].extensionName, name_ver_pair.first.c_str(), sizeof(pProperties[i].extensionName));
                pProperties[i].extensionName[sizeof(pProperties[i].extensionName) - 1] = 0;
                pProperties[i].specVersion = name_ver_pair.second;
                ++i;
            }
            if (i != device_extension_map.size()) {
                return VK_INCOMPLETE;
            }
        }
    }
    // If requesting extension properties, fill in data struct for number of extensions
    return VK_SUCCESS;
''',
'vkGetPhysicalDeviceSurfacePresentModesKHR': '''
    // Currently always say that all present modes are supported
    if (!pPresentModes) {
        *pPresentModeCount = 6;
    } else {
        // Intentionally falling through and just filling however many modes are requested
        switch(*pPresentModeCount) {
        case 6:
            pPresentModes[5] = VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR;
            // fall through
        case 5:
            pPresentModes[4] = VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR;
            // fall through
        case 4:
            pPresentModes[3] = VK_PRESENT_MODE_FIFO_RELAXED_KHR;
            // fall through
        case 3:
            pPresentModes[2] = VK_PRESENT_MODE_FIFO_KHR;
            // fall through
        case 2:
            pPresentModes[1] = VK_PRESENT_MODE_MAILBOX_KHR;
            // fall through
        default:
            pPresentModes[0] = VK_PRESENT_MODE_IMMEDIATE_KHR;
            break;
        }
    }
    return VK_SUCCESS;
''',
'vkGetPhysicalDeviceSurfaceFormatsKHR': '''
    // Currently always say that RGBA8 & BGRA8 are supported
    if (!pSurfaceFormats) {
        *pSurfaceFormatCount = 2;
    } else {
        // Intentionally falling through and just filling however many types are requested
        switch(*pSurfaceFormatCount) {
        case 2:
            pSurfaceFormats[1].format = VK_FORMAT_R8G8B8A8_UNORM;
            pSurfaceFormats[1].colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
            // fall through
        default:
            pSurfaceFormats[0].format = VK_FORMAT_B8G8R8A8_UNORM;
            pSurfaceFormats[0].colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
            break;
        }
    }
    return VK_SUCCESS;
''',
# FIX: the default case below previously wrote pSurfaceFormats[1].pNext a
# second time, leaving element 0's pNext uninitialized (and writing past the
# end of a one-element caller array when *pSurfaceFormatCount == 1).
'vkGetPhysicalDeviceSurfaceFormats2KHR': '''
    // Currently always say that RGBA8 & BGRA8 are supported
    if (!pSurfaceFormats) {
        *pSurfaceFormatCount = 2;
    } else {
        // Intentionally falling through and just filling however many types are requested
        switch(*pSurfaceFormatCount) {
        case 2:
            pSurfaceFormats[1].pNext = nullptr;
            pSurfaceFormats[1].surfaceFormat.format = VK_FORMAT_R8G8B8A8_UNORM;
            pSurfaceFormats[1].surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
            // fall through
        default:
            pSurfaceFormats[0].pNext = nullptr;
            pSurfaceFormats[0].surfaceFormat.format = VK_FORMAT_B8G8R8A8_UNORM;
            pSurfaceFormats[0].surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
            break;
        }
    }
    return VK_SUCCESS;
''',
'vkGetPhysicalDeviceSurfaceSupportKHR': '''
    // Currently say that all surface/queue combos are supported
    *pSupported = VK_TRUE;
    return VK_SUCCESS;
''',
'vkGetPhysicalDeviceSurfaceCapabilitiesKHR': '''
    // In general just say max supported is available for requested surface
    pSurfaceCapabilities->minImageCount = 1;
    pSurfaceCapabilities->maxImageCount = 0;
    pSurfaceCapabilities->currentExtent.width = 0xFFFFFFFF;
    pSurfaceCapabilities->currentExtent.height = 0xFFFFFFFF;
    pSurfaceCapabilities->minImageExtent.width = 1;
    pSurfaceCapabilities->minImageExtent.height = 1;
    pSurfaceCapabilities->maxImageExtent.width = 0xFFFF;
    pSurfaceCapabilities->maxImageExtent.height = 0xFFFF;
    pSurfaceCapabilities->maxImageArrayLayers = 128;
    pSurfaceCapabilities->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR |
                                                VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR |
                                                VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR |
                                                VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR |
                                                VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR |
                                                VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR |
                                                VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR |
                                                VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR |
                                                VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR;
    pSurfaceCapabilities->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    pSurfaceCapabilities->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
                                                    VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR |
                                                    VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR |
                                                    VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR;
    pSurfaceCapabilities->supportedUsageFlags = VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                                VK_IMAGE_USAGE_TRANSFER_DST_BIT |
                                                VK_IMAGE_USAGE_SAMPLED_BIT |
                                                VK_IMAGE_USAGE_STORAGE_BIT |
                                                VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                                VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT |
                                                VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT |
                                                VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
    return VK_SUCCESS;
''',
'vkGetPhysicalDeviceSurfaceCapabilities2KHR': '''
    GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, pSurfaceInfo->surface, &pSurfaceCapabilities->surfaceCapabilities);
    return VK_SUCCESS;
''',
'vkGetInstanceProcAddr': '''
    if (!negotiate_loader_icd_interface_called) {
        loader_interface_version = 0;
    }
    const auto &item = name_to_funcptr_map.find(pName);
    if (item != name_to_funcptr_map.end()) {
        return reinterpret_cast<PFN_vkVoidFunction>(item->second);
    }
    // Mock should intercept all functions so if we get here just return null
    return nullptr;
''',
'vkGetDeviceProcAddr': '''
    return GetInstanceProcAddr(nullptr, pName);
''',
'vkGetPhysicalDeviceMemoryProperties': '''
    pMemoryProperties->memoryTypeCount = 2;
    pMemoryProperties->memoryTypes[0].propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    pMemoryProperties->memoryTypes[0].heapIndex = 0;
    pMemoryProperties->memoryTypes[1].propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    pMemoryProperties->memoryTypes[1].heapIndex = 1;
    pMemoryProperties->memoryHeapCount = 2;
    pMemoryProperties->memoryHeaps[0].flags = 0;
    pMemoryProperties->memoryHeaps[0].size = 8000000000;
    pMemoryProperties->memoryHeaps[1].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
    pMemoryProperties->memoryHeaps[1].size = 8000000000;
''',
'vkGetPhysicalDeviceMemoryProperties2KHR': '''
    GetPhysicalDeviceMemoryProperties(physicalDevice, &pMemoryProperties->memoryProperties);
''',
'vkGetPhysicalDeviceQueueFamilyProperties': '''
    if (!pQueueFamilyProperties) {
        *pQueueFamilyPropertyCount = 1;
    } else {
        if (*pQueueFamilyPropertyCount) {
            pQueueFamilyProperties[0].queueFlags = VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT | VK_QUEUE_SPARSE_BINDING_BIT;
            pQueueFamilyProperties[0].queueCount = 1;
            pQueueFamilyProperties[0].timestampValidBits = 0;
            pQueueFamilyProperties[0].minImageTransferGranularity = {1,1,1};
        }
    }
''',
'vkGetPhysicalDeviceQueueFamilyProperties2KHR': '''
    if (pQueueFamilyPropertyCount && pQueueFamilyProperties) {
        GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount, &pQueueFamilyProperties->queueFamilyProperties);
    } else {
        GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount, nullptr);
    }
''',
'vkGetPhysicalDeviceFeatures': '''
    uint32_t num_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
    VkBool32 *bool_array = &pFeatures->robustBufferAccess;
    SetBoolArrayTrue(bool_array, num_bools);
''',
'vkGetPhysicalDeviceFeatures2KHR': '''
    GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
    uint32_t num_bools = 0; // Count number of VkBool32s in extension structs
    VkBool32* feat_bools = nullptr;
    const auto *desc_idx_features = lvl_find_in_chain<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>(pFeatures->pNext);
    if (desc_idx_features) {
        const auto bool_size = sizeof(VkPhysicalDeviceDescriptorIndexingFeaturesEXT) - offsetof(VkPhysicalDeviceDescriptorIndexingFeaturesEXT, shaderInputAttachmentArrayDynamicIndexing);
        num_bools = bool_size/sizeof(VkBool32);
        feat_bools = (VkBool32*)&desc_idx_features->shaderInputAttachmentArrayDynamicIndexing;
        SetBoolArrayTrue(feat_bools, num_bools);
    }
    const auto *blendop_features = lvl_find_in_chain<VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT>(pFeatures->pNext);
    if (blendop_features) {
        const auto bool_size = sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT) - offsetof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT, advancedBlendCoherentOperations);
        num_bools = bool_size/sizeof(VkBool32);
        feat_bools = (VkBool32*)&blendop_features->advancedBlendCoherentOperations;
        SetBoolArrayTrue(feat_bools, num_bools);
    }
''',
'vkGetPhysicalDeviceFormatProperties': '''
    if (VK_FORMAT_UNDEFINED == format) {
        *pFormatProperties = { 0x0, 0x0, 0x0 };
    } else {
        // Default to a color format, skip DS bit
        *pFormatProperties = { 0x00FFFDFF, 0x00FFFDFF, 0x00FFFDFF };
        switch (format) {
            case VK_FORMAT_D16_UNORM:
            case VK_FORMAT_X8_D24_UNORM_PACK32:
            case VK_FORMAT_D32_SFLOAT:
            case VK_FORMAT_S8_UINT:
            case VK_FORMAT_D16_UNORM_S8_UINT:
            case VK_FORMAT_D24_UNORM_S8_UINT:
            case VK_FORMAT_D32_SFLOAT_S8_UINT:
                // Don't set color bits for DS formats
                *pFormatProperties = { 0x00FFFE7F, 0x00FFFE7F, 0x00FFFE7F };
                break;
            default:
                break;
        }
    }
''',
'vkGetPhysicalDeviceFormatProperties2KHR': '''
    GetPhysicalDeviceFormatProperties(physicalDevice, format, &pFormatProperties->formatProperties);
    VkFormatProperties3KHR *props_3 = lvl_find_mod_in_chain<VkFormatProperties3KHR>(pFormatProperties->pNext);
    if (props_3) {
        props_3->linearTilingFeatures = pFormatProperties->formatProperties.linearTilingFeatures;
        props_3->optimalTilingFeatures = pFormatProperties->formatProperties.optimalTilingFeatures;
        props_3->bufferFeatures = pFormatProperties->formatProperties.bufferFeatures;
    }
''',
'vkGetPhysicalDeviceImageFormatProperties': '''
    // A hardcoded unsupported format
    if (format == VK_FORMAT_E5B9G9R9_UFLOAT_PACK32) {
        return VK_ERROR_FORMAT_NOT_SUPPORTED;
    }
    // TODO: Just hard-coding some values for now
    // TODO: If tiling is linear, limit the mips, levels, & sample count
    if (VK_IMAGE_TILING_LINEAR == tiling) {
        *pImageFormatProperties = { { 4096, 4096, 256 }, 1, 1, VK_SAMPLE_COUNT_1_BIT, 4294967296 };
    } else {
        // We hard-code support for all sample counts except 64 bits.
        *pImageFormatProperties = { { 4096, 4096, 256 }, 12, 256, 0x7F & ~VK_SAMPLE_COUNT_64_BIT, 4294967296 };
    }
    return VK_SUCCESS;
''',
'vkGetPhysicalDeviceImageFormatProperties2KHR': '''
    GetPhysicalDeviceImageFormatProperties(physicalDevice, pImageFormatInfo->format, pImageFormatInfo->type, pImageFormatInfo->tiling, pImageFormatInfo->usage, pImageFormatInfo->flags, &pImageFormatProperties->imageFormatProperties);
    return VK_SUCCESS;
''',
'vkGetPhysicalDeviceProperties': '''
    // TODO: Just hard-coding some values for now
    pProperties->apiVersion = kSupportedVulkanAPIVersion;
    pProperties->driverVersion = 1;
    pProperties->vendorID = 0xba5eba11;
    pProperties->deviceID = 0xf005ba11;
    pProperties->deviceType = VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU;
    //std::string devName = "Vulkan Mock Device";
    strcpy(pProperties->deviceName, "Vulkan Mock Device");
    pProperties->pipelineCacheUUID[0] = 18;
    pProperties->limits = SetLimits(&pProperties->limits);
    pProperties->sparseProperties = { VK_TRUE, VK_TRUE, VK_TRUE, VK_TRUE, VK_TRUE };
''',
'vkGetPhysicalDeviceProperties2KHR': '''
    GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
    const auto *desc_idx_props = lvl_find_in_chain<VkPhysicalDeviceDescriptorIndexingPropertiesEXT>(pProperties->pNext);
    if (desc_idx_props) {
        VkPhysicalDeviceDescriptorIndexingPropertiesEXT* write_props = (VkPhysicalDeviceDescriptorIndexingPropertiesEXT*)desc_idx_props;
        write_props->maxUpdateAfterBindDescriptorsInAllPools = 500000;
        write_props->shaderUniformBufferArrayNonUniformIndexingNative = false;
        write_props->shaderSampledImageArrayNonUniformIndexingNative = false;
        write_props->shaderStorageBufferArrayNonUniformIndexingNative = false;
        write_props->shaderStorageImageArrayNonUniformIndexingNative = false;
        write_props->shaderInputAttachmentArrayNonUniformIndexingNative = false;
        write_props->robustBufferAccessUpdateAfterBind = true;
        write_props->quadDivergentImplicitLod = true;
        write_props->maxPerStageDescriptorUpdateAfterBindSamplers = 500000;
        write_props->maxPerStageDescriptorUpdateAfterBindUniformBuffers = 500000;
        write_props->maxPerStageDescriptorUpdateAfterBindStorageBuffers = 500000;
        write_props->maxPerStageDescriptorUpdateAfterBindSampledImages = 500000;
        write_props->maxPerStageDescriptorUpdateAfterBindStorageImages = 500000;
        write_props->maxPerStageDescriptorUpdateAfterBindInputAttachments = 500000;
        write_props->maxPerStageUpdateAfterBindResources = 500000;
        write_props->maxDescriptorSetUpdateAfterBindSamplers = 500000;
        write_props->maxDescriptorSetUpdateAfterBindUniformBuffers = 96;
        write_props->maxDescriptorSetUpdateAfterBindUniformBuffersDynamic = 8;
        write_props->maxDescriptorSetUpdateAfterBindStorageBuffers = 500000;
        write_props->maxDescriptorSetUpdateAfterBindStorageBuffersDynamic = 4;
        write_props->maxDescriptorSetUpdateAfterBindSampledImages = 500000;
        write_props->maxDescriptorSetUpdateAfterBindStorageImages = 500000;
        write_props->maxDescriptorSetUpdateAfterBindInputAttachments = 500000;
    }
    const auto *push_descriptor_props = lvl_find_in_chain<VkPhysicalDevicePushDescriptorPropertiesKHR>(pProperties->pNext);
    if (push_descriptor_props) {
        VkPhysicalDevicePushDescriptorPropertiesKHR* write_props = (VkPhysicalDevicePushDescriptorPropertiesKHR*)push_descriptor_props;
        write_props->maxPushDescriptors = 256;
    }
    const auto *depth_stencil_resolve_props = lvl_find_in_chain<VkPhysicalDeviceDepthStencilResolvePropertiesKHR>(pProperties->pNext);
    if (depth_stencil_resolve_props) {
        VkPhysicalDeviceDepthStencilResolvePropertiesKHR* write_props = (VkPhysicalDeviceDepthStencilResolvePropertiesKHR*)depth_stencil_resolve_props;
        write_props->supportedDepthResolveModes = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR;
        write_props->supportedStencilResolveModes = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR;
    }
    const auto *fragment_density_map2_props = lvl_find_in_chain<VkPhysicalDeviceFragmentDensityMap2PropertiesEXT>(pProperties->pNext);
    if (fragment_density_map2_props) {
        VkPhysicalDeviceFragmentDensityMap2PropertiesEXT* write_props = (VkPhysicalDeviceFragmentDensityMap2PropertiesEXT*)fragment_density_map2_props;
        write_props->subsampledLoads = VK_FALSE;
        write_props->subsampledCoarseReconstructionEarlyAccess = VK_FALSE;
        write_props->maxSubsampledArrayLayers = 2;
        write_props->maxDescriptorSetSubsampledSamplers = 1;
    }
''',
'vkGetPhysicalDeviceExternalSemaphoreProperties':'''
    // Hard code support for all handle types and features
    pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0x1F;
    pExternalSemaphoreProperties->compatibleHandleTypes = 0x1F;
    pExternalSemaphoreProperties->externalSemaphoreFeatures = 0x3;
''',
'vkGetPhysicalDeviceExternalSemaphorePropertiesKHR':'''
    GetPhysicalDeviceExternalSemaphoreProperties(physicalDevice, pExternalSemaphoreInfo, pExternalSemaphoreProperties);
''',
'vkGetPhysicalDeviceExternalFenceProperties':'''
    // Hard-code support for all handle types and features
    pExternalFenceProperties->exportFromImportedHandleTypes = 0xF;
    pExternalFenceProperties->compatibleHandleTypes = 0xF;
    pExternalFenceProperties->externalFenceFeatures = 0x3;
''',
'vkGetPhysicalDeviceExternalFencePropertiesKHR':'''
    GetPhysicalDeviceExternalFenceProperties(physicalDevice, pExternalFenceInfo, pExternalFenceProperties);
''',
'vkGetPhysicalDeviceExternalBufferProperties':'''
    // Hard-code support for all handle types and features
    pExternalBufferProperties->externalMemoryProperties.externalMemoryFeatures = 0x7;
    pExternalBufferProperties->externalMemoryProperties.exportFromImportedHandleTypes = 0x1FF;
    pExternalBufferProperties->externalMemoryProperties.compatibleHandleTypes = 0x1FF;
''',
'vkGetPhysicalDeviceExternalBufferPropertiesKHR':'''
    GetPhysicalDeviceExternalBufferProperties(physicalDevice, pExternalBufferInfo, pExternalBufferProperties);
''',
'vkGetBufferMemoryRequirements': '''
    // TODO: Just hard-coding reqs for now
    pMemoryRequirements->size = 4096;
    pMemoryRequirements->alignment = 1;
    pMemoryRequirements->memoryTypeBits = 0xFFFF;
    // Return a better size based on the buffer size from the create info.
    auto d_iter = buffer_map.find(device);
    if (d_iter != buffer_map.end()) {
        auto iter = d_iter->second.find(buffer);
        if (iter != d_iter->second.end()) {
            pMemoryRequirements->size = ((iter->second.size + 4095) / 4096) * 4096;
        }
    }
''',
'vkGetBufferMemoryRequirements2KHR': '''
    GetBufferMemoryRequirements(device, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
''',
'vkGetImageMemoryRequirements': '''
    pMemoryRequirements->size = 0;
    pMemoryRequirements->alignment = 1;
    auto d_iter = image_memory_size_map.find(device);
    if(d_iter != image_memory_size_map.end()){
        auto iter = d_iter->second.find(image);
        if (iter != d_iter->second.end()) {
            pMemoryRequirements->size = iter->second;
        }
    }
    // Here we hard-code that the memory type at index 3 doesn't support this image.
    pMemoryRequirements->memoryTypeBits = 0xFFFF & ~(0x1 << 3);
''',
'vkGetImageMemoryRequirements2KHR': '''
    GetImageMemoryRequirements(device, pInfo->image, &pMemoryRequirements->memoryRequirements);
''',
'vkMapMemory': '''
    unique_lock_t lock(global_lock);
    if (VK_WHOLE_SIZE == size) {
        if (allocated_memory_size_map.count(memory) != 0)
            size = allocated_memory_size_map[memory] - offset;
        else
            size = 0x10000;
    }
    void* map_addr = malloc((size_t)size);
    mapped_memory_map[memory].push_back(map_addr);
    *ppData = map_addr;
    return VK_SUCCESS;
''',
'vkUnmapMemory': '''
    unique_lock_t lock(global_lock);
    for (auto map_addr : mapped_memory_map[memory]) {
        free(map_addr);
    }
    mapped_memory_map.erase(memory);
''',
'vkGetImageSubresourceLayout': '''
    // Need safe values. Callers are computing memory offsets from pLayout, with no return code to flag failure.
    *pLayout = VkSubresourceLayout(); // Default constructor zero values.
''',
'vkCreateSwapchainKHR': '''
    unique_lock_t lock(global_lock);
    *pSwapchain = (VkSwapchainKHR)global_unique_handle++;
    for(uint32_t i = 0; i < icd_swapchain_image_count; ++i){
        swapchain_image_map[*pSwapchain][i] = (VkImage)global_unique_handle++;
    }
    return VK_SUCCESS;
''',
'vkDestroySwapchainKHR': '''
    unique_lock_t lock(global_lock);
    swapchain_image_map.clear();
''',
'vkGetSwapchainImagesKHR': '''
    if (!pSwapchainImages) {
        *pSwapchainImageCount = icd_swapchain_image_count;
    } else {
        unique_lock_t lock(global_lock);
        for (uint32_t img_i = 0; img_i < (std::min)(*pSwapchainImageCount, icd_swapchain_image_count); ++img_i){
            pSwapchainImages[img_i] = swapchain_image_map.at(swapchain)[img_i];
        }
        if (*pSwapchainImageCount < icd_swapchain_image_count) return VK_INCOMPLETE;
        else if (*pSwapchainImageCount > icd_swapchain_image_count) *pSwapchainImageCount = icd_swapchain_image_count;
    }
    return VK_SUCCESS;
''',
'vkAcquireNextImageKHR': '''
    *pImageIndex = 0;
    return VK_SUCCESS;
''',
'vkAcquireNextImage2KHR': '''
    *pImageIndex = 0;
    return VK_SUCCESS;
''',
'vkCreateBuffer': '''
    unique_lock_t lock(global_lock);
    *pBuffer = (VkBuffer)global_unique_handle++;
    buffer_map[device][*pBuffer] = *pCreateInfo;
    return VK_SUCCESS;
''',
'vkDestroyBuffer': '''
    unique_lock_t lock(global_lock);
    buffer_map[device].erase(buffer);
''',
'vkCreateImage': '''
    unique_lock_t lock(global_lock);
    *pImage = (VkImage)global_unique_handle++;
    // TODO: A pixel size is 32 bytes. This accounts for the largest possible pixel size of any format. It could be changed to more accurate size if need be.
    image_memory_size_map[device][*pImage] = pCreateInfo->extent.width * pCreateInfo->extent.height * pCreateInfo->extent.depth *
                                             32 * pCreateInfo->arrayLayers * (pCreateInfo->mipLevels > 1 ? 2 : 1);
    // plane count
    switch (pCreateInfo->format) {
        case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
        case VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM:
        case VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM:
        case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16:
        case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16:
        case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16:
        case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16:
        case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16:
        case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16:
        case VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM:
        case VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM:
        case VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM:
            image_memory_size_map[device][*pImage] *= 3;
            break;
        case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
        case VK_FORMAT_G8_B8R8_2PLANE_422_UNORM:
        case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16:
        case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16:
        case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16:
        case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16:
        case VK_FORMAT_G16_B16R16_2PLANE_420_UNORM:
        case VK_FORMAT_G16_B16R16_2PLANE_422_UNORM:
            image_memory_size_map[device][*pImage] *= 2;
            break;
        default:
            break;
    }
    return VK_SUCCESS;
''',
'vkDestroyImage': '''
    unique_lock_t lock(global_lock);
    image_memory_size_map[device].erase(image);
''',
}
# MockICDGeneratorOptions - subclass of GeneratorOptions.
#
# Adds options used by MockICDOutputGenerator objects during Mock
# ICD generation.
#
# Additional members
# prefixText - list of strings to prefix generated header with
# (usually a copyright statement + calling convention macros).
# protectFile - True if multiple inclusion protection should be
# generated (based on the filename) around the entire header.
# protectFeature - True if #ifndef..#endif protection should be
# generated around a feature interface in the header file.
# genFuncPointers - True if function pointer typedefs should be
# generated
# protectProto - If conditional protection should be generated
#   around prototype declarations, set to either '#ifdef'
#   to require opt-in (#ifdef protectProtoStr) or '#ifndef'
# to require opt-out (#ifndef protectProtoStr). Otherwise
# set to None.
# protectProtoStr - #ifdef/#ifndef symbol to use around prototype
# declarations, if protectProto is set
# apicall - string to use for the function declaration prefix,
# such as APICALL on Windows.
# apientry - string to use for the calling convention macro,
# in typedefs, such as APIENTRY.
# apientryp - string to use for the calling convention macro
# in function pointer typedefs, such as APIENTRYP.
# indentFuncProto - True if prototype declarations should put each
# parameter on a separate line
# indentFuncPointer - True if typedefed function pointers should put each
# parameter on a separate line
# alignFuncParam - if nonzero and parameters are being put on a
# separate line, align parameter names at the specified column
class MockICDGeneratorOptions(GeneratorOptions):
    """Options consumed by MockICDOutputGenerator during mock ICD generation.

    Extends GeneratorOptions with knobs controlling how the mock ICD source
    is emitted: copyright/prefix text, inclusion-guard and feature protection,
    calling-convention macros, and prototype indentation/alignment.
    """
    def __init__(self,
                 conventions = None,
                 filename = None,
                 directory = '.',
                 genpath = None,
                 apiname = None,
                 profile = None,
                 versions = '.*',
                 emitversions = '.*',
                 defaultExtensions = None,
                 addExtensions = None,
                 removeExtensions = None,
                 emitExtensions = None,
                 sortProcedure = regSortFeatures,
                 prefixText = "",
                 genFuncPointers = True,
                 protectFile = True,
                 protectFeature = True,
                 protectProto = None,
                 protectProtoStr = None,
                 apicall = '',
                 apientry = '',
                 apientryp = '',
                 indentFuncProto = True,
                 indentFuncPointer = False,
                 alignFuncParam = 0,
                 expandEnumerants = True,
                 helper_file_type = ''):
        # Forward the registry-selection options to the base class.
        GeneratorOptions.__init__(self,
                                  conventions = conventions,
                                  filename = filename,
                                  directory = directory,
                                  genpath = genpath,
                                  apiname = apiname,
                                  profile = profile,
                                  versions = versions,
                                  emitversions = emitversions,
                                  defaultExtensions = defaultExtensions,
                                  addExtensions = addExtensions,
                                  removeExtensions = removeExtensions,
                                  emitExtensions = emitExtensions,
                                  sortProcedure = sortProcedure)
        # Record the mock-ICD-specific emission options on the instance.
        # NOTE(review): expandEnumerants and helper_file_type are accepted for
        # call-site compatibility but are not stored, matching prior behavior.
        emission_options = dict(
            prefixText = prefixText,
            genFuncPointers = genFuncPointers,
            protectFile = protectFile,
            protectFeature = protectFeature,
            protectProto = protectProto,
            protectProtoStr = protectProtoStr,
            apicall = apicall,
            apientry = apientry,
            apientryp = apientryp,
            indentFuncProto = indentFuncProto,
            indentFuncPointer = indentFuncPointer,
            alignFuncParam = alignFuncParam)
        for option_name, option_value in emission_options.items():
            setattr(self, option_name, option_value)
# MockICDOutputGenerator - subclass of OutputGenerator.
# Generates a mock vulkan ICD.
# This is intended to be a minimal replacement for a vulkan device in order
# to enable Vulkan Validation testing.
#
# ---- methods ----
# MockICDOutputGenerator(errFile, warnFile, diagFile) - args as for
# OutputGenerator. Defines additional internal state.
# ---- methods overriding base class ----
# beginFile(genOpts)
# endFile()
# beginFeature(interface, emit)
# endFeature()
# genType(typeinfo,name)
# genStruct(typeinfo,name)
# genGroup(groupinfo,name)
# genEnum(enuminfo, name)
# genCmd(cmdinfo)
class MockICDOutputGenerator(OutputGenerator):
    """Output generator that produces the mock (null) Vulkan ICD.

    Depending on whether the target filename looks like a header (see
    beginFile), this emits either declarations plus the extension and
    intercept maps, or the C++ bodies for every Vulkan entry point.

    NOTE(review): several string literals in this copy of the file appear
    truncated (lines reading only ``write('`` or ``self.intercepts += [ '``).
    The missing text was most likely preprocessor-guard content such as
    '#ifndef'/'#endif'; confirm against the upstream generator source.
    """
    # This is an ordered list of sections in the header file.
    TYPE_SECTIONS = ['include', 'define', 'basetype', 'handle', 'enum',
                     'group', 'bitmask', 'funcpointer', 'struct']
    ALL_SECTIONS = TYPE_SECTIONS + ['command']
    def __init__(self,
                 errFile = sys.stderr,
                 warnFile = sys.stderr,
                 diagFile = sys.stdout):
        OutputGenerator.__init__(self, errFile, warnFile, diagFile)
        # Internal state - accumulators for different inner block text
        self.sections = dict([(section, []) for section in self.ALL_SECTIONS])
        # Entries for the name_to_funcptr_map emitted into the header file.
        self.intercepts = []
    # Check if the parameter passed in is a pointer to an array
    def paramIsArray(self, param):
        return param.attrib.get('len') is not None
    # Check if the parameter passed in is a pointer
    def paramIsPointer(self, param):
        ispointer = False
        for elem in param:
            if ((elem.tag != 'type') and (elem.tail is not None)) and '*' in elem.tail:
                ispointer = True
        return ispointer
    # Check if an object is a non-dispatchable handle
    def isHandleTypeNonDispatchable(self, handletype):
        handle = self.registry.tree.find("types/type/[name='" + handletype + "'][@category='handle']")
        if handle is not None and handle.find('type').text == 'VK_DEFINE_NON_DISPATCHABLE_HANDLE':
            return True
        else:
            return False
    # Check if an object is a dispatchable handle
    def isHandleTypeDispatchable(self, handletype):
        handle = self.registry.tree.find("types/type/[name='" + handletype + "'][@category='handle']")
        if handle is not None and handle.find('type').text == 'VK_DEFINE_HANDLE':
            return True
        else:
            return False
    def beginFile(self, genOpts):
        """Begin the output file: emit includes and (header) extension maps."""
        OutputGenerator.beginFile(self, genOpts)
        # C-specific
        #
        # Multiple inclusion protection & C++ namespace.
        self.header = False
        if (genOpts.protectFile and self.genOpts.filename and 'h' == self.genOpts.filename[-1]):
            self.header = True
            headerSym = '__' + re.sub(r'\.h', '_h_', os.path.basename(self.genOpts.filename))
            # NOTE(review): the next two write() calls are truncated in this
            # copy; they presumably emitted the '#ifndef'/'#define' inclusion
            # guard using headerSym — confirm against upstream.
            write('
            write('
            self.newline()
        #
        # User-supplied prefix text, if any (list of strings)
        if (genOpts.prefixText):
            for s in genOpts.prefixText:
                write(s, file=self.outFile)
        if self.header:
            # we don't want prototypes to avoid linking issues.
            write('#define VK_NO_PROTOTYPES', file=self.outFile)
            write('#include <unordered_map>', file=self.outFile)
            write('#include <mutex>', file=self.outFile)
            write('#include <string>', file=self.outFile)
            write('#include <cstring>', file=self.outFile)
            write('#include "vulkan/vk_icd.h"', file=self.outFile)
        else:
            write('#include "mock_icd.h"', file=self.outFile)
            write('#include <stdlib.h>', file=self.outFile)
            write('#include <algorithm>', file=self.outFile)
            write('#include <array>', file=self.outFile)
            write('#include <vector>', file=self.outFile)
            write('#include "vk_typemap_helper.h"', file=self.outFile)
        write('namespace vkmock {', file=self.outFile)
        if self.header:
            self.newline()
            write(HEADER_C_CODE, file=self.outFile)
            # Build name->version maps for every enabled extension, split
            # into instance vs. device extensions.
            device_exts = []
            instance_exts = []
            ignore_exts = ['VK_EXT_validation_cache', 'VK_KHR_portability_subset']
            for ext in self.registry.tree.findall("extensions/extension"):
                if ext.attrib['supported'] != 'disabled':
                    if (ext.attrib['name'] not in ignore_exts):
                        for enum in ext.findall('require/enum'):
                            if enum.get('name', '').endswith('_SPEC_VERSION'):
                                ext_version = enum.get('value')
                                if (ext.attrib.get('type') == 'instance'):
                                    instance_exts.append(' {"%s", %s},' % (ext.attrib['name'], ext_version))
                                else:
                                    device_exts.append(' {"%s", %s},' % (ext.attrib['name'], ext_version))
                                break
            write('// Map of instance extension name to version', file=self.outFile)
            write('static const std::unordered_map<std::string, uint32_t> instance_extension_map = {', file=self.outFile)
            write('\n'.join(instance_exts), file=self.outFile)
            write('};', file=self.outFile)
            write('// Map of device extension name to version', file=self.outFile)
            write('static const std::unordered_map<std::string, uint32_t> device_extension_map = {', file=self.outFile)
            write('\n'.join(device_exts), file=self.outFile)
            write('};', file=self.outFile)
        else:
            self.newline()
            write(SOURCE_CPP_PREFIX, file=self.outFile)
    def endFile(self):
        """Finish the file: intercept map + footer (header) or postfix (source)."""
        self.newline()
        if self.header:
            write('// Map of all APIs to be intercepted by this layer', file=self.outFile)
            write('static const std::unordered_map<std::string, void*> name_to_funcptr_map = {', file=self.outFile)
            write('\n'.join(self.intercepts), file=self.outFile)
            write('};\n', file=self.outFile)
            self.newline()
            write('} // namespace vkmock', file=self.outFile)
            self.newline()
            write('#endif', file=self.outFile)
        else:
            write(SOURCE_CPP_POSTFIX, file=self.outFile)
        OutputGenerator.endFile(self)
    def beginFeature(self, interface, emit):
        # Accumulate state per feature; the sections dict is flushed and
        # printed in endFeature().
        OutputGenerator.beginFeature(self, interface, emit)
        self.featureExtraProtect = GetFeatureProtect(interface)
        # printed in endFeature().
        self.sections = dict([(section, []) for section in self.ALL_SECTIONS])
        #write('// ending beginFeature', file=self.outFile)
    def endFeature(self):
        # C-specific
        # Actually write the interface to the output file.
        #write('// starting endFeature', file=self.outFile)
        if (self.emit):
            self.newline()
            if (self.genOpts.protectFeature):
                # NOTE(review): truncated write() — presumably opened an
                # '#ifndef <feature>' guard; confirm upstream.
                write('
            # If type declarations are needed by other features based on
            # this one, it may be necessary to suppress the ExtraProtect,
            # or move it below the 'for section...' loop.
            #write('// endFeature looking at self.featureExtraProtect', file=self.outFile)
            if (self.featureExtraProtect != None):
                # NOTE(review): truncated write() — presumably emitted
                # '#ifdef <featureExtraProtect>'; confirm upstream.
                write('
            #write('
            for section in self.TYPE_SECTIONS:
                #write('// endFeature writing section'+section, file=self.outFile)
                contents = self.sections[section]
                if contents:
                    write('\n'.join(contents), file=self.outFile)
                    self.newline()
            #write('// endFeature looking at self.sections[command]', file=self.outFile)
            if (self.sections['command']):
                write('\n'.join(self.sections['command']), end=u'', file=self.outFile)
                self.newline()
            if (self.featureExtraProtect != None):
                # NOTE(review): truncated write() — presumably '#endif'.
                write('
            if (self.genOpts.protectFeature):
                # NOTE(review): truncated write() — presumably '#endif'.
                write('
        # Finish processing in superclass
        OutputGenerator.endFeature(self)
        #write('// ending endFeature', file=self.outFile)
    #
    # Append a definition to the specified section
    def appendSection(self, section, text):
        # self.sections[section].append('SECTION: ' + section + '\n')
        self.sections[section].append(text)
    #
    # Type generation
    def genType(self, typeinfo, name, alias):
        pass
    #
    # Struct (e.g. C "struct" type) generation.
    # This is a special case of the <type> tag where the contents are
    # interpreted as a set of <member> tags instead of freeform C
    # C type declarations. The <member> tags are just like <param>
    # tags - they are a declaration of a struct or union member.
    # Only simple member declarations are supported (no nested
    # structs etc.)
    def genStruct(self, typeinfo, typeName, alias):
        OutputGenerator.genStruct(self, typeinfo, typeName, alias)
        body = 'typedef ' + typeinfo.elem.get('category') + ' ' + typeName + ' {\n'
        # paramdecl = self.makeCParamDecl(typeinfo.elem, self.genOpts.alignFuncParam)
        for member in typeinfo.elem.findall('.//member'):
            body += self.makeCParamDecl(member, self.genOpts.alignFuncParam)
            body += ';\n'
        body += '} ' + typeName + ';\n'
        self.appendSection('struct', body)
    #
    # Group (e.g. C "enum" type) generation.
    # These are concatenated together with other types.
    def genGroup(self, groupinfo, groupName, alias):
        pass
    # Enumerant generation
    # <enum> tags may specify their values in several ways, but are usually
    # just integers.
    def genEnum(self, enuminfo, name, alias):
        pass
    #
    # Command generation
    def genCmd(self, cmdinfo, name, alias):
        """Emit the declaration (header) or mock body (source) for one command."""
        decls = self.makeCDecls(cmdinfo.elem)
        if self.header: # In the header declare all intercepts
            self.appendSection('command', '')
            self.appendSection('command', 'static %s' % (decls[0]))
            if (self.featureExtraProtect != None):
                # NOTE(review): truncated — presumably appended an '#ifdef'
                # guard entry; confirm upstream.
                self.intercepts += [ '
            self.intercepts += [ ' {"%s", (void*)%s},' % (name,name[2:]) ]
            if (self.featureExtraProtect != None):
                # NOTE(review): truncated — presumably appended '#endif'.
                self.intercepts += [ '
            return
        manual_functions = [
            # Include functions here to be intercepted w/ manually implemented function bodies
            'vkGetDeviceProcAddr',
            'vkGetInstanceProcAddr',
            'vkCreateDevice',
            'vkDestroyDevice',
            'vkCreateInstance',
            'vkDestroyInstance',
            'vkFreeCommandBuffers',
            'vkAllocateCommandBuffers',
            'vkDestroyCommandPool',
            #'vkCreateDebugReportCallbackEXT',
            #'vkDestroyDebugReportCallbackEXT',
            'vkEnumerateInstanceLayerProperties',
            'vkEnumerateInstanceVersion',
            'vkEnumerateInstanceExtensionProperties',
            'vkEnumerateDeviceLayerProperties',
            'vkEnumerateDeviceExtensionProperties',
        ]
        if name in manual_functions:
            self.appendSection('command', '')
            if name not in CUSTOM_C_INTERCEPTS:
                self.appendSection('command', '// declare only')
                self.appendSection('command', 'static %s' % (decls[0]))
                self.appendSection('command', '// TODO: Implement custom intercept body')
            else:
                self.appendSection('command', 'static %s' % (decls[0][:-1]))
                self.appendSection('command', '{\n%s}' % (CUSTOM_C_INTERCEPTS[name]))
            self.intercepts += [ ' {"%s", (void*)%s},' % (name,name[2:]) ]
            return
        # record that the function will be intercepted
        if (self.featureExtraProtect != None):
            # NOTE(review): truncated — presumably appended an '#ifdef' entry.
            self.intercepts += [ '
        self.intercepts += [ ' {"%s", (void*)%s},' % (name,name[2:]) ]
        if (self.featureExtraProtect != None):
            # NOTE(review): truncated — presumably appended '#endif'.
            self.intercepts += [ '
        OutputGenerator.genCmd(self, cmdinfo, name, alias)
        #
        self.appendSection('command', '')
        self.appendSection('command', 'static %s' % (decls[0][:-1]))
        if name in CUSTOM_C_INTERCEPTS:
            self.appendSection('command', '{%s}' % (CUSTOM_C_INTERCEPTS[name]))
            return
        # Declare result variable, if any.
        resulttype = cmdinfo.elem.find('proto/type')
        if (resulttype != None and resulttype.text == 'void'):
            resulttype = None
        # if the name w/ KHR postfix is in the CUSTOM_C_INTERCEPTS
        # Call the KHR custom version instead of generating separate code
        khr_name = name + "KHR"
        if khr_name in CUSTOM_C_INTERCEPTS:
            return_string = ''
            if resulttype != None:
                return_string = 'return '
            params = cmdinfo.elem.findall('param/name')
            param_names = []
            for param in params:
                param_names.append(param.text)
            self.appendSection('command', '{\n %s%s(%s);\n}' % (return_string, khr_name[2:], ", ".join(param_names)))
            return
        self.appendSection('command', '{')
        api_function_name = cmdinfo.elem.attrib.get('name')
        # GET THE TYPE OF FUNCTION
        if True in [ftxt in api_function_name for ftxt in ['Create', 'Allocate']]:
            # Get last param
            last_param = cmdinfo.elem.findall('param')[-1]
            lp_txt = last_param.find('name').text
            lp_len = None
            if ('len' in last_param.attrib):
                lp_len = last_param.attrib['len']
                lp_len = lp_len.replace('::', '->')
            lp_type = last_param.find('type').text
            handle_type = 'dispatchable'
            allocator_txt = 'CreateDispObjHandle()';
            if (self.isHandleTypeNonDispatchable(lp_type)):
                handle_type = 'non-' + handle_type
                allocator_txt = 'global_unique_handle++';
            # Need to lock in both cases
            self.appendSection('command', ' unique_lock_t lock(global_lock);')
            if (lp_len != None):
                #print("%s last params (%s) has len %s" % (handle_type, lp_txt, lp_len))
                self.appendSection('command', ' for (uint32_t i = 0; i < %s; ++i) {' % (lp_len))
                self.appendSection('command', ' %s[i] = (%s)%s;' % (lp_txt, lp_type, allocator_txt))
                self.appendSection('command', ' }')
            else:
                #print("Single %s last param is '%s' w/ type '%s'" % (handle_type, lp_txt, lp_type))
                if 'AllocateMemory' in api_function_name:
                    # Store allocation size in case it's mapped
                    self.appendSection('command', ' allocated_memory_size_map[(VkDeviceMemory)global_unique_handle] = pAllocateInfo->allocationSize;')
                self.appendSection('command', ' *%s = (%s)%s;' % (lp_txt, lp_type, allocator_txt))
        elif True in [ftxt in api_function_name for ftxt in ['Destroy', 'Free']]:
            self.appendSection('command', '//Destroy object')
            if 'FreeMemory' in api_function_name:
                self.appendSection('command', ' allocated_memory_size_map.erase(memory);')
        else:
            self.appendSection('command', '//Not a CREATE or DESTROY function')
        if (resulttype != None):
            if api_function_name == 'vkGetEventStatus':
                self.appendSection('command', ' return VK_EVENT_SET;')
            else:
                self.appendSection('command', ' return VK_SUCCESS;')
        self.appendSection('command', '}')
    def makeProtoName(self, name, tail):
        return self.genOpts.apientry + name[2:] + tail
| true | true |
1c30b0b0b2dd41db25f62e5e8c870bf536a3300b | 2,172 | py | Python | Project_2.py | jainrachit1008/Titans-of-Wall-Street-Program-Projects | 2d71499a0942ed506330c412eae3b0822c837aa7 | [
"MIT"
] | null | null | null | Project_2.py | jainrachit1008/Titans-of-Wall-Street-Program-Projects | 2d71499a0942ed506330c412eae3b0822c837aa7 | [
"MIT"
] | null | null | null | Project_2.py | jainrachit1008/Titans-of-Wall-Street-Program-Projects | 2d71499a0942ed506330c412eae3b0822c837aa7 | [
"MIT"
] | null | null | null | import datetime as dt
import pandas as pd
import numpy as np
from pandas.plotting import table
import matplotlib.pyplot as plt
def ann_return(DF):
    """Annualized return computed from a monthly NAV series.

    :param DF: DataFrame with a ``NAV`` column of monthly prices.
    :return: annualized (geometric) return as a decimal fraction.
    """
    df = DF.copy()
    df["mon_ret"] = df["NAV"].pct_change()
    df["cum_return"] = (1 + df["mon_ret"]).cumprod()
    # Number of years covered by the monthly series.
    n = len(df) / 12
    # Use .iloc for positional access: `df["cum_return"][-1]` relies on the
    # deprecated positional fallback and raises KeyError on integer indexes.
    ann_ret = df["cum_return"].iloc[-1] ** (1 / n) - 1
    return ann_ret
def ann_volatility(DF):
    """Annualized volatility: std dev of monthly NAV returns scaled by sqrt(12)."""
    frame = DF.copy()
    monthly_returns = frame["NAV"].pct_change()
    frame["mon_ret"] = monthly_returns
    return monthly_returns.std() * np.sqrt(12)
def sharpe(DF, rf):
    """Sharpe ratio of the NAV series in DF; ``rf`` is the risk-free rate."""
    frame = DF.copy()
    excess_return = ann_return(frame) - rf
    return excess_return / ann_volatility(frame)
# Import the Lipper Hedge Fund Data
Lip = pd.read_csv(r'/Users/rachnish/Dropbox/TWSA Session #1 - Wed Nov 20/Kapil_Data.csv', index_col='Date')
# Format the date columns (and the index) to datetime.
Lip['Performance Start Date'] = pd.to_datetime(Lip['Performance Start Date'], errors='raise', dayfirst=True)
Lip['Performance End Date'] = pd.to_datetime(Lip['Performance End Date'], errors='raise', dayfirst=True)
Lip.index = pd.to_datetime(Lip.index, errors='raise', dayfirst=True)
# Keep only funds with a continuous track record covering all of 1995, then
# restrict the observations to the 1995 calendar year.
Yearly_data = Lip.copy()
Yearly_data = Yearly_data[(Yearly_data['Performance Start Date'] <= '1995-01-01') & (Yearly_data['Performance End Date'] >= '1995-12-31')]
Yearly_data = Yearly_data[(Yearly_data.index >= '1995-01-01') & (Yearly_data.index <= '1995-12-31')]
# Calculate the Sharpe Ratio for each fund in the selected database.
HF = list(Yearly_data['Fund Name'].unique())
HF_stats = pd.DataFrame(columns=['SharpeRatioPast', 'PercentileRankingPast'], index=HF)
for i in HF:
    # Assign through the frame's .loc (row, column) rather than via chained
    # indexing, which triggers SettingWithCopyWarning and may not write back.
    HF_stats.loc[i, 'SharpeRatioPast'] = sharpe(Yearly_data.loc[Yearly_data['Fund Name'] == i], 0.00)
# Calculate the percentile ranking (0-100, best Sharpe = rank 1) per fund.
ranks = HF_stats.SharpeRatioPast.rank(ascending=False)
HF_stats['PercentileRankingPast'] = (ranks - ranks.min())/(ranks.max() - ranks.min())*100
| 40.222222 | 138 | 0.706722 | import datetime as dt
import pandas as pd
import numpy as np
from pandas.plotting import table
import matplotlib.pyplot as plt
def ann_return(DF):
    """Annualized return computed from a monthly NAV series.

    :param DF: DataFrame with a ``NAV`` column of monthly prices.
    :return: annualized (geometric) return as a decimal fraction.
    """
    df = DF.copy()
    df["mon_ret"] = df["NAV"].pct_change()
    df["cum_return"] = (1 + df["mon_ret"]).cumprod()
    # Number of years covered by the monthly series.
    n = len(df) / 12
    # Use .iloc for positional access: `df["cum_return"][-1]` relies on the
    # deprecated positional fallback and raises KeyError on integer indexes.
    ann_ret = df["cum_return"].iloc[-1] ** (1 / n) - 1
    return ann_ret
def ann_volatility(DF):
    """Annualized volatility: std dev of monthly NAV returns scaled by sqrt(12)."""
    df = DF.copy()
    df["mon_ret"] = df["NAV"].pct_change()
    # Scale the monthly standard deviation to an annual figure.
    vol = df["mon_ret"].std() * np.sqrt(12)
    return vol
def sharpe(DF, rf):
    """Sharpe ratio of the NAV series in DF; ``rf`` is the risk-free rate."""
    df = DF.copy()
    sr = (ann_return(df) - rf) / ann_volatility(df)
    return sr
# Load the Lipper hedge-fund data and normalize the date columns/index.
Lip = pd.read_csv(r'/Users/rachnish/Dropbox/TWSA Session #1 - Wed Nov 20/Kapil_Data.csv', index_col='Date')
Lip['Performance Start Date'] = pd.to_datetime(Lip['Performance Start Date'], errors='raise', dayfirst=True)
Lip['Performance End Date'] = pd.to_datetime(Lip['Performance End Date'], errors='raise', dayfirst=True)
Lip.index = pd.to_datetime(Lip.index, errors='raise', dayfirst=True)
# Keep funds with a track record covering all of 1995, restricted to 1995 rows.
Yearly_data = Lip.copy()
Yearly_data = Yearly_data[(Yearly_data['Performance Start Date'] <= '1995-01-01') & (Yearly_data['Performance End Date'] >= '1995-12-31')]
Yearly_data = Yearly_data[(Yearly_data.index >= '1995-01-01') & (Yearly_data.index <= '1995-12-31')]
# Compute the Sharpe ratio per fund.
HF = list(Yearly_data['Fund Name'].unique())
HF_stats = pd.DataFrame(columns=['SharpeRatioPast', 'PercentileRankingPast'], index=HF)
for i in HF:
    # NOTE(review): chained indexing (column then .loc) can raise
    # SettingWithCopyWarning; `HF_stats.loc[i, 'SharpeRatioPast'] = ...`
    # would be the safe form — confirm before changing behavior.
    HF_stats['SharpeRatioPast'].loc[i] = sharpe(Yearly_data.loc[Yearly_data['Fund Name'] == i], 0.00)
# Percentile ranking (0-100) of each fund's Sharpe ratio.
ranks = HF_stats.SharpeRatioPast.rank(ascending=False)
HF_stats['PercentileRankingPast'] = (ranks - ranks.min())/(ranks.max() - ranks.min())*100
| true | true |
1c30b0fd99c82818b7c1ab9e9be37e6f7376425f | 28,374 | py | Python | nova/virt/powervm/driver.py | wangchencom/nova | 34e595f2a7ac1cd16ea13d0e3a14899b89f94998 | [
"Apache-2.0"
] | null | null | null | nova/virt/powervm/driver.py | wangchencom/nova | 34e595f2a7ac1cd16ea13d0e3a14899b89f94998 | [
"Apache-2.0"
] | null | null | null | nova/virt/powervm/driver.py | wangchencom/nova | 34e595f2a7ac1cd16ea13d0e3a14899b89f94998 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014, 2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Connection to PowerVM hypervisor through NovaLink."""
import os_resource_classes as orc
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from pypowervm import adapter as pvm_apt
from pypowervm import const as pvm_const
from pypowervm import exceptions as pvm_exc
from pypowervm.helpers import log_helper as log_hlp
from pypowervm.helpers import vios_busy as vio_hlp
from pypowervm.tasks import partition as pvm_par
from pypowervm.tasks import storage as pvm_stor
from pypowervm.tasks import vterm as pvm_vterm
from pypowervm.wrappers import managed_system as pvm_ms
import six
from taskflow.patterns import linear_flow as tf_lf
from nova.compute import task_states
from nova import conf as cfg
from nova.console import type as console_type
from nova import exception as exc
from nova.i18n import _
from nova import image
from nova.virt import configdrive
from nova.virt import driver
from nova.virt.powervm import host as pvm_host
from nova.virt.powervm.tasks import base as tf_base
from nova.virt.powervm.tasks import image as tf_img
from nova.virt.powervm.tasks import network as tf_net
from nova.virt.powervm.tasks import storage as tf_stg
from nova.virt.powervm.tasks import vm as tf_vm
from nova.virt.powervm import vm
from nova.virt.powervm import volume
from nova.virt.powervm.volume import fcvscsi
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
DISK_ADPT_NS = 'nova.virt.powervm.disk'
DISK_ADPT_MAPPINGS = {
'localdisk': 'localdisk.LocalStorage',
'ssp': 'ssp.SSPDiskAdapter'
}
class PowerVMDriver(driver.ComputeDriver):
"""PowerVM NovaLink Implementation of Compute Driver.
https://wiki.openstack.org/wiki/PowerVM
"""
def __init__(self, virtapi):
# NOTE(edmondsw) some of these will be dynamic in future, so putting
# capabilities on the instance rather than on the class.
self.capabilities = {
'has_imagecache': False,
'supports_evacuate': False,
'supports_migrate_to_same_host': False,
'supports_attach_interface': True,
'supports_device_tagging': False,
'supports_tagged_attach_interface': False,
'supports_tagged_attach_volume': False,
'supports_extend_volume': True,
'supports_multiattach': False,
'supports_trusted_certs': False,
}
super(PowerVMDriver, self).__init__(virtapi)
    def init_host(self, host):
        """Initialize anything that is necessary for the driver to function.

        Includes catching up with currently running VMs on the given host.

        :param host: hostname of this compute node (unused; connection
            details come from configuration instead).
        """
        # Build the adapter. May need to attempt the connection multiple times
        # in case the PowerVM management API service is starting.
        # TODO(efried): Implement async compute service enable/disable like
        # I73a34eb6e0ca32d03e54d12a5e066b2ed4f19a61
        self.adapter = pvm_apt.Adapter(
            pvm_apt.Session(conn_tries=60),
            helpers=[log_hlp.log_helper, vio_hlp.vios_busy_retry_helper])
        # Make sure the Virtual I/O Server(s) are available.
        pvm_par.validate_vios_ready(self.adapter)
        # Cache the managed system wrapper; refreshed later by
        # get_available_resource.
        self.host_wrapper = pvm_ms.System.get(self.adapter)[0]
        # Do a scrub of the I/O plane to make sure the system is in good shape
        LOG.info("Clearing stale I/O connections on driver init.")
        pvm_stor.ComprehensiveScrub(self.adapter).execute()
        # Initialize the disk adapter
        self.disk_dvr = importutils.import_object_ns(
            DISK_ADPT_NS, DISK_ADPT_MAPPINGS[CONF.powervm.disk_driver.lower()],
            self.adapter, self.host_wrapper.uuid)
        self.image_api = image.API()
        LOG.info("The PowerVM compute driver has been initialized.")
@staticmethod
def _log_operation(op, instance):
"""Log entry point of driver operations."""
LOG.info('Operation: %(op)s. Virtual machine display name: '
'%(display_name)s, name: %(name)s',
{'op': op, 'display_name': instance.display_name,
'name': instance.name}, instance=instance)
def get_info(self, instance):
"""Get the current status of an instance.
:param instance: nova.objects.instance.Instance object
:returns: An InstanceInfo object.
"""
return vm.get_vm_info(self.adapter, instance)
def list_instances(self):
"""Return the names of all the instances known to the virt host.
:return: VM Names as a list.
"""
return vm.get_lpar_names(self.adapter)
def get_available_nodes(self, refresh=False):
"""Returns nodenames of all nodes managed by the compute service.
This method is for multi compute-nodes support. If a driver supports
multi compute-nodes, this method returns a list of nodenames managed
by the service. Otherwise, this method should return
[hypervisor_hostname].
"""
return [CONF.host]
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and as part of a
periodic task.
:param nodename: Node from which the caller wants to get resources.
A driver that manages only one node can safely ignore
this.
:return: Dictionary describing resources.
"""
# TODO(efried): Switch to get_inventory, per blueprint
# custom-resource-classes-pike
# Do this here so it refreshes each time this method is called.
self.host_wrapper = pvm_ms.System.get(self.adapter)[0]
return self._get_available_resource()
def _get_available_resource(self):
# Get host information
data = pvm_host.build_host_resource_from_ms(self.host_wrapper)
# Add the disk information
data["local_gb"] = self.disk_dvr.capacity
data["local_gb_used"] = self.disk_dvr.capacity_used
return data
    def update_provider_tree(self, provider_tree, nodename, allocations=None):
        """Update a ProviderTree with current provider and inventory data.

        :param nova.compute.provider_tree.ProviderTree provider_tree:
            A nova.compute.provider_tree.ProviderTree object representing all
            the providers in the tree associated with the compute node, and any
            sharing providers (those with the ``MISC_SHARES_VIA_AGGREGATE``
            trait) associated via aggregate with any of those providers (but
            not *their* tree- or aggregate-associated providers), as currently
            known by placement.
        :param nodename:
            String name of the compute node (i.e.
            ComputeNode.hypervisor_hostname) for which the caller is requesting
            updated provider information.
        :param allocations: Currently ignored by this driver.
        """
        # Get (legacy) resource information. Same as get_available_resource,
        # but we don't need to refresh self.host_wrapper as it was *just*
        # refreshed by get_available_resource in the resource tracker's
        # update_available_resource flow.
        data = self._get_available_resource()
        # NOTE(yikun): If the inv record does not exists, the allocation_ratio
        # will use the CONF.xxx_allocation_ratio value if xxx_allocation_ratio
        # is set, and fallback to use the initial_xxx_allocation_ratio
        # otherwise.
        inv = provider_tree.data(nodename).inventory
        ratios = self._get_allocation_ratios(inv)
        # TODO(efried): Fix these to reflect something like reality
        cpu_reserved = CONF.reserved_host_cpus
        mem_reserved = CONF.reserved_host_memory_mb
        disk_reserved = self._get_reserved_host_disk_gb_from_config()
        # Build per-resource-class inventory records for placement.
        inventory = {
            orc.VCPU: {
                'total': data['vcpus'],
                'max_unit': data['vcpus'],
                'allocation_ratio': ratios[orc.VCPU],
                'reserved': cpu_reserved,
            },
            orc.MEMORY_MB: {
                'total': data['memory_mb'],
                'max_unit': data['memory_mb'],
                'allocation_ratio': ratios[orc.MEMORY_MB],
                'reserved': mem_reserved,
            },
            orc.DISK_GB: {
                # TODO(efried): Proper DISK_GB sharing when SSP driver in play
                'total': int(data['local_gb']),
                'max_unit': int(data['local_gb']),
                'allocation_ratio': ratios[orc.DISK_GB],
                'reserved': disk_reserved,
            },
        }
        provider_tree.update_inventory(nodename, inventory)
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, allocations, network_info=None,
              block_device_info=None):
        """Create a new instance/VM/domain on the virtualization platform.

        Once this successfully completes, the instance should be
        running (power_state.RUNNING).

        If this fails, any partial instance should be completely
        cleaned up, and the virtualization platform should be in the state
        that it was before this call began.

        :param context: security context
        :param instance: nova.objects.instance.Instance
                         This function should use the data there to guide
                         the creation of the new instance.
        :param nova.objects.ImageMeta image_meta:
            The metadata of the image of the instance.
        :param injected_files: User files to inject into instance.
        :param admin_password: Administrator password to set in instance.
        :param allocations: Information about resources allocated to the
                            instance via placement, of the form returned by
                            SchedulerReportClient.get_allocations_for_consumer.
        :param network_info: instance network information
        :param block_device_info: Information about block devices to be
                                  attached to the instance.
        """
        self._log_operation('spawn', instance)
        # Define the flow.  Task order below matters: network and storage are
        # attached before the config drive, and power-on happens last.
        flow_spawn = tf_lf.Flow("spawn")
        # This FeedTask accumulates VIOS storage connection operations to be
        # run in parallel. Include both SCSI and fibre channel mappings for
        # the scrubber.
        stg_ftsk = pvm_par.build_active_vio_feed_task(
            self.adapter, xag={pvm_const.XAG.VIO_SMAP, pvm_const.XAG.VIO_FMAP})
        flow_spawn.add(tf_vm.Create(
            self.adapter, self.host_wrapper, instance, stg_ftsk))
        # Create a flow for the IO
        flow_spawn.add(tf_net.PlugVifs(
            self.virtapi, self.adapter, instance, network_info))
        flow_spawn.add(tf_net.PlugMgmtVif(
            self.adapter, instance))
        # Create the boot image.
        flow_spawn.add(tf_stg.CreateDiskForImg(
            self.disk_dvr, context, instance, image_meta))
        # Connects up the disk to the LPAR
        flow_spawn.add(tf_stg.AttachDisk(
            self.disk_dvr, instance, stg_ftsk=stg_ftsk))
        # Extract the block devices.
        bdms = driver.block_device_info_get_mapping(block_device_info)
        # Determine if there are volumes to connect. If so, add a connection
        # for each type.
        for bdm, vol_drv in self._vol_drv_iter(context, instance, bdms,
                                               stg_ftsk=stg_ftsk):
            # Connect the volume. This will update the connection_info.
            flow_spawn.add(tf_stg.AttachVolume(vol_drv))
        # If the config drive is needed, add those steps.  Should be done
        # after all the other I/O.
        if configdrive.required_by(instance):
            flow_spawn.add(tf_stg.CreateAndConnectCfgDrive(
                self.adapter, instance, injected_files, network_info,
                stg_ftsk, admin_pass=admin_password))
        # Add the transaction manager flow at the end of the 'I/O
        # connection' tasks. This will run all the connections in parallel.
        flow_spawn.add(stg_ftsk)
        # Last step is to power on the system.
        flow_spawn.add(tf_vm.PowerOn(self.adapter, instance))
        # Run the flow.
        tf_base.run(flow_spawn, instance=instance)
    def destroy(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True):
        """Destroy the specified instance from the Hypervisor.

        If the instance is not found (for example if networking failed), this
        function should still succeed. It's probably a good idea to log a
        warning in that case.

        :param context: security context
        :param instance: Instance object as returned by DB layer.
        :param network_info: instance network information
        :param block_device_info: Information about block devices that should
                                  be detached from the instance.
        :param destroy_disks: Indicates if disks should be destroyed
        """
        # TODO(thorst, efried) Add resize checks for destroy
        self._log_operation('destroy', instance)

        def _setup_flow_and_run():
            # Define the flow
            flow = tf_lf.Flow("destroy")
            # Power Off the LPAR. If its disks are about to be deleted, issue a
            # hard shutdown.
            flow.add(tf_vm.PowerOff(self.adapter, instance,
                                    force_immediate=destroy_disks))
            # The FeedTask accumulates storage disconnection tasks to be run in
            # parallel.
            stg_ftsk = pvm_par.build_active_vio_feed_task(
                self.adapter, xag=[pvm_const.XAG.VIO_SMAP])
            # Call the unplug VIFs task. While CNAs get removed from the LPAR
            # directly on the destroy, this clears up the I/O Host side.
            flow.add(tf_net.UnplugVifs(self.adapter, instance, network_info))
            # Add the disconnect/deletion of the vOpt to the transaction
            # manager.
            if configdrive.required_by(instance):
                flow.add(tf_stg.DeleteVOpt(
                    self.adapter, instance, stg_ftsk=stg_ftsk))
            # Extract the block devices.
            bdms = driver.block_device_info_get_mapping(block_device_info)
            # Determine if there are volumes to detach. If so, remove each
            # volume (within the transaction manager)
            for bdm, vol_drv in self._vol_drv_iter(
                     context, instance, bdms, stg_ftsk=stg_ftsk):
                flow.add(tf_stg.DetachVolume(vol_drv))
            # Detach the disk storage adapters
            flow.add(tf_stg.DetachDisk(self.disk_dvr, instance))
            # Accumulated storage disconnection tasks next
            flow.add(stg_ftsk)
            # Delete the storage disks
            if destroy_disks:
                flow.add(tf_stg.DeleteDisk(self.disk_dvr))
            # TODO(thorst, efried) Add LPAR id based scsi map clean up task
            flow.add(tf_vm.Delete(self.adapter, instance))
            # Build the engine & run!
            tf_base.run(flow, instance=instance)

        try:
            _setup_flow_and_run()
        except exc.InstanceNotFound:
            # Not an error per the method contract: destroy still succeeds.
            LOG.debug('VM was not found during destroy operation.',
                      instance=instance)
            return
        except pvm_exc.Error as e:
            LOG.exception("PowerVM error during destroy.", instance=instance)
            # Convert to a Nova exception
            raise exc.InstanceTerminationFailure(reason=six.text_type(e))
    def snapshot(self, context, instance, image_id, update_task_state):
        """Snapshots the specified instance.

        :param context: security context
        :param instance: nova.objects.instance.Instance
        :param image_id: Reference to a pre-created image that will hold the
                         snapshot.
        :param update_task_state: Callback function to update the task_state
            on the instance while the snapshot operation progresses. The
            function takes a task_state argument and an optional
            expected_task_state kwarg which defaults to
            nova.compute.task_states.IMAGE_SNAPSHOT. See
            nova.objects.instance.Instance.save for expected_task_state usage.
        :raises: NotSupportedWithOption if the configured disk driver does not
            advertise the 'snapshot' capability.
        """
        if not self.disk_dvr.capabilities.get('snapshot'):
            raise exc.NotSupportedWithOption(
                message=_("The snapshot operation is not supported in "
                          "conjunction with a [powervm]/disk_driver setting "
                          "of %s.") % CONF.powervm.disk_driver)
        self._log_operation('snapshot', instance)
        # Define the flow.
        flow = tf_lf.Flow("snapshot")
        # Notify that we're starting the process.
        flow.add(tf_img.UpdateTaskState(update_task_state,
                                        task_states.IMAGE_PENDING_UPLOAD))
        # Connect the instance's boot disk to the management partition, and
        # scan the scsi bus and bring the device into the management partition.
        flow.add(tf_stg.InstanceDiskToMgmt(self.disk_dvr, instance))
        # Notify that the upload is in progress.
        flow.add(tf_img.UpdateTaskState(
            update_task_state, task_states.IMAGE_UPLOADING,
            expected_state=task_states.IMAGE_PENDING_UPLOAD))
        # Stream the disk to glance.
        flow.add(tf_img.StreamToGlance(context, self.image_api, image_id,
                                       instance))
        # Disconnect the boot disk from the management partition and delete the
        # device.
        flow.add(tf_stg.RemoveInstanceDiskFromMgmt(self.disk_dvr, instance))
        # Run the flow.
        tf_base.run(flow, instance=instance)
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance.
:param instance: nova.objects.instance.Instance
:param timeout: time to wait for GuestOS to shutdown
:param retry_interval: How often to signal guest while
waiting for it to shutdown
"""
self._log_operation('power_off', instance)
force_immediate = (timeout == 0)
timeout = timeout or None
vm.power_off(self.adapter, instance, force_immediate=force_immediate,
timeout=timeout)
    def power_on(self, context, instance, network_info,
                 block_device_info=None):
        """Power on the specified instance.
        :param context: security context (unused here)
        :param instance: nova.objects.instance.Instance
        :param network_info: instance network information (unused here)
        :param block_device_info: instance volume information (unused here)
        """
        self._log_operation('power_on', instance)
        vm.power_on(self.adapter, instance)
    def reboot(self, context, instance, network_info, reboot_type,
               block_device_info=None, bad_volumes_callback=None):
        """Reboot the specified instance.
        After this is called successfully, the instance's state
        goes back to power_state.RUNNING. The virtualization
        platform should ensure that the reboot action has completed
        successfully even in cases in which the underlying domain/vm
        is paused or halted/stopped.
        :param instance: nova.objects.instance.Instance
        :param network_info:
           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
        :param reboot_type: Either a HARD or SOFT reboot
        :param block_device_info: Info pertaining to attached volumes
        :param bad_volumes_callback: Function to handle any bad volumes
            encountered
        """
        self._log_operation(reboot_type + ' reboot', instance)
        # The third argument is True for a HARD reboot, False otherwise.
        vm.reboot(self.adapter, instance, reboot_type == 'HARD')
        # pypowervm exceptions are sufficient to indicate real failure.
        # Otherwise, pypowervm thinks the instance is up.
    def attach_interface(self, context, instance, image_meta, vif):
        """Attach an interface to the instance.
        Thin wrapper: delegates to plug_vifs with a single-element list.
        """
        self.plug_vifs(instance, [vif])
    def detach_interface(self, context, instance, vif):
        """Detach an interface from the instance.
        Thin wrapper: delegates to unplug_vifs with a single-element list.
        """
        self.unplug_vifs(instance, [vif])
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
self._log_operation('plug_vifs', instance)
# Define the flow
flow = tf_lf.Flow("plug_vifs")
# Get the LPAR Wrapper
flow.add(tf_vm.Get(self.adapter, instance))
# Run the attach
flow.add(tf_net.PlugVifs(self.virtapi, self.adapter, instance,
network_info))
# Run the flow
try:
tf_base.run(flow, instance=instance)
except exc.InstanceNotFound:
raise exc.VirtualInterfacePlugException(
_("Plug vif failed because instance %s was not found.")
% instance.name)
except Exception:
LOG.exception("PowerVM error plugging vifs.", instance=instance)
raise exc.VirtualInterfacePlugException(
_("Plug vif failed because of an unexpected error."))
    def unplug_vifs(self, instance, network_info):
        """Unplug VIFs from networks.
        A VM that has already disappeared is tolerated (logged only);
        any other failure raises InterfaceDetachFailed.
        :param instance: nova.objects.instance.Instance
        :param network_info: instance network information
        :raises InterfaceDetachFailed: on any unexpected error.
        """
        self._log_operation('unplug_vifs', instance)
        # Define the flow
        flow = tf_lf.Flow("unplug_vifs")
        # Run the detach
        flow.add(tf_net.UnplugVifs(self.adapter, instance, network_info))
        # Run the flow
        try:
            tf_base.run(flow, instance=instance)
        except exc.InstanceNotFound:
            # Nothing to unplug from a VM that is already gone.
            LOG.warning('VM was not found during unplug operation as it is '
                        'already possibly deleted.', instance=instance)
        except Exception:
            LOG.exception("PowerVM error trying to unplug vifs.",
                          instance=instance)
            raise exc.InterfaceDetachFailed(instance_uuid=instance.uuid)
    def get_vnc_console(self, context, instance):
        """Get connection info for a vnc console.
        :param context: security context
        :param instance: nova.objects.instance.Instance
        :return: An instance of console.type.ConsoleVNC
        :raises InstanceNotFound: if the backing LPAR no longer exists.
        """
        self._log_operation('get_vnc_console', instance)
        lpar_uuid = vm.get_pvm_uuid(instance)
        # Build the connection to the VNC.
        host = CONF.vnc.server_proxyclient_address
        # TODO(thorst, efried) Add the x509 certificate support when it lands
        try:
            # Open up a remote vterm
            port = pvm_vterm.open_remotable_vnc_vterm(
                self.adapter, lpar_uuid, host, vnc_path=lpar_uuid)
            # Note that the VNC viewer will wrap the internal_access_path with
            # the HTTP content.
            return console_type.ConsoleVNC(host=host, port=port,
                                           internal_access_path=lpar_uuid)
        except pvm_exc.HttpError as e:
            # save_and_reraise_exception logs and re-raises the HttpError by
            # default; for a 404 we suppress that (sare.reraise = False) and
            # raise the more descriptive InstanceNotFound instead.
            with excutils.save_and_reraise_exception(logger=LOG) as sare:
                # If the LPAR was not found, raise a more descriptive error
                if e.response.status == 404:
                    sare.reraise = False
                    raise exc.InstanceNotFound(instance_id=instance.uuid)
    def deallocate_networks_on_reschedule(self, instance):
        """Does the driver want networks deallocated on reschedule?
        :param instance: the instance object.
        :returns: Boolean value. If True deallocate networks on reschedule.
        """
        # This driver always requests deallocation before a reschedule.
        return True
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
"""Attach the volume to the instance using the connection_info.
:param context: security context
:param connection_info: Volume connection information from the block
device mapping
:param instance: nova.objects.instance.Instance
:param mountpoint: Unused
:param disk_bus: Unused
:param device_type: Unused
:param encryption: Unused
"""
self._log_operation('attach_volume', instance)
# Define the flow
flow = tf_lf.Flow("attach_volume")
# Build the driver
vol_drv = volume.build_volume_driver(self.adapter, instance,
connection_info)
# Add the volume attach to the flow.
flow.add(tf_stg.AttachVolume(vol_drv))
# Run the flow
tf_base.run(flow, instance=instance)
# The volume connector may have updated the system metadata. Save
# the instance to persist the data. Spawn/destroy auto saves instance,
# but the attach does not. Detach does not need this save - as the
# detach flows do not (currently) modify system metadata. May need
# to revise in the future as volume connectors evolve.
instance.save()
def detach_volume(self, context, connection_info, instance, mountpoint,
encryption=None):
"""Detach the volume attached to the instance.
:param context: security context
:param connection_info: Volume connection information from the block
device mapping
:param instance: nova.objects.instance.Instance
:param mountpoint: Unused
:param encryption: Unused
"""
self._log_operation('detach_volume', instance)
# Define the flow
flow = tf_lf.Flow("detach_volume")
# Get a volume adapter for this volume
vol_drv = volume.build_volume_driver(self.adapter, instance,
connection_info)
# Add a task to detach the volume
flow.add(tf_stg.DetachVolume(vol_drv))
# Run the flow
tf_base.run(flow, instance=instance)
def extend_volume(self, connection_info, instance):
"""Extend the disk attached to the instance.
:param dict connection_info: The connection for the extended volume.
:param nova.objects.instance.Instance instance:
The instance whose volume gets extended.
:return: None
"""
vol_drv = volume.build_volume_driver(
self.adapter, instance, connection_info)
vol_drv.extend_volume()
    def _vol_drv_iter(self, context, instance, bdms, stg_ftsk=None):
        """Yields a (bdm, volume driver) pair per block device mapping.
        :param context: security context
        :param instance: nova.objects.instance.Instance
        :param bdms: block device mappings (may be None or empty)
        :param stg_ftsk: storage FeedTask, passed through to each volume
            driver so their updates can share it
        """
        # Get a volume driver for each volume; `bdms or []` guards against
        # bdms being None.
        for bdm in bdms or []:
            conn_info = bdm.get('connection_info')
            vol_drv = volume.build_volume_driver(self.adapter, instance,
                                                 conn_info, stg_ftsk=stg_ftsk)
            yield bdm, vol_drv
def get_volume_connector(self, instance):
"""Get connector information for the instance for attaching to volumes.
Connector information is a dictionary representing information about
the system that will be making the connection.
:param instance: nova.objects.instance.Instance
"""
# Put the values in the connector
connector = {}
wwpn_list = fcvscsi.wwpns(self.adapter)
if wwpn_list is not None:
connector["wwpns"] = wwpn_list
connector["multipath"] = False
connector['host'] = CONF.host
connector['initiator'] = None
return connector
| 41.482456 | 79 | 0.644252 |
import os_resource_classes as orc
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from pypowervm import adapter as pvm_apt
from pypowervm import const as pvm_const
from pypowervm import exceptions as pvm_exc
from pypowervm.helpers import log_helper as log_hlp
from pypowervm.helpers import vios_busy as vio_hlp
from pypowervm.tasks import partition as pvm_par
from pypowervm.tasks import storage as pvm_stor
from pypowervm.tasks import vterm as pvm_vterm
from pypowervm.wrappers import managed_system as pvm_ms
import six
from taskflow.patterns import linear_flow as tf_lf
from nova.compute import task_states
from nova import conf as cfg
from nova.console import type as console_type
from nova import exception as exc
from nova.i18n import _
from nova import image
from nova.virt import configdrive
from nova.virt import driver
from nova.virt.powervm import host as pvm_host
from nova.virt.powervm.tasks import base as tf_base
from nova.virt.powervm.tasks import image as tf_img
from nova.virt.powervm.tasks import network as tf_net
from nova.virt.powervm.tasks import storage as tf_stg
from nova.virt.powervm.tasks import vm as tf_vm
from nova.virt.powervm import vm
from nova.virt.powervm import volume
from nova.virt.powervm.volume import fcvscsi
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
DISK_ADPT_NS = 'nova.virt.powervm.disk'
DISK_ADPT_MAPPINGS = {
'localdisk': 'localdisk.LocalStorage',
'ssp': 'ssp.SSPDiskAdapter'
}
class PowerVMDriver(driver.ComputeDriver):
def __init__(self, virtapi):
self.capabilities = {
'has_imagecache': False,
'supports_evacuate': False,
'supports_migrate_to_same_host': False,
'supports_attach_interface': True,
'supports_device_tagging': False,
'supports_tagged_attach_interface': False,
'supports_tagged_attach_volume': False,
'supports_extend_volume': True,
'supports_multiattach': False,
'supports_trusted_certs': False,
}
super(PowerVMDriver, self).__init__(virtapi)
def init_host(self, host):
self.adapter = pvm_apt.Adapter(
pvm_apt.Session(conn_tries=60),
helpers=[log_hlp.log_helper, vio_hlp.vios_busy_retry_helper])
pvm_par.validate_vios_ready(self.adapter)
self.host_wrapper = pvm_ms.System.get(self.adapter)[0]
LOG.info("Clearing stale I/O connections on driver init.")
pvm_stor.ComprehensiveScrub(self.adapter).execute()
self.disk_dvr = importutils.import_object_ns(
DISK_ADPT_NS, DISK_ADPT_MAPPINGS[CONF.powervm.disk_driver.lower()],
self.adapter, self.host_wrapper.uuid)
self.image_api = image.API()
LOG.info("The PowerVM compute driver has been initialized.")
@staticmethod
def _log_operation(op, instance):
LOG.info('Operation: %(op)s. Virtual machine display name: '
'%(display_name)s, name: %(name)s',
{'op': op, 'display_name': instance.display_name,
'name': instance.name}, instance=instance)
def get_info(self, instance):
return vm.get_vm_info(self.adapter, instance)
def list_instances(self):
return vm.get_lpar_names(self.adapter)
def get_available_nodes(self, refresh=False):
return [CONF.host]
def get_available_resource(self, nodename):
self.host_wrapper = pvm_ms.System.get(self.adapter)[0]
return self._get_available_resource()
def _get_available_resource(self):
data = pvm_host.build_host_resource_from_ms(self.host_wrapper)
data["local_gb"] = self.disk_dvr.capacity
data["local_gb_used"] = self.disk_dvr.capacity_used
return data
def update_provider_tree(self, provider_tree, nodename, allocations=None):
# refreshed by get_available_resource in the resource tracker's
data = self._get_available_resource()
inv = provider_tree.data(nodename).inventory
ratios = self._get_allocation_ratios(inv)
cpu_reserved = CONF.reserved_host_cpus
mem_reserved = CONF.reserved_host_memory_mb
disk_reserved = self._get_reserved_host_disk_gb_from_config()
inventory = {
orc.VCPU: {
'total': data['vcpus'],
'max_unit': data['vcpus'],
'allocation_ratio': ratios[orc.VCPU],
'reserved': cpu_reserved,
},
orc.MEMORY_MB: {
'total': data['memory_mb'],
'max_unit': data['memory_mb'],
'allocation_ratio': ratios[orc.MEMORY_MB],
'reserved': mem_reserved,
},
orc.DISK_GB: {
'total': int(data['local_gb']),
'max_unit': int(data['local_gb']),
'allocation_ratio': ratios[orc.DISK_GB],
'reserved': disk_reserved,
},
}
provider_tree.update_inventory(nodename, inventory)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, allocations, network_info=None,
block_device_info=None):
self._log_operation('spawn', instance)
flow_spawn = tf_lf.Flow("spawn")
stg_ftsk = pvm_par.build_active_vio_feed_task(
self.adapter, xag={pvm_const.XAG.VIO_SMAP, pvm_const.XAG.VIO_FMAP})
flow_spawn.add(tf_vm.Create(
self.adapter, self.host_wrapper, instance, stg_ftsk))
flow_spawn.add(tf_net.PlugVifs(
self.virtapi, self.adapter, instance, network_info))
flow_spawn.add(tf_net.PlugMgmtVif(
self.adapter, instance))
flow_spawn.add(tf_stg.CreateDiskForImg(
self.disk_dvr, context, instance, image_meta))
flow_spawn.add(tf_stg.AttachDisk(
self.disk_dvr, instance, stg_ftsk=stg_ftsk))
bdms = driver.block_device_info_get_mapping(block_device_info)
for bdm, vol_drv in self._vol_drv_iter(context, instance, bdms,
stg_ftsk=stg_ftsk):
flow_spawn.add(tf_stg.AttachVolume(vol_drv))
if configdrive.required_by(instance):
flow_spawn.add(tf_stg.CreateAndConnectCfgDrive(
self.adapter, instance, injected_files, network_info,
stg_ftsk, admin_pass=admin_password))
# connection' tasks. This will run all the connections in parallel.
flow_spawn.add(stg_ftsk)
flow_spawn.add(tf_vm.PowerOn(self.adapter, instance))
tf_base.run(flow_spawn, instance=instance)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True):
self._log_operation('destroy', instance)
def _setup_flow_and_run():
flow = tf_lf.Flow("destroy")
flow.add(tf_vm.PowerOff(self.adapter, instance,
force_immediate=destroy_disks))
stg_ftsk = pvm_par.build_active_vio_feed_task(
self.adapter, xag=[pvm_const.XAG.VIO_SMAP])
flow.add(tf_net.UnplugVifs(self.adapter, instance, network_info))
if configdrive.required_by(instance):
flow.add(tf_stg.DeleteVOpt(
self.adapter, instance, stg_ftsk=stg_ftsk))
bdms = driver.block_device_info_get_mapping(block_device_info)
for bdm, vol_drv in self._vol_drv_iter(
context, instance, bdms, stg_ftsk=stg_ftsk):
flow.add(tf_stg.DetachVolume(vol_drv))
flow.add(tf_stg.DetachDisk(self.disk_dvr, instance))
flow.add(stg_ftsk)
if destroy_disks:
flow.add(tf_stg.DeleteDisk(self.disk_dvr))
flow.add(tf_vm.Delete(self.adapter, instance))
tf_base.run(flow, instance=instance)
try:
_setup_flow_and_run()
except exc.InstanceNotFound:
LOG.debug('VM was not found during destroy operation.',
instance=instance)
return
except pvm_exc.Error as e:
LOG.exception("PowerVM error during destroy.", instance=instance)
raise exc.InstanceTerminationFailure(reason=six.text_type(e))
def snapshot(self, context, instance, image_id, update_task_state):
if not self.disk_dvr.capabilities.get('snapshot'):
raise exc.NotSupportedWithOption(
message=_("The snapshot operation is not supported in "
"conjunction with a [powervm]/disk_driver setting "
"of %s.") % CONF.powervm.disk_driver)
self._log_operation('snapshot', instance)
flow = tf_lf.Flow("snapshot")
flow.add(tf_img.UpdateTaskState(update_task_state,
task_states.IMAGE_PENDING_UPLOAD))
# Connect the instance's boot disk to the management partition, and
flow.add(tf_stg.InstanceDiskToMgmt(self.disk_dvr, instance))
flow.add(tf_img.UpdateTaskState(
update_task_state, task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD))
flow.add(tf_img.StreamToGlance(context, self.image_api, image_id,
instance))
flow.add(tf_stg.RemoveInstanceDiskFromMgmt(self.disk_dvr, instance))
tf_base.run(flow, instance=instance)
def power_off(self, instance, timeout=0, retry_interval=0):
self._log_operation('power_off', instance)
force_immediate = (timeout == 0)
timeout = timeout or None
vm.power_off(self.adapter, instance, force_immediate=force_immediate,
timeout=timeout)
def power_on(self, context, instance, network_info,
block_device_info=None):
self._log_operation('power_on', instance)
vm.power_on(self.adapter, instance)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
self._log_operation(reboot_type + ' reboot', instance)
vm.reboot(self.adapter, instance, reboot_type == 'HARD')
def attach_interface(self, context, instance, image_meta, vif):
self.plug_vifs(instance, [vif])
def detach_interface(self, context, instance, vif):
self.unplug_vifs(instance, [vif])
def plug_vifs(self, instance, network_info):
self._log_operation('plug_vifs', instance)
flow = tf_lf.Flow("plug_vifs")
flow.add(tf_vm.Get(self.adapter, instance))
flow.add(tf_net.PlugVifs(self.virtapi, self.adapter, instance,
network_info))
try:
tf_base.run(flow, instance=instance)
except exc.InstanceNotFound:
raise exc.VirtualInterfacePlugException(
_("Plug vif failed because instance %s was not found.")
% instance.name)
except Exception:
LOG.exception("PowerVM error plugging vifs.", instance=instance)
raise exc.VirtualInterfacePlugException(
_("Plug vif failed because of an unexpected error."))
def unplug_vifs(self, instance, network_info):
self._log_operation('unplug_vifs', instance)
flow = tf_lf.Flow("unplug_vifs")
flow.add(tf_net.UnplugVifs(self.adapter, instance, network_info))
try:
tf_base.run(flow, instance=instance)
except exc.InstanceNotFound:
LOG.warning('VM was not found during unplug operation as it is '
'already possibly deleted.', instance=instance)
except Exception:
LOG.exception("PowerVM error trying to unplug vifs.",
instance=instance)
raise exc.InterfaceDetachFailed(instance_uuid=instance.uuid)
def get_vnc_console(self, context, instance):
self._log_operation('get_vnc_console', instance)
lpar_uuid = vm.get_pvm_uuid(instance)
host = CONF.vnc.server_proxyclient_address
try:
port = pvm_vterm.open_remotable_vnc_vterm(
self.adapter, lpar_uuid, host, vnc_path=lpar_uuid)
return console_type.ConsoleVNC(host=host, port=port,
internal_access_path=lpar_uuid)
except pvm_exc.HttpError as e:
with excutils.save_and_reraise_exception(logger=LOG) as sare:
if e.response.status == 404:
sare.reraise = False
raise exc.InstanceNotFound(instance_id=instance.uuid)
def deallocate_networks_on_reschedule(self, instance):
return True
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
self._log_operation('attach_volume', instance)
flow = tf_lf.Flow("attach_volume")
vol_drv = volume.build_volume_driver(self.adapter, instance,
connection_info)
flow.add(tf_stg.AttachVolume(vol_drv))
tf_base.run(flow, instance=instance)
instance.save()
def detach_volume(self, context, connection_info, instance, mountpoint,
encryption=None):
self._log_operation('detach_volume', instance)
flow = tf_lf.Flow("detach_volume")
vol_drv = volume.build_volume_driver(self.adapter, instance,
connection_info)
flow.add(tf_stg.DetachVolume(vol_drv))
tf_base.run(flow, instance=instance)
def extend_volume(self, connection_info, instance):
vol_drv = volume.build_volume_driver(
self.adapter, instance, connection_info)
vol_drv.extend_volume()
def _vol_drv_iter(self, context, instance, bdms, stg_ftsk=None):
for bdm in bdms or []:
conn_info = bdm.get('connection_info')
vol_drv = volume.build_volume_driver(self.adapter, instance,
conn_info, stg_ftsk=stg_ftsk)
yield bdm, vol_drv
def get_volume_connector(self, instance):
connector = {}
wwpn_list = fcvscsi.wwpns(self.adapter)
if wwpn_list is not None:
connector["wwpns"] = wwpn_list
connector["multipath"] = False
connector['host'] = CONF.host
connector['initiator'] = None
return connector
| true | true |
1c30b22f755d32919b8be5d7721daf340cd3874b | 7,416 | py | Python | Figure_8.py | vincentpun/ConformanceConstraintsReproducibility | fc5df4ec9a3702a1837ffe6f3c05216523e8a1c5 | [
"MIT"
] | null | null | null | Figure_8.py | vincentpun/ConformanceConstraintsReproducibility | fc5df4ec9a3702a1837ffe6f3c05216523e8a1c5 | [
"MIT"
] | 1 | 2021-12-09T09:30:49.000Z | 2021-12-09T09:30:49.000Z | Figure_8.py | vincentpun/ConformanceConstraintsReproducibility | fc5df4ec9a3702a1837ffe6f3c05216523e8a1c5 | [
"MIT"
] | 1 | 2021-12-09T05:22:18.000Z | 2021-12-09T05:22:18.000Z | import prose.datainsights as di
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import subprocess
from sklearn.decomposition import PCA
import numpy as np
import numpy.random as rnd
import matplotlib.pyplot as plt
import os
import sys
import warnings
from matplotlib import rc
from matplotlib import rcParams
# Name under which our system appears in figure legends.
systemName = 'CCSynth'
rcParams['font.family'] = 'sans-serif'
# NOTE: usetex requires a working LaTeX installation.
rc('text', usetex=True)
warnings.filterwarnings('ignore')
# Make the PCA-SPLL baseline importable from its vendored location.
module_path = os.path.abspath(os.path.join("Baseline", "PCA-SPLL"))
if module_path not in sys.path:
    sys.path.append(module_path)
from SPLL import SPLL
# NOTE(review): `path` is computed but never used below; the slicing drops
# the character at index 1 (presumably a Windows drive colon - confirm).
path = os.getcwd()
path = path[0].lower() + path[2:]
# Locations of the PCA-CD change-detection binary and the EVL datasets.
current_directory = os.getcwd()
CD_executable = os.path.join(current_directory, "Baseline", "PCA-CD", "ChangeDetection", "CD")
data_source = os.path.join(current_directory, "data", "uncompressed", "EVL/")
output_prefix = os.path.join(current_directory, "data", "uncompressed", "EVL", "results/")
# Per-dataset window sizes (rows per evaluation chunk).
window_sizes = {
    "1CDT":400,
    "2CDT":400,
    "1CHT":400,
    "2CHT":400,
    "4CR":2000,
    "4CRE-V1":1000,
    "4CRE-V2":1000,
    "5CVT":1000,
    "1CSurr":600,
    "4CE1CF":7500,
    "UG-2C-2D":1000,
    "MG-2C-2D":2000,
    "FG-2C-2D":2000,
    "UG-2C-3D":2000,
    "UG-2C-5D":2000,
    "GEARS-2C-2D":2000,
}
def get_df(dataset, raw=True):
    """Load an EVL benchmark dataset as a DataFrame.
    :param dataset: base name of the dataset file (without ".txt")
    :param raw: when False, columns with fewer than 10 distinct values are
        cast to str so they are treated as categorical downstream.
    :return: pandas.DataFrame with unnamed (integer) columns
    """
    df = pd.read_csv(data_source + dataset + ".txt", header=None)
    if not raw:
        for col in df.columns:
            # nunique(dropna=False) is the idiomatic equivalent of
            # len(list(df[col].unique())) - NaN counts as a value.
            if df[col].nunique(dropna=False) < 10:
                df[col] = df[col].apply(str)
    return df
def get_cd_violations(dataset, window, div_metric):
    """Run the PCA-CD `CD` change-detection binary and collect its scores.
    A tab-separated copy of the dataset ("_<dataset>.txt") is created on
    first use, since the CD binary expects tab delimiters.
    :param dataset: dataset base name
    :param window: window size passed to the CD binary
    :param div_metric: divergence metric selector (1 = Area, 2 = MKL)
    :return: list of float change scores, one per line of the CD output
    """
    input_source = data_source + "_" + dataset + ".txt"
    if not os.path.exists(input_source):
        # Convert the comma-separated source to tab-separated form once.
        # (Original leaked both file handles; use context managers.)
        with open(data_source + dataset + ".txt", "r") as infile, \
                open(input_source, "w") as outfile:
            for line in infile:
                outfile.write(line.replace(",", "\t"))
    nDim = get_df(dataset).shape[1]
    # Last CLI argument works around a crash in the CD binary on the
    # 1CSurr dataset (as the variable name suggests - confirm upstream).
    seg_fault_hack = "1" if dataset == "1CSurr" else "0"
    cd_command = " ".join([CD_executable,
                           input_source,
                           str(window),
                           "500",
                           output_prefix + "output.txt",
                           str(nDim),
                           "0.005",
                           str(div_metric),
                           seg_fault_hack])
    command = ["bash", "-c", cd_command]
    p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    output_source = data_source + "results/output.txt"
    # Close the results file before deleting it.
    with open(output_source) as result_file:
        violations = [float(violation) for violation in result_file]
    os.remove(output_source)
    return violations
def get_violations(approaches, dataset, window):
    """Compute per-window change/violation scores for each approach.
    The first `window` rows act as the training set; the dataset is then
    scored in consecutive non-overlapping windows of the same size.
    :param approaches: approach names ("CD-Area", "CD-MKL", "PCA-SPLL*",
        "PCA", "DT")
    :param dataset: dataset base name
    :param window: window size (rows per chunk)
    :return: dict mapping approach name -> numpy array of scores
    """
    all_violations = dict()
    df_raw = get_df(dataset, raw=True)
    df = get_df(dataset, raw=False)
    for approach in approaches:
        violations = []
        if approach.startswith("CD-Area"):
            violations = get_cd_violations(dataset, window, 1)
        elif approach.startswith("CD-MKL"):
            violations = get_cd_violations(dataset, window, 2)
        elif approach.startswith("PCA-SPLL"):
            # Fit PCA on the first window only; later windows are scored
            # against its projection.
            train_df = df_raw[:window]
            pca = PCA()
            pca.fit(train_df)
            # Keep the least variant features
            feature_indices = np.where(pca.explained_variance_ratio_ < 0.35)[0]
            train_PCA = pca.transform(train_df)[:, feature_indices.tolist()]
            n_chunks = int(df_raw.shape[0]/window)
            violations = []
            for i in range(n_chunks):
                test_df = df_raw[i * window: (i + 1) * window]
                if np.size(feature_indices) < 2:
                    # Guard against empty clusters...
                    st_raw, st_pca = 0, 0
                else:
                    # Transform with W1's coefficients, only keep the least variant features
                    test_PCA = pca.transform(test_df)[:, feature_indices.tolist()]
                    _, _, st_pca = SPLL(train_PCA, test_PCA)
                violations.append(st_pca)
        else:
            # Conformance-constraint approaches (PCA constraints or
            # decision-tree constraints) via prose.datainsights.
            max_self_violation_threshold = 0.15
            if approach == "PCA":
                assertions = di.learn_assertions(df[:window], max_self_violation = max_self_violation_threshold)
            if approach == "DT":
                assertions = di.learn_assertions(df[:window], learn_decision_tree=True, max_self_violation = max_self_violation_threshold)
            n_chunks = int(df.shape[0]/window)
            violations = []
            for i in range(n_chunks):
                test_df = df[i * window: (i + 1) * window]
                result = assertions.evaluate(test_df, normalizeViolation=False)
                violations.append(result.avg_violation)
        all_violations[approach] = np.array(violations)
    return all_violations
# --- Render Figure 8: one subplot per dataset, one curve per approach. ---
rcParams['figure.dpi'] = 300
approaches = ["CD-MKL", "CD-Area", "PCA-SPLL", "DT"]
# Legend names; raw string so "\%" reaches LaTeX unmangled (the original
# "...\%" non-raw literal is an invalid escape sequence on modern Python).
approach_names = ["CD-MKL", "CD-Area", r"PCA-SPLL (25\%)", systemName]
colors = ["C8", "C0", "C2", "C3"]
lss = ['--', '-.', ':', '-', ]
nCol = 8
fig, ax = plt.subplots(2, nCol)
fig.set_size_inches(12, 2)
cur_plot_idx = 0
for dataset, window in window_sizes.items():
    # (The original guarded this with a condition that was always True;
    # scores are computed for every dataset.)
    all_violations = get_violations(approaches, dataset, window)
    for approach in approaches:
        cur_plot = ax[cur_plot_idx//nCol][cur_plot_idx%nCol]
        violations = all_violations[approach]
        if max(violations) > 0:
            # Min-max normalize so all curves share a common [0, 1] scale.
            violations = (violations - np.min(violations))/(np.max(violations) - np.min(violations))
        color = colors[approaches.index(approach)]
        ls = lss[approaches.index(approach)]
        approach_name = approach_names[approaches.index(approach)]
        cur_plot.plot(violations, color=color, linestyle=ls, label=approach_name, linewidth=0.8)
    cur_plot.set_title(dataset)
    cur_plot.set_ylim(-0.2, 1.2)
    # Only the leftmost column carries y ticks.
    if cur_plot_idx % nCol == 0:
        cur_plot.set_yticks(np.arange(0, 1.1, 0.5))
    else:
        cur_plot.set_yticks([])
    if cur_plot_idx == nCol:
        cur_plot.set_ylabel("Change (normalized)", position=(1, 1.2))
    # Only the bottom row carries x ticks/labels (normalized time steps).
    if cur_plot_idx >= nCol:
        cur_plot.set_xticks(np.arange(0, len(violations) + 1, len(violations)//2))
        labels = cur_plot.get_xticks().tolist()
        labels[0] = "0"
        labels[1] = "0.5"
        labels[2] = "1"
        cur_plot.set_xticklabels(labels)
        cur_plot.set_xlabel("Time step (norm.)")
    else:
        cur_plot.set_xticks([])
    # A single shared legend above the first subplot.
    if cur_plot_idx == 0:
        cur_plot.legend(ncol=4, loc="upper center", bbox_to_anchor=[4.2, 2.1], )
    cur_plot_idx += 1
fig.subplots_adjust(hspace=0.6, wspace=0.05)
plt.savefig(os.path.join(current_directory, "Plots", "Figure_8.pdf"), bbox_inches="tight")
| 34.175115 | 144 | 0.567422 | import prose.datainsights as di
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import subprocess
from sklearn.decomposition import PCA
import numpy as np
import numpy.random as rnd
import matplotlib.pyplot as plt
import os
import sys
import warnings
from matplotlib import rc
from matplotlib import rcParams
systemName = 'CCSynth'
rcParams['font.family'] = 'sans-serif'
rc('text', usetex=True)
warnings.filterwarnings('ignore')
module_path = os.path.abspath(os.path.join("Baseline", "PCA-SPLL"))
if module_path not in sys.path:
sys.path.append(module_path)
from SPLL import SPLL
path = os.getcwd()
path = path[0].lower() + path[2:]
current_directory = os.getcwd()
CD_executable = os.path.join(current_directory, "Baseline", "PCA-CD", "ChangeDetection", "CD")
data_source = os.path.join(current_directory, "data", "uncompressed", "EVL/")
output_prefix = os.path.join(current_directory, "data", "uncompressed", "EVL", "results/")
window_sizes = {
"1CDT":400,
"2CDT":400,
"1CHT":400,
"2CHT":400,
"4CR":2000,
"4CRE-V1":1000,
"4CRE-V2":1000,
"5CVT":1000,
"1CSurr":600,
"4CE1CF":7500,
"UG-2C-2D":1000,
"MG-2C-2D":2000,
"FG-2C-2D":2000,
"UG-2C-3D":2000,
"UG-2C-5D":2000,
"GEARS-2C-2D":2000,
}
def get_df(dataset, raw=True):
df = pd.read_csv(data_source + dataset + ".txt", header=None)
if not raw:
for col in df.columns:
if len(list(df[col].unique())) < 10:
df[col] = df[col].apply(str)
return df
def get_cd_violations(dataset, window, div_metric):
input_source = data_source + "_" + dataset + ".txt"
try:
open(data_source + "_" + dataset + ".txt", "r")
except:
infile = open(data_source + dataset + ".txt", "r")
outfile = open(data_source + "_" + dataset + ".txt", "w")
for line in infile:
line = line.replace(",", "\t")
outfile.write(line)
outfile.close()
nDim = get_df(dataset).shape[1]
seg_fault_hack = "0"
if dataset == "1CSurr":
seg_fault_hack = "1"
command = 'bash -c'
command = command.split(sep=" ")
cd_command = " ".join([CD_executable,
input_source,
str(window),
"500",
output_prefix + "output.txt",
str(nDim),
"0.005",
str(div_metric),
seg_fault_hack])
command.append(cd_command)
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
output_source = data_source + "results/output.txt"
violations = [float(violation) for violation in open(output_source)]
os.remove(output_source)
return violations
def get_violations(approaches, dataset, window):
all_violations = dict()
df_raw = get_df(dataset, raw=True)
df = get_df(dataset, raw=False)
for approach in approaches:
violations = []
if approach.startswith("CD-Area"):
violations = get_cd_violations(dataset, window, 1)
elif approach.startswith("CD-MKL"):
violations = get_cd_violations(dataset, window, 2)
elif approach.startswith("PCA-SPLL"):
train_df = df_raw[:window]
pca = PCA()
pca.fit(train_df)
feature_indices = np.where(pca.explained_variance_ratio_ < 0.35)[0]
train_PCA = pca.transform(train_df)[:, feature_indices.tolist()]
n_chunks = int(df_raw.shape[0]/window)
violations = []
for i in range(n_chunks):
test_df = df_raw[i * window: (i + 1) * window]
if np.size(feature_indices) < 2:
st_raw, st_pca = 0, 0
else:
test_PCA = pca.transform(test_df)[:, feature_indices.tolist()]
_, _, st_pca = SPLL(train_PCA, test_PCA)
violations.append(st_pca)
else:
max_self_violation_threshold = 0.15
if approach == "PCA":
assertions = di.learn_assertions(df[:window], max_self_violation = max_self_violation_threshold)
if approach == "DT":
assertions = di.learn_assertions(df[:window], learn_decision_tree=True, max_self_violation = max_self_violation_threshold)
n_chunks = int(df.shape[0]/window)
violations = []
for i in range(n_chunks):
test_df = df[i * window: (i + 1) * window]
result = assertions.evaluate(test_df, normalizeViolation=False)
violations.append(result.avg_violation)
all_violations[approach] = np.array(violations)
return all_violations
rcParams['figure.dpi'] = 300
approaches = ["CD-MKL", "CD-Area", "PCA-SPLL", "DT"]
approach_names = ["CD-MKL", "CD-Area", "PCA-SPLL (25\%)", systemName]
colors = ["C8", "C0", "C2", "C3"]
lss = ['--','-.', ':', '-',]
nCol = 8
fig, ax = plt.subplots(2, nCol)
fig.set_size_inches(12, 2)
cur_plot_idx = 0
for dataset, window in window_sizes.items():
if cur_plot_idx == 8 or cur_plot_idx == 0 or True:
all_violations = get_violations(approaches, dataset, window)
for approach in approaches:
cur_plot = ax[cur_plot_idx//nCol][cur_plot_idx%nCol]
violations = all_violations[approach]
if max(violations) > 0:
violations = (violations - np.min(violations))/(np.max(violations) - np.min(violations))
color = colors[approaches.index(approach)]
ls = lss[approaches.index(approach)]
approach_name = approach_names[approaches.index(approach)]
cur_plot.plot(violations, color=color, linestyle=ls, label=approach_name, linewidth=0.8)
cur_plot.set_title(dataset)
cur_plot.set_ylim(-0.2, 1.2)
if cur_plot_idx % nCol == 0:
cur_plot.set_yticks(np.arange(0, 1.1, 0.5))
else:
cur_plot.set_yticks([])
if cur_plot_idx == nCol:
cur_plot.set_ylabel("Change (normalized)", position=(1, 1.2))
if cur_plot_idx >= nCol:
cur_plot.set_xticks(np.arange(0, len(violations) + 1, len(violations)//2))
labels = cur_plot.get_xticks().tolist()
labels[0] = "0"
labels[1] = "0.5"
labels[2] = "1"
cur_plot.set_xticklabels(labels)
cur_plot.set_xlabel("Time step (norm.)")
else:
cur_plot.set_xticks([])
if cur_plot_idx == 0:
cur_plot.legend(ncol=4,loc="upper center", bbox_to_anchor=[4.2,2.1],)
cur_plot_idx += 1
fig.subplots_adjust(hspace=0.6, wspace=0.05)
plt.savefig(os.path.join(current_directory, "Plots", "Figure_8.pdf"), bbox_inches="tight")
| true | true |
1c30b2a2341a327a30c56419156b38d7140393cf | 3,576 | py | Python | csscompressor/tests/test_partition.py | sprymix/csscompressor | 0857438db725d5c1d2672f45d9cf3e7dc14646a4 | [
"BSD-3-Clause"
] | 38 | 2015-05-22T18:55:52.000Z | 2022-03-05T21:18:58.000Z | csscompressor/tests/test_partition.py | sprymix/csscompressor | 0857438db725d5c1d2672f45d9cf3e7dc14646a4 | [
"BSD-3-Clause"
] | 8 | 2015-08-18T04:31:11.000Z | 2022-01-28T16:55:33.000Z | venv/Lib/site-packages/csscompressor/tests/test_partition.py | FZJ-INM5/JuHPLC | efaf9b8f5d7f0c9a8ad687d0f143e161f523db7c | [
"Unlicense"
] | 10 | 2015-01-04T14:14:05.000Z | 2020-09-03T18:32:02.000Z | ##
# Copyright (c) 2013 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
from csscompressor.tests.base import BaseTest
from csscompressor import compress_partitioned
import unittest
class Tests(unittest.TestCase):
    """Tests for :func:`csscompressor.compress_partitioned`.

    ``compress_partitioned`` compresses CSS and splits the result into a
    list of chunks holding at most ``max_rules_per_file`` top-level rules
    each; ``@media`` blocks are kept whole.  Unbalanced input must raise
    ``ValueError``.  (Local CSS strings are named ``src`` rather than
    ``input`` to avoid shadowing the builtin.)
    """

    def test_partition_1(self):
        # An empty stylesheet still yields exactly one (empty) chunk.
        src = ''
        output = compress_partitioned(src, max_rules_per_file=2)
        self.assertEqual(output, [''])

    def test_partition_2(self):
        # Braces inside string literals must not be counted as rule
        # delimiters when partitioning.
        src = '''
            a {content: '}}'}
            b {content: '}'}
            c {content: '{'}
        '''
        output = compress_partitioned(src, max_rules_per_file=2)
        self.assertEqual(output,
                         ["a{content:'}}'}b{content:'}'}", "c{content:'{'}"])

    def test_partition_3(self):
        # Every @media block is emitted whole, one chunk per block, even
        # though each holds more than max_rules_per_file rules.
        src = '''
            @media{
                a {p: 1}
                b {p: 2}
                x {p: 2}
            }
            @media{
                c {p: 1}
                d {p: 2}
                y {p: 2}
            }
            @media{
                e {p: 1}
                f {p: 2}
                z {p: 2}
            }
        '''
        output = compress_partitioned(src, max_rules_per_file=2)
        self.assertEqual(output, ['@media{a{p:1}b{p:2}x{p:2}}',
                                  '@media{c{p:1}d{p:2}y{p:2}}',
                                  '@media{e{p:1}f{p:2}z{p:2}}'])

    def test_partition_4(self):
        # Unterminated @media block -> rejected.
        src = '''
            @media{
                a {p: 1}
                b {p: 2}
                x {p: 2}
        '''
        with self.assertRaises(ValueError):
            compress_partitioned(src, max_rules_per_file=2)

    def test_partition_5(self):
        # Unbalanced nested @media blocks -> rejected.
        src = '''
            @media{
                a {p: 1}
                b {p: 2}
                x {p: 2}
            @media{
                c {p: 1}
                d {p: 2}
                y {p: 2}
            }
            @media{
                e {p: 1}
                f {p: 2}
                z {p: 2}
            }
        '''
        with self.assertRaises(ValueError):
            compress_partitioned(src, max_rules_per_file=2)

    def test_partition_6(self):
        # Stray closing brace after an empty @media block -> rejected.
        src = '''
            @media{}}
            a {p: 1}
            b {p: 2}
            x {p: 2}
        '''
        with self.assertRaises(ValueError):
            compress_partitioned(src, max_rules_per_file=2)

    def test_partition_7(self):
        # With max_rules_per_file=2, each of these grouped-selector rules
        # ends up in its own chunk.
        src = '''
            a, a1, a2 {color: red}
            b, b2, b3 {color: red}
            c, c3, c4, c5 {color: red}
            d {color: red}
        '''
        output = compress_partitioned(src, max_rules_per_file=2)
        self.assertEqual(output, ['a,a1,a2{color:red}', 'b,b2,b3{color:red}',
                                  'c,c3,c4,c5{color:red}', 'd{color:red}'])

    def test_partition_8(self):
        src = '''
            @media{
                a {p: 1}
                b {p: 2}
                x {p: 2}
            }
            @media{
                c {p: 1}
                d {p: 2}
                y {p: 2}
            }
            @media{
                e {p: 1}
                f {p: 2}
                z {p: 2}
            }
            z {p: 2}
        '''
        # carefully pick 'max_linelen' to have a trailing '\n' after
        # '_compress' call
        output = compress_partitioned(src, max_rules_per_file=2, max_linelen=6)
        self.assertEqual(output, ['@media{a{p:1}\nb{p:2}x{p:2}\n}',
                                  '@media{c{p:1}\nd{p:2}y{p:2}\n}',
                                  '@media{e{p:1}\nf{p:2}z{p:2}\n}',
                                  'z{p:2}'])
| 25.726619 | 81 | 0.39038 |
from csscompressor.tests.base import BaseTest
from csscompressor import compress_partitioned
import unittest
class Tests(unittest.TestCase):
def test_partition_1(self):
input = ''
output = compress_partitioned(input, max_rules_per_file=2)
assert output == ['']
def test_partition_2(self):
input = '''
a {content: '}}'}
b {content: '}'}
c {content: '{'}
'''
output = compress_partitioned(input, max_rules_per_file=2)
assert output == ["a{content:'}}'}b{content:'}'}", "c{content:'{'}"]
def test_partition_3(self):
input = '''
@media{
a {p: 1}
b {p: 2}
x {p: 2}
}
@media{
c {p: 1}
d {p: 2}
y {p: 2}
}
@media{
e {p: 1}
f {p: 2}
z {p: 2}
}
'''
output = compress_partitioned(input, max_rules_per_file=2)
assert output == ['@media{a{p:1}b{p:2}x{p:2}}',
'@media{c{p:1}d{p:2}y{p:2}}',
'@media{e{p:1}f{p:2}z{p:2}}']
def test_partition_4(self):
input = '''
@media{
a {p: 1}
b {p: 2}
x {p: 2}
'''
self.assertRaises(ValueError, compress_partitioned,
input, max_rules_per_file=2)
def test_partition_5(self):
input = '''
@media{
a {p: 1}
b {p: 2}
x {p: 2}
@media{
c {p: 1}
d {p: 2}
y {p: 2}
}
@media{
e {p: 1}
f {p: 2}
z {p: 2}
}
'''
self.assertRaises(ValueError, compress_partitioned,
input, max_rules_per_file=2)
def test_partition_6(self):
input = '''
@media{}}
a {p: 1}
b {p: 2}
x {p: 2}
'''
self.assertRaises(ValueError, compress_partitioned,
input, max_rules_per_file=2)
def test_partition_7(self):
input = '''
a, a1, a2 {color: red}
b, b2, b3 {color: red}
c, c3, c4, c5 {color: red}
d {color: red}
'''
output = compress_partitioned(input, max_rules_per_file=2)
assert output == ['a,a1,a2{color:red}', 'b,b2,b3{color:red}',
'c,c3,c4,c5{color:red}', 'd{color:red}']
def test_partition_8(self):
input = '''
@media{
a {p: 1}
b {p: 2}
x {p: 2}
}
@media{
c {p: 1}
d {p: 2}
y {p: 2}
}
@media{
e {p: 1}
f {p: 2}
z {p: 2}
}
z {p: 2}
'''
output = compress_partitioned(input, max_rules_per_file=2, max_linelen=6)
assert output == ['@media{a{p:1}\nb{p:2}x{p:2}\n}',
'@media{c{p:1}\nd{p:2}y{p:2}\n}',
'@media{e{p:1}\nf{p:2}z{p:2}\n}',
'z{p:2}']
| true | true |
1c30b383d1bf94598ddc72b74f9c1bbed1aa8523 | 3,350 | py | Python | yuanrl/nn/QMIXBackbone.py | yuanmingqi/YuanRL | b0e6cdb0207d23ec9c883191f9ca13a6a08f9769 | [
"MIT"
] | 1 | 2021-03-07T08:19:45.000Z | 2021-03-07T08:19:45.000Z | yuanrl/nn/QMIXBackbone.py | yuanmingqi/YuanRL | b0e6cdb0207d23ec9c883191f9ca13a6a08f9769 | [
"MIT"
] | null | null | null | yuanrl/nn/QMIXBackbone.py | yuanmingqi/YuanRL | b0e6cdb0207d23ec9c883191f9ca13a6a08f9769 | [
"MIT"
] | null | null | null | from torch import nn
from torch.nn import functional as F
import torch
class SingleAgent(nn.Module):
    """Recurrent Q-network for one agent in QMIX.

    Two fully connected layers embed the agent's local observation, a GRU
    cell carries the recurrent hidden state across time steps, and a final
    linear layer maps the hidden state to per-action Q-values.

    Keys expected in the ``kwargs`` dict:
        input_dim: size of the local observation vector.
        hidden_dim: size of the GRU hidden state.
        output_dim: number of actions (Q-values emitted).
    """

    def __init__(self, kwargs):
        super().__init__()
        self.fc1 = nn.Linear(kwargs['input_dim'], 64)
        self.fc2 = nn.Linear(64, 128)
        self.rnn = nn.GRUCell(128, kwargs['hidden_dim'])
        self.fc3 = nn.Linear(kwargs['hidden_dim'], kwargs['output_dim'])
        self.leaky_relu = nn.LeakyReLU()

    def forward(self, local_state, h_in):
        """Run one recurrent step.

        Args:
            local_state: (batch, input_dim) observation tensor.
            h_in: (batch, hidden_dim) previous GRU hidden state.

        Returns:
            (q_values, h_out): (batch, output_dim) Q-values and the new
            (batch, hidden_dim) hidden state.
        """
        x = self.leaky_relu(self.fc1(local_state))
        x = self.leaky_relu(self.fc2(x))
        h_out = self.rnn(x, h_in)
        q = self.fc3(h_out)
        return q, h_out
class MultiAgent(nn.Module):
    """Container running one ``SingleAgent`` Q-network per agent.

    Args:
        device: torch device the input tensors are moved to.
        agents_num: number of agents (one ``SingleAgent`` built per agent).
        agent_kwargs: kwargs dict forwarded to every ``SingleAgent``.
    """

    def __init__(self, device, agents_num, agent_kwargs):
        super().__init__()
        self.device = device
        # nn.ModuleList (instead of a plain Python list) is required so the
        # per-agent networks are registered as submodules: with a bare list
        # their parameters are invisible to .parameters()/.state_dict() and
        # are not moved by .to(device), silently breaking training.
        self.all_agents = nn.ModuleList(
            SingleAgent(kwargs=agent_kwargs) for _ in range(agents_num))

    def forward(self, local_state, ph, flag):
        """Run every agent for one step and stack the results.

        When ``flag == 'eval'`` the inputs are indexed per agent as
        ``local_state[idx]`` / ``ph[idx]`` (single unbatched vectors);
        otherwise ('train') they are batched arrays indexed as
        ``local_state[:, idx, :]`` / ``ph[:, idx, :]``.

        Returns:
            (all_local_qs, all_h): per-agent Q-values and hidden states,
            each stacked along dim=1 (the agent axis).
        """
        all_local_qs = []
        all_h = []
        for idx, agent in enumerate(self.all_agents):
            if flag == 'eval':
                ls_tensor = torch.FloatTensor(local_state[idx]).unsqueeze(0).to(self.device)
                ph_tensor = torch.FloatTensor(ph[idx]).unsqueeze(0).to(self.device)
            else:
                # train: inputs carry a leading batch dimension
                ls_tensor = torch.FloatTensor(local_state[:, idx, :]).to(self.device)
                ph_tensor = torch.FloatTensor(ph[:, idx, :]).to(self.device)
            local_qs, h = agent(ls_tensor, ph_tensor)
            all_local_qs.append(local_qs)
            all_h.append(h)
        all_local_qs = torch.stack(all_local_qs, dim=1)
        all_h = torch.stack(all_h, dim=1)
        return all_local_qs, all_h
class MixingNet(nn.Module):
    """QMIX mixing network: combines per-agent Q-values into a joint Q-value.

    All mixing weights are produced by hypernetworks conditioned on the
    global state; ``torch.abs`` on the weight matrices keeps the joint
    Q-value monotone in each agent's Q (the QMIX constraint).

    Keys expected in the ``kwargs`` dict:
        hyper_input_dim: size of the global-state vector.
        mixing_input_dim: number of per-agent Q-values mixed together.
        mixing_output_dim: size of the joint Q output.
    """

    def __init__(self, kwargs):
        super().__init__()
        # Hypernetworks mapping the global state to the weights/biases of a
        # 3-layer mixing MLP: mixing_input_dim -> 64 -> 128 -> mixing_output_dim.
        self.hyper_w1 = nn.Linear(kwargs['hyper_input_dim'], kwargs['mixing_input_dim'] * 64)
        self.hyper_b1 = nn.Linear(kwargs['hyper_input_dim'], 64)
        self.hyper_w2 = nn.Linear(kwargs['hyper_input_dim'], 64 * 128)
        self.hyper_b2 = nn.Linear(kwargs['hyper_input_dim'], 128)
        self.hyper_w3 = nn.Linear(kwargs['hyper_input_dim'], 128 * kwargs['mixing_output_dim'])
        self.hyper_b3 = nn.Sequential(
            nn.Linear(kwargs['hyper_input_dim'], 64),
            nn.ReLU(),
            nn.Linear(64, kwargs['mixing_output_dim'])
        )
        self.elu = nn.ELU()

    def forward(self, global_state, q_values):
        """Mix per-agent Q-values under the global state.

        Args:
            global_state: (batch, hyper_input_dim) tensor.
            q_values: (batch, 1, mixing_input_dim) per-agent Q-values.

        Returns:
            (batch, mixing_output_dim) joint Q-value.
        """
        # Layer weights: abs() enforces monotonic mixing; biases stay signed.
        w1 = torch.abs(self.hyper_w1(global_state).view(-1, q_values.shape[2], 64))
        b1 = self.hyper_b1(global_state).view(-1, 1, 64)
        w2 = torch.abs(self.hyper_w2(global_state).view(-1, 64, 128))
        b2 = self.hyper_b2(global_state).view(-1, 1, 128)
        # NOTE(review): w3/b3 are reshaped with a hard-coded trailing dim of
        # 1, which is only correct when mixing_output_dim == 1 -- confirm.
        w3 = torch.abs(self.hyper_w3(global_state).view(-1, 128, 1))
        b3 = self.hyper_b3(global_state).view(-1, 1, 1)
        x = self.elu(torch.bmm(q_values, w1) + b1)
        x = self.elu(torch.bmm(x, w2) + b2)
        x = torch.bmm(x, w3) + b3
        # Drop the middle (length-1) dim -> (batch, mixing_output_dim).
        return x[:, 0, :]
from torch.nn import functional as F
import torch
class SingleAgent(nn.Module):
def __init__(self, kwargs):
super(SingleAgent, self).__init__()
self.fc1 = nn.Linear(kwargs['input_dim'], 64)
self.fc2 = nn.Linear(64, 128)
self.rnn = nn.GRUCell(128, kwargs['hidden_dim'])
self.fc3 = nn.Linear(kwargs['hidden_dim'], kwargs['output_dim'])
self.leaky_relu = nn.LeakyReLU()
def forward(self, local_state, h_in):
x = self.leaky_relu(self.fc1(local_state))
x = self.leaky_relu(self.fc2(x))
h_out = self.rnn(x, h_in)
x = self.fc3(h_out)
return x, h_out
class MultiAgent(nn.Module):
def __init__(self, device, agents_num, agent_kwargs):
super(MultiAgent, self).__init__()
self.device = device
self.all_agents = list()
for i in range(agents_num):
self.all_agents.append(SingleAgent(kwargs=agent_kwargs))
def forward(self, local_state, ph, flag):
all_local_qs = []
all_h = []
for idx, agent in enumerate(self.all_agents):
if flag == 'eval':
ls_tensor = torch.FloatTensor(local_state[idx]).unsqueeze(0).to(self.device)
ph_tensor = torch.FloatTensor(ph[idx]).unsqueeze(0).to(self.device)
else:
''' train '''
ls_tensor = torch.FloatTensor(local_state[:, idx, :]).to(self.device)
ph_tensor = torch.FloatTensor(ph[:, idx, :]).to(self.device)
local_qs, h = agent(ls_tensor, ph_tensor)
all_local_qs.append(local_qs)
all_h.append(h)
all_local_qs = torch.stack(all_local_qs, dim=1)
all_h = torch.stack(all_h, dim=1)
return all_local_qs, all_h
class MixingNet(nn.Module):
def __init__(self, kwargs):
super(MixingNet, self).__init__()
self.hyper_w1 = nn.Linear(kwargs['hyper_input_dim'], kwargs['mixing_input_dim'] * 64)
self.hyper_b1 = nn.Linear(kwargs['hyper_input_dim'], 64)
self.hyper_w2 = nn.Linear(kwargs['hyper_input_dim'], 64 * 128)
self.hyper_b2 = nn.Linear(kwargs['hyper_input_dim'], 128)
self.hyper_w3 = nn.Linear(kwargs['hyper_input_dim'], 128 * kwargs['mixing_output_dim'])
self.hyper_b3 = nn.Sequential(
nn.Linear(kwargs['hyper_input_dim'], 64),
nn.ReLU(),
nn.Linear(64, kwargs['mixing_output_dim'])
)
self.elu = nn.ELU()
def forward(self, global_state, q_values):
w1 = self.hyper_w1(global_state)
w1 = torch.abs(w1.view(-1, q_values.shape[2], 64))
b1 = self.hyper_b1(global_state)
b1 = b1.view(-1, 1, 64)
w2 = self.hyper_w2(global_state)
w2 = torch.abs(w2.view(-1, 64, 128))
b2 = self.hyper_b2(global_state)
b2 = b2.view(-1, 1, 128)
w3 = self.hyper_w3(global_state)
w3 = torch.abs(w3.view(-1, 128, 1))
b3 = self.hyper_b3(global_state)
b3 = b3.view(-1, 1, 1)
x = self.elu(torch.bmm(q_values, w1) + b1)
x = self.elu(torch.bmm(x ,w2) + b2)
x = torch.bmm(x, w3) + b3
return x[:, 0, :] | true | true |
1c30b6562e9aee66d3c3a55d6a91ed7a1553a9be | 9,135 | py | Python | mmdet/models/detectors/two_stage.py | xwuShirley/mmdetection | f9b9eaad9f58e90862997b90a034aad1518baf2f | [
"Apache-2.0"
] | null | null | null | mmdet/models/detectors/two_stage.py | xwuShirley/mmdetection | f9b9eaad9f58e90862997b90a034aad1518baf2f | [
"Apache-2.0"
] | null | null | null | mmdet/models/detectors/two_stage.py | xwuShirley/mmdetection | f9b9eaad9f58e90862997b90a034aad1518baf2f | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
# from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector
@DETECTORS.register_module()
class TwoStageDetector(BaseDetector):
    """Base class for two-stage detectors (RPN + task-specific RoI head).

    NOTE(review): in this fork ``forward_train`` has been repurposed to
    return RoI features for ground-truth boxes instead of computing losses
    (the stock mmdetection training path was removed from this class);
    confirm that all callers expect this contract.
    """

    def __init__(self,
                 backbone,
                 neck=None,
                 rpn_head=None,
                 roi_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        super(TwoStageDetector, self).__init__()
        self.backbone = build_backbone(backbone)

        if neck is not None:
            self.neck = build_neck(neck)

        if rpn_head is not None:
            # Train cfg may be absent when the model is built for inference.
            rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
            rpn_head_ = rpn_head.copy()
            rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)
            self.rpn_head = build_head(rpn_head_)

        if roi_head is not None:
            # update train and test cfg here for now
            # TODO: refactor assigner & sampler
            rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None
            roi_head.update(train_cfg=rcnn_train_cfg)
            roi_head.update(test_cfg=test_cfg.rcnn)
            self.roi_head = build_head(roi_head)

        self.train_cfg = train_cfg
        self.test_cfg = test_cfg

        self.init_weights(pretrained=pretrained)

    @property
    def with_rpn(self):
        """bool: whether the detector has RPN"""
        return hasattr(self, 'rpn_head') and self.rpn_head is not None

    @property
    def with_roi_head(self):
        """bool: whether the detector has a RoI head"""
        return hasattr(self, 'roi_head') and self.roi_head is not None

    def init_weights(self, pretrained=None):
        """Initialize the weights in detector.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        super(TwoStageDetector, self).init_weights(pretrained)
        self.backbone.init_weights(pretrained=pretrained)
        if self.with_neck:
            if isinstance(self.neck, nn.Sequential):
                for m in self.neck:
                    m.init_weights()
            else:
                self.neck.init_weights()
        if self.with_rpn:
            self.rpn_head.init_weights()
        if self.with_roi_head:
            self.roi_head.init_weights(pretrained)

    def extract_feat(self, img):
        """Directly extract features from the backbone+neck."""
        x = self.backbone(img)
        if self.with_neck:
            x = self.neck(x)
        return x

    def forward_dummy(self, img):
        """Used for computing network flops.

        See `mmdetection/tools/get_flops.py`
        """
        outs = ()
        # backbone
        x = self.extract_feat(img)
        # rpn
        if self.with_rpn:
            rpn_outs = self.rpn_head(x)
            outs = outs + (rpn_outs, )
        # 1000 random boxes stand in for real proposals when counting flops.
        proposals = torch.randn(1000, 4).to(img.device)
        # roi_head
        roi_outs = self.roi_head.forward_dummy(x, proposals)
        outs = outs + (roi_outs, )
        return outs

    def forward_train(self, img, gt_bboxes):
        """Extract RoI features for the ground-truth boxes.

        Args:
            img (Tensor): of shape (N, C, H, W) encoding input images.
                Typically these should be mean centered and std scaled.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.

        Returns:
            Tensor: gt_features same len as gt_bboxes
        """
        x = self.extract_feat(img)
        return self.roi_head.bbox_forward_feature(x, gt_bboxes)

    async def async_simple_test(self,
                                img,
                                img_meta,
                                proposals=None,
                                rescale=False):
        """Async test without augmentation."""
        assert self.with_bbox, 'Bbox head must be implemented.'
        x = self.extract_feat(img)

        if proposals is None:
            proposal_list = await self.rpn_head.async_simple_test_rpn(
                x, img_meta)
        else:
            proposal_list = proposals

        return await self.roi_head.async_simple_test(
            x, proposal_list, img_meta, rescale=rescale)

    def simple_test(self, img, img_metas, proposals=None, rescale=False):
        """Test without augmentation.

        Uses RPN proposals unless explicit ``proposals`` are supplied.
        """
        assert self.with_bbox, 'Bbox head must be implemented.'
        x = self.extract_feat(img)

        if proposals is None:
            proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
        else:
            proposal_list = proposals

        return self.roi_head.simple_test(
            x, proposal_list, img_metas, rescale=rescale)

    def aug_test(self, imgs, img_metas, rescale=False):
        """Test with augmentations.

        If rescale is False, then returned bboxes and masks will fit the scale
        of imgs[0].
        """
        x = self.extract_feats(imgs)
        proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)
        return self.roi_head.aug_test(
            x, proposal_list, img_metas, rescale=rescale)
| 35.964567 | 80 | 0.555446 | import torch
import torch.nn as nn
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector
@DETECTORS.register_module()
class TwoStageDetector(BaseDetector):
def __init__(self,
backbone,
neck=None,
rpn_head=None,
roi_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(TwoStageDetector, self).__init__()
self.backbone = build_backbone(backbone)
if neck is not None:
self.neck = build_neck(neck)
if rpn_head is not None:
rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
rpn_head_ = rpn_head.copy()
rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)
self.rpn_head = build_head(rpn_head_)
if roi_head is not None:
rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None
roi_head.update(train_cfg=rcnn_train_cfg)
roi_head.update(test_cfg=test_cfg.rcnn)
self.roi_head = build_head(roi_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
@property
def with_rpn(self):
return hasattr(self, 'rpn_head') and self.rpn_head is not None
@property
def with_roi_head(self):
return hasattr(self, 'roi_head') and self.roi_head is not None
def init_weights(self, pretrained=None):
super(TwoStageDetector, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
if self.with_neck:
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
if self.with_rpn:
self.rpn_head.init_weights()
if self.with_roi_head:
self.roi_head.init_weights(pretrained)
def extract_feat(self, img):
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_dummy(self, img):
outs = ()
x = self.extract_feat(img)
if self.with_rpn:
rpn_outs = self.rpn_head(x)
outs = outs + (rpn_outs, )
proposals = torch.randn(1000, 4).to(img.device)
roi_outs = self.roi_head.forward_dummy(x, proposals)
outs = outs + (roi_outs, )
return outs
# Args:
# img (Tensor): of shape (N, C, H, W) encoding input images.
# Typically these should be mean centered and std scaled.
# img_metas (list[dict]): list of image info dict where each dict
# has: 'img_shape', 'scale_factor', 'flip', and may also contain
# 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
# For details on the values of these keys see
# `mmdet/datasets/pipelines/formatting.py:Collect`.
# gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
# shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
# gt_labels (list[Tensor]): class indices corresponding to each box
# gt_bboxes_ignore (None | list[Tensor]): specify which bounding
# boxes can be ignored when computing the loss.
# gt_masks (None | Tensor) : true segmentation masks for each box
# used if the architecture supports a segmentation task.
# proposals : override rpn proposals with custom proposals. Use when
# `with_rpn` is False.
# Returns:
# dict[str, Tensor]: a dictionary of loss components
# """
x = self.extract_feat(img)
return self.roi_head.bbox_forward_feature(x, gt_bboxes)
img,
img_meta,
proposals=None,
rescale=False):
assert self.with_bbox, 'Bbox head must be implemented.'
x = self.extract_feat(img)
if proposals is None:
proposal_list = await self.rpn_head.async_simple_test_rpn(
x, img_meta)
else:
proposal_list = proposals
return await self.roi_head.async_simple_test(
x, proposal_list, img_meta, rescale=rescale)
as,proposals=None, rescale=False):
assert self.with_bbox, 'Bbox head must be implemented.'
x = self.extract_feat(img)
if proposals is None:
proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
else:
proposal_list = proposals
return self.roi_head.simple_test(
x, proposal_list, img_metas, rescale=rescale)
def aug_test(self, imgs, img_metas, rescale=False):
x = self.extract_feats(imgs)
proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)
return self.roi_head.aug_test(
x, proposal_list, img_metas, rescale=rescale)
| true | true |
1c30b781ec2bce688a731c1d15464a679af1ca9a | 5,341 | py | Python | mlmodels/model_tch/vae/network_test.py | gitter-badger/mlmodels | f08cc9b6ec202d4ad25ecdda2f44487da387569d | [
"MIT"
] | 1 | 2022-03-11T07:57:48.000Z | 2022-03-11T07:57:48.000Z | mlmodels/model_tch/vae/network_test.py | whitetiger1002/mlmodels | f70f1da7434e8855eed50adc67b49cc169f2ea24 | [
"MIT"
] | null | null | null | mlmodels/model_tch/vae/network_test.py | whitetiger1002/mlmodels | f70f1da7434e8855eed50adc67b49cc169f2ea24 | [
"MIT"
] | null | null | null | from torchsummary import summary
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Variable
def reparametrize(mu, logvar):
    """Reparameterization trick: sample z ~ N(mu, exp(logvar)) differentiably.

    Draws eps ~ N(0, I) and returns mu + std * eps, where
    std = exp(logvar / 2), so gradients flow through mu and logvar.
    """
    std = logvar.div(2).exp()
    # torch.randn_like replaces the deprecated Variable/.data.new() pattern
    # (Variable has been a no-op wrapper since PyTorch 0.4).
    eps = torch.randn_like(std)
    return mu + std * eps
class View(nn.Module):
    """Reshape layer: applies ``Tensor.view`` with a fixed target size so a
    reshape can sit inside an ``nn.Sequential``."""

    def __init__(self, size):
        super().__init__()
        # Target shape handed verbatim to Tensor.view in forward().
        self.size = size

    def forward(self, tensor):
        reshaped = tensor.view(self.size)
        return reshaped
class BetaVAE_new(nn.Module):
    """beta-VAE variant whose encoder bottlenecks to 4 channels
    [-1, 4, 1, 1] before the latent projection (contrast with
    BetaVAE_H's 256-channel bottleneck)."""

    def __init__(self, z_dim=10, nc=3):
        """z_dim: latent dimension; nc: number of input image channels."""
        super().__init__()
        self.z_dim = z_dim
        self.nc = nc
        # Encoder: 64x64 input downsampled by stride-2 convs; the final
        # linear layer emits mu and logvar concatenated (2 * z_dim).
        self.encoder = nn.Sequential(
            nn.Conv2d(nc, 32, 4, 2, 1),          # B, 32, 32, 32
            nn.ReLU(True),
            nn.Conv2d(32, 32, 4, 2, 1),          # B, 32, 16, 16
            nn.ReLU(True),
            nn.Conv2d(32, 64, 4, 2, 1),          # B, 64, 8, 8
            nn.ReLU(True),
            nn.Conv2d(64, 64, 4, 2, 1),          # B, 64, 4, 4
            nn.ReLU(True),
            nn.Conv2d(64, 4, 4, 1),              # B, 4, 1, 1
            nn.ReLU(True),
            View((-1, 4 * 1 * 1)),               # B, 4
            nn.Linear(4, z_dim * 2),             # B, z_dim*2
        )
        # Decoder mirrors the encoder with transposed convolutions.
        self.decoder = nn.Sequential(
            nn.Linear(z_dim, 4),                 # B, 4
            View((-1, 4, 1, 1)),                 # B, 4, 1, 1
            nn.ReLU(True),
            nn.ConvTranspose2d(4, 64, 4),        # B, 64, 4, 4
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 64, 4, 2, 1), # B, 64, 8, 8
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 32, 4, 2, 1), # B, 32, 16, 16
            nn.ReLU(True),
            nn.ConvTranspose2d(32, 32, 4, 2, 1), # B, 32, 32, 32
            nn.ReLU(True),
            nn.ConvTranspose2d(32, nc, 4, 2, 1), # B, nc, 64, 64
        )
        self.weight_init()

    def weight_init(self):
        # Kaiming-initialize every layer of both sub-networks.
        for block in self._modules:
            for m in self._modules[block]:
                kaiming_init(m)

    def forward(self, x):
        """Encode x, sample z via the reparameterization trick, decode.

        Returns (x_recon, mu, logvar).
        """
        distributions = self._encode(x)
        mu = distributions[:, :self.z_dim]
        logvar = distributions[:, self.z_dim:]
        z = reparametrize(mu, logvar)
        x_recon = self._decode(z)
        return x_recon, mu, logvar

    def _encode(self, x):
        return self.encoder(x)

    def _decode(self, z):
        return self.decoder(z)
class BetaVAE_H(nn.Module):
    """Model proposed in the original beta-VAE paper (Higgins et al., ICLR 2017).

    Same conv stack as BetaVAE_new but with a 256-channel bottleneck before
    the latent projection.
    """

    def __init__(self, z_dim=10, nc=3):
        """z_dim: latent dimension; nc: number of input image channels."""
        super().__init__()
        self.z_dim = z_dim
        self.nc = nc
        # Encoder: 64x64 input downsampled by stride-2 convs; the final
        # linear layer emits mu and logvar concatenated (2 * z_dim).
        self.encoder = nn.Sequential(
            nn.Conv2d(nc, 32, 4, 2, 1),          # B, 32, 32, 32
            nn.ReLU(True),
            nn.Conv2d(32, 32, 4, 2, 1),          # B, 32, 16, 16
            nn.ReLU(True),
            nn.Conv2d(32, 64, 4, 2, 1),          # B, 64, 8, 8
            nn.ReLU(True),
            nn.Conv2d(64, 64, 4, 2, 1),          # B, 64, 4, 4
            nn.ReLU(True),
            nn.Conv2d(64, 256, 4, 1),            # B, 256, 1, 1
            nn.ReLU(True),
            View((-1, 256 * 1 * 1)),             # B, 256
            nn.Linear(256, z_dim * 2),           # B, z_dim*2
        )
        # Decoder mirrors the encoder with transposed convolutions.
        self.decoder = nn.Sequential(
            nn.Linear(z_dim, 256),               # B, 256
            View((-1, 256, 1, 1)),               # B, 256, 1, 1
            nn.ReLU(True),
            nn.ConvTranspose2d(256, 64, 4),      # B, 64, 4, 4
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 64, 4, 2, 1), # B, 64, 8, 8
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 32, 4, 2, 1), # B, 32, 16, 16
            nn.ReLU(True),
            nn.ConvTranspose2d(32, 32, 4, 2, 1), # B, 32, 32, 32
            nn.ReLU(True),
            nn.ConvTranspose2d(32, nc, 4, 2, 1), # B, nc, 64, 64
        )
        self.weight_init()

    def weight_init(self):
        # Kaiming-initialize every layer of both sub-networks.
        for block in self._modules:
            for m in self._modules[block]:
                kaiming_init(m)

    def forward(self, x):
        """Encode x, sample z via the reparameterization trick, decode.

        Returns (x_recon, mu, logvar).
        """
        distributions = self._encode(x)
        mu = distributions[:, :self.z_dim]
        logvar = distributions[:, self.z_dim:]
        z = reparametrize(mu, logvar)
        x_recon = self._decode(z)
        return x_recon, mu, logvar

    def _encode(self, x):
        return self.encoder(x)

    def _decode(self, z):
        return self.decoder(z)
def kaiming_init(m):
    """Kaiming-normal init for conv/linear layers; constant init for BN.

    Linear/Conv2d: weight ~ Kaiming normal, bias = 0 (if present).
    BatchNorm1d/2d: weight = 1, bias = 0 (if present).
    Any other module type is left untouched.
    """
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        # kaiming_normal_ is the in-place replacement for the deprecated
        # (and since removed) init.kaiming_normal alias.
        init.kaiming_normal_(m.weight)
        if m.bias is not None:
            m.bias.data.fill_(0)
    elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
        m.weight.data.fill_(1)
        if m.bias is not None:
            m.bias.data.fill_(0)
def normal_init(m, mean, std):
    """Normal(mean, std) init for conv/linear weights; constant init for BN.

    The bias (when the layer has one) is zeroed.  Any other module type is
    left untouched.
    """
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        m.weight.data.normal_(mean, std)
        # Check the Parameter itself: the old `m.bias.data is not None` test
        # raised AttributeError for layers built with bias=False (bias is
        # None) and was otherwise always true.
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
        m.weight.data.fill_(1)
        if m.bias is not None:
            m.bias.data.zero_()
# Smoke check: build the small beta-VAE on GPU when available and print a
# per-layer summary for a 3x64x64 input (uses the third-party `torchsummary`
# package imported at the top of the file).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = BetaVAE_new().to(device)
summary(model, (3, 64, 64))
| 32.766871 | 79 | 0.491668 | from torchsummary import summary
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Variable
def reparametrize(mu, logvar):
std = logvar.div(2).exp()
eps = Variable(std.data.new(std.size()).normal_())
return mu + std*eps
class View(nn.Module):
def __init__(self, size):
super(View, self).__init__()
self.size = size
def forward(self, tensor):
return tensor.view(self.size)
class BetaVAE_new(nn.Module):
def __init__(self, z_dim=10, nc=3):
super(BetaVAE_new, self).__init__()
self.z_dim = z_dim
self.nc = nc
self.encoder = nn.Sequential(
nn.Conv2d(nc, 32, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(32, 32, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(32, 64, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(64, 64, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(64, 4, 4, 1),
nn.ReLU(True),
View((-1, 4*1*1)),
nn.Linear(4, z_dim*2),
)
self.decoder = nn.Sequential(
nn.Linear(z_dim, 4),
View((-1, 4, 1, 1)),
nn.ReLU(True),
nn.ConvTranspose2d(4, 64, 4),
nn.ReLU(True),
nn.ConvTranspose2d(64, 64, 4, 2, 1),
nn.ReLU(True),
nn.ConvTranspose2d(64, 32, 4, 2, 1),
nn.ReLU(True),
nn.ConvTranspose2d(32, 32, 4, 2, 1),
nn.ReLU(True),
nn.ConvTranspose2d(32, nc, 4, 2, 1),
)
self.weight_init()
def weight_init(self):
for block in self._modules:
for m in self._modules[block]:
kaiming_init(m)
def forward(self, x):
distributions = self._encode(x)
mu = distributions[:, :self.z_dim]
logvar = distributions[:, self.z_dim:]
z = reparametrize(mu, logvar)
x_recon = self._decode(z)
return x_recon, mu, logvar
def _encode(self, x):
return self.encoder(x)
def _decode(self, z):
return self.decoder(z)
class BetaVAE_H(nn.Module):
def __init__(self, z_dim=10, nc=3):
super(BetaVAE_H, self).__init__()
self.z_dim = z_dim
self.nc = nc
self.encoder = nn.Sequential(
nn.Conv2d(nc, 32, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(32, 32, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(32, 64, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(64, 64, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(64, 256, 4, 1),
nn.ReLU(True),
View((-1, 256*1*1)),
nn.Linear(256, z_dim*2),
)
self.decoder = nn.Sequential(
nn.Linear(z_dim, 256),
View((-1, 256, 1, 1)),
nn.ReLU(True),
nn.ConvTranspose2d(256, 64, 4),
nn.ReLU(True),
nn.ConvTranspose2d(64, 64, 4, 2, 1),
nn.ReLU(True),
nn.ConvTranspose2d(64, 32, 4, 2, 1),
nn.ReLU(True),
nn.ConvTranspose2d(32, 32, 4, 2, 1),
nn.ReLU(True),
nn.ConvTranspose2d(32, nc, 4, 2, 1),
)
self.weight_init()
def weight_init(self):
for block in self._modules:
for m in self._modules[block]:
kaiming_init(m)
def forward(self, x):
distributions = self._encode(x)
mu = distributions[:, :self.z_dim]
logvar = distributions[:, self.z_dim:]
z = reparametrize(mu, logvar)
x_recon = self._decode(z)
return x_recon, mu, logvar
def _encode(self, x):
return self.encoder(x)
def _decode(self, z):
return self.decoder(z)
def kaiming_init(m):
if isinstance(m, (nn.Linear, nn.Conv2d)):
init.kaiming_normal(m.weight)
if m.bias is not None:
m.bias.data.fill_(0)
elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
m.weight.data.fill_(1)
if m.bias is not None:
m.bias.data.fill_(0)
def normal_init(m, mean, std):
if isinstance(m, (nn.Linear, nn.Conv2d)):
m.weight.data.normal_(mean, std)
if m.bias.data is not None:
m.bias.data.zero_()
elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
m.weight.data.fill_(1)
if m.bias.data is not None:
m.bias.data.zero_()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = BetaVAE_new().to(device)
summary(model, (3, 64, 64))
| true | true |
1c30b82f79db62f5392d32243d4efc209399e2c4 | 794 | py | Python | stdplugins/alive.py | sheikhnabil/Pornhub | 2699dbb72d714fdbe3985a0dd1876f59eeca627e | [
"Apache-2.0"
] | null | null | null | stdplugins/alive.py | sheikhnabil/Pornhub | 2699dbb72d714fdbe3985a0dd1876f59eeca627e | [
"Apache-2.0"
] | null | null | null | stdplugins/alive.py | sheikhnabil/Pornhub | 2699dbb72d714fdbe3985a0dd1876f59eeca627e | [
"Apache-2.0"
] | null | null | null | """.alive Plugin for @UniBorg"""
import asyncio
from telethon import events
from telethon.tl.types import ChannelParticipantsAdmins
from uniborg.util import admin_cmd
# Handler for the `.alive` command: replies with a fixed status message and
# deletes the triggering command message.
@borg.on(admin_cmd("alive"))
async def _(event):
    # Ignore forwarded copies of the command.
    if event.fwd_from:
        return
    mentions = "`I AM ALIVE🐸 NO NEED TO cry\n\nTelethon version: 1.10.6\nPython: 3.7.4\nUser: @rajdeshmukh7\nGithub repo: https://github.com/Rajdeshmukh77/Pornhub`"
    chat = await event.get_input_chat()
    # NOTE(review): this loop only appends empty strings, so it never changes
    # `mentions` while still paying the cost of iterating every chat admin.
    # Looks like leftover code from a mention-all template -- confirm before
    # removing.
    async for x in borg.iter_participants(chat, filter=ChannelParticipantsAdmins):
        mentions += f""
    reply_message = None
    # If `.alive` was itself sent as a reply, answer in that thread;
    # otherwise reply directly to the command message.
    if event.reply_to_msg_id:
        reply_message = await event.get_reply_message()
        await reply_message.reply(mentions)
    else:
        await event.reply(mentions)
    # Remove the original `.alive` command message.
    await event.delete()
| 34.521739 | 164 | 0.717884 | import asyncio
from telethon import events
from telethon.tl.types import ChannelParticipantsAdmins
from uniborg.util import admin_cmd
@borg.on(admin_cmd("alive"))
async def _(event):
if event.fwd_from:
return
mentions = "`I AM ALIVE🐸 NO NEED TO cry\n\nTelethon version: 1.10.6\nPython: 3.7.4\nUser: @rajdeshmukh7\nGithub repo: https://github.com/Rajdeshmukh77/Pornhub`"
chat = await event.get_input_chat()
async for x in borg.iter_participants(chat, filter=ChannelParticipantsAdmins):
mentions += f""
reply_message = None
if event.reply_to_msg_id:
reply_message = await event.get_reply_message()
await reply_message.reply(mentions)
else:
await event.reply(mentions)
await event.delete()
| true | true |
1c30b8334c4a893e73a3805247e22ea67575d2b8 | 1,940 | py | Python | tests/test_merge.py | squireg/PyRate | ef603067cd6e183a0f7290cd4598aab816ab94a5 | [
"Apache-2.0"
] | null | null | null | tests/test_merge.py | squireg/PyRate | ef603067cd6e183a0f7290cd4598aab816ab94a5 | [
"Apache-2.0"
] | null | null | null | tests/test_merge.py | squireg/PyRate | ef603067cd6e183a0f7290cd4598aab816ab94a5 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# This Python module is part of the PyRate software package.
#
# Copyright 2020 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This Python module contains tests for the Merge step of PyRate.
"""
import os
import unittest
from pathlib import Path
from pyrate.merge import create_png_from_tif
from tests.common import TESTDIR
class MergingTest(unittest.TestCase):
def test_png_creation(self):
output_folder_path = Path(TESTDIR).joinpath("test_data", "merge")
create_png_from_tif(output_folder_path)
# check if color map is created
output_color_map_path = os.path.join(output_folder_path, "colourmap.txt")
if not os.path.isfile(output_color_map_path):
self.assertTrue(False, "Output color map file not found at: " + output_color_map_path)
# check if png is created
output_image_path = os.path.join(output_folder_path, "stack_rate.png")
if not os.path.isfile(output_image_path):
self.assertTrue(False, "Output png file not found at: " + output_image_path)
# check if kml is created
output_kml_path = os.path.join(output_folder_path, "stack_rate.kml")
if not os.path.isfile(output_kml_path):
self.assertTrue(False, "Output kml file not found at: " + output_kml_path)
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
| 35.272727 | 98 | 0.714433 |
import os
import unittest
from pathlib import Path
from pyrate.merge import create_png_from_tif
from tests.common import TESTDIR
class MergingTest(unittest.TestCase):
def test_png_creation(self):
output_folder_path = Path(TESTDIR).joinpath("test_data", "merge")
create_png_from_tif(output_folder_path)
output_color_map_path = os.path.join(output_folder_path, "colourmap.txt")
if not os.path.isfile(output_color_map_path):
self.assertTrue(False, "Output color map file not found at: " + output_color_map_path)
output_image_path = os.path.join(output_folder_path, "stack_rate.png")
if not os.path.isfile(output_image_path):
self.assertTrue(False, "Output png file not found at: " + output_image_path)
output_kml_path = os.path.join(output_folder_path, "stack_rate.kml")
if not os.path.isfile(output_kml_path):
self.assertTrue(False, "Output kml file not found at: " + output_kml_path)
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
| true | true |
1c30b92bef986729fa531b8608a49e8288ecbd39 | 1,792 | py | Python | 993.cousins-in-binary-tree.py | Lonitch/hackerRank | 84991b8340e725422bc47eec664532cc84a3447e | [
"MIT"
] | null | null | null | 993.cousins-in-binary-tree.py | Lonitch/hackerRank | 84991b8340e725422bc47eec664532cc84a3447e | [
"MIT"
] | null | null | null | 993.cousins-in-binary-tree.py | Lonitch/hackerRank | 84991b8340e725422bc47eec664532cc84a3447e | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=993 lang=python3
#
# [993] Cousins in Binary Tree
#
# https://leetcode.com/problems/cousins-in-binary-tree/description/
#
# algorithms
# Easy (51.85%)
# Likes: 480
# Dislikes: 29
# Total Accepted: 43.6K
# Total Submissions: 83.5K
# Testcase Example: '[1,2,3,4]\n4\n3'
#
# In a binary tree, the root node is at depth 0, and children of each depth k
# node are at depth k+1.
#
# Two nodes of a binary tree are cousins if they have the same depth, but have
# different parents.
#
# We are given the root of a binary tree with unique values, and the values x
# and y of two different nodes in the tree.
#
# Return true if and only if the nodes corresponding to the values x and y are
# cousins.
#
#
#
# Example 1:
#
#
#
# Input: root = [1,2,3,4], x = 4, y = 3
# Output: false
#
#
#
# Example 2:
#
#
#
# Input: root = [1,2,3,null,4,null,5], x = 5, y = 4
# Output: true
#
#
#
# Example 3:
#
#
#
#
# Input: root = [1,2,3,null,4], x = 2, y = 3
# Output: false
#
#
#
#
#
# Note:
#
#
# The number of nodes in the tree will be between 2 and 100.
# Each node has a unique integer value from 1 to 100.
#
#
#
#
#
#
#
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from collections import defaultdict
class Solution:
def isCousins(self, root: TreeNode, x: int, y: int) -> bool:
pd = defaultdict(list)
def util(node, d, p):
if node is not None:
pd[node.val] = [d,p]
util(node.left,d+1,node.val)
util(node.right,d+1,node.val)
util(root,0,-1)
return pd[x][0]==pd[y][0] and pd[x][1]!=pd[y][1]
# @lc code=end
| 18.285714 | 78 | 0.584263 |
from collections import defaultdict
class Solution:
def isCousins(self, root: TreeNode, x: int, y: int) -> bool:
pd = defaultdict(list)
def util(node, d, p):
if node is not None:
pd[node.val] = [d,p]
util(node.left,d+1,node.val)
util(node.right,d+1,node.val)
util(root,0,-1)
return pd[x][0]==pd[y][0] and pd[x][1]!=pd[y][1]
| true | true |
1c30b93f9f02b50cae32c7e11eebb7bff9ec6d66 | 4,289 | py | Python | aiohttp/web_middlewares.py | adamko147/aiohttp | 3250c5d75a54e19e2825d0a609f9d9cd4bf62087 | [
"Apache-2.0"
] | 1 | 2021-01-19T09:47:03.000Z | 2021-01-19T09:47:03.000Z | aiohttp/web_middlewares.py | adamko147/aiohttp | 3250c5d75a54e19e2825d0a609f9d9cd4bf62087 | [
"Apache-2.0"
] | 102 | 2020-12-27T22:15:13.000Z | 2022-03-01T08:08:48.000Z | aiohttp/web_middlewares.py | adamko147/aiohttp | 3250c5d75a54e19e2825d0a609f9d9cd4bf62087 | [
"Apache-2.0"
] | null | null | null | import re
import warnings
from typing import TYPE_CHECKING, Awaitable, Callable, Tuple, Type, TypeVar
from .web_exceptions import HTTPMove, HTTPPermanentRedirect
from .web_request import Request
from .web_response import StreamResponse
from .web_urldispatcher import SystemRoute
__all__ = (
"middleware",
"normalize_path_middleware",
)
if TYPE_CHECKING: # pragma: no cover
from .web_app import Application
_Func = TypeVar("_Func")
async def _check_request_resolves(request: Request, path: str) -> Tuple[bool, Request]:
alt_request = request.clone(rel_url=path)
match_info = await request.app.router.resolve(alt_request)
alt_request._match_info = match_info # type: ignore
if match_info.http_exception is None:
return True, alt_request
return False, request
def middleware(f: _Func) -> _Func:
warnings.warn(
"Middleware decorator is deprecated since 4.0 "
"and its behaviour is default, "
"you can simply remove this decorator.",
DeprecationWarning,
stacklevel=2,
)
return f
_Handler = Callable[[Request], Awaitable[StreamResponse]]
_Middleware = Callable[[Request, _Handler], Awaitable[StreamResponse]]
def normalize_path_middleware(
*,
append_slash: bool = True,
remove_slash: bool = False,
merge_slashes: bool = True,
redirect_class: Type[HTTPMove] = HTTPPermanentRedirect,
) -> _Middleware:
"""
Middleware factory which produces a middleware that normalizes
the path of a request. By normalizing it means:
- Add or remove a trailing slash to the path.
- Double slashes are replaced by one.
The middleware returns as soon as it finds a path that resolves
correctly. The order if both merge and append/remove are enabled is
1) merge slashes
2) append/remove slash
3) both merge slashes and append/remove slash.
If the path resolves with at least one of those conditions, it will
redirect to the new path.
Only one of `append_slash` and `remove_slash` can be enabled. If both
are `True` the factory will raise an assertion error
If `append_slash` is `True` the middleware will append a slash when
needed. If a resource is defined with trailing slash and the request
comes without it, it will append it automatically.
If `remove_slash` is `True`, `append_slash` must be `False`. When enabled
the middleware will remove trailing slashes and redirect if the resource
is defined
If merge_slashes is True, merge multiple consecutive slashes in the
path into one.
"""
correct_configuration = not (append_slash and remove_slash)
assert correct_configuration, "Cannot both remove and append slash"
async def impl(request: Request, handler: _Handler) -> StreamResponse:
if isinstance(request.match_info.route, SystemRoute):
paths_to_check = []
if "?" in request.raw_path:
path, query = request.raw_path.split("?", 1)
query = "?" + query
else:
query = ""
path = request.raw_path
if merge_slashes:
paths_to_check.append(re.sub("//+", "/", path))
if append_slash and not request.path.endswith("/"):
paths_to_check.append(path + "/")
if remove_slash and request.path.endswith("/"):
paths_to_check.append(path[:-1])
if merge_slashes and append_slash:
paths_to_check.append(re.sub("//+", "/", path + "/"))
if merge_slashes and remove_slash and path.endswith("/"):
merged_slashes = re.sub("//+", "/", path)
paths_to_check.append(merged_slashes[:-1])
for path in paths_to_check:
resolves, request = await _check_request_resolves(request, path)
if resolves:
raise redirect_class(request.raw_path + query)
return await handler(request)
return impl
def _fix_request_current_app(app: "Application") -> _Middleware:
async def impl(request: Request, handler: _Handler) -> StreamResponse:
with request.match_info.set_current_app(app):
return await handler(request)
return impl
| 34.039683 | 87 | 0.666356 | import re
import warnings
from typing import TYPE_CHECKING, Awaitable, Callable, Tuple, Type, TypeVar
from .web_exceptions import HTTPMove, HTTPPermanentRedirect
from .web_request import Request
from .web_response import StreamResponse
from .web_urldispatcher import SystemRoute
__all__ = (
"middleware",
"normalize_path_middleware",
)
if TYPE_CHECKING:
from .web_app import Application
_Func = TypeVar("_Func")
async def _check_request_resolves(request: Request, path: str) -> Tuple[bool, Request]:
alt_request = request.clone(rel_url=path)
match_info = await request.app.router.resolve(alt_request)
alt_request._match_info = match_info
if match_info.http_exception is None:
return True, alt_request
return False, request
def middleware(f: _Func) -> _Func:
warnings.warn(
"Middleware decorator is deprecated since 4.0 "
"and its behaviour is default, "
"you can simply remove this decorator.",
DeprecationWarning,
stacklevel=2,
)
return f
_Handler = Callable[[Request], Awaitable[StreamResponse]]
_Middleware = Callable[[Request, _Handler], Awaitable[StreamResponse]]
def normalize_path_middleware(
*,
append_slash: bool = True,
remove_slash: bool = False,
merge_slashes: bool = True,
redirect_class: Type[HTTPMove] = HTTPPermanentRedirect,
) -> _Middleware:
correct_configuration = not (append_slash and remove_slash)
assert correct_configuration, "Cannot both remove and append slash"
async def impl(request: Request, handler: _Handler) -> StreamResponse:
if isinstance(request.match_info.route, SystemRoute):
paths_to_check = []
if "?" in request.raw_path:
path, query = request.raw_path.split("?", 1)
query = "?" + query
else:
query = ""
path = request.raw_path
if merge_slashes:
paths_to_check.append(re.sub("//+", "/", path))
if append_slash and not request.path.endswith("/"):
paths_to_check.append(path + "/")
if remove_slash and request.path.endswith("/"):
paths_to_check.append(path[:-1])
if merge_slashes and append_slash:
paths_to_check.append(re.sub("//+", "/", path + "/"))
if merge_slashes and remove_slash and path.endswith("/"):
merged_slashes = re.sub("//+", "/", path)
paths_to_check.append(merged_slashes[:-1])
for path in paths_to_check:
resolves, request = await _check_request_resolves(request, path)
if resolves:
raise redirect_class(request.raw_path + query)
return await handler(request)
return impl
def _fix_request_current_app(app: "Application") -> _Middleware:
async def impl(request: Request, handler: _Handler) -> StreamResponse:
with request.match_info.set_current_app(app):
return await handler(request)
return impl
| true | true |
1c30ba3a1c9043eb7ed6e22de92fb59ddc738c90 | 2,062 | py | Python | python/lvmnps/switch/outlet.py | sdss/npsactor | 45377dff33bf37c92c4b4524d9de60697a5392bc | [
"BSD-3-Clause"
] | 1 | 2021-06-03T04:00:34.000Z | 2021-06-03T04:00:34.000Z | python/lvmnps/switch/outlet.py | sdss/npsactor | 45377dff33bf37c92c4b4524d9de60697a5392bc | [
"BSD-3-Clause"
] | 11 | 2021-07-01T17:40:36.000Z | 2021-11-02T04:37:21.000Z | python/lvmnps/switch/outlet.py | sdss/npsactor | 45377dff33bf37c92c4b4524d9de60697a5392bc | [
"BSD-3-Clause"
] | 2 | 2021-08-03T04:15:58.000Z | 2021-10-05T11:16:00.000Z | # -*- coding: utf-8 -*-
#
# @Author: Florian Briegel (briegel@mpia.de)
# @Date: 2021-06-22
# @Filename: lvmnps/switch/outlet.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
class Outlet(object):
"""Outlet class to manage the power switch.
Parameters
----------
swname
the name of the switch (from the configuration file)
name
The name of the outlet
portnum
The number of the port (in the range of 1~8)
description
The description about the outlet
state
the state of the outlet (on: 1, off:0)
"""
def __init__(self, swname, name, portnum, description, state):
self.swname = swname
self.name = name if name else f"{swname}.port{portnum}"
self.portnum = portnum
self.description = description if description else f"{swname} Port {portnum}"
self.inuse = bool(name) or bool(description)
self.state = state
def __str__(self):
return f"#{self.portnum}:{self.name}={self.state}"
def __repr__(self):
return self.__str__()
@staticmethod
def parse(value):
"""Parse the input data for ON/OFF."""
if value in ["off", "OFF", "0", 0, False]:
return 0
if value in ["on", "ON", "1", 1, True]:
return 1
return -1
def setState(self, value):
"""Class method: Set the state of the outlet inside the class."""
self.state = Outlet.parse(value)
def isOn(self):
"""Return the state of the outlet."""
return self.state == 1
def isOff(self):
"""Return the state of the outlet."""
return self.state == 0
def isValid(self):
"""Return the validity of the outlet."""
return self.state == -1
def toDict(self):
"""Return the dictionary describing the status of the outlet."""
return {
"state": self.state,
"descr": self.description,
"switch": self.swname,
"port": self.portnum,
}
| 27.864865 | 85 | 0.576625 |
class Outlet(object):
def __init__(self, swname, name, portnum, description, state):
self.swname = swname
self.name = name if name else f"{swname}.port{portnum}"
self.portnum = portnum
self.description = description if description else f"{swname} Port {portnum}"
self.inuse = bool(name) or bool(description)
self.state = state
def __str__(self):
return f"#{self.portnum}:{self.name}={self.state}"
def __repr__(self):
return self.__str__()
@staticmethod
def parse(value):
if value in ["off", "OFF", "0", 0, False]:
return 0
if value in ["on", "ON", "1", 1, True]:
return 1
return -1
def setState(self, value):
self.state = Outlet.parse(value)
def isOn(self):
return self.state == 1
def isOff(self):
return self.state == 0
def isValid(self):
return self.state == -1
def toDict(self):
return {
"state": self.state,
"descr": self.description,
"switch": self.swname,
"port": self.portnum,
}
| true | true |
1c30ba60a1d36039802e3cc1dc1016fd00d8e8fb | 6,922 | py | Python | tools/test/topos/attmplsfast.py | wuwenbin2/onos_1.6_update | f49cc2440a613c1ec95be0eea9af941b777b3641 | [
"Apache-2.0"
] | 13 | 2017-08-04T02:16:10.000Z | 2019-11-27T16:18:50.000Z | tools/test/topos/attmplsfast.py | maheshraju-Huawei/actn | 8402c2a73758f84daac597958abfd9546cb198be | [
"Apache-2.0"
] | 10 | 2017-10-04T08:29:08.000Z | 2020-03-06T21:02:30.000Z | tools/test/topos/attmplsfast.py | maheshraju-Huawei/actn | 8402c2a73758f84daac597958abfd9546cb198be | [
"Apache-2.0"
] | 9 | 2018-06-27T09:22:36.000Z | 2021-06-29T12:06:24.000Z | #!/usr/bin/env python
"""
"""
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import RemoteController
from mininet.node import Node
from mininet.node import CPULimitedHost
from mininet.link import TCLink
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.util import dumpNodeConnections
class AttMplsTopo( Topo ):
"Internet Topology Zoo Specimen."
def __init__( self ):
"Create a topology."
# Initialize Topology
Topo.__init__( self )
# add nodes, switches first...
NY54 = self.addSwitch( 's25' ) # 40.728270, -73.994483
CMBR = self.addSwitch( 's1' ) # 42.373730, -71.109734
CHCG = self.addSwitch( 's2' ) # 41.877461, -87.642892
CLEV = self.addSwitch( 's3' ) # 41.498928, -81.695217
RLGH = self.addSwitch( 's4' ) # 35.780150, -78.644026
ATLN = self.addSwitch( 's5' ) # 33.749017, -84.394168
PHLA = self.addSwitch( 's6' ) # 39.952906, -75.172278
WASH = self.addSwitch( 's7' ) # 38.906696, -77.035509
NSVL = self.addSwitch( 's8' ) # 36.166410, -86.787305
STLS = self.addSwitch( 's9' ) # 38.626418, -90.198143
NWOR = self.addSwitch( 's10' ) # 29.951475, -90.078434
HSTN = self.addSwitch( 's11' ) # 29.763249, -95.368332
SNAN = self.addSwitch( 's12' ) # 29.424331, -98.491745
DLLS = self.addSwitch( 's13' ) # 32.777665, -96.802064
ORLD = self.addSwitch( 's14' ) # 28.538641, -81.381110
DNVR = self.addSwitch( 's15' ) # 39.736623, -104.984887
KSCY = self.addSwitch( 's16' ) # 39.100725, -94.581228
SNFN = self.addSwitch( 's17' ) # 37.779751, -122.409791
SCRM = self.addSwitch( 's18' ) # 38.581001, -121.497844
PTLD = self.addSwitch( 's19' ) # 45.523317, -122.677768
STTL = self.addSwitch( 's20' ) # 47.607326, -122.331786
SLKC = self.addSwitch( 's21' ) # 40.759577, -111.895079
LA03 = self.addSwitch( 's22' ) # 34.056346, -118.235951
SNDG = self.addSwitch( 's23' ) # 32.714564, -117.153528
PHNX = self.addSwitch( 's24' ) # 33.448289, -112.076299
# ... and now hosts
NY54_host = self.addHost( 'h25' )
CMBR_host = self.addHost( 'h1' )
CHCG_host = self.addHost( 'h2' )
CLEV_host = self.addHost( 'h3' )
RLGH_host = self.addHost( 'h4' )
ATLN_host = self.addHost( 'h5' )
PHLA_host = self.addHost( 'h6' )
WASH_host = self.addHost( 'h7' )
NSVL_host = self.addHost( 'h8' )
STLS_host = self.addHost( 'h9' )
NWOR_host = self.addHost( 'h10' )
HSTN_host = self.addHost( 'h11' )
SNAN_host = self.addHost( 'h12' )
DLLS_host = self.addHost( 'h13' )
ORLD_host = self.addHost( 'h14' )
DNVR_host = self.addHost( 'h15' )
KSCY_host = self.addHost( 'h16' )
SNFN_host = self.addHost( 'h17' )
SCRM_host = self.addHost( 'h18' )
PTLD_host = self.addHost( 'h19' )
STTL_host = self.addHost( 'h20' )
SLKC_host = self.addHost( 'h21' )
LA03_host = self.addHost( 'h22' )
SNDG_host = self.addHost( 'h23' )
PHNX_host = self.addHost( 'h24' )
# add edges between switch and corresponding host
self.addLink( NY54 , NY54_host )
self.addLink( CMBR , CMBR_host )
self.addLink( CHCG , CHCG_host )
self.addLink( CLEV , CLEV_host )
self.addLink( RLGH , RLGH_host )
self.addLink( ATLN , ATLN_host )
self.addLink( PHLA , PHLA_host )
self.addLink( WASH , WASH_host )
self.addLink( NSVL , NSVL_host )
self.addLink( STLS , STLS_host )
self.addLink( NWOR , NWOR_host )
self.addLink( HSTN , HSTN_host )
self.addLink( SNAN , SNAN_host )
self.addLink( DLLS , DLLS_host )
self.addLink( ORLD , ORLD_host )
self.addLink( DNVR , DNVR_host )
self.addLink( KSCY , KSCY_host )
self.addLink( SNFN , SNFN_host )
self.addLink( SCRM , SCRM_host )
self.addLink( PTLD , PTLD_host )
self.addLink( STTL , STTL_host )
self.addLink( SLKC , SLKC_host )
self.addLink( LA03 , LA03_host )
self.addLink( SNDG , SNDG_host )
self.addLink( PHNX , PHNX_host )
# add edges between switches
self.addLink( NY54 , CMBR)
self.addLink( NY54 , CMBR)
self.addLink( NY54 , CMBR)
self.addLink( NY54 , CHCG)
self.addLink( NY54 , PHLA)
self.addLink( NY54 , PHLA)
self.addLink( NY54 , WASH)
self.addLink( CMBR , PHLA)
self.addLink( CHCG , CLEV)
self.addLink( CHCG , PHLA)
self.addLink( CHCG , STLS)
self.addLink( CHCG , DNVR)
self.addLink( CHCG , KSCY)
self.addLink( CHCG , KSCY)
self.addLink( CHCG , SNFN)
self.addLink( CHCG , STTL)
self.addLink( CHCG , SLKC)
self.addLink( CLEV , NSVL)
self.addLink( CLEV , STLS)
self.addLink( CLEV , PHLA)
self.addLink( RLGH , ATLN)
self.addLink( RLGH , WASH)
self.addLink( ATLN , WASH)
self.addLink( ATLN , NSVL)
self.addLink( ATLN , STLS)
self.addLink( ATLN , DLLS)
self.addLink( ATLN , DLLS)
self.addLink( ATLN , DLLS)
self.addLink( ATLN , ORLD)
self.addLink( PHLA , WASH)
self.addLink( NSVL , STLS)
self.addLink( NSVL , DLLS)
self.addLink( STLS , DLLS)
self.addLink( STLS , KSCY)
self.addLink( STLS , LA03)
self.addLink( NWOR , HSTN)
self.addLink( NWOR , DLLS)
self.addLink( NWOR , ORLD)
self.addLink( HSTN , SNAN)
self.addLink( HSTN , DLLS)
self.addLink( HSTN , ORLD)
self.addLink( SNAN , PHNX)
self.addLink( SNAN , DLLS)
self.addLink( DLLS , DNVR)
self.addLink( DLLS , DNVR)
self.addLink( DLLS , KSCY)
self.addLink( DLLS , KSCY)
self.addLink( DLLS , SNFN)
self.addLink( DLLS , LA03)
self.addLink( DLLS , LA03)
self.addLink( DNVR , KSCY)
self.addLink( DNVR , SNFN)
self.addLink( DNVR , SNFN)
self.addLink( DNVR , SLKC)
self.addLink( KSCY , SNFN)
self.addLink( SNFN , SCRM)
self.addLink( SNFN , PTLD)
self.addLink( SNFN , STTL)
self.addLink( SNFN , SLKC)
self.addLink( SNFN , LA03)
self.addLink( SNFN , LA03)
self.addLink( SNFN , LA03)
self.addLink( SCRM , SLKC)
self.addLink( PTLD , STTL)
self.addLink( SLKC , LA03)
self.addLink( LA03 , SNDG)
self.addLink( LA03 , SNDG)
self.addLink( LA03 , PHNX)
self.addLink( LA03 , PHNX)
self.addLink( SNDG , PHNX)
topos = { 'att': ( lambda: AttMplsTopo() ) }
if __name__ == '__main__':
from onosnet import run
run( AttMplsTopo() )
| 38.032967 | 63 | 0.572811 |
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import RemoteController
from mininet.node import Node
from mininet.node import CPULimitedHost
from mininet.link import TCLink
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.util import dumpNodeConnections
class AttMplsTopo( Topo ):
def __init__( self ):
Topo.__init__( self )
NY54 = self.addSwitch( 's25' )
CMBR = self.addSwitch( 's1' )
CHCG = self.addSwitch( 's2' )
CLEV = self.addSwitch( 's3' )
RLGH = self.addSwitch( 's4' )
ATLN = self.addSwitch( 's5' )
PHLA = self.addSwitch( 's6' )
WASH = self.addSwitch( 's7' )
NSVL = self.addSwitch( 's8' )
STLS = self.addSwitch( 's9' )
NWOR = self.addSwitch( 's10' )
HSTN = self.addSwitch( 's11' )
SNAN = self.addSwitch( 's12' )
DLLS = self.addSwitch( 's13' )
ORLD = self.addSwitch( 's14' )
DNVR = self.addSwitch( 's15' )
KSCY = self.addSwitch( 's16' )
SNFN = self.addSwitch( 's17' )
SCRM = self.addSwitch( 's18' )
PTLD = self.addSwitch( 's19' )
STTL = self.addSwitch( 's20' )
SLKC = self.addSwitch( 's21' )
LA03 = self.addSwitch( 's22' )
SNDG = self.addSwitch( 's23' )
PHNX = self.addSwitch( 's24' )
NY54_host = self.addHost( 'h25' )
CMBR_host = self.addHost( 'h1' )
CHCG_host = self.addHost( 'h2' )
CLEV_host = self.addHost( 'h3' )
RLGH_host = self.addHost( 'h4' )
ATLN_host = self.addHost( 'h5' )
PHLA_host = self.addHost( 'h6' )
WASH_host = self.addHost( 'h7' )
NSVL_host = self.addHost( 'h8' )
STLS_host = self.addHost( 'h9' )
NWOR_host = self.addHost( 'h10' )
HSTN_host = self.addHost( 'h11' )
SNAN_host = self.addHost( 'h12' )
DLLS_host = self.addHost( 'h13' )
ORLD_host = self.addHost( 'h14' )
DNVR_host = self.addHost( 'h15' )
KSCY_host = self.addHost( 'h16' )
SNFN_host = self.addHost( 'h17' )
SCRM_host = self.addHost( 'h18' )
PTLD_host = self.addHost( 'h19' )
STTL_host = self.addHost( 'h20' )
SLKC_host = self.addHost( 'h21' )
LA03_host = self.addHost( 'h22' )
SNDG_host = self.addHost( 'h23' )
PHNX_host = self.addHost( 'h24' )
self.addLink( NY54 , NY54_host )
self.addLink( CMBR , CMBR_host )
self.addLink( CHCG , CHCG_host )
self.addLink( CLEV , CLEV_host )
self.addLink( RLGH , RLGH_host )
self.addLink( ATLN , ATLN_host )
self.addLink( PHLA , PHLA_host )
self.addLink( WASH , WASH_host )
self.addLink( NSVL , NSVL_host )
self.addLink( STLS , STLS_host )
self.addLink( NWOR , NWOR_host )
self.addLink( HSTN , HSTN_host )
self.addLink( SNAN , SNAN_host )
self.addLink( DLLS , DLLS_host )
self.addLink( ORLD , ORLD_host )
self.addLink( DNVR , DNVR_host )
self.addLink( KSCY , KSCY_host )
self.addLink( SNFN , SNFN_host )
self.addLink( SCRM , SCRM_host )
self.addLink( PTLD , PTLD_host )
self.addLink( STTL , STTL_host )
self.addLink( SLKC , SLKC_host )
self.addLink( LA03 , LA03_host )
self.addLink( SNDG , SNDG_host )
self.addLink( PHNX , PHNX_host )
self.addLink( NY54 , CMBR)
self.addLink( NY54 , CMBR)
self.addLink( NY54 , CMBR)
self.addLink( NY54 , CHCG)
self.addLink( NY54 , PHLA)
self.addLink( NY54 , PHLA)
self.addLink( NY54 , WASH)
self.addLink( CMBR , PHLA)
self.addLink( CHCG , CLEV)
self.addLink( CHCG , PHLA)
self.addLink( CHCG , STLS)
self.addLink( CHCG , DNVR)
self.addLink( CHCG , KSCY)
self.addLink( CHCG , KSCY)
self.addLink( CHCG , SNFN)
self.addLink( CHCG , STTL)
self.addLink( CHCG , SLKC)
self.addLink( CLEV , NSVL)
self.addLink( CLEV , STLS)
self.addLink( CLEV , PHLA)
self.addLink( RLGH , ATLN)
self.addLink( RLGH , WASH)
self.addLink( ATLN , WASH)
self.addLink( ATLN , NSVL)
self.addLink( ATLN , STLS)
self.addLink( ATLN , DLLS)
self.addLink( ATLN , DLLS)
self.addLink( ATLN , DLLS)
self.addLink( ATLN , ORLD)
self.addLink( PHLA , WASH)
self.addLink( NSVL , STLS)
self.addLink( NSVL , DLLS)
self.addLink( STLS , DLLS)
self.addLink( STLS , KSCY)
self.addLink( STLS , LA03)
self.addLink( NWOR , HSTN)
self.addLink( NWOR , DLLS)
self.addLink( NWOR , ORLD)
self.addLink( HSTN , SNAN)
self.addLink( HSTN , DLLS)
self.addLink( HSTN , ORLD)
self.addLink( SNAN , PHNX)
self.addLink( SNAN , DLLS)
self.addLink( DLLS , DNVR)
self.addLink( DLLS , DNVR)
self.addLink( DLLS , KSCY)
self.addLink( DLLS , KSCY)
self.addLink( DLLS , SNFN)
self.addLink( DLLS , LA03)
self.addLink( DLLS , LA03)
self.addLink( DNVR , KSCY)
self.addLink( DNVR , SNFN)
self.addLink( DNVR , SNFN)
self.addLink( DNVR , SLKC)
self.addLink( KSCY , SNFN)
self.addLink( SNFN , SCRM)
self.addLink( SNFN , PTLD)
self.addLink( SNFN , STTL)
self.addLink( SNFN , SLKC)
self.addLink( SNFN , LA03)
self.addLink( SNFN , LA03)
self.addLink( SNFN , LA03)
self.addLink( SCRM , SLKC)
self.addLink( PTLD , STTL)
self.addLink( SLKC , LA03)
self.addLink( LA03 , SNDG)
self.addLink( LA03 , SNDG)
self.addLink( LA03 , PHNX)
self.addLink( LA03 , PHNX)
self.addLink( SNDG , PHNX)
topos = { 'att': ( lambda: AttMplsTopo() ) }
if __name__ == '__main__':
from onosnet import run
run( AttMplsTopo() )
| true | true |
1c30bade337263b732d8b603b46dd7115aea1503 | 22,923 | gyp | Python | electron.gyp | yuit/electron | 0732329a387badc9e7cd5473e84fc696f0ec8f24 | [
"MIT"
] | 1 | 2021-04-04T20:41:11.000Z | 2021-04-04T20:41:11.000Z | electron.gyp | ezaruba/electron | dfab1043d98067b43b45d8dcbe9d0b84d4820555 | [
"MIT"
] | null | null | null | electron.gyp | ezaruba/electron | dfab1043d98067b43b45d8dcbe9d0b84d4820555 | [
"MIT"
] | null | null | null | {
'variables': {
'project_name%': 'electron',
'product_name%': 'Electron',
'company_name%': 'GitHub, Inc',
'company_abbr%': 'github',
'version%': '1.7.2',
'js2c_input_dir': '<(SHARED_INTERMEDIATE_DIR)/js2c',
},
'includes': [
'filenames.gypi',
'vendor/native_mate/native_mate_files.gypi',
],
'target_defaults': {
'defines': [
'ATOM_PRODUCT_NAME="<(product_name)"',
'ATOM_PROJECT_NAME="<(project_name)"',
],
'conditions': [
['OS=="mac"', {
'mac_framework_dirs': [
'<(source_root)/external_binaries',
],
}],
],
},
'targets': [
{
'target_name': '<(project_name)',
'type': 'executable',
'dependencies': [
'js2asar',
'app2asar',
'<(project_name)_lib',
],
'sources': [
'<@(app_sources)',
],
'include_dirs': [
'.',
],
'conditions': [
['OS=="mac"', {
'product_name': '<(product_name)',
'mac_bundle': 1,
'dependencies!': [
'<(project_name)_lib',
],
'dependencies': [
'<(project_name)_framework',
'<(project_name)_helper',
],
'xcode_settings': {
'ATOM_BUNDLE_ID': 'com.<(company_abbr).<(project_name)',
'INFOPLIST_FILE': 'atom/browser/resources/mac/Info.plist',
'LD_RUNPATH_SEARCH_PATHS': [
'@executable_path/../Frameworks',
],
},
'mac_bundle_resources': [
'<@(bundle_sources)',
],
'copies': [
{
'destination': '<(PRODUCT_DIR)/<(product_name).app/Contents/Frameworks',
'files': [
'<(PRODUCT_DIR)/<(product_name) Helper.app',
'<(PRODUCT_DIR)/<(product_name) Framework.framework',
],
},
],
'postbuilds': [
{
                # This postbuild step is responsible for creating the following
# helpers:
#
# <(product_name) EH.app and <(product_name) NP.app are created
# from <(product_name).app.
#
# The EH helper is marked for an executable heap. The NP helper
# is marked for no PIE (ASLR).
'postbuild_name': 'Make More Helpers',
'action': [
'tools/mac/make_more_helpers.sh',
'Frameworks',
'<(product_name)',
],
},
# The application doesn't have real localizations, it just has
# empty .lproj directories, which is enough to convince Cocoa
# that Electron supports those languages.
{
'postbuild_name': 'Make Empty Localizations',
'variables': {
'apply_locales_cmd': ['python', 'tools/mac/apply_locales.py'],
'locale_dirs': [
'>!@(<(apply_locales_cmd) -d ZZLOCALE.lproj <(locales))',
],
},
'action': [
'tools/mac/make_locale_dirs.sh',
'<@(locale_dirs)',
],
},
],
'conditions': [
['mas_build==0', {
'copies': [
{
'destination': '<(PRODUCT_DIR)/<(product_name).app/Contents/Frameworks',
'files': [
'external_binaries/Squirrel.framework',
'external_binaries/ReactiveCocoa.framework',
'external_binaries/Mantle.framework',
],
},
],
}],
],
}], # OS!="mac"
['OS=="win"', {
'include_dirs': [
'<(libchromiumcontent_dir)/gen/ui/resources',
],
'msvs_settings': {
'VCManifestTool': {
'EmbedManifest': 'true',
'AdditionalManifestFiles': 'atom/browser/resources/win/atom.manifest',
},
'VCLinkerTool': {
# Chrome builds with this minimum environment which makes e.g.
# GetSystemMetrics(SM_CXSIZEFRAME) return Windows XP/2003
# compatible metrics. See: https://crbug.com/361720
#
# The following two settings translate to a linker flag
# of /SUBSYSTEM:WINDOWS,5.02
'MinimumRequiredVersion': '5.02',
'SubSystem': '2',
},
},
'copies': [
{
'variables': {
'conditions': [
['libchromiumcontent_component', {
'copied_libraries': [
'<@(libchromiumcontent_shared_libraries)',
'<@(libchromiumcontent_shared_v8_libraries)',
],
}, {
'copied_libraries': [
'<(libchromiumcontent_dir)/ffmpeg.dll',
],
}],
],
},
'destination': '<(PRODUCT_DIR)',
'files': [
'<@(copied_libraries)',
'<(libchromiumcontent_dir)/locales',
'<(libchromiumcontent_dir)/libEGL.dll',
'<(libchromiumcontent_dir)/libGLESv2.dll',
'<(libchromiumcontent_dir)/icudtl.dat',
'<(libchromiumcontent_dir)/blink_image_resources_200_percent.pak',
'<(libchromiumcontent_dir)/content_resources_200_percent.pak',
'<(libchromiumcontent_dir)/content_shell.pak',
'<(libchromiumcontent_dir)/ui_resources_200_percent.pak',
'<(libchromiumcontent_dir)/views_resources_200_percent.pak',
'<(libchromiumcontent_dir)/natives_blob.bin',
'<(libchromiumcontent_dir)/snapshot_blob.bin',
'external_binaries/d3dcompiler_47.dll',
],
},
],
}, {
'dependencies': [
'vendor/breakpad/breakpad.gyp:dump_syms#host',
],
}], # OS=="win"
['OS=="linux"', {
'copies': [
{
'variables': {
'conditions': [
['libchromiumcontent_component', {
'copied_libraries': [
'<(PRODUCT_DIR)/lib/libnode.so',
'<@(libchromiumcontent_shared_libraries)',
'<@(libchromiumcontent_shared_v8_libraries)',
],
}, {
'copied_libraries': [
'<(PRODUCT_DIR)/lib/libnode.so',
'<(libchromiumcontent_dir)/libffmpeg.so',
],
}],
],
},
'destination': '<(PRODUCT_DIR)',
'files': [
'<@(copied_libraries)',
'<(libchromiumcontent_dir)/locales',
'<(libchromiumcontent_dir)/icudtl.dat',
'<(libchromiumcontent_dir)/blink_image_resources_200_percent.pak',
'<(libchromiumcontent_dir)/content_resources_200_percent.pak',
'<(libchromiumcontent_dir)/content_shell.pak',
'<(libchromiumcontent_dir)/ui_resources_200_percent.pak',
'<(libchromiumcontent_dir)/views_resources_200_percent.pak',
'<(libchromiumcontent_dir)/natives_blob.bin',
'<(libchromiumcontent_dir)/snapshot_blob.bin',
],
},
],
}], # OS=="linux"
],
}, # target <(project_name)
{
'target_name': '<(project_name)_lib',
'type': 'static_library',
'dependencies': [
'atom_js2c',
'vendor/pdf_viewer/pdf_viewer.gyp:pdf_viewer',
'brightray/brightray.gyp:brightray',
'vendor/node/node.gyp:node',
],
'defines': [
# We need to access internal implementations of Node.
'NODE_WANT_INTERNALS=1',
'NODE_SHARED_MODE',
'HAVE_INSPECTOR=1',
# This is defined in skia/skia_common.gypi.
'SK_SUPPORT_LEGACY_GETTOPDEVICE',
# Disable warnings for g_settings_list_schemas.
'GLIB_DISABLE_DEPRECATION_WARNINGS',
# Defined in Chromium but not exposed in its gyp file.
'V8_USE_EXTERNAL_STARTUP_DATA',
'V8_SHARED',
'USING_V8_SHARED',
'USING_V8_PLATFORM_SHARED',
'USING_V8_BASE_SHARED',
],
'sources': [
'<@(lib_sources)',
],
'include_dirs': [
'.',
'chromium_src',
'vendor/native_mate',
# Include atom_natives.h.
'<(SHARED_INTERMEDIATE_DIR)',
# Include directories for uv and node.
'vendor/node/src',
'vendor/node/deps/http_parser',
'vendor/node/deps/uv/include',
# The `node.h` is using `#include"v8.h"`.
'<(libchromiumcontent_src_dir)/v8/include',
# The `node.h` is using `#include"ares.h"`.
'vendor/node/deps/cares/include',
# The `third_party/WebKit/Source/platform/weborigin/SchemeRegistry.h` is using `platform/PlatformExport.h`.
'<(libchromiumcontent_src_dir)/third_party/WebKit/Source',
# The 'third_party/libyuv/include/libyuv/scale_argb.h' is using 'libyuv/basic_types.h'.
'<(libchromiumcontent_src_dir)/third_party/libyuv/include',
# The 'third_party/webrtc/modules/desktop_capture/desktop_frame.h' is using 'webrtc/base/scoped_ptr.h'.
'<(libchromiumcontent_src_dir)/third_party/',
'<(libchromiumcontent_src_dir)/components/cdm',
'<(libchromiumcontent_src_dir)/third_party/widevine',
],
'direct_dependent_settings': {
'include_dirs': [
'.',
],
},
'export_dependent_settings': [
'brightray/brightray.gyp:brightray',
],
'conditions': [
['libchromiumcontent_component', {
'link_settings': {
'libraries': [ '<@(libchromiumcontent_v8_libraries)' ],
},
}],
['OS=="win"', {
'sources': [
'<@(lib_sources_win)',
],
'link_settings': {
'libraries': [
'-limm32.lib',
'-loleacc.lib',
'-lcomctl32.lib',
'-lcomdlg32.lib',
'-lwininet.lib',
'-lwinmm.lib',
'-lcrypt32.lib',
'-luiautomationcore.lib',
],
},
'dependencies': [
# Node is built as static_library on Windows, so we also need to
# include its dependencies here.
'vendor/node/deps/cares/cares.gyp:cares',
'vendor/node/deps/http_parser/http_parser.gyp:http_parser',
'vendor/node/deps/uv/uv.gyp:libuv',
'vendor/node/deps/zlib/zlib.gyp:zlib',
# Build with breakpad support.
'vendor/breakpad/breakpad.gyp:breakpad_handler',
'vendor/breakpad/breakpad.gyp:breakpad_sender',
],
}], # OS=="win"
['OS=="mac" and mas_build==0', {
'dependencies': [
'vendor/crashpad/client/client.gyp:crashpad_client',
'vendor/crashpad/handler/handler.gyp:crashpad_handler',
],
'link_settings': {
# Do not link with QTKit for mas build.
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/QTKit.framework',
],
},
'xcode_settings': {
# ReactiveCocoa which is used by Squirrel requires using __weak.
'CLANG_ENABLE_OBJC_WEAK': 'YES',
},
}], # OS=="mac" and mas_build==0
['OS=="mac" and mas_build==1', {
'defines': [
'MAS_BUILD',
],
'sources!': [
'atom/browser/auto_updater_mac.mm',
'atom/common/crash_reporter/crash_reporter_mac.h',
'atom/common/crash_reporter/crash_reporter_mac.mm',
],
}], # OS=="mac" and mas_build==1
['OS=="linux"', {
'sources': [
'<@(lib_sources_linux)',
'<@(lib_sources_nss)',
],
'link_settings': {
'ldflags': [
# Make binary search for libraries under current directory, so we
# don't have to manually set $LD_LIBRARY_PATH:
# http://serverfault.com/questions/279068/cant-find-so-in-the-same-directory-as-the-executable
'-rpath \$$ORIGIN',
# Make native module dynamic loading work.
'-rdynamic',
],
},
# Required settings of using breakpad.
'cflags_cc': [
'-Wno-empty-body',
'-Wno-reserved-user-defined-literal',
],
'include_dirs': [
'vendor/breakpad/src',
],
'dependencies': [
'vendor/breakpad/breakpad.gyp:breakpad_client',
],
}], # OS=="linux"
],
}, # target <(product_name)_lib
{
'target_name': 'js2asar',
'type': 'none',
'actions': [
{
'action_name': 'js2asar',
'variables': {
'conditions': [
['OS=="mac"', {
'resources_path': '<(PRODUCT_DIR)/<(product_name).app/Contents/Resources',
},{
'resources_path': '<(PRODUCT_DIR)/resources',
}],
],
},
'inputs': [
'<@(js_sources)',
],
'outputs': [
'<(resources_path)/electron.asar',
],
'action': [
'python',
'tools/js2asar.py',
'<@(_outputs)',
'lib',
'<@(_inputs)',
],
}
],
}, # target js2asar
{
'target_name': 'app2asar',
'type': 'none',
'actions': [
{
'action_name': 'app2asar',
'variables': {
'conditions': [
['OS=="mac"', {
'resources_path': '<(PRODUCT_DIR)/<(product_name).app/Contents/Resources',
},{
'resources_path': '<(PRODUCT_DIR)/resources',
}],
],
},
'inputs': [
'<@(default_app_sources)',
],
'outputs': [
'<(resources_path)/default_app.asar',
],
'action': [
'python',
'tools/js2asar.py',
'<@(_outputs)',
'default_app',
'<@(_inputs)',
],
}
],
}, # target app2asar
{
'target_name': 'atom_js2c_copy',
'type': 'none',
'copies': [
{
'destination': '<(js2c_input_dir)',
'files': [
'<@(js2c_sources)',
],
},
],
}, # target atom_js2c_copy
{
'target_name': 'atom_browserify',
'type': 'none',
'dependencies': [
# depend on this target to ensure the '<(js2c_input_dir)' is created
'atom_js2c_copy',
],
'variables': {
'sandbox_args': [
'./lib/sandboxed_renderer/init.js',
'-r',
'./lib/sandboxed_renderer/api/exports/electron.js:electron',
'-r',
'./lib/sandboxed_renderer/api/exports/fs.js:fs',
'-r',
'./lib/sandboxed_renderer/api/exports/os.js:os',
'-r',
'./lib/sandboxed_renderer/api/exports/path.js:path',
'-r',
'./lib/sandboxed_renderer/api/exports/child_process.js:child_process'
],
'isolated_args': [
'lib/isolated_renderer/init.js',
]
},
'actions': [
{
'action_name': 'atom_browserify_sandbox',
'inputs': [
'<!@(python tools/list-browserify-deps.py <(sandbox_args))'
],
'outputs': [
'<(js2c_input_dir)/preload_bundle.js',
],
'action': [
'npm',
'run',
'--silent',
'browserify',
'--',
'<@(sandbox_args)',
'-o',
'<@(_outputs)',
],
},
{
'action_name': 'atom_browserify_isolated_context',
'inputs': [
'<!@(python tools/list-browserify-deps.py <(isolated_args))'
],
'outputs': [
'<(js2c_input_dir)/isolated_bundle.js',
],
'action': [
'npm',
'run',
'--silent',
'browserify',
'--',
'<@(isolated_args)',
'-o',
'<@(_outputs)',
],
},
],
}, # target atom_browserify
{
'target_name': 'atom_js2c',
'type': 'none',
'dependencies': [
'atom_js2c_copy',
'atom_browserify',
],
'actions': [
{
'action_name': 'atom_js2c',
'inputs': [
# List all input files that should trigger a rebuild with js2c
'<@(js2c_sources)',
'<(js2c_input_dir)/preload_bundle.js',
'<(js2c_input_dir)/isolated_bundle.js',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/atom_natives.h',
],
'action': [
'python',
'tools/js2c.py',
'<@(_outputs)',
'<(js2c_input_dir)',
],
}
],
}, # target atom_js2c
],
'conditions': [
['OS=="mac"', {
'targets': [
{
'target_name': '<(project_name)_framework',
'product_name': '<(product_name) Framework',
'type': 'shared_library',
'dependencies': [
'<(project_name)_lib',
],
'sources': [
'<@(framework_sources)',
],
'include_dirs': [
'.',
'vendor',
'<(libchromiumcontent_src_dir)',
],
'export_dependent_settings': [
'<(project_name)_lib',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Carbon.framework',
'$(SDKROOT)/System/Library/Frameworks/QuartzCore.framework',
'$(SDKROOT)/System/Library/Frameworks/Quartz.framework',
'$(SDKROOT)/System/Library/Frameworks/Security.framework',
'$(SDKROOT)/System/Library/Frameworks/SecurityInterface.framework',
],
},
'mac_bundle': 1,
'mac_bundle_resources': [
'atom/common/resources/mac/MainMenu.xib',
'<(libchromiumcontent_dir)/content_shell.pak',
'<(libchromiumcontent_dir)/icudtl.dat',
'<(libchromiumcontent_dir)/natives_blob.bin',
'<(libchromiumcontent_dir)/snapshot_blob.bin',
'<(PRODUCT_DIR)/pdf_viewer_resources.pak',
],
'xcode_settings': {
'ATOM_BUNDLE_ID': 'com.<(company_abbr).<(project_name).framework',
'INFOPLIST_FILE': 'atom/common/resources/mac/Info.plist',
'LD_DYLIB_INSTALL_NAME': '@rpath/<(product_name) Framework.framework/<(product_name) Framework',
'LD_RUNPATH_SEARCH_PATHS': [
'@loader_path/Libraries',
],
'OTHER_LDFLAGS': [
'-ObjC',
],
},
'copies': [
{
'variables': {
'conditions': [
['libchromiumcontent_component', {
'copied_libraries': [
'<(PRODUCT_DIR)/libnode.dylib',
'<@(libchromiumcontent_shared_libraries)',
'<@(libchromiumcontent_shared_v8_libraries)',
],
}, {
'copied_libraries': [
'<(PRODUCT_DIR)/libnode.dylib',
'<(libchromiumcontent_dir)/libffmpeg.dylib',
],
}],
],
},
'destination': '<(PRODUCT_DIR)/<(product_name) Framework.framework/Versions/A/Libraries',
'files': [
'<@(copied_libraries)',
],
},
],
'postbuilds': [
{
'postbuild_name': 'Fix path of libnode',
'action': [
'install_name_tool',
'-change',
'/usr/local/lib/libnode.dylib',
'@rpath/libnode.dylib',
'${BUILT_PRODUCTS_DIR}/<(product_name) Framework.framework/Versions/A/<(product_name) Framework',
],
},
{
'postbuild_name': 'Add symlinks for framework subdirectories',
'action': [
'tools/mac/create-framework-subdir-symlinks.sh',
'<(product_name) Framework',
'Libraries',
],
},
{
'postbuild_name': 'Copy locales',
'action': [
'tools/mac/copy-locales.py',
'-d',
'<(libchromiumcontent_dir)/locales',
'${BUILT_PRODUCTS_DIR}/<(product_name) Framework.framework/Resources',
'<@(locales)',
],
},
],
'conditions': [
['mas_build==0', {
'link_settings': {
'libraries': [
'external_binaries/Squirrel.framework',
'external_binaries/ReactiveCocoa.framework',
'external_binaries/Mantle.framework',
],
},
'copies': [
{
'destination': '<(PRODUCT_DIR)/<(product_name) Framework.framework/Versions/A/Resources',
'files': [
'<(PRODUCT_DIR)/crashpad_handler',
],
},
],
}],
],
}, # target framework
{
'target_name': '<(project_name)_helper',
'product_name': '<(product_name) Helper',
'type': 'executable',
'dependencies': [
'<(project_name)_framework',
],
'sources': [
'<@(app_sources)',
],
'include_dirs': [
'.',
],
'mac_bundle': 1,
'xcode_settings': {
'ATOM_BUNDLE_ID': 'com.<(company_abbr).<(project_name).helper',
'INFOPLIST_FILE': 'atom/renderer/resources/mac/Info.plist',
'LD_RUNPATH_SEARCH_PATHS': [
'@executable_path/../../..',
],
},
}, # target helper
],
}], # OS!="mac"
],
}
| 33.513158 | 115 | 0.466344 | {
'variables': {
'project_name%': 'electron',
'product_name%': 'Electron',
'company_name%': 'GitHub, Inc',
'company_abbr%': 'github',
'version%': '1.7.2',
'js2c_input_dir': '<(SHARED_INTERMEDIATE_DIR)/js2c',
},
'includes': [
'filenames.gypi',
'vendor/native_mate/native_mate_files.gypi',
],
'target_defaults': {
'defines': [
'ATOM_PRODUCT_NAME="<(product_name)"',
'ATOM_PROJECT_NAME="<(project_name)"',
],
'conditions': [
['OS=="mac"', {
'mac_framework_dirs': [
'<(source_root)/external_binaries',
],
}],
],
},
'targets': [
{
'target_name': '<(project_name)',
'type': 'executable',
'dependencies': [
'js2asar',
'app2asar',
'<(project_name)_lib',
],
'sources': [
'<@(app_sources)',
],
'include_dirs': [
'.',
],
'conditions': [
['OS=="mac"', {
'product_name': '<(product_name)',
'mac_bundle': 1,
'dependencies!': [
'<(project_name)_lib',
],
'dependencies': [
'<(project_name)_framework',
'<(project_name)_helper',
],
'xcode_settings': {
'ATOM_BUNDLE_ID': 'com.<(company_abbr).<(project_name)',
'INFOPLIST_FILE': 'atom/browser/resources/mac/Info.plist',
'LD_RUNPATH_SEARCH_PATHS': [
'@executable_path/../Frameworks',
],
},
'mac_bundle_resources': [
'<@(bundle_sources)',
],
'copies': [
{
'destination': '<(PRODUCT_DIR)/<(product_name).app/Contents/Frameworks',
'files': [
'<(PRODUCT_DIR)/<(product_name) Helper.app',
'<(PRODUCT_DIR)/<(product_name) Framework.framework',
],
},
],
'postbuilds': [
{
'postbuild_name': 'Make More Helpers',
'action': [
'tools/mac/make_more_helpers.sh',
'Frameworks',
'<(product_name)',
],
},
# empty .lproj directories, which is enough to convince Cocoa
# that Electron supports those languages.
{
'postbuild_name': 'Make Empty Localizations',
'variables': {
'apply_locales_cmd': ['python', 'tools/mac/apply_locales.py'],
'locale_dirs': [
'>!@(<(apply_locales_cmd) -d ZZLOCALE.lproj <(locales))',
],
},
'action': [
'tools/mac/make_locale_dirs.sh',
'<@(locale_dirs)',
],
},
],
'conditions': [
['mas_build==0', {
'copies': [
{
'destination': '<(PRODUCT_DIR)/<(product_name).app/Contents/Frameworks',
'files': [
'external_binaries/Squirrel.framework',
'external_binaries/ReactiveCocoa.framework',
'external_binaries/Mantle.framework',
],
},
],
}],
],
}], # OS!="mac"
['OS=="win"', {
'include_dirs': [
'<(libchromiumcontent_dir)/gen/ui/resources',
],
'msvs_settings': {
'VCManifestTool': {
'EmbedManifest': 'true',
'AdditionalManifestFiles': 'atom/browser/resources/win/atom.manifest',
},
'VCLinkerTool': {
# Chrome builds with this minimum environment which makes e.g.
# GetSystemMetrics(SM_CXSIZEFRAME) return Windows XP/2003
# compatible metrics. See: https://crbug.com/361720
#
# The following two settings translate to a linker flag
# of /SUBSYSTEM:WINDOWS,5.02
'MinimumRequiredVersion': '5.02',
'SubSystem': '2',
},
},
'copies': [
{
'variables': {
'conditions': [
['libchromiumcontent_component', {
'copied_libraries': [
'<@(libchromiumcontent_shared_libraries)',
'<@(libchromiumcontent_shared_v8_libraries)',
],
}, {
'copied_libraries': [
'<(libchromiumcontent_dir)/ffmpeg.dll',
],
}],
],
},
'destination': '<(PRODUCT_DIR)',
'files': [
'<@(copied_libraries)',
'<(libchromiumcontent_dir)/locales',
'<(libchromiumcontent_dir)/libEGL.dll',
'<(libchromiumcontent_dir)/libGLESv2.dll',
'<(libchromiumcontent_dir)/icudtl.dat',
'<(libchromiumcontent_dir)/blink_image_resources_200_percent.pak',
'<(libchromiumcontent_dir)/content_resources_200_percent.pak',
'<(libchromiumcontent_dir)/content_shell.pak',
'<(libchromiumcontent_dir)/ui_resources_200_percent.pak',
'<(libchromiumcontent_dir)/views_resources_200_percent.pak',
'<(libchromiumcontent_dir)/natives_blob.bin',
'<(libchromiumcontent_dir)/snapshot_blob.bin',
'external_binaries/d3dcompiler_47.dll',
],
},
],
}, {
'dependencies': [
'vendor/breakpad/breakpad.gyp:dump_syms
],
}], # OS=="win"
['OS=="linux"', {
'copies': [
{
'variables': {
'conditions': [
['libchromiumcontent_component', {
'copied_libraries': [
'<(PRODUCT_DIR)/lib/libnode.so',
'<@(libchromiumcontent_shared_libraries)',
'<@(libchromiumcontent_shared_v8_libraries)',
],
}, {
'copied_libraries': [
'<(PRODUCT_DIR)/lib/libnode.so',
'<(libchromiumcontent_dir)/libffmpeg.so',
],
}],
],
},
'destination': '<(PRODUCT_DIR)',
'files': [
'<@(copied_libraries)',
'<(libchromiumcontent_dir)/locales',
'<(libchromiumcontent_dir)/icudtl.dat',
'<(libchromiumcontent_dir)/blink_image_resources_200_percent.pak',
'<(libchromiumcontent_dir)/content_resources_200_percent.pak',
'<(libchromiumcontent_dir)/content_shell.pak',
'<(libchromiumcontent_dir)/ui_resources_200_percent.pak',
'<(libchromiumcontent_dir)/views_resources_200_percent.pak',
'<(libchromiumcontent_dir)/natives_blob.bin',
'<(libchromiumcontent_dir)/snapshot_blob.bin',
],
},
],
}], # OS=="linux"
],
}, # target <(project_name)
{
'target_name': '<(project_name)_lib',
'type': 'static_library',
'dependencies': [
'atom_js2c',
'vendor/pdf_viewer/pdf_viewer.gyp:pdf_viewer',
'brightray/brightray.gyp:brightray',
'vendor/node/node.gyp:node',
],
'defines': [
# We need to access internal implementations of Node.
'NODE_WANT_INTERNALS=1',
'NODE_SHARED_MODE',
'HAVE_INSPECTOR=1',
# This is defined in skia/skia_common.gypi.
'SK_SUPPORT_LEGACY_GETTOPDEVICE',
# Disable warnings for g_settings_list_schemas.
'GLIB_DISABLE_DEPRECATION_WARNINGS',
# Defined in Chromium but not exposed in its gyp file.
'V8_USE_EXTERNAL_STARTUP_DATA',
'V8_SHARED',
'USING_V8_SHARED',
'USING_V8_PLATFORM_SHARED',
'USING_V8_BASE_SHARED',
],
'sources': [
'<@(lib_sources)',
],
'include_dirs': [
'.',
'chromium_src',
'vendor/native_mate',
# Include atom_natives.h.
'<(SHARED_INTERMEDIATE_DIR)',
# Include directories for uv and node.
'vendor/node/src',
'vendor/node/deps/http_parser',
'vendor/node/deps/uv/include',
# The `node.h` is using `#include"v8.h"`.
'<(libchromiumcontent_src_dir)/v8/include',
# The `node.h` is using `#include"ares.h"`.
'vendor/node/deps/cares/include',
# The `third_party/WebKit/Source/platform/weborigin/SchemeRegistry.h` is using `platform/PlatformExport.h`.
'<(libchromiumcontent_src_dir)/third_party/WebKit/Source',
# The 'third_party/libyuv/include/libyuv/scale_argb.h' is using 'libyuv/basic_types.h'.
'<(libchromiumcontent_src_dir)/third_party/libyuv/include',
# The 'third_party/webrtc/modules/desktop_capture/desktop_frame.h' is using 'webrtc/base/scoped_ptr.h'.
'<(libchromiumcontent_src_dir)/third_party/',
'<(libchromiumcontent_src_dir)/components/cdm',
'<(libchromiumcontent_src_dir)/third_party/widevine',
],
'direct_dependent_settings': {
'include_dirs': [
'.',
],
},
'export_dependent_settings': [
'brightray/brightray.gyp:brightray',
],
'conditions': [
['libchromiumcontent_component', {
'link_settings': {
'libraries': [ '<@(libchromiumcontent_v8_libraries)' ],
},
}],
['OS=="win"', {
'sources': [
'<@(lib_sources_win)',
],
'link_settings': {
'libraries': [
'-limm32.lib',
'-loleacc.lib',
'-lcomctl32.lib',
'-lcomdlg32.lib',
'-lwininet.lib',
'-lwinmm.lib',
'-lcrypt32.lib',
'-luiautomationcore.lib',
],
},
'dependencies': [
# Node is built as static_library on Windows, so we also need to
# include its dependencies here.
'vendor/node/deps/cares/cares.gyp:cares',
'vendor/node/deps/http_parser/http_parser.gyp:http_parser',
'vendor/node/deps/uv/uv.gyp:libuv',
'vendor/node/deps/zlib/zlib.gyp:zlib',
# Build with breakpad support.
'vendor/breakpad/breakpad.gyp:breakpad_handler',
'vendor/breakpad/breakpad.gyp:breakpad_sender',
],
}], # OS=="win"
['OS=="mac" and mas_build==0', {
'dependencies': [
'vendor/crashpad/client/client.gyp:crashpad_client',
'vendor/crashpad/handler/handler.gyp:crashpad_handler',
],
'link_settings': {
# Do not link with QTKit for mas build.
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/QTKit.framework',
],
},
'xcode_settings': {
# ReactiveCocoa which is used by Squirrel requires using __weak.
'CLANG_ENABLE_OBJC_WEAK': 'YES',
},
}], # OS=="mac" and mas_build==0
['OS=="mac" and mas_build==1', {
'defines': [
'MAS_BUILD',
],
'sources!': [
'atom/browser/auto_updater_mac.mm',
'atom/common/crash_reporter/crash_reporter_mac.h',
'atom/common/crash_reporter/crash_reporter_mac.mm',
],
}], # OS=="mac" and mas_build==1
['OS=="linux"', {
'sources': [
'<@(lib_sources_linux)',
'<@(lib_sources_nss)',
],
'link_settings': {
'ldflags': [
# Make binary search for libraries under current directory, so we
# don't have to manually set $LD_LIBRARY_PATH:
'-rpath \$$ORIGIN',
'-rdynamic',
],
},
'cflags_cc': [
'-Wno-empty-body',
'-Wno-reserved-user-defined-literal',
],
'include_dirs': [
'vendor/breakpad/src',
],
'dependencies': [
'vendor/breakpad/breakpad.gyp:breakpad_client',
],
}],
],
},
{
'target_name': 'js2asar',
'type': 'none',
'actions': [
{
'action_name': 'js2asar',
'variables': {
'conditions': [
['OS=="mac"', {
'resources_path': '<(PRODUCT_DIR)/<(product_name).app/Contents/Resources',
},{
'resources_path': '<(PRODUCT_DIR)/resources',
}],
],
},
'inputs': [
'<@(js_sources)',
],
'outputs': [
'<(resources_path)/electron.asar',
],
'action': [
'python',
'tools/js2asar.py',
'<@(_outputs)',
'lib',
'<@(_inputs)',
],
}
],
},
{
'target_name': 'app2asar',
'type': 'none',
'actions': [
{
'action_name': 'app2asar',
'variables': {
'conditions': [
['OS=="mac"', {
'resources_path': '<(PRODUCT_DIR)/<(product_name).app/Contents/Resources',
},{
'resources_path': '<(PRODUCT_DIR)/resources',
}],
],
},
'inputs': [
'<@(default_app_sources)',
],
'outputs': [
'<(resources_path)/default_app.asar',
],
'action': [
'python',
'tools/js2asar.py',
'<@(_outputs)',
'default_app',
'<@(_inputs)',
],
}
],
},
{
'target_name': 'atom_js2c_copy',
'type': 'none',
'copies': [
{
'destination': '<(js2c_input_dir)',
'files': [
'<@(js2c_sources)',
],
},
],
},
{
'target_name': 'atom_browserify',
'type': 'none',
'dependencies': [
'atom_js2c_copy',
],
'variables': {
'sandbox_args': [
'./lib/sandboxed_renderer/init.js',
'-r',
'./lib/sandboxed_renderer/api/exports/electron.js:electron',
'-r',
'./lib/sandboxed_renderer/api/exports/fs.js:fs',
'-r',
'./lib/sandboxed_renderer/api/exports/os.js:os',
'-r',
'./lib/sandboxed_renderer/api/exports/path.js:path',
'-r',
'./lib/sandboxed_renderer/api/exports/child_process.js:child_process'
],
'isolated_args': [
'lib/isolated_renderer/init.js',
]
},
'actions': [
{
'action_name': 'atom_browserify_sandbox',
'inputs': [
'<!@(python tools/list-browserify-deps.py <(sandbox_args))'
],
'outputs': [
'<(js2c_input_dir)/preload_bundle.js',
],
'action': [
'npm',
'run',
'--silent',
'browserify',
'--',
'<@(sandbox_args)',
'-o',
'<@(_outputs)',
],
},
{
'action_name': 'atom_browserify_isolated_context',
'inputs': [
'<!@(python tools/list-browserify-deps.py <(isolated_args))'
],
'outputs': [
'<(js2c_input_dir)/isolated_bundle.js',
],
'action': [
'npm',
'run',
'--silent',
'browserify',
'--',
'<@(isolated_args)',
'-o',
'<@(_outputs)',
],
},
],
},
{
'target_name': 'atom_js2c',
'type': 'none',
'dependencies': [
'atom_js2c_copy',
'atom_browserify',
],
'actions': [
{
'action_name': 'atom_js2c',
'inputs': [
'<@(js2c_sources)',
'<(js2c_input_dir)/preload_bundle.js',
'<(js2c_input_dir)/isolated_bundle.js',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/atom_natives.h',
],
'action': [
'python',
'tools/js2c.py',
'<@(_outputs)',
'<(js2c_input_dir)',
],
}
],
},
],
'conditions': [
['OS=="mac"', {
'targets': [
{
'target_name': '<(project_name)_framework',
'product_name': '<(product_name) Framework',
'type': 'shared_library',
'dependencies': [
'<(project_name)_lib',
],
'sources': [
'<@(framework_sources)',
],
'include_dirs': [
'.',
'vendor',
'<(libchromiumcontent_src_dir)',
],
'export_dependent_settings': [
'<(project_name)_lib',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Carbon.framework',
'$(SDKROOT)/System/Library/Frameworks/QuartzCore.framework',
'$(SDKROOT)/System/Library/Frameworks/Quartz.framework',
'$(SDKROOT)/System/Library/Frameworks/Security.framework',
'$(SDKROOT)/System/Library/Frameworks/SecurityInterface.framework',
],
},
'mac_bundle': 1,
'mac_bundle_resources': [
'atom/common/resources/mac/MainMenu.xib',
'<(libchromiumcontent_dir)/content_shell.pak',
'<(libchromiumcontent_dir)/icudtl.dat',
'<(libchromiumcontent_dir)/natives_blob.bin',
'<(libchromiumcontent_dir)/snapshot_blob.bin',
'<(PRODUCT_DIR)/pdf_viewer_resources.pak',
],
'xcode_settings': {
'ATOM_BUNDLE_ID': 'com.<(company_abbr).<(project_name).framework',
'INFOPLIST_FILE': 'atom/common/resources/mac/Info.plist',
'LD_DYLIB_INSTALL_NAME': '@rpath/<(product_name) Framework.framework/<(product_name) Framework',
'LD_RUNPATH_SEARCH_PATHS': [
'@loader_path/Libraries',
],
'OTHER_LDFLAGS': [
'-ObjC',
],
},
'copies': [
{
'variables': {
'conditions': [
['libchromiumcontent_component', {
'copied_libraries': [
'<(PRODUCT_DIR)/libnode.dylib',
'<@(libchromiumcontent_shared_libraries)',
'<@(libchromiumcontent_shared_v8_libraries)',
],
}, {
'copied_libraries': [
'<(PRODUCT_DIR)/libnode.dylib',
'<(libchromiumcontent_dir)/libffmpeg.dylib',
],
}],
],
},
'destination': '<(PRODUCT_DIR)/<(product_name) Framework.framework/Versions/A/Libraries',
'files': [
'<@(copied_libraries)',
],
},
],
'postbuilds': [
{
'postbuild_name': 'Fix path of libnode',
'action': [
'install_name_tool',
'-change',
'/usr/local/lib/libnode.dylib',
'@rpath/libnode.dylib',
'${BUILT_PRODUCTS_DIR}/<(product_name) Framework.framework/Versions/A/<(product_name) Framework',
],
},
{
'postbuild_name': 'Add symlinks for framework subdirectories',
'action': [
'tools/mac/create-framework-subdir-symlinks.sh',
'<(product_name) Framework',
'Libraries',
],
},
{
'postbuild_name': 'Copy locales',
'action': [
'tools/mac/copy-locales.py',
'-d',
'<(libchromiumcontent_dir)/locales',
'${BUILT_PRODUCTS_DIR}/<(product_name) Framework.framework/Resources',
'<@(locales)',
],
},
],
'conditions': [
['mas_build==0', {
'link_settings': {
'libraries': [
'external_binaries/Squirrel.framework',
'external_binaries/ReactiveCocoa.framework',
'external_binaries/Mantle.framework',
],
},
'copies': [
{
'destination': '<(PRODUCT_DIR)/<(product_name) Framework.framework/Versions/A/Resources',
'files': [
'<(PRODUCT_DIR)/crashpad_handler',
],
},
],
}],
],
},
{
'target_name': '<(project_name)_helper',
'product_name': '<(product_name) Helper',
'type': 'executable',
'dependencies': [
'<(project_name)_framework',
],
'sources': [
'<@(app_sources)',
],
'include_dirs': [
'.',
],
'mac_bundle': 1,
'xcode_settings': {
'ATOM_BUNDLE_ID': 'com.<(company_abbr).<(project_name).helper',
'INFOPLIST_FILE': 'atom/renderer/resources/mac/Info.plist',
'LD_RUNPATH_SEARCH_PATHS': [
'@executable_path/../../..',
],
},
},
],
}],
],
}
| true | true |
1c30bb1b26f525b8a9396a9874bf4a1073fc38c1 | 2,674 | py | Python | Contrib/fraggle/fraggle.py | docking-org/rdk | 6eb710254f027b348a8e3089e6a92c3d40de0949 | [
"PostgreSQL"
] | 1 | 2019-01-23T06:02:24.000Z | 2019-01-23T06:02:24.000Z | Contrib/fraggle/fraggle.py | Mike575/rdkit | 373a89021e478f878c6011a201e3fb8f4a122093 | [
"PostgreSQL"
] | null | null | null | Contrib/fraggle/fraggle.py | Mike575/rdkit | 373a89021e478f878c6011a201e3fb8f4a122093 | [
"PostgreSQL"
] | 2 | 2017-12-04T02:28:18.000Z | 2018-11-29T01:18:46.000Z | # Copyright (c) 2013, GlaxoSmithKline Research & Development Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of GlaxoSmithKline Research & Development Ltd.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Jameed Hussain, May 2013
from __future__ import print_function
from rdkit import Chem
from rdkit.Chem.Fraggle import FraggleSim
if __name__ == '__main__':
import sys, re
if (len(sys.argv) >= 2):
print(
"Program to run the first part of Fraggle. Program splits the molecule\nready for the search\n")
print("USAGE: ./fraggle.py <file_of_smiles")
print("Format of smiles file: SMILES ID (space or comma separated)")
print("Output: whole mol smiles,ID,fraggle split smiles\n")
sys.exit(1)
#read the STDIN
for line in sys.stdin:
line = line.rstrip()
smi, id_ = re.split('\s|,', line)
#print smi,id_
mol = Chem.MolFromSmiles(smi)
if mol is None:
sys.stderr.write("Can't generate mol for: %s\n" % (smi))
continue
out_fragments = FraggleSim.generate_fraggle_fragmentation(mol)
#print out the unique fragments
for x in out_fragments:
#cansmi
temp = Chem.MolFromSmiles(x)
print("%s,%s,%s" % (smi, id_, Chem.MolToSmiles(temp)))
| 41.138462 | 102 | 0.727749 |
from __future__ import print_function
from rdkit import Chem
from rdkit.Chem.Fraggle import FraggleSim
if __name__ == '__main__':
import sys, re
if (len(sys.argv) >= 2):
print(
"Program to run the first part of Fraggle. Program splits the molecule\nready for the search\n")
print("USAGE: ./fraggle.py <file_of_smiles")
print("Format of smiles file: SMILES ID (space or comma separated)")
print("Output: whole mol smiles,ID,fraggle split smiles\n")
sys.exit(1)
for line in sys.stdin:
line = line.rstrip()
smi, id_ = re.split('\s|,', line)
mol = Chem.MolFromSmiles(smi)
if mol is None:
sys.stderr.write("Can't generate mol for: %s\n" % (smi))
continue
out_fragments = FraggleSim.generate_fraggle_fragmentation(mol)
#print out the unique fragments
for x in out_fragments:
#cansmi
temp = Chem.MolFromSmiles(x)
print("%s,%s,%s" % (smi, id_, Chem.MolToSmiles(temp)))
| true | true |
1c30bb80b43261276a5ace772b9f2200beeadedf | 1,253 | py | Python | test/functional/rpc_deprecated.py | vipcore/VIP-2 | b5328c58076ee13fc44a58ff7229629546310bee | [
"MIT"
] | 41 | 2015-02-25T20:29:32.000Z | 2021-05-10T11:54:32.000Z | test/functional/rpc_deprecated.py | vipcore/VIP-2 | b5328c58076ee13fc44a58ff7229629546310bee | [
"MIT"
] | 42 | 2017-09-12T03:09:56.000Z | 2021-01-27T18:43:28.000Z | test/functional/rpc_deprecated.py | vipcore/VIP-2 | b5328c58076ee13fc44a58ff7229629546310bee | [
"MIT"
] | 37 | 2015-10-02T19:33:04.000Z | 2021-04-21T22:26:23.000Z | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test deprecation of RPC calls."""
from test_framework.test_framework import PivxTestFramework
from test_framework.util import assert_raises_rpc_error
class DeprecatedRpcTest(PivxTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [[], ["-deprecatedrpc=estimatefee", "-deprecatedrpc=createmultisig"]]
def run_test(self):
self.log.info("estimatefee: Shows deprecated message")
assert_raises_rpc_error(-32, 'estimatefee is deprecated', self.nodes[0].estimatefee, 1)
self.log.info("Using -deprecatedrpc=estimatefee bypasses the error")
self.nodes[1].estimatefee(1)
self.log.info("Make sure that -deprecatedrpc=createmultisig allows it to take addresses")
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, [self.nodes[0].getnewaddress()])
self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
if __name__ == '__main__':
DeprecatedRpcTest().main()
| 44.75 | 123 | 0.729449 |
from test_framework.test_framework import PivxTestFramework
from test_framework.util import assert_raises_rpc_error
class DeprecatedRpcTest(PivxTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [[], ["-deprecatedrpc=estimatefee", "-deprecatedrpc=createmultisig"]]
def run_test(self):
self.log.info("estimatefee: Shows deprecated message")
assert_raises_rpc_error(-32, 'estimatefee is deprecated', self.nodes[0].estimatefee, 1)
self.log.info("Using -deprecatedrpc=estimatefee bypasses the error")
self.nodes[1].estimatefee(1)
self.log.info("Make sure that -deprecatedrpc=createmultisig allows it to take addresses")
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, [self.nodes[0].getnewaddress()])
self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
if __name__ == '__main__':
DeprecatedRpcTest().main()
| true | true |
1c30bc9d855f28bcd1881acd9ddb9e5ac731c364 | 286 | py | Python | flash/text/seq2seq/__init__.py | Isaac-Flath/lightning-flash | 320f87707587d92a13c8831778864b33af4fe421 | [
"Apache-2.0"
] | 2 | 2021-04-23T11:02:21.000Z | 2021-04-23T11:22:19.000Z | flash/text/seq2seq/__init__.py | Isaac-Flath/lightning-flash | 320f87707587d92a13c8831778864b33af4fe421 | [
"Apache-2.0"
] | 1 | 2021-06-16T14:46:06.000Z | 2021-06-16T14:46:06.000Z | flash/text/seq2seq/__init__.py | Isaac-Flath/lightning-flash | 320f87707587d92a13c8831778864b33af4fe421 | [
"Apache-2.0"
] | 3 | 2021-06-03T10:03:04.000Z | 2021-08-08T21:49:16.000Z | from flash.text.seq2seq.core import Seq2SeqData, Seq2SeqFreezeEmbeddings, Seq2SeqTask # noqa: F401
from flash.text.seq2seq.summarization import SummarizationData, SummarizationTask # noqa: F401
from flash.text.seq2seq.translation import TranslationData, TranslationTask # noqa: F401
| 71.5 | 99 | 0.835664 | from flash.text.seq2seq.core import Seq2SeqData, Seq2SeqFreezeEmbeddings, Seq2SeqTask
from flash.text.seq2seq.summarization import SummarizationData, SummarizationTask
from flash.text.seq2seq.translation import TranslationData, TranslationTask
| true | true |
1c30bd8249fa9f37d5092c64eaf31f35ff3b5262 | 2,372 | py | Python | menu.py | melancholiaque/GLaDOS-voice-db | 53af158ef1114a6a58f29abf16980bb91d53a190 | [
"BSD-2-Clause"
] | 1 | 2020-05-24T02:56:08.000Z | 2020-05-24T02:56:08.000Z | menu.py | melancholiaque/GLaDOS-voice-db | 53af158ef1114a6a58f29abf16980bb91d53a190 | [
"BSD-2-Clause"
] | null | null | null | menu.py | melancholiaque/GLaDOS-voice-db | 53af158ef1114a6a58f29abf16980bb91d53a190 | [
"BSD-2-Clause"
] | null | null | null | from os import system, name
import curses
import threading
from cursesmenu.items import SubmenuItem as SubmenuItem_, FunctionItem
from cursesmenu import SelectionMenu as SelectionMenu_, CursesMenu as CursesMenu_
from utils import get_audio, play, save
root = None
class SubmenuItem(SubmenuItem_):
    """SubmenuItem variant that clears the shared root screen first.

    Without the clear, text drawn by the parent menu can linger behind the
    submenu, since all menus in this module share one curses window.
    """

    def action(self):
        # ``root`` is the module-level top menu built at import time.
        global root
        root.stdscr.clear()
        self.submenu.start()
class CursesMenu(CursesMenu_):
    # Shared curses window, initialised once at import time so every menu in
    # this module draws onto the same screen.
    stdscr = curses.initscr()
class SelectionMenu(SelectionMenu_):
    """SelectionMenu that redraws the shared screen when a nested menu exits."""

    def _wrap_start(self):
        # Only the outermost menu owns the curses wrapper; nested menus reuse
        # the already-initialised shared screen and clean it up themselves.
        if self.parent is None:
            curses.wrapper(self._main_loop)
        else:
            self._main_loop(self.stdscr)
            CursesMenu.currently_active_menu = None
            self.stdscr.clear()
            self.stdscr.refresh()
        # NOTE(review): the previously active menu is restored even for the
        # outermost menu -- presumably intentional; confirm against cursesmenu.
        CursesMenu.currently_active_menu = self.previous_active_menu
def play_wrapper(id):
    """Return a menu callback that plays the audio clip ``id``.

    cursesmenu invokes item callbacks with arbitrary positional/keyword
    arguments, so the returned function accepts and ignores them.  (The
    parameter name ``id`` shadows the builtin but is kept for interface
    compatibility with existing callers.)
    """
    def inner(*_, **__):
        # ``id`` is captured by the closure; the original declared it
        # ``nonlocal`` needlessly -- it is never rebound here.
        play(id)
    return inner
def save_wrapper(id):
    """Return a menu callback that saves the audio clip ``id``.

    Mirrors ``play_wrapper``: menu-framework arguments are swallowed, and
    the clip id is captured by the closure (the original's ``nonlocal``
    declaration was dead code since ``id`` is never rebound).
    """
    def inner(*_, **__):
        save(id)
    return inner
def restore():
    """Re-enable the terminal cursor and clear the screen on exit."""
    curses.curs_set(1)
    if name not in ('nt', 'windows'):
        system('clear')
    else:
        system('cls')
        print('FUCK YOU ANYWAY! USE PROPER OS!')
def placeholder(*args, **kwargs):
    """Prompt for search terms, fetch matching clips, attach a results menu.

    Invoked as a menu callback, so positional/keyword arguments from the
    menu framework are accepted and ignored.
    """
    # NOTE(review): ``voices_menu`` is declared global but never used here.
    global root, voices_menu
    words = []
    # Collect search terms until an empty line or Ctrl-C is entered.
    while True:
        try:
            word = input('type text to search:\n')
            if not word:
                break
            words.append(word)
        except KeyboardInterrupt:
            break
    print('fetching...')
    voices = SelectionMenu([])
    i = 0
    # One submenu per matching clip, each offering a Play and a Save action.
    for text, id in get_audio(words):
        i += 1
        sel = SelectionMenu([], 'Choose phrase')
        si = SubmenuItem(text[:60], sel, voices)
        sel.append_item(FunctionItem(f"Play \"{text[:80]}\"", play_wrapper(id)))
        sel.append_item(FunctionItem(f"Save \"{text[:80]}\"", save_wrapper(id)))
        voices.append_item(si)
    voices.title = f'Tip for long outputs: to exit press `1` and arrow up'
    string = '+'.join(words)
    # Attach the result list to the root menu so it appears after returning.
    submenu_item = SubmenuItem(f'Found {string} voices: {i}', voices, root)
    root.append_item(submenu_item)
# Build the top-level menu and run it; whatever happens, put the terminal back
# into a usable state before the process exits.
root = SelectionMenu([], title='Welcome to GLaDOS voice management system')
root.append_item(FunctionItem("Search for GLaDOS voices", placeholder))
try:
    root.show(show_exit_option=True)
except BaseException:
    # The original used a bare ``except:``, which is exactly
    # ``except BaseException:`` -- made explicit here. This also catches
    # Ctrl-C / SystemExit so curses state is always restored.
    restore()
| 25.505376 | 81 | 0.62774 | from os import system, name
import curses
import threading
from cursesmenu.items import SubmenuItem as SubmenuItem_, FunctionItem
from cursesmenu import SelectionMenu as SelectionMenu_, CursesMenu as CursesMenu_
from utils import get_audio, play, save
root = None
class SubmenuItem(SubmenuItem_):
def action(self):
global root
root.stdscr.clear()
self.submenu.start()
class CursesMenu(CursesMenu_):
stdscr = curses.initscr()
class SelectionMenu(SelectionMenu_):
def _wrap_start(self):
if self.parent is None:
curses.wrapper(self._main_loop)
else:
self._main_loop(self.stdscr)
CursesMenu.currently_active_menu = None
self.stdscr.clear()
self.stdscr.refresh()
CursesMenu.currently_active_menu = self.previous_active_menu
def play_wrapper(id):
def inner(*_, **__):
nonlocal id
play(id)
return inner
def save_wrapper(id):
def inner(*_, **__):
nonlocal id
save(id)
return inner
def restore():
curses.curs_set(1)
if name in ['nt', 'windows']:
system('cls')
print('FUCK YOU ANYWAY! USE PROPER OS!')
else:
system('clear')
def placeholder(*args, **kwargs):
global root, voices_menu
words = []
while True:
try:
word = input('type text to search:\n')
if not word:
break
words.append(word)
except KeyboardInterrupt:
break
print('fetching...')
voices = SelectionMenu([])
i = 0
for text, id in get_audio(words):
i += 1
sel = SelectionMenu([], 'Choose phrase')
si = SubmenuItem(text[:60], sel, voices)
sel.append_item(FunctionItem(f"Play \"{text[:80]}\"", play_wrapper(id)))
sel.append_item(FunctionItem(f"Save \"{text[:80]}\"", save_wrapper(id)))
voices.append_item(si)
voices.title = f'Tip for long outputs: to exit press `1` and arrow up'
string = '+'.join(words)
submenu_item = SubmenuItem(f'Found {string} voices: {i}', voices, root)
root.append_item(submenu_item)
root = SelectionMenu([], title='Welcome to GLaDOS voice management system')
root.append_item(FunctionItem("Search for GLaDOS voices", placeholder))
try:
root.show(show_exit_option=True)
except :
restore()
| true | true |
1c30be9d589430eabcc5dc5c3f24789c6ab46895 | 798 | py | Python | test/types/deformable_mirror_status_test.py | lbusoni/palpao | 95ffeb3733437ab9d96ea47c4a266f73142acca6 | [
"MIT"
] | null | null | null | test/types/deformable_mirror_status_test.py | lbusoni/palpao | 95ffeb3733437ab9d96ea47c4a266f73142acca6 | [
"MIT"
] | 2 | 2022-02-24T23:03:47.000Z | 2022-02-25T00:31:52.000Z | test/types/deformable_mirror_status_test.py | lbusoni/palpao | 95ffeb3733437ab9d96ea47c4a266f73142acca6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import unittest
from plico_dm.types.deformable_mirror_status import DeformableMirrorStatus
class DeformableMirrorStatusTest(unittest.TestCase):
    """Smoke test for the DeformableMirrorStatus value object."""

    def testHappyPath(self):
        # Constructor arguments must round-trip unchanged to the
        # corresponding read-only attributes.
        n_acts, n_modes = 10, 8
        counter = 42
        ref_tag = 'sadf'
        status = DeformableMirrorStatus(n_acts, n_modes, counter, ref_tag)
        self.assertEqual(n_acts, status.number_of_actuators)
        self.assertEqual(n_modes, status.number_of_modes)
        self.assertEqual(counter, status.command_counter)
        self.assertEqual(ref_tag, status.reference_command_tag)
if __name__ == "__main__":
    # Run this test module directly with the stdlib unittest runner.
    unittest.main()
| 29.555556 | 77 | 0.709273 |
import unittest
from plico_dm.types.deformable_mirror_status import DeformableMirrorStatus
class DeformableMirrorStatusTest(unittest.TestCase):
def testHappyPath(self):
numberOfActs = 10
numberOfModes = 8
commandCounter = 42
reference_command_tag = 'sadf'
status = DeformableMirrorStatus(
numberOfActs,
numberOfModes,
commandCounter,
reference_command_tag)
self.assertEqual(numberOfActs, status.number_of_actuators)
self.assertEqual(numberOfModes, status.number_of_modes)
self.assertEqual(commandCounter, status.command_counter)
self.assertEqual(reference_command_tag, status.reference_command_tag)
if __name__ == "__main__":
unittest.main()
| true | true |
1c30bed552339a9de7078dde6af3ecf321c8d5ac | 925 | py | Python | src/orders/admin/orders/filters.py | boochamoocha/education-backend | c6ffb0c00bc066c8f1e0a8c0ffe4d0215c7c416a | [
"MIT"
] | null | null | null | src/orders/admin/orders/filters.py | boochamoocha/education-backend | c6ffb0c00bc066c8f1e0a8c0ffe4d0215c7c416a | [
"MIT"
] | 103 | 2021-05-02T15:04:14.000Z | 2021-07-27T00:20:08.000Z | src/orders/admin/orders/filters.py | boochamoocha/education-backend | c6ffb0c00bc066c8f1e0a8c0ffe4d0215c7c416a | [
"MIT"
] | null | null | null | from django.utils.translation import gettext_lazy as _
from django.utils.translation import pgettext_lazy
from app.admin import admin
from orders.models.order import OrderQuerySet
class OrderStatusFilter(admin.SimpleListFilter):
    """Admin sidebar filter narrowing orders by payment/shipping state."""

    title = pgettext_lazy('orders', 'status')
    parameter_name = 'status'

    def lookups(self, *args, **kwargs):
        # (query value, human-readable label) pairs shown in the sidebar.
        return [
            ('not_paid', _('Not paid')),
            ('paid', _('Paid')),
            ('shipped_without_payment', _('Shipped without payment')),
        ]

    def queryset(self, request, queryset: OrderQuerySet):
        choice = self.value()
        if choice == 'not_paid':
            return queryset.paid(invert=True).filter(shipped__isnull=True)
        if choice == 'paid':
            return queryset.paid()
        if choice == 'shipped_without_payment':
            return queryset.shipped_without_payment()
        # No (or unknown) selection: returning None leaves the queryset as-is.
        return None
| 28.030303 | 74 | 0.632432 | from django.utils.translation import gettext_lazy as _
from django.utils.translation import pgettext_lazy
from app.admin import admin
from orders.models.order import OrderQuerySet
class OrderStatusFilter(admin.SimpleListFilter):
title = pgettext_lazy('orders', 'status')
parameter_name = 'status'
def lookups(self, *args, **kwargs):
return [
('not_paid', _('Not paid')),
('paid', _('Paid')),
('shipped_without_payment', _('Shipped without payment')),
]
def queryset(self, request, queryset: OrderQuerySet):
value = self.value()
if not value:
return
if value == 'not_paid':
return queryset.paid(invert=True).filter(shipped__isnull=True)
if value == 'paid':
return queryset.paid()
if value == 'shipped_without_payment':
return queryset.shipped_without_payment()
| true | true |
1c30bf4abaa673b92d7feef0e811a129cea14372 | 3,740 | py | Python | utility_functions.py | StarryPy/StarryPy-Historic | b9dbd552b8c4631a5a8e9dda98b7ba447eca59da | [
"WTFPL"
] | 38 | 2015-02-12T11:57:59.000Z | 2018-11-15T16:03:45.000Z | utility_functions.py | StarryPy/StarryPy-Historic | b9dbd552b8c4631a5a8e9dda98b7ba447eca59da | [
"WTFPL"
] | 68 | 2015-02-05T23:29:47.000Z | 2017-12-27T08:26:25.000Z | utility_functions.py | StarryPy/StarryPy-Historic | b9dbd552b8c4631a5a8e9dda98b7ba447eca59da | [
"WTFPL"
] | 21 | 2015-02-06T18:58:21.000Z | 2017-12-24T20:08:59.000Z | import collections
import logging
import os
import errno
from construct import Container
from twisted.python.filepath import FilePath
import packets
path = FilePath(os.path.dirname(os.path.abspath(__file__)))
logger = logging.getLogger('starrypy.utility_functions')
class Singleton(type):
    """Metaclass caching exactly one instance per class.

    The first instantiation builds and stores the object; every later call
    returns that same cached instance, regardless of arguments.
    """

    _instances = {}

    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
def give_item_to_player(player_protocol, item, count=1):
    """Send GIVE_ITEM packets granting ``count`` of ``item`` to a player.

    The total is capped at 90000 items and delivered in batches of at most
    1000 items per packet.

    :param player_protocol: Protocol object for the receiving player.
    :param item: Item identifier understood by ``packets.give_item_write``.
    :param count: Requested amount; coerced with ``int()``.
    :return: Number of items counted as given (after capping).
    """
    logger.debug(
        'Attempting to give item %s (count: %s) to %s',
        item,
        count,
        player_protocol.player.name
    )
    item_count = int(count)
    hard_max = 90000
    if item_count > hard_max:
        logger.warn(
            'Attempted to give more items than the max allowed (%s). '
            'Capping amount.',
            hard_max
        )
        item_count = hard_max
    maximum = 1000
    given = 0
    # Split the grant into packets of at most ``maximum`` items each.
    while item_count > 0:
        x = item_count
        if x > maximum:
            x = maximum
        # NOTE(review): the packet carries ``x + 1`` items while only ``x``
        # is subtracted and counted -- looks like a deliberate protocol
        # off-by-one compensation; confirm against the packet format.
        item_packet = build_packet(
            packets.Packets.GIVE_ITEM, packets.give_item_write(item, x + 1)
        )
        player_protocol.transport.write(item_packet)
        item_count -= x
        given += x
    return given
def recursive_dictionary_update(d, u):
    """Recursively merge mapping ``u`` into dictionary ``d`` in place.

    Nested mappings are merged key-by-key instead of being replaced
    wholesale; all other values in ``u`` overwrite those in ``d``.

    :param d: Dictionary to update (mutated and returned).
    :param u: Mapping whose entries are merged into ``d``.
    :return: The updated ``d``.
    """
    # ``collections.Mapping`` was removed in Python 3.10; fall back for
    # interpreters without ``collections.abc`` (i.e. Python 2).
    try:
        from collections.abc import Mapping
    except ImportError:  # pragma: no cover - Python 2 fallback
        from collections import Mapping
    # Bug fix: ``iteritems()`` is Python-2-only and raised AttributeError on
    # Python 3; ``items()`` works on both.
    for k, v in u.items():
        if isinstance(v, Mapping):
            d[k] = recursive_dictionary_update(d.get(k, {}), v)
        else:
            d[k] = v
    return d
def build_packet(packet_type, data):
    """
    Convenience method to build packets for sending.

    :param packet_type: An integer 1 <= packet_type <= 53
    :param data: Data to send.
    :return: The built packet.
    :rtype : str
    """
    payload = Container(
        id=packet_type,
        payload_size=len(data),
        data=data,
    )
    return packets.packet().build(payload)
class Planet(object):
    """Value object identifying a world by its full coordinate tuple."""

    def __init__(self, x, y, z, planet, satellite):
        self.x = x
        self.y = y
        self.z = z
        self.planet = planet
        self.satellite = satellite

    def __str__(self):
        # Colon-separated coordinate string, e.g. "1:2:3:4:5".
        parts = (self.x, self.y, self.z, self.planet, self.satellite)
        return ':'.join(str(p) for p in parts)
def move_ship_to_coords(protocol, x, y, z, planet, satellite):
    """Send a FLY_SHIP packet moving the player's ship to given coordinates.

    Coordinate arguments may arrive as strings; they are coerced to ``int``
    before being packed into the packet.
    """
    logger.info(
        'Moving %s\'s ship to coordinates: %s',
        protocol.player.name,
        ':'.join(map(str, (x, y, z, planet, satellite)))
    )
    x, y, z, planet, satellite = map(int, (x, y, z, planet, satellite))
    warp_packet = build_packet(
        packets.Packets.FLY_SHIP,
        packets.fly_ship_write(
            x=x,
            y=y,
            z=z,
            planet=planet,
            satellite=satellite
        )
    )
    protocol.client_protocol.transport.write(warp_packet)
def extract_name(l):
    """Split an argument token list into a (possibly quoted) name and rest.

    If the first token starts with a single or double quote, tokens are
    consumed until one ends with the same quote character; the quoted
    tokens are joined with spaces (quotes stripped).  Otherwise the first
    token alone is the name.

    :param l: List of whitespace-split argument tokens.
    :return: Tuple ``(name, remainder)``; ``remainder`` is the list of
        unconsumed tokens, or ``None`` if the quoted name used them all.
    :raises ValueError: If a quoted name is never terminated.
    """
    if l[0][0] not in ["'", '"']:
        return l[0], l[1:]
    terminator = l[0][0]
    name = [l[0][1:]]
    for idx, s in enumerate(l[1:]):
        if s[-1] == terminator:
            name.append(s[:-1])
            if idx + 2 != len(l):
                return ' '.join(name), l[idx + 2:]
            return ' '.join(name), None
        name.append(s)
    # Bug fix: the original called str.format() on a printf-style '%s'
    # placeholder, so the terminator was never interpolated into the message.
    raise ValueError(
        'Final terminator character of <{}> not found'.format(terminator)
    )
def verify_path(path):
    """
    Helper function to make sure path exists, and create if it doesn't.
    """
    try:
        os.makedirs(path)
    except OSError as exception:
        # An already-existing directory is fine; anything else propagates.
        if exception.errno == errno.EEXIST:
            return
        raise
| 25.100671 | 75 | 0.559358 | import collections
import logging
import os
import errno
from construct import Container
from twisted.python.filepath import FilePath
import packets
path = FilePath(os.path.dirname(os.path.abspath(__file__)))
logger = logging.getLogger('starrypy.utility_functions')
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(
*args, **kwargs
)
return cls._instances[cls]
def give_item_to_player(player_protocol, item, count=1):
logger.debug(
'Attempting to give item %s (count: %s) to %s',
item,
count,
player_protocol.player.name
)
item_count = int(count)
hard_max = 90000
if item_count > hard_max:
logger.warn(
'Attempted to give more items than the max allowed (%s). '
'Capping amount.',
hard_max
)
item_count = hard_max
maximum = 1000
given = 0
while item_count > 0:
x = item_count
if x > maximum:
x = maximum
item_packet = build_packet(
packets.Packets.GIVE_ITEM, packets.give_item_write(item, x + 1)
)
player_protocol.transport.write(item_packet)
item_count -= x
given += x
return given
def recursive_dictionary_update(d, u):
for k, v in u.iteritems():
if isinstance(v, collections.Mapping):
r = recursive_dictionary_update(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
def build_packet(packet_type, data):
length = len(data)
return packets.packet().build(
Container(
id=packet_type,
payload_size=length,
data=data
)
)
class Planet(object):
def __init__(self, x, y, z, planet, satellite):
self.x = x
self.y = y
self.z = z
self.planet = planet
self.satellite = satellite
def __str__(self):
return '{}:{}:{}:{}:{}'.format(
self.x, self.y, self.z, self.planet, self.satellite
)
def move_ship_to_coords(protocol, x, y, z, planet, satellite):
logger.info(
'Moving %s\'s ship to coordinates: %s',
protocol.player.name,
':'.join(map(str, (x, y, z, planet, satellite)))
)
x, y, z, planet, satellite = map(int, (x, y, z, planet, satellite))
warp_packet = build_packet(
packets.Packets.FLY_SHIP,
packets.fly_ship_write(
x=x,
y=y,
z=z,
planet=planet,
satellite=satellite
)
)
protocol.client_protocol.transport.write(warp_packet)
def extract_name(l):
name = []
if l[0][0] not in ["'", '"']:
return l[0], l[1:]
name.append(l[0][1:])
terminator = l[0][0]
for idx, s in enumerate(l[1:]):
if s[-1] == terminator:
name.append(s[:-1])
if idx + 2 != len(l):
return ' '.join(name), l[idx + 2:]
else:
return ' '.join(name), None
else:
name.append(s)
raise ValueError(
'Final terminator character of <%s> not found'.format(terminator)
)
def verify_path(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
| true | true |
1c30bf60fe627958914fb6eff5675a7d40d2cd1a | 8,056 | py | Python | checkio/Scientific Expedition/Network Attack/test_network_attack.py | KenMercusLai/checkio | c7702221e1bc0b0b30425859ffa6c09722949d65 | [
"MIT"
] | 39 | 2015-02-09T13:24:12.000Z | 2019-05-16T17:51:19.000Z | checkio/Scientific Expedition/Network Attack/test_network_attack.py | KenMercusLai/checkio | c7702221e1bc0b0b30425859ffa6c09722949d65 | [
"MIT"
] | 1 | 2019-10-21T16:18:14.000Z | 2019-10-21T16:18:14.000Z | checkio/Scientific Expedition/Network Attack/test_network_attack.py | KenMercusLai/checkio | c7702221e1bc0b0b30425859ffa6c09722949d65 | [
"MIT"
] | 22 | 2015-01-30T18:00:05.000Z | 2021-05-22T02:57:23.000Z | import unittest
from network_attack import capture
class Tests(unittest.TestCase):
    """Checks ``capture`` against fixed network matrices.

    ``TESTS`` maps a difficulty category to cases of the form
    ``{"input": security/adjacency matrix, "answer": expected result,
    "explanation": per-node detail (informational only, not asserted)}``.
    The three ``test_*`` methods previously repeated the same loop body;
    they now share one private driver.
    """

    TESTS = {
        "Basics": [
            {
                "input": [
                    [0, 1, 0, 1, 0, 1],
                    [1, 8, 1, 0, 0, 0],
                    [0, 1, 2, 0, 0, 1],
                    [1, 0, 0, 1, 1, 0],
                    [0, 0, 0, 1, 3, 1],
                    [1, 0, 1, 0, 1, 2],
                ],
                "answer": 8,
                "explanation": {0: 0, 1: 0, 2: 2, 3: 0, 4: 1, 5: 0},
            },
            {
                "input": [
                    [0, 1, 0, 1, 0, 1],
                    [1, 1, 1, 0, 0, 0],
                    [0, 1, 2, 0, 0, 1],
                    [1, 0, 0, 1, 1, 0],
                    [0, 0, 0, 1, 3, 1],
                    [1, 0, 1, 0, 1, 2],
                ],
                "answer": 4,
                "explanation": {0: 0, 1: 0, 2: 1, 3: 0, 4: 1, 5: 0},
            },
            {
                "input": [[0, 1, 1], [1, 9, 1], [1, 1, 9]],
                "answer": 9,
                "explanation": {0: 0, 1: 0, 2: 0},
            },
        ],
        "Edge": [
            {
                "input": [[0, 1, 1], [1, 1, 1], [1, 1, 1]],
                "answer": 1,
                "explanation": {0: 0, 1: 0, 2: 0},
            },
            {
                "input": [[0, 1, 0], [1, 9, 1], [0, 1, 9]],
                "answer": 18,
                "explanation": {0: 0, 1: 0, 2: 9},
            },
            {
                "input": [
                    [0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                ],
                "answer": 1,
                "explanation": {
                    0: 0,
                    1: 0,
                    2: 0,
                    3: 0,
                    4: 0,
                    5: 0,
                    6: 0,
                    7: 0,
                    8: 0,
                    9: 0,
                },
            },
            {
                "input": [
                    [0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 2, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 3, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 4, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 5, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 6, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 7, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1, 8, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 9],
                ],
                "answer": 9,
                "explanation": {
                    0: 0,
                    1: 0,
                    2: 0,
                    3: 0,
                    4: 0,
                    5: 0,
                    6: 0,
                    7: 0,
                    8: 0,
                    9: 0,
                },
            },
            {
                "input": [
                    [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
                    [0, 1, 2, 1, 0, 0, 0, 0, 0, 0],
                    [0, 0, 1, 3, 1, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 4, 1, 0, 0, 0, 0],
                    [0, 0, 0, 0, 1, 5, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 1, 6, 1, 0, 0],
                    [0, 0, 0, 0, 0, 0, 1, 7, 1, 0],
                    [0, 0, 0, 0, 0, 0, 0, 1, 8, 1],
                    [0, 0, 0, 0, 0, 0, 0, 0, 1, 9],
                ],
                "answer": 45,
                "explanation": {
                    0: 0,
                    1: 0,
                    2: 1,
                    3: 3,
                    4: 6,
                    5: 10,
                    6: 15,
                    7: 21,
                    8: 28,
                    9: 36,
                },
            },
        ],
        "Extra": [
            {
                "input": [
                    [0, 0, 0, 0, 1, 0, 0],
                    [0, 4, 0, 1, 1, 0, 1],
                    [0, 0, 1, 0, 0, 1, 1],
                    [0, 1, 0, 3, 0, 0, 1],
                    [1, 1, 0, 0, 1, 0, 0],
                    [0, 0, 1, 0, 0, 5, 1],
                    [0, 1, 1, 1, 0, 1, 2],
                ],
                "answer": 12,
                "explanation": {0: 0, 1: 1, 2: 7, 3: 5, 4: 0, 5: 7, 6: 5},
            },
            {
                "input": [
                    [0, 0, 1, 1, 0, 0, 0, 0, 0],
                    [0, 6, 0, 1, 0, 0, 0, 0, 1],
                    [1, 0, 9, 1, 0, 1, 0, 0, 0],
                    [1, 1, 1, 7, 1, 0, 0, 1, 0],
                    [0, 0, 0, 1, 2, 1, 1, 1, 0],
                    [0, 0, 1, 0, 1, 8, 1, 1, 0],
                    [0, 0, 0, 0, 1, 1, 9, 0, 1],
                    [0, 0, 0, 1, 1, 1, 0, 7, 0],
                    [0, 1, 0, 0, 0, 0, 1, 0, 9],
                ],
                "answer": 22,
                "explanation": {0: 0, 1: 7, 2: 0, 3: 0, 4: 7, 5: 9, 6: 9, 7: 7, 8: 13},
            },
            {
                "input": [
                    [0, 1, 0, 1, 0, 0, 0, 1],
                    [1, 2, 1, 1, 0, 1, 1, 1],
                    [0, 1, 6, 0, 1, 1, 0, 0],
                    [1, 1, 0, 1, 1, 1, 1, 0],
                    [0, 0, 1, 1, 1, 0, 0, 1],
                    [0, 1, 1, 1, 0, 9, 0, 0],
                    [0, 1, 0, 1, 0, 0, 6, 0],
                    [1, 1, 0, 0, 1, 0, 0, 6],
                ],
                "answer": 10,
                "explanation": {0: 0, 1: 0, 2: 2, 3: 0, 4: 1, 5: 1, 6: 1, 7: 0},
            },
            {
                "input": [
                    [0, 0, 1, 0, 0, 0, 0, 1],
                    [0, 5, 1, 0, 1, 1, 0, 0],
                    [1, 1, 8, 0, 1, 0, 1, 1],
                    [0, 0, 0, 6, 1, 0, 1, 1],
                    [0, 1, 1, 1, 2, 0, 0, 0],
                    [0, 1, 0, 0, 0, 5, 1, 0],
                    [0, 0, 1, 1, 0, 1, 5, 0],
                    [1, 0, 1, 1, 0, 0, 0, 4],
                ],
                "answer": 18,
                "explanation": {0: 0, 1: 8, 2: 0, 3: 4, 4: 8, 5: 13, 6: 8, 7: 0},
            },
            {
                "input": [[0, 1, 1, 1], [1, 9, 1, 0], [1, 1, 8, 0], [1, 0, 0, 4]],
                "answer": 9,
                "explanation": {0: 0, 1: 0, 2: 0, 3: 0},
            },
            {
                "input": [
                    [0, 1, 0, 1, 0, 0],
                    [1, 5, 1, 1, 0, 1],
                    [0, 1, 4, 1, 1, 1],
                    [1, 1, 1, 3, 1, 1],
                    [0, 0, 1, 1, 9, 0],
                    [0, 1, 1, 1, 0, 1],
                ],
                "answer": 12,
                "explanation": {0: 0, 1: 0, 2: 3, 3: 0, 4: 3, 5: 3},
            },
            {
                "input": [
                    [0, 1, 1, 0, 0, 1],
                    [1, 6, 1, 0, 1, 1],
                    [1, 1, 3, 0, 1, 0],
                    [0, 0, 0, 9, 1, 0],
                    [0, 1, 1, 1, 4, 0],
                    [1, 1, 0, 0, 0, 6],
                ],
                "answer": 16,
                "explanation": {0: 0, 1: 0, 2: 0, 3: 7, 4: 3, 5: 0},
            },
        ],
    }

    def _run_category(self, category):
        # Shared driver: every case in the category must yield its answer.
        for case in self.TESTS[category]:
            assert capture(case['input']) == case['answer'], case['input']

    def test_Basics(self):
        self._run_category('Basics')

    def test_Edge(self):
        self._run_category('Edge')

    def test_Extra(self):
        self._run_category('Extra')
if __name__ == "__main__":  # pragma: no cover - CLI entry point only
    unittest.main()
| 34.135593 | 87 | 0.204071 | import unittest
from network_attack import capture
class Tests(unittest.TestCase):
TESTS = {
"Basics": [
{
"input": [
[0, 1, 0, 1, 0, 1],
[1, 8, 1, 0, 0, 0],
[0, 1, 2, 0, 0, 1],
[1, 0, 0, 1, 1, 0],
[0, 0, 0, 1, 3, 1],
[1, 0, 1, 0, 1, 2],
],
"answer": 8,
"explanation": {0: 0, 1: 0, 2: 2, 3: 0, 4: 1, 5: 0},
},
{
"input": [
[0, 1, 0, 1, 0, 1],
[1, 1, 1, 0, 0, 0],
[0, 1, 2, 0, 0, 1],
[1, 0, 0, 1, 1, 0],
[0, 0, 0, 1, 3, 1],
[1, 0, 1, 0, 1, 2],
],
"answer": 4,
"explanation": {0: 0, 1: 0, 2: 1, 3: 0, 4: 1, 5: 0},
},
{
"input": [[0, 1, 1], [1, 9, 1], [1, 1, 9]],
"answer": 9,
"explanation": {0: 0, 1: 0, 2: 0},
},
],
"Edge": [
{
"input": [[0, 1, 1], [1, 1, 1], [1, 1, 1]],
"answer": 1,
"explanation": {0: 0, 1: 0, 2: 0},
},
{
"input": [[0, 1, 0], [1, 9, 1], [0, 1, 9]],
"answer": 18,
"explanation": {0: 0, 1: 0, 2: 9},
},
{
"input": [
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"answer": 1,
"explanation": {
0: 0,
1: 0,
2: 0,
3: 0,
4: 0,
5: 0,
6: 0,
7: 0,
8: 0,
9: 0,
},
},
{
"input": [
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 2, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 3, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 4, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 5, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 6, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 7, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 8, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 9],
],
"answer": 9,
"explanation": {
0: 0,
1: 0,
2: 0,
3: 0,
4: 0,
5: 0,
6: 0,
7: 0,
8: 0,
9: 0,
},
},
{
"input": [
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 2, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 3, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 4, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 5, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 6, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 7, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 8, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 9],
],
"answer": 45,
"explanation": {
0: 0,
1: 0,
2: 1,
3: 3,
4: 6,
5: 10,
6: 15,
7: 21,
8: 28,
9: 36,
},
},
],
"Extra": [
{
"input": [
[0, 0, 0, 0, 1, 0, 0],
[0, 4, 0, 1, 1, 0, 1],
[0, 0, 1, 0, 0, 1, 1],
[0, 1, 0, 3, 0, 0, 1],
[1, 1, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 5, 1],
[0, 1, 1, 1, 0, 1, 2],
],
"answer": 12,
"explanation": {0: 0, 1: 1, 2: 7, 3: 5, 4: 0, 5: 7, 6: 5},
},
{
"input": [
[0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 6, 0, 1, 0, 0, 0, 0, 1],
[1, 0, 9, 1, 0, 1, 0, 0, 0],
[1, 1, 1, 7, 1, 0, 0, 1, 0],
[0, 0, 0, 1, 2, 1, 1, 1, 0],
[0, 0, 1, 0, 1, 8, 1, 1, 0],
[0, 0, 0, 0, 1, 1, 9, 0, 1],
[0, 0, 0, 1, 1, 1, 0, 7, 0],
[0, 1, 0, 0, 0, 0, 1, 0, 9],
],
"answer": 22,
"explanation": {0: 0, 1: 7, 2: 0, 3: 0, 4: 7, 5: 9, 6: 9, 7: 7, 8: 13},
},
{
"input": [
[0, 1, 0, 1, 0, 0, 0, 1],
[1, 2, 1, 1, 0, 1, 1, 1],
[0, 1, 6, 0, 1, 1, 0, 0],
[1, 1, 0, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0, 1],
[0, 1, 1, 1, 0, 9, 0, 0],
[0, 1, 0, 1, 0, 0, 6, 0],
[1, 1, 0, 0, 1, 0, 0, 6],
],
"answer": 10,
"explanation": {0: 0, 1: 0, 2: 2, 3: 0, 4: 1, 5: 1, 6: 1, 7: 0},
},
{
"input": [
[0, 0, 1, 0, 0, 0, 0, 1],
[0, 5, 1, 0, 1, 1, 0, 0],
[1, 1, 8, 0, 1, 0, 1, 1],
[0, 0, 0, 6, 1, 0, 1, 1],
[0, 1, 1, 1, 2, 0, 0, 0],
[0, 1, 0, 0, 0, 5, 1, 0],
[0, 0, 1, 1, 0, 1, 5, 0],
[1, 0, 1, 1, 0, 0, 0, 4],
],
"answer": 18,
"explanation": {0: 0, 1: 8, 2: 0, 3: 4, 4: 8, 5: 13, 6: 8, 7: 0},
},
{
"input": [[0, 1, 1, 1], [1, 9, 1, 0], [1, 1, 8, 0], [1, 0, 0, 4]],
"answer": 9,
"explanation": {0: 0, 1: 0, 2: 0, 3: 0},
},
{
"input": [
[0, 1, 0, 1, 0, 0],
[1, 5, 1, 1, 0, 1],
[0, 1, 4, 1, 1, 1],
[1, 1, 1, 3, 1, 1],
[0, 0, 1, 1, 9, 0],
[0, 1, 1, 1, 0, 1],
],
"answer": 12,
"explanation": {0: 0, 1: 0, 2: 3, 3: 0, 4: 3, 5: 3},
},
{
"input": [
[0, 1, 1, 0, 0, 1],
[1, 6, 1, 0, 1, 1],
[1, 1, 3, 0, 1, 0],
[0, 0, 0, 9, 1, 0],
[0, 1, 1, 1, 4, 0],
[1, 1, 0, 0, 0, 6],
],
"answer": 16,
"explanation": {0: 0, 1: 0, 2: 0, 3: 7, 4: 3, 5: 0},
},
],
}
def test_Basics(self):
for i in self.TESTS['Basics']:
assert capture(i['input']) == i['answer'], i['input']
def test_Edge(self):
for i in self.TESTS['Edge']:
assert capture(i['input']) == i['answer'], i['input']
def test_Extra(self):
for i in self.TESTS['Extra']:
assert capture(i['input']) == i['answer'], i['input']
if __name__ == "__main__":
unittest.main()
| true | true |
1c30c025cb5c20abc6f86555d6f65128044329b4 | 91,665 | py | Python | darwinpush/xb/raw/binding_.py | fasteroute/darwinpush | c919049e076cbdf61007fc9cc1c5a0271cde7929 | [
"Apache-2.0"
] | 3 | 2015-08-15T15:38:06.000Z | 2019-08-06T11:09:32.000Z | darwinpush/xb/raw/binding_.py | grundleborg/darwinpush | c919049e076cbdf61007fc9cc1c5a0271cde7929 | [
"Apache-2.0"
] | 34 | 2015-07-22T13:47:16.000Z | 2015-08-12T17:40:23.000Z | darwinpush/xb/raw/binding_.py | grundleborg/darwinpush | c919049e076cbdf61007fc9cc1c5a0271cde7929 | [
"Apache-2.0"
] | 1 | 2015-08-30T15:26:24.000Z | 2015-08-30T15:26:24.000Z | # ./darwinpush/xb/raw/binding_.py
# -*- coding: utf-8 -*-
# PyXB bindings for NM:0b338ff90207234600b9b9432e9e9a2f47008914
# Generated 2015-04-23 16:42:14.515862 by PyXB version 1.2.4 using Python 3.4.1.final.0
# Namespace http://www.thalesgroup.com/rtti/XmlTimetable/v8
from __future__ import unicode_literals
import pyxb
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
import pyxb.utils.six as _six
# Unique identifier for bindings created at the same time
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:5049f1de-e9cf-11e4-bb50-a0481ca50ab0')
# Version of PyXB used to generate the bindings
_PyXBVersion = '1.2.4'
# Generated bindings are not compatible across PyXB versions
if pyxb.__version__ != _PyXBVersion:
raise pyxb.PyXBVersionError(_PyXBVersion)
# Import bindings for namespaces imported into schema
import pyxb.binding.datatypes
import darwinpush.xb.ct as _ImportedBinding_darwinpush_xb_ct
# NOTE: All namespace declarations are reserved within the binding
Namespace = pyxb.namespace.NamespaceForURI('http://www.thalesgroup.com/rtti/XmlTimetable/v8', create_if_missing=True)
Namespace.configureCategories(['typeBinding', 'elementBinding'])
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
"""Parse the given XML and use the document element to create a
Python instance.
@param xml_text An XML document. This should be data (Python 2
str or Python 3 bytes), or a text (Python 2 unicode or Python 3
str) in the L{pyxb._InputEncoding} encoding.
@keyword default_namespace The L{pyxb.Namespace} instance to use as the
default namespace where there is no default namespace in scope.
If unspecified or C{None}, the namespace of the module containing
this function will be used.
@keyword location_base: An object to be recorded as the base of all
L{pyxb.utils.utility.Location} instances associated with events and
objects handled by the parser. You might pass the URI from which
the document was obtained.
"""
if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
dom = pyxb.utils.domutils.StringToDOM(xml_text)
return CreateFromDOM(dom.documentElement, default_namespace=default_namespace)
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
saxer = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
handler = saxer.getContentHandler()
xmld = xml_text
if isinstance(xmld, _six.text_type):
xmld = xmld.encode(pyxb._InputEncoding)
saxer.parse(io.BytesIO(xmld))
instance = handler.rootObject()
return instance
def CreateFromDOM (node, default_namespace=None):
"""Create a Python instance from the given DOM node.
The node tag must correspond to an element declaration in this module.
@deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}."""
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)
# Atomic simple type: {http://www.thalesgroup.com/rtti/XmlTimetable/v8}CategoryType
class CategoryType (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):
"""Association Category Type: JJ=Join, VV=Split, LK=Linked, NP=Next-Working"""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'CategoryType')
_XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 28, 1)
_Documentation = 'Association Category Type: JJ=Join, VV=Split, LK=Linked, NP=Next-Working'
CategoryType._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=CategoryType, enum_prefix=None)
CategoryType.JJ = CategoryType._CF_enumeration.addEnumeration(unicode_value='JJ', tag='JJ')
CategoryType.VV = CategoryType._CF_enumeration.addEnumeration(unicode_value='VV', tag='VV')
CategoryType.LK = CategoryType._CF_enumeration.addEnumeration(unicode_value='LK', tag='LK')
CategoryType.NP = CategoryType._CF_enumeration.addEnumeration(unicode_value='NP', tag='NP')
CategoryType._InitializeFacetMap(CategoryType._CF_enumeration)
Namespace.addCategoryObject('typeBinding', 'CategoryType', CategoryType)
# Complex type {http://www.thalesgroup.com/rtti/XmlTimetable/v8}AssocService with content type EMPTY
class AssocService (pyxb.binding.basis.complexTypeDefinition):
"""Complex type {http://www.thalesgroup.com/rtti/XmlTimetable/v8}AssocService with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'AssocService')
_XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 40, 1)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Attribute wta uses Python identifier wta
__wta = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wta'), 'wta', '__httpwww_thalesgroup_comrttiXmlTimetablev8_AssocService_wta', _ImportedBinding_darwinpush_xb_ct.WTimeType)
__wta._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 243, 2)
__wta._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 243, 2)
wta = property(__wta.value, __wta.set, None, 'Working time of arrival.')
# Attribute wtd uses Python identifier wtd
__wtd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wtd'), 'wtd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_AssocService_wtd', _ImportedBinding_darwinpush_xb_ct.WTimeType)
__wtd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 248, 2)
__wtd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 248, 2)
wtd = property(__wtd.value, __wtd.set, None, 'Working time of departure.')
# Attribute wtp uses Python identifier wtp
__wtp = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wtp'), 'wtp', '__httpwww_thalesgroup_comrttiXmlTimetablev8_AssocService_wtp', _ImportedBinding_darwinpush_xb_ct.WTimeType)
__wtp._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 253, 2)
__wtp._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 253, 2)
wtp = property(__wtp.value, __wtp.set, None, 'Working time of pass.')
# Attribute pta uses Python identifier pta
__pta = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'pta'), 'pta', '__httpwww_thalesgroup_comrttiXmlTimetablev8_AssocService_pta', _ImportedBinding_darwinpush_xb_ct.RTTITimeType)
__pta._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 258, 2)
__pta._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 258, 2)
pta = property(__pta.value, __pta.set, None, 'Public time of arrival.')
# Attribute ptd uses Python identifier ptd
__ptd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'ptd'), 'ptd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_AssocService_ptd', _ImportedBinding_darwinpush_xb_ct.RTTITimeType)
__ptd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 263, 2)
__ptd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 263, 2)
ptd = property(__ptd.value, __ptd.set, None, 'Public time of departure.')
# Attribute rid uses Python identifier rid
__rid = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'rid'), 'rid', '__httpwww_thalesgroup_comrttiXmlTimetablev8_AssocService_rid', _ImportedBinding_darwinpush_xb_ct.RIDType, required=True)
__rid._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 41, 2)
__rid._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 41, 2)
rid = property(__rid.value, __rid.set, None, 'RTTI Train ID. Note that since this is an RID, the service must already exist within Darwin.')
_ElementMap.update({
})
_AttributeMap.update({
__wta.name() : __wta,
__wtd.name() : __wtd,
__wtp.name() : __wtp,
__pta.name() : __pta,
__ptd.name() : __ptd,
__rid.name() : __rid
})
Namespace.addCategoryObject('typeBinding', 'AssocService', AssocService)
# Complex type {http://www.thalesgroup.com/rtti/XmlTimetable/v8}Association with content type ELEMENT_ONLY
class Association (pyxb.binding.basis.complexTypeDefinition):
    """Type describing an association between schedules.

    Auto-generated PyXB binding (do not edit by hand): links a ``main``
    service and an ``assoc`` service at a single TIPLOC, with a category
    plus cancellation/deletion flags.
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'Association')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 52, 1)
    # Per-class registries populated below; PyXB uses them to map XML
    # children/attributes onto the mangled class attributes.
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element {http://www.thalesgroup.com/rtti/XmlTimetable/v8}main uses Python identifier main
    __main = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'main'), 'main', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Association_httpwww_thalesgroup_comrttiXmlTimetablev8main', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 57, 3), )

    main = property(__main.value, __main.set, None, 'The through, previous working or link-to service')

    # Element {http://www.thalesgroup.com/rtti/XmlTimetable/v8}assoc uses Python identifier assoc
    __assoc = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'assoc'), 'assoc', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Association_httpwww_thalesgroup_comrttiXmlTimetablev8assoc', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 62, 3), )

    assoc = property(__assoc.value, __assoc.set, None, 'The starting, terminating, subsequent working or link-from service')

    # Attribute tiploc uses Python identifier tiploc
    __tiploc = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'tiploc'), 'tiploc', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Association_tiploc', _ImportedBinding_darwinpush_xb_ct.TiplocType, required=True)
    __tiploc._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 68, 2)
    __tiploc._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 68, 2)

    tiploc = property(__tiploc.value, __tiploc.set, None, 'The TIPLOC of the location where the association occurs.')

    # Attribute category uses Python identifier category
    __category = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'category'), 'category', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Association_category', CategoryType, required=True)
    __category._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 73, 2)
    __category._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 73, 2)

    category = property(__category.value, __category.set, None, 'Association category')

    # Attribute isCancelled uses Python identifier isCancelled
    __isCancelled = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'isCancelled'), 'isCancelled', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Association_isCancelled', pyxb.binding.datatypes.boolean, unicode_default='false')
    __isCancelled._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 78, 2)
    __isCancelled._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 78, 2)

    isCancelled = property(__isCancelled.value, __isCancelled.set, None, 'True if this association is cancelled, i.e. the association exists but will no longer happen.')

    # Attribute isDeleted uses Python identifier isDeleted
    __isDeleted = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'isDeleted'), 'isDeleted', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Association_isDeleted', pyxb.binding.datatypes.boolean, unicode_default='false')
    __isDeleted._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 83, 2)
    __isDeleted._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 83, 2)

    isDeleted = property(__isDeleted.value, __isDeleted.set, None, 'True if this association is deleted, i.e. the association no longer exists.')

    _ElementMap.update({
        __main.name() : __main,
        __assoc.name() : __assoc
    })
    _AttributeMap.update({
        __tiploc.name() : __tiploc,
        __category.name() : __category,
        __isCancelled.name() : __isCancelled,
        __isDeleted.name() : __isDeleted
    })
# Register the binding so Namespace.createFromDOM / lookup can resolve it.
Namespace.addCategoryObject('typeBinding', 'Association', Association)
# Complex type {http://www.thalesgroup.com/rtti/XmlTimetable/v8}OR with content type EMPTY
class OR (pyxb.binding.basis.complexTypeDefinition):
    """Defines a Passenger Origin Calling Point.

    Auto-generated PyXB binding (do not edit by hand). Attribute-only
    (EMPTY content): location, activities, platform, public/working times
    and an optional false destination.
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'OR')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 134, 1)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Attribute tpl uses Python identifier tpl
    __tpl = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'tpl'), 'tpl', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OR_tpl', _ImportedBinding_darwinpush_xb_ct.TiplocType, required=True)
    __tpl._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)
    __tpl._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)

    tpl = property(__tpl.value, __tpl.set, None, 'TIPLOC')

    # Attribute act uses Python identifier act
    __act = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'act'), 'act', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OR_act', _ImportedBinding_darwinpush_xb_ct.ActivityType, unicode_default='  ')
    __act._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)
    __act._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)

    act = property(__act.value, __act.set, None, 'Current Activity Codes')

    # Attribute planAct uses Python identifier planAct
    __planAct = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'planAct'), 'planAct', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OR_planAct', _ImportedBinding_darwinpush_xb_ct.ActivityType)
    __planAct._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)
    __planAct._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)

    planAct = property(__planAct.value, __planAct.set, None, 'Planned Activity Codes (if different to current activities)')

    # Attribute can uses Python identifier can
    __can = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'can'), 'can', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OR_can', pyxb.binding.datatypes.boolean, unicode_default='false')
    __can._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)
    __can._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)

    can = property(__can.value, __can.set, None, 'Cancelled')

    # Attribute plat uses Python identifier plat
    __plat = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'plat'), 'plat', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OR_plat', _ImportedBinding_darwinpush_xb_ct.PlatformType)
    __plat._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)
    __plat._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)

    plat = property(__plat.value, __plat.set, None, 'Platform number')

    # Attribute pta uses Python identifier pta
    __pta = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'pta'), 'pta', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OR_pta', _ImportedBinding_darwinpush_xb_ct.RTTITimeType)
    __pta._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 123, 2)
    __pta._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 123, 2)

    pta = property(__pta.value, __pta.set, None, 'Public Scheduled Time of Arrival')

    # Attribute ptd uses Python identifier ptd
    __ptd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'ptd'), 'ptd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OR_ptd', _ImportedBinding_darwinpush_xb_ct.RTTITimeType)
    __ptd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 128, 2)
    __ptd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 128, 2)

    ptd = property(__ptd.value, __ptd.set, None, 'Public Scheduled Time of Departure')

    # Attribute wta uses Python identifier wta
    __wta = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wta'), 'wta', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OR_wta', _ImportedBinding_darwinpush_xb_ct.WTimeType)
    __wta._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 140, 2)
    __wta._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 140, 2)

    wta = property(__wta.value, __wta.set, None, 'Working Scheduled Time of Arrival')

    # Attribute wtd uses Python identifier wtd
    # NOTE: wtd is required for an origin point; wta is optional.
    __wtd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wtd'), 'wtd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OR_wtd', _ImportedBinding_darwinpush_xb_ct.WTimeType, required=True)
    __wtd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 145, 2)
    __wtd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 145, 2)

    wtd = property(__wtd.value, __wtd.set, None, 'Working Scheduled Time of Departure')

    # Attribute fd uses Python identifier fd
    __fd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'fd'), 'fd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OR_fd', _ImportedBinding_darwinpush_xb_ct.TiplocType)
    __fd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 150, 2)
    __fd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 150, 2)

    fd = property(__fd.value, __fd.set, None, 'TIPLOC of False Destination to be used at this location')

    _ElementMap.update({
    })
    _AttributeMap.update({
        __tpl.name() : __tpl,
        __act.name() : __act,
        __planAct.name() : __planAct,
        __can.name() : __can,
        __plat.name() : __plat,
        __pta.name() : __pta,
        __ptd.name() : __ptd,
        __wta.name() : __wta,
        __wtd.name() : __wtd,
        __fd.name() : __fd
    })
# Register the binding so Namespace.createFromDOM / lookup can resolve it.
Namespace.addCategoryObject('typeBinding', 'OR', OR)
# Complex type {http://www.thalesgroup.com/rtti/XmlTimetable/v8}OPOR with content type EMPTY
class OPOR (pyxb.binding.basis.complexTypeDefinition):
    """Defines an Operational Origin Calling Point.

    Auto-generated PyXB binding (do not edit by hand). Like OR but for
    operational (non-passenger) origins: no public times or false
    destination, only working times.
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'OPOR')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 156, 1)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Attribute tpl uses Python identifier tpl
    __tpl = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'tpl'), 'tpl', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPOR_tpl', _ImportedBinding_darwinpush_xb_ct.TiplocType, required=True)
    __tpl._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)
    __tpl._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)

    tpl = property(__tpl.value, __tpl.set, None, 'TIPLOC')

    # Attribute act uses Python identifier act
    __act = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'act'), 'act', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPOR_act', _ImportedBinding_darwinpush_xb_ct.ActivityType, unicode_default='  ')
    __act._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)
    __act._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)

    act = property(__act.value, __act.set, None, 'Current Activity Codes')

    # Attribute planAct uses Python identifier planAct
    __planAct = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'planAct'), 'planAct', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPOR_planAct', _ImportedBinding_darwinpush_xb_ct.ActivityType)
    __planAct._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)
    __planAct._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)

    planAct = property(__planAct.value, __planAct.set, None, 'Planned Activity Codes (if different to current activities)')

    # Attribute can uses Python identifier can
    __can = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'can'), 'can', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPOR_can', pyxb.binding.datatypes.boolean, unicode_default='false')
    __can._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)
    __can._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)

    can = property(__can.value, __can.set, None, 'Cancelled')

    # Attribute plat uses Python identifier plat
    __plat = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'plat'), 'plat', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPOR_plat', _ImportedBinding_darwinpush_xb_ct.PlatformType)
    __plat._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)
    __plat._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)

    plat = property(__plat.value, __plat.set, None, 'Platform number')

    # Attribute wta uses Python identifier wta
    __wta = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wta'), 'wta', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPOR_wta', _ImportedBinding_darwinpush_xb_ct.WTimeType)
    __wta._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 161, 2)
    __wta._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 161, 2)

    wta = property(__wta.value, __wta.set, None, 'Working Scheduled Time of Arrival')

    # Attribute wtd uses Python identifier wtd
    # NOTE: wtd is required for an origin point; wta is optional.
    __wtd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wtd'), 'wtd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPOR_wtd', _ImportedBinding_darwinpush_xb_ct.WTimeType, required=True)
    __wtd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 166, 2)
    __wtd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 166, 2)

    wtd = property(__wtd.value, __wtd.set, None, 'Working Scheduled Time of Departure')

    _ElementMap.update({
    })
    _AttributeMap.update({
        __tpl.name() : __tpl,
        __act.name() : __act,
        __planAct.name() : __planAct,
        __can.name() : __can,
        __plat.name() : __plat,
        __wta.name() : __wta,
        __wtd.name() : __wtd
    })
# Register the binding so Namespace.createFromDOM / lookup can resolve it.
Namespace.addCategoryObject('typeBinding', 'OPOR', OPOR)
# Complex type {http://www.thalesgroup.com/rtti/XmlTimetable/v8}IP with content type EMPTY
class IP (pyxb.binding.basis.complexTypeDefinition):
    """Defines a Passenger Intermediate Calling Point.

    Auto-generated PyXB binding (do not edit by hand). Both working
    times are required here, and a route-change delay (``rdelay``) may
    be carried.
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'IP')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 172, 1)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Attribute tpl uses Python identifier tpl
    __tpl = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'tpl'), 'tpl', '__httpwww_thalesgroup_comrttiXmlTimetablev8_IP_tpl', _ImportedBinding_darwinpush_xb_ct.TiplocType, required=True)
    __tpl._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)
    __tpl._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)

    tpl = property(__tpl.value, __tpl.set, None, 'TIPLOC')

    # Attribute act uses Python identifier act
    __act = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'act'), 'act', '__httpwww_thalesgroup_comrttiXmlTimetablev8_IP_act', _ImportedBinding_darwinpush_xb_ct.ActivityType, unicode_default='  ')
    __act._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)
    __act._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)

    act = property(__act.value, __act.set, None, 'Current Activity Codes')

    # Attribute planAct uses Python identifier planAct
    __planAct = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'planAct'), 'planAct', '__httpwww_thalesgroup_comrttiXmlTimetablev8_IP_planAct', _ImportedBinding_darwinpush_xb_ct.ActivityType)
    __planAct._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)
    __planAct._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)

    planAct = property(__planAct.value, __planAct.set, None, 'Planned Activity Codes (if different to current activities)')

    # Attribute can uses Python identifier can
    __can = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'can'), 'can', '__httpwww_thalesgroup_comrttiXmlTimetablev8_IP_can', pyxb.binding.datatypes.boolean, unicode_default='false')
    __can._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)
    __can._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)

    can = property(__can.value, __can.set, None, 'Cancelled')

    # Attribute plat uses Python identifier plat
    __plat = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'plat'), 'plat', '__httpwww_thalesgroup_comrttiXmlTimetablev8_IP_plat', _ImportedBinding_darwinpush_xb_ct.PlatformType)
    __plat._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)
    __plat._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)

    plat = property(__plat.value, __plat.set, None, 'Platform number')

    # Attribute pta uses Python identifier pta
    __pta = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'pta'), 'pta', '__httpwww_thalesgroup_comrttiXmlTimetablev8_IP_pta', _ImportedBinding_darwinpush_xb_ct.RTTITimeType)
    __pta._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 123, 2)
    __pta._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 123, 2)

    pta = property(__pta.value, __pta.set, None, 'Public Scheduled Time of Arrival')

    # Attribute ptd uses Python identifier ptd
    __ptd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'ptd'), 'ptd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_IP_ptd', _ImportedBinding_darwinpush_xb_ct.RTTITimeType)
    __ptd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 128, 2)
    __ptd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 128, 2)

    ptd = property(__ptd.value, __ptd.set, None, 'Public Scheduled Time of Departure')

    # Attribute wta uses Python identifier wta
    __wta = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wta'), 'wta', '__httpwww_thalesgroup_comrttiXmlTimetablev8_IP_wta', _ImportedBinding_darwinpush_xb_ct.WTimeType, required=True)
    __wta._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 178, 2)
    __wta._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 178, 2)

    wta = property(__wta.value, __wta.set, None, 'Working Scheduled Time of Arrival')

    # Attribute wtd uses Python identifier wtd
    __wtd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wtd'), 'wtd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_IP_wtd', _ImportedBinding_darwinpush_xb_ct.WTimeType, required=True)
    __wtd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 183, 2)
    __wtd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 183, 2)

    wtd = property(__wtd.value, __wtd.set, None, 'Working Scheduled Time of Departure')

    # Attribute rdelay uses Python identifier rdelay
    __rdelay = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'rdelay'), 'rdelay', '__httpwww_thalesgroup_comrttiXmlTimetablev8_IP_rdelay', _ImportedBinding_darwinpush_xb_ct.DelayValueType, unicode_default='0')
    __rdelay._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 188, 2)
    __rdelay._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 188, 2)

    rdelay = property(__rdelay.value, __rdelay.set, None, "A delay value that is implied by a change to the service's route.")

    # Attribute fd uses Python identifier fd
    __fd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'fd'), 'fd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_IP_fd', _ImportedBinding_darwinpush_xb_ct.TiplocType)
    __fd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 193, 2)
    __fd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 193, 2)

    fd = property(__fd.value, __fd.set, None, 'TIPLOC of False Destination to be used at this location')

    _ElementMap.update({
    })
    _AttributeMap.update({
        __tpl.name() : __tpl,
        __act.name() : __act,
        __planAct.name() : __planAct,
        __can.name() : __can,
        __plat.name() : __plat,
        __pta.name() : __pta,
        __ptd.name() : __ptd,
        __wta.name() : __wta,
        __wtd.name() : __wtd,
        __rdelay.name() : __rdelay,
        __fd.name() : __fd
    })
# Register the binding so Namespace.createFromDOM / lookup can resolve it.
Namespace.addCategoryObject('typeBinding', 'IP', IP)
# Complex type {http://www.thalesgroup.com/rtti/XmlTimetable/v8}OPIP with content type EMPTY
class OPIP (pyxb.binding.basis.complexTypeDefinition):
    """Defines an Operational Intermediate Calling Point.

    Auto-generated PyXB binding (do not edit by hand). Like IP but for
    operational stops: no public times or false destination; working
    times required.
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'OPIP')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 199, 1)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Attribute tpl uses Python identifier tpl
    __tpl = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'tpl'), 'tpl', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPIP_tpl', _ImportedBinding_darwinpush_xb_ct.TiplocType, required=True)
    __tpl._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)
    __tpl._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)

    tpl = property(__tpl.value, __tpl.set, None, 'TIPLOC')

    # Attribute act uses Python identifier act
    __act = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'act'), 'act', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPIP_act', _ImportedBinding_darwinpush_xb_ct.ActivityType, unicode_default='  ')
    __act._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)
    __act._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)

    act = property(__act.value, __act.set, None, 'Current Activity Codes')

    # Attribute planAct uses Python identifier planAct
    __planAct = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'planAct'), 'planAct', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPIP_planAct', _ImportedBinding_darwinpush_xb_ct.ActivityType)
    __planAct._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)
    __planAct._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)

    planAct = property(__planAct.value, __planAct.set, None, 'Planned Activity Codes (if different to current activities)')

    # Attribute can uses Python identifier can
    __can = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'can'), 'can', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPIP_can', pyxb.binding.datatypes.boolean, unicode_default='false')
    __can._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)
    __can._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)

    can = property(__can.value, __can.set, None, 'Cancelled')

    # Attribute plat uses Python identifier plat
    __plat = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'plat'), 'plat', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPIP_plat', _ImportedBinding_darwinpush_xb_ct.PlatformType)
    __plat._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)
    __plat._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)

    plat = property(__plat.value, __plat.set, None, 'Platform number')

    # Attribute wta uses Python identifier wta
    __wta = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wta'), 'wta', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPIP_wta', _ImportedBinding_darwinpush_xb_ct.WTimeType, required=True)
    __wta._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 204, 2)
    __wta._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 204, 2)

    wta = property(__wta.value, __wta.set, None, 'Working Scheduled Time of Arrival')

    # Attribute wtd uses Python identifier wtd
    __wtd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wtd'), 'wtd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPIP_wtd', _ImportedBinding_darwinpush_xb_ct.WTimeType, required=True)
    __wtd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 209, 2)
    __wtd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 209, 2)

    wtd = property(__wtd.value, __wtd.set, None, 'Working Scheduled Time of Departure')

    # Attribute rdelay uses Python identifier rdelay
    __rdelay = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'rdelay'), 'rdelay', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPIP_rdelay', _ImportedBinding_darwinpush_xb_ct.DelayValueType, unicode_default='0')
    __rdelay._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 214, 2)
    __rdelay._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 214, 2)

    rdelay = property(__rdelay.value, __rdelay.set, None, "A delay value that is implied by a change to the service's route.")

    _ElementMap.update({
    })
    _AttributeMap.update({
        __tpl.name() : __tpl,
        __act.name() : __act,
        __planAct.name() : __planAct,
        __can.name() : __can,
        __plat.name() : __plat,
        __wta.name() : __wta,
        __wtd.name() : __wtd,
        __rdelay.name() : __rdelay
    })
# Register the binding so Namespace.createFromDOM / lookup can resolve it.
Namespace.addCategoryObject('typeBinding', 'OPIP', OPIP)
# Complex type {http://www.thalesgroup.com/rtti/XmlTimetable/v8}PP with content type EMPTY
class PP (pyxb.binding.basis.complexTypeDefinition):
    """Defines an Intermediate Passing Point.

    Auto-generated PyXB binding (do not edit by hand). A location the
    train passes without stopping, so it carries a single working
    passing time (``wtp``) instead of arrival/departure times.
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'PP')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 220, 1)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Attribute tpl uses Python identifier tpl
    __tpl = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'tpl'), 'tpl', '__httpwww_thalesgroup_comrttiXmlTimetablev8_PP_tpl', _ImportedBinding_darwinpush_xb_ct.TiplocType, required=True)
    __tpl._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)
    __tpl._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)

    tpl = property(__tpl.value, __tpl.set, None, 'TIPLOC')

    # Attribute act uses Python identifier act
    __act = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'act'), 'act', '__httpwww_thalesgroup_comrttiXmlTimetablev8_PP_act', _ImportedBinding_darwinpush_xb_ct.ActivityType, unicode_default='  ')
    __act._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)
    __act._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)

    act = property(__act.value, __act.set, None, 'Current Activity Codes')

    # Attribute planAct uses Python identifier planAct
    __planAct = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'planAct'), 'planAct', '__httpwww_thalesgroup_comrttiXmlTimetablev8_PP_planAct', _ImportedBinding_darwinpush_xb_ct.ActivityType)
    __planAct._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)
    __planAct._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)

    planAct = property(__planAct.value, __planAct.set, None, 'Planned Activity Codes (if different to current activities)')

    # Attribute can uses Python identifier can
    __can = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'can'), 'can', '__httpwww_thalesgroup_comrttiXmlTimetablev8_PP_can', pyxb.binding.datatypes.boolean, unicode_default='false')
    __can._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)
    __can._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)

    can = property(__can.value, __can.set, None, 'Cancelled')

    # Attribute plat uses Python identifier plat
    __plat = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'plat'), 'plat', '__httpwww_thalesgroup_comrttiXmlTimetablev8_PP_plat', _ImportedBinding_darwinpush_xb_ct.PlatformType)
    __plat._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)
    __plat._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)

    plat = property(__plat.value, __plat.set, None, 'Platform number')

    # Attribute wtp uses Python identifier wtp
    __wtp = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wtp'), 'wtp', '__httpwww_thalesgroup_comrttiXmlTimetablev8_PP_wtp', _ImportedBinding_darwinpush_xb_ct.WTimeType, required=True)
    __wtp._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 225, 2)
    __wtp._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 225, 2)

    wtp = property(__wtp.value, __wtp.set, None, 'Working Scheduled Time of Passing')

    # Attribute rdelay uses Python identifier rdelay
    __rdelay = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'rdelay'), 'rdelay', '__httpwww_thalesgroup_comrttiXmlTimetablev8_PP_rdelay', _ImportedBinding_darwinpush_xb_ct.DelayValueType, unicode_default='0')
    __rdelay._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 230, 2)
    __rdelay._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 230, 2)

    rdelay = property(__rdelay.value, __rdelay.set, None, "A delay value that is implied by a change to the service's route.")

    _ElementMap.update({
    })
    _AttributeMap.update({
        __tpl.name() : __tpl,
        __act.name() : __act,
        __planAct.name() : __planAct,
        __can.name() : __can,
        __plat.name() : __plat,
        __wtp.name() : __wtp,
        __rdelay.name() : __rdelay
    })
# Register the binding so Namespace.createFromDOM / lookup can resolve it.
Namespace.addCategoryObject('typeBinding', 'PP', PP)
# Complex type {http://www.thalesgroup.com/rtti/XmlTimetable/v8}DT with content type EMPTY
# NOTE(review): PyXB auto-generated binding. Regenerate from rttiCTTSchema_v8.xsd
# rather than hand-editing the generated structure.
class DT (pyxb.binding.basis.complexTypeDefinition):
    """Defines a Passenger Destination Calling point"""
    _TypeDefinition = None
    # EMPTY content model: this element carries attributes only (no children, no text).
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'DT')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 236, 1)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Attribute tpl uses Python identifier tpl (required)
    __tpl = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'tpl'), 'tpl', '__httpwww_thalesgroup_comrttiXmlTimetablev8_DT_tpl', _ImportedBinding_darwinpush_xb_ct.TiplocType, required=True)
    __tpl._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)
    __tpl._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)
    tpl = property(__tpl.value, __tpl.set, None, 'TIPLOC')
    # Attribute act uses Python identifier act (defaults to a blank activity code)
    __act = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'act'), 'act', '__httpwww_thalesgroup_comrttiXmlTimetablev8_DT_act', _ImportedBinding_darwinpush_xb_ct.ActivityType, unicode_default='  ')
    __act._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)
    __act._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)
    act = property(__act.value, __act.set, None, 'Current Activity Codes')
    # Attribute planAct uses Python identifier planAct
    __planAct = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'planAct'), 'planAct', '__httpwww_thalesgroup_comrttiXmlTimetablev8_DT_planAct', _ImportedBinding_darwinpush_xb_ct.ActivityType)
    __planAct._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)
    __planAct._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)
    planAct = property(__planAct.value, __planAct.set, None, 'Planned Activity Codes (if different to current activities)')
    # Attribute can uses Python identifier can (defaults to false)
    __can = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'can'), 'can', '__httpwww_thalesgroup_comrttiXmlTimetablev8_DT_can', pyxb.binding.datatypes.boolean, unicode_default='false')
    __can._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)
    __can._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)
    can = property(__can.value, __can.set, None, 'Cancelled')
    # Attribute plat uses Python identifier plat
    __plat = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'plat'), 'plat', '__httpwww_thalesgroup_comrttiXmlTimetablev8_DT_plat', _ImportedBinding_darwinpush_xb_ct.PlatformType)
    __plat._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)
    __plat._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)
    plat = property(__plat.value, __plat.set, None, 'Platform number')
    # Attribute pta uses Python identifier pta
    __pta = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'pta'), 'pta', '__httpwww_thalesgroup_comrttiXmlTimetablev8_DT_pta', _ImportedBinding_darwinpush_xb_ct.RTTITimeType)
    __pta._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 123, 2)
    __pta._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 123, 2)
    pta = property(__pta.value, __pta.set, None, 'Public Scheduled Time of Arrival')
    # Attribute ptd uses Python identifier ptd
    __ptd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'ptd'), 'ptd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_DT_ptd', _ImportedBinding_darwinpush_xb_ct.RTTITimeType)
    __ptd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 128, 2)
    __ptd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 128, 2)
    ptd = property(__ptd.value, __ptd.set, None, 'Public Scheduled Time of Departure')
    # Attribute wta uses Python identifier wta (required)
    __wta = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wta'), 'wta', '__httpwww_thalesgroup_comrttiXmlTimetablev8_DT_wta', _ImportedBinding_darwinpush_xb_ct.WTimeType, required=True)
    __wta._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 242, 2)
    __wta._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 242, 2)
    wta = property(__wta.value, __wta.set, None, 'Working Scheduled Time of Arrival')
    # Attribute wtd uses Python identifier wtd
    __wtd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wtd'), 'wtd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_DT_wtd', _ImportedBinding_darwinpush_xb_ct.WTimeType)
    __wtd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 247, 2)
    __wtd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 247, 2)
    wtd = property(__wtd.value, __wtd.set, None, 'Working Scheduled Time of Departure')
    # Attribute rdelay uses Python identifier rdelay (defaults to 0)
    __rdelay = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'rdelay'), 'rdelay', '__httpwww_thalesgroup_comrttiXmlTimetablev8_DT_rdelay', _ImportedBinding_darwinpush_xb_ct.DelayValueType, unicode_default='0')
    __rdelay._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 252, 2)
    __rdelay._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 252, 2)
    rdelay = property(__rdelay.value, __rdelay.set, None, "A delay value that is implied by a change to the service's route.")
    # No child elements for this EMPTY content model.
    _ElementMap.update({
        
    })
    _AttributeMap.update({
        __tpl.name() : __tpl,
        __act.name() : __act,
        __planAct.name() : __planAct,
        __can.name() : __can,
        __plat.name() : __plat,
        __pta.name() : __pta,
        __ptd.name() : __ptd,
        __wta.name() : __wta,
        __wtd.name() : __wtd,
        __rdelay.name() : __rdelay
    })
# Register the DT (passenger destination) complex-type binding in the module namespace.
Namespace.addCategoryObject('typeBinding', 'DT', DT)
# Complex type {http://www.thalesgroup.com/rtti/XmlTimetable/v8}OPDT with content type EMPTY
# NOTE(review): PyXB auto-generated binding. Regenerate from rttiCTTSchema_v8.xsd
# rather than hand-editing the generated structure.
class OPDT (pyxb.binding.basis.complexTypeDefinition):
    """Defines an Operational Destination Calling point"""
    _TypeDefinition = None
    # EMPTY content model: this element carries attributes only (no children, no text).
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'OPDT')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 258, 1)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Attribute tpl uses Python identifier tpl (required)
    __tpl = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'tpl'), 'tpl', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPDT_tpl', _ImportedBinding_darwinpush_xb_ct.TiplocType, required=True)
    __tpl._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)
    __tpl._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)
    tpl = property(__tpl.value, __tpl.set, None, 'TIPLOC')
    # Attribute act uses Python identifier act (defaults to a blank activity code)
    __act = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'act'), 'act', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPDT_act', _ImportedBinding_darwinpush_xb_ct.ActivityType, unicode_default='  ')
    __act._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)
    __act._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)
    act = property(__act.value, __act.set, None, 'Current Activity Codes')
    # Attribute planAct uses Python identifier planAct
    __planAct = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'planAct'), 'planAct', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPDT_planAct', _ImportedBinding_darwinpush_xb_ct.ActivityType)
    __planAct._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)
    __planAct._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)
    planAct = property(__planAct.value, __planAct.set, None, 'Planned Activity Codes (if different to current activities)')
    # Attribute can uses Python identifier can (defaults to false)
    __can = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'can'), 'can', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPDT_can', pyxb.binding.datatypes.boolean, unicode_default='false')
    __can._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)
    __can._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)
    can = property(__can.value, __can.set, None, 'Cancelled')
    # Attribute plat uses Python identifier plat
    __plat = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'plat'), 'plat', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPDT_plat', _ImportedBinding_darwinpush_xb_ct.PlatformType)
    __plat._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)
    __plat._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)
    plat = property(__plat.value, __plat.set, None, 'Platform number')
    # Attribute wta uses Python identifier wta (required)
    __wta = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wta'), 'wta', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPDT_wta', _ImportedBinding_darwinpush_xb_ct.WTimeType, required=True)
    __wta._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 263, 2)
    __wta._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 263, 2)
    wta = property(__wta.value, __wta.set, None, 'Working Scheduled Time of Arrival')
    # Attribute wtd uses Python identifier wtd
    __wtd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wtd'), 'wtd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPDT_wtd', _ImportedBinding_darwinpush_xb_ct.WTimeType)
    __wtd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 268, 2)
    __wtd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 268, 2)
    wtd = property(__wtd.value, __wtd.set, None, 'Working Scheduled Time of Departure')
    # Attribute rdelay uses Python identifier rdelay (defaults to 0)
    __rdelay = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'rdelay'), 'rdelay', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPDT_rdelay', _ImportedBinding_darwinpush_xb_ct.DelayValueType, unicode_default='0')
    __rdelay._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 273, 2)
    __rdelay._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 273, 2)
    rdelay = property(__rdelay.value, __rdelay.set, None, "A delay value that is implied by a change to the service's route.")
    # No child elements for this EMPTY content model.
    _ElementMap.update({
        
    })
    _AttributeMap.update({
        __tpl.name() : __tpl,
        __act.name() : __act,
        __planAct.name() : __planAct,
        __can.name() : __can,
        __plat.name() : __plat,
        __wta.name() : __wta,
        __wtd.name() : __wtd,
        __rdelay.name() : __rdelay
    })
# Register the OPDT (operational destination) complex-type binding in the module namespace.
Namespace.addCategoryObject('typeBinding', 'OPDT', OPDT)
# Complex type {http://www.thalesgroup.com/rtti/XmlTimetable/v8}Schedule with content type ELEMENT_ONLY
# NOTE(review): PyXB auto-generated binding. Regenerate from rttiCTTSchema_v8.xsd
# rather than hand-editing the generated structure.
class Schedule (pyxb.binding.basis.complexTypeDefinition):
    """Train Schedule"""
    _TypeDefinition = None
    # ELEMENT_ONLY content model: calling-point child elements plus header attributes.
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'Schedule')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 279, 1)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element {http://www.thalesgroup.com/rtti/XmlTimetable/v8}OR uses Python identifier OR
    __OR = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'OR'), 'OR', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_httpwww_thalesgroup_comrttiXmlTimetablev8OR', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 285, 4), )
    OR = property(__OR.value, __OR.set, None, 'Origin location')
    # Element {http://www.thalesgroup.com/rtti/XmlTimetable/v8}OPOR uses Python identifier OPOR
    __OPOR = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'OPOR'), 'OPOR', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_httpwww_thalesgroup_comrttiXmlTimetablev8OPOR', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 290, 4), )
    OPOR = property(__OPOR.value, __OPOR.set, None, 'Operational origin location')
    # Element {http://www.thalesgroup.com/rtti/XmlTimetable/v8}IP uses Python identifier IP
    __IP = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'IP'), 'IP', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_httpwww_thalesgroup_comrttiXmlTimetablev8IP', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 295, 4), )
    IP = property(__IP.value, __IP.set, None, 'Intermediate calling location')
    # Element {http://www.thalesgroup.com/rtti/XmlTimetable/v8}OPIP uses Python identifier OPIP
    __OPIP = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'OPIP'), 'OPIP', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_httpwww_thalesgroup_comrttiXmlTimetablev8OPIP', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 300, 4), )
    OPIP = property(__OPIP.value, __OPIP.set, None, 'Intermediate operational calling location')
    # Element {http://www.thalesgroup.com/rtti/XmlTimetable/v8}PP uses Python identifier PP
    __PP = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'PP'), 'PP', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_httpwww_thalesgroup_comrttiXmlTimetablev8PP', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 305, 4), )
    PP = property(__PP.value, __PP.set, None, 'Passing location')
    # Element {http://www.thalesgroup.com/rtti/XmlTimetable/v8}DT uses Python identifier DT
    __DT = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'DT'), 'DT', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_httpwww_thalesgroup_comrttiXmlTimetablev8DT', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 310, 4), )
    DT = property(__DT.value, __DT.set, None, 'Destination location')
    # Element {http://www.thalesgroup.com/rtti/XmlTimetable/v8}OPDT uses Python identifier OPDT
    __OPDT = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'OPDT'), 'OPDT', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_httpwww_thalesgroup_comrttiXmlTimetablev8OPDT', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 315, 4), )
    OPDT = property(__OPDT.value, __OPDT.set, None, 'Operational destination location')
    # Element {http://www.thalesgroup.com/rtti/XmlTimetable/v8}cancelReason uses Python identifier cancelReason
    __cancelReason = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'cancelReason'), 'cancelReason', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_httpwww_thalesgroup_comrttiXmlTimetablev8cancelReason', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 321, 3), )
    cancelReason = property(__cancelReason.value, __cancelReason.set, None, 'Reason for cancellation of service/location')
    # Attribute rid uses Python identifier rid (required)
    __rid = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'rid'), 'rid', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_rid', _ImportedBinding_darwinpush_xb_ct.RIDType, required=True)
    __rid._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 327, 2)
    __rid._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 327, 2)
    rid = property(__rid.value, __rid.set, None, 'RTTI unique Train ID')
    # Attribute uid uses Python identifier uid (required)
    __uid = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'uid'), 'uid', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_uid', _ImportedBinding_darwinpush_xb_ct.UIDType, required=True)
    __uid._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 332, 2)
    __uid._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 332, 2)
    uid = property(__uid.value, __uid.set, None, 'Train UID')
    # Attribute trainId uses Python identifier trainId (required)
    __trainId = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'trainId'), 'trainId', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_trainId', _ImportedBinding_darwinpush_xb_ct.TrainIdType, required=True)
    __trainId._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 337, 2)
    __trainId._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 337, 2)
    trainId = property(__trainId.value, __trainId.set, None, 'Train ID (Headcode)')
    # Attribute ssd uses Python identifier ssd (required)
    __ssd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'ssd'), 'ssd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_ssd', _ImportedBinding_darwinpush_xb_ct.RTTIDateType, required=True)
    __ssd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 342, 2)
    __ssd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 342, 2)
    ssd = property(__ssd.value, __ssd.set, None, 'Scheduled Start Date')
    # Attribute toc uses Python identifier toc (required)
    __toc = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'toc'), 'toc', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_toc', _ImportedBinding_darwinpush_xb_ct.TOCType, required=True)
    __toc._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 347, 2)
    __toc._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 347, 2)
    toc = property(__toc.value, __toc.set, None, 'ATOC Code')
    # Attribute status uses Python identifier status (defaults to 'P')
    __status = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'status'), 'status', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_status', _ImportedBinding_darwinpush_xb_ct.CIFTrainStatusType, unicode_default='P')
    __status._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 352, 2)
    __status._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 352, 2)
    status = property(__status.value, __status.set, None, 'Type of service, i.e. Train/Bus/Ship.')
    # Attribute trainCat uses Python identifier trainCat (defaults to 'OO')
    __trainCat = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'trainCat'), 'trainCat', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_trainCat', _ImportedBinding_darwinpush_xb_ct.CIFTrainCategoryType, unicode_default='OO')
    __trainCat._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 357, 2)
    __trainCat._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 357, 2)
    trainCat = property(__trainCat.value, __trainCat.set, None, 'Category of service.')
    # Attribute isPassengerSvc uses Python identifier isPassengerSvc (defaults to true)
    __isPassengerSvc = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'isPassengerSvc'), 'isPassengerSvc', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_isPassengerSvc', pyxb.binding.datatypes.boolean, unicode_default='true')
    __isPassengerSvc._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 362, 2)
    __isPassengerSvc._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 362, 2)
    isPassengerSvc = property(__isPassengerSvc.value, __isPassengerSvc.set, None, 'True if Darwin classifies the train category as a passenger service.')
    # Attribute deleted uses Python identifier deleted (defaults to false)
    __deleted = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'deleted'), 'deleted', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_deleted', pyxb.binding.datatypes.boolean, unicode_default='false')
    __deleted._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 367, 2)
    __deleted._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 367, 2)
    deleted = property(__deleted.value, __deleted.set, None, 'Service has been deleted and should not be used/displayed.')
    # Attribute isCharter uses Python identifier isCharter (defaults to false)
    __isCharter = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'isCharter'), 'isCharter', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_isCharter', pyxb.binding.datatypes.boolean, unicode_default='false')
    __isCharter._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 372, 2)
    __isCharter._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 372, 2)
    isCharter = property(__isCharter.value, __isCharter.set, None, 'Indicates if this service is a charter service.')
    # Attribute qtrain uses Python identifier qtrain (defaults to false)
    __qtrain = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'qtrain'), 'qtrain', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_qtrain', pyxb.binding.datatypes.boolean, unicode_default='false')
    __qtrain._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 377, 2)
    __qtrain._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 377, 2)
    qtrain = property(__qtrain.value, __qtrain.set, None, 'True if this is a Q Train (runs as required) that has not yet been activated. Note that a Q Train that has been activated before the XML Timetable file has been built will not have this attribute set true.')
    # Attribute can uses Python identifier can (defaults to false; no schema documentation)
    __can = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'can'), 'can', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_can', pyxb.binding.datatypes.boolean, unicode_default='false')
    __can._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 382, 2)
    __can._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 382, 2)
    can = property(__can.value, __can.set, None, None)
    _ElementMap.update({
        __OR.name() : __OR,
        __OPOR.name() : __OPOR,
        __IP.name() : __IP,
        __OPIP.name() : __OPIP,
        __PP.name() : __PP,
        __DT.name() : __DT,
        __OPDT.name() : __OPDT,
        __cancelReason.name() : __cancelReason
    })
    _AttributeMap.update({
        __rid.name() : __rid,
        __uid.name() : __uid,
        __trainId.name() : __trainId,
        __ssd.name() : __ssd,
        __toc.name() : __toc,
        __status.name() : __status,
        __trainCat.name() : __trainCat,
        __isPassengerSvc.name() : __isPassengerSvc,
        __deleted.name() : __deleted,
        __isCharter.name() : __isCharter,
        __qtrain.name() : __qtrain,
        __can.name() : __can
    })
# Register the Schedule complex-type binding in the module namespace.
Namespace.addCategoryObject('typeBinding', 'Schedule', Schedule)
# Complex type [anonymous] with content type ELEMENT_ONLY
# NOTE(review): PyXB auto-generated binding for the anonymous complex type of the
# PportTimetable root element. Regenerate from rttiCTTSchema_v8.xsd rather than
# hand-editing the generated structure.
class CTD_ANON (pyxb.binding.basis.complexTypeDefinition):
    """Push Port Timetable Schema"""
    _TypeDefinition = None
    # ELEMENT_ONLY content model: Journey and Association child elements.
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    # Anonymous type: declared inline on the PportTimetable element, so no expanded name.
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 389, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element {http://www.thalesgroup.com/rtti/XmlTimetable/v8}Journey uses Python identifier Journey
    __Journey = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Journey'), 'Journey', '__httpwww_thalesgroup_comrttiXmlTimetablev8_CTD_ANON_httpwww_thalesgroup_comrttiXmlTimetablev8Journey', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 391, 4), )
    Journey = property(__Journey.value, __Journey.set, None, 'Schedule of a service that exists in Darwin')
    # Element {http://www.thalesgroup.com/rtti/XmlTimetable/v8}Association uses Python identifier Association
    __Association = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Association'), 'Association', '__httpwww_thalesgroup_comrttiXmlTimetablev8_CTD_ANON_httpwww_thalesgroup_comrttiXmlTimetablev8Association', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 396, 4), )
    Association = property(__Association.value, __Association.set, None, 'An association between two schedules')
    # Attribute timetableID uses Python identifier timetableID (required)
    __timetableID = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'timetableID'), 'timetableID', '__httpwww_thalesgroup_comrttiXmlTimetablev8_CTD_ANON_timetableID', _ImportedBinding_darwinpush_xb_ct.TimetableIDType, required=True)
    __timetableID._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 402, 3)
    __timetableID._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 402, 3)
    timetableID = property(__timetableID.value, __timetableID.set, None, 'The ID of the RTTI timetable from which this XML timetable was generated.')
    _ElementMap.update({
        __Journey.name() : __Journey,
        __Association.name() : __Association
    })
    _AttributeMap.update({
        __timetableID.name() : __timetableID
    })
# Document root element binding: <PportTimetable> is the top-level element of a
# push-port timetable file, bound to the anonymous CTD_ANON type above.
PportTimetable = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'PportTimetable'), CTD_ANON, documentation='Push Port Timetable Schema', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 385, 1))
Namespace.addCategoryObject('elementBinding', PportTimetable.name().localName(), PportTimetable)
# Declare Association's two child elements, both of type AssocService.
Association._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'main'), AssocService, scope=Association, documentation='The through, previous working or link-to service', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 57, 3)))
Association._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'assoc'), AssocService, scope=Association, documentation='The starting, terminating, subsequent working or link-from service', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 62, 3)))
def _BuildAutomaton ():
    """Build the finite-automaton content model for Association: exactly one
    <main> element followed by exactly one <assoc> element."""
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton
    del _BuildAutomaton
    import pyxb.utils.fac as fac

    # This content model has no bounded-occurrence counters.
    counter_conditions = set()
    automaton_states = []

    # State 0: the <main> element.  Not accepting (final_update is None):
    # a document cannot end after <main> alone.
    main_use = pyxb.binding.content.ElementUse(Association._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'main')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 57, 3))
    state_main = fac.State(main_use, is_initial=True, final_update=None, is_unordered_catenation=False)
    automaton_states.append(state_main)

    # State 1: the <assoc> element.  Accepting once consumed (empty update set).
    assoc_use = pyxb.binding.content.ElementUse(Association._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'assoc')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 62, 3))
    state_assoc = fac.State(assoc_use, is_initial=False, final_update=set(), is_unordered_catenation=False)
    automaton_states.append(state_assoc)

    # <main> must be followed by <assoc>; <assoc> is terminal.
    state_main._set_transitionSet([fac.Transition(state_assoc, [])])
    state_assoc._set_transitionSet([])
    return fac.Automaton(automaton_states, counter_conditions, False, containing_state=None)
Association._Automaton = _BuildAutomaton()
# Declare Schedule's child elements: the calling-point types defined above plus
# the imported cancellation-reason type.
Schedule._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'OR'), OR, scope=Schedule, documentation='Origin location', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 285, 4)))
Schedule._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'OPOR'), OPOR, scope=Schedule, documentation='Operational origin location', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 290, 4)))
Schedule._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'IP'), IP, scope=Schedule, documentation='Intermediate calling location', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 295, 4)))
Schedule._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'OPIP'), OPIP, scope=Schedule, documentation='Intermediate operational calling location', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 300, 4)))
Schedule._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'PP'), PP, scope=Schedule, documentation='Passing location', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 305, 4)))
Schedule._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'DT'), DT, scope=Schedule, documentation='Destination location', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 310, 4)))
Schedule._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'OPDT'), OPDT, scope=Schedule, documentation='Operational destination location', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 315, 4)))
Schedule._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'cancelReason'), _ImportedBinding_darwinpush_xb_ct.DisruptionReasonType, scope=Schedule, documentation='Reason for cancellation of service/location', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 321, 3)))
def _BuildAutomaton_ ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_
del _BuildAutomaton_
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=2, max=None, metadata=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 284, 3))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 321, 3))
counters.add(cc_1)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(Schedule._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'OR')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 285, 4))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(Schedule._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'OPOR')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 290, 4))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(Schedule._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'IP')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 295, 4))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(Schedule._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'OPIP')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 300, 4))
st_3 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_3)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(Schedule._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'PP')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 305, 4))
st_4 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_4)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(Schedule._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'DT')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 310, 4))
st_5 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_5)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(Schedule._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'OPDT')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 315, 4))
st_6 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_6)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(Schedule._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'cancelReason')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 321, 3))
st_7 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_7)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_0, False) ]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_0, False) ]))
st_2._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_0, False) ]))
st_3._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_0, False) ]))
st_4._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_0, False) ]))
st_5._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_0, False) ]))
st_6._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_1, True) ]))
st_7._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
Schedule._Automaton = _BuildAutomaton_()
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Journey'), Schedule, scope=CTD_ANON, documentation='Schedule of a service that exists in Darwin', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 391, 4)))
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Association'), Association, scope=CTD_ANON, documentation='An association between two schedules', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 396, 4)))
def _BuildAutomaton_2 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_2
del _BuildAutomaton_2
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 391, 4))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 396, 4))
counters.add(cc_1)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Journey')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 391, 4))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Association')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 396, 4))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
st_1._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON._Automaton = _BuildAutomaton_2()
| 64.056604 | 353 | 0.75741 |
from __future__ import unicode_literals
import pyxb
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
import pyxb.utils.six as _six
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:5049f1de-e9cf-11e4-bb50-a0481ca50ab0')
_PyXBVersion = '1.2.4'
if pyxb.__version__ != _PyXBVersion:
raise pyxb.PyXBVersionError(_PyXBVersion)
import pyxb.binding.datatypes
import darwinpush.xb.ct as _ImportedBinding_darwinpush_xb_ct
Namespace = pyxb.namespace.NamespaceForURI('http://www.thalesgroup.com/rtti/XmlTimetable/v8', create_if_missing=True)
Namespace.configureCategories(['typeBinding', 'elementBinding'])
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
dom = pyxb.utils.domutils.StringToDOM(xml_text)
return CreateFromDOM(dom.documentElement, default_namespace=default_namespace)
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
saxer = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
handler = saxer.getContentHandler()
xmld = xml_text
if isinstance(xmld, _six.text_type):
xmld = xmld.encode(pyxb._InputEncoding)
saxer.parse(io.BytesIO(xmld))
instance = handler.rootObject()
return instance
def CreateFromDOM (node, default_namespace=None):
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)
class CategoryType (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'CategoryType')
_XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 28, 1)
_Documentation = 'Association Category Type: JJ=Join, VV=Split, LK=Linked, NP=Next-Working'
CategoryType._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=CategoryType, enum_prefix=None)
CategoryType.JJ = CategoryType._CF_enumeration.addEnumeration(unicode_value='JJ', tag='JJ')
CategoryType.VV = CategoryType._CF_enumeration.addEnumeration(unicode_value='VV', tag='VV')
CategoryType.LK = CategoryType._CF_enumeration.addEnumeration(unicode_value='LK', tag='LK')
CategoryType.NP = CategoryType._CF_enumeration.addEnumeration(unicode_value='NP', tag='NP')
CategoryType._InitializeFacetMap(CategoryType._CF_enumeration)
Namespace.addCategoryObject('typeBinding', 'CategoryType', CategoryType)
class AssocService (pyxb.binding.basis.complexTypeDefinition):
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'AssocService')
_XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 40, 1)
_ElementMap = {}
_AttributeMap = {}
__wta = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wta'), 'wta', '__httpwww_thalesgroup_comrttiXmlTimetablev8_AssocService_wta', _ImportedBinding_darwinpush_xb_ct.WTimeType)
__wta._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 243, 2)
__wta._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 243, 2)
wta = property(__wta.value, __wta.set, None, 'Working time of arrival.')
__wtd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wtd'), 'wtd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_AssocService_wtd', _ImportedBinding_darwinpush_xb_ct.WTimeType)
__wtd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 248, 2)
__wtd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 248, 2)
wtd = property(__wtd.value, __wtd.set, None, 'Working time of departure.')
__wtp = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wtp'), 'wtp', '__httpwww_thalesgroup_comrttiXmlTimetablev8_AssocService_wtp', _ImportedBinding_darwinpush_xb_ct.WTimeType)
__wtp._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 253, 2)
__wtp._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 253, 2)
wtp = property(__wtp.value, __wtp.set, None, 'Working time of pass.')
__pta = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'pta'), 'pta', '__httpwww_thalesgroup_comrttiXmlTimetablev8_AssocService_pta', _ImportedBinding_darwinpush_xb_ct.RTTITimeType)
__pta._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 258, 2)
__pta._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 258, 2)
pta = property(__pta.value, __pta.set, None, 'Public time of arrival.')
__ptd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'ptd'), 'ptd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_AssocService_ptd', _ImportedBinding_darwinpush_xb_ct.RTTITimeType)
__ptd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 263, 2)
__ptd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 263, 2)
ptd = property(__ptd.value, __ptd.set, None, 'Public time of departure.')
__rid = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'rid'), 'rid', '__httpwww_thalesgroup_comrttiXmlTimetablev8_AssocService_rid', _ImportedBinding_darwinpush_xb_ct.RIDType, required=True)
__rid._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 41, 2)
__rid._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 41, 2)
rid = property(__rid.value, __rid.set, None, 'RTTI Train ID. Note that since this is an RID, the service must already exist within Darwin.')
_ElementMap.update({
})
_AttributeMap.update({
__wta.name() : __wta,
__wtd.name() : __wtd,
__wtp.name() : __wtp,
__pta.name() : __pta,
__ptd.name() : __ptd,
__rid.name() : __rid
})
Namespace.addCategoryObject('typeBinding', 'AssocService', AssocService)
class Association (pyxb.binding.basis.complexTypeDefinition):
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'Association')
_XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 52, 1)
_ElementMap = {}
_AttributeMap = {}
__main = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'main'), 'main', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Association_httpwww_thalesgroup_comrttiXmlTimetablev8main', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 57, 3), )
main = property(__main.value, __main.set, None, 'The through, previous working or link-to service')
__assoc = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'assoc'), 'assoc', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Association_httpwww_thalesgroup_comrttiXmlTimetablev8assoc', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 62, 3), )
assoc = property(__assoc.value, __assoc.set, None, 'The starting, terminating, subsequent working or link-from service')
__tiploc = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'tiploc'), 'tiploc', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Association_tiploc', _ImportedBinding_darwinpush_xb_ct.TiplocType, required=True)
__tiploc._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 68, 2)
__tiploc._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 68, 2)
tiploc = property(__tiploc.value, __tiploc.set, None, 'The TIPLOC of the location where the association occurs.')
__category = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'category'), 'category', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Association_category', CategoryType, required=True)
__category._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 73, 2)
__category._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 73, 2)
category = property(__category.value, __category.set, None, 'Association category')
__isCancelled = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'isCancelled'), 'isCancelled', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Association_isCancelled', pyxb.binding.datatypes.boolean, unicode_default='false')
__isCancelled._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 78, 2)
__isCancelled._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 78, 2)
isCancelled = property(__isCancelled.value, __isCancelled.set, None, 'True if this association is cancelled, i.e. the association exists but will no longer happen.')
__isDeleted = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'isDeleted'), 'isDeleted', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Association_isDeleted', pyxb.binding.datatypes.boolean, unicode_default='false')
__isDeleted._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 83, 2)
__isDeleted._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 83, 2)
isDeleted = property(__isDeleted.value, __isDeleted.set, None, 'True if this association is deleted, i.e. the association no longer exists.')
_ElementMap.update({
__main.name() : __main,
__assoc.name() : __assoc
})
_AttributeMap.update({
__tiploc.name() : __tiploc,
__category.name() : __category,
__isCancelled.name() : __isCancelled,
__isDeleted.name() : __isDeleted
})
Namespace.addCategoryObject('typeBinding', 'Association', Association)
class OR (pyxb.binding.basis.complexTypeDefinition):
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'OR')
_XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 134, 1)
_ElementMap = {}
_AttributeMap = {}
__tpl = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'tpl'), 'tpl', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OR_tpl', _ImportedBinding_darwinpush_xb_ct.TiplocType, required=True)
__tpl._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)
__tpl._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)
tpl = property(__tpl.value, __tpl.set, None, 'TIPLOC')
__act = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'act'), 'act', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OR_act', _ImportedBinding_darwinpush_xb_ct.ActivityType, unicode_default=' ')
__act._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)
__act._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)
act = property(__act.value, __act.set, None, 'Current Activity Codes')
__planAct = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'planAct'), 'planAct', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OR_planAct', _ImportedBinding_darwinpush_xb_ct.ActivityType)
__planAct._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)
__planAct._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)
planAct = property(__planAct.value, __planAct.set, None, 'Planned Activity Codes (if different to current activities)')
__can = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'can'), 'can', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OR_can', pyxb.binding.datatypes.boolean, unicode_default='false')
__can._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)
__can._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)
can = property(__can.value, __can.set, None, 'Cancelled')
__plat = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'plat'), 'plat', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OR_plat', _ImportedBinding_darwinpush_xb_ct.PlatformType)
__plat._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)
__plat._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)
plat = property(__plat.value, __plat.set, None, 'Platform number')
__pta = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'pta'), 'pta', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OR_pta', _ImportedBinding_darwinpush_xb_ct.RTTITimeType)
__pta._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 123, 2)
__pta._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 123, 2)
pta = property(__pta.value, __pta.set, None, 'Public Scheduled Time of Arrival')
__ptd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'ptd'), 'ptd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OR_ptd', _ImportedBinding_darwinpush_xb_ct.RTTITimeType)
__ptd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 128, 2)
__ptd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 128, 2)
ptd = property(__ptd.value, __ptd.set, None, 'Public Scheduled Time of Departure')
__wta = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wta'), 'wta', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OR_wta', _ImportedBinding_darwinpush_xb_ct.WTimeType)
__wta._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 140, 2)
__wta._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 140, 2)
wta = property(__wta.value, __wta.set, None, 'Working Scheduled Time of Arrival')
__wtd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wtd'), 'wtd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OR_wtd', _ImportedBinding_darwinpush_xb_ct.WTimeType, required=True)
__wtd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 145, 2)
__wtd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 145, 2)
wtd = property(__wtd.value, __wtd.set, None, 'Working Scheduled Time of Departure')
__fd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'fd'), 'fd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OR_fd', _ImportedBinding_darwinpush_xb_ct.TiplocType)
__fd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 150, 2)
__fd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 150, 2)
fd = property(__fd.value, __fd.set, None, 'TIPLOC of False Destination to be used at this location')
_ElementMap.update({
})
_AttributeMap.update({
__tpl.name() : __tpl,
__act.name() : __act,
__planAct.name() : __planAct,
__can.name() : __can,
__plat.name() : __plat,
__pta.name() : __pta,
__ptd.name() : __ptd,
__wta.name() : __wta,
__wtd.name() : __wtd,
__fd.name() : __fd
})
Namespace.addCategoryObject('typeBinding', 'OR', OR)
class OPOR (pyxb.binding.basis.complexTypeDefinition):
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'OPOR')
_XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 156, 1)
_ElementMap = {}
_AttributeMap = {}
__tpl = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'tpl'), 'tpl', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPOR_tpl', _ImportedBinding_darwinpush_xb_ct.TiplocType, required=True)
__tpl._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)
__tpl._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)
tpl = property(__tpl.value, __tpl.set, None, 'TIPLOC')
__act = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'act'), 'act', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPOR_act', _ImportedBinding_darwinpush_xb_ct.ActivityType, unicode_default=' ')
__act._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)
__act._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)
act = property(__act.value, __act.set, None, 'Current Activity Codes')
__planAct = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'planAct'), 'planAct', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPOR_planAct', _ImportedBinding_darwinpush_xb_ct.ActivityType)
__planAct._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)
__planAct._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)
planAct = property(__planAct.value, __planAct.set, None, 'Planned Activity Codes (if different to current activities)')
__can = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'can'), 'can', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPOR_can', pyxb.binding.datatypes.boolean, unicode_default='false')
__can._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)
__can._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)
can = property(__can.value, __can.set, None, 'Cancelled')
__plat = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'plat'), 'plat', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPOR_plat', _ImportedBinding_darwinpush_xb_ct.PlatformType)
__plat._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)
__plat._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)
plat = property(__plat.value, __plat.set, None, 'Platform number')
__wta = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wta'), 'wta', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPOR_wta', _ImportedBinding_darwinpush_xb_ct.WTimeType)
__wta._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 161, 2)
__wta._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 161, 2)
wta = property(__wta.value, __wta.set, None, 'Working Scheduled Time of Arrival')
__wtd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wtd'), 'wtd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPOR_wtd', _ImportedBinding_darwinpush_xb_ct.WTimeType, required=True)
__wtd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 166, 2)
__wtd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 166, 2)
wtd = property(__wtd.value, __wtd.set, None, 'Working Scheduled Time of Departure')
_ElementMap.update({
})
_AttributeMap.update({
__tpl.name() : __tpl,
__act.name() : __act,
__planAct.name() : __planAct,
__can.name() : __can,
__plat.name() : __plat,
__wta.name() : __wta,
__wtd.name() : __wtd
})
Namespace.addCategoryObject('typeBinding', 'OPOR', OPOR)
# Complex type {http://www.thalesgroup.com/rtti/XmlTimetable/v8}IP with content type EMPTY
class IP (pyxb.binding.basis.complexTypeDefinition):
    """PyXB binding for schema complex type ``IP`` (content type EMPTY).

    Auto-generated from rttiCTTSchema_v8.xsd — do not hand-edit the wiring.
    Represents an intermediate calling location of a schedule (see the
    ``Schedule.IP`` element, documented as 'Intermediate calling location').
    Attribute-only: working arrival/departure times (wta/wtd) are required;
    public times (pta/ptd) are optional.
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'IP')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 172, 1)
    # Populated via .update(...) at the bottom of the class body.
    _ElementMap = {}
    _AttributeMap = {}
    # Attribute tpl uses Python identifier tpl
    __tpl = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'tpl'), 'tpl', '__httpwww_thalesgroup_comrttiXmlTimetablev8_IP_tpl', _ImportedBinding_darwinpush_xb_ct.TiplocType, required=True)
    __tpl._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)
    __tpl._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)
    tpl = property(__tpl.value, __tpl.set, None, 'TIPLOC')
    # Attribute act uses Python identifier act
    __act = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'act'), 'act', '__httpwww_thalesgroup_comrttiXmlTimetablev8_IP_act', _ImportedBinding_darwinpush_xb_ct.ActivityType, unicode_default=' ')
    __act._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)
    __act._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)
    act = property(__act.value, __act.set, None, 'Current Activity Codes')
    # Attribute planAct uses Python identifier planAct
    __planAct = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'planAct'), 'planAct', '__httpwww_thalesgroup_comrttiXmlTimetablev8_IP_planAct', _ImportedBinding_darwinpush_xb_ct.ActivityType)
    __planAct._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)
    __planAct._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)
    planAct = property(__planAct.value, __planAct.set, None, 'Planned Activity Codes (if different to current activities)')
    # Attribute can uses Python identifier can
    __can = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'can'), 'can', '__httpwww_thalesgroup_comrttiXmlTimetablev8_IP_can', pyxb.binding.datatypes.boolean, unicode_default='false')
    __can._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)
    __can._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)
    can = property(__can.value, __can.set, None, 'Cancelled')
    # Attribute plat uses Python identifier plat
    __plat = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'plat'), 'plat', '__httpwww_thalesgroup_comrttiXmlTimetablev8_IP_plat', _ImportedBinding_darwinpush_xb_ct.PlatformType)
    __plat._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)
    __plat._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)
    plat = property(__plat.value, __plat.set, None, 'Platform number')
    # Attribute pta uses Python identifier pta
    __pta = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'pta'), 'pta', '__httpwww_thalesgroup_comrttiXmlTimetablev8_IP_pta', _ImportedBinding_darwinpush_xb_ct.RTTITimeType)
    __pta._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 123, 2)
    __pta._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 123, 2)
    pta = property(__pta.value, __pta.set, None, 'Public Scheduled Time of Arrival')
    # Attribute ptd uses Python identifier ptd
    __ptd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'ptd'), 'ptd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_IP_ptd', _ImportedBinding_darwinpush_xb_ct.RTTITimeType)
    __ptd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 128, 2)
    __ptd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 128, 2)
    ptd = property(__ptd.value, __ptd.set, None, 'Public Scheduled Time of Departure')
    # Attribute wta uses Python identifier wta
    __wta = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wta'), 'wta', '__httpwww_thalesgroup_comrttiXmlTimetablev8_IP_wta', _ImportedBinding_darwinpush_xb_ct.WTimeType, required=True)
    __wta._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 178, 2)
    __wta._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 178, 2)
    wta = property(__wta.value, __wta.set, None, 'Working Scheduled Time of Arrival')
    # Attribute wtd uses Python identifier wtd
    __wtd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wtd'), 'wtd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_IP_wtd', _ImportedBinding_darwinpush_xb_ct.WTimeType, required=True)
    __wtd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 183, 2)
    __wtd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 183, 2)
    wtd = property(__wtd.value, __wtd.set, None, 'Working Scheduled Time of Departure')
    # Attribute rdelay uses Python identifier rdelay
    __rdelay = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'rdelay'), 'rdelay', '__httpwww_thalesgroup_comrttiXmlTimetablev8_IP_rdelay', _ImportedBinding_darwinpush_xb_ct.DelayValueType, unicode_default='0')
    __rdelay._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 188, 2)
    __rdelay._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 188, 2)
    rdelay = property(__rdelay.value, __rdelay.set, None, "A delay value that is implied by a change to the service's route.")
    # Attribute fd uses Python identifier fd
    __fd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'fd'), 'fd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_IP_fd', _ImportedBinding_darwinpush_xb_ct.TiplocType)
    __fd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 193, 2)
    __fd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 193, 2)
    fd = property(__fd.value, __fd.set, None, 'TIPLOC of False Destination to be used at this location')
    _ElementMap.update({
    })
    _AttributeMap.update({
        __tpl.name() : __tpl,
        __act.name() : __act,
        __planAct.name() : __planAct,
        __can.name() : __can,
        __plat.name() : __plat,
        __pta.name() : __pta,
        __ptd.name() : __ptd,
        __wta.name() : __wta,
        __wtd.name() : __wtd,
        __rdelay.name() : __rdelay,
        __fd.name() : __fd
    })
# Register the binding so pyxb can resolve the type by its schema name.
Namespace.addCategoryObject('typeBinding', 'IP', IP)
# Complex type {http://www.thalesgroup.com/rtti/XmlTimetable/v8}OPIP with content type EMPTY
class OPIP (pyxb.binding.basis.complexTypeDefinition):
    """PyXB binding for schema complex type ``OPIP`` (content type EMPTY).

    Auto-generated from rttiCTTSchema_v8.xsd — do not hand-edit the wiring.
    Presumably an operational intermediate calling location (the ``OP``
    prefix mirrors OPOR = 'Operational origin location') — confirm against
    the schema. Working times wta/wtd are required; there are no public
    pta/ptd attributes on this type.
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'OPIP')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 199, 1)
    # Populated via .update(...) at the bottom of the class body.
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Attribute tpl uses Python identifier tpl
    __tpl = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'tpl'), 'tpl', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPIP_tpl', _ImportedBinding_darwinpush_xb_ct.TiplocType, required=True)
    __tpl._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)
    __tpl._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)
    tpl = property(__tpl.value, __tpl.set, None, 'TIPLOC')
    # Attribute act uses Python identifier act
    __act = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'act'), 'act', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPIP_act', _ImportedBinding_darwinpush_xb_ct.ActivityType, unicode_default=' ')
    __act._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)
    __act._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)
    act = property(__act.value, __act.set, None, 'Current Activity Codes')
    # Attribute planAct uses Python identifier planAct
    __planAct = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'planAct'), 'planAct', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPIP_planAct', _ImportedBinding_darwinpush_xb_ct.ActivityType)
    __planAct._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)
    __planAct._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)
    planAct = property(__planAct.value, __planAct.set, None, 'Planned Activity Codes (if different to current activities)')
    # Attribute can uses Python identifier can
    __can = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'can'), 'can', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPIP_can', pyxb.binding.datatypes.boolean, unicode_default='false')
    __can._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)
    __can._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)
    can = property(__can.value, __can.set, None, 'Cancelled')
    # Attribute plat uses Python identifier plat
    __plat = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'plat'), 'plat', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPIP_plat', _ImportedBinding_darwinpush_xb_ct.PlatformType)
    __plat._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)
    __plat._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)
    plat = property(__plat.value, __plat.set, None, 'Platform number')
    # Attribute wta uses Python identifier wta
    __wta = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wta'), 'wta', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPIP_wta', _ImportedBinding_darwinpush_xb_ct.WTimeType, required=True)
    __wta._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 204, 2)
    __wta._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 204, 2)
    wta = property(__wta.value, __wta.set, None, 'Working Scheduled Time of Arrival')
    # Attribute wtd uses Python identifier wtd
    __wtd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wtd'), 'wtd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPIP_wtd', _ImportedBinding_darwinpush_xb_ct.WTimeType, required=True)
    __wtd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 209, 2)
    __wtd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 209, 2)
    wtd = property(__wtd.value, __wtd.set, None, 'Working Scheduled Time of Departure')
    # Attribute rdelay uses Python identifier rdelay
    __rdelay = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'rdelay'), 'rdelay', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPIP_rdelay', _ImportedBinding_darwinpush_xb_ct.DelayValueType, unicode_default='0')
    __rdelay._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 214, 2)
    __rdelay._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 214, 2)
    rdelay = property(__rdelay.value, __rdelay.set, None, "A delay value that is implied by a change to the service's route.")
    _ElementMap.update({
    })
    _AttributeMap.update({
        __tpl.name() : __tpl,
        __act.name() : __act,
        __planAct.name() : __planAct,
        __can.name() : __can,
        __plat.name() : __plat,
        __wta.name() : __wta,
        __wtd.name() : __wtd,
        __rdelay.name() : __rdelay
    })
# Register the binding so pyxb can resolve the type by its schema name.
Namespace.addCategoryObject('typeBinding', 'OPIP', OPIP)
# Complex type {http://www.thalesgroup.com/rtti/XmlTimetable/v8}PP with content type EMPTY
class PP (pyxb.binding.basis.complexTypeDefinition):
    """PyXB binding for schema complex type ``PP`` (content type EMPTY).

    Auto-generated from rttiCTTSchema_v8.xsd — do not hand-edit the wiring.
    Carries a single required working passing time (``wtp``) rather than
    arrival/departure times, so presumably an intermediate passing point —
    confirm against the schema.
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'PP')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 220, 1)
    # Populated via .update(...) at the bottom of the class body.
    _ElementMap = {}
    _AttributeMap = {}
    # Attribute tpl uses Python identifier tpl
    __tpl = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'tpl'), 'tpl', '__httpwww_thalesgroup_comrttiXmlTimetablev8_PP_tpl', _ImportedBinding_darwinpush_xb_ct.TiplocType, required=True)
    __tpl._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)
    __tpl._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)
    tpl = property(__tpl.value, __tpl.set, None, 'TIPLOC')
    # Attribute act uses Python identifier act
    __act = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'act'), 'act', '__httpwww_thalesgroup_comrttiXmlTimetablev8_PP_act', _ImportedBinding_darwinpush_xb_ct.ActivityType, unicode_default=' ')
    __act._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)
    __act._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)
    act = property(__act.value, __act.set, None, 'Current Activity Codes')
    # Attribute planAct uses Python identifier planAct
    __planAct = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'planAct'), 'planAct', '__httpwww_thalesgroup_comrttiXmlTimetablev8_PP_planAct', _ImportedBinding_darwinpush_xb_ct.ActivityType)
    __planAct._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)
    __planAct._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)
    planAct = property(__planAct.value, __planAct.set, None, 'Planned Activity Codes (if different to current activities)')
    # Attribute can uses Python identifier can
    __can = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'can'), 'can', '__httpwww_thalesgroup_comrttiXmlTimetablev8_PP_can', pyxb.binding.datatypes.boolean, unicode_default='false')
    __can._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)
    __can._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)
    can = property(__can.value, __can.set, None, 'Cancelled')
    # Attribute plat uses Python identifier plat
    __plat = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'plat'), 'plat', '__httpwww_thalesgroup_comrttiXmlTimetablev8_PP_plat', _ImportedBinding_darwinpush_xb_ct.PlatformType)
    __plat._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)
    __plat._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)
    plat = property(__plat.value, __plat.set, None, 'Platform number')
    # Attribute wtp uses Python identifier wtp
    __wtp = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wtp'), 'wtp', '__httpwww_thalesgroup_comrttiXmlTimetablev8_PP_wtp', _ImportedBinding_darwinpush_xb_ct.WTimeType, required=True)
    __wtp._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 225, 2)
    __wtp._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 225, 2)
    wtp = property(__wtp.value, __wtp.set, None, 'Working Scheduled Time of Passing')
    # Attribute rdelay uses Python identifier rdelay
    __rdelay = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'rdelay'), 'rdelay', '__httpwww_thalesgroup_comrttiXmlTimetablev8_PP_rdelay', _ImportedBinding_darwinpush_xb_ct.DelayValueType, unicode_default='0')
    __rdelay._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 230, 2)
    __rdelay._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 230, 2)
    rdelay = property(__rdelay.value, __rdelay.set, None, "A delay value that is implied by a change to the service's route.")
    _ElementMap.update({
    })
    _AttributeMap.update({
        __tpl.name() : __tpl,
        __act.name() : __act,
        __planAct.name() : __planAct,
        __can.name() : __can,
        __plat.name() : __plat,
        __wtp.name() : __wtp,
        __rdelay.name() : __rdelay
    })
# Register the binding so pyxb can resolve the type by its schema name.
Namespace.addCategoryObject('typeBinding', 'PP', PP)
# Complex type {http://www.thalesgroup.com/rtti/XmlTimetable/v8}DT with content type EMPTY
class DT (pyxb.binding.basis.complexTypeDefinition):
    """PyXB binding for schema complex type ``DT`` (content type EMPTY).

    Auto-generated from rttiCTTSchema_v8.xsd — do not hand-edit the wiring.
    Presumably the destination location of a schedule (naming parallels
    OR = 'Origin location') — confirm against the schema. Unlike IP, here
    ``wta`` is required while ``wtd`` is optional.
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'DT')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 236, 1)
    # Populated via .update(...) at the bottom of the class body.
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Attribute tpl uses Python identifier tpl
    __tpl = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'tpl'), 'tpl', '__httpwww_thalesgroup_comrttiXmlTimetablev8_DT_tpl', _ImportedBinding_darwinpush_xb_ct.TiplocType, required=True)
    __tpl._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)
    __tpl._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)
    tpl = property(__tpl.value, __tpl.set, None, 'TIPLOC')
    # Attribute act uses Python identifier act
    __act = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'act'), 'act', '__httpwww_thalesgroup_comrttiXmlTimetablev8_DT_act', _ImportedBinding_darwinpush_xb_ct.ActivityType, unicode_default=' ')
    __act._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)
    __act._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)
    act = property(__act.value, __act.set, None, 'Current Activity Codes')
    # Attribute planAct uses Python identifier planAct
    __planAct = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'planAct'), 'planAct', '__httpwww_thalesgroup_comrttiXmlTimetablev8_DT_planAct', _ImportedBinding_darwinpush_xb_ct.ActivityType)
    __planAct._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)
    __planAct._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)
    planAct = property(__planAct.value, __planAct.set, None, 'Planned Activity Codes (if different to current activities)')
    # Attribute can uses Python identifier can
    __can = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'can'), 'can', '__httpwww_thalesgroup_comrttiXmlTimetablev8_DT_can', pyxb.binding.datatypes.boolean, unicode_default='false')
    __can._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)
    __can._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)
    can = property(__can.value, __can.set, None, 'Cancelled')
    # Attribute plat uses Python identifier plat
    __plat = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'plat'), 'plat', '__httpwww_thalesgroup_comrttiXmlTimetablev8_DT_plat', _ImportedBinding_darwinpush_xb_ct.PlatformType)
    __plat._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)
    __plat._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)
    plat = property(__plat.value, __plat.set, None, 'Platform number')
    # Attribute pta uses Python identifier pta
    __pta = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'pta'), 'pta', '__httpwww_thalesgroup_comrttiXmlTimetablev8_DT_pta', _ImportedBinding_darwinpush_xb_ct.RTTITimeType)
    __pta._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 123, 2)
    __pta._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 123, 2)
    pta = property(__pta.value, __pta.set, None, 'Public Scheduled Time of Arrival')
    # Attribute ptd uses Python identifier ptd
    __ptd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'ptd'), 'ptd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_DT_ptd', _ImportedBinding_darwinpush_xb_ct.RTTITimeType)
    __ptd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 128, 2)
    __ptd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 128, 2)
    ptd = property(__ptd.value, __ptd.set, None, 'Public Scheduled Time of Departure')
    # Attribute wta uses Python identifier wta
    __wta = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wta'), 'wta', '__httpwww_thalesgroup_comrttiXmlTimetablev8_DT_wta', _ImportedBinding_darwinpush_xb_ct.WTimeType, required=True)
    __wta._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 242, 2)
    __wta._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 242, 2)
    wta = property(__wta.value, __wta.set, None, 'Working Scheduled Time of Arrival')
    # Attribute wtd uses Python identifier wtd
    __wtd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wtd'), 'wtd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_DT_wtd', _ImportedBinding_darwinpush_xb_ct.WTimeType)
    __wtd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 247, 2)
    __wtd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 247, 2)
    wtd = property(__wtd.value, __wtd.set, None, 'Working Scheduled Time of Departure')
    # Attribute rdelay uses Python identifier rdelay
    __rdelay = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'rdelay'), 'rdelay', '__httpwww_thalesgroup_comrttiXmlTimetablev8_DT_rdelay', _ImportedBinding_darwinpush_xb_ct.DelayValueType, unicode_default='0')
    __rdelay._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 252, 2)
    __rdelay._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 252, 2)
    rdelay = property(__rdelay.value, __rdelay.set, None, "A delay value that is implied by a change to the service's route.")
    _ElementMap.update({
    })
    _AttributeMap.update({
        __tpl.name() : __tpl,
        __act.name() : __act,
        __planAct.name() : __planAct,
        __can.name() : __can,
        __plat.name() : __plat,
        __pta.name() : __pta,
        __ptd.name() : __ptd,
        __wta.name() : __wta,
        __wtd.name() : __wtd,
        __rdelay.name() : __rdelay
    })
# Register the binding so pyxb can resolve the type by its schema name.
Namespace.addCategoryObject('typeBinding', 'DT', DT)
# Complex type {http://www.thalesgroup.com/rtti/XmlTimetable/v8}OPDT with content type EMPTY
class OPDT (pyxb.binding.basis.complexTypeDefinition):
    """PyXB binding for schema complex type ``OPDT`` (content type EMPTY).

    Auto-generated from rttiCTTSchema_v8.xsd — do not hand-edit the wiring.
    Presumably an operational destination location (``OP`` prefix mirrors
    OPOR = 'Operational origin location'; attribute set mirrors DT minus
    the public pta/ptd times) — confirm against the schema. ``wta`` is
    required; ``wtd`` is optional.
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'OPDT')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 258, 1)
    # Populated via .update(...) at the bottom of the class body.
    _ElementMap = {}
    _AttributeMap = {}
    # Attribute tpl uses Python identifier tpl
    __tpl = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'tpl'), 'tpl', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPDT_tpl', _ImportedBinding_darwinpush_xb_ct.TiplocType, required=True)
    __tpl._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)
    __tpl._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 93, 2)
    tpl = property(__tpl.value, __tpl.set, None, 'TIPLOC')
    # Attribute act uses Python identifier act
    __act = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'act'), 'act', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPDT_act', _ImportedBinding_darwinpush_xb_ct.ActivityType, unicode_default=' ')
    __act._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)
    __act._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 98, 2)
    act = property(__act.value, __act.set, None, 'Current Activity Codes')
    # Attribute planAct uses Python identifier planAct
    __planAct = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'planAct'), 'planAct', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPDT_planAct', _ImportedBinding_darwinpush_xb_ct.ActivityType)
    __planAct._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)
    __planAct._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 103, 2)
    planAct = property(__planAct.value, __planAct.set, None, 'Planned Activity Codes (if different to current activities)')
    # Attribute can uses Python identifier can
    __can = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'can'), 'can', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPDT_can', pyxb.binding.datatypes.boolean, unicode_default='false')
    __can._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)
    __can._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 108, 2)
    can = property(__can.value, __can.set, None, 'Cancelled')
    # Attribute plat uses Python identifier plat
    __plat = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'plat'), 'plat', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPDT_plat', _ImportedBinding_darwinpush_xb_ct.PlatformType)
    __plat._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)
    __plat._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 113, 2)
    plat = property(__plat.value, __plat.set, None, 'Platform number')
    # Attribute wta uses Python identifier wta
    __wta = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wta'), 'wta', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPDT_wta', _ImportedBinding_darwinpush_xb_ct.WTimeType, required=True)
    __wta._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 263, 2)
    __wta._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 263, 2)
    wta = property(__wta.value, __wta.set, None, 'Working Scheduled Time of Arrival')
    # Attribute wtd uses Python identifier wtd
    __wtd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wtd'), 'wtd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPDT_wtd', _ImportedBinding_darwinpush_xb_ct.WTimeType)
    __wtd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 268, 2)
    __wtd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 268, 2)
    wtd = property(__wtd.value, __wtd.set, None, 'Working Scheduled Time of Departure')
    # Attribute rdelay uses Python identifier rdelay
    __rdelay = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'rdelay'), 'rdelay', '__httpwww_thalesgroup_comrttiXmlTimetablev8_OPDT_rdelay', _ImportedBinding_darwinpush_xb_ct.DelayValueType, unicode_default='0')
    __rdelay._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 273, 2)
    __rdelay._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 273, 2)
    rdelay = property(__rdelay.value, __rdelay.set, None, "A delay value that is implied by a change to the service's route.")
    _ElementMap.update({
    })
    _AttributeMap.update({
        __tpl.name() : __tpl,
        __act.name() : __act,
        __planAct.name() : __planAct,
        __can.name() : __can,
        __plat.name() : __plat,
        __wta.name() : __wta,
        __wtd.name() : __wtd,
        __rdelay.name() : __rdelay
    })
# Register the binding so pyxb can resolve the type by its schema name.
Namespace.addCategoryObject('typeBinding', 'OPDT', OPDT)
# Complex type {http://www.thalesgroup.com/rtti/XmlTimetable/v8}Schedule with content type ELEMENT_ONLY
class Schedule (pyxb.binding.basis.complexTypeDefinition):
    """Binding for complex type {http://www.thalesgroup.com/rtti/XmlTimetable/v8}Schedule.

    Auto-generated by PyXB from rttiCTTSchema_v8.xsd -- do not edit by hand.
    Models the schedule of a single train service: a sequence of calling-point
    child elements (OR/OPOR/IP/OPIP/PP/DT/OPDT), an optional cancelReason, and
    identifying attributes (rid, uid, trainId, ssd, toc, ...).
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'Schedule')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 279, 1)
    # Per-class element/attribute maps populated at the bottom of the class body.
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    
    # Element {http://www.thalesgroup.com/rtti/XmlTimetable/v8}OR uses Python identifier OR
    __OR = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'OR'), 'OR', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_httpwww_thalesgroup_comrttiXmlTimetablev8OR', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 285, 4), )
    OR = property(__OR.value, __OR.set, None, 'Origin location')
    
    # Element {http://www.thalesgroup.com/rtti/XmlTimetable/v8}OPOR uses Python identifier OPOR
    __OPOR = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'OPOR'), 'OPOR', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_httpwww_thalesgroup_comrttiXmlTimetablev8OPOR', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 290, 4), )
    OPOR = property(__OPOR.value, __OPOR.set, None, 'Operational origin location')
    
    # Element {http://www.thalesgroup.com/rtti/XmlTimetable/v8}IP uses Python identifier IP
    __IP = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'IP'), 'IP', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_httpwww_thalesgroup_comrttiXmlTimetablev8IP', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 295, 4), )
    IP = property(__IP.value, __IP.set, None, 'Intermediate calling location')
    
    # Element {http://www.thalesgroup.com/rtti/XmlTimetable/v8}OPIP uses Python identifier OPIP
    __OPIP = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'OPIP'), 'OPIP', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_httpwww_thalesgroup_comrttiXmlTimetablev8OPIP', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 300, 4), )
    OPIP = property(__OPIP.value, __OPIP.set, None, 'Intermediate operational calling location')
    
    # Element {http://www.thalesgroup.com/rtti/XmlTimetable/v8}PP uses Python identifier PP
    __PP = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'PP'), 'PP', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_httpwww_thalesgroup_comrttiXmlTimetablev8PP', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 305, 4), )
    PP = property(__PP.value, __PP.set, None, 'Passing location')
    
    # Element {http://www.thalesgroup.com/rtti/XmlTimetable/v8}DT uses Python identifier DT
    __DT = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'DT'), 'DT', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_httpwww_thalesgroup_comrttiXmlTimetablev8DT', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 310, 4), )
    DT = property(__DT.value, __DT.set, None, 'Destination location')
    
    # Element {http://www.thalesgroup.com/rtti/XmlTimetable/v8}OPDT uses Python identifier OPDT
    __OPDT = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'OPDT'), 'OPDT', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_httpwww_thalesgroup_comrttiXmlTimetablev8OPDT', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 315, 4), )
    OPDT = property(__OPDT.value, __OPDT.set, None, 'Operational destination location')
    
    # Element {http://www.thalesgroup.com/rtti/XmlTimetable/v8}cancelReason uses Python identifier cancelReason
    __cancelReason = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'cancelReason'), 'cancelReason', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_httpwww_thalesgroup_comrttiXmlTimetablev8cancelReason', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 321, 3), )
    cancelReason = property(__cancelReason.value, __cancelReason.set, None, 'Reason for cancellation of service/location')
    
    # Attribute rid uses Python identifier rid
    __rid = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'rid'), 'rid', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_rid', _ImportedBinding_darwinpush_xb_ct.RIDType, required=True)
    __rid._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 327, 2)
    __rid._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 327, 2)
    rid = property(__rid.value, __rid.set, None, 'RTTI unique Train ID')
    
    # Attribute uid uses Python identifier uid
    __uid = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'uid'), 'uid', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_uid', _ImportedBinding_darwinpush_xb_ct.UIDType, required=True)
    __uid._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 332, 2)
    __uid._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 332, 2)
    uid = property(__uid.value, __uid.set, None, 'Train UID')
    
    # Attribute trainId uses Python identifier trainId
    __trainId = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'trainId'), 'trainId', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_trainId', _ImportedBinding_darwinpush_xb_ct.TrainIdType, required=True)
    __trainId._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 337, 2)
    __trainId._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 337, 2)
    trainId = property(__trainId.value, __trainId.set, None, 'Train ID (Headcode)')
    
    # Attribute ssd uses Python identifier ssd
    __ssd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'ssd'), 'ssd', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_ssd', _ImportedBinding_darwinpush_xb_ct.RTTIDateType, required=True)
    __ssd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 342, 2)
    __ssd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 342, 2)
    ssd = property(__ssd.value, __ssd.set, None, 'Scheduled Start Date')
    
    # Attribute toc uses Python identifier toc
    __toc = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'toc'), 'toc', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_toc', _ImportedBinding_darwinpush_xb_ct.TOCType, required=True)
    __toc._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 347, 2)
    __toc._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 347, 2)
    toc = property(__toc.value, __toc.set, None, 'ATOC Code')
    
    # Attribute status uses Python identifier status
    __status = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'status'), 'status', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_status', _ImportedBinding_darwinpush_xb_ct.CIFTrainStatusType, unicode_default='P')
    __status._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 352, 2)
    __status._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 352, 2)
    status = property(__status.value, __status.set, None, 'Type of service, i.e. Train/Bus/Ship.')
    
    # Attribute trainCat uses Python identifier trainCat
    __trainCat = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'trainCat'), 'trainCat', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_trainCat', _ImportedBinding_darwinpush_xb_ct.CIFTrainCategoryType, unicode_default='OO')
    __trainCat._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 357, 2)
    __trainCat._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 357, 2)
    trainCat = property(__trainCat.value, __trainCat.set, None, 'Category of service.')
    
    # Attribute isPassengerSvc uses Python identifier isPassengerSvc
    __isPassengerSvc = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'isPassengerSvc'), 'isPassengerSvc', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_isPassengerSvc', pyxb.binding.datatypes.boolean, unicode_default='true')
    __isPassengerSvc._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 362, 2)
    __isPassengerSvc._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 362, 2)
    isPassengerSvc = property(__isPassengerSvc.value, __isPassengerSvc.set, None, 'True if Darwin classifies the train category as a passenger service.')
    
    # Attribute deleted uses Python identifier deleted
    __deleted = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'deleted'), 'deleted', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_deleted', pyxb.binding.datatypes.boolean, unicode_default='false')
    __deleted._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 367, 2)
    __deleted._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 367, 2)
    deleted = property(__deleted.value, __deleted.set, None, 'Service has been deleted and should not be used/displayed.')
    
    # Attribute isCharter uses Python identifier isCharter
    __isCharter = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'isCharter'), 'isCharter', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_isCharter', pyxb.binding.datatypes.boolean, unicode_default='false')
    __isCharter._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 372, 2)
    __isCharter._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 372, 2)
    isCharter = property(__isCharter.value, __isCharter.set, None, 'Indicates if this service is a charter service.')
    
    # Attribute qtrain uses Python identifier qtrain
    __qtrain = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'qtrain'), 'qtrain', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_qtrain', pyxb.binding.datatypes.boolean, unicode_default='false')
    __qtrain._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 377, 2)
    __qtrain._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 377, 2)
    qtrain = property(__qtrain.value, __qtrain.set, None, 'True if this is a Q Train (runs as required) that has not yet been activated. Note that a Q Train that has been activated before the XML Timetable file has been built will not have this attribute set true.')
    
    # Attribute can uses Python identifier can
    __can = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'can'), 'can', '__httpwww_thalesgroup_comrttiXmlTimetablev8_Schedule_can', pyxb.binding.datatypes.boolean, unicode_default='false')
    __can._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 382, 2)
    __can._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 382, 2)
    can = property(__can.value, __can.set, None, None)
    
    # Register the declarations above in the maps the PyXB parser consults.
    _ElementMap.update({
        __OR.name() : __OR,
        __OPOR.name() : __OPOR,
        __IP.name() : __IP,
        __OPIP.name() : __OPIP,
        __PP.name() : __PP,
        __DT.name() : __DT,
        __OPDT.name() : __OPDT,
        __cancelReason.name() : __cancelReason
    })
    _AttributeMap.update({
        __rid.name() : __rid,
        __uid.name() : __uid,
        __trainId.name() : __trainId,
        __ssd.name() : __ssd,
        __toc.name() : __toc,
        __status.name() : __status,
        __trainCat.name() : __trainCat,
        __isPassengerSvc.name() : __isPassengerSvc,
        __deleted.name() : __deleted,
        __isCharter.name() : __isCharter,
        __qtrain.name() : __qtrain,
        __can.name() : __can
    })
# Register the Schedule complex-type binding in the namespace category map.
Namespace.addCategoryObject('typeBinding', 'Schedule', Schedule)
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON (pyxb.binding.basis.complexTypeDefinition):
    """Binding for the anonymous complex type of the PportTimetable root element.

    Auto-generated by PyXB from rttiCTTSchema_v8.xsd -- do not edit by hand.
    Holds the full timetable content: repeated Journey (Schedule) elements,
    repeated Association elements, and the mandatory timetableID attribute.
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    # Anonymous type: no expanded name of its own.
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 389, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    
    # Element {http://www.thalesgroup.com/rtti/XmlTimetable/v8}Journey uses Python identifier Journey
    __Journey = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Journey'), 'Journey', '__httpwww_thalesgroup_comrttiXmlTimetablev8_CTD_ANON_httpwww_thalesgroup_comrttiXmlTimetablev8Journey', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 391, 4), )
    Journey = property(__Journey.value, __Journey.set, None, 'Schedule of a service that exists in Darwin')
    
    # Element {http://www.thalesgroup.com/rtti/XmlTimetable/v8}Association uses Python identifier Association
    __Association = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Association'), 'Association', '__httpwww_thalesgroup_comrttiXmlTimetablev8_CTD_ANON_httpwww_thalesgroup_comrttiXmlTimetablev8Association', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 396, 4), )
    Association = property(__Association.value, __Association.set, None, 'An association between two schedules')
    
    # Attribute timetableID uses Python identifier timetableID
    __timetableID = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'timetableID'), 'timetableID', '__httpwww_thalesgroup_comrttiXmlTimetablev8_CTD_ANON_timetableID', _ImportedBinding_darwinpush_xb_ct.TimetableIDType, required=True)
    __timetableID._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 402, 3)
    __timetableID._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 402, 3)
    timetableID = property(__timetableID.value, __timetableID.set, None, 'The ID of the RTTI timetable from which this XML timetable was generated.')
    
    # Register the declarations above in the maps the PyXB parser consults.
    _ElementMap.update({
        __Journey.name() : __Journey,
        __Association.name() : __Association
    })
    _AttributeMap.update({
        __timetableID.name() : __timetableID
    })
# Document root element of the Push Port timetable, bound to the anonymous
# complex type CTD_ANON defined above.
PportTimetable = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'PportTimetable'), CTD_ANON, documentation='Push Port Timetable Schema', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 385, 1))
Namespace.addCategoryObject('elementBinding', PportTimetable.name().localName(), PportTimetable)

# Register the two child elements ('main' and 'assoc') in Association's scope.
Association._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'main'), AssocService, scope=Association, documentation='The through, previous working or link-to service', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 57, 3)))
Association._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'assoc'), AssocService, scope=Association, documentation='The starting, terminating, subsequent working or link-from service', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 62, 3)))
def _BuildAutomaton ():
    """Build the content-model automaton for Association: 'main' then 'assoc'."""
    # One-shot helper: unbind the module-level name so it cannot run twice.
    global _BuildAutomaton
    del _BuildAutomaton
    import pyxb.utils.fac as fac

    xsd_path = '/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd'

    def element_state(tag, line, initial, final_update):
        # Wrap the element use for *tag* in a FAC state at the given XSD line.
        use = Association._UseForTag(pyxb.namespace.ExpandedName(Namespace, tag))
        symbol = pyxb.binding.content.ElementUse(use, pyxb.utils.utility.Location(xsd_path, line, 3))
        return fac.State(symbol, is_initial=initial, final_update=final_update,
                         is_unordered_catenation=False)

    # 'main' starts the content; it is not accepting (final_update is None).
    st_main = element_state('main', 57, True, None)
    # 'assoc' follows and is accepting with no counter updates (empty set).
    st_assoc = element_state('assoc', 62, False, set())
    st_main._set_transitionSet([fac.Transition(st_assoc, [])])
    st_assoc._set_transitionSet([])
    return fac.Automaton([st_main, st_assoc], set(), False, containing_state=None)
# Install the content-model automaton built by the one-shot helper above.
Association._Automaton = _BuildAutomaton()

# Register Schedule's calling-point and cancellation child elements in its scope.
Schedule._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'OR'), OR, scope=Schedule, documentation='Origin location', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 285, 4)))
Schedule._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'OPOR'), OPOR, scope=Schedule, documentation='Operational origin location', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 290, 4)))
Schedule._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'IP'), IP, scope=Schedule, documentation='Intermediate calling location', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 295, 4)))
Schedule._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'OPIP'), OPIP, scope=Schedule, documentation='Intermediate operational calling location', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 300, 4)))
Schedule._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'PP'), PP, scope=Schedule, documentation='Passing location', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 305, 4)))
Schedule._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'DT'), DT, scope=Schedule, documentation='Destination location', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 310, 4)))
Schedule._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'OPDT'), OPDT, scope=Schedule, documentation='Operational destination location', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 315, 4)))
Schedule._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'cancelReason'), _ImportedBinding_darwinpush_xb_ct.DisruptionReasonType, scope=Schedule, documentation='Reason for cancellation of service/location', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 321, 3)))
def _BuildAutomaton_ ():
    """Build the content-model automaton for Schedule.

    Two or more calling-point elements (OR/OPOR/IP/OPIP/PP/DT/OPDT, any
    order), optionally followed by a single cancelReason element.
    """
    # One-shot helper: unbind the module-level name so it cannot run twice.
    global _BuildAutomaton_
    del _BuildAutomaton_
    import pyxb.utils.fac as fac

    xsd_path = '/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd'

    # cc_0 counts the repeated calling-point choice (minOccurs=2, unbounded).
    cc_0 = fac.CounterCondition(min=2, max=None, metadata=pyxb.utils.utility.Location(xsd_path, 284, 3))
    # cc_1 bounds the optional trailing cancelReason element (0..1).
    cc_1 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location(xsd_path, 321, 3))
    counters = {cc_0, cc_1}

    def make_state(tag, line, col, initial, counter):
        # A FAC state wrapping the element use for *tag*; accepting via a
        # non-incrementing update on *counter*.
        symbol = pyxb.binding.content.ElementUse(
            Schedule._UseForTag(pyxb.namespace.ExpandedName(Namespace, tag)),
            pyxb.utils.utility.Location(xsd_path, line, col))
        return fac.State(symbol, is_initial=initial,
                         final_update=set([fac.UpdateInstruction(counter, False)]),
                         is_unordered_catenation=False)

    point_specs = (('OR', 285), ('OPOR', 290), ('IP', 295), ('OPIP', 300),
                   ('PP', 305), ('DT', 310), ('OPDT', 315))
    point_states = [make_state(tag, line, 4, True, cc_0) for tag, line in point_specs]
    cancel_state = make_state('cancelReason', 321, 3, False, cc_1)
    states = point_states + [cancel_state]

    # From any calling point: loop among all calling points (incrementing
    # cc_0) or move on to cancelReason (closing out cc_0).
    for origin in point_states:
        transitions = [fac.Transition(dest, [fac.UpdateInstruction(cc_0, True)])
                       for dest in point_states]
        transitions.append(fac.Transition(cancel_state, [fac.UpdateInstruction(cc_0, False)]))
        origin._set_transitionSet(transitions)
    # cancelReason may only self-loop; cc_1's max of 1 forbids a second one.
    cancel_state._set_transitionSet([fac.Transition(cancel_state, [fac.UpdateInstruction(cc_1, True)])])
    return fac.Automaton(states, counters, False, containing_state=None)
# Install Schedule's content-model automaton.
Schedule._Automaton = _BuildAutomaton_()

# Register the root timetable type's child elements in its scope.
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Journey'), Schedule, scope=CTD_ANON, documentation='Schedule of a service that exists in Darwin', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 391, 4)))
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Association'), Association, scope=CTD_ANON, documentation='An association between two schedules', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd', 396, 4)))
def _BuildAutomaton_2 ():
    """Build the content-model automaton for the PportTimetable root type.

    Zero or more Journey elements followed by zero or more Association
    elements; the whole content may be empty.
    """
    # One-shot helper: unbind the module-level name so it cannot run twice.
    global _BuildAutomaton_2
    del _BuildAutomaton_2
    import pyxb.utils.fac as fac

    xsd_path = '/home/gberg/code/src/fstr/darwinpush/xsd/rttiCTTSchema_v8.xsd'

    def counter_for(line):
        # Unbounded optional repetition counter (minOccurs=0).
        return fac.CounterCondition(min=0, max=None,
                                    metadata=pyxb.utils.utility.Location(xsd_path, line, 4))

    cc_journey = counter_for(391)
    cc_assoc = counter_for(396)

    def state_for(tag, line, counter):
        # Initial, accepting FAC state for the element use of *tag*.
        symbol = pyxb.binding.content.ElementUse(
            CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(Namespace, tag)),
            pyxb.utils.utility.Location(xsd_path, line, 4))
        return fac.State(symbol, is_initial=True,
                         final_update=set([fac.UpdateInstruction(counter, False)]),
                         is_unordered_catenation=False)

    st_journey = state_for('Journey', 391, cc_journey)
    st_assoc = state_for('Association', 396, cc_assoc)
    # Journeys repeat, then hand over (once) to Associations, which repeat.
    st_journey._set_transitionSet([
        fac.Transition(st_journey, [fac.UpdateInstruction(cc_journey, True)]),
        fac.Transition(st_assoc, [fac.UpdateInstruction(cc_journey, False)]),
    ])
    st_assoc._set_transitionSet([fac.Transition(st_assoc, [fac.UpdateInstruction(cc_assoc, True)])])
    # Third argument True: the element's content may be entirely empty.
    return fac.Automaton([st_journey, st_assoc], {cc_journey, cc_assoc}, True, containing_state=None)
# Install the root timetable type's content-model automaton.
CTD_ANON._Automaton = _BuildAutomaton_2()
# NOTE(review): the four lines below are non-Python residue (dataset-viewer UI
# text) accidentally appended to this generated module; commented out so the
# module remains syntactically valid and importable.
# | true | true |
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.