code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import logging
import archinstall
__version__ = 0.1
class Plugin:
    """archinstall plugin that collapses a "variants" mapping in the user
    configuration down to one concrete set of arguments.

    On construction, if ``archinstall.arguments`` carries a variants dict,
    a single variant is selected (from the optional ``variant`` argument or
    via an interactive prompt), merged into the arguments, and the
    variant-related bookkeeping keys are removed.
    """

    VARIANTS_DICT_KEY = "variants"
    VARIANT_KEY = "variant"

    def __init__(self):
        if self.has_variants() and self.variants_is_dict():
            variant_key = self.get_selected_variant_key()
            variant = archinstall.arguments[self.VARIANTS_DICT_KEY][variant_key]
            self.apply_variant(variant)
            self.clean_arguments()
            archinstall.log(
                f"The '{ variant_key }' variant was applied to the arguments.",
                level=logging.INFO
            )
            archinstall.log(
                # str(...) is the idiomatic spelling of .__str__()
                "New arguments: " + str(archinstall.arguments),
                level=logging.DEBUG
            )

    def variants_is_dict(self) -> bool:
        """Return True when the "variants" entry is a mapping."""
        return isinstance(self.get_variants(), dict)

    def has_variant_argument(self) -> bool:
        """Return True when the user passed a string "variant" argument."""
        return self.VARIANT_KEY in archinstall.arguments and \
            isinstance(self.get_variant_argument(), str)

    def get_variant_argument(self) -> str:
        return archinstall.arguments[self.VARIANT_KEY]

    def variant_argument_in_variants(self) -> bool:
        """Return True when the requested variant actually exists."""
        return self.get_variant_argument() in self.get_variants()

    def get_variants(self) -> dict:
        return archinstall.arguments[self.VARIANTS_DICT_KEY]

    def has_variants(self) -> bool:
        return self.VARIANTS_DICT_KEY in archinstall.arguments

    def variant_exists(self, variant: str) -> bool:
        return variant in self.get_variants()

    def get_selected_variant_key(self) -> str:
        """Pick the variant key: an explicit, valid "variant" argument wins;
        otherwise prompt when there is a real choice, defaulting to the
        first option."""
        options = list(self.get_variants().keys())
        if self.has_variant_argument() and self.variant_argument_in_variants():
            return self.get_variant_argument()
        if len(options) > 1:
            return archinstall.generic_select(
                options,
                f"Select which variant you want to install (default: {options[0]}):",
                True
            ) or options[0]
        return options[0]

    def apply_variant(self, variant: dict):
        """Merge one variant into ``archinstall.arguments``.

        List-valued arguments already present are extended; everything
        else is overwritten.
        """
        for option in variant:
            if option in archinstall.arguments:
                if isinstance(archinstall.arguments[option], list):
                    archinstall.arguments[option] += variant[option]
                    continue
            self.overwrite(option, variant[option])

    def clean_arguments(self):
        """Drop the variant bookkeeping keys from the arguments."""
        del archinstall.arguments[self.VARIANTS_DICT_KEY]
        # Bug fix: the "variant" argument is optional, so it may be absent
        # even though a variants dict was present; a plain del raised
        # KeyError in that case.
        archinstall.arguments.pop(self.VARIANT_KEY, None)

    def overwrite(self, key: str, value):
        archinstall.arguments[key] = value
| [
"archinstall.arguments.__str__",
"archinstall.log",
"archinstall.generic_select"
] | [((444, 545), 'archinstall.log', 'archinstall.log', (['f"""The \'{variant_key}\' variant was applied to the arguments."""'], {'level': 'logging.INFO'}), '(f"The \'{variant_key}\' variant was applied to the arguments.",\n level=logging.INFO)\n', (459, 545), False, 'import archinstall\n'), ((1791, 1906), 'archinstall.generic_select', 'archinstall.generic_select', (['options', 'f"""Select which variant you want to install (default: {options[0]}):"""', '(True)'], {}), "(options,\n f'Select which variant you want to install (default: {options[0]}):', True)\n", (1817, 1906), False, 'import archinstall\n'), ((655, 686), 'archinstall.arguments.__str__', 'archinstall.arguments.__str__', ([], {}), '()\n', (684, 686), False, 'import archinstall\n')] |
# Standard Library
import pickle
from typing import *
from pathlib import Path
# Third-party Party
import numpy as np
import PIL.Image as Image
from colorama import Fore, init
# Torch Library
import torch
import torch.utils.data as data
import torchvision.transforms as T
# My Library
from helper import visualize_np, visualize_plt, visualize_pil
from helper import ProjectPath, DatasetPath
from helper import ClassLabelLookuper
init(autoreset=True)
ImageType = TypeVar(
"ImageType",
np.ndarray, torch.Tensor, Path
)
ClassType = TypeVar(
"ClassType",
np.ndarray, torch.Tensor
)
class MultiDataset(data.Dataset):
    """Unified train/val/test dataset over Cifar10, Cifar100 and PascalVOC2012.

    Cifar images are held in memory as arrays (NHWC after reading);
    PascalVOC2012 images are kept as paths and decoded lazily in
    ``__getitem__``.
    """

    def __init__(self, dataset: str, split: str):
        """
        Args:
            dataset: one of "Cifar10", "Cifar100", "PascalVOC2012".
            split: one of "train", "val", "test".
        """
        super(MultiDataset, self).__init__()
        assert split in (s := ["train", "val", "test"]), f"{Fore.RED}Invalid split, should be in {s}"
        self.split = split
        self.dataset = dataset
        # Dispatch table: dataset name -> reader method returning (image, label).
        self._dataset_reader: Dict[str, Callable] = {
            "Cifar10": self.__read_cifar10,
            "Cifar100": self.__read_cifar100,
            "PascalVOC2012": self.__read_PascalVOC2012
        }
        assert dataset in self._dataset_reader.keys(), f"{Fore.RED}Invalid dataset, please select in " \
                                                       f"{self._dataset_reader.keys()}."
        self.image: Union[np.ndarray, List[Path]]
        self.label: np.ndarray
        self.image, self.label = self._dataset_reader[self.dataset]()
        # Carve the loaded data into the requested split (Cifar train/val only).
        self.select_train_val()
        self.num_class = len(ClassLabelLookuper(self.dataset).cls)

    def __len__(self) -> int:
        return len(self.image)

    def __getitem__(self, idx) -> Tuple[torch.Tensor, torch.Tensor]:
        image, label = self.image[idx], self.label[idx]
        if isinstance(image, Path):
            # PascalVOC2012: decode lazily from disk.
            image = Image.open(image)
        else:
            # Cifar: convert the in-memory array to a PIL RGB image.
            image = Image.fromarray(image.astype(np.uint8)).convert("RGB")
        return self.transform(image), label

    def set_transform(self, transform: T.Compose) -> "MultiDataset":
        # Fluent setter; the transform is applied per item in __getitem__.
        self.transform = transform
        return self

    def select_train_val(self, trainval_ratio: Optional[float] = 0.2) -> None:
        """Split the loaded Cifar data into train/val subsets in place.

        The per-label index split is cached as an ``.npz`` file under
        ``ProjectPath.config`` so train and val stay disjoint across runs;
        it is regenerated when missing or when ``trainval_ratio`` differs
        from the cached ratio. No-op for the test split and for
        PascalVOC2012.
        """
        # get image of each label
        self.label_image: Dict[int, np.ndarray] = {}
        for label in np.unique(self.label):
            self.label_image[label] = np.where(self.label == label)[0]
        if self.dataset in ["Cifar10", "Cifar100"]:
            if self.split == "test":
                return
            else:
                # generate train val if not exists, else load
                if (config_path := ProjectPath.config.joinpath(f"{self.dataset}.npz")).exists():
                    # NOTE: this local `data` shadows the `torch.utils.data`
                    # module alias within this method; harmless here.
                    data = np.load(config_path)
                    ratio, train, val = data["ratio"], data["train"], data["val"]
                # Relies on short-circuiting: when the file does not exist,
                # `ratio` is undefined and must not be evaluated.
                if not config_path.exists() or ratio != trainval_ratio:
                    train, val = [], []
                    for label, image_idx in self.label_image.items():
                        np.random.shuffle(image_idx)
                        val_num = int(trainval_ratio * len(image_idx))
                        val.append(image_idx[:val_num])
                        train.append(image_idx[val_num:])
                    # np.stack assumes every label has the same sample count
                    # (true for the balanced Cifar datasets).
                    train = np.stack(train, axis=0)
                    val = np.stack(val, axis=0)
                    config_path.parent.mkdir(parents=True, exist_ok=True)
                    np.savez(config_path, ratio=trainval_ratio, train=train, val=val)
                train = np.concatenate(train, axis=0)
                val = np.concatenate(val, axis=0)
                # select train val
                if self.split == "val":
                    self.image = self.image[val]
                    self.label = self.label[val]
                else:
                    self.image = self.image[train]
                    self.label = self.label[train]
        else:
            return

    def __read_cifar10(self) -> Tuple[np.ndarray, np.ndarray]:
        """Read Cifar10 from the pickled batch files; returns NHWC images
        and an integer label array."""
        if self.split in ["train", "val"]:
            data = []
            for batch in DatasetPath.Cifar10.train:
                with batch.open(mode="rb") as f:
                    data.append(pickle.load(f, encoding="bytes"))
            image = np.concatenate([i[b"data"].reshape(-1, 3, 32, 32) for i in data], axis=0)
            label = np.concatenate([i[b"labels"] for i in data], axis=0)
        else:
            with DatasetPath.Cifar10.test.open(mode="rb") as f:
                data = pickle.load(f, encoding="bytes")
            image = data[b"data"].reshape(-1, 3, 32, 32)
            label = data[b"labels"]
        # NCHW -> NHWC for PIL consumption in __getitem__.
        return image.transpose(0, 2, 3, 1), np.array(label)

    def __read_cifar100(self) -> Tuple[np.ndarray, np.ndarray]:
        """Read Cifar100 (fine labels); returns NHWC images and labels."""
        if self.split in ["train", "val"]:
            with DatasetPath.Cifar100.train.open(mode="rb") as f:
                data = pickle.load(f, encoding="bytes")
            image = data[b"data"].reshape(-1, 3, 32, 32)
            label = data[b"fine_labels"]
        else:
            # NOTE(review): the test branch indexes with str keys while the
            # train branch uses bytes keys — verify against the actual pickle.
            with DatasetPath.Cifar100.test.open(mode="rb") as f:
                data = pickle.load(f, encoding="bytes")
            image = data["data"].reshape(-1, 3, 32, 32)
            label = data["label"]
        return image.transpose(0, 2, 3, 1), np.asarray(label)

    def __read_PascalVOC2012(self) -> Tuple[List[Path], np.ndarray]:
        """Collect PascalVOC2012 image paths and class labels per split."""
        image = []
        label = []
        ccn = ClassLabelLookuper(datasets="PascalVOC2012")
        # NOTE(review): `in "train"` is a substring test, not equality —
        # it works for "train" vs "val"/"test" but looks unintentional.
        if self.split in "train":
            for k, v in DatasetPath.PascalVOC2012.train_idx.items():
                image.extend(v)
                label.extend([ccn.get_label(k)] * len(v))
        elif self.split == "val":
            for k, v in DatasetPath.PascalVOC2012.val_idx.items():
                image.extend(v)
                label.extend([ccn.get_label(k)] * len(v))
        else:
            assert False, f"{Fore.RED}PascalVOC2012 test data is not accesibly"
        # Attention: PascalVOC2012 contains duplicated images, so dedupe.
        # NOTE(review): np.unique converts the Path objects to strings;
        # confirm __getitem__'s isinstance(image, Path) branch still fires.
        image, idx = np.unique(image, return_index=True)
        return image, np.array(label)[idx]
if __name__ == "__main__":
    # Smoke test: load the Cifar100 train split, attach a light
    # augmentation pipeline, and visualize one batch with class names.
    # md = MultiDataset(dataset="PascalVOC2012", split="val")
    # tt = T.Compose([
    #     T.RandomHorizontalFlip(),
    #     T.Resize((224, 224)),
    #     T.ToTensor()
    # ])
    # md.set_transform(tt)
    md = MultiDataset(dataset="Cifar100", split="train")
    tt = T.Compose([
        T.RandomHorizontalFlip(),
        T.ToTensor()
    ])
    md.set_transform(tt)
    ccn = ClassLabelLookuper(datasets=md.dataset)
    dl = data.DataLoader(md, batch_size=64)
    for x, y in dl:
        print(x.shape)
        visualize_plt(x, [ccn.get_class(i.item()) for i in y])
        # Only the first batch is inspected.
        break
| [
"helper.ProjectPath.config.joinpath",
"numpy.array",
"helper.DatasetPath.Cifar10.test.open",
"colorama.init",
"numpy.load",
"numpy.savez",
"numpy.where",
"helper.DatasetPath.Cifar100.test.open",
"helper.ClassLabelLookuper",
"numpy.asarray",
"helper.DatasetPath.PascalVOC2012.train_idx.items",
"... | [((434, 454), 'colorama.init', 'init', ([], {'autoreset': '(True)'}), '(autoreset=True)\n', (438, 454), False, 'from colorama import Fore, init\n'), ((6474, 6513), 'helper.ClassLabelLookuper', 'ClassLabelLookuper', ([], {'datasets': 'md.dataset'}), '(datasets=md.dataset)\n', (6492, 6513), False, 'from helper import ClassLabelLookuper\n'), ((6523, 6557), 'torch.utils.data.DataLoader', 'data.DataLoader', (['md'], {'batch_size': '(64)'}), '(md, batch_size=64)\n', (6538, 6557), True, 'import torch.utils.data as data\n'), ((2257, 2278), 'numpy.unique', 'np.unique', (['self.label'], {}), '(self.label)\n', (2266, 2278), True, 'import numpy as np\n'), ((5389, 5433), 'helper.ClassLabelLookuper', 'ClassLabelLookuper', ([], {'datasets': '"""PascalVOC2012"""'}), "(datasets='PascalVOC2012')\n", (5407, 5433), False, 'from helper import ClassLabelLookuper\n'), ((5978, 6013), 'numpy.unique', 'np.unique', (['image'], {'return_index': '(True)'}), '(image, return_index=True)\n', (5987, 6013), True, 'import numpy as np\n'), ((1789, 1806), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (1799, 1806), True, 'import PIL.Image as Image\n'), ((4312, 4364), 'numpy.concatenate', 'np.concatenate', (["[i[b'labels'] for i in data]"], {'axis': '(0)'}), "([i[b'labels'] for i in data], axis=0)\n", (4326, 4364), True, 'import numpy as np\n'), ((4636, 4651), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (4644, 4651), True, 'import numpy as np\n'), ((5249, 5266), 'numpy.asarray', 'np.asarray', (['label'], {}), '(label)\n', (5259, 5266), True, 'import numpy as np\n'), ((5492, 5535), 'helper.DatasetPath.PascalVOC2012.train_idx.items', 'DatasetPath.PascalVOC2012.train_idx.items', ([], {}), '()\n', (5533, 5535), False, 'from helper import ProjectPath, DatasetPath\n'), ((6384, 6408), 'torchvision.transforms.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {}), '()\n', (6406, 6408), True, 'import torchvision.transforms as T\n'), ((6418, 6430), 
'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (6428, 6430), True, 'import torchvision.transforms as T\n'), ((1507, 1539), 'helper.ClassLabelLookuper', 'ClassLabelLookuper', (['self.dataset'], {}), '(self.dataset)\n', (1525, 1539), False, 'from helper import ClassLabelLookuper\n'), ((2318, 2347), 'numpy.where', 'np.where', (['(self.label == label)'], {}), '(self.label == label)\n', (2326, 2347), True, 'import numpy as np\n'), ((3474, 3503), 'numpy.concatenate', 'np.concatenate', (['train'], {'axis': '(0)'}), '(train, axis=0)\n', (3488, 3503), True, 'import numpy as np\n'), ((3526, 3553), 'numpy.concatenate', 'np.concatenate', (['val'], {'axis': '(0)'}), '(val, axis=0)\n', (3540, 3553), True, 'import numpy as np\n'), ((4396, 4436), 'helper.DatasetPath.Cifar10.test.open', 'DatasetPath.Cifar10.test.open', ([], {'mode': '"""rb"""'}), "(mode='rb')\n", (4425, 4436), False, 'from helper import ProjectPath, DatasetPath\n'), ((4466, 4498), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""bytes"""'}), "(f, encoding='bytes')\n", (4477, 4498), False, 'import pickle\n'), ((4777, 4819), 'helper.DatasetPath.Cifar100.train.open', 'DatasetPath.Cifar100.train.open', ([], {'mode': '"""rb"""'}), "(mode='rb')\n", (4808, 4819), False, 'from helper import ProjectPath, DatasetPath\n'), ((4849, 4881), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""bytes"""'}), "(f, encoding='bytes')\n", (4860, 4881), False, 'import pickle\n'), ((5011, 5052), 'helper.DatasetPath.Cifar100.test.open', 'DatasetPath.Cifar100.test.open', ([], {'mode': '"""rb"""'}), "(mode='rb')\n", (5041, 5052), False, 'from helper import ProjectPath, DatasetPath\n'), ((5082, 5114), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""bytes"""'}), "(f, encoding='bytes')\n", (5093, 5114), False, 'import pickle\n'), ((5685, 5726), 'helper.DatasetPath.PascalVOC2012.val_idx.items', 'DatasetPath.PascalVOC2012.val_idx.items', ([], {}), '()\n', (5724, 5726), False, 'from helper import ProjectPath, 
DatasetPath\n'), ((6036, 6051), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (6044, 6051), True, 'import numpy as np\n'), ((2668, 2688), 'numpy.load', 'np.load', (['config_path'], {}), '(config_path)\n', (2675, 2688), True, 'import numpy as np\n'), ((3218, 3241), 'numpy.stack', 'np.stack', (['train'], {'axis': '(0)'}), '(train, axis=0)\n', (3226, 3241), True, 'import numpy as np\n'), ((3268, 3289), 'numpy.stack', 'np.stack', (['val'], {'axis': '(0)'}), '(val, axis=0)\n', (3276, 3289), True, 'import numpy as np\n'), ((3384, 3449), 'numpy.savez', 'np.savez', (['config_path'], {'ratio': 'trainval_ratio', 'train': 'train', 'val': 'val'}), '(config_path, ratio=trainval_ratio, train=train, val=val)\n', (3392, 3449), True, 'import numpy as np\n'), ((2976, 3004), 'numpy.random.shuffle', 'np.random.shuffle', (['image_idx'], {}), '(image_idx)\n', (2993, 3004), True, 'import numpy as np\n'), ((4164, 4196), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""bytes"""'}), "(f, encoding='bytes')\n", (4175, 4196), False, 'import pickle\n'), ((2579, 2629), 'helper.ProjectPath.config.joinpath', 'ProjectPath.config.joinpath', (['f"""{self.dataset}.npz"""'], {}), "(f'{self.dataset}.npz')\n", (2606, 2629), False, 'from helper import ProjectPath, DatasetPath\n')] |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
pipline for U-GAT-IT
"""
import time
import math
import os
from glob import glob
import cv2
import numpy as np
import mindspore.ops as ops
from mindspore import nn
from mindspore import save_checkpoint, load_checkpoint, load_param_into_net
from mindspore.common import initializer as init
from mindspore.communication.management import get_rank
from .networks import ResnetGenerator, Discriminator, GWithLossCell, DWithLossCell
from .cell import TrainOneStepG, TrainOneStepD, Generator
from ..utils.tools import denorm, tensor2numpy, RGB2BGR, cam
from ..dataset.dataset import TrainDataLoader, TestDataLoader
from ..metrics.metrics import mean_kernel_inception_distance
class UGATIT:
    """Training and inference pipeline for U-GAT-IT image-to-image translation."""

    def __init__(self, args):
        """Copy all hyperparameters and paths from the parsed CLI args."""
        self.light = args.light
        self.distributed = args.distributed
        self.mode = args.phase
        if self.light:
            self.model_name = 'UGATIT_light'
        else:
            self.model_name = 'UGATIT'
        self.modelart = args.enable_modelarts
        self.train_url = args.train_url
        self.output_path = args.output_path
        self.dataset = args.dataset
        self.data_path = args.data_path
        self.decay_flag = args.decay_flag
        self.epoch = args.epoch
        self.decay_epoch = args.decay_epoch
        self.batch_size = args.batch_size
        self.print_freq = args.print_freq
        self.save_freq = args.save_freq
        self.lr_policy = 'linear'
        self.loss_scale = args.loss_scale
        self.lr = args.lr
        self.weight_decay = args.weight_decay
        self.ch = args.ch
        self.use_global_norm = args.use_global_norm
        """ Weight """
        self.adv_weight = args.adv_weight
        self.cycle_weight = args.cycle_weight
        self.identity_weight = args.identity_weight
        self.cam_weight = args.cam_weight
        self.weights = [self.adv_weight, self.cycle_weight, self.identity_weight, self.cam_weight]
        """ Generator """
        self.n_res = args.n_res
        """ Discriminator """
        self.n_dis = args.n_dis
        self.img_size = args.img_size
        self.img_ch = args.img_ch
        self.resume = args.resume
        """utils"""
        self.oneslike = ops.OnesLike()
        self.zeroslike = ops.ZerosLike()
        self.assign = ops.Assign()
        print()
        print("##### Information #####")
        print("# light : ", self.light)
        print("# dataset : ", self.dataset)
        print("# batch_size : ", self.batch_size)
        print("# epochs: ", self.epoch)
        print()
        print("##### Generator #####")
        print("# residual blocks : ", self.n_res)
        print()
        print("##### Discriminator #####")
        print("# discriminator layer : ", self.n_dis)
        print()
        print("##### Weight #####")
        print("# adv_weight : ", self.adv_weight)
        print("# cycle_weight : ", self.cycle_weight)
        print("# identity_weight : ", self.identity_weight)
        print("# cam_weight : ", self.cam_weight)

    ##################################################################################
    # Model
    ##################################################################################

    def build_model(self):
        """Build dataloaders, both generators, four discriminators,
        optimizers and the one-step train cells; resume from the latest
        checkpoint when requested."""
        self.train_nums = 1
        if self.mode == 'train':
            train_loader, test_loader, train_nums = TrainDataLoader(self.img_size,
                                                            self.data_path,
                                                            self.dataset,
                                                            self.batch_size,
                                                            self.distributed)
            self.train_loader = train_loader
            self.test_iterator = test_loader.create_dict_iterator()
            self.train_nums = train_nums
            print("Training dataset size = ", self.train_nums)
        elif self.mode == 'test':
            test_loader = TestDataLoader(self.img_size,
                                          self.data_path,
                                          self.dataset)
            self.test_iterator = test_loader.create_dict_iterator()
        else:
            raise RuntimeError("Invalid mode")
        print("Dataset load finished")
        self.genA2B = ResnetGenerator(input_nc=3,
                                       output_nc=3,
                                       ngf=self.ch,
                                       n_blocks=self.n_res,
                                       img_size=self.img_size,
                                       light=self.light)
        self.genB2A = ResnetGenerator(input_nc=3,
                                       output_nc=3,
                                       ngf=self.ch,
                                       n_blocks=self.n_res,
                                       img_size=self.img_size,
                                       light=self.light)
        # Global (7-layer) and local (5-layer) discriminators per domain.
        self.disGA = Discriminator(input_nc=3, ndf=self.ch, n_layers=7)
        self.disGB = Discriminator(input_nc=3, ndf=self.ch, n_layers=7)
        self.disLA = Discriminator(input_nc=3, ndf=self.ch, n_layers=5)
        self.disLB = Discriminator(input_nc=3, ndf=self.ch, n_layers=5)
        self.generator = Generator(self.genA2B, self.genB2A)
        self.init_weights(self.genA2B, 'KaimingUniform', math.sqrt(5))
        self.init_weights(self.genB2A, 'KaimingUniform', math.sqrt(5))
        self.init_weights(self.disGA, 'KaimingUniform', math.sqrt(5))
        self.init_weights(self.disGB, 'KaimingUniform', math.sqrt(5))
        self.init_weights(self.disLA, 'KaimingUniform', math.sqrt(5))
        self.init_weights(self.disLB, 'KaimingUniform', math.sqrt(5))
        self.start_epoch = 1
        if self.resume:
            # Resume from the newest checkpoint found in the model dir.
            model_list = glob(os.path.join(self.output_path, self.dataset, 'model', '*.ckpt'))
            if model_list:
                model_list.sort()
                # Epoch number is encoded in the checkpoint filename suffix.
                self.start_epoch = int(model_list[-1].split('_')[-1].split('.')[0])
                self.load(os.path.join(self.output_path, self.dataset, 'model'), self.start_epoch)
                print(" [*]Epoch %d Load SUCCESS" % self.start_epoch)
        # Skip the already-consumed part of the per-step LR schedule.
        start_step = (self.start_epoch - 1) * self.train_nums
        self.learning_rate = self.get_lr()[start_step:]
        loss_scale = self.loss_scale
        self.D_loss_net = DWithLossCell(self.disGA,
                                      self.disLA,
                                      self.disGB,
                                      self.disLB,
                                      self.weights)
        self.G_loss_net = GWithLossCell(self.generator,
                                      self.disGA,
                                      self.disLA,
                                      self.disGB,
                                      self.disLB,
                                      self.weights)
        self.G_optim = nn.Adam(self.generator.trainable_params(),
                               learning_rate=self.learning_rate,
                               beta1=0.5,
                               beta2=0.999,
                               weight_decay=self.weight_decay)
        self.D_optim = nn.Adam(self.D_loss_net.trainable_params(),
                               learning_rate=self.learning_rate,
                               beta1=0.5,
                               beta2=0.999,
                               weight_decay=self.weight_decay)
        self.D_train_net = TrainOneStepD(self.D_loss_net, self.D_optim, loss_scale, self.use_global_norm)
        self.G_train_net = TrainOneStepG(self.G_loss_net, self.generator, self.G_optim,
                                        loss_scale, self.use_global_norm)

    def get_lr(self):
        """
        Learning rate generator.

        Returns a per-step list: constant ``lr`` for the first
        ``decay_epoch`` epochs, then linearly decayed to zero.
        """
        if self.lr_policy == 'linear':
            lrs = [self.lr] * self.train_nums * self.decay_epoch
            for epoch in range(self.decay_epoch, self.epoch):
                lr_epoch = self.lr * (self.epoch - epoch) / (self.epoch - self.decay_epoch)
                lrs += [lr_epoch] * self.train_nums
            return lrs
        # Any other policy: a single constant learning rate.
        return self.lr

    def init_weights(self, net, init_type='normal', init_gain=0.02):
        """Initialize conv/dense weights of ``net`` per ``init_type`` and
        reset norm-layer gamma/beta to ones/zeros."""
        for _, cell in net.cells_and_names():
            if isinstance(cell, (nn.Conv2d, nn.Conv2dTranspose, nn.Dense)):
                if init_type == 'normal':
                    cell.weight.set_data(init.initializer(init.Normal(init_gain), cell.weight.shape))
                elif init_type == 'xavier':
                    cell.weight.set_data(init.initializer(init.XavierUniform(init_gain), cell.weight.shape))
                elif init_type == 'KaimingUniform':
                    cell.weight.set_data(init.initializer(init.HeUniform(init_gain), cell.weight.shape))
                elif init_type == 'constant':
                    cell.weight.set_data(init.initializer(0.0005, cell.weight.shape))
                else:
                    raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            elif isinstance(cell, (nn.GroupNorm, nn.BatchNorm2d)):
                cell.gamma.set_data(init.initializer('ones', cell.gamma.shape))
                cell.beta.set_data(init.initializer('zeros', cell.beta.shape))

    def train(self):
        """Run the full training loop: alternate G/D updates per batch,
        clip ILN/AdaILN rho into [0, 1], and periodically dump previews
        and checkpoints."""
        self.D_train_net.set_train()
        self.G_train_net.set_train()
        data_loader = self.train_loader.create_dict_iterator()
        # training loop
        print('training start !')
        for epoch in range(self.start_epoch, self.epoch + 1):
            i = 0
            for data in data_loader:
                i += 1
                start_time = time.time()
                real_A = data["image_A"]
                real_B = data["image_B"]
                # Update
                fake_A2B, fake_B2A, Generator_loss = self.G_train_net(real_A, real_B)
                Discriminator_loss = self.D_train_net(real_A, real_B, fake_A2B, fake_B2A)
                # clip parameter of AdaILN and ILN, applied after optimizer step
                for m in self.genA2B.cells_and_names():
                    if hasattr(m[1], 'rho'):
                        w = m[1].rho.data
                        w = ops.clip_by_value(w, 0, 1)
                        m[1].rho.data.set_data(w)
                for m in self.genB2A.cells_and_names():
                    if hasattr(m[1], 'rho'):
                        w = m[1].rho.data
                        w = ops.clip_by_value(w, 0, 1)
                        m[1].rho.data.set_data(w)
                print("epoch %d:[%5d/%5d] time per iter: %4.4f " % (epoch,
                                                                    i,
                                                                    self.train_nums,
                                                                    time.time() - start_time))
                print("d_loss:", Discriminator_loss)
                print("g_loss:", Generator_loss)
            # Periodic preview images + "latest" checkpoints (rank 0 only
            # when running distributed).
            if epoch % self.print_freq == 0:
                if self.distributed:
                    if get_rank() == 0:
                        self.print(epoch)
                        save_checkpoint(self.genA2B,
                                        os.path.join(self.output_path, self.dataset + '_genA2B_params_latest.ckpt'))
                        save_checkpoint(self.genB2A,
                                        os.path.join(self.output_path, self.dataset + '_genB2A_params_latest.ckpt'))
                        save_checkpoint(self.disGA,
                                        os.path.join(self.output_path, self.dataset + '_disGA_params_latest.ckpt'))
                        save_checkpoint(self.disGB,
                                        os.path.join(self.output_path, self.dataset + '_disGB_params_latest.ckpt'))
                        save_checkpoint(self.disLA,
                                        os.path.join(self.output_path, self.dataset + '_disLA_params_latest.ckpt'))
                        save_checkpoint(self.disLB,
                                        os.path.join(self.output_path, self.dataset + '_disLB_params_latest.ckpt'))
                else:
                    self.print(epoch)
                    save_checkpoint(self.genA2B,
                                    os.path.join(self.output_path, self.dataset + '_genA2B_params_latest.ckpt'))
                    save_checkpoint(self.genB2A,
                                    os.path.join(self.output_path, self.dataset + '_genB2A_params_latest.ckpt'))
                    save_checkpoint(self.disGA,
                                    os.path.join(self.output_path, self.dataset + '_disGA_params_latest.ckpt'))
                    save_checkpoint(self.disGB,
                                    os.path.join(self.output_path, self.dataset + '_disGB_params_latest.ckpt'))
                    save_checkpoint(self.disLA,
                                    os.path.join(self.output_path, self.dataset + '_disLA_params_latest.ckpt'))
                    save_checkpoint(self.disLB,
                                    os.path.join(self.output_path, self.dataset + '_disLB_params_latest.ckpt'))
            # Periodic epoch-numbered checkpoints.
            if epoch % self.save_freq == 0:
                if self.distributed:
                    if get_rank() == 0:
                        self.save(os.path.join(self.output_path, self.dataset, 'model'), epoch)
                else:
                    self.save(os.path.join(self.output_path, self.dataset, 'model'), epoch)

    def print(self, epoch):
        """Save middle results: build two preview sheets (A2B and B2A),
        each stacking 7 panels (inputs, CAM heatmaps, translations and
        cycle reconstructions) for 5 test samples, and write them as PNGs."""
        test_sample_num = 5
        # 7 panels stacked vertically; columns appended per sample.
        A2B = np.zeros((self.img_size * 7, 0, 3))
        B2A = np.zeros((self.img_size * 7, 0, 3))
        for _ in range(test_sample_num):
            data = next(self.test_iterator)
            real_A = data["image_A"]
            real_B = data["image_B"]
            fake_A2B, _, fake_A2B_heatmap = self.genA2B(real_A)
            fake_B2A, _, fake_B2A_heatmap = self.genB2A(real_B)
            fake_A2B2A, _, fake_A2B2A_heatmap = self.genB2A(fake_A2B)
            fake_B2A2B, _, fake_B2A2B_heatmap = self.genA2B(fake_B2A)
            # Without copying real_A and real_B tensors before feeding them
            # into genB2A and genA2B does not work correctly with the GPU backend.
            fake_A2A, _, fake_A2A_heatmap = self.genB2A(real_A.copy())
            fake_B2B, _, fake_B2B_heatmap = self.genA2B(real_B.copy())
            A2B = np.concatenate((A2B, np.concatenate((RGB2BGR(tensor2numpy(denorm(real_A[0]))),
                                                  cam(tensor2numpy(fake_A2A_heatmap[0]), self.img_size),
                                                  RGB2BGR(tensor2numpy(denorm(fake_A2A[0]))),
                                                  cam(tensor2numpy(fake_A2B_heatmap[0]), self.img_size),
                                                  RGB2BGR(tensor2numpy(denorm(fake_A2B[0]))),
                                                  cam(tensor2numpy(fake_A2B2A_heatmap[0]), self.img_size),
                                                  RGB2BGR(tensor2numpy(denorm(fake_A2B2A[0])))), 0)), 1)
            B2A = np.concatenate((B2A, np.concatenate((RGB2BGR(tensor2numpy(denorm(real_B[0]))),
                                                  cam(tensor2numpy(fake_B2B_heatmap[0]), self.img_size),
                                                  RGB2BGR(tensor2numpy(denorm(fake_B2B[0]))),
                                                  cam(tensor2numpy(fake_B2A_heatmap[0]), self.img_size),
                                                  RGB2BGR(tensor2numpy(denorm(fake_B2A[0]))),
                                                  cam(tensor2numpy(fake_B2A2B_heatmap[0]), self.img_size),
                                                  RGB2BGR(tensor2numpy(denorm(fake_B2A2B[0])))), 0)), 1)
        cv2.imwrite(os.path.join(self.output_path, self.dataset, 'img', 'A2B_%07d.png' % epoch), A2B * 255.0)
        cv2.imwrite(os.path.join(self.output_path, self.dataset, 'img', 'B2A_%07d.png' % epoch), B2A * 255.0)

    def save(self, savedir, epoch):
        """Save all six sub-networks as epoch-numbered checkpoints."""
        save_checkpoint(self.genA2B, os.path.join(savedir, self.dataset + '_genA2B_params_%07d.ckpt' % epoch))
        save_checkpoint(self.genB2A, os.path.join(savedir, self.dataset + '_genB2A_params_%07d.ckpt' % epoch))
        save_checkpoint(self.disGA, os.path.join(savedir, self.dataset + '_disGA_params_%07d.ckpt' % epoch))
        save_checkpoint(self.disGB, os.path.join(savedir, self.dataset + '_disGB_params_%07d.ckpt' % epoch))
        save_checkpoint(self.disLA, os.path.join(savedir, self.dataset + '_disLA_params_%07d.ckpt' % epoch))
        save_checkpoint(self.disLB, os.path.join(savedir, self.dataset + '_disLB_params_%07d.ckpt' % epoch))

    def load(self, loaddir, epoch):
        """Load checkpoints for the given epoch.

        Only genA2B is loaded in test mode; train mode restores all six
        networks. Parameters that failed to load are reported.
        """
        genA2B_params = load_checkpoint(os.path.join(loaddir, self.dataset + '_genA2B_params_%07d.ckpt' % epoch))
        not_load = {}
        not_load['genA2B'] = load_param_into_net(self.genA2B, genA2B_params)
        if self.mode == 'train':
            genB2A_params = load_checkpoint(os.path.join(loaddir, self.dataset + '_genB2A_params_%07d.ckpt' % epoch))
            disGA_params = load_checkpoint(os.path.join(loaddir, self.dataset + '_disGA_params_%07d.ckpt' % epoch))
            disGB_params = load_checkpoint(os.path.join(loaddir, self.dataset + '_disGB_params_%07d.ckpt' % epoch))
            disLA_params = load_checkpoint(os.path.join(loaddir, self.dataset + '_disLA_params_%07d.ckpt' % epoch))
            disLB_params = load_checkpoint(os.path.join(loaddir, self.dataset + '_disLB_params_%07d.ckpt' % epoch))
            not_load['genB2A'] = load_param_into_net(self.genB2A, genB2A_params)
            not_load['disGA'] = load_param_into_net(self.disGA, disGA_params)
            not_load['disGB'] = load_param_into_net(self.disGB, disGB_params)
            not_load['disLA'] = load_param_into_net(self.disLA, disLA_params)
            not_load['disLB'] = load_param_into_net(self.disLB, disLB_params)
        print("these params are not loaded: ", not_load)

    def test(self, inception_ckpt_path=None):
        """Translate every test image A->B, write results as PNGs, and
        optionally compute the mean kernel inception distance when an
        inception checkpoint is supplied."""
        # NOTE(review): set_train(True) during inference looks deliberate
        # for this model's normalization layers — confirm before changing.
        self.genA2B.set_train(True)
        output_path = os.path.join(self.output_path, self.dataset)
        model_list = glob(os.path.join(output_path, 'model', '*.ckpt'))
        if model_list:
            model_list.sort()
            start_epoch = int(model_list[-1].split('_')[-1].split('.')[0])
            self.load(os.path.join(output_path, 'model'), start_epoch)
            print(" [*] epoch %d Load SUCCESS" % start_epoch)
        else:
            print(" [*] Load FAILURE")
            return
        for n, data in enumerate(self.test_iterator):
            real_A = data['image_A']
            fake_A2B, _, _ = self.genA2B(real_A)
            A = RGB2BGR(tensor2numpy(denorm(real_A[0])))
            A2B = RGB2BGR(tensor2numpy(denorm(fake_A2B[0])))
            cv2.imwrite(os.path.join(output_path, 'test', 'A_%d.png' % (n + 1)), A * 255.0)
            cv2.imwrite(os.path.join(output_path, 'test', 'A2B_%d.png' % (n + 1)), A2B * 255.0)
        if inception_ckpt_path is not None:
            dataset_path = os.path.join(self.data_path, self.dataset)
            mean_kernel_inception_distance(output_path, dataset_path, inception_ckpt_path)
| [
"mindspore.common.initializer.XavierUniform",
"mindspore.common.initializer.HeUniform",
"mindspore.ops.ZerosLike",
"mindspore.ops.clip_by_value",
"os.path.join",
"math.sqrt",
"mindspore.ops.OnesLike",
"numpy.zeros",
"mindspore.common.initializer.Normal",
"mindspore.common.initializer.initializer",... | [((2894, 2908), 'mindspore.ops.OnesLike', 'ops.OnesLike', ([], {}), '()\n', (2906, 2908), True, 'import mindspore.ops as ops\n'), ((2934, 2949), 'mindspore.ops.ZerosLike', 'ops.ZerosLike', ([], {}), '()\n', (2947, 2949), True, 'import mindspore.ops as ops\n'), ((2972, 2984), 'mindspore.ops.Assign', 'ops.Assign', ([], {}), '()\n', (2982, 2984), True, 'import mindspore.ops as ops\n'), ((14424, 14459), 'numpy.zeros', 'np.zeros', (['(self.img_size * 7, 0, 3)'], {}), '((self.img_size * 7, 0, 3))\n', (14432, 14459), True, 'import numpy as np\n'), ((14474, 14509), 'numpy.zeros', 'np.zeros', (['(self.img_size * 7, 0, 3)'], {}), '((self.img_size * 7, 0, 3))\n', (14482, 14509), True, 'import numpy as np\n'), ((17867, 17914), 'mindspore.load_param_into_net', 'load_param_into_net', (['self.genA2B', 'genA2B_params'], {}), '(self.genA2B, genA2B_params)\n', (17886, 17914), False, 'from mindspore import save_checkpoint, load_checkpoint, load_param_into_net\n'), ((19105, 19149), 'os.path.join', 'os.path.join', (['self.output_path', 'self.dataset'], {}), '(self.output_path, self.dataset)\n', (19117, 19149), False, 'import os\n'), ((6084, 6096), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (6093, 6096), False, 'import math\n'), ((6155, 6167), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (6164, 6167), False, 'import math\n'), ((6225, 6237), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (6234, 6237), False, 'import math\n'), ((6295, 6307), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (6304, 6307), False, 'import math\n'), ((6365, 6377), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (6374, 6377), False, 'import math\n'), ((6435, 6447), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (6444, 6447), False, 'import math\n'), ((16740, 16815), 'os.path.join', 'os.path.join', (['self.output_path', 'self.dataset', '"""img"""', "('A2B_%07d.png' % epoch)"], {}), "(self.output_path, self.dataset, 'img', 'A2B_%07d.png' % 
epoch)\n", (16752, 16815), False, 'import os\n'), ((16850, 16925), 'os.path.join', 'os.path.join', (['self.output_path', 'self.dataset', '"""img"""', "('B2A_%07d.png' % epoch)"], {}), "(self.output_path, self.dataset, 'img', 'B2A_%07d.png' % epoch)\n", (16862, 16925), False, 'import os\n'), ((17014, 17086), 'os.path.join', 'os.path.join', (['savedir', "(self.dataset + '_genA2B_params_%07d.ckpt' % epoch)"], {}), "(savedir, self.dataset + '_genA2B_params_%07d.ckpt' % epoch)\n", (17026, 17086), False, 'import os\n'), ((17125, 17197), 'os.path.join', 'os.path.join', (['savedir', "(self.dataset + '_genB2A_params_%07d.ckpt' % epoch)"], {}), "(savedir, self.dataset + '_genB2A_params_%07d.ckpt' % epoch)\n", (17137, 17197), False, 'import os\n'), ((17235, 17306), 'os.path.join', 'os.path.join', (['savedir', "(self.dataset + '_disGA_params_%07d.ckpt' % epoch)"], {}), "(savedir, self.dataset + '_disGA_params_%07d.ckpt' % epoch)\n", (17247, 17306), False, 'import os\n'), ((17344, 17415), 'os.path.join', 'os.path.join', (['savedir', "(self.dataset + '_disGB_params_%07d.ckpt' % epoch)"], {}), "(savedir, self.dataset + '_disGB_params_%07d.ckpt' % epoch)\n", (17356, 17415), False, 'import os\n'), ((17453, 17524), 'os.path.join', 'os.path.join', (['savedir', "(self.dataset + '_disLA_params_%07d.ckpt' % epoch)"], {}), "(savedir, self.dataset + '_disLA_params_%07d.ckpt' % epoch)\n", (17465, 17524), False, 'import os\n'), ((17562, 17633), 'os.path.join', 'os.path.join', (['savedir', "(self.dataset + '_disLB_params_%07d.ckpt' % epoch)"], {}), "(savedir, self.dataset + '_disLB_params_%07d.ckpt' % epoch)\n", (17574, 17633), False, 'import os\n'), ((17742, 17814), 'os.path.join', 'os.path.join', (['loaddir', "(self.dataset + '_genA2B_params_%07d.ckpt' % epoch)"], {}), "(loaddir, self.dataset + '_genA2B_params_%07d.ckpt' % epoch)\n", (17754, 17814), False, 'import os\n'), ((18564, 18611), 'mindspore.load_param_into_net', 'load_param_into_net', (['self.genB2A', 'genB2A_params'], {}), 
'(self.genB2A, genB2A_params)\n', (18583, 18611), False, 'from mindspore import save_checkpoint, load_checkpoint, load_param_into_net\n'), ((18644, 18689), 'mindspore.load_param_into_net', 'load_param_into_net', (['self.disGA', 'disGA_params'], {}), '(self.disGA, disGA_params)\n', (18663, 18689), False, 'from mindspore import save_checkpoint, load_checkpoint, load_param_into_net\n'), ((18722, 18767), 'mindspore.load_param_into_net', 'load_param_into_net', (['self.disGB', 'disGB_params'], {}), '(self.disGB, disGB_params)\n', (18741, 18767), False, 'from mindspore import save_checkpoint, load_checkpoint, load_param_into_net\n'), ((18800, 18845), 'mindspore.load_param_into_net', 'load_param_into_net', (['self.disLA', 'disLA_params'], {}), '(self.disLA, disLA_params)\n', (18819, 18845), False, 'from mindspore import save_checkpoint, load_checkpoint, load_param_into_net\n'), ((18878, 18923), 'mindspore.load_param_into_net', 'load_param_into_net', (['self.disLB', 'disLB_params'], {}), '(self.disLB, disLB_params)\n', (18897, 18923), False, 'from mindspore import save_checkpoint, load_checkpoint, load_param_into_net\n'), ((19176, 19220), 'os.path.join', 'os.path.join', (['output_path', '"""model"""', '"""*.ckpt"""'], {}), "(output_path, 'model', '*.ckpt')\n", (19188, 19220), False, 'import os\n'), ((20075, 20117), 'os.path.join', 'os.path.join', (['self.data_path', 'self.dataset'], {}), '(self.data_path, self.dataset)\n', (20087, 20117), False, 'import os\n'), ((6532, 6595), 'os.path.join', 'os.path.join', (['self.output_path', 'self.dataset', '"""model"""', '"""*.ckpt"""'], {}), "(self.output_path, self.dataset, 'model', '*.ckpt')\n", (6544, 6595), False, 'import os\n'), ((10482, 10493), 'time.time', 'time.time', ([], {}), '()\n', (10491, 10493), False, 'import time\n'), ((17992, 18064), 'os.path.join', 'os.path.join', (['loaddir', "(self.dataset + '_genB2A_params_%07d.ckpt' % epoch)"], {}), "(loaddir, self.dataset + '_genB2A_params_%07d.ckpt' % epoch)\n", (18004, 18064), 
False, 'import os\n'), ((18109, 18180), 'os.path.join', 'os.path.join', (['loaddir', "(self.dataset + '_disGA_params_%07d.ckpt' % epoch)"], {}), "(loaddir, self.dataset + '_disGA_params_%07d.ckpt' % epoch)\n", (18121, 18180), False, 'import os\n'), ((18225, 18296), 'os.path.join', 'os.path.join', (['loaddir', "(self.dataset + '_disGB_params_%07d.ckpt' % epoch)"], {}), "(loaddir, self.dataset + '_disGB_params_%07d.ckpt' % epoch)\n", (18237, 18296), False, 'import os\n'), ((18341, 18412), 'os.path.join', 'os.path.join', (['loaddir', "(self.dataset + '_disLA_params_%07d.ckpt' % epoch)"], {}), "(loaddir, self.dataset + '_disLA_params_%07d.ckpt' % epoch)\n", (18353, 18412), False, 'import os\n'), ((18457, 18528), 'os.path.join', 'os.path.join', (['loaddir', "(self.dataset + '_disLB_params_%07d.ckpt' % epoch)"], {}), "(loaddir, self.dataset + '_disLB_params_%07d.ckpt' % epoch)\n", (18469, 18528), False, 'import os\n'), ((19372, 19406), 'os.path.join', 'os.path.join', (['output_path', '"""model"""'], {}), "(output_path, 'model')\n", (19384, 19406), False, 'import os\n'), ((19839, 19894), 'os.path.join', 'os.path.join', (['output_path', '"""test"""', "('A_%d.png' % (n + 1))"], {}), "(output_path, 'test', 'A_%d.png' % (n + 1))\n", (19851, 19894), False, 'import os\n'), ((19931, 19988), 'os.path.join', 'os.path.join', (['output_path', '"""test"""', "('A2B_%d.png' % (n + 1))"], {}), "(output_path, 'test', 'A2B_%d.png' % (n + 1))\n", (19943, 19988), False, 'import os\n'), ((6768, 6821), 'os.path.join', 'os.path.join', (['self.output_path', 'self.dataset', '"""model"""'], {}), "(self.output_path, self.dataset, 'model')\n", (6780, 6821), False, 'import os\n'), ((9953, 9995), 'mindspore.common.initializer.initializer', 'init.initializer', (['"""ones"""', 'cell.gamma.shape'], {}), "('ones', cell.gamma.shape)\n", (9969, 9995), True, 'from mindspore.common import initializer as init\n'), ((10032, 10074), 'mindspore.common.initializer.initializer', 'init.initializer', 
(['"""zeros"""', 'cell.beta.shape'], {}), "('zeros', cell.beta.shape)\n", (10048, 10074), True, 'from mindspore.common import initializer as init\n'), ((11031, 11057), 'mindspore.ops.clip_by_value', 'ops.clip_by_value', (['w', '(0)', '(1)'], {}), '(w, 0, 1)\n', (11048, 11057), True, 'import mindspore.ops as ops\n'), ((11279, 11305), 'mindspore.ops.clip_by_value', 'ops.clip_by_value', (['w', '(0)', '(1)'], {}), '(w, 0, 1)\n', (11296, 11305), True, 'import mindspore.ops as ops\n'), ((11891, 11901), 'mindspore.communication.management.get_rank', 'get_rank', ([], {}), '()\n', (11899, 11901), False, 'from mindspore.communication.management import get_rank\n'), ((13108, 13183), 'os.path.join', 'os.path.join', (['self.output_path', "(self.dataset + '_genA2B_params_latest.ckpt')"], {}), "(self.output_path, self.dataset + '_genA2B_params_latest.ckpt')\n", (13120, 13183), False, 'import os\n'), ((13270, 13345), 'os.path.join', 'os.path.join', (['self.output_path', "(self.dataset + '_genB2A_params_latest.ckpt')"], {}), "(self.output_path, self.dataset + '_genB2A_params_latest.ckpt')\n", (13282, 13345), False, 'import os\n'), ((13431, 13505), 'os.path.join', 'os.path.join', (['self.output_path', "(self.dataset + '_disGA_params_latest.ckpt')"], {}), "(self.output_path, self.dataset + '_disGA_params_latest.ckpt')\n", (13443, 13505), False, 'import os\n'), ((13591, 13665), 'os.path.join', 'os.path.join', (['self.output_path', "(self.dataset + '_disGB_params_latest.ckpt')"], {}), "(self.output_path, self.dataset + '_disGB_params_latest.ckpt')\n", (13603, 13665), False, 'import os\n'), ((13751, 13825), 'os.path.join', 'os.path.join', (['self.output_path', "(self.dataset + '_disLA_params_latest.ckpt')"], {}), "(self.output_path, self.dataset + '_disLA_params_latest.ckpt')\n", (13763, 13825), False, 'import os\n'), ((13911, 13985), 'os.path.join', 'os.path.join', (['self.output_path', "(self.dataset + '_disLB_params_latest.ckpt')"], {}), "(self.output_path, self.dataset + 
'_disLB_params_latest.ckpt')\n", (13923, 13985), False, 'import os\n'), ((14092, 14102), 'mindspore.communication.management.get_rank', 'get_rank', ([], {}), '()\n', (14100, 14102), False, 'from mindspore.communication.management import get_rank\n'), ((14257, 14310), 'os.path.join', 'os.path.join', (['self.output_path', 'self.dataset', '"""model"""'], {}), "(self.output_path, self.dataset, 'model')\n", (14269, 14310), False, 'import os\n'), ((9235, 9257), 'mindspore.common.initializer.Normal', 'init.Normal', (['init_gain'], {}), '(init_gain)\n', (9246, 9257), True, 'from mindspore.common import initializer as init\n'), ((12043, 12118), 'os.path.join', 'os.path.join', (['self.output_path', "(self.dataset + '_genA2B_params_latest.ckpt')"], {}), "(self.output_path, self.dataset + '_genA2B_params_latest.ckpt')\n", (12055, 12118), False, 'import os\n'), ((12213, 12288), 'os.path.join', 'os.path.join', (['self.output_path', "(self.dataset + '_genB2A_params_latest.ckpt')"], {}), "(self.output_path, self.dataset + '_genB2A_params_latest.ckpt')\n", (12225, 12288), False, 'import os\n'), ((12382, 12456), 'os.path.join', 'os.path.join', (['self.output_path', "(self.dataset + '_disGA_params_latest.ckpt')"], {}), "(self.output_path, self.dataset + '_disGA_params_latest.ckpt')\n", (12394, 12456), False, 'import os\n'), ((12550, 12624), 'os.path.join', 'os.path.join', (['self.output_path', "(self.dataset + '_disGB_params_latest.ckpt')"], {}), "(self.output_path, self.dataset + '_disGB_params_latest.ckpt')\n", (12562, 12624), False, 'import os\n'), ((12718, 12792), 'os.path.join', 'os.path.join', (['self.output_path', "(self.dataset + '_disLA_params_latest.ckpt')"], {}), "(self.output_path, self.dataset + '_disLA_params_latest.ckpt')\n", (12730, 12792), False, 'import os\n'), ((12886, 12960), 'os.path.join', 'os.path.join', (['self.output_path', "(self.dataset + '_disLB_params_latest.ckpt')"], {}), "(self.output_path, self.dataset + '_disLB_params_latest.ckpt')\n", (12898, 12960), 
False, 'import os\n'), ((14143, 14196), 'os.path.join', 'os.path.join', (['self.output_path', 'self.dataset', '"""model"""'], {}), "(self.output_path, self.dataset, 'model')\n", (14155, 14196), False, 'import os\n'), ((9381, 9410), 'mindspore.common.initializer.XavierUniform', 'init.XavierUniform', (['init_gain'], {}), '(init_gain)\n', (9399, 9410), True, 'from mindspore.common import initializer as init\n'), ((11656, 11667), 'time.time', 'time.time', ([], {}), '()\n', (11665, 11667), False, 'import time\n'), ((9542, 9567), 'mindspore.common.initializer.HeUniform', 'init.HeUniform', (['init_gain'], {}), '(init_gain)\n', (9556, 9567), True, 'from mindspore.common import initializer as init\n'), ((9676, 9719), 'mindspore.common.initializer.initializer', 'init.initializer', (['(0.0005)', 'cell.weight.shape'], {}), '(0.0005, cell.weight.shape)\n', (9692, 9719), True, 'from mindspore.common import initializer as init\n')] |
import itertools
# Digits that can appear in a multi-digit circular-prime candidate:
# any 0, 2, 4, 5, 6 or 8 would make some rotation divisible by 2 or 5.
stevke = ['1', '3', '7', '9']
def je_prastevilo(n):
    """Return True if n is a prime number.

    Values below 2 and even numbers are handled up front; everything
    else is trial-divided by odd candidates up to sqrt(n).
    """
    if n < 2 or n % 2 == 0:
        return n == 2
    i = 3
    while i * i <= n:
        # n is odd at this point, so only odd divisors need checking;
        # the original re-tested n % 2 == 0 here on every pass (dead code).
        if n % i == 0:
            return False
        i += 2
    return True
def zasukej(nabor):
    """Return every digit rotation of *nabor* as an int.

    *nabor* is an iterable of digit characters; the first entry of the
    result is the number itself, then each successive right-rotation.
    """
    digits = list(nabor)
    rotations = []
    for _ in range(len(digits)):
        rotations.append(int(''.join(digits)))
        # move the last digit to the front (right rotation)
        digits = [digits[-1]] + digits[:-1]
    return rotations
# Collect circular primes: a number qualifies only if every digit
# rotation of it is itself prime.
circular_primes = []
for dolzina in range(1, 7):
    for kandidat in itertools.product(stevke, repeat=dolzina):
        rotacije = zasukej(kandidat)
        if all(je_prastevilo(r) for r in rotacije):
            circular_primes.extend(rotacije)
# Single-digit primes are trivially circular; this picks up 2 and 5,
# whose digits are deliberately excluded from stevke.
for stevilo in range(10):
    if je_prastevilo(stevilo):
        circular_primes.append(stevilo)
unikati = set(circular_primes)
print(sorted(unikati), len(unikati))
"itertools.product"
] | [((525, 570), 'itertools.product', 'itertools.product', (['stevke'], {'repeat': 'ponavljanje'}), '(stevke, repeat=ponavljanje)\n', (542, 570), False, 'import itertools\n')] |
"""
~/utils/update_ontario_stocking.py
Created: 23 Jan 2019 15:29:22
DESCRIPTION:
This script updates the ontario data in the lake wide cwt database.
Updates include tag type, and sequence number for sequential cwts, cwt
manufacturer (where it should have been Micro Mark (MM))
Updates are performed on both stocking (below) and recovery (NOT YET)
tables.
This script should be run after the lakewide database has been
built and populated with both US and ontario data.
<NAME>
=============================================================
"""
import csv
import re
from collections import namedtuple

from fsdviz.common.models import Agency, CWT, CWTsequence
from fsdviz.stocking.models import StockingEvent
# ======================================================
# FSIS_ID to ID
# to update the OMNR stocking data, we need a dictionary that maps
# the ontario id values (fs_event) to the StockingEvent.Id in the
# current database
# get the id numbers and notes for each lake huron ontario stocking event
ont_events = StockingEvent.objects.filter(
    agency__abbrev="OMNR", jurisdiction__lake__abbrev="HU"
)
# ontario fs_event numbers are in the notes field as 'fs_event:
# <fsis_id>' this code extracts the fsis_id from the notes and pairs
# it with its corresponding id in the current lakewide database.
# returns a list of tuples of the form: (<fsis_id>, <id>)
# id_pairs = [(int(re.match('fs_event: (\d+)',x['notes']).groups()[0]), x['id'])
# for x in ont_events]
# create a dictionary with the fsis_id as key - makes it easy to get
# associated id for the lakewide db:
# NOTE(review): the regex-on-notes approach above was superseded by
# reading agency_stock_id directly; the commented code is kept for reference.
fsis2lwdb = {x.agency_stock_id: x.id for x in ont_events}
# ======================================================
# STOCKED SEQUENTIAL CWTS
print("Updating Ontario's Sequential tags...")
# the csv file "MNRF_stocking_events_sequential_cwts.csv" contains a
# list of stocking events associated with sequential cwts and the start
# and end of the range associated with that event.
# create a named tuple that will hold our stocking event info:
seqCWT = namedtuple("seqCWT", "fsis_event, cwt_number, seq_start, seq_end")
fname = "utils/patches/MNRF_stocking_events_sequential_cwts.csv"
with open(fname) as csvfile:
    reader = csv.reader(csvfile)
    next(reader, None)  # skip header
    seqcwt_events = [seqCWT(*x) for x in reader]
# quick visual sanity check of the first few parsed rows:
for x in seqcwt_events[:3]:
    print(x)
# make sure that all of the cwts are in the database; report any that
# are missing. (An unused `cwt_numbers` list was removed here - it was
# built but never read.)
for event in seqcwt_events:
    cwt = CWT.objects.filter(cwt_number=event.cwt_number).first()
    if cwt is None:
        print(event)
# now loop over the sequential cwt events and find the associated cwt
# and cwt_sequences in our database. Update the cwt start, end and tag
# type for each one. Keep a list of errors and print them out if
# anything goes wrong.
oops = []
for event in seqcwt_events:
    cwt = CWT.objects.filter(cwt_number=event.cwt_number).first()
    if cwt is None:
        # cwt is missing from the database - remember it and move on
        print(event)
        oops.append(event)
        continue
    lwdb_id = fsis2lwdb[event.fsis_event]
    stocking_event = StockingEvent.objects.get(id=lwdb_id)
    cwt_seq, created = CWTsequence.objects.get_or_create(
        cwt=cwt, sequence=(int(event.seq_start), int(event.seq_end))
    )
    cwt_seq.events.add(stocking_event)
    cwt.tag_type = "sequential"
    cwt.save()
# report any sequential tag records that could not be matched to a cwt
if oops:
    print("There were problems with the following sequential tag records:")
    for x in oops:
        print(x)

# make sure that there aren't any stocking events associated with
# sequential cwt series that end with 1 - they should have all been
# fixed in the last step.
# BUGFIX: tag_type was misspelled "sequental" here, so the filter
# matched nothing and this assert always passed vacuously.
oops = StockingEvent.objects.filter(
    cwt_series__seq_end=1, cwt_series__cwt__tag_type="sequential"
)
assert len(oops) == 0

# delete all of the cwt series associated with sequential tags that no
# longer point to any stocking events - these were created as
# placeholders when the cwt was first added. (A trailing, never-evaluated
# `foo = CWTsequence.objects.filter(...)` queryset was removed - dead code.)
childless_cwts = CWTsequence.objects.filter(
    cwt__tag_type="sequential", sequence__isnull=True
)
childless_cwts.delete()
#
# ======================================================
# CWT MANUFACTURER
print("Updating MicroMark tags...")
# this file returns a list of cwt numbers (without dashes) that we
# know were manufactured by Micro Mark. Only cwt numbers that are
# unique to micro mark are included (63-59-01, 63-41-04,
# 63-43-04, 63-56-03 were manufactured by both MM and NMT and must be
# handled separately (below))
fname = "utils/patches/MNRF_MicroMark_cwts.csv"
with open(fname) as csvfile:
    reader = csv.reader(csvfile)
    next(reader, None)  # skip header
    mm_cwts = [x[0] for x in reader]
# NOTE(review): Agency must be importable from fsdviz.common.models -
# it was missing from the imports at the top of this script.
omnr = Agency.objects.get(abbrev="OMNR")
for cwt_num in mm_cwts:
    # exactly one cwt record should match each number for OMNR events
    qs = CWT.objects.filter(
        cwt_number=cwt_num, cwt_series__events__agency=omnr
    ).distinct()
    assert len(qs) == 1
    cwt = qs[0]
    cwt.manufacturer = "mm"
    cwt.save()
# these are the cwt numbers that have been purchased from two
# vendors. The event numbers are the FSIS stocking event IDs that used
# the Micro Mark tags.
micromark_events = {
    # chinook stocked by ssa in 2001 - Not in FSIS Yet!
    # "634104": [],
    # chinook stocked by ssa in 2001 - Not in FSIS Yet!
    # "634304": [],
    "635603": [2650],
    "635901": [2379, 2928],
}

# now loop over cwt numbers that have been purchased from 2
# manufacturers and get the events associated with each one. Create a
# new CWT object and new cwt_sequence, then get the original stocking
# event and assign it to the sequence object created above.
for cwt_num, event_nums in micromark_events.items():
    print("Applying updates for both {} tags...".format(cwt_num))
    cwt_obj, created = CWT.objects.get_or_create(
        cwt_number=cwt_num, tag_type="cwt", tag_count=0, manufacturer="mm"
    )
    cwt_seq, created = CWTsequence.objects.get_or_create(cwt=cwt_obj, sequence=(0, 1))
    if event_nums:
        for fsis_id in event_nums:
            lwdb_id = fsis2lwdb.get(str(fsis_id))
            if lwdb_id:
                event = StockingEvent.objects.get(id=lwdb_id)
                # detach the old (NMT) series before adding the MM one
                event.cwt_series.clear()
                cwt_seq.events.add(event)
            else:
                # BUGFIX: message was "/t unable for find ..."
                print("\t unable to find FSIS event: {}".format(fsis_id))

print("Done updating Ontario-Huron tags.")
| [
"fsdviz.common.models.CWT.objects.get_or_create",
"collections.namedtuple",
"fsdviz.stocking.models.StockingEvent.objects.filter",
"fsdviz.common.models.CWT.objects.filter",
"fsdviz.stocking.models.StockingEvent.objects.get",
"fsdviz.common.models.CWTsequence.objects.filter",
"fsdviz.common.models.CWTse... | [((1036, 1124), 'fsdviz.stocking.models.StockingEvent.objects.filter', 'StockingEvent.objects.filter', ([], {'agency__abbrev': '"""OMNR"""', 'jurisdiction__lake__abbrev': '"""HU"""'}), "(agency__abbrev='OMNR',\n jurisdiction__lake__abbrev='HU')\n", (1064, 1124), False, 'from fsdviz.stocking.models import StockingEvent\n'), ((2070, 2136), 'collections.namedtuple', 'namedtuple', (['"""seqCWT"""', '"""fsis_event, cwt_number, seq_start, seq_end"""'], {}), "('seqCWT', 'fsis_event, cwt_number, seq_start, seq_end')\n", (2080, 2136), False, 'from collections import namedtuple\n'), ((3895, 3989), 'fsdviz.stocking.models.StockingEvent.objects.filter', 'StockingEvent.objects.filter', ([], {'cwt_series__seq_end': '(1)', 'cwt_series__cwt__tag_type': '"""sequental"""'}), "(cwt_series__seq_end=1,\n cwt_series__cwt__tag_type='sequental')\n", (3923, 3989), False, 'from fsdviz.stocking.models import StockingEvent\n'), ((4207, 4284), 'fsdviz.common.models.CWTsequence.objects.filter', 'CWTsequence.objects.filter', ([], {'cwt__tag_type': '"""sequential"""', 'sequence__isnull': '(True)'}), "(cwt__tag_type='sequential', sequence__isnull=True)\n", (4233, 4284), False, 'from fsdviz.common.models import CWT, CWTsequence\n'), ((4324, 4373), 'fsdviz.common.models.CWTsequence.objects.filter', 'CWTsequence.objects.filter', ([], {'sequence__isnull': '(True)'}), '(sequence__isnull=True)\n', (4350, 4373), False, 'from fsdviz.common.models import CWT, CWTsequence\n'), ((2245, 2264), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (2255, 2264), False, 'import csv\n'), ((3206, 3243), 'fsdviz.stocking.models.StockingEvent.objects.get', 'StockingEvent.objects.get', ([], {'id': 'lwdb_id'}), '(id=lwdb_id)\n', (3231, 3243), False, 'from fsdviz.stocking.models import StockingEvent\n'), ((4899, 4918), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (4909, 4918), False, 'import csv\n'), ((6039, 6136), 
'fsdviz.common.models.CWT.objects.get_or_create', 'CWT.objects.get_or_create', ([], {'cwt_number': 'cwt_num', 'tag_type': '"""cwt"""', 'tag_count': '(0)', 'manufacturer': '"""mm"""'}), "(cwt_number=cwt_num, tag_type='cwt', tag_count=0,\n manufacturer='mm')\n", (6064, 6136), False, 'from fsdviz.common.models import CWT, CWTsequence\n'), ((6171, 6234), 'fsdviz.common.models.CWTsequence.objects.get_or_create', 'CWTsequence.objects.get_or_create', ([], {'cwt': 'cwt_obj', 'sequence': '(0, 1)'}), '(cwt=cwt_obj, sequence=(0, 1))\n', (6204, 6234), False, 'from fsdviz.common.models import CWT, CWTsequence\n'), ((2623, 2670), 'fsdviz.common.models.CWT.objects.filter', 'CWT.objects.filter', ([], {'cwt_number': 'event.cwt_number'}), '(cwt_number=event.cwt_number)\n', (2641, 2670), False, 'from fsdviz.common.models import CWT, CWTsequence\n'), ((3002, 3049), 'fsdviz.common.models.CWT.objects.filter', 'CWT.objects.filter', ([], {'cwt_number': 'event.cwt_number'}), '(cwt_number=event.cwt_number)\n', (3020, 3049), False, 'from fsdviz.common.models import CWT, CWTsequence\n'), ((5072, 5143), 'fsdviz.common.models.CWT.objects.filter', 'CWT.objects.filter', ([], {'cwt_number': 'cwt_num', 'cwt_series__events__agency': 'omnr'}), '(cwt_number=cwt_num, cwt_series__events__agency=omnr)\n', (5090, 5143), False, 'from fsdviz.common.models import CWT, CWTsequence\n'), ((6387, 6424), 'fsdviz.stocking.models.StockingEvent.objects.get', 'StockingEvent.objects.get', ([], {'id': 'lwdb_id'}), '(id=lwdb_id)\n', (6412, 6424), False, 'from fsdviz.stocking.models import StockingEvent\n')] |
import os
from math import radians, sin, cos, asin, degrees, pi, sqrt, pow, fabs, atan2
from django import forms
from django.db import models
from django.conf import settings
from modelcluster.fields import ParentalKey
from wagtail.wagtailcore.models import Page, Orderable
from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtailsearch import index
from wagtail.wagtaildocs.models import Document
from wagtail.wagtaildocs.edit_handlers import DocumentChooserPanel
class Dxf2VrPage(Page):
    """Wagtail page that turns an uploaded DXF drawing into an A-Frame
    VR scene (parsed by extract_dxf, rendered by the make_* helpers)."""
    # short description shown in listings / search results
    intro = models.CharField(max_length=250, null=True, blank=True,)
    # equirectangular panorama used as the VR sky background
    equirectangular_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete = models.SET_NULL,
        related_name = '+',
    )
    # the DXF source document parsed by extract_dxf
    # NOTE(review): unlike the image field this has no blank=True -
    # confirm the admin form is meant to require a document.
    dxf_file = models.ForeignKey(
        'wagtaildocs.Document',
        null=True,
        on_delete = models.SET_NULL,
        related_name = '+',
    )
    # scene toggles exposed to the A-Frame template
    shadows = models.BooleanField(default=False)
    fly_camera = models.BooleanField(default=False)
    double_face = models.BooleanField(default=False)
    search_fields = Page.search_fields + [
        index.SearchField('intro'),
        #index.SearchField('body'),
    ]
    content_panels = Page.content_panels + [
        FieldPanel('intro'),
        DocumentChooserPanel('dxf_file'),
        ImageChooserPanel('equirectangular_image'),
        MultiFieldPanel([
            FieldPanel('shadows'),
            FieldPanel('fly_camera'),
            FieldPanel('double_face'),
        ], heading="Visual settings"),
        InlinePanel('material_images', label="Material Image Gallery",),
    ]
    def extract_dxf(self):
        """Parse the uploaded DXF file into A-Frame HTML fragments.

        Returns a dict keyed by an incrementing entity counter *x*.
        Walks (group code, value) pairs in the ENTITIES section:
        3DFACE entities become one or two triangles, INSERT blocks are
        dispatched to the make_* helpers by block name, and ATTRIB
        records attach key/value pairs to the current block. Y
        coordinates are mirrored, and block insertion points given in
        an OCS are converted to world coordinates via the DXF
        "arbitrary axis algorithm".
        """
        path_to_dxf = os.path.join(settings.MEDIA_ROOT, 'documents', self.dxf_file.filename)
        dxf_f = open(path_to_dxf, encoding = 'utf-8')
        material_gallery=self.material_images.all()
        output = {}
        flag = False
        x = 0
        value = 'dummy'
        # skip the header sections until the ENTITIES section begins
        while value !='ENTITIES':
            key = dxf_f.readline().strip()
            value = dxf_f.readline().strip()
        # main loop over (group code, value) pairs until ENDSEC
        while value !='ENDSEC':
            key = dxf_f.readline().strip()
            value = dxf_f.readline().strip()
            if flag == 'face':#stores values for 3D faces
                if key == '8':#layer name
                    temp[key] = value
                elif key == '10' or key == '11' or key == '12' or key == '13':#X position
                    temp[key] = value
                elif key == '20' or key == '21' or key == '22' or key == '23':#mirror Y position
                    value = -float(value)
                    temp[key] = value
                elif key == '30' or key == '31' or key == '32' or key == '33':#Z position
                    temp[key] = value
            elif flag == 'block':#stores values for blocks
                if key == '2' or key == '8':#block name and layer name
                    temp[key] = value
                elif key == '10' or key == '30':#X Z position
                    temp[key] = value
                elif key == '20':#Y position, mirrored
                    temp[key] = -float(value)
                elif key == '50':#Z rotation
                    temp[key] = value
                elif key == '41' or key == '42' or key == '43':#scale values
                    temp[key] = value
                elif key == '210':#X of OCS unitary vector
                    Az_1 = float(value)
                    P_x = float(temp['10'])
                elif key == '220':#Y of OCS unitary vector
                    Az_2 = float(value)
                    P_y = -float(temp['20'])#reset original value
                elif key == '230':#Z of OCS unitary vector
                    Az_3 = float(value)
                    P_z = float(temp['30'])
                    #arbitrary axis algorithm
                    #see if OCS z vector is close to world Z axis
                    if fabs(Az_1) < (1/64) and fabs(Az_2) < (1/64):
                        W = ('Y', 0, 1, 0)
                    else:
                        W = ('Z', 0, 0, 1)
                    #cross product for OCS x arbitrary vector, normalized
                    Ax_1 = W[2]*Az_3-W[3]*Az_2
                    Ax_2 = W[3]*Az_1-W[1]*Az_3
                    Ax_3 = W[1]*Az_2-W[2]*Az_1
                    Norm = sqrt(pow(Ax_1, 2)+pow(Ax_2, 2)+pow(Ax_3, 2))
                    Ax_1 = Ax_1/Norm
                    Ax_2 = Ax_2/Norm
                    Ax_3 = Ax_3/Norm
                    #cross product for OCS y arbitrary vector, normalized
                    Ay_1 = Az_2*Ax_3-Az_3*Ax_2
                    Ay_2 = Az_3*Ax_1-Az_1*Ax_3
                    Ay_3 = Az_1*Ax_2-Az_2*Ax_1
                    Norm = sqrt(pow(Ay_1, 2)+pow(Ay_2, 2)+pow(Ay_3, 2))
                    Ay_1 = Ay_1/Norm
                    Ay_2 = Ay_2/Norm
                    Ay_3 = Ay_3/Norm
                    #insertion world coordinates from OCS
                    temp['10'] = P_x*Ax_1+P_y*Ay_1+P_z*Az_1
                    temp['20'] = P_x*Ax_2+P_y*Ay_2+P_z*Az_2
                    temp['30'] = P_x*Ax_3+P_y*Ay_3+P_z*Az_3
                    #OCS X vector translated into WCS
                    Ax_1 = ((P_x+cos(radians(float(temp['50']))))*Ax_1+(P_y+sin(radians(float(temp['50']))))*Ay_1+P_z*Az_1)-temp['10']
                    Ax_2 = ((P_x+cos(radians(float(temp['50']))))*Ax_2+(P_y+sin(radians(float(temp['50']))))*Ay_2+P_z*Az_2)-temp['20']
                    Ax_3 = ((P_x+cos(radians(float(temp['50']))))*Ax_3+(P_y+sin(radians(float(temp['50']))))*Ay_3+P_z*Az_3)-temp['30']
                    #cross product for OCS y vector, normalized
                    Ay_1 = Az_2*Ax_3-Az_3*Ax_2
                    Ay_2 = Az_3*Ax_1-Az_1*Ax_3
                    Ay_3 = Az_1*Ax_2-Az_2*Ax_1
                    Norm = sqrt(pow(Ay_1, 2)+pow(Ay_2, 2)+pow(Ay_3, 2))
                    Ay_1 = Ay_1/Norm
                    Ay_2 = Ay_2/Norm
                    Ay_3 = Ay_3/Norm
                    #A-Frame rotation order is Yaw(Z), Pitch(X) and Roll(Y)
                    #thanks for help <NAME> and https://www.geometrictools.com/
                    if Ay_3<1:
                        if Ay_3>-1:
                            pitch = asin(Ay_3)
                            yaw = atan2(-Ay_1, Ay_2)
                            roll = atan2(-Ax_3, Az_3)
                        else:
                            pitch = -pi/2
                            yaw = -atan2(Az_1, Ax_1)
                            roll = 0
                    else:
                        pitch = pi/2
                        yaw = atan2(Az_1, Ax_1)
                        roll = 0
                    #Y position, mirrored
                    temp['20'] = -temp['20']
                    #rotations from radians to degrees
                    temp['210'] = degrees(pitch)
                    temp['50'] = degrees(yaw)
                    temp['220'] = -degrees(roll)
            elif flag == 'attrib':#stores values for attributes within block
                if key == '1':#attribute value
                    attr_value = value
                elif key == '2':#attribute key
                    temp[value] = attr_value
                    flag = 'block'#restore block modality
            # group code 0 closes the entity currently being collected
            if key == '0':
                if flag == 'face':#close 3D face
                    #is material set in model?
                    no_color=True
                    if material_gallery:
                        for material in material_gallery:
                            if material.layer == temp['8']:
                                no_color=False
                                temp['color'] = material.color
                    if no_color:#color is still not set for layer, so we use default
                        temp['8'] = 'default'
                        temp['color'] = 'white'
                    output[x] = self.make_triangle_1(x, temp)
                    if temp['12']!=temp['13'] or temp['22']!=temp['23'] or temp['32']!=temp['33']:
                        x += 1
                        output[x] = self.make_triangle_2(x, temp)
                    flag = False
                elif value == 'ATTRIB':#start attribute within block
                    attr_value = ''
                    flag = 'attrib'
                elif flag == 'block':#close block
                    #material images are patterns? is material set in model?
                    no_color=True
                    if material_gallery:
                        for material in material_gallery:
                            if material.layer == temp['8']:
                                no_color=False
                                temp['color'] = material.color
                                if material.pattern:# == True
                                    temp['repeat']=True
                    if no_color:#color is still not set for layer, so we use default
                        temp['8'] = 'default'
                        temp['color'] = 'white'
                    if temp['2'] == '6planes':#left for legacy
                        output[x] = self.make_box(x, temp)
                    elif temp['2'] == 'box' or temp['2'] == 'a-box':
                        output[x] = self.make_box(x, temp)
                    elif temp['2'] == 'cylinder' or temp['2'] == 'a-cylinder':
                        output[x] = self.make_cylinder(x, temp)
                    elif temp['2'] == 'cone' or temp['2'] == 'a-cone':
                        output[x] = self.make_cone(x, temp)
                    elif temp['2'] == 'sphere' or temp['2'] == 'a-sphere':
                        output[x] = self.make_sphere(x, temp)
                    elif temp['2'] == 'circle' or temp['2'] == 'a-circle':
                        output[x] = self.make_circle(x, temp)
                    elif temp['2'] == 'plane' or temp['2'] == 'a-plane' or temp['2'] == 'look-at':
                        output[x] = self.make_plane(x, temp)
                    elif temp['2'] == 'floor':#left for legacy
                        temp['210'] = float(temp['210']) - 90
                        output[x] = self.make_plane(x, temp)
                    elif temp['2'] == 'ceiling':#left for legacy
                        temp['210'] = float(temp['210']) + 90
                        output[x] = self.make_plane(x, temp)
                    elif temp['2'] == 'light' or temp['2'] == 'a-light':
                        output[x] = self.make_light(x, temp)
                    elif temp['2'] == 'a-text':
                        output[x] = self.make_text(x, temp)
                    elif temp['2'] == 'a-link':
                        output[x] = self.make_link(x, temp)
                    flag = False
            if value == '3DFACE':#start 3D face
                temp = {}#default values
                flag = 'face'
                x += 1
            elif value == 'INSERT':#start block
                temp = {'41': 1, '42': 1, '43': 1, '50': 0, '210': 0, '220': 0, '230': 1,'repeat': False}#default values
                flag = 'block'
                x += 1
        dxf_f.close()
        return output
def is_repeat(self, repeat, rx, ry):
if repeat:
output = f'; repeat:{rx} {ry}'
return output
else:
return ';'
def make_box(self, x, temp):
outstr = f'<a-entity id="box-ent-{x}" \n'
outstr += f'position="{temp["10"]} {temp["30"]} {temp["20"]}" \n'
outstr += f'rotation="{temp["210"]} {temp["50"]} {temp["220"]}">\n'
outstr += f'<a-box id="box-{x}" \n'
outstr += f'position="{float(temp["41"])/2} {float(temp["43"])/2} {-float(temp["42"])/2}" \n'
outstr += f'scale="{temp["41"]} {temp["43"]} {temp["42"]}" \n'
outstr += 'geometry="'
try:
if temp['segments-depth']!='1':
outstr += f'segments-depth: {temp["segments-depth"]};'
if temp['segments-height']!='1':
outstr += f'segments-height: {temp["segments-height"]};'
if temp['segments-width']!='1':
outstr += f'segments-width: {temp["segments-width"]};'
outstr += '" \n'
except KeyError:
outstr += '" \n'
outstr += f'material="src: #image-{temp["8"]}; color: {temp["color"]}'
outstr += self.is_repeat(temp["repeat"], temp["41"], temp["43"])
outstr += '">\n</a-box>\n</a-entity>\n'
return outstr
    def make_cone(self, x, temp):
        """Render an <a-cone> primitive from block attributes in *temp*.

        Geometry attributes are emitted only when they differ from the
        A-Frame defaults; a missing key raises KeyError, which aborts
        the remaining geometry attributes and closes the string.
        """
        outstr = f'<a-entity id="cone-ent-{x}" \n'
        outstr += f'position="{temp["10"]} {temp["30"]} {temp["20"]}" \n'
        outstr += f'rotation="{temp["210"]} {temp["50"]} {temp["220"]}">\n'
        outstr += f'<a-cone id="cone-{x}" \n'
        outstr += f'position="0 {float(temp["43"])/2} 0" \n'
        outstr += f'scale="{temp["41"]} {temp["43"]} {temp["42"]}" \n'
        outstr += 'geometry="'
        try:
            if temp['open-ended']!='false':
                outstr += 'open-ended: true;'
            if temp['radius-top']!='0':
                outstr += f'radius-top: {temp["radius-top"]};'
            if temp['segments-height']!='18':
                outstr += f'segments-height: {temp["segments-height"]};'
            if temp['segments-radial']!='36':
                outstr += f'segments-radial: {temp["segments-radial"]};'
            if temp['theta-length']!='360':
                outstr += f'theta-length: {temp["theta-length"]};'
            if temp['theta-start']!='0':
                outstr += f'theta-start: {temp["theta-start"]};'
            outstr += '" \n'
        except KeyError:
            # a missing attribute key: close the geometry string as-is
            outstr += '" \n'
        outstr += f'material="src: #image-{temp["8"]}; color: {temp["color"]}'
        outstr += self.is_repeat(temp["repeat"], temp["41"], temp["43"])
        outstr += '">\n</a-cone>\n</a-entity>\n'
        return outstr
    def make_circle(self, x, temp):
        """Render an <a-circle> primitive from block attributes in *temp*.

        NOTE(review): only blocks named exactly 'circle' get the extra
        -90 X rotation (laid flat); 'a-circle' blocks do not - confirm
        this asymmetry is intentional.
        """
        outstr = f'<a-entity id="circle-ent-{x}" \n'
        outstr += f'position="{temp["10"]} {temp["30"]} {temp["20"]}" \n'
        outstr += f'rotation="{temp["210"]} {temp["50"]} {temp["220"]}">\n'
        outstr += f'<a-circle id="circle-{x}" \n'
        if temp['2'] == 'circle':
            outstr += f'rotation="-90 0 0"\n'
        outstr += f'radius="{temp["41"]}" \n'
        outstr += 'geometry="'
        try:
            if temp['segments']!='32':
                outstr += f'segments: {temp["segments"]};'
            if temp['theta-length']!='360':
                outstr += f'theta-length: {temp["theta-length"]};'
            if temp['theta-start']!='0':
                outstr += f'theta-start: {temp["theta-start"]};'
            outstr += '" \n'
        except KeyError:
            # a missing attribute key: close the geometry string as-is
            outstr += '" \n'
        outstr += f'material="src: #image-{temp["8"]}; color: {temp["color"]}'
        outstr += self.is_repeat(temp["repeat"], temp["41"], temp["43"])
        outstr += '">\n</a-circle>\n</a-entity>\n'
        return outstr
    def make_cylinder(self, x, temp):
        """Render an <a-cylinder> primitive from block attributes in *temp*.

        Mirrors make_cone: geometry attributes are emitted only when
        they differ from the A-Frame defaults, and a missing key aborts
        the remaining geometry attributes via KeyError.
        """
        outstr = f'<a-entity id="cylinder-ent-{x}" \n'
        outstr += f'position="{temp["10"]} {temp["30"]} {temp["20"]}" \n'
        outstr += f'rotation="{temp["210"]} {temp["50"]} {temp["220"]}">\n'
        outstr += f'<a-cylinder id="cylinder-{x}" \n'
        outstr += f'position="0 {float(temp["43"])/2} 0" \n'
        outstr += f'scale="{temp["41"]} {temp["43"]} {temp["42"]}" \n'
        outstr += 'geometry="'
        try:
            if temp['open-ended']!='false':
                outstr += 'open-ended: true;'
            if temp['radius-top']!='0':
                outstr += f'radius-top: {temp["radius-top"]};'
            if temp['segments-height']!='18':
                outstr += f'segments-height: {temp["segments-height"]};'
            if temp['segments-radial']!='36':
                outstr += f'segments-radial: {temp["segments-radial"]};'
            if temp['theta-length']!='360':
                outstr += f'theta-length: {temp["theta-length"]};'
            if temp['theta-start']!='0':
                outstr += f'theta-start: {temp["theta-start"]};'
            outstr += '" \n'
        except KeyError:
            # a missing attribute key: close the geometry string as-is
            outstr += '" \n'
        outstr += f'material="src: #image-{temp["8"]}; color: {temp["color"]}'
        outstr += self.is_repeat(temp["repeat"], temp["41"], temp["43"])
        outstr += '">\n</a-cylinder>\n</a-entity>\n'
        return outstr
def make_sphere(self, x, temp):
    """Render a DXF sphere as an A-Frame <a-sphere> inside a positioned entity.

    Missing optional geometry keys in `temp` close the geometry attribute
    with A-Frame defaults (KeyError fallback).
    """
    parts = [f'<a-entity id="sphere-ent-{x}" \n']
    parts.append(f'position="{temp["10"]} {temp["30"]} {temp["20"]}" \n')
    parts.append(f'rotation="{temp["210"]} {temp["50"]} {temp["220"]}">\n')
    parts.append(f'<a-sphere id="sphere-{x}" \n')
    parts.append(f'position="0 {temp["43"]} 0" \n')
    parts.append(f'scale="{temp["41"]} {temp["43"]} {temp["42"]}" \n')
    parts.append('geometry="')
    try:
        if temp['phi-length'] != '360':
            parts.append(f'phi-length: {temp["phi-length"]};')
        if temp['phi-start'] != '0':
            parts.append(f'phi-start: {temp["phi-start"]};')
        if temp['segments-height'] != '18':
            parts.append(f'segments-height: {temp["segments-height"]};')
        if temp['segments-width'] != '36':
            parts.append(f'segments-width: {temp["segments-width"]};')
        if temp['theta-length'] != '180':
            parts.append(f'theta-length: {temp["theta-length"]};')
        if temp['theta-start'] != '0':
            parts.append(f'theta-start: {temp["theta-start"]};')
        parts.append('" \n')
    except KeyError:
        # optional key absent: close the attribute, defaults apply
        parts.append('" \n')
    parts.append(f'material="src: #image-{temp["8"]}; color: {temp["color"]}')
    parts.append(self.is_repeat(temp["repeat"], temp["41"], temp["43"]))
    parts.append('">\n</a-sphere>\n</a-entity>\n')
    return ''.join(parts)
def make_plane(self, x, temp):
    """Render a DXF plane as an A-Frame <a-plane> inside a positioned entity.

    The `temp['2']` tag selects the positioning mode: 'look-at' planes are
    centred and track the camera foot, 'ceiling' planes get a flipped
    vertical offset, everything else is inserted at its corner.
    """
    parts = [f'<a-entity id="plane-ent-{x}" \n']
    parts.append(f'position="{temp["10"]} {temp["30"]} {temp["20"]}" \n')
    parts.append(f'rotation="{temp["210"]} {temp["50"]} {temp["220"]}">\n')
    parts.append(f'<a-plane id="plane-{x}" \n')
    kind = temp['2']
    if kind == 'look-at':
        # centred, and always faces the camera foot
        parts.append(f'position="0 {float(temp["43"])/2} 0" \n')
        parts.append('look-at="#camera-foot" \n')
    elif kind == 'ceiling':
        # ceiling: vertical offset flipped
        parts.append(f'position="{float(temp["41"])/2} {-float(temp["43"])/2} 0" \n')
    else:
        # insertion point is the corner
        parts.append(f'position="{float(temp["41"])/2} {float(temp["43"])/2} 0" \n')
    parts.append(f'width="{temp["41"]}" height="{temp["43"]}" \n')
    parts.append('geometry="')
    try:
        if temp['segments-height'] != '1':
            parts.append(f'segments-height: {temp["segments-height"]};')
        if temp['segments-width'] != '1':
            parts.append(f'segments-width: {temp["segments-width"]};')
        parts.append('" \n')
    except KeyError:
        # optional key absent: close the attribute, defaults apply
        parts.append('" \n')
    parts.append(f'material="src: #image-{temp["8"]}; color: {temp["color"]}')
    parts.append(self.is_repeat(temp["repeat"], temp["41"], temp["43"]))
    parts.append('">\n</a-plane>\n</a-entity>\n')
    return ''.join(parts)
def make_text(self, x, temp):
    """Render a DXF text insert as an A-Frame text entity."""
    # single implicit-concatenation expression instead of incremental +=
    return (
        f'<a-entity id="text-{x}" \n'
        f'position="{temp["10"]} {temp["30"]} {temp["20"]}" \n'
        f'rotation="{temp["210"]} {temp["50"]} {temp["220"]}"\n'
        f'text="width: {temp["41"]}; align: {temp["align"]}; color: {temp["color"]}; '
        f'value: {temp["text"]}; wrap-count: {temp["wrap-count"]}; '
        '">\n</a-entity>\n'
    )
def make_link(self, x, temp):
    """Render a navigation link (<a-link>) to a related page.

    `temp['tree']` selects the target page relative to the current one
    ('parent', 'child', 'previous'/'prev', anything else -> next sibling).
    Returns '' when no target page exists.
    """
    header = f'<a-link id="link-{x}" \n'
    header += f'position="{temp["10"]} {temp["30"]} {temp["20"]}" \n'
    header += f'rotation="{temp["210"]} {temp["50"]} {temp["220"]}"\n'
    header += f'scale="{temp["41"]} {temp["43"]} {temp["42"]}"\n'
    tree = temp['tree']
    if tree == 'parent':
        target = self.get_parent()
    elif tree == 'child':
        target = self.get_first_child()
    elif tree in ('previous', 'prev'):
        target = self.get_prev_sibling()
    else:
        # default: next sibling
        target = self.get_next_sibling()
    if not target:
        return ''
    outstr = header + f'href="{target.url}"\n'
    outstr += f'title="{temp["title"]}" color="{temp["color"]}" on="click"\n'
    eq_image = target.specific.equirectangular_image
    if eq_image:
        outstr += f'image="{eq_image.file.url}"'
    else:
        outstr += 'image="#default-sky"'
    outstr += '>\n</a-link>\n'
    return outstr
def make_triangle_1(self, x, temp):
    """Render the first triangle of a DXF 3DFACE as an A-Frame <a-triangle>.

    Uses vertex group codes 10/30/20, 11/31/21 and 12/32/22.
    """
    vertex_a = f'{temp["10"]} {temp["30"]} {temp["20"]}'
    vertex_b = f'{temp["11"]} {temp["31"]} {temp["21"]}'
    vertex_c = f'{temp["12"]} {temp["32"]} {temp["22"]}'
    pieces = [
        f'<a-triangle id="triangle-{x}" \n',
        f'geometry="vertexA:{vertex_a}; \n',
        f'vertexB:{vertex_b}; \n',
        f'vertexC:{vertex_c}" \n',
        f'material="src: #image-{temp["8"]}; color: {temp["color"]}; ',
    ]
    if self.double_face:
        pieces.append('side: double; ')
    pieces.append('">\n</a-triangle> \n')
    return ''.join(pieces)
def make_triangle_2(self, x, temp):
    """Render the second triangle of a DXF 3DFACE as an A-Frame <a-triangle>.

    Uses vertex group codes 10/30/20, 12/32/22 and 13/33/23 (the other half
    of the quad split by make_triangle_1).
    """
    vertex_a = f'{temp["10"]} {temp["30"]} {temp["20"]}'
    vertex_b = f'{temp["12"]} {temp["32"]} {temp["22"]}'
    vertex_c = f'{temp["13"]} {temp["33"]} {temp["23"]}'
    pieces = [
        f'<a-triangle id="triangle-{x}" \n',
        f'geometry="vertexA:{vertex_a}; \n',
        f'vertexB:{vertex_b}; \n',
        f'vertexC:{vertex_c}" \n',
        f'material="src: #image-{temp["8"]}; color: {temp["color"]}; ',
    ]
    if self.double_face:
        pieces.append('side: double; ')
    pieces.append('">\n</a-triangle> \n')
    return ''.join(pieces)
def make_light(self, x, temp):
    # Render a DXF-derived light definition as an A-Frame light entity.
    # temp['type'] selects ambient / point / spot / directional (default);
    # if any required key is missing a default point light is emitted.
    outstr = f'<a-entity id="light-{x}" \n'
    outstr += f'position="{temp["10"]} {temp["30"]} {temp["20"]}" \n'
    outstr += f'rotation="{temp["210"]} {temp["50"]} {temp["220"]}"\n'
    try:
        if temp['type'] == 'ambient':
            outstr += f'light="type: ambient; color: {temp["color"]}; intensity: {temp["intensity"]}; '
            outstr += '">\n</a-entity>\n'  # close light entity
        elif temp['type'] == 'point':
            outstr += f'light="type: point; color: {temp["color"]}; intensity: {temp["intensity"]}; '
            outstr += f'decay: {temp["decay"]}; distance: {temp["distance"]}; '
            if self.shadows:
                outstr += 'castShadow: true; '
            outstr += '"> \n</a-entity>\n'  # close light entity
        elif temp['type'] == 'spot':
            outstr += f'light="type: spot; color: {temp["color"]}; intensity: {temp["intensity"]}; '
            outstr += f'decay: {temp["decay"]}; distance: {temp["distance"]}; '
            outstr += f'angle: {temp["angle"]}; penumbra: {temp["penumbra"]}; '
            if self.shadows:
                outstr += 'castShadow: true; '
            # spot lights aim at a child target entity placed below them
            outstr += f'target: #light-{x}-target;"> \n'
            outstr += f'<a-entity id="light-{x}-target" position="0 -1 0"> </a-entity> \n</a-entity> \n'  # close light entity
        else:  # defaults to directional
            outstr += f'light="type: directional; color: {temp["color"]}; intensity: {temp["intensity"]}; '
            if self.shadows:
                outstr += 'castShadow: true; '
            outstr += f'target: #light-{x}-target;"> \n'
            outstr += f'<a-entity id="light-{x}-target" position="0 -1 0"> </a-entity> \n</a-entity> \n'  # close light entity
    except KeyError:  # default if no light type is set
        # NOTE(review): a KeyError raised part-way through a branch above
        # (e.g. 'decay' missing on a point light) leaves that branch's
        # partial attributes in `outstr` and then appends this default light
        # on top — likely unintended; confirm before relying on it.
        outstr += 'light="type: point; intensity: 0.75; distance: 50; decay: 2; '
        if self.shadows:
            outstr += 'castShadow: true;'
        outstr += '">\n</a-entity>\n'  # close light entity
    return outstr
class Dxf2VrPageMaterialImage(Orderable):
    # One gallery item linking a DXF layer name to a material definition
    # (image texture, color, pattern flag) on a Dxf2VrPage.
    page = ParentalKey(Dxf2VrPage, related_name='material_images')
    # Optional texture image for entities drawn on this layer.
    image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete = models.SET_NULL,
        related_name = '+',
    )
    # Name of the DXF layer this material applies to.
    layer = models.CharField(max_length=250, default="0",)
    # Material color (A-Frame/CSS color name — presumably; default "white").
    color = models.CharField(max_length=250, default="white",)
    # Whether the image is tiled as a repeating pattern — TODO confirm.
    pattern = models.BooleanField(default=False)
    panels = [
        FieldPanel('layer'),
        ImageChooserPanel('image'),
        FieldPanel('pattern'),
        FieldPanel('color'),
] | [
"wagtail.wagtailimages.edit_handlers.ImageChooserPanel",
"django.db.models.ForeignKey",
"math.pow",
"wagtail.wagtailsearch.index.SearchField",
"wagtail.wagtailadmin.edit_handlers.FieldPanel",
"os.path.join",
"math.degrees",
"math.asin",
"django.db.models.BooleanField",
"wagtail.wagtailadmin.edit_h... | [((622, 677), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)', 'null': '(True)', 'blank': '(True)'}), '(max_length=250, null=True, blank=True)\n', (638, 677), False, 'from django.db import models\n'), ((707, 820), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""wagtailimages.Image"""'], {'null': '(True)', 'blank': '(True)', 'on_delete': 'models.SET_NULL', 'related_name': '"""+"""'}), "('wagtailimages.Image', null=True, blank=True, on_delete=\n models.SET_NULL, related_name='+')\n", (724, 820), False, 'from django.db import models\n'), ((888, 990), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""wagtaildocs.Document"""'], {'null': '(True)', 'on_delete': 'models.SET_NULL', 'related_name': '"""+"""'}), "('wagtaildocs.Document', null=True, on_delete=models.\n SET_NULL, related_name='+')\n", (905, 990), False, 'from django.db import models\n'), ((1049, 1083), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1068, 1083), False, 'from django.db import models\n'), ((1101, 1135), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1120, 1135), False, 'from django.db import models\n'), ((1154, 1188), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1173, 1188), False, 'from django.db import models\n'), ((24227, 24282), 'modelcluster.fields.ParentalKey', 'ParentalKey', (['Dxf2VrPage'], {'related_name': '"""material_images"""'}), "(Dxf2VrPage, related_name='material_images')\n", (24238, 24282), False, 'from modelcluster.fields import ParentalKey\n'), ((24295, 24408), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""wagtailimages.Image"""'], {'null': '(True)', 'blank': '(True)', 'on_delete': 'models.SET_NULL', 'related_name': '"""+"""'}), "('wagtailimages.Image', null=True, blank=True, on_delete=\n models.SET_NULL, 
related_name='+')\n", (24312, 24408), False, 'from django.db import models\n'), ((24469, 24514), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)', 'default': '"""0"""'}), "(max_length=250, default='0')\n", (24485, 24514), False, 'from django.db import models\n'), ((24528, 24577), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)', 'default': '"""white"""'}), "(max_length=250, default='white')\n", (24544, 24577), False, 'from django.db import models\n'), ((24593, 24627), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (24612, 24627), False, 'from django.db import models\n'), ((1786, 1856), 'os.path.join', 'os.path.join', (['settings.MEDIA_ROOT', '"""documents"""', 'self.dxf_file.filename'], {}), "(settings.MEDIA_ROOT, 'documents', self.dxf_file.filename)\n", (1798, 1856), False, 'import os\n'), ((24652, 24671), 'wagtail.wagtailadmin.edit_handlers.FieldPanel', 'FieldPanel', (['"""layer"""'], {}), "('layer')\n", (24662, 24671), False, 'from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel\n'), ((24681, 24707), 'wagtail.wagtailimages.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""image"""'], {}), "('image')\n", (24698, 24707), False, 'from wagtail.wagtailimages.edit_handlers import ImageChooserPanel\n'), ((24717, 24738), 'wagtail.wagtailadmin.edit_handlers.FieldPanel', 'FieldPanel', (['"""pattern"""'], {}), "('pattern')\n", (24727, 24738), False, 'from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel\n'), ((24748, 24767), 'wagtail.wagtailadmin.edit_handlers.FieldPanel', 'FieldPanel', (['"""color"""'], {}), "('color')\n", (24758, 24767), False, 'from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel\n'), ((1241, 1267), 'wagtail.wagtailsearch.index.SearchField', 'index.SearchField', (['"""intro"""'], {}), "('intro')\n", (1258, 1267), False, 
'from wagtail.wagtailsearch import index\n'), ((1365, 1384), 'wagtail.wagtailadmin.edit_handlers.FieldPanel', 'FieldPanel', (['"""intro"""'], {}), "('intro')\n", (1375, 1384), False, 'from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel\n'), ((1394, 1426), 'wagtail.wagtaildocs.edit_handlers.DocumentChooserPanel', 'DocumentChooserPanel', (['"""dxf_file"""'], {}), "('dxf_file')\n", (1414, 1426), False, 'from wagtail.wagtaildocs.edit_handlers import DocumentChooserPanel\n'), ((1436, 1478), 'wagtail.wagtailimages.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""equirectangular_image"""'], {}), "('equirectangular_image')\n", (1453, 1478), False, 'from wagtail.wagtailimages.edit_handlers import ImageChooserPanel\n'), ((1665, 1727), 'wagtail.wagtailadmin.edit_handlers.InlinePanel', 'InlinePanel', (['"""material_images"""'], {'label': '"""Material Image Gallery"""'}), "('material_images', label='Material Image Gallery')\n", (1676, 1727), False, 'from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel\n'), ((1518, 1539), 'wagtail.wagtailadmin.edit_handlers.FieldPanel', 'FieldPanel', (['"""shadows"""'], {}), "('shadows')\n", (1528, 1539), False, 'from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel\n'), ((1553, 1577), 'wagtail.wagtailadmin.edit_handlers.FieldPanel', 'FieldPanel', (['"""fly_camera"""'], {}), "('fly_camera')\n", (1563, 1577), False, 'from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel\n'), ((1591, 1616), 'wagtail.wagtailadmin.edit_handlers.FieldPanel', 'FieldPanel', (['"""double_face"""'], {}), "('double_face')\n", (1601, 1616), False, 'from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel\n'), ((6907, 6921), 'math.degrees', 'degrees', (['pitch'], {}), '(pitch)\n', (6914, 6921), False, 'from math import radians, sin, cos, asin, degrees, pi, sqrt, pow, fabs, atan2\n'), ((6955, 
6967), 'math.degrees', 'degrees', (['yaw'], {}), '(yaw)\n', (6962, 6967), False, 'from math import radians, sin, cos, asin, degrees, pi, sqrt, pow, fabs, atan2\n'), ((6679, 6696), 'math.atan2', 'atan2', (['Az_1', 'Ax_1'], {}), '(Az_1, Ax_1)\n', (6684, 6696), False, 'from math import radians, sin, cos, asin, degrees, pi, sqrt, pow, fabs, atan2\n'), ((7003, 7016), 'math.degrees', 'degrees', (['roll'], {}), '(roll)\n', (7010, 7016), False, 'from math import radians, sin, cos, asin, degrees, pi, sqrt, pow, fabs, atan2\n'), ((4008, 4018), 'math.fabs', 'fabs', (['Az_1'], {}), '(Az_1)\n', (4012, 4018), False, 'from math import radians, sin, cos, asin, degrees, pi, sqrt, pow, fabs, atan2\n'), ((4032, 4042), 'math.fabs', 'fabs', (['Az_2'], {}), '(Az_2)\n', (4036, 4042), False, 'from math import radians, sin, cos, asin, degrees, pi, sqrt, pow, fabs, atan2\n'), ((4438, 4450), 'math.pow', 'pow', (['Ax_3', '(2)'], {}), '(Ax_3, 2)\n', (4441, 4450), False, 'from math import radians, sin, cos, asin, degrees, pi, sqrt, pow, fabs, atan2\n'), ((4836, 4848), 'math.pow', 'pow', (['Ay_3', '(2)'], {}), '(Ay_3, 2)\n', (4839, 4848), False, 'from math import radians, sin, cos, asin, degrees, pi, sqrt, pow, fabs, atan2\n'), ((5921, 5933), 'math.pow', 'pow', (['Ay_3', '(2)'], {}), '(Ay_3, 2)\n', (5924, 5933), False, 'from math import radians, sin, cos, asin, degrees, pi, sqrt, pow, fabs, atan2\n'), ((6306, 6316), 'math.asin', 'asin', (['Ay_3'], {}), '(Ay_3)\n', (6310, 6316), False, 'from math import radians, sin, cos, asin, degrees, pi, sqrt, pow, fabs, atan2\n'), ((6351, 6369), 'math.atan2', 'atan2', (['(-Ay_1)', 'Ay_2'], {}), '(-Ay_1, Ay_2)\n', (6356, 6369), False, 'from math import radians, sin, cos, asin, degrees, pi, sqrt, pow, fabs, atan2\n'), ((6405, 6423), 'math.atan2', 'atan2', (['(-Ax_3)', 'Az_3'], {}), '(-Ax_3, Az_3)\n', (6410, 6423), False, 'from math import radians, sin, cos, asin, degrees, pi, sqrt, pow, fabs, atan2\n'), ((4412, 4424), 'math.pow', 'pow', (['Ax_1', '(2)'], {}), 
'(Ax_1, 2)\n', (4415, 4424), False, 'from math import radians, sin, cos, asin, degrees, pi, sqrt, pow, fabs, atan2\n'), ((4425, 4437), 'math.pow', 'pow', (['Ax_2', '(2)'], {}), '(Ax_2, 2)\n', (4428, 4437), False, 'from math import radians, sin, cos, asin, degrees, pi, sqrt, pow, fabs, atan2\n'), ((4810, 4822), 'math.pow', 'pow', (['Ay_1', '(2)'], {}), '(Ay_1, 2)\n', (4813, 4822), False, 'from math import radians, sin, cos, asin, degrees, pi, sqrt, pow, fabs, atan2\n'), ((4823, 4835), 'math.pow', 'pow', (['Ay_2', '(2)'], {}), '(Ay_2, 2)\n', (4826, 4835), False, 'from math import radians, sin, cos, asin, degrees, pi, sqrt, pow, fabs, atan2\n'), ((5895, 5907), 'math.pow', 'pow', (['Ay_1', '(2)'], {}), '(Ay_1, 2)\n', (5898, 5907), False, 'from math import radians, sin, cos, asin, degrees, pi, sqrt, pow, fabs, atan2\n'), ((5908, 5920), 'math.pow', 'pow', (['Ay_2', '(2)'], {}), '(Ay_2, 2)\n', (5911, 5920), False, 'from math import radians, sin, cos, asin, degrees, pi, sqrt, pow, fabs, atan2\n'), ((6531, 6548), 'math.atan2', 'atan2', (['Az_1', 'Ax_1'], {}), '(Az_1, Ax_1)\n', (6536, 6548), False, 'from math import radians, sin, cos, asin, degrees, pi, sqrt, pow, fabs, atan2\n')] |
#!/usr/bin/env python
import os
# NOTE: must be set before the pomagma imports below — presumably the log
# level is read at import time.  The `isort:skip` markers preserve this
# ordering against import sorters.
os.environ['POMAGMA_LOG_LEVEL'] = '3'
from pomagma.compiler.util import temp_memoize  # isort:skip
from pomagma.reducer import bohm  # isort:skip
# Run two sample s-expression simplifications under a temporary memo cache.
print('Example 1.')
with temp_memoize():
    bohm.sexpr_simplify('(ABS (ABS (1 0 (1 0))) (ABS (ABS (1 (0 0)))))')
print('Example 2.')
with temp_memoize():
    bohm.sexpr_simplify('(ABS (ABS (0 1 1)) (ABS (ABS (1 (0 0)))))')
| [
"pomagma.reducer.bohm.sexpr_simplify",
"pomagma.compiler.util.temp_memoize"
] | [((208, 222), 'pomagma.compiler.util.temp_memoize', 'temp_memoize', ([], {}), '()\n', (220, 222), False, 'from pomagma.compiler.util import temp_memoize\n'), ((228, 296), 'pomagma.reducer.bohm.sexpr_simplify', 'bohm.sexpr_simplify', (['"""(ABS (ABS (1 0 (1 0))) (ABS (ABS (1 (0 0)))))"""'], {}), "('(ABS (ABS (1 0 (1 0))) (ABS (ABS (1 (0 0)))))')\n", (247, 296), False, 'from pomagma.reducer import bohm\n'), ((324, 338), 'pomagma.compiler.util.temp_memoize', 'temp_memoize', ([], {}), '()\n', (336, 338), False, 'from pomagma.compiler.util import temp_memoize\n'), ((344, 408), 'pomagma.reducer.bohm.sexpr_simplify', 'bohm.sexpr_simplify', (['"""(ABS (ABS (0 1 1)) (ABS (ABS (1 (0 0)))))"""'], {}), "('(ABS (ABS (0 1 1)) (ABS (ABS (1 (0 0)))))')\n", (363, 408), False, 'from pomagma.reducer import bohm\n')] |
"""Utilities for debugging memory usage, blocking calls, etc."""
import os
import sys
import traceback
from contextlib import contextmanager
from functools import partial
from pprint import pprint
from celery.platforms import signals
from celery.utils.text import WhateverIO
try:
from psutil import Process
except ImportError:
Process = None
__all__ = (
'blockdetection', 'sample_mem', 'memdump', 'sample',
'humanbytes', 'mem_rss', 'ps', 'cry',
)
UNITS = (
(2 ** 40.0, 'TB'),
(2 ** 30.0, 'GB'),
(2 ** 20.0, 'MB'),
(2 ** 10.0, 'KB'),
(0.0, 'b'),
)
_process = None
_mem_sample = []
def _on_blocking(signum, frame):
import inspect
raise RuntimeError(
f'Blocking detection timed-out at: {inspect.getframeinfo(frame)}'
)
@contextmanager
def blockdetection(timeout):
    """Context that raises an exception if process is blocking.

    Uses ``SIGALRM`` to detect blocking functions.
    """
    if not timeout:
        # A falsy timeout disables detection entirely.
        yield
    else:
        old_handler = signals['ALRM']
        # If we are already the installed handler (nested use), don't
        # "restore" ourselves on exit.
        old_handler = None if old_handler == _on_blocking else old_handler

        signals['ALRM'] = _on_blocking
        try:
            # arm_alarm presumably schedules SIGALRM `timeout` seconds out;
            # its return value is yielded to the caller.
            yield signals.arm_alarm(timeout)
        finally:
            if old_handler:
                signals['ALRM'] = old_handler
            signals.reset_alarm()
def sample_mem():
    """Sample RSS memory usage.

    Statistics can then be output by calling :func:`memdump`.
    """
    rss = mem_rss()
    _mem_sample.append(rss)
    return rss
def _memdump(samples=10):  # pragma: no cover
    """Drain the RSS sample buffer.

    Returns ``(prev, after_collect)`` where *prev* is at most *samples*
    of the recorded RSS values and *after_collect* is the RSS measured
    after a forced garbage collection.
    """
    S = _mem_sample
    # BUGFIX: sample() is a lazy generator over S, and S aliases
    # _mem_sample, which is cleared in place below.  Without list() the
    # generator would iterate the already-emptied buffer and yield
    # nothing; materialize it before clearing.
    prev = list(S) if len(S) <= samples else list(sample(S, samples))
    _mem_sample[:] = []
    import gc
    gc.collect()
    after_collect = mem_rss()
    return prev, after_collect
def memdump(samples=10, file=None):  # pragma: no cover
    """Dump memory statistics.

    Will print a sample of all RSS memory samples added by
    calling :func:`sample_mem`, and in addition print
    used RSS memory after :func:`gc.collect`.
    """
    emit = partial(print, file=file)
    if ps() is None:
        emit('- rss: (psutil not installed).')
        return
    prev, after_collect = _memdump(samples)
    if prev:
        emit('- rss (sample):')
        for mem in prev:
            emit(f'- > {mem},')
    emit(f'- rss (end): {after_collect}.')
def sample(x, n, k=0):
    """Given a list `x` a sample of length ``n`` of that list is returned.

    For example, if `n` is 10, and `x` has 100 items, a list of every tenth.
    item is returned.

    ``k`` can be used as offset.
    """
    step = len(x) // n
    taken = 0
    while taken < n:
        try:
            yield x[k]
        except IndexError:
            return
        k += step
        taken += 1
def hfloat(f, p=5):
    """Convert float to value suitable for humans.

    Arguments:
        f (float): The floating point number.
        p (int): Floating point precision (default is 5).
    """
    truncated = int(f)
    if truncated == f:
        # whole number: render without a fractional part
        return truncated
    return '{0:.{p}}'.format(f, p=p)
def humanbytes(s):
    """Convert bytes to human-readable form (e.g., KB, MB)."""
    candidates = (
        f'{hfloat(s / div if div else s)}{unit}'
        for div, unit in UNITS if s >= div
    )
    return next(candidates)
def mem_rss():
    """Return RSS memory usage as a humanized string."""
    proc = ps()
    if proc is None:
        # psutil not installed
        return None
    return humanbytes(_process_memory_info(proc).rss)
def ps():  # pragma: no cover
    """Return the global :class:`psutil.Process` instance.

    Note:
        Returns :const:`None` if :pypi:`psutil` is not installed.
    """
    global _process
    if _process is not None:
        return _process
    if Process is not None:
        _process = Process(os.getpid())
    return _process
def _process_memory_info(process):
    # Prefer the modern psutil API; fall back for older psutil releases
    # where the method was presumably still named get_memory_info().
    try:
        return process.memory_info()
    except AttributeError:
        return process.get_memory_info()
def cry(out=None, sepchr='=', seplen=49):  # pragma: no cover
    """Return stack-trace of all active threads.

    See Also:
        Taken from https://gist.github.com/737056.
    """
    import threading
    if out is None:
        out = WhateverIO()
    emit = partial(print, file=out)

    # map thread idents to thread objects so their names can be printed
    threads_by_id = {t.ident: t for t in threading.enumerate()}
    sep = sepchr * seplen
    for tid, frame in sys._current_frames().items():
        thread = threads_by_id.get(tid)
        if thread is None:
            # skip old junk (left-overs from a fork)
            continue
        emit(f'{thread.name}')
        emit(sep)
        traceback.print_stack(frame, file=out)
        emit(sep)
        emit('LOCAL VARIABLES')
        emit(sep)
        pprint(frame.f_locals, stream=out)
        emit('\n')
    return out.getvalue()
| [
"inspect.getframeinfo",
"traceback.print_stack",
"threading.enumerate",
"celery.utils.text.WhateverIO",
"celery.platforms.signals.arm_alarm",
"functools.partial",
"gc.collect",
"os.getpid",
"celery.platforms.signals.reset_alarm",
"sys._current_frames",
"pprint.pprint"
] | [((1715, 1727), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1725, 1727), False, 'import gc\n'), ((2056, 2081), 'functools.partial', 'partial', (['print'], {'file': 'file'}), '(print, file=file)\n', (2063, 2081), False, 'from functools import partial\n'), ((4087, 4111), 'functools.partial', 'partial', (['print'], {'file': 'out'}), '(print, file=out)\n', (4094, 4111), False, 'from functools import partial\n'), ((4042, 4054), 'celery.utils.text.WhateverIO', 'WhateverIO', ([], {}), '()\n', (4052, 4054), False, 'from celery.utils.text import WhateverIO\n'), ((4526, 4564), 'traceback.print_stack', 'traceback.print_stack', (['frame'], {'file': 'out'}), '(frame, file=out)\n', (4547, 4564), False, 'import traceback\n'), ((4632, 4666), 'pprint.pprint', 'pprint', (['frame.f_locals'], {'stream': 'out'}), '(frame.f_locals, stream=out)\n', (4638, 4666), False, 'from pprint import pprint\n'), ((1309, 1330), 'celery.platforms.signals.reset_alarm', 'signals.reset_alarm', ([], {}), '()\n', (1328, 1330), False, 'from celery.platforms import signals\n'), ((3639, 3650), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3648, 3650), False, 'import os\n'), ((4244, 4265), 'threading.enumerate', 'threading.enumerate', ([], {}), '()\n', (4263, 4265), False, 'import threading\n'), ((4316, 4337), 'sys._current_frames', 'sys._current_frames', ([], {}), '()\n', (4335, 4337), False, 'import sys\n'), ((743, 770), 'inspect.getframeinfo', 'inspect.getframeinfo', (['frame'], {}), '(frame)\n', (763, 770), False, 'import inspect\n'), ((1179, 1205), 'celery.platforms.signals.arm_alarm', 'signals.arm_alarm', (['timeout'], {}), '(timeout)\n', (1196, 1205), False, 'from celery.platforms import signals\n')] |
#! /usr/bin/python3
import sys
sys.path.append('../../')
import numpy as np
import numpy.fft as npfft
import matplotlib.pyplot as plt
from matplotlib import animation
import time
from netCDF4 import MFDataset
from nephelae_simulation.mesonh_interface import MesoNHVariable
from nephelae_base.types import Position
from nephelae_base.types import Bounds
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process import kernels as gpk
class WindKernel(gpk.Kernel):
    """Anisotropic squared-exponential kernel advected by a constant wind.

    Compatible with sklearn.gaussian_process.GaussianProcessRegressor.
    /!\ Hyper-parameter optimisation HAS NOT BEEN TESTED: when using with
    GaussianProcessRegressor, set optimizer=None.
    /!\ Only implemented for dimension (t, x, y) for now, for testing.
    """

    def __init__(self, lengthScale=[1.0,1.0,1.0],
                 stddev=1.0, noiseStddev=0.1,
                 windSpeed=[0.0,0.0]):
        self.lengthScale = lengthScale
        self.stddev = stddev
        self.noiseStddev = noiseStddev
        self.windSpeed = windSpeed

    def __call__(self, X, Y=None):
        if Y is None:
            Y = X
        # Pairwise differences via broadcasting: dt[i, j] = Y[j, 0] - X[i, 0].
        dt = Y[:, 0][None, :] - X[:, 0][:, None]
        sq_dist = (dt / self.lengthScale[0]) ** 2
        # Spatial offsets measured in the frame advected by the wind.
        dx = Y[:, 1][None, :] - (X[:, 1][:, None] + self.windSpeed[0] * dt)
        sq_dist = sq_dist + (dx / self.lengthScale[1]) ** 2
        dy = Y[:, 2][None, :] - (X[:, 2][:, None] + self.windSpeed[1] * dt)
        sq_dist = sq_dist + (dy / self.lengthScale[2]) ** 2
        cov = self.stddev * np.exp(-0.5 * sq_dist)
        if Y is X:
            # training covariance: add observation noise on the diagonal
            cov = cov + np.diag([self.noiseStddev] * X.shape[0])
        return cov

    def diag(self, X):
        return np.array([self.stddev + self.noiseStddev] * X.shape[0])

    def is_stationary(self):
        return True
# --- Data source -----------------------------------------------------------
mesonhPath = '/home/pnarvor/work/nephelae/data/MesoNH-2019-02/REFHR.1.ARMCu.4D.nc'
rct = MesoNHVariable(MFDataset(mesonhPath), 'RCT')

# Estimating advective wind
ut = MesoNHVariable(MFDataset(mesonhPath), 'UT')[50.0, 1100.0,:,:].data.mean()
vt = MesoNHVariable(MFDataset(mesonhPath), 'VT')[50.0, 1100.0,:,:].data.mean()
print("Advective wind :", [ut, vt])

rctSlice = rct[240,1100,:,:].data
print("Variance : ", (rctSlice**2).mean())

# --- Simulated flight trajectory ------------------------------------------
t = np.linspace(0,300.0,300)
# a0 = 400.0
a0 = 250.0  # trajectory radius amplitude
f0 = - 1 / 120.0  # base angular frequency of the circular path
# f0 = 1 / 150.0
a1 = 0.0
# f1 = 1.5*f0
f1 = 2.5*f0  # radial modulation frequency
# f1 = -1.3*f0
# f1 = -2.5*f0
# f1 = -4.5*f0
tStart = 50.0
tEnd = 700.0
t = np.linspace(tStart, tEnd, int(tEnd - tStart))
# p0 = Position(240.0, 1700.0, 2000.0, 1100.0)
# p0 = Position(50.0, 0.0, 2000.0, 1100.0)
p0 = Position(50.0, 100.0, 1950.0, 1100.0)  # (t, x, y, z) start position
p = np.array([[p0.t, p0.x, p0.y, p0.z]]*len(t))
# v0 = np.array([[9.09, 0.68]])
v0 = np.array([8.5, 0.9])  # assumed advective wind (x, y) — TODO confirm vs [ut, vt]
p[:,0] = t
# Modulated circular path around p0 (radius varies with f1).
p[:,1] = p[:,1] + a0*(a1 + np.cos(2*np.pi*f1*(t-t[0])))*np.cos(2*np.pi*f0*(t-t[0]))
p[:,2] = p[:,2] + a0*(a1 + np.cos(2*np.pi*f1*(t-t[0])))*np.sin(2*np.pi*f0*(t-t[0]))
print("Max velocity relative to wind :",
      max(np.sqrt(np.sum((p[1:,1:3] - p[:-1,1:3])**2, axis=1)) / (p[1:,0] - p[:-1,0])))
# Advect the whole path with the wind.
p[:,1:3] = p[:,1:3] + (t - tStart).reshape([len(t), 1]) @ v0.reshape([1,2])

# building prediction locations
# X0,Y0 = np.meshgrid(
#     np.linspace(rct.bounds[3][0], rct.bounds[3][-1], rct.shape[3]),
#     np.linspace(rct.bounds[2][0], rct.bounds[2][-1], rct.shape[2]))
b = rct.bounds
yBounds = [min(p[:,2]), max(p[:,2])]
tmp = rct[p0.t,p0.z,yBounds[0]:yBounds[1],:]
X0,Y0 = np.meshgrid(
    np.linspace(tmp.bounds[1][0], tmp.bounds[1][-1], tmp.shape[1]),
    np.linspace(tmp.bounds[0][0], tmp.bounds[0][-1], tmp.shape[0]))
# Flattened (t, x, y) prediction grid; the t column is filled in do_update().
xyLocations = np.array([[0]*X0.shape[0]*X0.shape[1], X0.ravel(), Y0.ravel()]).T
b[2].min = yBounds[0]
b[2].max = yBounds[1]

# Kernel
processVariance = 1.0e-8
noiseStddev = 0.1 * np.sqrt(processVariance)
# lengthScales = [100, 50, 50]
# lengthScales = [70, 50, 50]
lengthScales = [70, 60, 60]  # (t, x, y) length scales
# lengthScales = [140, 120, 120]
kernel0 = WindKernel(lengthScales, processVariance, noiseStddev**2, v0)

# --- Sample RCT along the trajectory and add measurement noise -------------
rctValues = []
print("Getting rct values... ", end='')
sys.stdout.flush()
for pos in p:
    rctValues.append(rct[pos[0],pos[3],pos[2],pos[1]])
rctValues = np.array(rctValues)
print("Done !")
sys.stdout.flush()

noise = noiseStddev*np.random.randn(rctValues.shape[0])
rctValues = rctValues + noise

# # plotting rct values
# fig, axes = plt.subplots(1,1)
# axes.plot(p[:,0], np.array(rctValues))

# profiling = False
profiling = True  # True: headless timing run; False: live animation
if not profiling:
    fig, axes = plt.subplots(3,1,sharex=True,sharey=True)

simTime = p0.t
lastTime = time.time()
simSpeed = 50.0  # only used by the (commented-out) real-time pacing
def do_update(t):
    """Fit the GP on samples older than `t` and (optionally) redraw the panels."""
    print("Sim time :", t)
    # prediction
    # optimizer=None: WindKernel hyper-parameter optimisation is untested.
    gprProcessor0 = GaussianProcessRegressor(kernel0,
                                             alpha=0.0,
                                             optimizer=None,
                                             copy_X_train=False)
    # trainSet = np.array([list(pos) + [rctVal] \
    #     for pos, rctVal in zip(p[:,0:3],rctValues)\
    #     if pos[0] < t and pos[0] > t - 2*lengthScales[0]])
    # Keep only measurements inside a sliding window of 3 time length-scales.
    trainSet = np.array([list(pos) + [rctVal] \
        for pos, rctVal in zip(p[:,0:3],rctValues)\
        if pos[0] < t and pos[0] > t - 3*lengthScales[0]])
    print("Number of used measures samples :", trainSet.shape[0])
    gprProcessor0.fit(trainSet[:,:-1], trainSet[:,-1])
    # NOTE(review): mutates the module-level prediction grid in place.
    xyLocations[:,0] = t
    map0, std0 = gprProcessor0.predict(xyLocations, return_std=True)
    map0[map0 < 0.0] = 0.0  # clamp negatives (presumably RCT cannot be < 0)
    map0 = map0.reshape(X0.shape)
    std0 = std0.reshape(X0.shape)
    # display
    if not profiling:
        global axes
        # panel 0: MesoNH ground truth with the flown trajectory overlaid
        axes[0].cla()
        axes[0].imshow(rct[t,p0.z,yBounds[0]:yBounds[1],:].data, origin='lower',
                       extent=[b[3].min, b[3].max, b[2].min, b[2].max])
        axes[0].grid()
        axes[0].set_title("Ground truth")
        try:
            axes[0].plot(p[:int(t-tStart + 0.5),1], p[:int(t-tStart + 0.5),2], '.')
        finally:
            pass
        # panel 1: GP posterior mean
        axes[1].cla()
        axes[1].imshow(map0, origin='lower',
                       extent=[b[3].min, b[3].max, b[2].min, b[2].max])
        axes[1].grid()
        axes[1].set_title("MAP")
        # panel 2: GP posterior variance
        axes[2].cla()
        axes[2].imshow(std0**2, origin='lower',
                       extent=[b[3].min, b[3].max, b[2].min, b[2].max])
        axes[2].grid()
        axes[2].set_title("Variance AP")
def init():
    # FuncAnimation init callback: nothing to set up, frames are drawn in update().
    pass
def update(i):
    """Advance the simulation clock by a fixed step and recompute/redraw."""
    # global lastTime
    global simTime
    # currentTime = time.time()
    # simTime = simTime + simSpeed*(currentTime - lastTime)
    # lastTime = currentTime
    # simTime = simTime + 5.0
    # Real-time pacing above is disabled: fixed 2 s step per frame.
    simTime = simTime + 2.0
    do_update(simTime)
if not profiling:
    # Interactive mode: animate the GP map alongside the ground truth.
    anim = animation.FuncAnimation(
        fig,
        update,
        init_func=init,
        interval = 1)
    plt.show(block=False)
else:
    # Profiling mode: run the update loop headless and report wall time.
    t0 = time.time()
    while simTime < 600:
        update(0)
    print("Ellapsed :", time.time() - t0, "s")
| [
"numpy.sqrt",
"netCDF4.MFDataset",
"numpy.array",
"numpy.sin",
"sys.path.append",
"sklearn.gaussian_process.GaussianProcessRegressor",
"nephelae_base.types.Position",
"numpy.exp",
"numpy.linspace",
"numpy.meshgrid",
"sys.stdout.flush",
"numpy.cos",
"numpy.random.randn",
"time.time",
"mat... | [((32, 57), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (47, 57), False, 'import sys\n'), ((2711, 2737), 'numpy.linspace', 'np.linspace', (['(0)', '(300.0)', '(300)'], {}), '(0, 300.0, 300)\n', (2722, 2737), True, 'import numpy as np\n'), ((3050, 3087), 'nephelae_base.types.Position', 'Position', (['(50.0)', '(100.0)', '(1950.0)', '(1100.0)'], {}), '(50.0, 100.0, 1950.0, 1100.0)\n', (3058, 3087), False, 'from nephelae_base.types import Position\n'), ((3174, 3194), 'numpy.array', 'np.array', (['[8.5, 0.9]'], {}), '([8.5, 0.9])\n', (3182, 3194), True, 'import numpy as np\n'), ((4486, 4504), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4502, 4504), False, 'import sys\n'), ((4586, 4605), 'numpy.array', 'np.array', (['rctValues'], {}), '(rctValues)\n', (4594, 4605), True, 'import numpy as np\n'), ((4622, 4640), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4638, 4640), False, 'import sys\n'), ((4965, 4976), 'time.time', 'time.time', ([], {}), '()\n', (4974, 4976), False, 'import time\n'), ((2375, 2396), 'netCDF4.MFDataset', 'MFDataset', (['mesonhPath'], {}), '(mesonhPath)\n', (2384, 2396), False, 'from netCDF4 import MFDataset\n'), ((3897, 3959), 'numpy.linspace', 'np.linspace', (['tmp.bounds[1][0]', 'tmp.bounds[1][-1]', 'tmp.shape[1]'], {}), '(tmp.bounds[1][0], tmp.bounds[1][-1], tmp.shape[1])\n', (3908, 3959), True, 'import numpy as np\n'), ((3965, 4027), 'numpy.linspace', 'np.linspace', (['tmp.bounds[0][0]', 'tmp.bounds[0][-1]', 'tmp.shape[0]'], {}), '(tmp.bounds[0][0], tmp.bounds[0][-1], tmp.shape[0])\n', (3976, 4027), True, 'import numpy as np\n'), ((4211, 4235), 'numpy.sqrt', 'np.sqrt', (['processVariance'], {}), '(processVariance)\n', (4218, 4235), True, 'import numpy as np\n'), ((4661, 4696), 'numpy.random.randn', 'np.random.randn', (['rctValues.shape[0]'], {}), '(rctValues.shape[0])\n', (4676, 4696), True, 'import numpy as np\n'), ((4897, 4941), 'matplotlib.pyplot.subplots', 'plt.subplots', 
(['(3)', '(1)'], {'sharex': '(True)', 'sharey': '(True)'}), '(3, 1, sharex=True, sharey=True)\n', (4909, 4941), True, 'import matplotlib.pyplot as plt\n'), ((5077, 5162), 'sklearn.gaussian_process.GaussianProcessRegressor', 'GaussianProcessRegressor', (['kernel0'], {'alpha': '(0.0)', 'optimizer': 'None', 'copy_X_train': '(False)'}), '(kernel0, alpha=0.0, optimizer=None, copy_X_train=False\n )\n', (5101, 5162), False, 'from sklearn.gaussian_process import GaussianProcessRegressor\n'), ((7139, 7203), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'update'], {'init_func': 'init', 'interval': '(1)'}), '(fig, update, init_func=init, interval=1)\n', (7162, 7203), False, 'from matplotlib import animation\n'), ((7248, 7269), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (7256, 7269), True, 'import matplotlib.pyplot as plt\n'), ((7285, 7296), 'time.time', 'time.time', ([], {}), '()\n', (7294, 7296), False, 'import time\n'), ((1471, 1525), 'numpy.meshgrid', 'np.meshgrid', (['X[:, 0]', 'Y[:, 0]'], {'indexing': '"""ij"""', 'copy': 'cop'}), "(X[:, 0], Y[:, 0], indexing='ij', copy=cop)\n", (1482, 1525), True, 'import numpy as np\n'), ((1610, 1664), 'numpy.meshgrid', 'np.meshgrid', (['X[:, 1]', 'Y[:, 1]'], {'indexing': '"""ij"""', 'copy': 'cop'}), "(X[:, 1], Y[:, 1], indexing='ij', copy=cop)\n", (1621, 1664), True, 'import numpy as np\n'), ((1786, 1840), 'numpy.meshgrid', 'np.meshgrid', (['X[:, 2]', 'Y[:, 2]'], {'indexing': '"""ij"""', 'copy': 'cop'}), "(X[:, 2], Y[:, 2], indexing='ij', copy=cop)\n", (1797, 1840), True, 'import numpy as np\n'), ((2164, 2219), 'numpy.array', 'np.array', (['([self.stddev + self.noiseStddev] * X.shape[0])'], {}), '([self.stddev + self.noiseStddev] * X.shape[0])\n', (2172, 2219), True, 'import numpy as np\n'), ((3263, 3298), 'numpy.cos', 'np.cos', (['(2 * np.pi * f0 * (t - t[0]))'], {}), '(2 * np.pi * f0 * (t - t[0]))\n', (3269, 3298), True, 'import numpy as np\n'), ((3347, 3382), 
'numpy.sin', 'np.sin', (['(2 * np.pi * f0 * (t - t[0]))'], {}), '(2 * np.pi * f0 * (t - t[0]))\n', (3353, 3382), True, 'import numpy as np\n'), ((7364, 7375), 'time.time', 'time.time', ([], {}), '()\n', (7373, 7375), False, 'import time\n'), ((2019, 2059), 'numpy.diag', 'np.diag', (['([self.noiseStddev] * X.shape[0])'], {}), '([self.noiseStddev] * X.shape[0])\n', (2026, 2059), True, 'import numpy as np\n'), ((2103, 2125), 'numpy.exp', 'np.exp', (['(-0.5 * distMat)'], {}), '(-0.5 * distMat)\n', (2109, 2125), True, 'import numpy as np\n'), ((3234, 3269), 'numpy.cos', 'np.cos', (['(2 * np.pi * f1 * (t - t[0]))'], {}), '(2 * np.pi * f1 * (t - t[0]))\n', (3240, 3269), True, 'import numpy as np\n'), ((3318, 3353), 'numpy.cos', 'np.cos', (['(2 * np.pi * f1 * (t - t[0]))'], {}), '(2 * np.pi * f1 * (t - t[0]))\n', (3324, 3353), True, 'import numpy as np\n'), ((3432, 3479), 'numpy.sum', 'np.sum', (['((p[1:, 1:3] - p[:-1, 1:3]) ** 2)'], {'axis': '(1)'}), '((p[1:, 1:3] - p[:-1, 1:3]) ** 2, axis=1)\n', (3438, 3479), True, 'import numpy as np\n'), ((1996, 2018), 'numpy.exp', 'np.exp', (['(-0.5 * distMat)'], {}), '(-0.5 * distMat)\n', (2002, 2018), True, 'import numpy as np\n'), ((2454, 2475), 'netCDF4.MFDataset', 'MFDataset', (['mesonhPath'], {}), '(mesonhPath)\n', (2463, 2475), False, 'from netCDF4 import MFDataset\n'), ((2533, 2554), 'netCDF4.MFDataset', 'MFDataset', (['mesonhPath'], {}), '(mesonhPath)\n', (2542, 2554), False, 'from netCDF4 import MFDataset\n')] |
# coding: utf-8
# Originally from
# https://github.com/PPartisan/THE_LONG_DARK
# Simply adapted to LINUX by <NAME>
import threading
import time
import psutil
from pylab import rcParams
from pynput.keyboard import Key, Controller
import mapping
# Default matplotlib figure size (inches) for any plots produced here.
rcParams['figure.figsize'] = 12, 9.5
def is_tld_running():
    """Return True while a 'tld.x86_64' (The Long Dark) process exists.

    The previous version returned True unconditionally -- a debug leftover
    that made the process scan below unreachable and kept the capture loop
    in Interaction.start_interactive_mapping from ever terminating.
    """
    for pid in psutil.pids():
        try:
            if psutil.Process(pid).name() == 'tld.x86_64':
                return True
        except psutil.NoSuchProcess:
            # Process exited between pids() and Process(); skip it.
            continue
    return False
def background(func, args):
    """Start ``func(*args)`` on a fresh worker thread and return at once."""
    worker = threading.Thread(target=func, args=args)
    worker.start()
class Interaction:
    """Holds the recording flag and drives the screenshot/mapping loop."""

    def __init__(self):
        self.recording = True
        self.keyboard = Controller()

    def start_recording(self):
        print('Started recording')
        self.recording = True

    def stop_recording(self):
        print('Stopped recording')
        self.recording = False

    def press(self):
        # Tap F8 (the game's screenshot key): press then release.
        print(f'Pressed the button')
        self.keyboard.press(Key.f8)
        self.keyboard.release(Key.f8)

    def start_interactive_mapping(self, s_path, f_path):
        """Loop while the game runs: screenshot, extract coords, append to file."""
        print(f'Started!')
        if not self.recording:
            return
        while is_tld_running():
            self.press()
            coords = mapping.read_coords_from_screenshots(s_path)
            mapping.write_coords_to_file(coords, f_path + "coords.txt", "a")
            mapping.delete_screenshots(s_path)
            time.sleep(30)
| [
"pynput.keyboard.Controller",
"psutil.pids",
"mapping.delete_screenshots",
"psutil.Process",
"mapping.write_coords_to_file",
"time.sleep",
"mapping.read_coords_from_screenshots",
"threading.Thread"
] | [((351, 364), 'psutil.pids', 'psutil.pids', ([], {}), '()\n', (362, 364), False, 'import psutil\n'), ((513, 553), 'threading.Thread', 'threading.Thread', ([], {'target': 'func', 'args': 'args'}), '(target=func, args=args)\n', (529, 553), False, 'import threading\n'), ((668, 680), 'pynput.keyboard.Controller', 'Controller', ([], {}), '()\n', (678, 680), False, 'from pynput.keyboard import Key, Controller\n'), ((1209, 1253), 'mapping.read_coords_from_screenshots', 'mapping.read_coords_from_screenshots', (['s_path'], {}), '(s_path)\n', (1245, 1253), False, 'import mapping\n'), ((1270, 1333), 'mapping.write_coords_to_file', 'mapping.write_coords_to_file', (['coord', "(f_path + 'coords.txt')", '"""a"""'], {}), "(coord, f_path + 'coords.txt', 'a')\n", (1298, 1333), False, 'import mapping\n'), ((1350, 1384), 'mapping.delete_screenshots', 'mapping.delete_screenshots', (['s_path'], {}), '(s_path)\n', (1376, 1384), False, 'import mapping\n'), ((1401, 1415), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (1411, 1415), False, 'import time\n'), ((424, 441), 'psutil.Process', 'psutil.Process', (['n'], {}), '(n)\n', (438, 441), False, 'import psutil\n')] |
# import random
#
# deck = list()
# for suit in ["♦", "♥", "♠", "♣"]:
# for value in ["A", "K", "Q", "J", "10", "9", "8", "7", "6", "5", "4", "3", "2"]:
# deck.append((value, suit))
# r_sample = random.sample(deck, 7)
# print(r_sample)
from enum import Enum, auto, unique
class Suit(Enum):
    """The four French playing-card suits; each member's value is its glyph."""
    HEARTS = "♥"
    DIAMONDS = "♦"
    CLUBS = "♣"
    SPADES = "♠"
class CardRank(Enum):
    """Card ranks in ascending strength; auto() assigns values 1..13.

    NOTE(review): AXE is presumably a typo for ACE; renaming would break
    the callers below that reference CardRank.AXE.
    """
    TWO = auto()
    THREE = auto()
    FOUR = auto()
    FIVE = auto()
    SIX = auto()
    SEVEN = auto()
    EIGHT = auto()
    NINE = auto()
    TEN = auto()
    JACK = auto()
    QUEEN = auto()
    KING = auto()
    AXE = auto()
class HandRank(Enum):
    """Poker hand categories in ascending strength; auto() gives 1..10.

    THREE/FOUR stand for three/four of a kind.
    """
    HIGHEST_CARD = auto()
    PAIR = auto()
    TWO_PAIRS = auto()
    THREE = auto()
    STRAIGHT = auto()
    FLUSH = auto()
    FULL_HOUSE = auto()
    FOUR = auto()
    STRAIGHT_FLUSH = auto()
    ROYAL_FLUSH = auto()
class Card():
    """A playing card with a rank (CardRank) and a suit.

    Equality and ordering compare rank only -- suit is deliberately
    ignored, mirroring poker card ranking.
    """

    def __init__(self, value, suit):
        self.__value = value
        self.__suit = suit

    def getValue(self):
        """Return the card's rank (a CardRank member)."""
        return self.__value

    def getSuit(self):
        """Return the card's suit."""
        return self.__suit

    def __str__(self):
        # Was `pass`, which made str(card) raise TypeError (returned None).
        return f"{self.__value.name} of {self.__suit}"

    def __repr__(self):
        return f"Card({self.__value!r}, {self.__suit!r})"

    def __eq__(self, card2):
        return self.__value.value == card2.getValue().value

    def __lt__(self, card2):
        return self.__value.value < card2.getValue().value

    def __hash__(self):
        # __eq__ ignores suit, so the hash must too (equal -> equal hash);
        # defining __eq__ alone would have made Card unhashable.
        return hash(self.__value.value)
class Player:
    """A named player holding a hand of Card objects.

    The original declared ``Player(Card)`` but never initialised any Card
    state; a player *has* cards rather than *is* one, so the spurious
    inheritance is dropped in favour of plain composition.
    """

    def __init__(self, name):
        self.__name = name
        self.__hand = []

    def getName(self):
        """Return the player's name."""
        return self.__name

    def receiveCard(self, new_card):
        """Append *new_card* to the hand; non-Card objects are ignored."""
        if isinstance(new_card, Card):
            self.__hand.append(new_card)

    def showHand(self):
        """Print the hand as readable strings (was printing raw reprs)."""
        hand_str = [str(card) for card in self.__hand]
        print(hand_str)
# Build a sample seven-card hand and demonstrate the comparison operators.
_specs = [
    (CardRank.FIVE, Suit.SPADES),
    (CardRank.SEVEN, Suit.CLUBS),
    (CardRank.AXE, Suit.CLUBS),
    (CardRank.FIVE, Suit.SPADES),
    (CardRank.SEVEN, Suit.CLUBS),
    (CardRank.AXE, Suit.CLUBS),
    (CardRank.AXE, Suit.CLUBS),
]
l = [Card(rank, suit) for rank, suit in _specs]
card1, card2, card3, card4, card5, card6, card7 = l
print(l)
print(card1 < card3)
| [
"enum.auto"
] | [((435, 441), 'enum.auto', 'auto', ([], {}), '()\n', (439, 441), False, 'from enum import Enum, auto, unique\n'), ((455, 461), 'enum.auto', 'auto', ([], {}), '()\n', (459, 461), False, 'from enum import Enum, auto, unique\n'), ((475, 481), 'enum.auto', 'auto', ([], {}), '()\n', (479, 481), False, 'from enum import Enum, auto, unique\n'), ((495, 501), 'enum.auto', 'auto', ([], {}), '()\n', (499, 501), False, 'from enum import Enum, auto, unique\n'), ((515, 521), 'enum.auto', 'auto', ([], {}), '()\n', (519, 521), False, 'from enum import Enum, auto, unique\n'), ((535, 541), 'enum.auto', 'auto', ([], {}), '()\n', (539, 541), False, 'from enum import Enum, auto, unique\n'), ((555, 561), 'enum.auto', 'auto', ([], {}), '()\n', (559, 561), False, 'from enum import Enum, auto, unique\n'), ((575, 581), 'enum.auto', 'auto', ([], {}), '()\n', (579, 581), False, 'from enum import Enum, auto, unique\n'), ((595, 601), 'enum.auto', 'auto', ([], {}), '()\n', (599, 601), False, 'from enum import Enum, auto, unique\n'), ((615, 621), 'enum.auto', 'auto', ([], {}), '()\n', (619, 621), False, 'from enum import Enum, auto, unique\n'), ((635, 641), 'enum.auto', 'auto', ([], {}), '()\n', (639, 641), False, 'from enum import Enum, auto, unique\n'), ((655, 661), 'enum.auto', 'auto', ([], {}), '()\n', (659, 661), False, 'from enum import Enum, auto, unique\n'), ((675, 681), 'enum.auto', 'auto', ([], {}), '()\n', (679, 681), False, 'from enum import Enum, auto, unique\n'), ((731, 737), 'enum.auto', 'auto', ([], {}), '()\n', (735, 737), False, 'from enum import Enum, auto, unique\n'), ((760, 766), 'enum.auto', 'auto', ([], {}), '()\n', (764, 766), False, 'from enum import Enum, auto, unique\n'), ((789, 795), 'enum.auto', 'auto', ([], {}), '()\n', (793, 795), False, 'from enum import Enum, auto, unique\n'), ((818, 824), 'enum.auto', 'auto', ([], {}), '()\n', (822, 824), False, 'from enum import Enum, auto, unique\n'), ((847, 853), 'enum.auto', 'auto', ([], {}), '()\n', (851, 853), False, 
'from enum import Enum, auto, unique\n'), ((876, 882), 'enum.auto', 'auto', ([], {}), '()\n', (880, 882), False, 'from enum import Enum, auto, unique\n'), ((905, 911), 'enum.auto', 'auto', ([], {}), '()\n', (909, 911), False, 'from enum import Enum, auto, unique\n'), ((934, 940), 'enum.auto', 'auto', ([], {}), '()\n', (938, 940), False, 'from enum import Enum, auto, unique\n'), ((963, 969), 'enum.auto', 'auto', ([], {}), '()\n', (967, 969), False, 'from enum import Enum, auto, unique\n'), ((992, 998), 'enum.auto', 'auto', ([], {}), '()\n', (996, 998), False, 'from enum import Enum, auto, unique\n')] |
import datetime
import unittest
class KalmanFilterTest(unittest.TestCase):
    """Skeleton test-suite for a Kalman filter; most cases are stubs."""

    def test_kalman_filter_with_prior_predict(self):
        # Placeholder body: prints a fixed timestamp, asserts a tautology.
        stamp = datetime.datetime(2014, 2, 12, 16, 18, 25, 204000)
        print(stamp)
        self.assertEqual(1., 1.)

    def test_kalman_filter_without_prior_predict(self):
        pass

    def test_kalman_filter_with_low_variance_observation(self):
        pass

    def test_kalman_filter_multidim(self):
        pass
if __name__ == '__main__':
    # Discover and run every test method of the TestCase classes above.
    unittest.main()
| [
"unittest.main",
"datetime.datetime"
] | [((519, 534), 'unittest.main', 'unittest.main', ([], {}), '()\n', (532, 534), False, 'import unittest\n'), ((147, 197), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(2)', '(12)', '(16)', '(18)', '(25)', '(204000)'], {}), '(2014, 2, 12, 16, 18, 25, 204000)\n', (164, 197), False, 'import datetime\n')] |
#!/usr/bin/env python3
import numpy as np
import numpy.random as npr
import pytest
# Random square operand matrices of increasing size for the benchmarks.
A1    = npr.rand(   1,    1)
B1    = npr.rand(   1,    1)
C1    = npr.rand(   1,    1)
A3    = npr.rand(   3,    3)
B3    = npr.rand(   3,    3)
C3    = npr.rand(   3,    3)
A10   = npr.rand(  10,   10)
B10   = npr.rand(  10,   10)
C10   = npr.rand(  10,   10)
A30   = npr.rand(  30,   30)
B30   = npr.rand(  30,   30)
C30   = npr.rand(  30,   30)
A100  = npr.rand( 100,  100)
B100  = npr.rand( 100,  100)
C100  = npr.rand( 100,  100)
A300  = npr.rand( 300,  300)
B300  = npr.rand( 300,  300)
C300  = npr.rand( 300,  300)
A1000 = npr.rand(1000, 1000)
B1000 = npr.rand(1000, 1000)
C1000 = npr.rand(1000, 1000)
A3000 = npr.rand(3000, 3000)
B3000 = npr.rand(3000, 3000)
C3000 = npr.rand(3000, 3000)
# "NC" = non-contiguous / non-numpy: the same data flattened into plain
# Python lists, intended as input for the pure-Python add_simple_* path.
NC_A1    = list(A1   .flatten())
NC_B1    = list(B1   .flatten())
NC_C1    = list(C1   .flatten())
NC_A3    = list(A3   .flatten())
NC_B3    = list(B3   .flatten())
NC_C3    = list(C3   .flatten())
NC_A10   = list(A10  .flatten())
NC_B10   = list(B10  .flatten())
NC_C10   = list(C10  .flatten())
NC_A30   = list(A30  .flatten())
NC_B30   = list(B30  .flatten())
NC_C30   = list(C30  .flatten())
NC_A100  = list(A100 .flatten())
NC_B100  = list(B100 .flatten())
NC_C100  = list(C100 .flatten())
NC_A300  = list(A300 .flatten())
NC_B300  = list(B300 .flatten())
NC_C300  = list(C300 .flatten())
NC_A1000 = list(A1000.flatten())
NC_B1000 = list(B1000.flatten())
NC_C1000 = list(C1000.flatten())
NC_A3000 = list(A3000.flatten())
NC_B3000 = list(B3000.flatten())
NC_C3000 = list(C3000.flatten())
def add_numpy_core(a: np.ndarray, b: np.ndarray, c: np.ndarray) -> np.ndarray:
    """Element-wise sum of three arrays using numpy's vectorised addition."""
    return np.add(np.add(a, b), c)
def add_simple_core(a: list, b: list, c: list) -> list:
    """Element-wise sum of three equal-length sequences in pure Python.

    Rewritten from an index-based loop with a pre-allocated result list to
    the idiomatic zip/comprehension form; behaviour is unchanged for
    equal-length inputs (including the empty case).
    """
    return [x + y + z for x, y, z in zip(a, b, c)]
# Zero-argument closures benchmarked by the test_* functions below.
def add_numpy_1    (): return add_numpy_core(A1   , B1   , C1   )
def add_numpy_3    (): return add_numpy_core(A3   , B3   , C3   )
def add_numpy_10   (): return add_numpy_core(A10  , B10  , C10  )
def add_numpy_30   (): return add_numpy_core(A30  , B30  , C30  )
def add_numpy_100  (): return add_numpy_core(A100 , B100 , C100 )
def add_numpy_300  (): return add_numpy_core(A300 , B300 , C300 )
def add_numpy_1000 (): return add_numpy_core(A1000, B1000, C1000)
def add_numpy_3000 (): return add_numpy_core(A3000, B3000, C3000)
# Fix: the "simple" (pure Python) benchmarks now consume the pre-flattened
# Python lists NC_* built above for exactly this purpose.  Previously they
# were handed the numpy arrays, so add_simple_core summed numpy rows and
# the pure-Python-vs-numpy comparison measured numpy in both cases.
def add_simple_1   (): return add_simple_core(NC_A1   , NC_B1   , NC_C1   )
def add_simple_3   (): return add_simple_core(NC_A3   , NC_B3   , NC_C3   )
def add_simple_10  (): return add_simple_core(NC_A10  , NC_B10  , NC_C10  )
def add_simple_30  (): return add_simple_core(NC_A30  , NC_B30  , NC_C30  )
def add_simple_100 (): return add_simple_core(NC_A100 , NC_B100 , NC_C100 )
def add_simple_300 (): return add_simple_core(NC_A300 , NC_B300 , NC_C300 )
def add_simple_1000(): return add_simple_core(NC_A1000, NC_B1000, NC_C1000)
def add_simple_3000(): return add_simple_core(NC_A3000, NC_B3000, NC_C3000)
# pytest-benchmark entry points: each times its closure for a fixed
# 256 rounds x 16 iterations via the `benchmark` fixture's pedantic mode.
def test_add_numpy_1    (benchmark): benchmark.pedantic(add_numpy_1    , rounds=256, iterations=16)
def test_add_numpy_3    (benchmark): benchmark.pedantic(add_numpy_3    , rounds=256, iterations=16)
def test_add_numpy_10   (benchmark): benchmark.pedantic(add_numpy_10   , rounds=256, iterations=16)
def test_add_numpy_30   (benchmark): benchmark.pedantic(add_numpy_30   , rounds=256, iterations=16)
def test_add_numpy_100  (benchmark): benchmark.pedantic(add_numpy_100  , rounds=256, iterations=16)
def test_add_numpy_300  (benchmark): benchmark.pedantic(add_numpy_300  , rounds=256, iterations=16)
def test_add_numpy_1000 (benchmark): benchmark.pedantic(add_numpy_1000 , rounds=256, iterations=16)
def test_add_numpy_3000 (benchmark): benchmark.pedantic(add_numpy_3000 , rounds=256, iterations=16)
def test_add_simple_1   (benchmark): benchmark.pedantic(add_simple_1   , rounds=256, iterations=16)
def test_add_simple_3   (benchmark): benchmark.pedantic(add_simple_3   , rounds=256, iterations=16)
def test_add_simple_10  (benchmark): benchmark.pedantic(add_simple_10  , rounds=256, iterations=16)
def test_add_simple_30  (benchmark): benchmark.pedantic(add_simple_30  , rounds=256, iterations=16)
def test_add_simple_100 (benchmark): benchmark.pedantic(add_simple_100 , rounds=256, iterations=16)
def test_add_simple_300 (benchmark): benchmark.pedantic(add_simple_300 , rounds=256, iterations=16)
def test_add_simple_1000(benchmark): benchmark.pedantic(add_simple_1000, rounds=256, iterations=16)
def test_add_simple_3000(benchmark): benchmark.pedantic(add_simple_3000, rounds=256, iterations=16)
if __name__ == "__main__":
    # Allow running the benchmark suite directly: `python this_file.py`.
    pytest.main(['-v', __file__])
| [
"numpy.random.rand",
"pytest.main"
] | [((93, 107), 'numpy.random.rand', 'npr.rand', (['(1)', '(1)'], {}), '(1, 1)\n', (101, 107), True, 'import numpy.random as npr\n'), ((122, 136), 'numpy.random.rand', 'npr.rand', (['(1)', '(1)'], {}), '(1, 1)\n', (130, 136), True, 'import numpy.random as npr\n'), ((151, 165), 'numpy.random.rand', 'npr.rand', (['(1)', '(1)'], {}), '(1, 1)\n', (159, 165), True, 'import numpy.random as npr\n'), ((180, 194), 'numpy.random.rand', 'npr.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (188, 194), True, 'import numpy.random as npr\n'), ((209, 223), 'numpy.random.rand', 'npr.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (217, 223), True, 'import numpy.random as npr\n'), ((238, 252), 'numpy.random.rand', 'npr.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (246, 252), True, 'import numpy.random as npr\n'), ((267, 283), 'numpy.random.rand', 'npr.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (275, 283), True, 'import numpy.random as npr\n'), ((296, 312), 'numpy.random.rand', 'npr.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (304, 312), True, 'import numpy.random as npr\n'), ((325, 341), 'numpy.random.rand', 'npr.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (333, 341), True, 'import numpy.random as npr\n'), ((354, 370), 'numpy.random.rand', 'npr.rand', (['(30)', '(30)'], {}), '(30, 30)\n', (362, 370), True, 'import numpy.random as npr\n'), ((383, 399), 'numpy.random.rand', 'npr.rand', (['(30)', '(30)'], {}), '(30, 30)\n', (391, 399), True, 'import numpy.random as npr\n'), ((412, 428), 'numpy.random.rand', 'npr.rand', (['(30)', '(30)'], {}), '(30, 30)\n', (420, 428), True, 'import numpy.random as npr\n'), ((441, 459), 'numpy.random.rand', 'npr.rand', (['(100)', '(100)'], {}), '(100, 100)\n', (449, 459), True, 'import numpy.random as npr\n'), ((470, 488), 'numpy.random.rand', 'npr.rand', (['(100)', '(100)'], {}), '(100, 100)\n', (478, 488), True, 'import numpy.random as npr\n'), ((499, 517), 'numpy.random.rand', 'npr.rand', (['(100)', '(100)'], {}), '(100, 100)\n', (507, 517), True, 'import 
numpy.random as npr\n'), ((528, 546), 'numpy.random.rand', 'npr.rand', (['(300)', '(300)'], {}), '(300, 300)\n', (536, 546), True, 'import numpy.random as npr\n'), ((557, 575), 'numpy.random.rand', 'npr.rand', (['(300)', '(300)'], {}), '(300, 300)\n', (565, 575), True, 'import numpy.random as npr\n'), ((586, 604), 'numpy.random.rand', 'npr.rand', (['(300)', '(300)'], {}), '(300, 300)\n', (594, 604), True, 'import numpy.random as npr\n'), ((615, 635), 'numpy.random.rand', 'npr.rand', (['(1000)', '(1000)'], {}), '(1000, 1000)\n', (623, 635), True, 'import numpy.random as npr\n'), ((644, 664), 'numpy.random.rand', 'npr.rand', (['(1000)', '(1000)'], {}), '(1000, 1000)\n', (652, 664), True, 'import numpy.random as npr\n'), ((673, 693), 'numpy.random.rand', 'npr.rand', (['(1000)', '(1000)'], {}), '(1000, 1000)\n', (681, 693), True, 'import numpy.random as npr\n'), ((702, 722), 'numpy.random.rand', 'npr.rand', (['(3000)', '(3000)'], {}), '(3000, 3000)\n', (710, 722), True, 'import numpy.random as npr\n'), ((731, 751), 'numpy.random.rand', 'npr.rand', (['(3000)', '(3000)'], {}), '(3000, 3000)\n', (739, 751), True, 'import numpy.random as npr\n'), ((760, 780), 'numpy.random.rand', 'npr.rand', (['(3000)', '(3000)'], {}), '(3000, 3000)\n', (768, 780), True, 'import numpy.random as npr\n'), ((4540, 4569), 'pytest.main', 'pytest.main', (["['-v', __file__]"], {}), "(['-v', __file__])\n", (4551, 4569), False, 'import pytest\n')] |
# Copyright 2015 Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""dogpile.cache backend that uses dictionary for storage"""
from dogpile.cache import api
from oslo_cache import core
from oslo_utils import timeutils
# Public API of this module.
__all__ = [
    'DictCacheBackend'
]
# Sentinel oslo.cache/dogpile use to signal "no cached value".
_NO_VALUE = core.NO_VALUE
class DictCacheBackend(api.CacheBackend):
    """A dogpile.cache backend backed by a plain Python dictionary.

    Arguments accepted in the arguments dictionary:

    :param expiration_time: maximum time-to-live in seconds for each key
        stored in the backend.  The default of 0 means entries never
        expire.
    :type expiration_time: real
    """
    def __init__(self, arguments):
        self.expiration_time = arguments.get('expiration_time', 0)
        self.cache = {}

    def get(self, key):
        """Return the cached value for *key*.

        Expired or missing keys yield :data:`oslo_cache.core.NO_VALUE`.

        :param key: dictionary key
        """
        value, deadline = self.cache.get(key, (_NO_VALUE, 0))
        if self.expiration_time > 0 and timeutils.utcnow_ts() >= deadline:
            self.cache.pop(key, None)
            return _NO_VALUE
        return value

    def get_multi(self, keys):
        """Return the cached value for every key in *keys*."""
        return list(map(self.get, keys))

    def set(self, key, value):
        """Store *value* under *key*, expunging expired entries first.

        :param key: dictionary key
        :param value: value associated with the key
        """
        self.set_multi({key: value})

    def set_multi(self, mapping):
        """Store every key/value pair from *mapping*.

        Expired entries are expunged on every call.

        :param mapping: dictionary with key/value pairs
        """
        self._clear()
        if self.expiration_time > 0:
            deadline = timeutils.utcnow_ts() + self.expiration_time
        else:
            deadline = 0
        for key, value in mapping.items():
            self.cache[key] = (value, deadline)

    def delete(self, key):
        """Drop *key* from the cache if present.

        :param key: dictionary key
        """
        self.cache.pop(key, None)

    def delete_multi(self, keys):
        """Drop every key in *keys* from the cache if present.

        :param keys: list of dictionary keys
        """
        for key in keys:
            self.cache.pop(key, None)

    def _clear(self):
        """Expunge every entry whose deadline has passed."""
        now = timeutils.utcnow_ts()
        expired = [k for k, (_value, deadline) in self.cache.items()
                   if deadline > 0 and now >= deadline]
        for k in expired:
            del self.cache[k]
| [
"oslo_utils.timeutils.utcnow_ts"
] | [((3106, 3127), 'oslo_utils.timeutils.utcnow_ts', 'timeutils.utcnow_ts', ([], {}), '()\n', (3125, 3127), False, 'from oslo_utils import timeutils\n'), ((1681, 1702), 'oslo_utils.timeutils.utcnow_ts', 'timeutils.utcnow_ts', ([], {}), '()\n', (1700, 1702), False, 'from oslo_utils import timeutils\n'), ((2488, 2509), 'oslo_utils.timeutils.utcnow_ts', 'timeutils.utcnow_ts', ([], {}), '()\n', (2507, 2509), False, 'from oslo_utils import timeutils\n')] |
import numpy as np
from bs4 import BeautifulSoup
from tqdm import tqdm
import json
def read_file(filename):
    """Read *filename* as UTF-8 text and return its BeautifulSoup parse tree."""
    with open(filename, 'r', encoding='utf-8') as handle:
        markup = handle.read()
    return BeautifulSoup(markup, "html.parser")
if __name__ == '__main__':
    # Mapping from a tag's space-joined class list to an integer index.
    with open('name2idx.json', 'r') as fp:
        name2idx = json.load(fp)
    with open('data/0.txt', 'r', encoding='utf-8')as f:
        contents = f.read()
    soup = BeautifulSoup(contents, "html.parser")
    # Rewrite every class attribute to '###<idx>'; tags without a class
    # attribute (or with an unknown class combo) raise KeyError and are
    # left untouched.
    for tag in soup.find_all():
        try:
            if tag['class']:
                tag['class'] = '###' + str(name2idx[' '.join(tag['class'])])
        except KeyError:
            continue
    print(soup)
| [
"bs4.BeautifulSoup",
"json.load"
] | [((206, 244), 'bs4.BeautifulSoup', 'BeautifulSoup', (['contents', '"""html.parser"""'], {}), "(contents, 'html.parser')\n", (219, 244), False, 'from bs4 import BeautifulSoup\n'), ((356, 369), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (365, 369), False, 'import json\n'), ((470, 508), 'bs4.BeautifulSoup', 'BeautifulSoup', (['contents', '"""html.parser"""'], {}), "(contents, 'html.parser')\n", (483, 508), False, 'from bs4 import BeautifulSoup\n')] |
from firedrake import *
import numpy as np
from firedrake.petsc import PETSc
from firedrake import COMM_WORLD
# Plotting is optional: headless runs without matplotlib still work.
# NOTE(review): `warning` here comes from the firedrake star import.
try:
    import matplotlib.pyplot as plt
    plt.rcParams["contour.corner_mask"] = False
    plt.close("all")
except:
    warning("Matplotlib not imported")
# Unit-square triangular mesh.
nx, ny = 4, 4
Lx, Ly = 1.0, 1.0
quadrilateral = False
mesh = RectangleMesh(nx, ny, Lx, Ly, quadrilateral=quadrilateral)
plot(mesh)
plt.axis("off")
# DG spaces plus an HDiv trace space for the hybrid multipliers; W couples
# two (velocity, pressure, trace) triples -- one per porosity scale.
degree = 1
pressure_family = "DG"
velocity_family = "DG"
trace_family = "HDiv Trace"
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
T = FunctionSpace(mesh, trace_family, degree)
W = U * V * T * U * V * T
# Trial and test functions
DPP_solution = Function(W)
u1, p1, lambda1, u2, p2, lambda2 = split(DPP_solution)
v1, q1, mu1, v2, q2, mu2 = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
x, y = SpatialCoordinate(mesh)
h = CellDiameter(mesh)
#################################################
# *** Model parameters
#################################################
mu0 = Constant(1.0)  # fluid viscosity
k1 = Constant(1.0)  # scale-1 (macro) permeability
k2 = Constant(0.1)  # scale-2 (micro) permeability
b_factor = Constant(1.0)  # inter-scale pressure-coupling coefficient
def alpha1():
    """Inverse mobility (mu/k) of the macro scale."""
    return mu0 / k1
def invalpha1():
    """Mobility (k/mu) of the macro scale."""
    return 1.0 / alpha1()
def alpha2():
    """Inverse mobility (mu/k) of the micro scale."""
    return mu0 / k2
def invalpha2():
    """Mobility (k/mu) of the micro scale."""
    return 1.0 / alpha2()
#################################################
#################################################
#################################################
# Exact solution and source term projection
# Manufactured exact pressures for the two scales of the double-porosity/
# permeability (DPP) model; eta collects the inter-scale coupling data.
eta = sqrt(b_factor * (k1 + k2) / (k1 * k2))
p_exact_1 = mu0 / pi * exp(pi * x) * sin(pi * y) - mu0 / (b_factor * k1) * exp(eta * y)
p_exact_2 = mu0 / pi * exp(pi * x) * sin(pi * y) + mu0 / (b_factor * k2) * exp(eta * y)
p_e_1 = Function(W.sub(1)).interpolate(p_exact_1)
p_e_1_tr = Function(T).interpolate(p_exact_1)
p_e_1.rename("Exact macro pressure", "label")
p_e_2 = Function(W.sub(4)).interpolate(p_exact_2)
p_e_2_tr = Function(T).interpolate(p_exact_2)
p_e_2.rename("Exact micro pressure", "label")
# Exact Darcy velocities u = -(k/mu) grad(p) for each scale.
v_e_1 = Function(W.sub(0), name="Exact macro velocity")
v_e_1.project(-(k1 / mu0) * grad(p_e_1))
# NOTE(review): the name string says "macro" but this is the scale-2
# (micro) exact velocity -- looks like a copy/paste slip in the label.
v_e_2 = Function(W.sub(3), name="Exact macro velocity")
v_e_2.project(-(k2 / mu0) * grad(p_e_2))
plot(p_e_1)
plot(p_e_2)
# Source term
rhob1, rhob2 = Constant((0.0, 0.0)), Constant((0.0, 0.0))
f = Constant(0.0)
# Stabilizing parameter
beta_0 = Constant(1.0e-2)
beta = beta_0 / h
beta_avg = beta_0 / h("+")
# Stabilization weights for the hybrid mixed formulation below.
delta_0 = Constant(1.0)
delta_1 = Constant(-0.5)
delta_2 = Constant(0.5)
delta_3 = Constant(0.0)
# Mixed classical terms
a = (dot(alpha1() * u1, v1) - div(v1) * p1 - delta_0 * q1 * div(u1)) * dx
a += (dot(alpha2() * u2, v2) - div(v2) * p2 - delta_0 * q2 * div(u2)) * dx
# Inter-scale mass transfer, proportional to the pressure difference.
a += delta_0 * q1 * (b_factor * invalpha1() / k1) * (p2 - p1) * dx
a += delta_0 * q2 * (b_factor * invalpha2() / k2) * (p1 - p2) * dx
L = -delta_0 * dot(rhob1, v1) * dx
L += -delta_0 * dot(rhob2, v2) * dx
# Stabilizing terms
###
a += (
    delta_1
    * inner(invalpha1() * (alpha1() * u1 + grad(p1)), delta_0 * alpha1() * v1 + grad(q1))
    * dx
)
a += (
    delta_1
    * inner(invalpha2() * (alpha2() * u2 + grad(p2)), delta_0 * alpha2() * v2 + grad(q2))
    * dx
)
###
a += delta_2 * alpha1() * div(u1) * div(v1) * dx
a += delta_2 * alpha2() * div(u2) * div(v2) * dx
L += delta_2 * alpha1() * (b_factor * invalpha1() / k1) * (p2 - p1) * div(v1) * dx
L += delta_2 * alpha2() * (b_factor * invalpha2() / k2) * (p1 - p2) * div(v2) * dx
###
a += delta_3 * inner(invalpha1() * curl(alpha1() * u1), curl(alpha1() * v1)) * dx
a += delta_3 * inner(invalpha2() * curl(alpha2() * u2), curl(alpha2() * v2)) * dx
# Hybridization terms
###
a += lambda1("+") * jump(v1, n) * dS + mu1("+") * jump(u1, n) * dS
a += lambda2("+") * jump(v2, n) * dS + mu2("+") * jump(u2, n) * dS
###
a += beta_avg * invalpha1()("+") * (lambda1("+") - p1("+")) * (mu1("+") - q1("+")) * dS
a += beta_avg * invalpha2()("+") * (lambda2("+") - p2("+")) * (mu2("+") - q2("+")) * dS
# Weakly imposed BC from hybridization
a += beta * invalpha1() * (lambda1 - p_e_1) * mu1 * ds
# NOTE(review): this scale-2 boundary term reuses invalpha1(); by symmetry
# with every other paired term it should presumably be invalpha2() --
# confirm against the method's formulation before changing.
a += beta * invalpha1() * (lambda2 - p_e_2) * mu2 * ds
a += (p_e_1 * dot(v1, n) + mu1 * (dot(u1, n) - dot(v_e_1, n))) * ds
a += (p_e_2 * dot(v2, n) + mu2 * (dot(u2, n) - dot(v_e_2, n))) * ds
F = a - L
# Solving SC below
PETSc.Sys.Print("*******************************************\nSolving using static condensation.\n")
# Nested fieldsplit: each scale's (velocity, pressure, trace) block is
# statically condensed onto its trace unknowns via firedrake.SCPC and the
# condensed system is solved directly with LU/MUMPS; the outer coupled
# problem is driven by LGMRES (one linear solve -- snes_type "ksponly").
solver_parameters = {
    "snes_type": "ksponly",
    "pmat_type": "matfree",
    # 'ksp_view': True,
    "ksp_type": "lgmres",
    "ksp_monitor_true_residual": True,
    # 'snes_monitor': True,
    "ksp_rtol": 1.0e-5,
    "ksp_atol": 1.0e-5,
    "pc_type": "fieldsplit",
    "pc_fieldsplit_0_fields": "0,1,2",
    "pc_fieldsplit_1_fields": "3,4,5",
    "fieldsplit_0": {
        "pmat_type": "matfree",
        "ksp_type": "preonly",
        "pc_type": "python",
        "pc_python_type": "firedrake.SCPC",
        "pc_sc_eliminate_fields": "0, 1",
        "condensed_field": {
            "ksp_type": "preonly",
            "pc_type": "lu",
            "pc_factor_mat_solver_type": "mumps",
        },
    },
    "fieldsplit_1": {
        "pmat_type": "matfree",
        "ksp_type": "preonly",
        "pc_type": "python",
        "pc_python_type": "firedrake.SCPC",
        "pc_sc_eliminate_fields": "0, 1",
        "condensed_field": {
            "ksp_type": "preonly",
            "pc_type": "lu",
            "pc_factor_mat_solver_type": "mumps",
        },
    },
}
problem_flow = NonlinearVariationalProblem(F, DPP_solution)
solver_flow = NonlinearVariationalSolver(problem_flow, solver_parameters=solver_parameters)
solver_flow.solve()
# Writing solution in a .vtk file and plotting the solution
plot(DPP_solution.sub(1))
plot(DPP_solution.sub(4))
plt.show()
output_file = File("dpp_sdhm_exact.pvd")
# Subfunction layout of W: 0 = macro velocity, 1 = macro pressure,
# 3 = micro velocity, 4 = micro pressure (2 and 5 are the trace fields).
v1_sol = DPP_solution.sub(0)
v1_sol.rename("Macro velocity", "label")
p1_sol = DPP_solution.sub(1)
p1_sol.rename("Macro pressure", "label")
v2_sol = DPP_solution.sub(3)
v2_sol.rename("Micro velocity", "label")
p2_sol = DPP_solution.sub(4)
p2_sol.rename("Micro pressure", "label")
output_file.write(p1_sol, v1_sol, p2_sol, v2_sol, p_e_1, v_e_1, p_e_2, v_e_2)
| [
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"firedrake.petsc.PETSc.Sys.Print",
"matplotlib.pyplot.show"
] | [((402, 417), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (410, 417), True, 'import matplotlib.pyplot as plt\n'), ((4237, 4349), 'firedrake.petsc.PETSc.Sys.Print', 'PETSc.Sys.Print', (['"""*******************************************\nSolving using static condensation.\n"""'], {}), '(\n """*******************************************\nSolving using static condensation.\n"""\n )\n', (4252, 4349), False, 'from firedrake.petsc import PETSc\n'), ((5697, 5707), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5705, 5707), True, 'import matplotlib.pyplot as plt\n'), ((205, 221), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (214, 221), True, 'import matplotlib.pyplot as plt\n')] |
from kafka import KafkaConsumer
# Subscribe to 'test-topic' on the local broker; iterating the consumer
# blocks forever, printing each record as it arrives.
consumer = KafkaConsumer('test-topic', bootstrap_servers='localhost:9092')
print("listening")
for msg in consumer:
    print(msg)
| [
"kafka.KafkaConsumer"
] | [((44, 107), 'kafka.KafkaConsumer', 'KafkaConsumer', (['"""test-topic"""'], {'bootstrap_servers': '"""localhost:9092"""'}), "('test-topic', bootstrap_servers='localhost:9092')\n", (57, 107), False, 'from kafka import KafkaConsumer\n')] |
import random
import numpy as np
import tensorflow as tf
from recognition.utils import train_utils, googlenet_load
try:
from tensorflow.models.rnn import rnn_cell
except ImportError:
rnn_cell = tf.nn.rnn_cell
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
# Fix both RNG seeds so runs are reproducible.
random.seed(0)
np.random.seed(0)
@ops.RegisterGradient("Hungarian")
def _hungarian_grad(op, *args):
    """Gradient for the custom Hungarian op: zero w.r.t. every input.

    The incoming output gradients (*args) are ignored on purpose -- the
    matching is treated as non-differentiable.  Returns a concrete list:
    under Python 3 the previous ``map(...)`` produced a lazy iterator,
    which TensorFlow's gradient machinery does not accept as the list of
    per-input gradients.
    """
    return [array_ops.zeros_like(inp) for inp in op.inputs]
def build_lstm_inner(H, lstm_input):
    '''
    Build the LSTM decoder: one (optionally stacked) LSTM unrolled for
    H['rnn_len'] steps, fed the same `lstm_input` at every step.

    H: hyperparameter dict (lstm_size, num_lstm_layers, batch_size,
       grid_height, grid_width, rnn_len).
    lstm_input: 2-D tensor of per-grid-cell features.

    Returns the list of H['rnn_len'] per-step output tensors.
    '''
    lstm_cell = rnn_cell.BasicLSTMCell(H['lstm_size'], forget_bias=0.0, state_is_tuple=False)
    if H['num_lstm_layers'] > 1:
        lstm = rnn_cell.MultiRNNCell([lstm_cell] * H['num_lstm_layers'], state_is_tuple=False)
    else:
        lstm = lstm_cell
    # One LSTM state row per grid cell of every image in the batch.
    batch_size = H['batch_size'] * H['grid_height'] * H['grid_width']
    state = tf.zeros([batch_size, lstm.state_size])
    outputs = []
    with tf.variable_scope('RNN', initializer=tf.random_uniform_initializer(-0.1, 0.1)):
        for time_step in range(H['rnn_len']):
            # Share the same weights across all unroll steps after the first.
            if time_step > 0: tf.get_variable_scope().reuse_variables()
            output, state = lstm(lstm_input, state)
            outputs.append(output)
    return outputs
def build_overfeat_inner(H, lstm_input):
    '''
    Build the simple Overfeat decoder: a single learned linear projection
    of the per-cell features, as a one-element list to mirror the shape of
    build_lstm_inner's return value.
    '''
    # Only a single prediction per cell is possible without the LSTM.
    if H['rnn_len'] > 1:
        raise ValueError('rnn_len > 1 only supported with use_lstm == True')
    outputs = []
    initializer = tf.random_uniform_initializer(-0.1, 0.1)
    with tf.variable_scope('Overfeat', initializer=initializer):
        w = tf.get_variable('ip', shape=[H['later_feat_channels'], H['lstm_size']])
        outputs.append(tf.matmul(lstm_input, w))
    return outputs
def deconv(x, output_shape, channels):
    """2x2, stride-2 transposed convolution (learned upsampling) of `x`.

    channels is a (in_channels, out_channels) pair; note conv2d_transpose
    filters are shaped [h, w, out_channels, in_channels], hence the
    reversed indexing below.
    """
    k_h = 2
    k_w = 2
    w = tf.get_variable('w_deconv', initializer=tf.random_normal_initializer(stddev=0.01),
                        shape=[k_h, k_w, channels[1], channels[0]])
    y = tf.nn.conv2d_transpose(x, w, output_shape, strides=[1, k_h, k_w, 1], padding='VALID')
    return y
def rezoom(H, pred_boxes, early_feat, early_feat_channels, w_offsets, h_offsets):
    '''
    Rezoom into a feature map at multiple interpolation points in a grid.

    If the predicted object center is at X, len(w_offsets) == 3, and len(h_offsets) == 5,
    the rezoom grid will look as follows:

    [o o o]
    [o o o]
    [o X o]
    [o o o]
    [o o o]

    Where each letter indexes into the feature map with bilinear interpolation
    '''
    grid_size = H['grid_width'] * H['grid_height']
    outer_size = grid_size * H['batch_size']
    # One bilinear lookup per (w, h) grid offset around each predicted box.
    indices = []
    for w_offset in w_offsets:
        for h_offset in h_offsets:
            indices.append(train_utils.bilinear_select(H,
                                                       pred_boxes,
                                                       early_feat,
                                                       early_feat_channels,
                                                       w_offset, h_offset))
    # NOTE(review): tf.concat(axis, values) is the TF <= 1.x argument order.
    interp_indices = tf.concat(0, indices)
    rezoom_features = train_utils.interp(early_feat,
                                         interp_indices,
                                         early_feat_channels)
    rezoom_features_r = tf.reshape(rezoom_features,
                                   [len(w_offsets) * len(h_offsets),
                                    outer_size,
                                    H['rnn_len'],
                                    early_feat_channels])
    # Move the offset axis inward so it can be flattened with the channels.
    rezoom_features_t = tf.transpose(rezoom_features_r, [1, 2, 0, 3])
    return tf.reshape(rezoom_features_t,
                      [outer_size,
                       H['rnn_len'],
                       len(w_offsets) * len(h_offsets) * early_feat_channels])
def build_forward(H, x, phase, reuse):
    '''
    Construct the forward model.

    Pipeline: mean-subtract the input, run the CNN backbone, optionally
    refine the feature map (deconv head or average pooling), then decode
    each grid cell into box coordinates and class logits with either an
    LSTM or an Overfeat decoder.  When H['use_rezoom'] is set, a second
    stage re-samples the early feature map around each predicted box and
    produces confidence (and optionally box) deltas.

    :param H: hyper-parameter dict (grid/batch sizes, decoder options, ...)
    :param x: input image batch tensor
    :param phase: 'train' enables dropout in the decoder / rezoom stages
    :param reuse: variable-scope reuse flag, shared with the backbone
    :return: (pred_boxes, pred_logits, pred_confidences) and, when
             H['use_rezoom'] is true, additionally
             (pred_confs_deltas, pred_boxes_deltas)
    '''
    grid_size = H['grid_width'] * H['grid_height']
    outer_size = grid_size * H['batch_size']
    # Fixed per-channel mean subtraction (GoogLeNet-style preprocessing).
    input_mean = 117.
    x -= input_mean
    cnn, early_feat, _ = googlenet_load.model(x, H, reuse)
    # Keep only the first early_feat_channels channels of the early feature map.
    early_feat_channels = H['early_feat_channels']
    early_feat = early_feat[:, :, :, :early_feat_channels]
    if H['deconv']:
        size = 3
        stride = 2
        pool_size = 5
        with tf.variable_scope("deconv", reuse=reuse):
            # Strided 3x3 conv followed by avg-pool on the first 256
            # channels, then upsample back with a 2x2 transposed conv.
            w = tf.get_variable('conv_pool_w', shape=[size, size, H['later_feat_channels'], H['later_feat_channels']],
                                initializer=tf.random_normal_initializer(stddev=0.01))
            cnn_s = tf.nn.conv2d(cnn, w, strides=[1, stride, stride, 1], padding='SAME')
            cnn_s_pool = tf.nn.avg_pool(cnn_s[:, :, :, :256], ksize=[1, pool_size, pool_size, 1],
                                      strides=[1, 1, 1, 1], padding='SAME')
            cnn_s_with_pool = tf.concat(3, [cnn_s_pool, cnn_s[:, :, :, 256:]])
            cnn_deconv = deconv(cnn_s_with_pool, output_shape=[H['batch_size'], H['grid_height'], H['grid_width'], 256],
                                channels=[H['later_feat_channels'], 256])
            # Replace the first 256 channels with the deconvolved features.
            cnn = tf.concat(3, (cnn_deconv, cnn[:, :, :, 256:]))
    elif H['avg_pool_size'] > 1:
        pool_size = H['avg_pool_size']
        # Smooth only channels 700+ with an average pool; the 700 split
        # point is hard-coded (presumably tuned for this backbone).
        cnn1 = cnn[:, :, :, :700]
        cnn2 = cnn[:, :, :, 700:]
        cnn2 = tf.nn.avg_pool(cnn2, ksize=[1, pool_size, pool_size, 1],
                              strides=[1, 1, 1, 1], padding='SAME')
        cnn = tf.concat(3, [cnn1, cnn2])
    # Flatten the spatial grid: one row of features per grid cell.
    cnn = tf.reshape(cnn,
                     [H['batch_size'] * H['grid_width'] * H['grid_height'], H['later_feat_channels']])
    initializer = tf.random_uniform_initializer(-0.1, 0.1)
    with tf.variable_scope('decoder', reuse=reuse, initializer=initializer):
        # Scale features down before feeding the decoder.
        scale_down = 0.01
        lstm_input = tf.reshape(cnn * scale_down, (H['batch_size'] * grid_size, H['later_feat_channels']))
        if H['use_lstm']:
            lstm_outputs = build_lstm_inner(H, lstm_input)
        else:
            lstm_outputs = build_overfeat_inner(H, lstm_input)
        pred_boxes = []
        pred_logits = []
        # One box/logit projection head per decoder timestep.
        for k in range(H['rnn_len']):
            output = lstm_outputs[k]
            if phase == 'train':
                output = tf.nn.dropout(output, 0.5)
            box_weights = tf.get_variable('box_ip%d' % k,
                                          shape=(H['lstm_size'], 4))
            conf_weights = tf.get_variable('conf_ip%d' % k,
                                           shape=(H['lstm_size'], H['num_classes']))
            # Box outputs are scaled by 50 to ease regression.
            pred_boxes_step = tf.reshape(tf.matmul(output, box_weights) * 50,
                                         [outer_size, 1, 4])
            pred_boxes.append(pred_boxes_step)
            pred_logits.append(tf.reshape(tf.matmul(output, conf_weights),
                                          [outer_size, 1, H['num_classes']]))
        pred_boxes = tf.concat(1, pred_boxes)
        pred_logits = tf.concat(1, pred_logits)
        # Softmax over classes, computed on the flattened (cell, timestep) axis.
        pred_logits_squash = tf.reshape(pred_logits,
                                        [outer_size * H['rnn_len'], H['num_classes']])
        pred_confidences_squash = tf.nn.softmax(pred_logits_squash)
        pred_confidences = tf.reshape(pred_confidences_squash,
                                      [outer_size, H['rnn_len'], H['num_classes']])
        if H['use_rezoom']:
            pred_confs_deltas = []
            pred_boxes_deltas = []
            w_offsets = H['rezoom_w_coords']
            h_offsets = H['rezoom_h_coords']
            num_offsets = len(w_offsets) * len(h_offsets)
            # Sample the early feature map around each predicted box.
            rezoom_features = rezoom(H, pred_boxes, early_feat, early_feat_channels, w_offsets, h_offsets)
            if phase == 'train':
                rezoom_features = tf.nn.dropout(rezoom_features, 0.5)
            for k in range(H['rnn_len']):
                # Concatenate decoder output with (down-scaled) rezoom features.
                delta_features = tf.concat(1, [lstm_outputs[k], rezoom_features[:, k, :] / 1000.])
                dim = 128
                delta_weights1 = tf.get_variable(
                        'delta_ip1%d' % k,
                        shape=[H['lstm_size'] + early_feat_channels * num_offsets, dim])
                # TODO: add dropout here ?
                ip1 = tf.nn.relu(tf.matmul(delta_features, delta_weights1))
                if phase == 'train':
                    ip1 = tf.nn.dropout(ip1, 0.5)
                delta_confs_weights = tf.get_variable(
                        'delta_ip2%d' % k,
                        shape=[dim, H['num_classes']])
                if H['reregress']:
                    delta_boxes_weights = tf.get_variable(
                            'delta_ip_boxes%d' % k,
                            shape=[dim, 4])
                    pred_boxes_deltas.append(tf.reshape(tf.matmul(ip1, delta_boxes_weights) * 5,
                                                        [outer_size, 1, 4]))
                scale = H.get('rezoom_conf_scale', 50)
                pred_confs_deltas.append(tf.reshape(tf.matmul(ip1, delta_confs_weights) * scale,
                                                    [outer_size, 1, H['num_classes']]))
            pred_confs_deltas = tf.concat(1, pred_confs_deltas)
            if H['reregress']:
                pred_boxes_deltas = tf.concat(1, pred_boxes_deltas)
            # NOTE: when H['reregress'] is false, pred_boxes_deltas is
            # returned as an empty list rather than a tensor.
            return pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas
        return pred_boxes, pred_logits, pred_confidences
| [
"tensorflow.python.framework.ops.RegisterGradient",
"tensorflow.models.rnn.rnn_cell.MultiRNNCell",
"tensorflow.get_variable",
"tensorflow.transpose",
"tensorflow.get_variable_scope",
"tensorflow.nn.dropout",
"tensorflow.nn.conv2d_transpose",
"tensorflow.nn.softmax",
"recognition.utils.train_utils.in... | [((309, 323), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (320, 323), False, 'import random\n'), ((324, 341), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (338, 341), True, 'import numpy as np\n'), ((345, 378), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""Hungarian"""'], {}), "('Hungarian')\n", (365, 378), False, 'from tensorflow.python.framework import ops\n'), ((553, 630), 'tensorflow.models.rnn.rnn_cell.BasicLSTMCell', 'rnn_cell.BasicLSTMCell', (["H['lstm_size']"], {'forget_bias': '(0.0)', 'state_is_tuple': '(False)'}), "(H['lstm_size'], forget_bias=0.0, state_is_tuple=False)\n", (575, 630), False, 'from tensorflow.models.rnn import rnn_cell\n'), ((877, 916), 'tensorflow.zeros', 'tf.zeros', (['[batch_size, lstm.state_size]'], {}), '([batch_size, lstm.state_size])\n', (885, 916), True, 'import tensorflow as tf\n'), ((1478, 1518), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (1507, 1518), True, 'import tensorflow as tf\n'), ((1968, 2057), 'tensorflow.nn.conv2d_transpose', 'tf.nn.conv2d_transpose', (['x', 'w', 'output_shape'], {'strides': '[1, k_h, k_w, 1]', 'padding': '"""VALID"""'}), "(x, w, output_shape, strides=[1, k_h, k_w, 1],\n padding='VALID')\n", (1990, 2057), True, 'import tensorflow as tf\n'), ((3061, 3082), 'tensorflow.concat', 'tf.concat', (['(0)', 'indices'], {}), '(0, indices)\n', (3070, 3082), True, 'import tensorflow as tf\n'), ((3105, 3172), 'recognition.utils.train_utils.interp', 'train_utils.interp', (['early_feat', 'interp_indices', 'early_feat_channels'], {}), '(early_feat, interp_indices, early_feat_channels)\n', (3123, 3172), False, 'from recognition.utils import train_utils, googlenet_load\n'), ((3556, 3601), 'tensorflow.transpose', 'tf.transpose', (['rezoom_features_r', '[1, 2, 0, 3]'], {}), '(rezoom_features_r, [1, 2, 0, 3])\n', (3568, 3601), True, 'import 
tensorflow as tf\n'), ((4047, 4080), 'recognition.utils.googlenet_load.model', 'googlenet_load.model', (['x', 'H', 'reuse'], {}), '(x, H, reuse)\n', (4067, 4080), False, 'from recognition.utils import train_utils, googlenet_load\n'), ((5470, 5572), 'tensorflow.reshape', 'tf.reshape', (['cnn', "[H['batch_size'] * H['grid_width'] * H['grid_height'], H['later_feat_channels']\n ]"], {}), "(cnn, [H['batch_size'] * H['grid_width'] * H['grid_height'], H[\n 'later_feat_channels']])\n", (5480, 5572), True, 'import tensorflow as tf\n'), ((5607, 5647), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (5636, 5647), True, 'import tensorflow as tf\n'), ((679, 758), 'tensorflow.models.rnn.rnn_cell.MultiRNNCell', 'rnn_cell.MultiRNNCell', (["([lstm_cell] * H['num_lstm_layers'])"], {'state_is_tuple': '(False)'}), "([lstm_cell] * H['num_lstm_layers'], state_is_tuple=False)\n", (700, 758), False, 'from tensorflow.models.rnn import rnn_cell\n'), ((1528, 1582), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Overfeat"""'], {'initializer': 'initializer'}), "('Overfeat', initializer=initializer)\n", (1545, 1582), True, 'import tensorflow as tf\n'), ((1596, 1667), 'tensorflow.get_variable', 'tf.get_variable', (['"""ip"""'], {'shape': "[H['later_feat_channels'], H['lstm_size']]"}), "('ip', shape=[H['later_feat_channels'], H['lstm_size']])\n", (1611, 1667), True, 'import tensorflow as tf\n'), ((5657, 5723), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""decoder"""'], {'reuse': 'reuse', 'initializer': 'initializer'}), "('decoder', reuse=reuse, initializer=initializer)\n", (5674, 5723), True, 'import tensorflow as tf\n'), ((5772, 5862), 'tensorflow.reshape', 'tf.reshape', (['(cnn * scale_down)', "(H['batch_size'] * grid_size, H['later_feat_channels'])"], {}), "(cnn * scale_down, (H['batch_size'] * grid_size, H[\n 'later_feat_channels']))\n", (5782, 5862), True, 'import tensorflow as tf\n'), ((6865, 6889), 
'tensorflow.concat', 'tf.concat', (['(1)', 'pred_boxes'], {}), '(1, pred_boxes)\n', (6874, 6889), True, 'import tensorflow as tf\n'), ((6912, 6937), 'tensorflow.concat', 'tf.concat', (['(1)', 'pred_logits'], {}), '(1, pred_logits)\n', (6921, 6937), True, 'import tensorflow as tf\n'), ((6967, 7037), 'tensorflow.reshape', 'tf.reshape', (['pred_logits', "[outer_size * H['rnn_len'], H['num_classes']]"], {}), "(pred_logits, [outer_size * H['rnn_len'], H['num_classes']])\n", (6977, 7037), True, 'import tensorflow as tf\n'), ((7112, 7145), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['pred_logits_squash'], {}), '(pred_logits_squash)\n', (7125, 7145), True, 'import tensorflow as tf\n'), ((7173, 7259), 'tensorflow.reshape', 'tf.reshape', (['pred_confidences_squash', "[outer_size, H['rnn_len'], H['num_classes']]"], {}), "(pred_confidences_squash, [outer_size, H['rnn_len'], H[\n 'num_classes']])\n", (7183, 7259), True, 'import tensorflow as tf\n'), ((1691, 1715), 'tensorflow.matmul', 'tf.matmul', (['lstm_input', 'w'], {}), '(lstm_input, w)\n', (1700, 1715), True, 'import tensorflow as tf\n'), ((1849, 1890), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (1877, 1890), True, 'import tensorflow as tf\n'), ((4284, 4324), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""deconv"""'], {'reuse': 'reuse'}), "('deconv', reuse=reuse)\n", (4301, 4324), True, 'import tensorflow as tf\n'), ((4552, 4620), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['cnn', 'w'], {'strides': '[1, stride, stride, 1]', 'padding': '"""SAME"""'}), "(cnn, w, strides=[1, stride, stride, 1], padding='SAME')\n", (4564, 4620), True, 'import tensorflow as tf\n'), ((4646, 4760), 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['cnn_s[:, :, :, :256]'], {'ksize': '[1, pool_size, pool_size, 1]', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(cnn_s[:, :, :, :256], ksize=[1, pool_size, pool_size, 1],\n strides=[1, 1, 1, 1], padding='SAME')\n", 
(4660, 4760), True, 'import tensorflow as tf\n'), ((4828, 4876), 'tensorflow.concat', 'tf.concat', (['(3)', '[cnn_s_pool, cnn_s[:, :, :, 256:]]'], {}), '(3, [cnn_s_pool, cnn_s[:, :, :, 256:]])\n', (4837, 4876), True, 'import tensorflow as tf\n'), ((5090, 5136), 'tensorflow.concat', 'tf.concat', (['(3)', '(cnn_deconv, cnn[:, :, :, 256:])'], {}), '(3, (cnn_deconv, cnn[:, :, :, 256:]))\n', (5099, 5136), True, 'import tensorflow as tf\n'), ((5293, 5392), 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['cnn2'], {'ksize': '[1, pool_size, pool_size, 1]', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(cnn2, ksize=[1, pool_size, pool_size, 1], strides=[1, 1, 1, \n 1], padding='SAME')\n", (5307, 5392), True, 'import tensorflow as tf\n'), ((5432, 5458), 'tensorflow.concat', 'tf.concat', (['(3)', '[cnn1, cnn2]'], {}), '(3, [cnn1, cnn2])\n', (5441, 5458), True, 'import tensorflow as tf\n'), ((6256, 6314), 'tensorflow.get_variable', 'tf.get_variable', (["('box_ip%d' % k)"], {'shape': "(H['lstm_size'], 4)"}), "('box_ip%d' % k, shape=(H['lstm_size'], 4))\n", (6271, 6314), True, 'import tensorflow as tf\n'), ((6384, 6458), 'tensorflow.get_variable', 'tf.get_variable', (["('conf_ip%d' % k)"], {'shape': "(H['lstm_size'], H['num_classes'])"}), "('conf_ip%d' % k, shape=(H['lstm_size'], H['num_classes']))\n", (6399, 6458), True, 'import tensorflow as tf\n'), ((9070, 9101), 'tensorflow.concat', 'tf.concat', (['(1)', 'pred_confs_deltas'], {}), '(1, pred_confs_deltas)\n', (9079, 9101), True, 'import tensorflow as tf\n'), ((981, 1021), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (1010, 1021), True, 'import tensorflow as tf\n'), ((2722, 2821), 'recognition.utils.train_utils.bilinear_select', 'train_utils.bilinear_select', (['H', 'pred_boxes', 'early_feat', 'early_feat_channels', 'w_offset', 'h_offset'], {}), '(H, pred_boxes, early_feat, early_feat_channels,\n w_offset, h_offset)\n', (2749, 2821), False, 'from 
recognition.utils import train_utils, googlenet_load\n'), ((6203, 6229), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['output', '(0.5)'], {}), '(output, 0.5)\n', (6216, 6229), True, 'import tensorflow as tf\n'), ((7714, 7749), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['rezoom_features', '(0.5)'], {}), '(rezoom_features, 0.5)\n', (7727, 7749), True, 'import tensorflow as tf\n'), ((7825, 7891), 'tensorflow.concat', 'tf.concat', (['(1)', '[lstm_outputs[k], rezoom_features[:, k, :] / 1000.0]'], {}), '(1, [lstm_outputs[k], rezoom_features[:, k, :] / 1000.0])\n', (7834, 7891), True, 'import tensorflow as tf\n'), ((7950, 8054), 'tensorflow.get_variable', 'tf.get_variable', (["('delta_ip1%d' % k)"], {'shape': "[H['lstm_size'] + early_feat_channels * num_offsets, dim]"}), "('delta_ip1%d' % k, shape=[H['lstm_size'] + \n early_feat_channels * num_offsets, dim])\n", (7965, 8054), True, 'import tensorflow as tf\n'), ((8335, 8400), 'tensorflow.get_variable', 'tf.get_variable', (["('delta_ip2%d' % k)"], {'shape': "[dim, H['num_classes']]"}), "('delta_ip2%d' % k, shape=[dim, H['num_classes']])\n", (8350, 8400), True, 'import tensorflow as tf\n'), ((9169, 9200), 'tensorflow.concat', 'tf.concat', (['(1)', 'pred_boxes_deltas'], {}), '(1, pred_boxes_deltas)\n', (9178, 9200), True, 'import tensorflow as tf\n'), ((4489, 4530), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (4517, 4530), True, 'import tensorflow as tf\n'), ((6544, 6574), 'tensorflow.matmul', 'tf.matmul', (['output', 'box_weights'], {}), '(output, box_weights)\n', (6553, 6574), True, 'import tensorflow as tf\n'), ((6732, 6763), 'tensorflow.matmul', 'tf.matmul', (['output', 'conf_weights'], {}), '(output, conf_weights)\n', (6741, 6763), True, 'import tensorflow as tf\n'), ((8167, 8208), 'tensorflow.matmul', 'tf.matmul', (['delta_features', 'delta_weights1'], {}), '(delta_features, delta_weights1)\n', (8176, 8208), True, 'import tensorflow as tf\n'), 
((8273, 8296), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['ip1', '(0.5)'], {}), '(ip1, 0.5)\n', (8286, 8296), True, 'import tensorflow as tf\n'), ((8519, 8574), 'tensorflow.get_variable', 'tf.get_variable', (["('delta_ip_boxes%d' % k)"], {'shape': '[dim, 4]'}), "('delta_ip_boxes%d' % k, shape=[dim, 4])\n", (8534, 8574), True, 'import tensorflow as tf\n'), ((1100, 1123), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (1121, 1123), True, 'import tensorflow as tf\n'), ((8905, 8940), 'tensorflow.matmul', 'tf.matmul', (['ip1', 'delta_confs_weights'], {}), '(ip1, delta_confs_weights)\n', (8914, 8940), True, 'import tensorflow as tf\n'), ((8680, 8715), 'tensorflow.matmul', 'tf.matmul', (['ip1', 'delta_boxes_weights'], {}), '(ip1, delta_boxes_weights)\n', (8689, 8715), True, 'import tensorflow as tf\n')] |
# AUTOGENERATED! DO NOT EDIT! File to edit: linear.ipynb (unless otherwise specified).
__all__ = ['vv', 'denoising_MRF']
# Cell
import numpy as np
import gtsam
from gtsam import noiseModel
from .display import show
from typing import Dict
# Cell
def vv(keys_vectors: Dict[int, np.ndarray]):
    """Build a gtsam.VectorValues holding every (key, vector) pair of the dict."""
    values = gtsam.VectorValues()
    for key, vector in keys_vectors.items():
        values.insert(key, vector)
    return values
# Cell
def denoising_MRF(M: int, N: int, sigma=0.5, smoothness_sigma=0.5):
    """Build an M x N denoising Markov random field.

    Each cell receives a unary data factor from a fixed-seed N(0, sigma)
    sample, and binary smoothness factors tying it to its left and upper
    neighbours.

    @returns (graph, row_symbols): the GaussianFactorGraph and the
             one-letter symbols used for the rows.
    """
    row_symbols = [chr(ord('a') + row) for row in range(M)]
    keys = {(row, col): gtsam.symbol(row_symbols[row], col + 1)
            for row in range(M) for col in range(N)}
    # Deterministic noisy observations, one scalar per cell.
    rng = np.random.default_rng(42)
    data = rng.normal(loc=0, scale=sigma, size=(M, N, 1))
    data_model = noiseModel.Isotropic.Sigmas([sigma])
    smoothness_model = noiseModel.Isotropic.Sigmas([smoothness_sigma])
    identity = np.eye(1, 1, dtype=float)
    zero = np.zeros((1, 1))
    graph = gtsam.GaussianFactorGraph()
    for row in range(M):
        for col in range(N):
            j = keys[(row, col)]
            # Unary measurement factor for this cell.
            graph.add(j, identity, np.array(data[row, col]), data_model)
            # Smoothness factors against left and upper neighbours.
            if col > 0:
                graph.add(j, identity, keys[(row, col - 1)], -identity, zero, smoothness_model)
            if row > 0:
                graph.add(j, identity, keys[(row - 1, col)], -identity, zero, smoothness_model)
    return graph, row_symbols
# Cell
| [
"numpy.eye",
"gtsam.noiseModel.Isotropic.Sigmas",
"numpy.random.default_rng",
"gtsam.symbol",
"numpy.array",
"numpy.zeros",
"gtsam.VectorValues",
"gtsam.GaussianFactorGraph"
] | [((352, 372), 'gtsam.VectorValues', 'gtsam.VectorValues', ([], {}), '()\n', (370, 372), False, 'import gtsam\n'), ((800, 825), 'numpy.random.default_rng', 'np.random.default_rng', (['(42)'], {}), '(42)\n', (821, 825), True, 'import numpy as np\n'), ((901, 937), 'gtsam.noiseModel.Isotropic.Sigmas', 'noiseModel.Isotropic.Sigmas', (['[sigma]'], {}), '([sigma])\n', (928, 937), False, 'from gtsam import noiseModel\n'), ((962, 1009), 'gtsam.noiseModel.Isotropic.Sigmas', 'noiseModel.Isotropic.Sigmas', (['[smoothness_sigma]'], {}), '([smoothness_sigma])\n', (989, 1009), False, 'from gtsam import noiseModel\n'), ((1019, 1044), 'numpy.eye', 'np.eye', (['(1)', '(1)'], {'dtype': 'float'}), '(1, 1, dtype=float)\n', (1025, 1044), True, 'import numpy as np\n'), ((1056, 1072), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (1064, 1072), True, 'import numpy as np\n'), ((1085, 1112), 'gtsam.GaussianFactorGraph', 'gtsam.GaussianFactorGraph', ([], {}), '()\n', (1110, 1112), False, 'import gtsam\n'), ((698, 737), 'gtsam.symbol', 'gtsam.symbol', (['row_symbols[row]', '(col + 1)'], {}), '(row_symbols[row], col + 1)\n', (710, 737), False, 'import gtsam\n'), ((1258, 1282), 'numpy.array', 'np.array', (['data[row, col]'], {}), '(data[row, col])\n', (1266, 1282), True, 'import numpy as np\n')] |
import os
from shutil import copyfile
from sphinxcontrib.collections.drivers import Driver
class CopyFileDriver(Driver):
    """Collection driver that copies a single file from source to target."""

    def run(self):
        """Copy the configured source file to the configured target path."""
        self.info('Copy file...')
        source = self.config['source']
        target = self.config['target']
        if not os.path.exists(source):
            self.error('Source {} does not exist'.format(source))
            return
        try:
            copyfile(source, target)
        except IOError as e:
            self.error('Problems during copying file.', e)

    def clean(self):
        """Delete the copied target file; a missing file is not an error."""
        target = self.config['target']
        try:
            os.remove(target)
            self.info('File deleted: {}'.format(target))
        except FileNotFoundError:
            pass  # Already cleaned? I'm okay with it.
        except IOError as e:
            self.error('Problems during cleaning for collection {}'.format(self.config['name']), e)
| [
"os.path.exists",
"shutil.copyfile",
"os.remove"
] | [((195, 232), 'os.path.exists', 'os.path.exists', (["self.config['source']"], {}), "(self.config['source'])\n", (209, 232), False, 'import os\n'), ((360, 414), 'shutil.copyfile', 'copyfile', (["self.config['source']", "self.config['target']"], {}), "(self.config['source'], self.config['target'])\n", (368, 414), False, 'from shutil import copyfile\n'), ((571, 603), 'os.remove', 'os.remove', (["self.config['target']"], {}), "(self.config['target'])\n", (580, 603), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# Copyright 2020 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import ast
import re
from ansible.plugins.action import ActionBase
from ansible.module_utils.common._collections_compat import (
MutableMapping,
MutableSequence,
)
from ansible.module_utils._text import to_native
from jinja2 import Template, TemplateSyntaxError
from ansible_collections.ansible.utils.plugins.modules.update_fact import (
DOCUMENTATION,
)
from ansible_collections.ansible.utils.plugins.module_utils.common.argspec_validate import (
AnsibleArgSpecValidator,
)
from ansible.errors import AnsibleActionFail
class ActionModule(ActionBase):
    """Action plugin backing the ansible.utils.update_fact module.

    Applies a list of path-addressed updates to existing facts, marking
    the task changed only when a value actually differs.
    """
    def __init__(self, *args, **kwargs):
        """Initialize the action and its per-run bookkeeping attributes."""
        super(ActionModule, self).__init__(*args, **kwargs)
        self._supports_async = True
        self._updates = None  # reserved; not read in this class
        self._result = None   # task result dict, populated in run()
    def _check_argspec(self):
        """Validate task arguments against the module DOCUMENTATION spec.

        Replaces self._task.args with the validated/normalized arguments;
        raises AnsibleActionFail on validation errors.
        """
        aav = AnsibleArgSpecValidator(
            data=self._task.args, schema=DOCUMENTATION, name=self._task.action
        )
        valid, errors, self._task.args = aav.validate()
        if not valid:
            raise AnsibleActionFail(errors)
    def _ensure_valid_jinja(self):
        """Ensure each update path is valid jinja.

        Each path is wrapped in '{{ ... }}' and compiled; all syntax
        errors are collected and raised together as one failure.
        """
        errors = []
        for entry in self._task.args["updates"]:
            try:
                Template("{{" + entry["path"] + "}}")
            except TemplateSyntaxError as exc:
                error = (
                    "While processing '{path}' found malformed path."
                    " Ensure syntax follows valid jinja format. The error was:"
                    " {error}"
                ).format(path=entry["path"], error=to_native(exc))
                errors.append(error)
        if errors:
            raise AnsibleActionFail(" ".join(errors))
    @staticmethod
    def _field_split(path):
        """Split the path into its parts.

        Handles dotted keys ('a.b'), bracketed indices ('a[0]') and
        quoted keys ("a['k']").  Integer-looking parts become ints via
        ast.literal_eval; everything else has surrounding quotes stripped.
        Termination is exception-driven: popping past the end of the
        character queue raises IndexError, which flushes the final field.

        :param path: The user provided path
        :type path: str
        :return: the individual parts of the path
        :rtype: list
        """
        que = list(path)
        val = que.pop(0)
        fields = []
        try:
            while True:
                field = ""
                # found a '.', move to the next character
                if val == ".":
                    val = que.pop(0)
                # found a '[', pop until ']' and then get the next
                if val == "[":
                    val = que.pop(0)
                    while val != "]":
                        field += val
                        val = que.pop(0)
                    val = que.pop(0)
                else:
                    while val not in [".", "["]:
                        field += val
                        val = que.pop(0)
                try:
                    # make numbers numbers
                    fields.append(ast.literal_eval(field))
                except Exception:
                    # or strip the quotes
                    fields.append(re.sub("['\"]", "", field))
        except IndexError:
            # pop'ed past the end of the que
            # so add the final field
            try:
                fields.append(ast.literal_eval(field))
            except Exception:
                fields.append(re.sub("['\"]", "", field))
        return fields
    def set_value(self, obj, path, val):
        """Set a value, recursing into obj one path part at a time.

        Sets self._result["changed"] only when the stored value actually
        differs from val.  Appending one-past-the-end of a list is
        allowed; anything further raises.

        :param obj: The object to modify
        :type obj: mutable object
        :param path: The path to where the update should be made
        :type path: list
        :param val: The new value to place at path
        :type val: string, dict, list, bool, etc
        """
        first, rest = path[0], path[1:]
        if rest:
            # Not at the leaf yet: descend into the child at `first`.
            try:
                new_obj = obj[first]
            except (KeyError, TypeError):
                msg = (
                    "Error: the key '{first}' was not found "
                    "in {obj}.".format(obj=obj, first=first)
                )
                raise AnsibleActionFail(msg)
            self.set_value(new_obj, rest, val)
        else:
            if isinstance(obj, MutableMapping):
                if obj.get(first) != val:
                    self._result["changed"] = True
                obj[first] = val
            elif isinstance(obj, MutableSequence):
                if not isinstance(first, int):
                    msg = (
                        "Error: {obj} is a list, "
                        "but index provided was not an integer: '{first}'"
                    ).format(obj=obj, first=first)
                    raise AnsibleActionFail(msg)
                if first > len(obj):
                    msg = "Error: {obj} not long enough for item #{first} to be set.".format(
                        obj=obj, first=first
                    )
                    raise AnsibleActionFail(msg)
                if first == len(obj):
                    # Appending exactly one past the end is permitted.
                    obj.append(val)
                    self._result["changed"] = True
                else:
                    if obj[first] != val:
                        obj[first] = val
                        self._result["changed"] = True
            else:
                msg = "update_fact can only modify mutable objects."
                raise AnsibleActionFail(msg)
    def run(self, tmp=None, task_vars=None):
        """Action entry point: validate args, apply updates, return facts.

        Each touched top-level fact is copied back into the result so the
        controller can re-register it; "changed" reflects whether any
        value was actually modified.
        """
        self._task.diff = False
        self._result = super(ActionModule, self).run(tmp, task_vars)
        self._result["changed"] = False
        self._check_argspec()
        results = set()
        self._ensure_valid_jinja()
        for entry in self._task.args["updates"]:
            parts = self._field_split(entry["path"])
            obj, path = parts[0], parts[1:]
            results.add(obj)
            if obj not in task_vars["vars"]:
                msg = "'{obj}' was not found in the current facts.".format(
                    obj=obj
                )
                raise AnsibleActionFail(msg)
            retrieved = task_vars["vars"].get(obj)
            if path:
                self.set_value(retrieved, path, entry["value"])
            else:
                # Bare top-level fact: replace it wholesale if different.
                if task_vars["vars"][obj] != entry["value"]:
                    task_vars["vars"][obj] = entry["value"]
                    self._result["changed"] = True
        for key in results:
            value = task_vars["vars"].get(key)
            self._result[key] = value
        return self._result
| [
"jinja2.Template",
"ast.literal_eval",
"ansible.module_utils._text.to_native",
"re.sub",
"ansible_collections.ansible.utils.plugins.module_utils.common.argspec_validate.AnsibleArgSpecValidator",
"ansible.errors.AnsibleActionFail"
] | [((1094, 1190), 'ansible_collections.ansible.utils.plugins.module_utils.common.argspec_validate.AnsibleArgSpecValidator', 'AnsibleArgSpecValidator', ([], {'data': 'self._task.args', 'schema': 'DOCUMENTATION', 'name': 'self._task.action'}), '(data=self._task.args, schema=DOCUMENTATION, name=\n self._task.action)\n', (1117, 1190), False, 'from ansible_collections.ansible.utils.plugins.module_utils.common.argspec_validate import AnsibleArgSpecValidator\n'), ((1304, 1329), 'ansible.errors.AnsibleActionFail', 'AnsibleActionFail', (['errors'], {}), '(errors)\n', (1321, 1329), False, 'from ansible.errors import AnsibleActionFail\n'), ((1514, 1551), 'jinja2.Template', 'Template', (["('{{' + entry['path'] + '}}')"], {}), "('{{' + entry['path'] + '}}')\n", (1522, 1551), False, 'from jinja2 import Template, TemplateSyntaxError\n'), ((6107, 6129), 'ansible.errors.AnsibleActionFail', 'AnsibleActionFail', (['msg'], {}), '(msg)\n', (6124, 6129), False, 'from ansible.errors import AnsibleActionFail\n'), ((4156, 4178), 'ansible.errors.AnsibleActionFail', 'AnsibleActionFail', (['msg'], {}), '(msg)\n', (4173, 4178), False, 'from ansible.errors import AnsibleActionFail\n'), ((5411, 5433), 'ansible.errors.AnsibleActionFail', 'AnsibleActionFail', (['msg'], {}), '(msg)\n', (5428, 5433), False, 'from ansible.errors import AnsibleActionFail\n'), ((3019, 3042), 'ast.literal_eval', 'ast.literal_eval', (['field'], {}), '(field)\n', (3035, 3042), False, 'import ast\n'), ((3338, 3361), 'ast.literal_eval', 'ast.literal_eval', (['field'], {}), '(field)\n', (3354, 3361), False, 'import ast\n'), ((4747, 4769), 'ansible.errors.AnsibleActionFail', 'AnsibleActionFail', (['msg'], {}), '(msg)\n', (4764, 4769), False, 'from ansible.errors import AnsibleActionFail\n'), ((4994, 5016), 'ansible.errors.AnsibleActionFail', 'AnsibleActionFail', (['msg'], {}), '(msg)\n', (5011, 5016), False, 'from ansible.errors import AnsibleActionFail\n'), ((1857, 1871), 'ansible.module_utils._text.to_native', 
'to_native', (['exc'], {}), '(exc)\n', (1866, 1871), False, 'from ansible.module_utils._text import to_native\n'), ((3154, 3180), 're.sub', 're.sub', (['"""[\'"]"""', '""""""', 'field'], {}), '(\'[\\\'"]\', \'\', field)\n', (3160, 3180), False, 'import re\n'), ((3423, 3449), 're.sub', 're.sub', (['"""[\'"]"""', '""""""', 'field'], {}), '(\'[\\\'"]\', \'\', field)\n', (3429, 3449), False, 'import re\n')] |
import os
import zipfile
import zlib
def make_rel_archive(a_parent, a_name):
    """Pack the release plugin files into release/<a_name>.zip.

    :param a_parent: path prefix (expected to end with a separator) under
                     which the F4SE/Plugins files live
    :param a_name:   plugin name; selects the .dll/.toml/_preload.txt files
    """
    # Context manager guarantees the archive is flushed and closed even if
    # a write fails (the original leaked the open ZipFile handle).
    with zipfile.ZipFile("release/" + a_name + ".zip", "w", zipfile.ZIP_DEFLATED) as archive:
        def do_write(a_relative):
            # Store each file under its path relative to a_parent.
            archive.write(a_parent + a_relative, a_relative)
        do_write("F4SE/Plugins/" + a_name + ".dll")
        do_write("F4SE/Plugins/" + a_name + ".toml")
        do_write("F4SE/Plugins/" + a_name + "_preload.txt")
def make_dbg_archive(a_parent, a_name):
    """Pack the debug symbols (.pdb) into release/<a_name>_pdb.zip.

    The pdb is stored at the archive root (path flattened).
    """
    # Context manager closes/flushes the archive (the original leaked it).
    with zipfile.ZipFile("release/" + a_name + "_pdb" + ".zip", "w", zipfile.ZIP_DEFLATED) as archive:
        archive.write(a_parent + "F4SE/Plugins/" + a_name + ".pdb", a_name + ".pdb")
def main():
    """Build the release and debug archives for the current project.

    Requires the Fallout4Path environment variable; the project name is
    taken from the directory containing this script.
    """
    # Work from the script's own directory so relative paths resolve.
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    # exist_ok replaces the mkdir + except FileExistsError dance.
    os.makedirs("release", exist_ok=True)
    parent = os.environ["Fallout4Path"] + "/Data/"
    # Project name == name of the directory this script lives in.
    project = os.path.split(os.getcwd())[1].strip(os.sep)
    make_rel_archive(parent, project)
    make_dbg_archive(parent, project)
if __name__ == "__main__":
    main()
| [
"os.path.realpath",
"os.mkdir",
"zipfile.ZipFile",
"os.getcwd"
] | [((89, 161), 'zipfile.ZipFile', 'zipfile.ZipFile', (["('release/' + a_name + '.zip')", '"""w"""', 'zipfile.ZIP_DEFLATED'], {}), "('release/' + a_name + '.zip', 'w', zipfile.ZIP_DEFLATED)\n", (104, 161), False, 'import zipfile\n'), ((437, 523), 'zipfile.ZipFile', 'zipfile.ZipFile', (["('release/' + a_name + '_pdb' + '.zip')", '"""w"""', 'zipfile.ZIP_DEFLATED'], {}), "('release/' + a_name + '_pdb' + '.zip', 'w', zipfile.\n ZIP_DEFLATED)\n", (452, 523), False, 'import zipfile\n'), ((673, 692), 'os.mkdir', 'os.mkdir', (['"""release"""'], {}), "('release')\n", (681, 692), False, 'import os\n'), ((636, 662), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (652, 662), False, 'import os\n'), ((799, 810), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (808, 810), False, 'import os\n')] |
"""Evaluating Prophet model on M4 timeseries
"""
from darts.models import Prophet
from darts.utils.statistics import check_seasonality
from darts.utils import _build_tqdm_iterator
import numpy as np
import pandas as pd
import pickle as pkl
from M4_metrics import owa_m4, mase_m4, smape_m4
if __name__ == "__main__":
    # Evaluate Prophet on each M4 frequency category, hardest-first.
    data_categories = ['Yearly', 'Quarterly', 'Monthly', 'Weekly', 'Daily', 'Hourly']
    info_dataset = pd.read_csv('dataset/M4-info.csv', delimiter=',').set_index('M4id')
    for cat in data_categories[::-1]:
        # Load TimeSeries from M4
        # NOTE(review): file handles from open() are never closed here —
        # harmless for a one-shot script but worth tidying.
        ts_train = pkl.load(open("dataset/train_"+cat+".pkl", "rb"))
        ts_test = pkl.load(open("dataset/test_"+cat+".pkl", "rb"))
        # Test models on all time series
        mase_all = []
        smape_all = []
        # Seasonal period for this category, e.g. Frequency['Y1'].
        m = int(info_dataset.Frequency[cat[0]+"1"])
        for train, test in _build_tqdm_iterator(zip(ts_train, ts_test), verbose=True):
            train_des = train   # unused; kept as-is
            seasonOut = 1       # unused; kept as-is
            if m > 1:
                # Drop seasonality handling when the series shows none.
                # NOTE(review): setting m = 1 here persists for all later
                # series in this category — verify that is intentional.
                if check_seasonality(train, m=int(m), max_lag=2*m):
                    pass
                else:
                    m = 1
            try:
                # Enable exactly one seasonality, matched to the category.
                prophet_args = {
                    'daily_seasonality': False,
                    'weekly_seasonality': False,
                    'yearly_seasonality': False,
                    'frequency': None,
                    'changepoint_range': 0.95,
                }
                if cat == 'Daily':
                    prophet_args['daily_seasonality'] = True
                elif cat == 'Hourly':
                    prophet_args['daily_seasonality'] = True
                elif cat == 'Weekly':
                    prophet_args['weekly_seasonality'] = True
                elif cat == 'Monthly':
                    prophet_args['yearly_seasonality'] = True
                elif cat == 'Quarterly':
                    prophet_args['yearly_seasonality'] = True
                elif cat == 'Yearly':
                    prophet_args['yearly_seasonality'] = True
                prophet = Prophet(**prophet_args)
                # If the series has a large relative jump, fit only the
                # portion after the biggest first-difference.
                derivate = np.diff(train.univariate_values(), n=1)
                jump = derivate.max()/(train.max().max() - train.min().min())
                try:
                    if jump <= 0.5:
                        prophet.fit(train)
                    else:
                        prophet.fit(train.drop_before(train.time_index()[np.argmax(derivate)+1]))
                except ValueError as e:
                    raise e
                forecast_prophet = prophet.predict(len(test))
                # Restore the category's true period for MASE scoring.
                m = info_dataset.Frequency[cat[0]+"1"]
                mase_all.append(np.vstack([
                    mase_m4(train, test, forecast_prophet, m=m),
                ]))
                smape_all.append(np.vstack([
                    smape_m4(test, forecast_prophet),
                ]))
            except Exception as e:
                # Abort the whole category on the first failure.
                print(e)
                break
        pkl.dump(mase_all, open("prophet_mase_"+cat+".pkl", "wb"))
        pkl.dump(smape_all, open("prophet_smape_"+cat+".pkl", "wb"))
        print("MASE; Prophet: {}".format(*tuple(np.nanmean(np.stack(mase_all), axis=(0, 2)))))
        print("sMAPE; Prophet: {}".format(*tuple(np.nanmean(np.stack(smape_all), axis=(0, 2)))))
        print("OWA: ", owa_m4(cat, np.nanmean(np.stack(smape_all), axis=(0, 2)),
                                   np.nanmean(np.stack(mase_all), axis=(0, 2))))
| [
"M4_metrics.mase_m4",
"pandas.read_csv",
"M4_metrics.smape_m4",
"numpy.argmax",
"numpy.stack",
"darts.models.Prophet"
] | [((429, 478), 'pandas.read_csv', 'pd.read_csv', (['"""dataset/M4-info.csv"""'], {'delimiter': '""","""'}), "('dataset/M4-info.csv', delimiter=',')\n", (440, 478), True, 'import pandas as pd\n'), ((2152, 2175), 'darts.models.Prophet', 'Prophet', ([], {}), '(**prophet_args)\n', (2159, 2175), False, 'from darts.models import Prophet\n'), ((3442, 3461), 'numpy.stack', 'np.stack', (['smape_all'], {}), '(smape_all)\n', (3450, 3461), True, 'import numpy as np\n'), ((3518, 3536), 'numpy.stack', 'np.stack', (['mase_all'], {}), '(mase_all)\n', (3526, 3536), True, 'import numpy as np\n'), ((2794, 2837), 'M4_metrics.mase_m4', 'mase_m4', (['train', 'test', 'forecast_prophet'], {'m': 'm'}), '(train, test, forecast_prophet, m=m)\n', (2801, 2837), False, 'from M4_metrics import owa_m4, mase_m4, smape_m4\n'), ((2928, 2960), 'M4_metrics.smape_m4', 'smape_m4', (['test', 'forecast_prophet'], {}), '(test, forecast_prophet)\n', (2936, 2960), False, 'from M4_metrics import owa_m4, mase_m4, smape_m4\n'), ((3263, 3281), 'numpy.stack', 'np.stack', (['mase_all'], {}), '(mase_all)\n', (3271, 3281), True, 'import numpy as np\n'), ((3359, 3378), 'numpy.stack', 'np.stack', (['smape_all'], {}), '(smape_all)\n', (3367, 3378), True, 'import numpy as np\n'), ((2520, 2539), 'numpy.argmax', 'np.argmax', (['derivate'], {}), '(derivate)\n', (2529, 2539), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import os
from datasets import convert_data
# Command-line interface: every option of the converter is exposed as a
# TensorFlow app flag and read back through FLAGS in main().
FLAGS = tf.app.flags.FLAGS
# which split to convert; convert_data.run() receives this verbatim
tf.app.flags.DEFINE_string('data_type', None,
'The type of the dataset to convert, need to be either "train" or "test".')
tf.app.flags.DEFINE_string('dataset_dir', None,
'The directory where the image files are saved.')
tf.app.flags.DEFINE_string('output_dir', None,
'The directory where the output TFRecords are saved.')
tf.app.flags.DEFINE_string('filename', None,
'The txt file where the list all image files to be converted.')
# number of TFRecord shards to produce (default: a single file)
tf.app.flags.DEFINE_integer('num_tfrecords', 1,
'Number of tfrecords to convert.')
def main(_):
    """Entry point: ensure the output directory exists, then run the
    TFRecord conversion with the parameters taken from the app flags.

    :param _: unused positional argument supplied by ``tf.app.run``
    """
    directory = FLAGS.output_dir
    # Create the output directory. Attempting the creation and tolerating
    # an "already exists" failure avoids the race between an exists()
    # check and makedirs() when several converters start concurrently.
    try:
        os.makedirs(directory)
    except OSError:
        # re-raise unless the directory is already there
        if not os.path.isdir(directory):
            raise
    # start converting the data to TFRecords
    convert_data.run(dataset_dir=FLAGS.dataset_dir,
                     output_dir=FLAGS.output_dir,
                     filename=FLAGS.filename,
                     data_type=FLAGS.data_type,
                     num_tfrecords=FLAGS.num_tfrecords)
if __name__ == '__main__':
    tf.app.run()
| [
"os.path.exists",
"tensorflow.app.flags.DEFINE_integer",
"os.makedirs",
"tensorflow.app.flags.DEFINE_string",
"datasets.convert_data.run",
"tensorflow.app.run"
] | [((208, 333), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""data_type"""', 'None', '"""The type of the dataset to convert, need to be either "train" or "test"."""'], {}), '(\'data_type\', None,\n \'The type of the dataset to convert, need to be either "train" or "test".\')\n', (234, 333), True, 'import tensorflow as tf\n'), ((357, 458), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""dataset_dir"""', 'None', '"""The directory where the image files are saved."""'], {}), "('dataset_dir', None,\n 'The directory where the image files are saved.')\n", (383, 458), True, 'import tensorflow as tf\n'), ((482, 587), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""output_dir"""', 'None', '"""The directory where the output TFRecords are saved."""'], {}), "('output_dir', None,\n 'The directory where the output TFRecords are saved.')\n", (508, 587), True, 'import tensorflow as tf\n'), ((611, 723), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""filename"""', 'None', '"""The txt file where the list all image files to be converted."""'], {}), "('filename', None,\n 'The txt file where the list all image files to be converted.')\n", (637, 723), True, 'import tensorflow as tf\n'), ((747, 833), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_tfrecords"""', '(1)', '"""Number of tfrecords to convert."""'], {}), "('num_tfrecords', 1,\n 'Number of tfrecords to convert.')\n", (774, 833), True, 'import tensorflow as tf\n'), ((1055, 1227), 'datasets.convert_data.run', 'convert_data.run', ([], {'dataset_dir': 'FLAGS.dataset_dir', 'output_dir': 'FLAGS.output_dir', 'filename': 'FLAGS.filename', 'data_type': 'FLAGS.data_type', 'num_tfrecords': 'FLAGS.num_tfrecords'}), '(dataset_dir=FLAGS.dataset_dir, output_dir=FLAGS.output_dir,\n filename=FLAGS.filename, data_type=FLAGS.data_type, num_tfrecords=FLAGS\n .num_tfrecords)\n', (1071, 1227), False, 'from 
datasets import convert_data\n'), ((1339, 1351), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (1349, 1351), True, 'import tensorflow as tf\n'), ((955, 980), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (969, 980), False, 'import os\n'), ((990, 1012), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (1001, 1012), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
from vispy.testing import run_tests_if_main
from vispy.geometry import (create_box, create_cube, create_cylinder,
create_sphere, create_plane)
def test_box():
    """Test box function"""
    vertices, filled, outline = create_box()
    # both index arrays must reference every vertex exactly once
    expected = np.arange(len(vertices))
    assert_array_equal(expected, np.unique(filled))
    assert_array_equal(expected, np.unique(outline))
def test_cube():
    """Test cube function"""
    vertices, filled, outline = create_cube()
    # every vertex index must appear in the filled and the outline faces
    expected = np.arange(len(vertices))
    assert_array_equal(expected, np.unique(filled))
    assert_array_equal(expected, np.unique(outline))
def test_sphere():
    """Test sphere function"""
    # every construction method must yield vertices at distance 10 from origin
    configs = (
        dict(rows=10, cols=20, radius=10, method='latitude'),
        dict(subdivisions=5, radius=10, method='ico'),
        dict(rows=20, cols=20, depth=20, radius=10, method='cube'),
    )
    for kwargs in configs:
        md = create_sphere(**kwargs)
        radii = np.sqrt((md.get_vertices() ** 2).sum(axis=1))
        assert_allclose(radii, np.ones_like(radii) * 10)
def test_cylinder():
    """Test cylinder function"""
    md = create_cylinder(10, 20, radius=[10, 10])
    # distance from the cylinder axis (x/y only) must equal the radius
    vert_xy = md.get_vertices()[:, :2]
    radii = np.sqrt((vert_xy ** 2).sum(axis=1))
    assert_allclose(radii, 10 * np.ones_like(radii))
def test_plane():
    """Test plane function"""
    vertices, filled, outline = create_plane()
    expected = np.arange(len(vertices))
    # filled first, then outline -- both must cover every vertex index
    for indices in (filled, outline):
        assert_array_equal(expected, np.unique(indices))


run_tests_if_main()
| [
"numpy.ones_like",
"numpy.unique",
"vispy.geometry.create_cylinder",
"vispy.testing.run_tests_if_main",
"vispy.geometry.create_plane",
"vispy.geometry.create_sphere",
"vispy.geometry.create_cube",
"vispy.geometry.create_box"
] | [((1913, 1932), 'vispy.testing.run_tests_if_main', 'run_tests_if_main', ([], {}), '()\n', (1930, 1932), False, 'from vispy.testing import run_tests_if_main\n'), ((475, 487), 'vispy.geometry.create_box', 'create_box', ([], {}), '()\n', (485, 487), False, 'from vispy.geometry import create_box, create_cube, create_cylinder, create_sphere, create_plane\n'), ((705, 718), 'vispy.geometry.create_cube', 'create_cube', ([], {}), '()\n', (716, 718), False, 'from vispy.geometry import create_box, create_cube, create_cylinder, create_sphere, create_plane\n'), ((917, 978), 'vispy.geometry.create_sphere', 'create_sphere', ([], {'rows': '(10)', 'cols': '(20)', 'radius': '(10)', 'method': '"""latitude"""'}), "(rows=10, cols=20, radius=10, method='latitude')\n", (930, 978), False, 'from vispy.geometry import create_box, create_cube, create_cylinder, create_sphere, create_plane\n'), ((1099, 1153), 'vispy.geometry.create_sphere', 'create_sphere', ([], {'subdivisions': '(5)', 'radius': '(10)', 'method': '"""ico"""'}), "(subdivisions=5, radius=10, method='ico')\n", (1112, 1153), False, 'from vispy.geometry import create_box, create_cube, create_cylinder, create_sphere, create_plane\n'), ((1274, 1341), 'vispy.geometry.create_sphere', 'create_sphere', ([], {'rows': '(20)', 'cols': '(20)', 'depth': '(20)', 'radius': '(10)', 'method': '"""cube"""'}), "(rows=20, cols=20, depth=20, radius=10, method='cube')\n", (1287, 1341), False, 'from vispy.geometry import create_box, create_cube, create_cylinder, create_sphere, create_plane\n'), ((1518, 1558), 'vispy.geometry.create_cylinder', 'create_cylinder', (['(10)', '(20)'], {'radius': '[10, 10]'}), '(10, 20, radius=[10, 10])\n', (1533, 1558), False, 'from vispy.geometry import create_box, create_cube, create_cylinder, create_sphere, create_plane\n'), ((1759, 1773), 'vispy.geometry.create_plane', 'create_plane', ([], {}), '()\n', (1771, 1773), False, 'from vispy.geometry import create_box, create_cube, create_cylinder, create_sphere, 
create_plane\n'), ((537, 554), 'numpy.unique', 'np.unique', (['filled'], {}), '(filled)\n', (546, 554), True, 'import numpy as np\n'), ((605, 623), 'numpy.unique', 'np.unique', (['outline'], {}), '(outline)\n', (614, 623), True, 'import numpy as np\n'), ((768, 785), 'numpy.unique', 'np.unique', (['filled'], {}), '(filled)\n', (777, 785), True, 'import numpy as np\n'), ((836, 854), 'numpy.unique', 'np.unique', (['outline'], {}), '(outline)\n', (845, 854), True, 'import numpy as np\n'), ((1823, 1840), 'numpy.unique', 'np.unique', (['filled'], {}), '(filled)\n', (1832, 1840), True, 'import numpy as np\n'), ((1891, 1909), 'numpy.unique', 'np.unique', (['outline'], {}), '(outline)\n', (1900, 1909), True, 'import numpy as np\n'), ((1064, 1083), 'numpy.ones_like', 'np.ones_like', (['radii'], {}), '(radii)\n', (1076, 1083), True, 'import numpy as np\n'), ((1239, 1258), 'numpy.ones_like', 'np.ones_like', (['radii'], {}), '(radii)\n', (1251, 1258), True, 'import numpy as np\n'), ((1427, 1446), 'numpy.ones_like', 'np.ones_like', (['radii'], {}), '(radii)\n', (1439, 1446), True, 'import numpy as np\n'), ((1651, 1670), 'numpy.ones_like', 'np.ones_like', (['radii'], {}), '(radii)\n', (1663, 1670), True, 'import numpy as np\n')] |
import logging
import numpy as np
from bico.geometry.point import Point
from bico.nearest_neighbor.base import NearestNeighbor
from bico.utils.ClusteringFeature import ClusteringFeature
from datetime import datetime
from typing import Callable, TextIO, List
logger = logging.getLogger(__name__)
class BICONode:
    """One node of the BICO clustering-feature tree.

    Each node keeps a single :class:`ClusteringFeature` summarising the
    points absorbed at this node, plus a nearest-neighbour structure over
    the reference points of its children so that new points can be routed
    to the closest child (or open a new child when none is close enough).
    """

    def __init__(self, level: int, dim: int, proj: int, bico: 'BICO',
                 projection_func: Callable[[int, int, float], NearestNeighbor]):
        """Create an (initially empty) node.

        :param level: depth of the node in the tree (0 is the root)
        :param dim: dimensionality of the input points
        :param proj: number of projections for the nearest-neighbour engine
        :param bico: owning BICO instance (supplies radii, thresholds and
            the ``verbose``/``track_time`` flags)
        :param projection_func: factory ``(dim, proj, radius) -> NearestNeighbor``
        """
        self.level = level
        self.dim = dim
        self.proj = proj
        # child nodes; a child's position here is (metadata - 1), where
        # metadata is the integer stored with its point in the NN engine
        self.point_to_biconode = []
        self.projection_func = projection_func
        # NN structure over the children's reference points; the search
        # radius depends on the node's level
        self.nn_engine = projection_func(dim, proj, bico.get_radius(self.level))
        self.num_cfs = 0
        self.bico = bico
        # empty clustering feature of this node
        self.cf = ClusteringFeature(Point(np.zeros(dim)), Point(np.zeros(dim)), 0, 0)

    def insert_point(self, point_cf: ClusteringFeature) -> int:
        """Insert ``point_cf`` into the subtree rooted at this node.

        The point is merged into this node's CF when the resulting k-means
        cost stays below the level threshold; otherwise it is routed to the
        nearest child, creating a new child when no neighbour is found.

        :param point_cf: clustering feature wrapping the point to insert
        :return: number of newly created nodes (0 or 1)
        """
        if self.bico.verbose:
            logger.debug("Insert point: {}".format(point_cf))
        # check whether geometry fits into CF
        if self.level > 0:
            if self.cf.size == 0:
                # node is empty: adopt the point and its reference
                self.cf += point_cf
                self.cf.ref = point_cf.ref
            else:
                # tentatively merge; keep the merge only if the cost stays
                # below the threshold for this level
                test = self.cf + point_cf
                cost = test.kmeans_cost(self.cf.ref)
                if self.bico.verbose:
                    logger.debug("Cost: " + str(cost) + ", Thresh: " + str(self.bico.get_threshold(self.level)))
                if cost < self.bico.get_threshold(self.level):
                    self.cf = test
                    return 0
        # search nearest neighbor and insert geometry there or open new BICONode
        candidates = []
        if self.num_cfs > 0:
            if self.bico.track_time:
                tstart = datetime.now()
            candidates = self.nn_engine.get_candidates(point_cf.ref.p)
            # candidates = self.ann_engine.neighbours(point_cf.ref.p)
            if self.bico.track_time:
                tend = datetime.now()
                # accumulate the per-level lookup time in the BICO object
                if len(self.bico.time) < self.level + 1:
                    self.bico.time.append(tend - tstart)
                else:
                    self.bico.time[self.level] += tend - tstart
        if len(candidates) == 0:
            if self.bico.verbose:
                logger.debug("No nearest neighbor found.")
            # no close child: register the point and open a new child node
            self.num_cfs += 1
            self.nn_engine.insert_candidate(point=point_cf.ref.p, metadata=self.num_cfs)
            # self.ann_engine.store_vector(point_cf.ref.p, data=self.num_cfs)
            new_node = BICONode(self.level + 1, self.dim, self.proj, self.bico, self.projection_func)
            # new_node.cf = ClusteringFeature(geometry, geometry, geometry*geometry, 1)
            new_node.cf = point_cf
            # debug: child list and NN metadata counter must stay in sync
            if len(self.point_to_biconode) != self.num_cfs - 1:
                logger.error("Something is wrong: {} != {}".format(len(self.point_to_biconode), self.num_cfs - 1))
            self.point_to_biconode.append(new_node)
            return 1
        else:
            if self.bico.verbose:
                logger.debug(str(len(candidates)) + " nearest neighbor found!")
                logger.debug(candidates)
            # route the point to the closest child
            nearest = candidates[0]
            node = nearest.data  # contains the (1-based) child index
            # sanity check
            if len(self.point_to_biconode) < node - 2:
                logger.error("Something is wrong: {} > {}".format(len(self.point_to_biconode), node - 2))
            return self.point_to_biconode[node - 1].insert_point(point_cf)

    def output_cf(self, f: TextIO) -> None:
        """Recursively write every clustering feature below this node to ``f``.

        The root level (level 0) itself holds no CF and is skipped.
        """
        if self.level > 0:
            f.write(str(self.cf) + "\n")
        for node in self.point_to_biconode:
            node.output_cf(f)

    def get_cf(self) -> List[np.ndarray]:
        """Collect the clustering features of the whole subtree.

        :return: list of arrays, one per CF, holding the CF's size
            prepended to its center coordinates
        """
        cur = []
        if self.level > 0:
            cur.append(np.insert(self.cf.center().p, 0, self.cf.size))
        for node in self.point_to_biconode:
            cur = cur + node.get_cf()
        return cur
| [
"logging.getLogger",
"datetime.datetime.now",
"numpy.zeros"
] | [((268, 295), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (285, 295), False, 'import logging\n'), ((796, 809), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (804, 809), True, 'import numpy as np\n'), ((818, 831), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (826, 831), True, 'import numpy as np\n'), ((1771, 1785), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1783, 1785), False, 'from datetime import datetime\n'), ((1987, 2001), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1999, 2001), False, 'from datetime import datetime\n')] |
# import required module
import ctypes
# create node class
class Node:
    """Single node of the XOR linked list."""

    def __init__(self, value):
        self.value = value
        # XOR of the ids (addresses) of the previous and the next node;
        # 0 means there is no neighbour on either side
        self.npx = 0
# create linked list class
class XorLinkedList:
    """Memory-efficient doubly linked list using XOR-ed address links.

    Every node stores a single link field ``npx``: the XOR of the ids
    (CPython memory addresses) of its previous and next nodes.  Walking
    the list XORs the previous node's id with the current ``npx`` to get
    the next node's id, which is turned back into an object via
    ``ctypes``.  All nodes are also kept in the private ``__nodes`` list
    so the garbage collector does not reclaim them, because the links
    themselves hold only raw integers.
    """

    # constructor
    def __init__(self):
        self.head = None
        self.tail = None
        self.__nodes = []

    # method to insert node at beginning
    def InsertAtStart(self, value):
        """Insert a new node holding ``value`` at the front of the list."""
        node = Node(value)
        if self.head is None:  # If list is empty
            self.head = node
            self.tail = node
        else:
            # link the old head to the new node and make the new node head
            self.head.npx = id(node) ^ self.head.npx
            node.npx = id(self.head)
            self.head = node
        self.__nodes.append(node)

    # method to insert node at end
    def InsertAtEnd(self, value):
        """Append a new node holding ``value`` at the back of the list."""
        node = Node(value)
        if self.head is None:  # If list is empty
            self.head = node
            self.tail = node
        else:
            # link the old tail to the new node and make the new node tail
            self.tail.npx = id(node) ^ self.tail.npx
            node.npx = id(self.tail)
            self.tail = node
        self.__nodes.append(node)

    # method to remove node at beginning
    def DeleteAtStart(self):
        """Remove the first node.

        NOTE(review): only the more-than-2-node branch returns the removed
        value; the 1- and 2-node branches fall through and return None.
        """
        if self.isEmpty():  # If list is empty
            return "List is empty !"
        elif self.head == self.tail:  # If list has 1 node
            self.head = self.tail = None
        elif (0 ^ self.head.npx) == id(self.tail):  # If list has 2 nodes
            self.head = self.tail
            self.head.npx = self.tail.npx = 0
        else:  # If list has more than 2 nodes
            res = self.head.value
            x = self.__type_cast(0 ^ self.head.npx)  # Address of next node
            y = (id(self.head) ^ x.npx)  # Address of next of next node
            self.head = x
            self.head.npx = 0 ^ y
            return res

    # method to remove node at end
    def DeleteAtEnd(self):
        """Remove the last node.

        NOTE(review): only the more-than-2-node branch returns the removed
        value; the 1- and 2-node branches fall through and return None.
        """
        if self.isEmpty():  # If list is empty
            return "List is empty !"
        elif self.head == self.tail:  # If list has 1 node
            self.head = self.tail = None
        elif self.__type_cast(0 ^ self.head.npx) == (self.tail):  # If list has 2 nodes
            self.tail = self.head
            self.head.npx = self.tail.npx = 0
        else:  # If list has more than 2 nodes
            # walk to the tail, remembering the previous node's id
            prev_id = 0
            node = self.head
            next_id = 1
            while next_id:
                next_id = prev_id ^ node.npx
                if next_id:
                    prev_id = id(node)
                    node = self.__type_cast(next_id)
            res = node.value
            # unlink the tail from its predecessor and make it the new tail
            x = self.__type_cast(prev_id).npx ^ id(node)
            y = self.__type_cast(prev_id)
            y.npx = x ^ 0
            self.tail = y
            return res

    # method to traverse linked list
    def Print(self):
        """Print the node values front-to-back, separated by spaces.

        We print the values rather than returning them because returning
        would require appending all values to a list, which takes extra
        memory."""
        if self.head != None:
            prev_id = 0
            node = self.head
            next_id = 1
            print(node.value, end=' ')
            while next_id:
                next_id = prev_id ^ node.npx
                if next_id:
                    prev_id = id(node)
                    node = self.__type_cast(next_id)
                    print(node.value, end=' ')
                else:
                    return
        else:
            print("List is empty !")

    # method to traverse linked list in reverse order
    def ReversePrint(self):
        # Print values in reverse order.
        """Print the node values back-to-front, separated by spaces.

        We print the values rather than returning them because returning
        would require appending all values to a list, which takes extra
        memory."""
        if self.head != None:
            prev_id = 0
            node = self.tail
            next_id = 1
            print(node.value, end=' ')
            while next_id:
                next_id = prev_id ^ node.npx
                if next_id:
                    prev_id = id(node)
                    node = self.__type_cast(next_id)
                    print(node.value, end=' ')
                else:
                    return
        else:
            print("List is empty !")

    # method to get length of linked list
    def Length(self):
        """Return the number of nodes by walking the whole list."""
        if not self.isEmpty():
            prev_id = 0
            node = self.head
            next_id = 1
            count = 1
            while next_id:
                next_id = prev_id ^ node.npx
                if next_id:
                    prev_id = id(node)
                    node = self.__type_cast(next_id)
                    count += 1
                else:
                    return count
        else:
            return 0

    # method to get node data value by index
    def PrintByIndex(self, index):
        """Return the value stored at position ``index`` (0-based)."""
        prev_id = 0
        node = self.head
        for i in range(index):
            next_id = prev_id ^ node.npx
            if next_id:
                prev_id = id(node)
                node = self.__type_cast(next_id)
            else:
                return "Value dosn't found index out of range."
        return node.value

    # method to check if the linked list is empty or not
    def isEmpty(self):
        """Return True when the list holds no nodes."""
        if self.head is None:
            return True
        return False

    # method to return a new instance of type
    # NOTE(review): the parameter name shadows the builtin id()
    def __type_cast(self, id):
        """Convert a raw CPython object id back into the object it names."""
        return ctypes.cast(id, ctypes.py_object).value
# Driver code: build an XOR linked list, populate it and show its contents.
obj = XorLinkedList()

# append a few sample values at the end of the list
for value in (2, 3, 4, 0, 6, 55):
    obj.InsertAtEnd(value)

# walk the list front-to-back
print("\nTraverse linked list:")
obj.Print()
| [
"ctypes.cast"
] | [((4364, 4397), 'ctypes.cast', 'ctypes.cast', (['id', 'ctypes.py_object'], {}), '(id, ctypes.py_object)\n', (4375, 4397), False, 'import ctypes\n')] |
"""
class to read files in specific ways
"""
import glob
import random
class Filer:
    """
    Read files matching a glob pattern.
    """
    def __init__(self, file_path):
        """
        :param file_path: glob pattern of the files to read
        """
        self.path = file_path
    def get_random_iter(self):
        """
        Get the matching file paths together with their count.

        :return: tuple ``(file_iter, nb_files)`` where ``file_iter`` is the
            list of paths matching ``self.path`` and ``nb_files`` its length
        """
        # glob.glob already materializes all matches, so derive the count
        # from that list instead of scanning the file system a second
        # time with glob.iglob (the original did two full scans).
        file_iter = glob.glob(self.path)
        nb_files = len(file_iter)
        return file_iter, nb_files
| [
"glob.iglob",
"glob.glob"
] | [((301, 321), 'glob.glob', 'glob.glob', (['self.path'], {}), '(self.path)\n', (310, 321), False, 'import glob\n'), ((264, 285), 'glob.iglob', 'glob.iglob', (['self.path'], {}), '(self.path)\n', (274, 285), False, 'import glob\n')] |
import numpy as np
from scipy.stats import linregress as li
from math import exp
def calc_factor(field, stepsize=0.01):
    """
    Function for calculation of the summed binning.

    The returned result is an integral over the binning of the velocities,
    done for the negative and the positive half separately.

    :param field: 1D field which will be binned
    :param stepsize: step size for the velocity threshold
    :return (positive, negative): tuple of two numpy arrays of shape
        ``(n, 2)``; each row holds ``[alpha, fraction]`` where ``fraction``
        is the share of valid samples inside ``(0, alpha]`` (positive half)
        resp. ``(-alpha, 0]`` (negative half)
    """
    result_pos = []
    result_neg = []
    # Normalisation constant: number of valid (non-NaN) samples.  Hoisted
    # out of the loops (it is loop-invariant) and computed with the
    # builtin float -- the np.float alias was removed in NumPy 1.24.
    total = float(np.count_nonzero(~np.isnan(field)))
    alpha = 0.
    #: binning of the positive half
    while alpha <= np.max(field) + stepsize:
        filtered = np.copy(field)
        # keep only samples in (0, alpha]
        filtered[filtered <= 0.] = np.nan
        filtered[filtered > alpha] = np.nan
        outlier = np.count_nonzero(~np.isnan(filtered)) / total
        result_pos.append([alpha, outlier])
        alpha += stepsize
    alpha = 0.
    #: binning of the negative half
    while alpha <= np.abs(np.min(field)) + stepsize:
        filtered = np.copy(field)
        # keep only samples in (-alpha, 0]
        filtered[filtered <= -1. * alpha] = np.nan
        filtered[filtered > 0.] = np.nan
        outlier = np.count_nonzero(~np.isnan(filtered)) / total
        result_neg.append([-1. * alpha, outlier])
        alpha += stepsize
    return (np.array(result_pos), np.array(result_neg))
def calc_derivative(field, stepsize=0.01):
    """
    Function for calculation of the binning.

    The returned result is the binning of the velocities.  It is called
    derivative because it is mathematically the derivative of the function
    :func:`calc_factor`.  It is done for the negative and the positive
    half separately.

    :param field: 1D field which will be binned
    :param stepsize: step size (bin width) for the velocity
    :return (positive, negative): tuple of two numpy arrays of shape
        ``(n, 2)``; each row holds ``[alpha, fraction]`` where ``fraction``
        is the share of valid samples in the bin ``(alpha, alpha+stepsize]``
        (positive half) resp. ``(-alpha-stepsize, -alpha]`` (negative half)
    """
    result_pos = []
    result_neg = []
    # Loop-invariant normalisation constant (valid, i.e. non-NaN samples);
    # builtin float replaces the np.float alias removed in NumPy 1.24.
    total = float(np.count_nonzero(~np.isnan(field)))
    alpha = 0.
    while alpha <= np.max(field) + stepsize:
        filtered = np.copy(field)
        # keep only samples in the bin (alpha, alpha+stepsize]
        filtered[(filtered <= alpha) | (filtered > alpha + stepsize)] = np.nan
        outlier = np.count_nonzero(~np.isnan(filtered)) / total
        result_pos.append([alpha, outlier])
        alpha += stepsize
    alpha = 0.
    while alpha <= np.abs(np.min(field)) + stepsize:
        filtered = np.copy(field)
        # keep only samples in the bin (-alpha-stepsize, -alpha]
        filtered[(filtered <= -1. * (alpha + stepsize)) | (filtered > -1. * alpha)] = np.nan
        outlier = np.count_nonzero(~np.isnan(filtered)) / total
        result_neg.append([-1. * alpha, outlier])
        alpha += stepsize
    return (np.array(result_pos), np.array(result_neg))
def filter(piv, tfactor=3., dalpha=.01):
    """
    Function for calculating the cutoff values and masking outliers.

    The velocity distributions of ``piv.u`` and ``piv.v`` are binned
    (positive and negative half separately), a power law is fitted in
    log-log space to the cumulative binning up to the point where the bin
    occupancy drops below ``mean(first 5 bins) / exp(tfactor)``, and every
    sample outside the resulting interval is replaced by NaN in place.

    :param object piv: PIV class object.
        This is supposed to be an object from a Direct or adaptive class;
        only its velocity fields ``u`` and ``v`` are read and updated.
    :param double tfactor: factor for cutoff in the velocity binning.
        The default value is set to 3 which works for many cases.
    :param double dalpha: value for differential velocity (bin width).
        The default is set to .01 which works for many cases; if the
        velocities vary over a larger range use a larger value.
    """
    #: pre sampling: fraction of samples in each half
    # (np.float / np.int aliases were removed in NumPy 1.24; the plain
    # builtins float()/int() are used instead)
    numberup = np.count_nonzero(piv.u <= 0.) / float(np.count_nonzero(piv.u))
    numberun = np.count_nonzero(piv.u > 0.) / float(np.count_nonzero(piv.u))
    numbervp = np.count_nonzero(piv.v <= 0.) / float(np.count_nonzero(piv.v))
    numbervn = np.count_nonzero(piv.v > 0.) / float(np.count_nonzero(piv.v))
    upos = numberup
    uneg = numberun
    vpos = numbervp
    vneg = numbervn
    #: get alpha dependency (cumulative binning)
    up_alpha, un_alpha = calc_factor(piv.u, dalpha)
    vp_alpha, vn_alpha = calc_factor(piv.v, dalpha)
    #: calculate derivative (per-bin occupancy) directly from data
    dup_alpha1, dun_alpha1 = calc_derivative(piv.u, dalpha)
    dvp_alpha1, dvn_alpha1 = calc_derivative(piv.v, dalpha)
    dup_alpha = dup_alpha1[:, 1]
    dun_alpha = dun_alpha1[:, 1]
    dvp_alpha = dvp_alpha1[:, 1]
    dvn_alpha = dvn_alpha1[:, 1]
    # boundaries: mean occupancy of the first five bins damped by exp(tfactor)
    boundup = np.sum(dup_alpha[0:5]) / 5. / np.exp(tfactor)
    boundun = np.sum(dun_alpha[0:5]) / 5. / np.exp(tfactor)
    boundvp = np.sum(dvp_alpha[0:5]) / 5. / np.exp(tfactor)
    boundvn = np.sum(dvn_alpha[0:5]) / 5. / np.exp(tfactor)
    # fit a power law (linear fit in log-log space) up to the first bins
    # whose occupancy falls below the boundary and derive the cutoffs;
    # each half is skipped when it contains no samples
    if upos != 0.:
        indexup = np.where(dup_alpha < boundup)
        cut_up = int(np.sum(indexup[0][0:5]) / 5.)
        nup = np.polyfit(np.log(up_alpha[1:cut_up, 0]), np.log(up_alpha[1:cut_up, 1]), 1)
        upos = exp(-nup[1] / nup[0])
    if uneg != 0.:
        indexun = np.where(dun_alpha < boundun)
        cut_un = int(np.sum(indexun[0][0:5]) / 5.)
        nun = np.polyfit(np.log(-un_alpha[1:cut_un, 0]), np.log(un_alpha[1:cut_un, 1]), 1)
        uneg = -exp(-nun[1] / nun[0])
    if vpos != 0.:
        indexvp = np.where(dvp_alpha < boundvp)
        cut_vp = int(np.sum(indexvp[0][0:5]) / 5.)
        nvp = np.polyfit(np.log(vp_alpha[1:cut_vp, 0]), np.log(vp_alpha[1:cut_vp, 1]), 1)
        vpos = exp(-nvp[1] / nvp[0])
    if vneg != 0.:
        indexvn = np.where(dvn_alpha < boundvn)
        cut_vn = int(np.sum(indexvn[0][0:5]) / 5.)
        nvn = np.polyfit(np.log(-vn_alpha[1:cut_vn, 0]), np.log(vn_alpha[1:cut_vn, 1]), 1)
        vneg = -exp(-nvn[1] / nvn[0])
    # filter + clamping of the cutoffs to the observed value range
    if upos > np.max(piv.u):
        upos = np.max(piv.u)
    if uneg < np.min(piv.u):
        uneg = np.min(piv.u)
    if vpos > np.max(piv.v):
        vpos = np.max(piv.v)
    if vneg < np.min(piv.v):
        vneg = np.min(piv.v)
    # equalizing the cutoff with the half-plane occupation
    upos *= (0.5 + numberup)
    uneg *= (0.5 + numberun)
    vpos *= (0.5 + numbervp)
    vneg *= (0.5 + numbervn)
    # making the mask and writing NaN into the outliers in place
    masku = (piv.u < uneg) | (piv.u > upos)
    maskv = (piv.v < vneg) | (piv.v > vpos)
    piv.u[masku] = np.nan
    piv.v[maskv] = np.nan
| [
"numpy.copy",
"numpy.where",
"numpy.log",
"numpy.max",
"numpy.count_nonzero",
"numpy.array",
"numpy.exp",
"numpy.sum",
"numpy.isnan",
"numpy.min",
"math.exp"
] | [((785, 799), 'numpy.copy', 'np.copy', (['field'], {}), '(field)\n', (792, 799), True, 'import numpy as np\n'), ((1213, 1227), 'numpy.copy', 'np.copy', (['field'], {}), '(field)\n', (1220, 1227), True, 'import numpy as np\n'), ((1496, 1516), 'numpy.array', 'np.array', (['result_pos'], {}), '(result_pos)\n', (1504, 1516), True, 'import numpy as np\n'), ((1517, 1537), 'numpy.array', 'np.array', (['result_neg'], {}), '(result_neg)\n', (1525, 1537), True, 'import numpy as np\n'), ((2334, 2348), 'numpy.copy', 'np.copy', (['field'], {}), '(field)\n', (2341, 2348), True, 'import numpy as np\n'), ((2781, 2795), 'numpy.copy', 'np.copy', (['field'], {}), '(field)\n', (2788, 2795), True, 'import numpy as np\n'), ((3084, 3104), 'numpy.array', 'np.array', (['result_pos'], {}), '(result_pos)\n', (3092, 3104), True, 'import numpy as np\n'), ((3105, 3125), 'numpy.array', 'np.array', (['result_neg'], {}), '(result_neg)\n', (3113, 3125), True, 'import numpy as np\n'), ((3750, 3780), 'numpy.count_nonzero', 'np.count_nonzero', (['(piv.u <= 0.0)'], {}), '(piv.u <= 0.0)\n', (3766, 3780), True, 'import numpy as np\n'), ((3827, 3856), 'numpy.count_nonzero', 'np.count_nonzero', (['(piv.u > 0.0)'], {}), '(piv.u > 0.0)\n', (3843, 3856), True, 'import numpy as np\n'), ((3903, 3933), 'numpy.count_nonzero', 'np.count_nonzero', (['(piv.v <= 0.0)'], {}), '(piv.v <= 0.0)\n', (3919, 3933), True, 'import numpy as np\n'), ((3980, 4009), 'numpy.count_nonzero', 'np.count_nonzero', (['(piv.v > 0.0)'], {}), '(piv.v > 0.0)\n', (3996, 4009), True, 'import numpy as np\n'), ((4610, 4625), 'numpy.exp', 'np.exp', (['tfactor'], {}), '(tfactor)\n', (4616, 4625), True, 'import numpy as np\n'), ((4666, 4681), 'numpy.exp', 'np.exp', (['tfactor'], {}), '(tfactor)\n', (4672, 4681), True, 'import numpy as np\n'), ((4722, 4737), 'numpy.exp', 'np.exp', (['tfactor'], {}), '(tfactor)\n', (4728, 4737), True, 'import numpy as np\n'), ((4778, 4793), 'numpy.exp', 'np.exp', (['tfactor'], {}), '(tfactor)\n', (4784, 4793), 
True, 'import numpy as np\n'), ((4865, 4894), 'numpy.where', 'np.where', (['(dup_alpha < boundup)'], {}), '(dup_alpha < boundup)\n', (4873, 4894), True, 'import numpy as np\n'), ((5048, 5069), 'math.exp', 'exp', (['(-nup[1] / nup[0])'], {}), '(-nup[1] / nup[0])\n', (5051, 5069), False, 'from math import exp\n'), ((5105, 5134), 'numpy.where', 'np.where', (['(dun_alpha < boundun)'], {}), '(dun_alpha < boundun)\n', (5113, 5134), True, 'import numpy as np\n'), ((5345, 5374), 'numpy.where', 'np.where', (['(dvp_alpha < boundvp)'], {}), '(dvp_alpha < boundvp)\n', (5353, 5374), True, 'import numpy as np\n'), ((5528, 5549), 'math.exp', 'exp', (['(-nvp[1] / nvp[0])'], {}), '(-nvp[1] / nvp[0])\n', (5531, 5549), False, 'from math import exp\n'), ((5585, 5614), 'numpy.where', 'np.where', (['(dvn_alpha < boundvn)'], {}), '(dvn_alpha < boundvn)\n', (5593, 5614), True, 'import numpy as np\n'), ((5826, 5839), 'numpy.max', 'np.max', (['piv.u'], {}), '(piv.u)\n', (5832, 5839), True, 'import numpy as np\n'), ((5856, 5869), 'numpy.max', 'np.max', (['piv.u'], {}), '(piv.u)\n', (5862, 5869), True, 'import numpy as np\n'), ((5884, 5897), 'numpy.min', 'np.min', (['piv.u'], {}), '(piv.u)\n', (5890, 5897), True, 'import numpy as np\n'), ((5914, 5927), 'numpy.min', 'np.min', (['piv.u'], {}), '(piv.u)\n', (5920, 5927), True, 'import numpy as np\n'), ((5942, 5955), 'numpy.max', 'np.max', (['piv.v'], {}), '(piv.v)\n', (5948, 5955), True, 'import numpy as np\n'), ((5972, 5985), 'numpy.max', 'np.max', (['piv.v'], {}), '(piv.v)\n', (5978, 5985), True, 'import numpy as np\n'), ((6000, 6013), 'numpy.min', 'np.min', (['piv.v'], {}), '(piv.v)\n', (6006, 6013), True, 'import numpy as np\n'), ((6030, 6043), 'numpy.min', 'np.min', (['piv.v'], {}), '(piv.v)\n', (6036, 6043), True, 'import numpy as np\n'), ((705, 718), 'numpy.max', 'np.max', (['field'], {}), '(field)\n', (711, 718), True, 'import numpy as np\n'), ((2242, 2255), 'numpy.max', 'np.max', (['field'], {}), '(field)\n', (2248, 2255), True, 'import 
numpy as np\n'), ((3787, 3810), 'numpy.count_nonzero', 'np.count_nonzero', (['piv.u'], {}), '(piv.u)\n', (3803, 3810), True, 'import numpy as np\n'), ((3863, 3886), 'numpy.count_nonzero', 'np.count_nonzero', (['piv.u'], {}), '(piv.u)\n', (3879, 3886), True, 'import numpy as np\n'), ((3940, 3963), 'numpy.count_nonzero', 'np.count_nonzero', (['piv.v'], {}), '(piv.v)\n', (3956, 3963), True, 'import numpy as np\n'), ((4016, 4039), 'numpy.count_nonzero', 'np.count_nonzero', (['piv.v'], {}), '(piv.v)\n', (4032, 4039), True, 'import numpy as np\n'), ((4584, 4606), 'numpy.sum', 'np.sum', (['dup_alpha[0:5]'], {}), '(dup_alpha[0:5])\n', (4590, 4606), True, 'import numpy as np\n'), ((4640, 4662), 'numpy.sum', 'np.sum', (['dun_alpha[0:5]'], {}), '(dun_alpha[0:5])\n', (4646, 4662), True, 'import numpy as np\n'), ((4696, 4718), 'numpy.sum', 'np.sum', (['dvp_alpha[0:5]'], {}), '(dvp_alpha[0:5])\n', (4702, 4718), True, 'import numpy as np\n'), ((4752, 4774), 'numpy.sum', 'np.sum', (['dvn_alpha[0:5]'], {}), '(dvn_alpha[0:5])\n', (4758, 4774), True, 'import numpy as np\n'), ((4970, 4999), 'numpy.log', 'np.log', (['up_alpha[1:cut_up, 0]'], {}), '(up_alpha[1:cut_up, 0])\n', (4976, 4999), True, 'import numpy as np\n'), ((5000, 5029), 'numpy.log', 'np.log', (['up_alpha[1:cut_up, 1]'], {}), '(up_alpha[1:cut_up, 1])\n', (5006, 5029), True, 'import numpy as np\n'), ((5210, 5240), 'numpy.log', 'np.log', (['(-un_alpha[1:cut_un, 0])'], {}), '(-un_alpha[1:cut_un, 0])\n', (5216, 5240), True, 'import numpy as np\n'), ((5240, 5269), 'numpy.log', 'np.log', (['un_alpha[1:cut_un, 1]'], {}), '(un_alpha[1:cut_un, 1])\n', (5246, 5269), True, 'import numpy as np\n'), ((5288, 5309), 'math.exp', 'exp', (['(-nun[1] / nun[0])'], {}), '(-nun[1] / nun[0])\n', (5291, 5309), False, 'from math import exp\n'), ((5450, 5479), 'numpy.log', 'np.log', (['vp_alpha[1:cut_vp, 0]'], {}), '(vp_alpha[1:cut_vp, 0])\n', (5456, 5479), True, 'import numpy as np\n'), ((5480, 5509), 'numpy.log', 'np.log', (['vp_alpha[1:cut_vp, 
1]'], {}), '(vp_alpha[1:cut_vp, 1])\n', (5486, 5509), True, 'import numpy as np\n'), ((5690, 5720), 'numpy.log', 'np.log', (['(-vn_alpha[1:cut_vn, 0])'], {}), '(-vn_alpha[1:cut_vn, 0])\n', (5696, 5720), True, 'import numpy as np\n'), ((5720, 5749), 'numpy.log', 'np.log', (['vn_alpha[1:cut_vn, 1]'], {}), '(vn_alpha[1:cut_vn, 1])\n', (5726, 5749), True, 'import numpy as np\n'), ((5768, 5789), 'math.exp', 'exp', (['(-nvn[1] / nvn[0])'], {}), '(-nvn[1] / nvn[0])\n', (5771, 5789), False, 'from math import exp\n'), ((1128, 1141), 'numpy.min', 'np.min', (['field'], {}), '(field)\n', (1134, 1141), True, 'import numpy as np\n'), ((2678, 2691), 'numpy.min', 'np.min', (['field'], {}), '(field)\n', (2684, 2691), True, 'import numpy as np\n'), ((4917, 4940), 'numpy.sum', 'np.sum', (['indexup[0][0:5]'], {}), '(indexup[0][0:5])\n', (4923, 4940), True, 'import numpy as np\n'), ((5157, 5180), 'numpy.sum', 'np.sum', (['indexun[0][0:5]'], {}), '(indexun[0][0:5])\n', (5163, 5180), True, 'import numpy as np\n'), ((5397, 5420), 'numpy.sum', 'np.sum', (['indexvp[0][0:5]'], {}), '(indexvp[0][0:5])\n', (5403, 5420), True, 'import numpy as np\n'), ((5637, 5660), 'numpy.sum', 'np.sum', (['indexvn[0][0:5]'], {}), '(indexvn[0][0:5])\n', (5643, 5660), True, 'import numpy as np\n'), ((917, 935), 'numpy.isnan', 'np.isnan', (['filtered'], {}), '(filtered)\n', (925, 935), True, 'import numpy as np\n'), ((1345, 1363), 'numpy.isnan', 'np.isnan', (['filtered'], {}), '(filtered)\n', (1353, 1363), True, 'import numpy as np\n'), ((2486, 2504), 'numpy.isnan', 'np.isnan', (['filtered'], {}), '(filtered)\n', (2494, 2504), True, 'import numpy as np\n'), ((2933, 2951), 'numpy.isnan', 'np.isnan', (['filtered'], {}), '(filtered)\n', (2941, 2951), True, 'import numpy as np\n'), ((964, 979), 'numpy.isnan', 'np.isnan', (['field'], {}), '(field)\n', (972, 979), True, 'import numpy as np\n'), ((1392, 1407), 'numpy.isnan', 'np.isnan', (['field'], {}), '(field)\n', (1400, 1407), True, 'import numpy as np\n'), ((2533, 
2548), 'numpy.isnan', 'np.isnan', (['field'], {}), '(field)\n', (2541, 2548), True, 'import numpy as np\n'), ((2980, 2995), 'numpy.isnan', 'np.isnan', (['field'], {}), '(field)\n', (2988, 2995), True, 'import numpy as np\n')] |
from machine import Pin, UART
from grip import Grip
import time
class Floppy:
AXIS_POS_LIMIT = (0, 5, 5)
AXIS_NEG_LIMIT = (-7.5, 0, -5)
def __init__(self):
# region Attributes
self._speed = 20
self._buffer = 0
self._pos_tracker = [0.0, 0.0, 0.0]
# endregion
self.error_led = Pin(21, Pin.OUT)
self.rst_grbl = Pin(13, Pin.OUT)
self.rst_grbl(1)
for i in range(3):
self.error_led(1)
time.sleep(0.5)
self.error_led(0)
time.sleep(0.5)
self._uart = UART(1, 115200)
self._uart.init(115200, tx=17, rx=16)
self.grip = Grip(pin=4)
# Initialization
self.reset_grbl()
self.read() # Flush
def reset_grbl(self) -> None:
self.rst_grbl(0)
time.sleep_us(5)
self.rst_grbl(1)
time.sleep(2) # Wait for grbl to initialize
def get_state(self) -> str:
self.read() # Flush
self._uart.write(b"?\n")
time.sleep_ms(100)
data = self._uart.readline().decode().split("|")[0][1:]
self.read() # Flush
return data
def get_position(self) -> tuple:
self.read() # Flush
self._uart.write(b"?\n")
time.sleep_ms(100)
data = self._uart.readline().decode()
data = data.replace("\n", "").replace("\r", "")
data = [float(x) for x in data.split("|")[1].split(":")[1].split(",")]
# data = [sum(x) for x in zip(data, self._offset)]
self.read() # Flush
return tuple(data)
def move_to(self, joint: tuple, relative=False, jog=False) -> None:
# 10 per revolution.
if self.get_state() == "Idle":
self._buffer = 0
if self._buffer >= 10:
self.error_led(1)
raise Exception("Buffer overflow, a maximum of 10 commands can be sent simultaneously. Abort")
else:
if jog:
cmd = "$J="
else:
cmd = "G1"
if relative:
cmd += "G91"
new_pos = list()
for j, n in zip(joint, self._pos_tracker):
if j is not None:
new_pos.append(sum((j, n)))
else:
cmd += "G90"
new_pos = joint
if not jog:
for pos, neg_limit, pos_limit in zip(new_pos, self.AXIS_NEG_LIMIT, self.AXIS_POS_LIMIT):
if pos is not None:
if pos > pos_limit or pos < neg_limit:
self.error_led(1)
raise ValueError("Trying to move outside limits.")
self._pos_tracker = new_pos
for axis, joint in zip(("X", "Y", "Z"), joint):
if joint is not None:
cmd += axis + str(joint)
cmd += "F" + str(self.speed) + "\n"
self._uart.write(cmd.encode())
self.read() # Flush
self._buffer += 1
def cancel_jog(self):
self._uart.write(b"\x85")
self.read() # Flush
def wait_until_idle(self) -> None:
time.sleep(0.2)
msg = self.get_state()
while msg != "Idle":
time.sleep(0.2)
msg = self.get_state()
def read(self) -> None:
msg = self._uart.read()
if msg is None:
return
if "error" in msg.decode():
self.error_led(1)
raise Exception("GRBL respond with error. Abort")
def disable_motors(self, force=False) -> None:
if force or self.get_position() == (0.0, 0.0, 0.0):
self._uart.write("$SLP\n")
else:
self.error_led(1)
raise Exception("Could not disable motors while not at home position. Abort")
def send_command(self, command: str) -> None:
self._uart.write(command + "\n")
time.sleep_ms(100)
print(self._uart.read().decode())
@property
def speed(self) -> int:
return self._speed
@speed.setter
def speed(self, value: int) -> None:
if 0 < value <= 500:
self._speed = value
else:
self.error_led(1)
raise ValueError("Speed must be between 1 and 500")
floppy = Floppy()
| [
"time.sleep",
"machine.Pin",
"grip.Grip",
"time.sleep_ms",
"machine.UART",
"time.sleep_us"
] | [((342, 358), 'machine.Pin', 'Pin', (['(21)', 'Pin.OUT'], {}), '(21, Pin.OUT)\n', (345, 358), False, 'from machine import Pin, UART\n'), ((383, 399), 'machine.Pin', 'Pin', (['(13)', 'Pin.OUT'], {}), '(13, Pin.OUT)\n', (386, 399), False, 'from machine import Pin, UART\n'), ((591, 606), 'machine.UART', 'UART', (['(1)', '(115200)'], {}), '(1, 115200)\n', (595, 606), False, 'from machine import Pin, UART\n'), ((674, 685), 'grip.Grip', 'Grip', ([], {'pin': '(4)'}), '(pin=4)\n', (678, 685), False, 'from grip import Grip\n'), ((836, 852), 'time.sleep_us', 'time.sleep_us', (['(5)'], {}), '(5)\n', (849, 852), False, 'import time\n'), ((887, 900), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (897, 900), False, 'import time\n'), ((1037, 1055), 'time.sleep_ms', 'time.sleep_ms', (['(100)'], {}), '(100)\n', (1050, 1055), False, 'import time\n'), ((1281, 1299), 'time.sleep_ms', 'time.sleep_ms', (['(100)'], {}), '(100)\n', (1294, 1299), False, 'import time\n'), ((3207, 3222), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (3217, 3222), False, 'import time\n'), ((3967, 3985), 'time.sleep_ms', 'time.sleep_ms', (['(100)'], {}), '(100)\n', (3980, 3985), False, 'import time\n'), ((495, 510), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (505, 510), False, 'import time\n'), ((553, 568), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (563, 568), False, 'import time\n'), ((3295, 3310), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (3305, 3310), False, 'import time\n')] |
import urllib3
import pandas as pd
import numpy as np
import zipfile
import copy
import pickle
import os
from esig import tosig
from tqdm import tqdm
from multiprocessing import Pool
from functools import partial
from os import listdir
from os.path import isfile, join
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score, confusion_matrix
def get_inputs():
url = "https://storage.googleapis.com/challenge-2012-1.0.0.physionet.org/set-a.zip"
http = urllib3.PoolManager()
r = http.request('GET', url, preload_content=False)
with open('data/input.zip', 'wb') as out:
while True:
data = r.read()
if not data:
break
out.write(data)
r.release_conn()
zip_ref = zipfile.ZipFile("data/input.zip", 'r')
zip_ref.extractall("data/")
zip_ref.close()
data = {}
list_files = [f for f in listdir(
"data/set-a") if isfile(join("data/set-a", f))]
for f in list_files:
df = pd.read_csv(join("data/set-a", f))
patient_id = int(df.values[0, 2])
data[patient_id] = df
return data
def get_outputs():
url = "https://storage.googleapis.com/challenge-2012-1.0.0.physionet.org/Outcomes-a.txt"
data_df = pd.read_csv(url)
data = {}
for patient in data_df.values:
patient_id = int(patient[0])
data[patient_id] = patient[-1]
return data
def download():
X_dict, Y_dict = get_inputs(), get_outputs()
X = []
Y = []
for patient_id in X_dict:
X.append(X_dict[patient_id])
Y.append(Y_dict[patient_id])
print("Data for %s patients downloaded." % len(X))
return X, Y
def split(X, Y, proportion=0.75):
idx = int(len(X)*proportion)
print("Dataset split in a training set of %s and testing set of %s patients." % (
idx, len(X)-idx))
return X[:idx], Y[:idx], X[idx:], Y[idx:]
def features_point(x):
static, path = x
maximums = np.max(path, axis=0)
minimums = np.min(path, axis=0)
last_observation = path[-1]
return np.concatenate([static, maximums, minimums, last_observation])
def extract(X):
return list(map(features_point, X))
def lead_lag(mylist):
leadlist = np.concatenate([[mylist[0]], mylist])
laglist = np.concatenate([mylist, [mylist[-1]]])
return np.concatenate([leadlist, laglist], axis=1)
def add_time(mylist, init_time=0., total_time=1.):
ans = [[init_time + xn * total_time /
(len(mylist)-1)] + list(x) for (xn, x) in enumerate(mylist)]
return np.array(ans)
def home_and_pen_off(mylist):
ans = [list(x) + [1.] for x in mylist]
last = list(ans[-1])
last[-1] = 0.
ans.append(last)
ans.append([0 for item in last])
return np.array(ans)
def refocus(path, centre):
return np.concatenate((centre[::-1], path), axis=0)
def train(features, Y):
classifier = RandomForestClassifier()
classifier.fit(features, Y)
return classifier
def normalise_point(x):
static, path = x
path[:, 0] /= 2.
return [static, path]
def normalise(X):
return list(map(normalise_point, X))
def evaluate(classifier, features, Y):
THRESHOLD = .3
predictions_proba = classifier.predict_proba(features)[:, 1]
predictions = [1. if pred >
THRESHOLD else 0. for pred in predictions_proba]
cm = confusion_matrix(Y, predictions)
Se = cm[1, 1] / float(cm[1, 1] + cm[1, 0])
P = cm[1, 1] / float(cm[1, 1] + cm[0, 1])
score = min(Se, P)
print("Score of predictions: %s" % score)
def to_path(df, dynamic_variables):
dim = len(dynamic_variables) + 1
path = [[0.]*dim]
for event in df.values:
if event[1] in dynamic_variables:
new_value = copy.deepcopy(path[-1])
idx = 1 + dynamic_variables.index(event[1])
new_value[idx] = event[2]
hour, min = event[0].split(":")
days = (float(hour) + float(min) / 60.)/24.
new_value[0] = days
path.append(new_value)
path = np.array(path)
unique_times = np.unique(path[:, 0])
idx = []
for time in unique_times:
last_idx = np.where(path[:, 0] == time)[0][-1]
idx.append(last_idx)
path = path[idx]
return path
def static_features(df, static_variables):
return df[df["Parameter"].isin(static_variables)]["Value"].values
def reformat(X, static_variables, dynamic_variables):
for i, x in enumerate(X):
dynamic = to_path(x, dynamic_variables=dynamic_variables)
static = static_features(x, static_variables=static_variables)
X[i] = [static, dynamic]
return X
def st2si(order, stream):
if order > 1:
return(tosig.stream2sig(stream, order))
else:
if order == 1:
return np.concatenate((np.array([1]), stream[-1] - stream[0]), axis=0)
else:
return np.array([1])
def compute(X, order=2):
func = partial(st2si, order)
pool = Pool()
n_samples = len(X)
signatures = []
try:
signatures = np.array(list(tqdm(pool.imap(func, X), total=n_samples)))
except Exception as e:
print('Failed to compute signatures: ' + repr(e))
signatures = []
return signatures
def predict(classifier, url):
http = urllib3.PoolManager()
r = http.request('GET', url, preload_content=False)
with open('data/test_input.txt', 'wb') as out:
while True:
data = r.read()
if not data:
break
out.write(data)
r.release_conn()
data = {}
df = pd.read_csv("data/test_input.txt")
patient_id = int(df.values[0, 2])
data[patient_id] = df
X = []
for patient_id in data:
X.append(data[patient_id])
X = reformat(X, static_variables=["Age", "Gender"], dynamic_variables=[
"Creatinine", "Glucose"])
X = normalise(X)
X = extract(X)
# [0] means in-house dead [1] means in-house alive
print('Predicted result: ' + classifier.predict(X))
if __name__ == "__main__":
# DOWNLOAD & REFORMAT EVENT DATA, TRANSFORM TIME DEPENDENT VARIABLES
X, Y = download()
X = reformat(X, static_variables=["Age", "Gender"], dynamic_variables=[
"Creatinine", "Glucose"])
# NORMALISE & EXTRACT FEATURES
X = normalise(X)
features = extract(X)
# TRAIN THE MODEL BY SPLITING
features_train, Y_train, features_test, Y_test = split(
features, Y, proportion=0.75)
classifier = train(features_train, Y_train)
predict(classifier, 'https://storage.googleapis.com/challenge-2012-1.0.0.physionet.org/set-a/132539.txt')
# EVALUATE PERFORMANCE
evaluate(classifier, features_test, Y_test)
| [
"os.listdir",
"copy.deepcopy",
"numpy.unique",
"zipfile.ZipFile",
"pandas.read_csv",
"numpy.where",
"os.path.join",
"sklearn.ensemble.RandomForestClassifier",
"numpy.max",
"numpy.array",
"urllib3.PoolManager",
"functools.partial",
"numpy.concatenate",
"numpy.min",
"multiprocessing.Pool",... | [((500, 521), 'urllib3.PoolManager', 'urllib3.PoolManager', ([], {}), '()\n', (519, 521), False, 'import urllib3\n'), ((784, 822), 'zipfile.ZipFile', 'zipfile.ZipFile', (['"""data/input.zip"""', '"""r"""'], {}), "('data/input.zip', 'r')\n", (799, 822), False, 'import zipfile\n'), ((1275, 1291), 'pandas.read_csv', 'pd.read_csv', (['url'], {}), '(url)\n', (1286, 1291), True, 'import pandas as pd\n'), ((1989, 2009), 'numpy.max', 'np.max', (['path'], {'axis': '(0)'}), '(path, axis=0)\n', (1995, 2009), True, 'import numpy as np\n'), ((2025, 2045), 'numpy.min', 'np.min', (['path'], {'axis': '(0)'}), '(path, axis=0)\n', (2031, 2045), True, 'import numpy as np\n'), ((2090, 2152), 'numpy.concatenate', 'np.concatenate', (['[static, maximums, minimums, last_observation]'], {}), '([static, maximums, minimums, last_observation])\n', (2104, 2152), True, 'import numpy as np\n'), ((2250, 2287), 'numpy.concatenate', 'np.concatenate', (['[[mylist[0]], mylist]'], {}), '([[mylist[0]], mylist])\n', (2264, 2287), True, 'import numpy as np\n'), ((2302, 2340), 'numpy.concatenate', 'np.concatenate', (['[mylist, [mylist[-1]]]'], {}), '([mylist, [mylist[-1]]])\n', (2316, 2340), True, 'import numpy as np\n'), ((2352, 2395), 'numpy.concatenate', 'np.concatenate', (['[leadlist, laglist]'], {'axis': '(1)'}), '([leadlist, laglist], axis=1)\n', (2366, 2395), True, 'import numpy as np\n'), ((2575, 2588), 'numpy.array', 'np.array', (['ans'], {}), '(ans)\n', (2583, 2588), True, 'import numpy as np\n'), ((2776, 2789), 'numpy.array', 'np.array', (['ans'], {}), '(ans)\n', (2784, 2789), True, 'import numpy as np\n'), ((2830, 2874), 'numpy.concatenate', 'np.concatenate', (['(centre[::-1], path)'], {'axis': '(0)'}), '((centre[::-1], path), axis=0)\n', (2844, 2874), True, 'import numpy as np\n'), ((2918, 2942), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (2940, 2942), False, 'from sklearn.ensemble import RandomForestClassifier\n'), 
((3386, 3418), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['Y', 'predictions'], {}), '(Y, predictions)\n', (3402, 3418), False, 'from sklearn.metrics import roc_auc_score, confusion_matrix\n'), ((4069, 4083), 'numpy.array', 'np.array', (['path'], {}), '(path)\n', (4077, 4083), True, 'import numpy as np\n'), ((4103, 4124), 'numpy.unique', 'np.unique', (['path[:, 0]'], {}), '(path[:, 0])\n', (4112, 4124), True, 'import numpy as np\n'), ((4968, 4989), 'functools.partial', 'partial', (['st2si', 'order'], {}), '(st2si, order)\n', (4975, 4989), False, 'from functools import partial\n'), ((5001, 5007), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (5005, 5007), False, 'from multiprocessing import Pool\n'), ((5313, 5334), 'urllib3.PoolManager', 'urllib3.PoolManager', ([], {}), '()\n', (5332, 5334), False, 'import urllib3\n'), ((5612, 5646), 'pandas.read_csv', 'pd.read_csv', (['"""data/test_input.txt"""'], {}), "('data/test_input.txt')\n", (5623, 5646), True, 'import pandas as pd\n'), ((4734, 4765), 'esig.tosig.stream2sig', 'tosig.stream2sig', (['stream', 'order'], {}), '(stream, order)\n', (4750, 4765), False, 'from esig import tosig\n'), ((919, 940), 'os.listdir', 'listdir', (['"""data/set-a"""'], {}), "('data/set-a')\n", (926, 940), False, 'from os import listdir\n'), ((1035, 1056), 'os.path.join', 'join', (['"""data/set-a"""', 'f'], {}), "('data/set-a', f)\n", (1039, 1056), False, 'from os.path import isfile, join\n'), ((3772, 3795), 'copy.deepcopy', 'copy.deepcopy', (['path[-1]'], {}), '(path[-1])\n', (3785, 3795), False, 'import copy\n'), ((4916, 4929), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (4924, 4929), True, 'import numpy as np\n'), ((960, 981), 'os.path.join', 'join', (['"""data/set-a"""', 'f'], {}), "('data/set-a', f)\n", (964, 981), False, 'from os.path import isfile, join\n'), ((4187, 4215), 'numpy.where', 'np.where', (['(path[:, 0] == time)'], {}), '(path[:, 0] == time)\n', (4195, 4215), True, 'import numpy as np\n'), ((4835, 
4848), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (4843, 4848), True, 'import numpy as np\n')] |
#-------------------------------------------------------------------------------
# Filename: create_pics.py
# Description: creates square pictures out of a picture which is mostly empty
# for training a neural network later.
# The parameters to fool around with include:
# factor: scaled down image for faster image processing
# sq_size: size of square that is used to construct the standard-deviation map
# cutoff: cutoff for standard deviation
# Authors: <NAME>, <NAME>
#-------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from os import listdir, path, makedirs
import argparse
import sys
# class MyParser(argparse.ArgumentParser):
# def error(self, message):
# sys.stderr.write('error: %s\n' % message)
# self.print_help()
# sys.exit(2)
def pics(from_path='raw_data',to_path='preproc_data'):
# parser = MyParser()
# parser.add_argument('input_folder', nargs='+')
# parser.add_argument('output_folder', nargs='+')
# args = parser.parse_args()
# from_path = args.input_folder[0]
if not from_path[-1]=='/':
from_path+=('/')
# to_path = args.output_folder[0]
if not to_path[-1]=='/':
to_path+=('/')
#check whether input path exists
if not path.exists(from_path):
raise IOError("input directory {0} does not exist, exiting script".format(from_path))
#possible image file extensions.
exts = ['.jpg', '.png', '.tif', '.bmp']
# input file dimensions
xdim = 1330 #2560
ydim = 884 #1920
# output file dimensions
dim = 80 #256
export_ext = '.png' #extension files will be saved
#first, find all the image file in the directory
files = listdir(from_path)
filenames = []
extensions = []
for f in files:
name, ext = path.splitext(from_path+f)
if ext in exts:
filenames.append(name)
extensions.append(ext)
print("found {0} image files in folder {1}".format(len(filenames), from_path))
total_flakes = 0
good_flakes = 0
missed_flakes = 0
#start the actual work of cutting the pictures into smaller pictures
for i, filename in enumerate(filenames):
print("starting with new image file: {0}{1}".format(filename,
extensions[i]))
#first, check for the .csv file with the coordinates of good flakes
good_ones = []
try:
with open(filename+".csv") as f:
content = f.read().splitlines()
for line in content:
good_ones.append(line.split(','))
except IOError:
print("Warning: Couldn't find file {0}.csv, assume there's no good flakes".format(filename))
# open image
full_im = Image.open(filename+extensions[i])
Lx = full_im.size[0] #x dimension of picture
Ly = full_im.size[1] #y dimension of picture
# we want to work on pictures of equal size, so if they are not the right
# size, we rescale them.
scalex = 1.
scaley = 1.
if not Lx == xdim:
scalex = float(xdim) / Lx
scaley = float(ydim) / Ly
full_im = full_im.resize((xdim, ydim))
print("picture is too big, resizing to ({0}, {1})".format(xdim, ydim))
#to speed up the whole work, we resize the image for the first step
factor = 8
lx = int(xdim/factor) # resized x dimension
ly = int(ydim/factor) # resized y dimension
small_im = full_im.resize((lx, ly))
sq_size = dim//factor # size of square in resized image
cutoff = 5 #was 2.75 # cutoff for standard deviation
#calculate the standard deviation of the black and white images
# (convert('L') returns a BW image)
stds = np.zeros((lx-sq_size, ly-sq_size))
for k in range(lx-sq_size):
for l in range(ly-sq_size):
tmp_im = small_im.crop((k, l, k+sq_size, l+sq_size))
stds[k,l] = np.std(list(tmp_im.convert('L').getdata()))
Lstds = np.reshape(stds, (lx-sq_size)*(ly-sq_size))
sorted_stds = np.argsort(Lstds)
centers = []
for j in reversed(sorted_stds):
if Lstds[j]< cutoff: break
ix = int(j/(ly-sq_size))+sq_size/2
iy = j%(ly-sq_size)+sq_size/2
included = False
for c in centers:
if (abs(c[0]-ix) < sq_size) and (abs(c[1]-iy)<sq_size):
included = True
continue
if included: continue
ix = min(max(sq_size, ix), lx-sq_size)
iy = min(max(sq_size, iy), ly-sq_size)
centers.append((ix, iy))
print("identified {0} potential candidates in image {1}".format(len(centers), filename))
total_flakes += len(centers)
squares = []
coordinates = []
for c in centers:
ix = c[0]*factor
iy = c[1]*factor
coordinates.append([ix, iy])
x0 = ix - factor*sq_size
x1 = ix + factor*sq_size
y0 = iy - factor*sq_size
y1 = iy + factor*sq_size
squares.append(full_im.crop((x0, y0, x1, y1)))
if not path.exists(to_path):
print("{0} does not exist yet, creating it".format(to_path))
makedirs(to_path)
found = np.zeros(len(good_ones)) # to make sure we found all good ones
for k in range(len(squares)):
x = coordinates[k][0]
y = coordinates[k][1]
bad = True
name = filename.split('/')[-1]
for j, good in enumerate(good_ones):
g0 = scalex*float(good[0])
g1 = scaley*float(good[1])
if (abs(g0-x) < factor*sq_size) and (abs(g1-y)<factor*sq_size):
this_file = to_path+name+"_" + str(coordinates[k][0])\
+ "_" + str(coordinates[k][1])+"_0A"+ export_ext
squares[k].resize((dim, dim)).save(this_file)
for t in range(5):
this_file = to_path+name + "_" + str(coordinates[k][0]) + \
"_" + str(coordinates[k][1])+"_{0}A".format(t+1)+ export_ext
squares[k].transpose(t).resize((dim, dim)).save(this_file)
found[j]=1
bad = False
good_flakes += 1
if not bad: continue
this_file = to_path + name +"_" + str(coordinates[k][0]) + "_" + \
str(coordinates[k][1])+"_B" + export_ext
squares[k].resize((dim, dim)).save(this_file)
if np.sum(found)<len(good_ones):
missed_flakes += len(good_ones) - np.sum(found)
print("Warning: We have missed a good one in {0}".format(filename))
print("(should have found {0}, found {1}instead".format( \
len(good_ones), np.sum(found)))
print("")
print("total flakes found: {0}".format(total_flakes))
print("of which are good : {0}".format(good_flakes))
print("good flakes missed: {0}".format(int(missed_flakes)))
| [
"os.path.exists",
"os.listdir",
"PIL.Image.open",
"numpy.reshape",
"os.makedirs",
"os.path.splitext",
"numpy.argsort",
"numpy.sum",
"numpy.zeros"
] | [((1785, 1803), 'os.listdir', 'listdir', (['from_path'], {}), '(from_path)\n', (1792, 1803), False, 'from os import listdir, path, makedirs\n'), ((1343, 1365), 'os.path.exists', 'path.exists', (['from_path'], {}), '(from_path)\n', (1354, 1365), False, 'from os import listdir, path, makedirs\n'), ((1883, 1911), 'os.path.splitext', 'path.splitext', (['(from_path + f)'], {}), '(from_path + f)\n', (1896, 1911), False, 'from os import listdir, path, makedirs\n'), ((2826, 2862), 'PIL.Image.open', 'Image.open', (['(filename + extensions[i])'], {}), '(filename + extensions[i])\n', (2836, 2862), False, 'from PIL import Image\n'), ((3863, 3901), 'numpy.zeros', 'np.zeros', (['(lx - sq_size, ly - sq_size)'], {}), '((lx - sq_size, ly - sq_size))\n', (3871, 3901), True, 'import numpy as np\n'), ((4132, 4181), 'numpy.reshape', 'np.reshape', (['stds', '((lx - sq_size) * (ly - sq_size))'], {}), '(stds, (lx - sq_size) * (ly - sq_size))\n', (4142, 4181), True, 'import numpy as np\n'), ((4198, 4215), 'numpy.argsort', 'np.argsort', (['Lstds'], {}), '(Lstds)\n', (4208, 4215), True, 'import numpy as np\n'), ((5304, 5324), 'os.path.exists', 'path.exists', (['to_path'], {}), '(to_path)\n', (5315, 5324), False, 'from os import listdir, path, makedirs\n'), ((5411, 5428), 'os.makedirs', 'makedirs', (['to_path'], {}), '(to_path)\n', (5419, 5428), False, 'from os import listdir, path, makedirs\n'), ((6750, 6763), 'numpy.sum', 'np.sum', (['found'], {}), '(found)\n', (6756, 6763), True, 'import numpy as np\n'), ((6826, 6839), 'numpy.sum', 'np.sum', (['found'], {}), '(found)\n', (6832, 6839), True, 'import numpy as np\n'), ((7027, 7040), 'numpy.sum', 'np.sum', (['found'], {}), '(found)\n', (7033, 7040), True, 'import numpy as np\n')] |
"""
Helper functions for the tests
"""
import os
import numpy as np
from msl.io import read
def read_sample(filename, **kwargs):
"""Read a file in the 'samples' directory.
Parameters
----------
filename : str
The name of the file in the samples/ directory
Returns
-------
A root object
"""
return read(os.path.join(os.path.dirname(__file__), 'samples', filename), **kwargs)
def metadata_equal(m1, m2):
"""Assert that two Metadata objects are equal."""
assert len(m1) == len(m2)
for k1, v1 in m1.items():
v2 = m2[k1]
if isinstance(v1, (list, tuple, np.ndarray)):
assert np.array_equal(v1, v2), '{}\n{}'.format(v1, v2)
else:
assert v1 == v2, '{} != {}'.format(v1, v2)
return True
def datasets_equal(d1, d2):
"""Assert that two Dataset objects are equal."""
assert d1.name == d2.name, '{} != {}'.format(d1.name, d2.name)
assert np.array_equal(d1.data, d2.data), '{}\n{}'.format(d1.data, d2.data)
assert metadata_equal(d1.metadata, d2.metadata)
return True
def roots_equal(r1, r2):
"""Assert that two Root objects are equal."""
assert metadata_equal(r1.metadata, r2.metadata)
groups1 = list(r1.groups())
groups1.sort(key=lambda x: x.name)
groups2 = list(r2.groups())
groups2.sort(key=lambda x: x.name)
assert len(groups1) == len(groups2)
for g1, g2 in zip(groups1, groups2):
assert g1.name == g2.name, '{} != {}'.format(g1.name, g2.name)
assert metadata_equal(g1.metadata, g2.metadata)
datasets1 = list(r1.datasets())
datasets1.sort(key=lambda x: x.name)
datasets2 = list(r2.datasets())
datasets2.sort(key=lambda x: x.name)
assert len(datasets1) == len(datasets2)
for d1, d2 in zip(datasets1, datasets2):
assert datasets_equal(d1, d2)
return True
| [
"os.path.dirname",
"numpy.array_equal"
] | [((955, 987), 'numpy.array_equal', 'np.array_equal', (['d1.data', 'd2.data'], {}), '(d1.data, d2.data)\n', (969, 987), True, 'import numpy as np\n'), ((365, 390), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (380, 390), False, 'import os\n'), ((661, 683), 'numpy.array_equal', 'np.array_equal', (['v1', 'v2'], {}), '(v1, v2)\n', (675, 683), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 19 17:35:09 2016
@author: yxl
"""
from imagepy.core.engine import Tool
import numpy as np
from imagepy.core.manager import ColorManager
from imagepy.core.draw.fill import floodfill
class Plugin(Tool):
title = 'Flood Fill'
para = {'tor':10, 'con':'8-connect'}
view = [(int, 'tor', (0,1000), 0, 'torlorance', 'value'),
(list, 'con', ['4-connect', '8-connect'], str, 'fill', 'pix')]
def mouse_down(self, ips, x, y, btn, **key):
ips.snapshot()
msk = floodfill(ips.img, x, y, self.para['tor'], self.para['con']=='8-connect')
#plt.imshow(msk)
#plt.show()
color = ColorManager.get_front()
if ips.get_nchannels()==1:color = np.mean(color)
ips.img[msk] = color
ips.update()
def mouse_up(self, ips, x, y, btn, **key):
pass
def mouse_move(self, ips, x, y, btn, **key):
pass
def mouse_wheel(self, ips, x, y, d, **key):
pass
| [
"imagepy.core.manager.ColorManager.get_front",
"numpy.mean",
"imagepy.core.draw.fill.floodfill"
] | [((549, 624), 'imagepy.core.draw.fill.floodfill', 'floodfill', (['ips.img', 'x', 'y', "self.para['tor']", "(self.para['con'] == '8-connect')"], {}), "(ips.img, x, y, self.para['tor'], self.para['con'] == '8-connect')\n", (558, 624), False, 'from imagepy.core.draw.fill import floodfill\n'), ((684, 708), 'imagepy.core.manager.ColorManager.get_front', 'ColorManager.get_front', ([], {}), '()\n', (706, 708), False, 'from imagepy.core.manager import ColorManager\n'), ((751, 765), 'numpy.mean', 'np.mean', (['color'], {}), '(color)\n', (758, 765), True, 'import numpy as np\n')] |
"""Signal dispatchers and handlers for the articles module"""
from django.db.models.signals import post_save
from django.dispatch import receiver, Signal
from authors.apps.articles.models import Article
# our custom signal that will be sent when a new article is published
# we could have stuck to using the post_save signal and receiving it in the
# notifications app or calling one of the util methods there,
# but that kills the whole benefit to the modularity we're going for
article_published_signal = Signal(providing_args=["article"])
class ArticlesSignalSender:
pass
@receiver(post_save, sender=Article)
def on_article_post_save(sender, **kwargs):
"""called when an article is saved"""
if kwargs['created']:
# we are only acting when something we are interested in
# actually happened
article_published_signal.send(ArticlesSignalSender,
article=kwargs['instance'])
| [
"django.dispatch.receiver",
"django.dispatch.Signal"
] | [((509, 543), 'django.dispatch.Signal', 'Signal', ([], {'providing_args': "['article']"}), "(providing_args=['article'])\n", (515, 543), False, 'from django.dispatch import receiver, Signal\n'), ((586, 621), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'Article'}), '(post_save, sender=Article)\n', (594, 621), False, 'from django.dispatch import receiver, Signal\n')] |
"""TFTBechmark scripts"""
import shutil
import tempfile
import time
import tensorflow as tf
import tqdm
from datasets import load_dataset
from transformers import RobertaTokenizerFast
from tf_transformers.models import Classification_Model
from tf_transformers.models import RobertaModel as Model
_ALLOWED_DECODER_TYPES = ["keras_model", "saved_model"]
class TftBenchmark:
def __init__(self, cfg):
self.cfg = cfg
# Check compatible model type
self.model_type = cfg.benchmark.model.type
if self.model_type not in _ALLOWED_DECODER_TYPES:
raise ValueError("Unknow model type {} defined".format(self.model_type))
self.model_name = cfg.benchmark.model.name
self.tokenizer = RobertaTokenizerFast.from_pretrained(self.model_name)
self.temp_dir = tempfile.mkdtemp()
def load_and_batch_dataset(self):
"""Load TF dataset"""
cfg = self.cfg
tokenizer = self.tokenizer
# Load from hydra config
dataset_name = cfg.benchmark.data.name
take_sample = cfg.benchmark.data.take_sample
batch_size = cfg.benchmark.data.batch_size
max_length = cfg.benchmark.data.max_length
dataset = load_dataset(dataset_name, split="test")
if take_sample:
dataset = dataset.select(range(50))
# Add summarize: with text
self.dataset = dataset
dataset = dataset.map(
lambda e: tokenizer(e["text"], truncation=True, padding=True, max_length=max_length),
batched=True,
)
dataset.set_format(type="tensorflow", columns=["input_ids"])
features = {
x: tf.cast(dataset[x], dtype=tf.int32).to_tensor(default_value=0, shape=[None, max_length])
for x in ["input_ids"]
}
features['input_mask'] = tf.ones_like(features['input_ids'])
features['input_type_ids'] = tf.zeros_like(features['input_ids'])
tfdataset = tf.data.Dataset.from_tensor_slices((features)).batch(batch_size)
# Convert alldataset to a list for not including that latency while measuring model
# performance
# (batch_dataset, batch_size, seq_length)
batched_datasets = [(batch_dataset, batch_dataset['input_ids'].shape[0]) for batch_dataset in tfdataset]
return batched_datasets
def _load_keras_model(self):
"""Load using TextDecoder KerasModel"""
def classifier_fn(model):
def _classifier_fn(inputs):
return model(inputs)
return _classifier_fn
model_name = self.cfg.benchmark.model.name
# Load Auto Regressive Version
model = Model.from_pretrained(model_name=model_name)
model = Classification_Model(model, num_classes=2)
model = model.get_model()
return classifier_fn(model)
def _load_saved_model(self):
"""Load using TextDecoder saved_model"""
def classifier_fn():
model = self.loaded.signatures['serving_default']
def _classifier_fn(inputs):
return model(**inputs)
return _classifier_fn
model_name = self.cfg.benchmark.model.name
model = Model.from_pretrained(model_name=model_name)
model = Classification_Model(model, num_classes=2)
model = model.get_model()
# Save as saved_model
model.save_serialized(self.temp_dir, overwrite=True)
# Load as saved_model
del model
self.loaded = tf.saved_model.load(self.temp_dir)
return classifier_fn()
def load_model_classifier_fn(self):
"""Load Model"""
if self.model_type == "keras_model":
classifier_fn = self._load_keras_model()
if self.model_type == "saved_model":
classifier_fn = self._load_saved_model()
return classifier_fn
def run(self):
#### Load Decoder function
classifier_fn = self.load_model_classifier_fn()
print("Decoder function loaded succesfully")
#### Load dataset
batched_datasets = self.load_and_batch_dataset()
print("Dataset loaded succesfully")
import gc
gc.collect()
#### Run classifier function
# Sample batch (to avoid first time compilation time)
sample_batch_inputs, _ = batched_datasets[0]
outputs = classifier_fn(sample_batch_inputs)
slines = 0
start_time = time.time()
for (batch_inputs, batch_size) in tqdm.tqdm(batched_datasets, unit="batch "):
outputs = classifier_fn(batch_inputs) # noqa
slines += batch_size
end_time = time.time()
shutil.rmtree(self.temp_dir)
time_taken = end_time - start_time
samples_per_second = slines / time_taken
return {"model_type": self.model_type, "time_taken": time_taken, "samples_per_second": samples_per_second}
| [
"tensorflow.saved_model.load",
"tensorflow.data.Dataset.from_tensor_slices",
"time.time",
"tqdm.tqdm",
"shutil.rmtree",
"tf_transformers.models.Classification_Model",
"tf_transformers.models.RobertaModel.from_pretrained",
"tempfile.mkdtemp",
"datasets.load_dataset",
"gc.collect",
"tensorflow.one... | [((742, 795), 'transformers.RobertaTokenizerFast.from_pretrained', 'RobertaTokenizerFast.from_pretrained', (['self.model_name'], {}), '(self.model_name)\n', (778, 795), False, 'from transformers import RobertaTokenizerFast\n'), ((820, 838), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (836, 838), False, 'import tempfile\n'), ((1221, 1261), 'datasets.load_dataset', 'load_dataset', (['dataset_name'], {'split': '"""test"""'}), "(dataset_name, split='test')\n", (1233, 1261), False, 'from datasets import load_dataset\n'), ((1840, 1875), 'tensorflow.ones_like', 'tf.ones_like', (["features['input_ids']"], {}), "(features['input_ids'])\n", (1852, 1875), True, 'import tensorflow as tf\n'), ((1913, 1949), 'tensorflow.zeros_like', 'tf.zeros_like', (["features['input_ids']"], {}), "(features['input_ids'])\n", (1926, 1949), True, 'import tensorflow as tf\n'), ((2681, 2725), 'tf_transformers.models.RobertaModel.from_pretrained', 'Model.from_pretrained', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (2702, 2725), True, 'from tf_transformers.models import RobertaModel as Model\n'), ((2742, 2784), 'tf_transformers.models.Classification_Model', 'Classification_Model', (['model'], {'num_classes': '(2)'}), '(model, num_classes=2)\n', (2762, 2784), False, 'from tf_transformers.models import Classification_Model\n'), ((3214, 3258), 'tf_transformers.models.RobertaModel.from_pretrained', 'Model.from_pretrained', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (3235, 3258), True, 'from tf_transformers.models import RobertaModel as Model\n'), ((3275, 3317), 'tf_transformers.models.Classification_Model', 'Classification_Model', (['model'], {'num_classes': '(2)'}), '(model, num_classes=2)\n', (3295, 3317), False, 'from tf_transformers.models import Classification_Model\n'), ((3515, 3549), 'tensorflow.saved_model.load', 'tf.saved_model.load', (['self.temp_dir'], {}), '(self.temp_dir)\n', (3534, 3549), True, 'import 
tensorflow as tf\n'), ((4196, 4208), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4206, 4208), False, 'import gc\n'), ((4457, 4468), 'time.time', 'time.time', ([], {}), '()\n', (4466, 4468), False, 'import time\n'), ((4511, 4553), 'tqdm.tqdm', 'tqdm.tqdm', (['batched_datasets'], {'unit': '"""batch """'}), "(batched_datasets, unit='batch ')\n", (4520, 4553), False, 'import tqdm\n'), ((4665, 4676), 'time.time', 'time.time', ([], {}), '()\n', (4674, 4676), False, 'import time\n'), ((4685, 4713), 'shutil.rmtree', 'shutil.rmtree', (['self.temp_dir'], {}), '(self.temp_dir)\n', (4698, 4713), False, 'import shutil\n'), ((1971, 2015), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['features'], {}), '(features)\n', (2005, 2015), True, 'import tensorflow as tf\n'), ((1673, 1708), 'tensorflow.cast', 'tf.cast', (['dataset[x]'], {'dtype': 'tf.int32'}), '(dataset[x], dtype=tf.int32)\n', (1680, 1708), True, 'import tensorflow as tf\n')] |
#
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import os
import pytest
from api.cas import casadm
from api.cas.cache_config import CacheMode
from core.test_run import TestRun
from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan
from test_tools.dd import Dd
from test_tools.disk_utils import Filesystem
from test_utils.filesystem.file import File
from test_utils.os_utils import drop_caches, DropCachesMode, sync
from test_utils.size import Size, Unit
mount_point = "/mnt/test"
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("cache_mode", CacheMode)
@pytest.mark.parametrizex("filesystem", Filesystem)
@pytest.mark.parametrizex("reboot_type", ["soft", "hard"])
@pytest.mark.require_plugin("power_control")
def test_load_after_clean_shutdown(reboot_type, cache_mode, filesystem):
    """
    title: Planned system shutdown test.
    description: Test for data consistency after clean system shutdown.
    pass_criteria:
      - DUT should reboot successfully.
      - Checksum of file on core device should be the same before and after reboot.
    """
    with TestRun.step("Prepare CAS device."):
        # Build a cache/core pair: 1 GiB cache partition + whole core disk.
        cache_disk = TestRun.disks['cache']
        cache_disk.create_partitions([Size(1, Unit.GibiByte)])
        cache_dev = cache_disk.partitions[0]
        core_dev = TestRun.disks['core']
        cache = casadm.start_cache(cache_dev, cache_mode, force=True)
        core = cache.add_core(core_dev)
        # 4 KiB filesystem block size to match the cache line granularity.
        core.create_filesystem(filesystem, blocksize=int(Size(1, Unit.Blocks4096)))
        core.mount(mount_point)
    with TestRun.step("Create file on cache and count its checksum."):
        # Write 1 MiB (1024 x 1 KiB) of zeroes through the cached volume.
        test_file = File(os.path.join(mount_point, "test_file"))
        Dd()\
            .input("/dev/zero")\
            .output(test_file.full_path)\
            .block_size(Size(1, Unit.KibiByte))\
            .count(1024)\
            .run()
        test_file.refresh_item()
        test_file_md5 = test_file.md5sum()
        # Flush page cache so the recorded checksum reflects persisted data.
        sync()
        drop_caches(DropCachesMode.ALL)
    with TestRun.step("Reset platform."):
        # "soft" reboots through the OS; "hard" power-cycles the DUT via the
        # power_control plugin (clean shutdown is still expected beforehand).
        if reboot_type == "soft":
            TestRun.executor.reboot()
        else:
            power_control = TestRun.plugin_manager.get_plugin('power_control')
            power_control.power_cycle()
    with TestRun.step("Load cache."):
        # Re-attach the existing cache metadata instead of starting fresh.
        casadm.load_cache(cache_dev)
        core.mount(mount_point)
    with TestRun.step("Check file md5sum."):
        # Data-consistency check: checksum must match the pre-reboot value.
        test_file.refresh_item()
        if test_file_md5 != test_file.md5sum():
            TestRun.LOGGER.error("Checksums does not match - file is corrupted.")
        else:
            TestRun.LOGGER.info("File checksum is correct.")
    with TestRun.step("Remove test file."):
        test_file.remove()
| [
"pytest.mark.parametrizex",
"storage_devices.disk.DiskTypeSet",
"test_utils.size.Size",
"core.test_run.TestRun.LOGGER.info",
"os.path.join",
"api.cas.casadm.start_cache",
"test_utils.os_utils.drop_caches",
"api.cas.casadm.load_cache",
"core.test_run.TestRun.plugin_manager.get_plugin",
"core.test_r... | [((695, 744), 'pytest.mark.parametrizex', 'pytest.mark.parametrizex', (['"""cache_mode"""', 'CacheMode'], {}), "('cache_mode', CacheMode)\n", (719, 744), False, 'import pytest\n'), ((746, 796), 'pytest.mark.parametrizex', 'pytest.mark.parametrizex', (['"""filesystem"""', 'Filesystem'], {}), "('filesystem', Filesystem)\n", (770, 796), False, 'import pytest\n'), ((798, 855), 'pytest.mark.parametrizex', 'pytest.mark.parametrizex', (['"""reboot_type"""', "['soft', 'hard']"], {}), "('reboot_type', ['soft', 'hard'])\n", (822, 855), False, 'import pytest\n'), ((857, 900), 'pytest.mark.require_plugin', 'pytest.mark.require_plugin', (['"""power_control"""'], {}), "('power_control')\n", (883, 900), False, 'import pytest\n'), ((585, 630), 'storage_devices.disk.DiskTypeSet', 'DiskTypeSet', (['[DiskType.optane, DiskType.nand]'], {}), '([DiskType.optane, DiskType.nand])\n', (596, 630), False, 'from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan\n'), ((666, 692), 'storage_devices.disk.DiskTypeLowerThan', 'DiskTypeLowerThan', (['"""cache"""'], {}), "('cache')\n", (683, 692), False, 'from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan\n'), ((1275, 1310), 'core.test_run.TestRun.step', 'TestRun.step', (['"""Prepare CAS device."""'], {}), "('Prepare CAS device.')\n", (1287, 1310), False, 'from core.test_run import TestRun\n'), ((1521, 1574), 'api.cas.casadm.start_cache', 'casadm.start_cache', (['cache_dev', 'cache_mode'], {'force': '(True)'}), '(cache_dev, cache_mode, force=True)\n', (1539, 1574), False, 'from api.cas import casadm\n'), ((1741, 1801), 'core.test_run.TestRun.step', 'TestRun.step', (['"""Create file on cache and count its checksum."""'], {}), "('Create file on cache and count its checksum.')\n", (1753, 1801), False, 'from core.test_run import TestRun\n'), ((2135, 2141), 'test_utils.os_utils.sync', 'sync', ([], {}), '()\n', (2139, 2141), False, 'from test_utils.os_utils import drop_caches, DropCachesMode, 
sync\n'), ((2150, 2181), 'test_utils.os_utils.drop_caches', 'drop_caches', (['DropCachesMode.ALL'], {}), '(DropCachesMode.ALL)\n', (2161, 2181), False, 'from test_utils.os_utils import drop_caches, DropCachesMode, sync\n'), ((2192, 2223), 'core.test_run.TestRun.step', 'TestRun.step', (['"""Reset platform."""'], {}), "('Reset platform.')\n", (2204, 2223), False, 'from core.test_run import TestRun\n'), ((2440, 2467), 'core.test_run.TestRun.step', 'TestRun.step', (['"""Load cache."""'], {}), "('Load cache.')\n", (2452, 2467), False, 'from core.test_run import TestRun\n'), ((2477, 2505), 'api.cas.casadm.load_cache', 'casadm.load_cache', (['cache_dev'], {}), '(cache_dev)\n', (2494, 2505), False, 'from api.cas import casadm\n'), ((2548, 2582), 'core.test_run.TestRun.step', 'TestRun.step', (['"""Check file md5sum."""'], {}), "('Check file md5sum.')\n", (2560, 2582), False, 'from core.test_run import TestRun\n'), ((2832, 2865), 'core.test_run.TestRun.step', 'TestRun.step', (['"""Remove test file."""'], {}), "('Remove test file.')\n", (2844, 2865), False, 'from core.test_run import TestRun\n'), ((1828, 1866), 'os.path.join', 'os.path.join', (['mount_point', '"""test_file"""'], {}), "(mount_point, 'test_file')\n", (1840, 1866), False, 'import os\n'), ((2271, 2296), 'core.test_run.TestRun.executor.reboot', 'TestRun.executor.reboot', ([], {}), '()\n', (2294, 2296), False, 'from core.test_run import TestRun\n'), ((2339, 2389), 'core.test_run.TestRun.plugin_manager.get_plugin', 'TestRun.plugin_manager.get_plugin', (['"""power_control"""'], {}), "('power_control')\n", (2372, 2389), False, 'from core.test_run import TestRun\n'), ((2677, 2746), 'core.test_run.TestRun.LOGGER.error', 'TestRun.LOGGER.error', (['"""Checksums does not match - file is corrupted."""'], {}), "('Checksums does not match - file is corrupted.')\n", (2697, 2746), False, 'from core.test_run import TestRun\n'), ((2773, 2821), 'core.test_run.TestRun.LOGGER.info', 'TestRun.LOGGER.info', (['"""File checksum is 
correct."""'], {}), "('File checksum is correct.')\n", (2792, 2821), False, 'from core.test_run import TestRun\n'), ((1394, 1416), 'test_utils.size.Size', 'Size', (['(1)', 'Unit.GibiByte'], {}), '(1, Unit.GibiByte)\n', (1398, 1416), False, 'from test_utils.size import Size, Unit\n'), ((1672, 1696), 'test_utils.size.Size', 'Size', (['(1)', 'Unit.Blocks4096'], {}), '(1, Unit.Blocks4096)\n', (1676, 1696), False, 'from test_utils.size import Size, Unit\n'), ((1981, 2003), 'test_utils.size.Size', 'Size', (['(1)', 'Unit.KibiByte'], {}), '(1, Unit.KibiByte)\n', (1985, 2003), False, 'from test_utils.size import Size, Unit\n'), ((1876, 1880), 'test_tools.dd.Dd', 'Dd', ([], {}), '()\n', (1878, 1880), False, 'from test_tools.dd import Dd\n')] |
# type: ignore
# This is a small script to parse the header files from wasmtime and generate
# appropriate function definitions in Python for each exported function. This
# also reflects types into Python with `ctypes`. While there's at least one
# other generate that does this already it seemed to not quite fit our purposes
# with lots of extra an unnecessary boilerplate.
from pycparser import c_ast, parse_file
class Visitor(c_ast.NodeVisitor):
    """AST visitor that emits Python ctypes bindings for the wasmtime C API.

    Walks the parsed header and accumulates the generated module source in
    ``self.ret``; only names starting with ``was`` (wasm/wasi) are bound.
    """

    def __init__(self):
        # Header of the generated file: warning banner plus the imports every
        # generated binding relies on.
        self.ret = ''
        self.ret += '# flake8: noqa\n'
        self.ret += '#\n'
        self.ret += '# This is a procedurally generated file, DO NOT EDIT\n'
        self.ret += '# instead edit `./bindgen.py` at the root of the repo\n'
        self.ret += '\n'
        self.ret += 'from ctypes import *\n'
        self.ret += 'from typing import Any\n'
        self.ret += 'from ._ffi import dll, wasm_val_t\n'
        # Guards against emitting wasm_ref_t twice (declared twice upstream).
        self.generated_wasm_ref_t = False

    # Skip all function definitions, we don't bind those
    def visit_FuncDef(self, node):
        pass

    def visit_Struct(self, node):
        """Emit a ctypes ``Structure`` subclass for a wasm/wasi C struct."""
        if not node.name or not node.name.startswith('was'):
            return
        # This is hand-generated since it has an anonymous union in it
        if node.name == 'wasm_val_t':
            return
        # This is defined twice in the header file, but we only want to insert
        # one definition.
        if node.name == 'wasm_ref_t':
            if self.generated_wasm_ref_t:
                return
            self.generated_wasm_ref_t = True
        self.ret += "\n"
        self.ret += "class {}(Structure):\n".format(node.name)
        if node.decls:
            # Struct has a body: mirror each field as a (name, ctype) pair.
            self.ret += "    _fields_ = [\n"
            for decl in node.decls:
                self.ret += "        (\"{}\", {}),\n".format(decl.name, type_name(decl.type))
            self.ret += "    ]\n"
        else:
            # Opaque (forward-declared) struct.
            self.ret += "    pass\n"

    def visit_Typedef(self, node):
        """Emit a Python alias for a C typedef (skipping self-referential ones)."""
        if not node.name or not node.name.startswith('was'):
            return
        # Visit the underlying type first so e.g. a struct gets emitted
        # before the alias that refers to it.
        self.visit(node.type)
        tyname = type_name(node.type)
        if tyname != node.name:
            self.ret += "\n"
            self.ret += "{} = {}\n".format(node.name, type_name(node.type))

    def visit_FuncDecl(self, node):
        """Emit restype/argtypes setup plus a typed Python wrapper for a C function."""
        if isinstance(node.type, c_ast.TypeDecl):
            ptr = False
            ty = node.type
        elif isinstance(node.type, c_ast.PtrDecl):
            ptr = True
            ty = node.type.type
        name = ty.declname
        # This is probably a type, skip it
        if name.endswith('_t'):
            return
        # Skip anything not related to wasi or wasm
        if not name.startswith('was'):
            return
        # TODO: these are bugs with upstream wasmtime
        if name == 'wasm_frame_copy':
            return
        if name == 'wasm_frame_instance':
            return
        if name == 'wasm_module_serialize':
            return
        if name == 'wasm_module_deserialize':
            return
        if 'ref_as_' in name:
            return
        if 'extern_const' in name:
            return
        if 'foreign' in name:
            return
        ret = ty.type
        argpairs = []
        argtypes = []
        argnames = []
        if node.args:
            for i, param in enumerate(node.args.params):
                argname = param.name
                # Unnamed params and Python keywords get positional names.
                if not argname or argname == "import" or argname == "global":
                    argname = "arg{}".format(i)
                argpairs.append("{}: Any".format(argname))
                argnames.append(argname)
                argtypes.append(type_name(param.type))
        retty = type_name(node.type, ptr, typing=True)
        # The raw symbol is bound as _name; the typed wrapper calls it.
        self.ret += "\n"
        self.ret += "_{0} = dll.{0}\n".format(name)
        self.ret += "_{}.restype = {}\n".format(name, type_name(ret, ptr))
        self.ret += "_{}.argtypes = [{}]\n".format(name, ', '.join(argtypes))
        self.ret += "def {}({}) -> {}:\n".format(name, ', '.join(argpairs), retty)
        self.ret += "    return _{}({})  # type: ignore\n".format(name, ', '.join(argnames))
def type_name(ty, ptr=False, typing=False):
    """Map a pycparser type node to a Python/ctypes type expression string.

    ``ptr`` marks that the node was reached through a pointer; ``typing``
    selects the name used in the wrapper's type annotation (e.g. ``int``)
    rather than the ctypes name used for restype/argtypes (e.g. ``c_int``).
    """
    # Unwrap TypeDecl wrappers to reach the underlying type node.
    while isinstance(ty, c_ast.TypeDecl):
        ty = ty.type
    if ptr:
        if typing:
            return "pointer"
        if isinstance(ty, c_ast.IdentifierType) and ty.names[0] == "void":
            return "c_void_p"
        elif not isinstance(ty, c_ast.FuncDecl):
            # Pointers to function types fall through to the FuncDecl branch.
            return "POINTER({})".format(type_name(ty, False, typing))
    if isinstance(ty, c_ast.IdentifierType):
        assert(len(ty.names) == 1)
        if ty.names[0] == "void":
            return "None"
        elif ty.names[0] == "_Bool":
            return "c_bool"
        elif ty.names[0] == "byte_t":
            return "c_ubyte"
        elif ty.names[0] == "uint8_t":
            return "c_uint8"
        elif ty.names[0] == "uint32_t":
            return "int" if typing else "c_uint32"
        elif ty.names[0] == "uint64_t":
            return "c_uint64"
        elif ty.names[0] == "size_t":
            return "int" if typing else "c_size_t"
        elif ty.names[0] == "char":
            return "c_char"
        elif ty.names[0] == "int":
            return "int" if typing else "c_int"
        # ctypes values can't stand as typedefs, so just use the pointer type here
        elif typing and 'func_callback' in ty.names[0]:
            return "pointer"
        elif typing and ('size' in ty.names[0] or 'pages' in ty.names[0]):
            return "int"
        # Anything else is assumed to be a name defined by earlier bindings.
        return ty.names[0]
    elif isinstance(ty, c_ast.Struct):
        return ty.name
    elif isinstance(ty, c_ast.FuncDecl):
        # Function pointer: CFUNCTYPE(restype, *argtypes).
        tys = []
        # TODO: apparently errors are thrown if we faithfully represent the
        # pointer type here, seems odd?
        if isinstance(ty.type, c_ast.PtrDecl):
            tys.append("c_size_t")
        else:
            tys.append(type_name(ty.type))
        if ty.args.params:
            for param in ty.args.params:
                tys.append(type_name(param.type))
        return "CFUNCTYPE({})".format(', '.join(tys))
    elif isinstance(ty, c_ast.PtrDecl) or isinstance(ty, c_ast.ArrayDecl):
        # Arrays decay to pointers for binding purposes.
        return type_name(ty.type, True, typing)
    else:
        raise RuntimeError("unknown {}".format(ty))
# Parse the wasmtime header through gcc's preprocessor; the -D flags strip
# GCC/clang-specific constructs that pycparser's C99 grammar cannot handle.
ast = parse_file(
    './wasmtime/include/wasmtime.h',
    use_cpp=True,
    cpp_path='gcc',
    cpp_args=[
        '-E',
        '-I./wasmtime/include',
        '-D__attribute__(x)=',
        '-D__asm__(x)=',
        '-D__asm(x)=',
        '-D__volatile__(x)=',
        '-D_Static_assert(x, y)=',
        '-Dstatic_assert(x, y)=',
        '-D__restrict=',
        '-D__restrict__=',
        '-D__extension__=',
        '-D__inline__=',
        '-D__signed=',
        '-D__builtin_va_list=int',
    ]
)

v = Visitor()
v.visit(ast)

# Run as a script: (re)write the generated bindings file.
# Imported (e.g. from CI/tests): verify the checked-in bindings are current.
if __name__ == "__main__":
    with open("wasmtime/_bindings.py", "w") as f:
        f.write(v.ret)
else:
    with open("wasmtime/_bindings.py", "r") as f:
        contents = f.read()
    if contents != v.ret:
        raise RuntimeError("bindings need an update, run this script")
| [
"pycparser.parse_file"
] | [((6278, 6657), 'pycparser.parse_file', 'parse_file', (['"""./wasmtime/include/wasmtime.h"""'], {'use_cpp': '(True)', 'cpp_path': '"""gcc"""', 'cpp_args': "['-E', '-I./wasmtime/include', '-D__attribute__(x)=', '-D__asm__(x)=',\n '-D__asm(x)=', '-D__volatile__(x)=', '-D_Static_assert(x, y)=',\n '-Dstatic_assert(x, y)=', '-D__restrict=', '-D__restrict__=',\n '-D__extension__=', '-D__inline__=', '-D__signed=',\n '-D__builtin_va_list=int']"}), "('./wasmtime/include/wasmtime.h', use_cpp=True, cpp_path='gcc',\n cpp_args=['-E', '-I./wasmtime/include', '-D__attribute__(x)=',\n '-D__asm__(x)=', '-D__asm(x)=', '-D__volatile__(x)=',\n '-D_Static_assert(x, y)=', '-Dstatic_assert(x, y)=', '-D__restrict=',\n '-D__restrict__=', '-D__extension__=', '-D__inline__=', '-D__signed=',\n '-D__builtin_va_list=int'])\n", (6288, 6657), False, 'from pycparser import c_ast, parse_file\n')] |
from utils import project_list
from learning import IncrementalLearningModel
def get_testing_dataset_size(prj):
    """Return the number of samples in the project's test split.

    Builds an incremental RF learner for ``prj['name']`` and measures the
    length of the ground-truth vector from its prediction output.
    """
    model = IncrementalLearningModel(prj['name'], 'RF', 30, 1)
    _, y_test = model.get_predicted_data()
    return len(y_test)
def main():
    """Print each project's name followed by the size of its test dataset."""
    # Fixed: the original enumerated project_list but never used the index.
    for prj in project_list:
        print(prj['name'])
        # Number of test samples used for evaluation.
        print(get_testing_dataset_size(prj))


main()
| [
"learning.IncrementalLearningModel"
] | [((122, 172), 'learning.IncrementalLearningModel', 'IncrementalLearningModel', (["prj['name']", '"""RF"""', '(30)', '(1)'], {}), "(prj['name'], 'RF', 30, 1)\n", (146, 172), False, 'from learning import IncrementalLearningModel\n')] |
# ================================================================
# MIT License
# Copyright (c) 2022 edwardyehuang (https://github.com/edwardyehuang)
# ================================================================
import tensorflow as tf
from iseg.layers.normalizations import normalization
from iseg.utils.attention_utils import *
from iseg.layers.model_builder import resize_image, get_training_value
from iseg.vis.vismanager import get_visualization_manager
from car_core.utils import (
get_flatten_one_hot_label,
get_class_sum_features_and_counts,
get_inter_class_relative_loss,
get_intra_class_absolute_loss,
get_pixel_inter_class_relative_loss,
)
class ClassAwareRegularization(tf.keras.Model):
    """Class-aware regularization (CAR) head.

    During training this module derives per-class feature centers from the
    input features and a (nearest-resized) label map, then attaches two
    auxiliary losses via ``self.add_loss``:

    - intra-class absolute loss: pulls each pixel feature towards the
      center of its own class;
    - inter-class relative loss: pushes class centers apart
      (center-to-center and/or pixel-to-center variants).

    The returned tensor is only the linearly projected input (optionally
    followed by an extra 1x1 conv + normalization + ReLU when
    ``apply_convs`` is set); the losses do not alter the forward output.
    """

    def __init__(
        self,
        train_mode=False,
        use_inter_class_loss=True,
        use_intra_class_loss=True,
        intra_class_loss_remove_max=False,
        use_inter_c2c_loss=True,
        use_inter_c2p_loss=False,
        intra_class_loss_rate=1,
        inter_class_loss_rate=1,
        num_class=21,
        ignore_label=0,
        pooling_rates=None,
        use_batch_class_center=True,
        use_last_class_center=False,
        last_class_center_decay=0.9,
        inter_c2c_loss_threshold=0.5,
        inter_c2p_loss_threshold=0.25,
        filters=512,
        apply_convs=False,
        name=None,
    ):
        """Configure the CAR head.

        ``pooling_rates`` accepts a single rate, a list, or a tuple; ``None``
        (the default) means ``[1]``.  All other arguments are stored as-is
        and control which auxiliary losses are added and how they are scaled.
        """
        super().__init__(name=name)

        self.vis_manager = get_visualization_manager()

        self.train_mode = train_mode
        self.use_inter_class_loss = use_inter_class_loss
        self.use_intra_class_loss = use_intra_class_loss
        self.intra_class_loss_rate = intra_class_loss_rate
        self.inter_class_loss_rate = inter_class_loss_rate
        self.num_class = num_class
        self.ignore_label = ignore_label
        self.inter_c2c_loss_threshold = inter_c2c_loss_threshold
        self.inter_c2p_loss_threshold = inter_c2p_loss_threshold
        self.intra_class_loss_remove_max = intra_class_loss_remove_max
        self.use_inter_c2c_loss = use_inter_c2c_loss
        self.use_inter_c2p_loss = use_inter_c2p_loss
        self.filters = filters
        self.apply_convs = apply_convs

        # BUG FIX: the previous signature used a mutable default argument
        # (pooling_rates=[1]) which is shared across all instances; None now
        # stands in for the default and is materialized per-instance here.
        if pooling_rates is None:
            pooling_rates = [1]
        if isinstance(pooling_rates, tuple):
            pooling_rates = list(pooling_rates)
        if not isinstance(pooling_rates, list):
            pooling_rates = [pooling_rates]

        self.pooling_rates = pooling_rates

        self.use_batch_class_center = use_batch_class_center
        self.use_last_class_center = use_last_class_center
        self.last_class_center_decay = last_class_center_decay

        print(f"------CAR settings------")
        print(f"------train_mode = {train_mode}")
        print(f"------use_intra_class_loss = {use_intra_class_loss}")
        print(f"------use_inter_class_loss = {use_inter_class_loss}")
        print(f"------intra_class_loss_rate = {intra_class_loss_rate}")
        print(f"------inter_class_loss_rate = {inter_class_loss_rate}")
        print(f"------use_batch_class_center = {use_batch_class_center}")
        print(f"------use_last_class_center = {use_last_class_center}")
        print(f"------last_class_center_decay = {last_class_center_decay}")
        print(f"------pooling_rates = {pooling_rates}")
        print(f"------inter_c2c_loss_threshold = {inter_c2c_loss_threshold}")
        print(f"------inter_c2p_loss_threshold = {inter_c2p_loss_threshold}")
        print(f"------intra_class_loss_remove_max = {intra_class_loss_remove_max}")
        print(f"------use_inter_c2c_loss = {use_inter_c2c_loss}")
        print(f"------use_inter_c2p_loss = {use_inter_c2p_loss}")
        print(f"------filters = {filters}")
        print(f"------apply_convs = {apply_convs}")
        print(f"------num_class = {num_class}")
        print(f"------ignore_label = {ignore_label}")

    def add_car_losses(self, features, label=None, extra_prefix=None, training=None):
        """Compute class centers from ``features``/``label`` and register the
        configured intra-/inter-class losses and metrics on this model.

        ``extra_prefix`` is appended to the metric name prefix so losses from
        different pooling rates stay distinguishable.
        """
        # features : [N, H, W, C]

        training = get_training_value(training)

        loss_name_prefix = f"{self.name}"

        if extra_prefix is not None:
            loss_name_prefix = f"{loss_name_prefix}_{extra_prefix}"

        inputs_shape = tf.shape(features)

        height = inputs_shape[-3]
        width = inputs_shape[-2]

        # Nearest-neighbour resize keeps label values categorical.
        label = resize_image(label, (height, width), method="nearest")

        tf.debugging.check_numerics(features, "features contains nan or inf")

        flatten_features = flatten_hw(features)

        not_ignore_spatial_mask = tf.cast(label, tf.int32) != self.ignore_label  # [N, H, W, 1]
        not_ignore_spatial_mask = flatten_hw(not_ignore_spatial_mask)

        one_hot_label = get_flatten_one_hot_label(
            label, num_class=self.num_class, ignore_label=self.ignore_label
        )  # [N, HW, class]

        ####################################################################################

        class_sum_features, class_sum_non_zero_map = get_class_sum_features_and_counts(
            flatten_features, one_hot_label
        )  # [N, class, C]

        if self.use_batch_class_center:
            # Average class features across the whole (multi-replica) batch.
            replica_context = tf.distribute.get_replica_context()

            class_sum_features_in_cross_batch = tf.reduce_sum(
                class_sum_features, axis=0, keepdims=True, name="class_sum_features_in_cross_batch"
            )
            class_sum_non_zero_map_in_cross_batch = tf.reduce_sum(
                class_sum_non_zero_map, axis=0, keepdims=True, name="class_sum_non_zero_map_in_cross_batch"
            )

            if replica_context:
                class_sum_features_in_cross_batch = replica_context.all_reduce(
                    tf.distribute.ReduceOp.SUM, class_sum_features_in_cross_batch
                )
                class_sum_non_zero_map_in_cross_batch = replica_context.all_reduce(
                    tf.distribute.ReduceOp.SUM, class_sum_non_zero_map_in_cross_batch
                )

            class_avg_features_in_cross_batch = tf.math.divide_no_nan(
                class_sum_features_in_cross_batch, class_sum_non_zero_map_in_cross_batch
            )  # [1, class, C]

            if self.use_last_class_center:
                # EMA update of the persistent class centers; classes absent
                # from this batch (count == 0) are left untouched.
                batch_class_ignore_mask = tf.cast(class_sum_non_zero_map_in_cross_batch != 0, tf.int32)
                class_center_diff = class_avg_features_in_cross_batch - tf.cast(self.last_class_center, class_avg_features_in_cross_batch.dtype)
                class_center_diff *= (1 - self.last_class_center_decay) * tf.cast(batch_class_ignore_mask, class_center_diff.dtype)
                self.last_class_center.assign_add(class_center_diff)
                class_avg_features_in_cross_batch = tf.cast(self.last_class_center, tf.float32)

            class_avg_features = class_avg_features_in_cross_batch
        else:
            # Per-sample class centers only.
            class_avg_features = tf.math.divide_no_nan(
                class_sum_features, class_sum_non_zero_map
            )  # [N, class, C]

        ####################################################################################

        if self.use_inter_class_loss and training:
            inter_class_relative_loss = 0

            if self.use_inter_c2c_loss:
                inter_class_relative_loss += get_inter_class_relative_loss(
                    class_avg_features, inter_c2c_loss_threshold=self.inter_c2c_loss_threshold,
                )

            if self.use_inter_c2p_loss:
                inter_class_relative_loss += get_pixel_inter_class_relative_loss(
                    flatten_features, class_avg_features, one_hot_label, inter_c2p_loss_threshold=self.inter_c2p_loss_threshold,
                )

            self.add_loss(inter_class_relative_loss * self.inter_class_loss_rate)
            self.add_metric(inter_class_relative_loss, name=f"{loss_name_prefix}_orl")

        if self.use_intra_class_loss:
            # Broadcast each pixel's own class center back onto the pixel grid.
            same_avg_value = tf.matmul(one_hot_label, class_avg_features)
            tf.debugging.check_numerics(same_avg_value, "same_avg_value contains nan or inf")

            self_absolute_loss = get_intra_class_absolute_loss(
                flatten_features,
                same_avg_value,
                remove_max_value=self.intra_class_loss_remove_max,
                not_ignore_spatial_mask=not_ignore_spatial_mask,
            )

            if training:
                self.add_loss(self_absolute_loss * self.intra_class_loss_rate)

            self.add_metric(self_absolute_loss, name=f"{loss_name_prefix}_sal")
            print("Using self-loss")

    def build(self, input_shape):
        """Create the projection conv(s) and the optional persistent class-center
        variable once the input feature width is known."""
        # Note that, this is not the best design for specified architecture,
        # but a trade-off for generalizability: the channel width is capped
        # at ``self.filters``.
        channels = input_shape[0][-1]
        channels = self.filters if channels > self.filters else channels
        print(f"car channels = {channels}")

        self.linear_conv = tf.keras.layers.Conv2D(channels, (1, 1), use_bias=True, name="linear_conv",)

        if self.apply_convs:
            self.end_conv = tf.keras.layers.Conv2D(channels, (1, 1), use_bias=False, name="end_conv",)
            self.end_norm = normalization(name="end_norm")

        if self.use_last_class_center:
            # Non-trainable EMA state updated in add_car_losses().
            self.last_class_center = self.add_weight(
                name="last_class_center",
                shape=[1, self.num_class, channels],
                dtype=tf.float32,
                initializer=tf.keras.initializers.GlorotUniform(),
                trainable=False,
            )

    def call(self, inputs, training=None):
        """Project the features; in train mode additionally register CAR
        losses at every configured pooling rate.

        ``inputs`` is a (features, label) pair; the label is only consumed
        by the loss computation.
        """
        inputs, label = inputs

        x = inputs

        # This linear conv (w/o norm&activation) can be merged
        # to the next one (end_conv) during inference
        # Simple (x * w0 + b) * w1 dot product
        # We keep it for better understanding
        x = self.linear_conv(x)

        y = tf.identity(x)

        if self.train_mode and get_training_value(training):
            x = tf.cast(x, tf.float32)
            tf.debugging.check_numerics(x, "inputs contains nan or inf")

            # Iterate the rates directly (was an index loop over range(len(...))).
            for pooling_rate in self.pooling_rates:
                sub_x = tf.identity(x, name=f"x_in_rate_{pooling_rate}")
                if pooling_rate > 1:
                    stride_size = (1, pooling_rate, pooling_rate, 1)
                    sub_x = tf.nn.avg_pool2d(sub_x, stride_size, stride_size, padding="SAME")
                self.add_car_losses(sub_x, label=label, extra_prefix=str(pooling_rate), training=training)

        if self.apply_convs:
            y = self.end_conv(y)
            y = self.end_norm(y, training=training)
            y = tf.nn.relu(y)

        return y
| [
"tensorflow.shape",
"tensorflow.reduce_sum",
"tensorflow.math.divide_no_nan",
"tensorflow.cast",
"car_core.utils.get_intra_class_absolute_loss",
"tensorflow.nn.avg_pool2d",
"iseg.vis.vismanager.get_visualization_manager",
"tensorflow.keras.layers.Conv2D",
"iseg.layers.model_builder.get_training_valu... | [((1435, 1462), 'iseg.vis.vismanager.get_visualization_manager', 'get_visualization_manager', ([], {}), '()\n', (1460, 1462), False, 'from iseg.vis.vismanager import get_visualization_manager\n'), ((3980, 4008), 'iseg.layers.model_builder.get_training_value', 'get_training_value', (['training'], {}), '(training)\n', (3998, 4008), False, 'from iseg.layers.model_builder import resize_image, get_training_value\n'), ((4182, 4200), 'tensorflow.shape', 'tf.shape', (['features'], {}), '(features)\n', (4190, 4200), True, 'import tensorflow as tf\n'), ((4285, 4339), 'iseg.layers.model_builder.resize_image', 'resize_image', (['label', '(height, width)'], {'method': '"""nearest"""'}), "(label, (height, width), method='nearest')\n", (4297, 4339), False, 'from iseg.layers.model_builder import resize_image, get_training_value\n'), ((4349, 4418), 'tensorflow.debugging.check_numerics', 'tf.debugging.check_numerics', (['features', '"""features contains nan or inf"""'], {}), "(features, 'features contains nan or inf')\n", (4376, 4418), True, 'import tensorflow as tf\n'), ((4660, 4755), 'car_core.utils.get_flatten_one_hot_label', 'get_flatten_one_hot_label', (['label'], {'num_class': 'self.num_class', 'ignore_label': 'self.ignore_label'}), '(label, num_class=self.num_class, ignore_label=\n self.ignore_label)\n', (4685, 4755), False, 'from car_core.utils import get_flatten_one_hot_label, get_class_sum_features_and_counts, get_inter_class_relative_loss, get_intra_class_absolute_loss, get_pixel_inter_class_relative_loss\n'), ((4939, 5005), 'car_core.utils.get_class_sum_features_and_counts', 'get_class_sum_features_and_counts', (['flatten_features', 'one_hot_label'], {}), '(flatten_features, one_hot_label)\n', (4972, 5005), False, 'from car_core.utils import get_flatten_one_hot_label, get_class_sum_features_and_counts, get_inter_class_relative_loss, get_intra_class_absolute_loss, get_pixel_inter_class_relative_loss\n'), ((8860, 8935), 
'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['channels', '(1, 1)'], {'use_bias': '(True)', 'name': '"""linear_conv"""'}), "(channels, (1, 1), use_bias=True, name='linear_conv')\n", (8882, 8935), True, 'import tensorflow as tf\n'), ((9833, 9847), 'tensorflow.identity', 'tf.identity', (['x'], {}), '(x)\n', (9844, 9847), True, 'import tensorflow as tf\n'), ((4503, 4527), 'tensorflow.cast', 'tf.cast', (['label', 'tf.int32'], {}), '(label, tf.int32)\n', (4510, 4527), True, 'import tensorflow as tf\n'), ((5117, 5152), 'tensorflow.distribute.get_replica_context', 'tf.distribute.get_replica_context', ([], {}), '()\n', (5150, 5152), True, 'import tensorflow as tf\n'), ((5202, 5305), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['class_sum_features'], {'axis': '(0)', 'keepdims': '(True)', 'name': '"""class_sum_features_in_cross_batch"""'}), "(class_sum_features, axis=0, keepdims=True, name=\n 'class_sum_features_in_cross_batch')\n", (5215, 5305), True, 'import tensorflow as tf\n'), ((5383, 5494), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['class_sum_non_zero_map'], {'axis': '(0)', 'keepdims': '(True)', 'name': '"""class_sum_non_zero_map_in_cross_batch"""'}), "(class_sum_non_zero_map, axis=0, keepdims=True, name=\n 'class_sum_non_zero_map_in_cross_batch')\n", (5396, 5494), True, 'import tensorflow as tf\n'), ((5970, 6069), 'tensorflow.math.divide_no_nan', 'tf.math.divide_no_nan', (['class_sum_features_in_cross_batch', 'class_sum_non_zero_map_in_cross_batch'], {}), '(class_sum_features_in_cross_batch,\n class_sum_non_zero_map_in_cross_batch)\n', (5991, 6069), True, 'import tensorflow as tf\n'), ((6839, 6904), 'tensorflow.math.divide_no_nan', 'tf.math.divide_no_nan', (['class_sum_features', 'class_sum_non_zero_map'], {}), '(class_sum_features, class_sum_non_zero_map)\n', (6860, 6904), True, 'import tensorflow as tf\n'), ((7881, 7925), 'tensorflow.matmul', 'tf.matmul', (['one_hot_label', 'class_avg_features'], {}), '(one_hot_label, class_avg_features)\n', (7890, 
7925), True, 'import tensorflow as tf\n'), ((7939, 8024), 'tensorflow.debugging.check_numerics', 'tf.debugging.check_numerics', (['same_avg_value', '"""same_avg_value contains nan or inf"""'], {}), "(same_avg_value,\n 'same_avg_value contains nan or inf')\n", (7966, 8024), True, 'import tensorflow as tf\n'), ((8055, 8226), 'car_core.utils.get_intra_class_absolute_loss', 'get_intra_class_absolute_loss', (['flatten_features', 'same_avg_value'], {'remove_max_value': 'self.intra_class_loss_remove_max', 'not_ignore_spatial_mask': 'not_ignore_spatial_mask'}), '(flatten_features, same_avg_value,\n remove_max_value=self.intra_class_loss_remove_max,\n not_ignore_spatial_mask=not_ignore_spatial_mask)\n', (8084, 8226), False, 'from car_core.utils import get_flatten_one_hot_label, get_class_sum_features_and_counts, get_inter_class_relative_loss, get_intra_class_absolute_loss, get_pixel_inter_class_relative_loss\n'), ((8995, 9068), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['channels', '(1, 1)'], {'use_bias': '(False)', 'name': '"""end_conv"""'}), "(channels, (1, 1), use_bias=False, name='end_conv')\n", (9017, 9068), True, 'import tensorflow as tf\n'), ((9098, 9128), 'iseg.layers.normalizations.normalization', 'normalization', ([], {'name': '"""end_norm"""'}), "(name='end_norm')\n", (9111, 9128), False, 'from iseg.layers.normalizations import normalization\n'), ((9880, 9908), 'iseg.layers.model_builder.get_training_value', 'get_training_value', (['training'], {}), '(training)\n', (9898, 9908), False, 'from iseg.layers.model_builder import resize_image, get_training_value\n'), ((9927, 9949), 'tensorflow.cast', 'tf.cast', (['x', 'tf.float32'], {}), '(x, tf.float32)\n', (9934, 9949), True, 'import tensorflow as tf\n'), ((9963, 10023), 'tensorflow.debugging.check_numerics', 'tf.debugging.check_numerics', (['x', '"""inputs contains nan or inf"""'], {}), "(x, 'inputs contains nan or inf')\n", (9990, 10023), True, 'import tensorflow as tf\n'), ((10697, 10710), 
'tensorflow.nn.relu', 'tf.nn.relu', (['y'], {}), '(y)\n', (10707, 10710), True, 'import tensorflow as tf\n'), ((6200, 6261), 'tensorflow.cast', 'tf.cast', (['(class_sum_non_zero_map_in_cross_batch != 0)', 'tf.int32'], {}), '(class_sum_non_zero_map_in_cross_batch != 0, tf.int32)\n', (6207, 6261), True, 'import tensorflow as tf\n'), ((6679, 6722), 'tensorflow.cast', 'tf.cast', (['self.last_class_center', 'tf.float32'], {}), '(self.last_class_center, tf.float32)\n', (6686, 6722), True, 'import tensorflow as tf\n'), ((7227, 7337), 'car_core.utils.get_inter_class_relative_loss', 'get_inter_class_relative_loss', (['class_avg_features'], {'inter_c2c_loss_threshold': 'self.inter_c2c_loss_threshold'}), '(class_avg_features, inter_c2c_loss_threshold=\n self.inter_c2c_loss_threshold)\n', (7256, 7337), False, 'from car_core.utils import get_flatten_one_hot_label, get_class_sum_features_and_counts, get_inter_class_relative_loss, get_intra_class_absolute_loss, get_pixel_inter_class_relative_loss\n'), ((7458, 7606), 'car_core.utils.get_pixel_inter_class_relative_loss', 'get_pixel_inter_class_relative_loss', (['flatten_features', 'class_avg_features', 'one_hot_label'], {'inter_c2p_loss_threshold': 'self.inter_c2p_loss_threshold'}), '(flatten_features, class_avg_features,\n one_hot_label, inter_c2p_loss_threshold=self.inter_c2p_loss_threshold)\n', (7493, 7606), False, 'from car_core.utils import get_flatten_one_hot_label, get_class_sum_features_and_counts, get_inter_class_relative_loss, get_intra_class_absolute_loss, get_pixel_inter_class_relative_loss\n'), ((10208, 10256), 'tensorflow.identity', 'tf.identity', (['x'], {'name': 'f"""x_in_rate_{pooling_rate}"""'}), "(x, name=f'x_in_rate_{pooling_rate}')\n", (10219, 10256), True, 'import tensorflow as tf\n'), ((6351, 6423), 'tensorflow.cast', 'tf.cast', (['self.last_class_center', 'class_avg_features_in_cross_batch.dtype'], {}), '(self.last_class_center, class_avg_features_in_cross_batch.dtype)\n', (6358, 6423), True, 'import 
tensorflow as tf\n'), ((6498, 6555), 'tensorflow.cast', 'tf.cast', (['batch_class_ignore_mask', 'class_center_diff.dtype'], {}), '(batch_class_ignore_mask, class_center_diff.dtype)\n', (6505, 6555), True, 'import tensorflow as tf\n'), ((9380, 9417), 'tensorflow.keras.initializers.GlorotUniform', 'tf.keras.initializers.GlorotUniform', ([], {}), '()\n', (9415, 9417), True, 'import tensorflow as tf\n'), ((10392, 10457), 'tensorflow.nn.avg_pool2d', 'tf.nn.avg_pool2d', (['sub_x', 'stride_size', 'stride_size'], {'padding': '"""SAME"""'}), "(sub_x, stride_size, stride_size, padding='SAME')\n", (10408, 10457), True, 'import tensorflow as tf\n')] |
from serendipity.linear_structures.singly_linked_list import LinkedList
class Set:
    """A set implemented on top of a singly linked list.

    Duplicates are rejected at insertion time, so the underlying list
    always holds distinct values. All operations are O(n) scans.
    """

    def __init__(self):
        self._list = LinkedList()  # backing storage, distinct elements only

    def get_size(self):
        """Return the number of elements currently stored."""
        return self._list.get_size()

    def is_empty(self):
        """Return True when the set holds no elements."""
        return self._list.is_empty()

    def contains(self, e):
        """Return True when ``e`` is already in the set."""
        return self._list.contains(e)

    def add(self, e):
        """Insert ``e`` unless it is already present."""
        if self._list.contains(e):
            return
        self._list.add_first(e)

    # def remove(self, e):
    #     """Could be delegated to the linked list's element removal;
    #     intentionally left unimplemented here."""
    #     pass
| [
"serendipity.linear_structures.singly_linked_list.LinkedList"
] | [((130, 142), 'serendipity.linear_structures.singly_linked_list.LinkedList', 'LinkedList', ([], {}), '()\n', (140, 142), False, 'from serendipity.linear_structures.singly_linked_list import LinkedList\n')] |
#!/usr/bin/env python
# coding: utf-8
import argparse
import elitech
import datetime
from elitech.msg import (
StopButton,
ToneSet,
AlarmSetting,
TemperatureUnit,
)
from elitech.msg import _bin
import six
import os
def main():
    """Entry point: parse the CLI arguments and dispatch on --command."""
    args = parse_args()
    handlers = {
        'simple-set': command_simpleset,
        'get': command_get,
        'set': command_set,
        'devinfo': command_devinfo,
        'clock': command_clock,
        'raw': command_raw_send,
        'latest': command_latest,
    }
    handler = handlers.get(args.command)
    # Unknown / unhandled commands (e.g. 'init') are silently ignored,
    # matching the original if/elif chain.
    if handler is not None:
        handler(args)
def _convert_time(sec):
hour = int(sec / 3600.0)
min = int((sec - hour * 3600) / 60.0)
sec = sec % 60
return datetime.time(hour=hour, minute=min, second=sec)
def command_simpleset(args):
    """Sync the device clock and optionally update the record interval."""
    device = elitech.Device(args.serial_port, args.ser_baudrate, args.ser_timeout)
    device.init()
    info = device.get_devinfo()
    device.set_clock(info.station_no)
    params = info.to_param_put()
    if args.interval:
        params.rec_interval = _convert_time(args.interval)
    device.update(params)
def command_get(args):
    """Dump every recorded sample as tab-separated lines on stdout."""
    device = elitech.Device(args.serial_port, args.ser_baudrate, args.ser_timeout)
    device.init()

    def emit(records):
        # Records have 3 fields (no, timestamp, temp) or 4 (plus humidity).
        for rec in records:
            if len(rec) == 3:
                print("{0}\t{1:%Y-%m-%d %H:%M:%S}\t{2:.1f}".format(*rec))
            elif len(rec) == 4:
                print("{0}\t{1:%Y-%m-%d %H:%M:%S}\t{2:.1f}\t{3:.1f}".format(*rec))

    kwargs = {'callback': emit}
    if args.page_size:
        kwargs['page_size'] = args.page_size
    device.get_data(**kwargs)
def command_latest(args):
    """Print the most recent sample; --value_only suppresses no/timestamp."""
    device = elitech.Device(args.serial_port, args.ser_baudrate, args.ser_timeout)
    device.init()

    def emit(latest):
        # 3 fields = temperature only, 4 fields = temperature + humidity.
        if len(latest) == 3:
            fmt = "{2:.1f}" if args.value_only else "{0}\t{1:%Y-%m-%d %H:%M:%S}\t{2:.1f}"
            print(fmt.format(*latest))
        elif len(latest) == 4:
            fmt = "{2:.1f}\t{3:.1f}" if args.value_only else "{0}\t{1:%Y-%m-%d %H:%M:%S}\t{2:.1f}\t{3:.1f}"
            print(fmt.format(*latest))

    kwargs = {'callback': emit}
    if args.page_size:
        kwargs['page_size'] = args.page_size
    device.get_latest(**kwargs)
def command_set(args):
    """Apply every parameter given on the command line to the device.

    Reads the current device parameters, overrides only the fields the
    user supplied, prints each resulting field as ``key=value``, writes
    the parameter block back, and finally (optionally) updates the
    device number and user-info strings.
    """
    device = elitech.Device(args.serial_port, args.ser_baudrate, args.ser_timeout)
    device.encode = args.encode
    device.init()
    dev_info = device.get_devinfo()
    # Start from the device's current settings so unspecified fields keep
    # their existing values.
    param_put = dev_info.to_param_put()
    station_no = dev_info.station_no
    if args.interval is not None:
        param_put.rec_interval = _convert_time(args.interval)
    if args.upper_limit is not None:
        param_put.upper_limit = args.upper_limit
    if args.lower_limit is not None:
        param_put.lower_limit = args.lower_limit
    if args.station_no is not None:
        param_put.update_station_no = int(args.station_no)
        # Later writes (device number / user info) must target the NEW station.
        station_no = param_put.update_station_no
    if args.stop_button is not None:
        param_put.stop_button = StopButton.ENABLE if args.stop_button == 'y' else StopButton.DISABLE
    if args.delay is not None:
        param_put.delay = float(args.delay)
    if args.tone_set is not None:
        param_put.tone_set = ToneSet.PERMIT if args.tone_set == 'y' else ToneSet.NONE
    if args.alarm is not None:
        # 'x' disables the alarm; '3'/'10' select the alarm tone variant.
        if args.alarm == 'x':
            param_put.alarm = AlarmSetting.NONE
        elif args.alarm == '3':
            param_put.alarm = AlarmSetting.T3
        elif args.alarm == '10':
            param_put.alarm = AlarmSetting.T10
    if args.temp_unit is not None:
        param_put.temp_unit = TemperatureUnit.C if args.temp_unit == 'C' else TemperatureUnit.F
    if args.temp_calibration is not None:
        param_put.temp_calibration = float(args.temp_calibration)
    # NOTE(review): humidity options use truthiness, not ``is not None`` —
    # a 0 value is therefore ignored; presumably intentional, confirm.
    if args.humi_upper_limit:
        param_put.humi_upper_limit = args.humi_upper_limit
    if args.humi_lower_limit:
        param_put.humi_lower_limit = args.humi_lower_limit
    if args.humi_calibration:
        param_put.humi_calibration = float(args.humi_calibration)
    # Echo the full parameter block before sending it.
    for k,v in vars(param_put).items():
        print("{}={}".format(k, v))
    device.update(param_put)
    if args.dev_num is not None:
        device.set_device_number(station_no, args.dev_num)
        print("{}={}".format("dev_num", args.dev_num))
    if args.user_info is not None:
        # On Python 2 the argument may arrive as bytes; decode to text first.
        if type(args.user_info) == six.binary_type:
            args.user_info = args.user_info.decode("utf-8")
        device.set_user_info(station_no, args.user_info)
        print(u"{}={}".format("user_info", args.user_info))
def command_devinfo(args):
    """Print every public device-info field as sorted ``key=value`` lines."""
    device = elitech.Device(args.serial_port, args.ser_baudrate, args.ser_timeout)
    device.encode = args.encode
    device.init()
    info = device.get_devinfo()
    for key, value in sorted(vars(info).items()):
        if key.startswith("_"):
            continue  # skip private attributes
        print(u"{}={}".format(key, value))
def command_clock(args):
    """Set the device clock to --time (YYYYmmddHHMMSS) or, when omitted, let
    the driver pick its default (None is passed through)."""
    device = elitech.Device(args.serial_port, args.ser_baudrate, args.ser_timeout)
    info = device.get_devinfo()
    clock = None
    if args.time:
        clock = datetime.datetime.strptime(args.time, '%Y%m%d%H%M%S')
    device.set_clock(info.station_no, clock)
def command_raw_send(args):
    """Send a raw request to the device and hex-dump the response, 16 bytes
    per line."""
    device = elitech.Device(args.serial_port, args.ser_baudrate, args.ser_timeout)
    request_bytes = _bin(args.req)
    res = device.raw_send(request_bytes, args.res_len)
    print("\nresponse length={}".format(len(res)))
    for count, b in enumerate(res, start=1):
        if six.PY2:
            # Python 2 iterates bytes as 1-char strings; ord() gets the value.
            six.print_("{:02X} ".format(ord(b)), sep='', end='')
        else:
            six.print_("{:02X} ".format(b), end='')
        if count % 16 == 0:
            six.print_()
    six.print_()
def parse_args():
    """Build the CLI and parse ``sys.argv``.

    :rtype: argparse.Namespace

    Bug fix: the program description was being passed as the first
    positional argument of ``ArgumentParser`` (which is ``prog``), so it
    replaced the program name instead of appearing as the help
    description. It is now passed via ``description=``.
    """
    parser = argparse.ArgumentParser(description='Elitech RC-4 / RC-5 data reader')
    parser.add_argument('-c', "--command", choices=['init', 'get', 'latest', 'simple-set', 'set', 'devinfo', 'clock', 'raw'])
    parser.add_argument('-i', "--interval", type=int)
    parser.add_argument("--upper_limit", type=float)
    parser.add_argument("--lower_limit", type=float)
    parser.add_argument("--station_no", type=int)
    parser.add_argument("--stop_button", choices=['y', 'n'])
    parser.add_argument("--delay", choices=['0.0', '0.5', '1.0', '1.5', '2.0', '2.5', '3.0', '3.5', '4.0', '4.5', '5.0', '5.5', '6.0'])
    parser.add_argument('--tone_set', choices=['y', 'n'])
    parser.add_argument('--alarm', choices=['x', '3', '10'])
    parser.add_argument('--temp_unit', choices=['C', 'F'])
    parser.add_argument('--temp_calibration', type=float)
    parser.add_argument("--humi_upper_limit", type=float)
    parser.add_argument("--humi_lower_limit", type=float)
    parser.add_argument('--humi_calibration', type=float)
    parser.add_argument('--time', type=str)
    parser.add_argument('--dev_num', type=str)
    parser.add_argument('--user_info', type=str)
    parser.add_argument('--encode', type=str, default='utf8', help='user_info encode')
    parser.add_argument('--page_size', type=int, help='for command get')
    parser.add_argument('--req', type=str, help='for raw command')
    parser.add_argument('--res_len', type=int, help='for raw command', default=1000)
    parser.add_argument('--value_only', help='for latest command', action='store_true')
    parser.add_argument('--ser_baudrate', help='serial port baudrate default=115200', default=115200, type=int)
    parser.add_argument('--ser_timeout', help='serial port reading timeout sec', default=5, type=int)
    parser.add_argument('serial_port')
    return parser.parse_args()
# Allow the module to be run directly as a CLI script.
if __name__ == '__main__':
    main()
| [
"elitech.msg._bin",
"datetime.time",
"argparse.ArgumentParser",
"datetime.datetime.strptime",
"six.print_",
"elitech.Device"
] | [((844, 892), 'datetime.time', 'datetime.time', ([], {'hour': 'hour', 'minute': 'min', 'second': 'sec'}), '(hour=hour, minute=min, second=sec)\n', (857, 892), False, 'import datetime\n'), ((936, 1005), 'elitech.Device', 'elitech.Device', (['args.serial_port', 'args.ser_baudrate', 'args.ser_timeout'], {}), '(args.serial_port, args.ser_baudrate, args.ser_timeout)\n', (950, 1005), False, 'import elitech\n'), ((1295, 1364), 'elitech.Device', 'elitech.Device', (['args.serial_port', 'args.ser_baudrate', 'args.ser_timeout'], {}), '(args.serial_port, args.ser_baudrate, args.ser_timeout)\n', (1309, 1364), False, 'import elitech\n'), ((1847, 1916), 'elitech.Device', 'elitech.Device', (['args.serial_port', 'args.ser_baudrate', 'args.ser_timeout'], {}), '(args.serial_port, args.ser_baudrate, args.ser_timeout)\n', (1861, 1916), False, 'import elitech\n'), ((2573, 2642), 'elitech.Device', 'elitech.Device', (['args.serial_port', 'args.ser_baudrate', 'args.ser_timeout'], {}), '(args.serial_port, args.ser_baudrate, args.ser_timeout)\n', (2587, 2642), False, 'import elitech\n'), ((4894, 4963), 'elitech.Device', 'elitech.Device', (['args.serial_port', 'args.ser_baudrate', 'args.ser_timeout'], {}), '(args.serial_port, args.ser_baudrate, args.ser_timeout)\n', (4908, 4963), False, 'import elitech\n'), ((5212, 5281), 'elitech.Device', 'elitech.Device', (['args.serial_port', 'args.ser_baudrate', 'args.ser_timeout'], {}), '(args.serial_port, args.ser_baudrate, args.ser_timeout)\n', (5226, 5281), False, 'import elitech\n'), ((5528, 5597), 'elitech.Device', 'elitech.Device', (['args.serial_port', 'args.ser_baudrate', 'args.ser_timeout'], {}), '(args.serial_port, args.ser_baudrate, args.ser_timeout)\n', (5542, 5597), False, 'import elitech\n'), ((5619, 5633), 'elitech.msg._bin', '_bin', (['args.req'], {}), '(args.req)\n', (5623, 5633), False, 'from elitech.msg import _bin\n'), ((5985, 5997), 'six.print_', 'six.print_', ([], {}), '()\n', (5995, 5997), False, 'import six\n'), ((6077, 6147), 
'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""description Elitech RC-4 / RC-5 data reader"""'], {}), "('description Elitech RC-4 / RC-5 data reader')\n", (6100, 6147), False, 'import argparse\n'), ((5352, 5405), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['args.time', '"""%Y%m%d%H%M%S"""'], {}), "(args.time, '%Y%m%d%H%M%S')\n", (5378, 5405), False, 'import datetime\n'), ((5967, 5979), 'six.print_', 'six.print_', ([], {}), '()\n', (5977, 5979), False, 'import six\n')] |
#!/usr/bin/env python
import sys
import string
import re
# Streaming mapper: read CSV rows from stdin and emit
# "<licence_type>\t<amount_due>" pairs. Rows containing quotes are split
# with a regex that ignores commas inside quoted fields.
for row in sys.stdin:
    if '"' in row:
        fields = re.split(''',(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', row)
    else:
        fields = row.split(",")
    licence_type = fields[2]
    amount_due = fields[-6]
    print("%s\t%s" % (licence_type, amount_due))
| [
"re.split"
] | [((110, 164), 're.split', 're.split', (['""",(?=(?:[^\'"]|\'[^\']*\'|"[^"]*")*$)"""', 'line'], {}), '(\',(?=(?:[^\\\'"]|\\\'[^\\\']*\\\'|"[^"]*")*$)\', line)\n', (118, 164), False, 'import re\n')] |
from flask_restplus import fields, Model
def add_models_to_namespace(namespace):
    """Register the route-request model on ``namespace`` under its own name."""
    model = route_request_model
    namespace.models[model.name] = model
# Swagger/serialization model describing a single route request.
# Registered on an API namespace via ``add_models_to_namespace``.
route_request_model = Model("Represents a Route Request", {
    "id": fields.Integer(description="Unique identifier for the ride"),
    "start_point_lat": fields.Float(description="Represents the latitude of the starting point"),
    "start_point_long": fields.Float(description="Represents the longitude of the starting point"),
    "end_point_lat": fields.Float(description="Represents the latitude of the ending point"),
    "end_point_long": fields.Float(description="Represents the longitude of the ending point")
})
| [
"flask_restplus.fields.Float",
"flask_restplus.fields.Integer"
] | [((224, 284), 'flask_restplus.fields.Integer', 'fields.Integer', ([], {'description': '"""Unique identifier for the ride"""'}), "(description='Unique identifier for the ride')\n", (238, 284), False, 'from flask_restplus import fields, Model\n'), ((309, 382), 'flask_restplus.fields.Float', 'fields.Float', ([], {'description': '"""Represents the latitude of the starting point"""'}), "(description='Represents the latitude of the starting point')\n", (321, 382), False, 'from flask_restplus import fields, Model\n'), ((408, 482), 'flask_restplus.fields.Float', 'fields.Float', ([], {'description': '"""Represents the longitude of the starting point"""'}), "(description='Represents the longitude of the starting point')\n", (420, 482), False, 'from flask_restplus import fields, Model\n'), ((505, 576), 'flask_restplus.fields.Float', 'fields.Float', ([], {'description': '"""Represents the latitude of the ending point"""'}), "(description='Represents the latitude of the ending point')\n", (517, 576), False, 'from flask_restplus import fields, Model\n'), ((600, 672), 'flask_restplus.fields.Float', 'fields.Float', ([], {'description': '"""Represents the longitude of the ending point"""'}), "(description='Represents the longitude of the ending point')\n", (612, 672), False, 'from flask_restplus import fields, Model\n')] |
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Module
'''
# ===================================
# Advanced nn.Sequential
# reform nn.Sequentials and nn.Modules
# to a single nn.Sequential
# ===================================
'''
def sequential(*args):
    """Flatten a mix of ``nn.Module`` / ``nn.Sequential`` arguments into a
    single ``nn.Sequential``.

    A single argument is returned unchanged; ``OrderedDict`` input is
    rejected. Arguments that are neither kind of module are ignored.
    """
    if len(args) == 1:
        if isinstance(args[0], OrderedDict):
            raise NotImplementedError('sequential does not support OrderedDict input.')
        return args[0]  # nothing to merge
    flattened = []
    for item in args:
        if isinstance(item, nn.Sequential):
            flattened.extend(item.children())
        elif isinstance(item, nn.Module):
            flattened.append(item)
    return nn.Sequential(*flattened)
'''
# ===================================
# Useful blocks
# --------------------------------
# conv (+ normaliation + relu)
# concat
# sum
# resblock (ResBlock)
# resdenseblock (ResidualDenseBlock_5C)
# resinresdenseblock (RRDB)
# ===================================
'''
# -------------------------------------------------------
# return nn.Sequantial of (Conv + BN + ReLU)
# -------------------------------------------------------
def conv(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='CBR'):
    """Build a layer stack from the characters of ``mode``.

    Each character selects one layer:
      C/T : Conv2d / ConvTranspose2d (using the size/stride/padding args)
      B/I : BatchNorm2d / InstanceNorm2d on ``out_channels``
      R/r : ReLU (inplace / not);  L/l : LeakyReLU(0.1) (inplace / not)
      2/3/4 : PixelShuffle with that upscale factor
      U/u : nearest Upsample x2 / x3
      M/A : MaxPool2d / AvgPool2d (kernel_size, stride, padding=0)

    Returns the single layer when ``mode`` has one character, otherwise an
    ``nn.Sequential`` (via ``sequential``).

    Raises:
        NotImplementedError: for an unknown mode character.
    """
    layers = []
    for t in mode:
        if t == 'C':
            layers.append(nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias))
        elif t == 'T':
            layers.append(nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias))
        elif t == 'B':
            layers.append(nn.BatchNorm2d(out_channels, momentum=0.9, eps=1e-04, affine=True))
        elif t == 'I':
            layers.append(nn.InstanceNorm2d(out_channels, affine=True))
        elif t == 'R':
            layers.append(nn.ReLU(inplace=True))
        elif t == 'r':
            layers.append(nn.ReLU(inplace=False))
        elif t == 'L':
            layers.append(nn.LeakyReLU(negative_slope=1e-1, inplace=True))
        elif t == 'l':
            layers.append(nn.LeakyReLU(negative_slope=1e-1, inplace=False))
        elif t == '2':
            layers.append(nn.PixelShuffle(upscale_factor=2))
        elif t == '3':
            layers.append(nn.PixelShuffle(upscale_factor=3))
        elif t == '4':
            layers.append(nn.PixelShuffle(upscale_factor=4))
        elif t == 'U':
            layers.append(nn.Upsample(scale_factor=2, mode='nearest'))
        elif t == 'u':
            layers.append(nn.Upsample(scale_factor=3, mode='nearest'))
        elif t == 'M':
            layers.append(nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=0))
        elif t == 'A':
            layers.append(nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=0))
        else:
            # BUG FIX: the original message was 'Undefined type: '.format(t) —
            # the '{}' placeholder was missing, so the offending character
            # never appeared in the error text.
            raise NotImplementedError('Undefined type: {}'.format(t))
    return sequential(*layers)
class MeanShift(nn.Conv2d):
    """Fixed 1x1 conv that shifts RGB inputs by the dataset mean.

    With ``sign=-1`` the scaled mean is subtracted, with ``sign=+1`` it is
    added back. All parameters are frozen (no gradients).
    """

    def __init__(self, rgb_range=255, rgb_mean=(0.4488, 0.4371, 0.4040), rgb_std=(1.0, 1.0, 1.0), sign=-1):
        super(MeanShift, self).__init__(3, 3, kernel_size=1)
        std = torch.Tensor(rgb_std)
        mean = torch.Tensor(rgb_mean)
        # Identity mapping per channel, rescaled by the channel std.
        self.weight.data = torch.eye(3).view(3, 3, 1, 1) / std.view(3, 1, 1, 1)
        self.bias.data = sign * rgb_range * mean / std
        for param in self.parameters():
            param.requires_grad = False
# -------------------------------------------------------
# Concat the output of a submodule to its input
# -------------------------------------------------------
class ConcatBlock(nn.Module):
    """Run a submodule and concatenate its output to the input (channel dim)."""

    def __init__(self, submodule):
        super(ConcatBlock, self).__init__()
        self.sub = submodule

    def forward(self, x):
        return torch.cat((x, self.sub(x)), dim=1)

    def __repr__(self):
        return self.sub.__repr__() + 'concat'
# -------------------------------------------------------
# Elementwise sum the output of a submodule to its input
# -------------------------------------------------------
class ShortcutBlock(nn.Module):
    """Elementwise residual wrapper: returns ``x + submodule(x)``."""

    def __init__(self, submodule):
        super(ShortcutBlock, self).__init__()
        self.sub = submodule

    def forward(self, x):
        return x + self.sub(x)

    def __repr__(self):
        # Indent the submodule's repr under an 'Identity +' header.
        inner = self.sub.__repr__().replace('\n', '\n|')
        return 'Identity + \n|' + inner
# -------------------------------------------------------
# Res Block: x + conv(relu(conv(x)))
# -------------------------------------------------------
class ResBlock(nn.Module):
    """Residual block: ``x + body(x)`` where the body is built by ``conv``
    from the ``mode`` string (default conv-relu-conv)."""

    def __init__(self, in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='CRC'):
        super(ResBlock, self).__init__()
        assert in_channels == out_channels, 'Only support in_channels==out_channels.'
        if mode[0] in ['R', 'L']:
            # Lower-case the leading activation so it is non-inplace and the
            # skip input is not overwritten.
            mode = mode[0].lower() + mode[1:]
        self.res = conv(in_channels, out_channels, kernel_size, stride, padding, bias, mode)

    def forward(self, x):
        return x + self.res(x)
# -------------------------------------------------------
# Channel Attention (CA) Layer
# -------------------------------------------------------
class CALayer(nn.Module):
    """Channel Attention layer: rescale each channel by a learned gate.

    Squeeze (global average pool) -> 1x1 conv bottleneck with ``reduction``
    -> ReLU -> 1x1 conv expansion -> sigmoid -> channel-wise multiply.
    """

    def __init__(self, channel=64, reduction=16):
        super(CALayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv_fc = nn.Sequential(
            nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),
            nn.Sigmoid()
        )

    def forward(self, x):
        gate = self.conv_fc(self.avg_pool(x))
        return x * gate
# -------------------------------------------------------
# Residual Channel Attention Block (RCAB)
# -------------------------------------------------------
class RCABlock(nn.Module):
    """Residual Channel Attention Block: ``x + CA(body(x))``."""

    def __init__(self, in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='CRC', reduction=16):
        super(RCABlock, self).__init__()
        assert in_channels == out_channels, 'Only support in_channels==out_channels.'
        if mode[0] in ['R', 'L']:
            # Leading activation made non-inplace to preserve the skip input.
            mode = mode[0].lower() + mode[1:]
        self.res = conv(in_channels, out_channels, kernel_size, stride, padding, bias, mode)
        self.ca = CALayer(out_channels, reduction)

    def forward(self, x):
        return self.ca(self.res(x)) + x
# -------------------------------------------------------
# Residual Channel Attention Group (RG)
# -------------------------------------------------------
class RCAGroup(nn.Module):
    """Residual Group: ``nb`` RCA blocks plus a trailing conv, all wrapped
    in one long skip connection."""

    def __init__(self, in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='CRC', reduction=16, nb=12):
        super(RCAGroup, self).__init__()
        assert in_channels == out_channels, 'Only support in_channels==out_channels.'
        if mode[0] in ['R', 'L']:
            mode = mode[0].lower() + mode[1:]
        blocks = [RCABlock(in_channels, out_channels, kernel_size, stride, padding, bias, mode, reduction)
                  for _ in range(nb)]
        blocks.append(conv(out_channels, out_channels, mode='C'))
        self.rg = nn.Sequential(*blocks)

    def forward(self, x):
        return self.rg(x) + x
# -------------------------------------------------------
# Residual Dense Block
# style: 5 convs
# -------------------------------------------------------
class ResidualDenseBlock_5C(nn.Module):
    """Residual Dense Block with 5 convs; each stage sees all earlier features.

    The fifth conv maps back to ``nc`` channels; its output is scaled by 0.2
    and added to the block input.
    """

    def __init__(self, nc=64, gc=32, kernel_size=3, stride=1, padding=1, bias=True, mode='CR'):
        super(ResidualDenseBlock_5C, self).__init__()
        # gc: growth channels contributed by each stage.
        self.conv1 = conv(nc, gc, kernel_size, stride, padding, bias, mode)
        self.conv2 = conv(nc + gc, gc, kernel_size, stride, padding, bias, mode)
        self.conv3 = conv(nc + 2 * gc, gc, kernel_size, stride, padding, bias, mode)
        self.conv4 = conv(nc + 3 * gc, gc, kernel_size, stride, padding, bias, mode)
        # Final stage drops the trailing activation (mode[:-1]).
        self.conv5 = conv(nc + 4 * gc, nc, kernel_size, stride, padding, bias, mode[:-1])

    def forward(self, x):
        feats = [x]
        for stage in (self.conv1, self.conv2, self.conv3, self.conv4):
            feats.append(stage(torch.cat(feats, 1)))
        out = self.conv5(torch.cat(feats, 1))
        return out.mul_(0.2) + x
# -------------------------------------------------------
# Residual in Residual Dense Block
# 3x5c
# -------------------------------------------------------
class RRDB(nn.Module):
    """Residual-in-Residual Dense Block: three chained 5-conv dense blocks,
    with the chained output scaled by 0.2 and added back to the input."""

    def __init__(self, nc=64, gc=32, kernel_size=3, stride=1, padding=1, bias=True, mode='CR'):
        super(RRDB, self).__init__()
        self.RDB1 = ResidualDenseBlock_5C(nc, gc, kernel_size, stride, padding, bias, mode)
        self.RDB2 = ResidualDenseBlock_5C(nc, gc, kernel_size, stride, padding, bias, mode)
        self.RDB3 = ResidualDenseBlock_5C(nc, gc, kernel_size, stride, padding, bias, mode)

    def forward(self, x):
        residual = self.RDB3(self.RDB2(self.RDB1(x)))
        return residual.mul_(0.2) + x
'''
# ======================
# Upsampler
# ======================
'''
# -------------------------------------------------------
# conv + subp + relu
# -------------------------------------------------------
def upsample_pixelshuffle(in_channels=64, out_channels=3, kernel_size=3, stride=1, padding=1, bias=True, mode='2R'):
    """Conv followed by PixelShuffle (+ optional norm/activation).

    ``mode`` starts with the scale digit ('2'/'3'/'4'), e.g. '2R', '3BR'.
    """
    assert len(mode) < 4 and mode[0] in ['2', '3', '4'], 'mode examples: 2, 2R, 2BR, 3, ..., 4BR.'
    scale = int(mode[0])
    # The conv must emit scale**2 times the channels for PixelShuffle.
    return conv(in_channels, out_channels * scale ** 2, kernel_size, stride, padding, bias, mode='C' + mode)
# -------------------------------------------------------
# nearest_upsample + conv + relu
# -------------------------------------------------------
def upsample_upconv(in_channels=64, out_channels=3, kernel_size=3, stride=1, padding=1, bias=True, mode='2R'):
    """Nearest-neighbour upsample followed by a conv (+ optional extras).

    ``mode`` starts with the scale digit ('2' or '3'), e.g. '2R', '3BR'.
    """
    assert len(mode) < 4 and mode[0] in ['2', '3'], 'mode examples: 2, 2R, 2BR, 3, ..., 3BR.'
    upsample_code = 'UC' if mode[0] == '2' else 'uC'
    return conv(in_channels, out_channels, kernel_size, stride, padding, bias, mode=mode.replace(mode[0], upsample_code))
# -------------------------------------------------------
# convTranspose + relu
# -------------------------------------------------------
def upsample_convtranspose(in_channels=64, out_channels=3, kernel_size=2, stride=2, padding=0, bias=True, mode='2R'):
    """Transposed-conv upsampler; kernel and stride both equal the scale
    digit at the start of ``mode`` (the kernel_size/stride args are
    overridden)."""
    assert len(mode) < 4 and mode[0] in ['2', '3', '4'], 'mode examples: 2, 2R, 2BR, 3, ..., 4BR.'
    scale = int(mode[0])
    return conv(in_channels, out_channels, scale, scale, padding, bias, mode.replace(mode[0], 'T'))
'''
# ======================
# Downsampler
# ======================
'''
# -------------------------------------------------------
# strideconv + relu
# -------------------------------------------------------
def downsample_strideconv(in_channels=64, out_channels=64, kernel_size=2, stride=2, padding=0, bias=True, mode='2R'):
    """Strided-conv downsampler; kernel and stride both equal the scale
    digit at the start of ``mode`` (the kernel_size/stride args are
    overridden)."""
    assert len(mode) < 4 and mode[0] in ['2', '3', '4'], 'mode examples: 2, 2R, 2BR, 3, ..., 4BR.'
    scale = int(mode[0])
    return conv(in_channels, out_channels, scale, scale, padding, bias, mode.replace(mode[0], 'C'))
# -------------------------------------------------------
# maxpooling + conv + relu
# -------------------------------------------------------
def downsample_maxpool(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=0, bias=True, mode='2R'):
    """MaxPool by the scale digit, then conv (+ optional norm/activation)."""
    assert len(mode) < 4 and mode[0] in ['2', '3'], 'mode examples: 2, 2R, 2BR, 3, ..., 3BR.'
    pool_scale = int(mode[0])
    mode = mode.replace(mode[0], 'MC')
    pool = conv(kernel_size=pool_scale, stride=pool_scale, mode=mode[0])
    tail = conv(in_channels, out_channels, kernel_size, stride, padding, bias, mode=mode[1:])
    return sequential(pool, tail)
# -------------------------------------------------------
# averagepooling + conv + relu
# -------------------------------------------------------
def downsample_avgpool(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='2R'):
    """AvgPool by the scale digit, then conv (+ optional norm/activation)."""
    assert len(mode) < 4 and mode[0] in ['2', '3'], 'mode examples: 2, 2R, 2BR, 3, ..., 3BR.'
    pool_scale = int(mode[0])
    mode = mode.replace(mode[0], 'AC')
    pool = conv(kernel_size=pool_scale, stride=pool_scale, mode=mode[0])
    tail = conv(in_channels, out_channels, kernel_size, stride, padding, bias, mode=mode[1:])
    return sequential(pool, tail)
| [
"torch.nn.Sigmoid",
"torch.nn.ReLU",
"torch.nn.BatchNorm2d",
"torch.nn.LeakyReLU",
"torch.nn.PixelShuffle",
"torch.nn.Sequential",
"torch.eye",
"torch.Tensor",
"torch.nn.Conv2d",
"torch.nn.InstanceNorm2d",
"torch.nn.MaxPool2d",
"torch.nn.Upsample",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.A... | [((807, 830), 'torch.nn.Sequential', 'nn.Sequential', (['*modules'], {}), '(*modules)\n', (820, 830), True, 'import torch.nn as nn\n'), ((3235, 3256), 'torch.Tensor', 'torch.Tensor', (['rgb_std'], {}), '(rgb_std)\n', (3247, 3256), False, 'import torch\n'), ((5496, 5519), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (5516, 5519), True, 'import torch.nn as nn\n'), ((7411, 7429), 'torch.nn.Sequential', 'nn.Sequential', (['*RG'], {}), '(*RG)\n', (7424, 7429), True, 'import torch.nn as nn\n'), ((5574, 5639), 'torch.nn.Conv2d', 'nn.Conv2d', (['channel', '(channel // reduction)', '(1)'], {'padding': '(0)', 'bias': '(True)'}), '(channel, channel // reduction, 1, padding=0, bias=True)\n', (5583, 5639), True, 'import torch.nn as nn\n'), ((5657, 5678), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5664, 5678), True, 'import torch.nn as nn\n'), ((5696, 5761), 'torch.nn.Conv2d', 'nn.Conv2d', (['(channel // reduction)', 'channel', '(1)'], {'padding': '(0)', 'bias': '(True)'}), '(channel // reduction, channel, 1, padding=0, bias=True)\n', (5705, 5761), True, 'import torch.nn as nn\n'), ((5779, 5791), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (5789, 5791), True, 'import torch.nn as nn\n'), ((8411, 8432), 'torch.cat', 'torch.cat', (['(x, x1)', '(1)'], {}), '((x, x1), 1)\n', (8420, 8432), False, 'import torch\n'), ((8458, 8483), 'torch.cat', 'torch.cat', (['(x, x1, x2)', '(1)'], {}), '((x, x1, x2), 1)\n', (8467, 8483), False, 'import torch\n'), ((8509, 8538), 'torch.cat', 'torch.cat', (['(x, x1, x2, x3)', '(1)'], {}), '((x, x1, x2, x3), 1)\n', (8518, 8538), False, 'import torch\n'), ((8564, 8597), 'torch.cat', 'torch.cat', (['(x, x1, x2, x3, x4)', '(1)'], {}), '((x, x1, x2, x3, x4), 1)\n', (8573, 8597), False, 'import torch\n'), ((1441, 1575), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': 'stride', 
'padding': 'padding', 'bias': 'bias'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=stride, padding=padding, bias=bias)\n', (1450, 1575), True, 'import torch.nn as nn\n'), ((3381, 3403), 'torch.Tensor', 'torch.Tensor', (['rgb_mean'], {}), '(rgb_mean)\n', (3393, 3403), False, 'import torch\n'), ((1616, 1758), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'bias': 'bias'}), '(in_channels=in_channels, out_channels=out_channels,\n kernel_size=kernel_size, stride=stride, padding=padding, bias=bias)\n', (1634, 1758), True, 'import torch.nn as nn\n'), ((3284, 3296), 'torch.eye', 'torch.eye', (['(3)'], {}), '(3)\n', (3293, 3296), False, 'import torch\n'), ((1800, 1867), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {'momentum': '(0.9)', 'eps': '(0.0001)', 'affine': '(True)'}), '(out_channels, momentum=0.9, eps=0.0001, affine=True)\n', (1814, 1867), True, 'import torch.nn as nn\n'), ((1912, 1956), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['out_channels'], {'affine': '(True)'}), '(out_channels, affine=True)\n', (1929, 1956), True, 'import torch.nn as nn\n'), ((2002, 2023), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2009, 2023), True, 'import torch.nn as nn\n'), ((2069, 2091), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (2076, 2091), True, 'import torch.nn as nn\n'), ((2137, 2183), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.1)', 'inplace': '(True)'}), '(negative_slope=0.1, inplace=True)\n', (2149, 2183), True, 'import torch.nn as nn\n'), ((2230, 2277), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.1)', 'inplace': '(False)'}), '(negative_slope=0.1, inplace=False)\n', (2242, 2277), True, 'import torch.nn as nn\n'), ((2324, 2357), 'torch.nn.PixelShuffle', 
'nn.PixelShuffle', ([], {'upscale_factor': '(2)'}), '(upscale_factor=2)\n', (2339, 2357), True, 'import torch.nn as nn\n'), ((2403, 2436), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', ([], {'upscale_factor': '(3)'}), '(upscale_factor=3)\n', (2418, 2436), True, 'import torch.nn as nn\n'), ((2482, 2515), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', ([], {'upscale_factor': '(4)'}), '(upscale_factor=4)\n', (2497, 2515), True, 'import torch.nn as nn\n'), ((2561, 2604), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(scale_factor=2, mode='nearest')\n", (2572, 2604), True, 'import torch.nn as nn\n'), ((2650, 2693), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(3)', 'mode': '"""nearest"""'}), "(scale_factor=3, mode='nearest')\n", (2661, 2693), True, 'import torch.nn as nn\n'), ((2739, 2802), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': '(0)'}), '(kernel_size=kernel_size, stride=stride, padding=0)\n', (2751, 2802), True, 'import torch.nn as nn\n'), ((2848, 2911), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', ([], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': '(0)'}), '(kernel_size=kernel_size, stride=stride, padding=0)\n', (2860, 2911), True, 'import torch.nn as nn\n')] |
# -*- encoding: utf-8 -*-
"""
Created by <NAME> at 01/09/2021 at 19:51:44
Project: py_dss_tools [set, 2021]
"""
import attr
import pandas as pd
from py_dss_tools.model.other import VSource
from py_dss_tools.utils import Utils
@attr.s
class Circuit(VSource):
    """attrs-based model of an OpenDSS ``Circuit`` element (a specialised VSource).

    Attribute types are validated by attrs at construction time; defaults are
    plain numbers whose suitability is still under review (see TODO below).
    """
    _name = attr.ib(validator=attr.validators.instance_of(str), default='')
    _basekv = attr.ib(validator=attr.validators.instance_of((int, float)), default=115)
    _pu = attr.ib(validator=attr.validators.instance_of((int, float)), default=1.001)
    _phases = attr.ib(validator=attr.validators.instance_of(int), default=3)
    _bus1 = attr.ib(validator=attr.validators.instance_of(str), default='')
    _angle = attr.ib(validator=attr.validators.instance_of((int, float)), default=0)
    # TODO: review the default values (translated from Portuguese)
    _mvasc3 = attr.ib(validator=attr.validators.instance_of((int, float)), default=21000)
    _mvasc1 = attr.ib(validator=attr.validators.instance_of((int, float)), default=24000)
    # TODO: check for the existence of more than one Circuit at creation time
    # (translated from Portuguese)
    def __attrs_post_init__(self):
        """Normalise the name: strip blanks, or generate a random one when empty."""
        if self._name != '':
            self._name = Utils.remove_blank_spaces(self._name)
        else:
            self._name = 'my_circuit_' + Utils.generate_random_string()
    def to_dataframe(self):
        """Return a single-row DataFrame holding all instance attributes."""
        return pd.DataFrame.from_records([self.__dict__])
    def to_dict(self) -> dict:
        # NOTE(review): exposes the private (underscore-prefixed) attribute names.
        return self.__dict__
    def to_list(self) -> list:
        # NOTE(review): list(self.__dict__) yields attribute *names* only, not
        # their values -- confirm that is the intent.
        return list(self.__dict__)
    @property
    def name(self) -> str:
        return self._name
    @name.setter
    def name(self, value: str) -> None:
        # Validate the type first, then normalise by stripping blank spaces.
        Utils.check_instance(value, 'name', ['str'], )
        self._name = Utils.remove_blank_spaces(value)
    @property
    def basekv(self):
        return self._basekv
    @basekv.setter
    def basekv(self, value):
        self._basekv = value
    @property
    def phases(self):
        return self._phases
    @phases.setter
    def phases(self, value):
        self._phases = value
# @property
# def df_lines(self):
# return self._df_lines
# @df_lines.setter
# def df_lines(self, value):
# a_series = pd.Series(value, index=self._df_lines.columns)
# self._df_lines = self._df_lines.append(a_series, ignore_index=True)
# def create_circuit(self, dss_file):
# self.dss.text("compile [{}]".format(dss_file))
#
# def get_all_buses(self):
# buses = Bus(self.dss)
# return buses.get_buses()
# def get_all_lines(self):
# self.dss.lines_first()
# while self.dss.lines_next() != 0:
# print(self.dss.lines_read_phases())
# print(self.dss.lines_read_units())
#
# def reset(self):
# """
# Resets all Monitors, Energymeters, etc. If no argument specified, resets all options listed.
# :return:
# """
# self.dss.text("reset")
#
# def sample(self):
# """
# Force all monitors and meters to take a sample for the most recent solution. Keep in mind that meters will
# perform integration.
# :return:
# """
# self.dss.text("sample")
#
# def seq_currents(self):
# """
# Returns the sequence currents into all terminals of the active circuit element (see Select command) in Result
# string. Returned as comma-separated magnitude only values.Order of returned values: 0, 1, 2 (for each
# terminal).
# :return:
# """
# aux = self.dss.text("seqcurrents").strip().replace(" ", "").split(sep=",")
# seq_currents = list()
# for n in range(len(aux)):
# if aux[n] != '':
# seq_currents.append(float(aux[n]))
# return seq_currents
#
# def seq_powers(self):
# """
# Returns the sequence powers into all terminals of the active circuit element (see Select command) in Result
# string. Returned as comma-separated kw, kvar pairs.Order of returned values: 0, 1, 2 (for each terminal).
# :return:
# """
# aux = self.dss.text("seqpowers").strip().replace(" ", "").split(sep=",")
# seq_powers = list()
# for n in range(len(aux)):
# if aux[n] != '':
# seq_powers.append(float(aux[n]))
# return seq_powers
#
# def seq_voltages(self):
# """
# Returns the sequence voltages at all terminals of the active circuit element (see Select command) in Result
# string. Returned as comma-separated magnitude only values.Order of returned values: 0, 1, 2 (for each
# terminal).
# :return:
# """
# aux = self.dss.text("seqvoltages").strip().replace(" ", "").split(sep=",")
# seq_voltages = list()
# for n in range(len(aux)):
# if aux[n] != '':
# seq_voltages.append(float(aux[n]))
# return seq_voltages
#
# def get_voltages(self):
# return self.dss.circuit_allbusvmagpu()
#
# def get_voltage_min(self):
# v = Circuit.get_voltages(self.dss)
# return min(v)
#
# def get_voltage_max(self):
# v = Circuit.get_voltages(self.dss)
# return max(v)
#
# def get_active_power(self):
# return self.dss.circuit_total_power()[0]
#
# def get_reactive_power(self):
# return self.dss.circuit_total_power()[1]
#
# def create_load(self, **kwargs):
# pass
#
# def create_transformer(self, **kwargs):
# pass
#
# def create_line_code(self, **kwargs):
# pass
#
# def create_line(self, **kwargs):
# pass
#
# def create_pv_system(self, **kwargs):
# pass
#
# def create_fuse(self, **kwargs):
# pass
"""
new circuit.5Leg
~ bus1=MainBus basekV=230 pu=1.0 isc3=15000 isc1=17000 phases=3 z0=[10, 10] z1=[10, 10] angle=0 mvasc3=200000
mvasc1=200000
"""
# def _str_(self):
# output = ""
# for _, var in vars(self).items():
# output += str(var)
# return output
# def _str_(self):
# return "".join(
# f"{attrib_name} = {attrib_value}\n"
# for attrib_name, attrib_value in self._dict_.items()
# if '_Circuit_df' not in attrib_name and 'dss' not in attrib_name
# )
| [
"pandas.DataFrame.from_records",
"py_dss_tools.utils.Utils.check_instance",
"py_dss_tools.utils.Utils.remove_blank_spaces",
"py_dss_tools.utils.Utils.generate_random_string",
"attr.validators.instance_of"
] | [((1296, 1338), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['[self.__dict__]'], {}), '([self.__dict__])\n', (1321, 1338), True, 'import pandas as pd\n'), ((1601, 1645), 'py_dss_tools.utils.Utils.check_instance', 'Utils.check_instance', (['value', '"""name"""', "['str']"], {}), "(value, 'name', ['str'])\n", (1621, 1645), False, 'from py_dss_tools.utils import Utils\n'), ((1669, 1701), 'py_dss_tools.utils.Utils.remove_blank_spaces', 'Utils.remove_blank_spaces', (['value'], {}), '(value)\n', (1694, 1701), False, 'from py_dss_tools.utils import Utils\n'), ((293, 325), 'attr.validators.instance_of', 'attr.validators.instance_of', (['str'], {}), '(str)\n', (320, 325), False, 'import attr\n'), ((371, 412), 'attr.validators.instance_of', 'attr.validators.instance_of', (['(int, float)'], {}), '((int, float))\n', (398, 412), False, 'import attr\n'), ((455, 496), 'attr.validators.instance_of', 'attr.validators.instance_of', (['(int, float)'], {}), '((int, float))\n', (482, 496), False, 'import attr\n'), ((545, 577), 'attr.validators.instance_of', 'attr.validators.instance_of', (['int'], {}), '(int)\n', (572, 577), False, 'import attr\n'), ((620, 652), 'attr.validators.instance_of', 'attr.validators.instance_of', (['str'], {}), '(str)\n', (647, 652), False, 'import attr\n'), ((697, 738), 'attr.validators.instance_of', 'attr.validators.instance_of', (['(int, float)'], {}), '((int, float))\n', (724, 738), False, 'import attr\n'), ((815, 856), 'attr.validators.instance_of', 'attr.validators.instance_of', (['(int, float)'], {}), '((int, float))\n', (842, 856), False, 'import attr\n'), ((905, 946), 'attr.validators.instance_of', 'attr.validators.instance_of', (['(int, float)'], {}), '((int, float))\n', (932, 946), False, 'import attr\n'), ((1128, 1165), 'py_dss_tools.utils.Utils.remove_blank_spaces', 'Utils.remove_blank_spaces', (['self._name'], {}), '(self._name)\n', (1153, 1165), False, 'from py_dss_tools.utils import Utils\n'), ((1221, 1251), 
'py_dss_tools.utils.Utils.generate_random_string', 'Utils.generate_random_string', ([], {}), '()\n', (1249, 1251), False, 'from py_dss_tools.utils import Utils\n')] |
from django.conf import settings
from django.db import models
class Tasks(models.Model):
    "Generated Model"
    # Free-form name/description of the task (unbounded-length text column).
    task_name = models.TextField()
| [
"django.db.models.TextField"
] | [((129, 147), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (145, 147), False, 'from django.db import models\n')] |
import os
import json
from vector2d import Vector2D
from interpolator import Interpolator
from utils import map_range
# Each game controller axis returns a value in the closed interval [-1, 1]. We
# limit the number of decimal places we use with the PRECISION constant. This is
# done for a few reasons: 1) it makes the numbers more human-friendly (easier to
# read) and 2) it reduces the number of thruster updates.
#
# To elaborate on this last point, I was seeing a lot of very small fluctations
# with the values coming from my PS4 controller. The change in values were so
# small, they effectively would not change the current thruster value. By
# reducing the precision, these very small fluctuations get filtered out,
# resulting in fewer thruster updates. Also, I found that when I let go of a
# joystick, the value would hover around 0.0 but would never actually become
# zero. This means the thrusters would always be active, consuming battery power
# unnecessarily. Again, by limiting the precision, these small fluctuations were
# filtered out resulting in consistent zero values when then joysticks were in
# their resting positions.
#
# Using three digits of precisions was an arbitrary choice that just happened to
# work the first time. If we find that we need more fine control of the
# thrusters, we may need to increase this value.
PRECISION = 3
# Define a series of constants, one for each thruster (PWM channel indices)
HL = 0 # horizontal left
VL = 1 # vertical left
VC = 2 # vertical center
VR = 3 # vertical right
HR = 4 # horizontal right
LIGHT = 5
# Define a series of constants, one for each game controller axis
JL_H = 0 # left joystick horizontal axis
JL_V = 1 # left joystick vertical axis
JR_H = 2 # right joystick horizontal axis
JR_V = 3 # right joystick vertical axis
AL = 4 # left analog button
AR = 5 # right analog button
# Button indices consumed by ThrusterController.update_button (light control).
UP = 3
DOWN = 1
RESET = 0
# 271,[320],467
# NOTE(review): the measured values above (271/320/467) do not match the PWM
# constants below (246/369/496) -- confirm which calibration is current.
# Define constants for the PWM to run a thruster in full reverse, full forward,
# or neutral
FULL_REVERSE = 246
NEUTRAL = 369
FULL_FORWARD = 496
LIGHT_STEP = 0.05
# Use this file to load/store thruster and sensitivity settings
SETTINGS_FILE = 'thruster_settings.json'
class ThrusterController:
    """Translates game-controller axis/button events into PWM commands for the
    vehicle's five thrusters and its light, applying per-thruster
    interpolation curves and a configurable sensitivity function.

    Bug fix versus the previous revision: ``__del__`` neutralised VL twice and
    never neutralised VR, so the vertical-right thruster could keep running
    after the controller was destroyed.
    """

    def __init__(self, simulate=False):
        """Create the controller.

        Args:
            simulate: when True, no PWM hardware is touched and all motor
                commands become no-ops (useful for development off-vehicle).
        """
        # setup motor controller. The PWM controller can control up to 16
        # different devices. We have to add devices, one for each thruster that
        # we can control. The first parameter is the human-friendly name of the
        # device. That is used for logging to the console and/or a database. The
        # next parameter indicates which PWM connector this device is connected
        # to. This is referred to as the PWM channel. The last two values
        # indicate at what time intervals (ticks) the PWM should turn on and
        # off, respectively. We simply start each device at 0 time and control
        # the duration of the pulses by adjusting the off time. Note that we may
        # be able to shuffle on/off times to even out the current draw from the
        # thrusters, but so far, that hasn't been an issue. It's even possible
        # that the PWM controller may do that for us already.
        if simulate is False:
            from pwm_controller import PWMController
            self.motor_controller = PWMController()
            self.motor_controller.add_device("HL", HL, 0, NEUTRAL)
            self.motor_controller.add_device("VL", VL, 0, NEUTRAL)
            self.motor_controller.add_device("VC", VC, 0, NEUTRAL)
            self.motor_controller.add_device("VR", VR, 0, NEUTRAL)
            self.motor_controller.add_device("HR", HR, 0, NEUTRAL)
            self.motor_controller.add_device("LIGHT", LIGHT, 0, FULL_REVERSE)
        else:
            self.motor_controller = None
        # setup the joysticks. We use a 2D vector to represent the x and y
        # values of the joysticks.
        self.j1 = Vector2D()
        self.j2 = Vector2D()
        # create interpolators
        self.horizontal_left = Interpolator()
        self.vertical_left = Interpolator()
        self.vertical_center = Interpolator()
        self.vertical_right = Interpolator()
        self.horizontal_right = Interpolator()
        # setup interpolators from a file or manually
        if os.path.isfile(SETTINGS_FILE):
            with open(SETTINGS_FILE, 'r') as f:
                self.set_settings(json.load(f), False)
        else:
            # Set the sensitivity to be applied to each thruster. 0 indicates a
            # linear response which is the default when no sensitivity is applied. 1
            # indicates full sensitivity. Values between 0 and 1 can be used to
            # increase and to decrease the overall sensitivity. Increasing sensitivity
            # dampens lower values and amplifies larger values giving more precision
            # at lower power levels.
            self.sensitivity = 0.7
            # We use a cubic to apply sensitivity. If you find that full sensitivity
            # (dampening) does not give you fine enough control, you can increase
            # the degree of the polynomial used for dampening. Note that this must
            # be a positive odd number. Any other values will cause unexpected
            # results.
            self.power = 3
            # setup the various interpolators for each thruster. Each item we add
            # to the interpolator consists of two values: an angle in degrees and a
            # thrust value. An interpolator works by returning a value for any given
            # input value. More specifically in this case, we will give each
            # interpolator an angle and it will return a thrust value for that
            # angle. Since we have only given the interpolator values for very
            # specific angles, it will have to determine values for angles we have
            # not provided. It does this using linear interpolation.
            self.horizontal_left.addIndexValue(0.0, -1.0)
            self.horizontal_left.addIndexValue(90.0, 1.0)
            self.horizontal_left.addIndexValue(180.0, 1.0)
            self.horizontal_left.addIndexValue(270.0, -1.0)
            self.horizontal_left.addIndexValue(360.0, -1.0)
            self.vertical_left.addIndexValue(0.0, 1.0)
            self.vertical_left.addIndexValue(90.0, -1.0)
            self.vertical_left.addIndexValue(180.0, -1.0)
            self.vertical_left.addIndexValue(270.0, 1.0)
            self.vertical_left.addIndexValue(360.0, 1.0)
            self.vertical_center.addIndexValue(0.0, 0.0)
            self.vertical_center.addIndexValue(90.0, 1.0)
            self.vertical_center.addIndexValue(180.0, 0.0)
            self.vertical_center.addIndexValue(270.0, -1.0)
            self.vertical_center.addIndexValue(360.0, 0.0)
            self.vertical_right.addIndexValue(0.0, -1.0)
            self.vertical_right.addIndexValue(90.0, -1.0)
            self.vertical_right.addIndexValue(180.0, 1.0)
            self.vertical_right.addIndexValue(270.0, 1.0)
            self.vertical_right.addIndexValue(360.0, -1.0)
            self.horizontal_right.addIndexValue(0.0, 1.0)
            self.horizontal_right.addIndexValue(90.0, 1.0)
            self.horizontal_right.addIndexValue(180.0, -1.0)
            self.horizontal_right.addIndexValue(270.0, -1.0)
            self.horizontal_right.addIndexValue(360.0, 1.0)
        # setup ascent/descent controllers (resting position of an analog
        # trigger is -1.0, fully pressed is +1.0)
        self.ascent = -1.0
        self.descent = -1.0
        # setup light (0.0 = off, 1.0 = full brightness)
        self.light = 0.0

    def __del__(self):
        '''
        When an instance of this class gets destroyed, we need to make sure that
        we turn off all motors. Otherwise, we could end up in a situation where
        the vehicle could have thrusters running when we don't have scripts
        running to control it.
        '''
        # BUG FIX: previously VL was neutralised twice and VR never was,
        # leaving the vertical-right thruster running after shutdown.
        self.set_motor(HL, 0.0)
        self.set_motor(VL, 0.0)
        self.set_motor(VC, 0.0)
        self.set_motor(VR, 0.0)
        self.set_motor(HR, 0.0)

    def update_axis(self, axis, value):
        '''
        This is the main method of this class. It is responsible for taking a
        controller input value (referred to as an axis value) and then
        converting that into the appropriate thrust values for the appropriate
        thrusters associated with that axis.
        For the two joysticks, we convert the joystick position into an angle.
        We know which thrusters each joystick controls, so we feed the
        calculated angle into the thruster interpolators for that joystick. This
        gives us the new thruster value for each thruster, which we then apply
        to the PWM controller devices for those thrusters.
        Note that the angle of the joystick does not give us all of the
        information that we need. If the joystick is close to the center
        position, then we don't need to apply as much thrust. If it is pushed
        all the way to the edge, then we need 100% thrust. So, we treat the
        center as 0% and the edge as 100%. The values we get back from the
        interpolators are 100% values, so we simply apply the joystick
        percentage to the interpolator value to find the actual thrust value we
        need to use.
        Things get a bit more complicated for the vertical thrusters because it
        is possible that we will be pitching or rolling the vehicle while
        simultaneously trying to move the vehicle directly up or down. If we
        pitch or roll the vehicle only, then the process is exactly as we
        described above. However, if we are pitching and/or rolling AND moving
        the vehicle vertically, we need to combine the two operations into one
        set of thruster values. We have to first determine the values for pitch
        and roll, then we increase or decrease all thruster values equally in
        the up or down direction. However it is possible that we will not be
        able to increase/decrease all thrusters by the same amount since we are
        already applying thrust for pitch and roll. This means we need to make
        sure our values do not go outside the closed interval [-1,1]. This
        means that as we pitch or roll harder, the vehicle will flatten out as
        we apply vertical thrust.
        '''
        # We need to keep track of which thrusters need updating. We use the
        # following flags for that purpose
        update_horizontal_thrusters = False
        update_vertical_thrusters = False
        # Round the incoming value to the specified precision to reduce input
        # noise
        value = round(value, PRECISION)
        # Update the appropriate joystick vector based on which controller axis
        # has changed. Note that we make sure the value is different from what
        # we have already to prevent unnecessary updates. Recall that the
        # controller may send values whose differences are smaller than our
        # precision. This means we will get an update from the controller, but
        # we decided to ignore it since it won't result in a significant change
        # to our thrusters.
        if axis == JL_H:
            if self.j1.x != value:
                self.j1.x = value
                update_horizontal_thrusters = True
        elif axis == JL_V:
            if self.j1.y != value:
                self.j1.y = value
                update_horizontal_thrusters = True
        elif axis == JR_H:
            if self.j2.x != value:
                self.j2.x = value
                update_vertical_thrusters = True
        elif axis == JR_V:
            if self.j2.y != value:
                self.j2.y = value
                update_vertical_thrusters = True
        elif axis == AL:
            if self.descent != value:
                self.descent = value
                update_vertical_thrusters = True
        elif axis == AR:
            if self.ascent != value:
                self.ascent = value
                update_vertical_thrusters = True
        else:
            pass
            # print("unknown axis ", event.axis)
        # updating horizontal thrusters is easy: find current angle, convert
        # angle to thruster values, apply values
        if update_horizontal_thrusters:
            left_value = self.horizontal_left.valueAtIndex(self.j1.angle)
            right_value = self.horizontal_right.valueAtIndex(self.j1.angle)
            power = min(1.0, self.j1.length)
            self.set_motor(HL, left_value * power)
            self.set_motor(HR, right_value * power)
        # updating vertical thrusters is trickier. We do the same as above, but
        # then post-process the values if we are applying vertical up/down
        # thrust. As mentioned above, we have to be careful to stay within our
        # [-1,1] interval.
        if update_vertical_thrusters:
            power = min(1.0, self.j2.length)
            back_value = self.vertical_center.valueAtIndex(self.j2.angle) * power
            front_left_value = self.vertical_left.valueAtIndex(self.j2.angle) * power
            front_right_value = self.vertical_right.valueAtIndex(self.j2.angle) * power
            if self.ascent != -1.0:
                percent = (1.0 + self.ascent) / 2.0
                max_thrust = max(back_value, front_left_value, front_right_value)
                max_adjust = (1.0 - max_thrust) * percent
                # back_value += max_adjust
                front_left_value += max_adjust
                front_right_value += max_adjust
            elif self.descent != -1.0:
                percent = (1.0 + self.descent) / 2.0
                min_thrust = min(back_value, front_left_value, front_right_value)
                max_adjust = (min_thrust - -1.0) * percent
                # back_value -= max_adjust
                front_left_value -= max_adjust
                front_right_value -= max_adjust
            self.set_motor(VC, back_value)
            self.set_motor(VL, front_left_value)
            self.set_motor(VR, front_right_value)

    def update_button(self, button, value):
        """Adjust the light from button presses: UP brightens by LIGHT_STEP,
        DOWN dims by LIGHT_STEP, RESET switches the light off."""
        if button == UP:
            self.light = min(1.0, self.light + LIGHT_STEP)
        elif button == DOWN:
            self.light = max(0.0, self.light - LIGHT_STEP)
        elif button == RESET:
            self.light = 0.0
        light_value = map_range(self.light, 0.0, 1.0, -1.0, 1.0)
        print("button %s, light = %s, light_value = %s" % (button, self.light, light_value))
        self.set_motor(LIGHT, light_value)

    def set_motor(self, motor_number, value):
        """Convert a normalized thrust value in [-1, 1] into a PWM off-tick and
        apply it to the given device. No-op when running in simulate mode."""
        if self.motor_controller is not None:
            motor = self.motor_controller.devices[motor_number]
            value = self.apply_sensitivity(value)
            pwm_value = int(map_range(value, -1.0, 1.0, FULL_REVERSE, FULL_FORWARD))
            # print("setting motor {0} to {1}".format(motor_number, pwm_value))
            motor.off = pwm_value

    def apply_sensitivity(self, value):
        """Blend a linear response with an odd-degree polynomial response.

        sensitivity=0 gives a purely linear response; sensitivity=1 fully
        dampens small inputs for finer low-power control.
        """
        return self.sensitivity * value**self.power + (1.0 - self.sensitivity) * value

    def get_settings(self):
        """Return the current sensitivity and thruster curves as a plain dict
        suitable for JSON serialisation (see set_settings for the inverse)."""
        return {
            'version': 1,
            'sensitivity': {
                'strength': self.sensitivity,
                'power': self.power
            },
            'thrusters': [
                self.horizontal_left.to_array(),
                self.vertical_left.to_array(),
                self.vertical_center.to_array(),
                self.vertical_right.to_array(),
                self.horizontal_right.to_array()
            ]
        }

    def set_settings(self, data, save=True):
        """Apply (and optionally persist) a settings dict.

        Args:
            data: settings dict with 'version', 'sensitivity' and 'thrusters'
                keys; when saving, a 'name' key selects the target file.
            save: when True, write the settings to disk before applying them.
        """
        if data['version'] == 1:
            # save settings for future loading
            if save:
                # NOTE(review): get_settings() does not emit a 'name' key, so
                # saving a dict produced by it would raise KeyError -- confirm
                # callers always supply 'name'.
                if data['name'] == "":
                    filename = SETTINGS_FILE
                else:
                    filename = os.path.join("settings", data['name'] + ".json")
                with open(filename, 'w') as out:
                    out.write(json.dumps(data, indent=2))
            # update current settings
            self.sensitivity = float(data['sensitivity']['strength'])
            self.power = float(data['sensitivity']['power'])
            self.horizontal_left.from_array(data['thrusters'][0])
            self.vertical_left.from_array(data['thrusters'][1])
            self.vertical_center.from_array(data['thrusters'][2])
            self.vertical_right.from_array(data['thrusters'][3])
            self.horizontal_right.from_array(data['thrusters'][4])
        else:
            print("Unsupported data version number '{}'".format(data['version']))
if __name__ == "__main__":
pass
| [
"json.dumps",
"os.path.join",
"os.path.isfile",
"pwm_controller.PWMController",
"interpolator.Interpolator",
"json.load",
"utils.map_range",
"vector2d.Vector2D"
] | [((3896, 3906), 'vector2d.Vector2D', 'Vector2D', ([], {}), '()\n', (3904, 3906), False, 'from vector2d import Vector2D\n'), ((3925, 3935), 'vector2d.Vector2D', 'Vector2D', ([], {}), '()\n', (3933, 3935), False, 'from vector2d import Vector2D\n'), ((3999, 4013), 'interpolator.Interpolator', 'Interpolator', ([], {}), '()\n', (4011, 4013), False, 'from interpolator import Interpolator\n'), ((4043, 4057), 'interpolator.Interpolator', 'Interpolator', ([], {}), '()\n', (4055, 4057), False, 'from interpolator import Interpolator\n'), ((4089, 4103), 'interpolator.Interpolator', 'Interpolator', ([], {}), '()\n', (4101, 4103), False, 'from interpolator import Interpolator\n'), ((4134, 4148), 'interpolator.Interpolator', 'Interpolator', ([], {}), '()\n', (4146, 4148), False, 'from interpolator import Interpolator\n'), ((4181, 4195), 'interpolator.Interpolator', 'Interpolator', ([], {}), '()\n', (4193, 4195), False, 'from interpolator import Interpolator\n'), ((4262, 4291), 'os.path.isfile', 'os.path.isfile', (['SETTINGS_FILE'], {}), '(SETTINGS_FILE)\n', (4276, 4291), False, 'import os\n'), ((14347, 14389), 'utils.map_range', 'map_range', (['self.light', '(0.0)', '(1.0)', '(-1.0)', '(1.0)'], {}), '(self.light, 0.0, 1.0, -1.0, 1.0)\n', (14356, 14389), False, 'from utils import map_range\n'), ((3283, 3298), 'pwm_controller.PWMController', 'PWMController', ([], {}), '()\n', (3296, 3298), False, 'from pwm_controller import PWMController\n'), ((14761, 14816), 'utils.map_range', 'map_range', (['value', '(-1.0)', '(1.0)', 'FULL_REVERSE', 'FULL_FORWARD'], {}), '(value, -1.0, 1.0, FULL_REVERSE, FULL_FORWARD)\n', (14770, 14816), False, 'from utils import map_range\n'), ((4375, 4387), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4384, 4387), False, 'import json\n'), ((15836, 15884), 'os.path.join', 'os.path.join', (['"""settings"""', "(data['name'] + '.json')"], {}), "('settings', data['name'] + '.json')\n", (15848, 15884), False, 'import os\n'), ((15965, 15991), 'json.dumps', 
'json.dumps', (['data'], {'indent': '(2)'}), '(data, indent=2)\n', (15975, 15991), False, 'import json\n')] |
import operator
import unittest
from asq.queryables import Queryable
__author__ = "<NAME>"
class TestPreScan(unittest.TestCase):
    """Unit tests for Queryable.pre_scan (exclusive prefix scan)."""

    def test_pre_scan_empty_default(self):
        """An empty sequence pre-scans to an empty sequence."""
        result = Queryable([]).pre_scan().to_list()
        self.assertEqual(result, [])

    def test_pre_scan_single_default(self):
        """A single element yields only the default seed (0)."""
        result = Queryable([47]).pre_scan().to_list()
        self.assertEqual(result, [0])

    def test_pre_scan_default(self):
        """Default pre_scan produces running sums shifted right by one."""
        source = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected = [0, 1, 3, 6, 10, 15, 21, 28, 36, 45]
        self.assertEqual(Queryable(source).pre_scan().to_list(), expected)

    def test_pre_scan_empty_func(self):
        """An empty sequence stays empty regardless of the operator."""
        result = Queryable([]).pre_scan(operator.mul).to_list()
        self.assertEqual(result, [])

    def test_pre_scan_single_func(self):
        """A single element yields only the supplied seed."""
        result = Queryable([47]).pre_scan(operator.mul, seed=1).to_list()
        self.assertEqual(result, [1])

    def test_pre_scan_func(self):
        """Multiplicative pre_scan produces factorials shifted right by one."""
        source = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected = [1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880]
        self.assertEqual(Queryable(source).pre_scan(operator.mul, seed=1).to_list(), expected)

    def test_pre_scan_func_callable(self):
        """A non-callable func argument raises TypeError."""
        self.assertRaises(TypeError, lambda: Queryable([1, 2, 3]).pre_scan("not callable"))

    def test_pre_scan_closed(self):
        """Calling pre_scan on a closed Queryable raises ValueError."""
        closed = Queryable([])
        closed.close()
        self.assertRaises(ValueError, lambda: closed.pre_scan())
| [
"asq.queryables.Queryable"
] | [((1446, 1459), 'asq.queryables.Queryable', 'Queryable', (['[]'], {}), '([])\n', (1455, 1459), False, 'from asq.queryables import Queryable\n'), ((212, 224), 'asq.queryables.Queryable', 'Queryable', (['a'], {}), '(a)\n', (221, 224), False, 'from asq.queryables import Queryable\n'), ((372, 384), 'asq.queryables.Queryable', 'Queryable', (['a'], {}), '(a)\n', (381, 384), False, 'from asq.queryables import Queryable\n'), ((553, 565), 'asq.queryables.Queryable', 'Queryable', (['a'], {}), '(a)\n', (562, 565), False, 'from asq.queryables import Queryable\n'), ((741, 753), 'asq.queryables.Queryable', 'Queryable', (['a'], {}), '(a)\n', (750, 753), False, 'from asq.queryables import Queryable\n'), ((910, 922), 'asq.queryables.Queryable', 'Queryable', (['a'], {}), '(a)\n', (919, 922), False, 'from asq.queryables import Queryable\n'), ((1108, 1120), 'asq.queryables.Queryable', 'Queryable', (['a'], {}), '(a)\n', (1117, 1120), False, 'from asq.queryables import Queryable\n'), ((1347, 1367), 'asq.queryables.Queryable', 'Queryable', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1356, 1367), False, 'from asq.queryables import Queryable\n')] |
from django.core.exceptions import SuspiciousOperation
from django.core.signing import Signer, BadSignature
from django.forms import HiddenInput
signer = Signer()
class SignedHiddenInput(HiddenInput):
    """Hidden input whose rendered value is cryptographically signed.

    On submission the signature is verified; a missing/invalid signature (or,
    optionally, a mismatched field-name prefix) raises SuspiciousOperation,
    preventing clients from tampering with the hidden value.
    """

    def __init__(self, include_field_name=True, attrs=None):
        # When include_field_name is set, the field name is signed together
        # with the value so a signed value cannot be replayed on another field.
        self.include_field_name = include_field_name
        super(SignedHiddenInput, self).__init__(attrs=attrs)

    def value_from_datadict(self, data, files, name):
        """Verify the signature and strip the optional field-name prefix."""
        signed = super(SignedHiddenInput, self).value_from_datadict(data, files, name)
        try:
            unsigned = signer.unsign(signed)
        except BadSignature:
            raise SuspiciousOperation()
        if not self.include_field_name:
            return unsigned
        prefix = '{0}-'.format(name)
        if not unsigned.startswith(prefix):
            raise SuspiciousOperation()
        return unsigned.replace(prefix, '', 1)

    def render(self, name, value, attrs=None):
        """Render the widget with the signed form of the value."""
        signed_value = self.sign_value(name, value)
        return super(SignedHiddenInput, self).render(name, signed_value, attrs=attrs)

    def sign_value(self, name, value):
        """Return the signed value, optionally prefixed with the field name."""
        if self.include_field_name:
            value = '-'.join(map(str, [name, value]))
        return signer.sign(value)
def value(self):
pass | [
"django.core.signing.Signer",
"django.core.exceptions.SuspiciousOperation"
] | [((155, 163), 'django.core.signing.Signer', 'Signer', ([], {}), '()\n', (161, 163), False, 'from django.core.signing import Signer, BadSignature\n'), ((621, 642), 'django.core.exceptions.SuspiciousOperation', 'SuspiciousOperation', ([], {}), '()\n', (640, 642), False, 'from django.core.exceptions import SuspiciousOperation\n'), ((791, 812), 'django.core.exceptions.SuspiciousOperation', 'SuspiciousOperation', ([], {}), '()\n', (810, 812), False, 'from django.core.exceptions import SuspiciousOperation\n')] |
from fastapi import APIRouter
from starlette.requests import Request
router = APIRouter()
@router.get('/')
async def read_root(request: Request):
    """Landing/health endpoint; returns a short service description string."""
    return "ML serving with fastapi"
@router.get('/api/predict')
async def predict_number(request: Request):
    """Run the application's preloaded ML model and return its prediction.

    Fix: route paths must start with '/'; the original 'api/predict' is
    rejected (or unroutable) by FastAPI/Starlette path handling.
    """
    model = request.app.ml_model
    # NOTE(review): the input 'bla' is hard-coded -- presumably the prediction
    # input should come from the request; confirm and wire up real input.
    return model.predict('bla')
| [
"fastapi.APIRouter"
] | [((79, 90), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (88, 90), False, 'from fastapi import APIRouter\n')] |
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for inference_gym.targets.eight_schools."""
import tensorflow.compat.v2 as tf
from inference_gym.internal import test_util
from inference_gym.targets import eight_schools
@test_util.multi_backend_test(globals(), 'targets.eight_schools_test')
class EightSchoolsTest(test_util.InferenceGymTestCase):
  """Tests for the EightSchools inference target."""

  def testEightSchools(self):
    """Checks that unconstrained parameters yield finite joint densities."""
    model = eight_schools.EightSchools()
    identity_shapes = {
        'avg_effect': [],
        'log_stddev': [],
        'school_effects': [8],
    }
    self.validate_log_prob_and_transforms(
        model,
        sample_transformation_shapes=dict(identity=identity_shapes),
        check_ground_truth_mean_standard_error=True,
        check_ground_truth_mean=True,
        check_ground_truth_standard_deviation=True)

  @test_util.numpy_disable_gradient_test
  def testEightSchoolsHMC(self):
    """Checks approximate samples from the model against the ground truth."""
    model = eight_schools.EightSchools()
    self.validate_ground_truth_using_hmc(
        model,
        num_chains=4,
        num_steps=4000,
        num_leapfrog_steps=10,
        step_size=0.4)
if __name__ == '__main__':
tf.test.main()
| [
"tensorflow.compat.v2.test.main",
"inference_gym.targets.eight_schools.EightSchools"
] | [((1887, 1901), 'tensorflow.compat.v2.test.main', 'tf.test.main', ([], {}), '()\n', (1899, 1901), True, 'import tensorflow.compat.v2 as tf\n'), ((1109, 1137), 'inference_gym.targets.eight_schools.EightSchools', 'eight_schools.EightSchools', ([], {}), '()\n', (1135, 1137), False, 'from inference_gym.targets import eight_schools\n'), ((1664, 1692), 'inference_gym.targets.eight_schools.EightSchools', 'eight_schools.EightSchools', ([], {}), '()\n', (1690, 1692), False, 'from inference_gym.targets import eight_schools\n')] |
import os
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
def isAccess(path):
    """Return True when the directory at *path* can be listed.

    PermissionError is treated as "no access" (False); any other error
    (e.g. FileNotFoundError) propagates to the caller.
    """
    try:
        os.listdir(path)
    except PermissionError:
        return False
    return True
@login_required
def isExist(request):
    """Report whether the POSTed 'path' exists on the server's filesystem.

    NOTE(review): reveals existence of arbitrary server paths to any
    logged-in user -- confirm this exposure is intended.
    """
    requested = os.path.abspath(request.POST['path'])
    return HttpResponse(os.path.exists(requested))
def getPathHierrarhy(fullPath):
    """Split an absolute path like '/a/b/c' into Path records.

    Each record carries one component name in ``dir`` and the cumulative
    path (with trailing slash) in ``hierrarhy``; the last record's
    ``hierrarhy`` has its trailing slash removed. Returns [] for a falsy
    input.
    """
    entries = []
    accumulated = ""
    if fullPath:
        for component in fullPath[1:].split("/"):
            accumulated += component + "/"
            entry = Path()
            entry.dir = component
            entry.hierrarhy = accumulated
            entries.append(entry)
        # Strip the trailing slash from the final (deepest) component only.
        entries[-1].hierrarhy = entries[-1].hierrarhy[0:-1]
    return entries
def getPathHierrarhyFile(fullPath):
    """Like getPathHierrarhy, but drops the last component (the file name).

    Fix: the previous implementation duplicated getPathHierrarhy's body in
    full; it now delegates and simply removes the final entry, which is
    behaviourally identical (the deleted entry's trailing-slash trim was a
    dead store).
    """
    pathes = getPathHierrarhy(fullPath)
    if pathes:
        del pathes[-1]
    return pathes
class Path:
    # Lightweight record for one path component; instances are populated by
    # getPathHierrarhy()/getPathHierrarhyFile().
    dir=""
hierrarhy="" | [
"os.path.abspath",
"os.listdir"
] | [((143, 159), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (153, 159), False, 'import os\n'), ((306, 343), 'os.path.abspath', 'os.path.abspath', (["request.POST['path']"], {}), "(request.POST['path'])\n", (321, 343), False, 'import os\n')] |
from typing import Union
from scipy.spatial.qhull import Delaunay
from shapely.geometry import LineString
from subsurface.structs.base_structures import StructuredData
import numpy as np
try:
    import segyio
    segyio_imported = True
except ImportError:
    # segyio is an optional dependency; this flag records its availability.
    segyio_imported = False
def read_in_segy(filepath: str, coords=None) -> StructuredData:
    """Reader for seismic data stored in sgy/segy files

    Args:
        filepath (str): the path of the sgy/segy file
        coords (dict): currently unused; reserved for labelling the xarray
            dimensions ('x', 'y' and 'z') of the returned data

    Returns: a StructuredData object with the traces (samples per trace)
        written into an xr.Dataset

    Raises:
        ImportError: if the optional segyio dependency is not installed
    """
    if not segyio_imported:
        raise ImportError(
            'The segyio package is required to read SEG-Y files.')
    # Context manager guarantees the file handle is closed even if reading
    # fails part-way through (the previous open()/close() pair leaked the
    # handle on any exception).
    with segyio.open(filepath, ignore_geometry=True) as segyfile:
        # data holds traces * (samples per trace) values
        data = np.asarray([np.copy(tr) for tr in segyfile.trace[:]])
    return StructuredData.from_numpy(data)
def create_mesh_from_coords(coords: Union[dict, LineString],
                            zmin: Union[float, int], zmax: Union[float, int] = 0.0):
    """Creates a mesh for plotting StructuredData

    Args:
        coords (Union[dict, LineString]): the x and y, i.e. latitude and longitude, location of the traces of the seismic profile
        zmin (float): the location in z where the lowest sample was taken
        zmax (float): the maximum elevation of the seismic profile, by default 0.0

    Returns: vertices and faces for creating an UnstructuredData object
    """
    # isinstance instead of `type(...) ==` so LineString subclasses work too.
    if isinstance(coords, LineString):
        linestring = coords
        xy = list(linestring.coords)
        n = len(xy)
        coords = np.array([[p[0] for p in xy],
                           [p[1] for p in xy]]).T
    else:
        n = len(coords['x'])
        coords = np.array([coords['x'],
                           coords['y']]).T

    # duplicating the line, once with z=lower and another with z=upper values
    vertices = np.zeros((2*n, 3))
    vertices[:n, :2] = coords
    vertices[:n, 2] = zmin
    vertices[n:, :2] = coords
    vertices[n:, 2] = zmax

    # i+n --- i+n+1
    # |\      |
    # | \     |
    # |  \    |
    # |   \   |
    # i  --- i+1
    tri = Delaunay(vertices[:, [0, 2]])
    faces = tri.simplices
    return vertices, faces
| [
"numpy.copy",
"subsurface.structs.base_structures.StructuredData.from_numpy",
"numpy.array",
"numpy.zeros",
"scipy.spatial.qhull.Delaunay",
"segyio.open"
] | [((793, 836), 'segyio.open', 'segyio.open', (['filepath'], {'ignore_geometry': '(True)'}), '(filepath, ignore_geometry=True)\n', (804, 836), False, 'import segyio\n'), ((913, 944), 'subsurface.structs.base_structures.StructuredData.from_numpy', 'StructuredData.from_numpy', (['data'], {}), '(data)\n', (938, 944), False, 'from subsurface.structs.base_structures import StructuredData\n'), ((2065, 2085), 'numpy.zeros', 'np.zeros', (['(2 * n, 3)'], {}), '((2 * n, 3))\n', (2073, 2085), True, 'import numpy as np\n'), ((2310, 2339), 'scipy.spatial.qhull.Delaunay', 'Delaunay', (['vertices[:, [0, 2]]'], {}), '(vertices[:, [0, 2]])\n', (2318, 2339), False, 'from scipy.spatial.qhull import Delaunay\n'), ((861, 872), 'numpy.copy', 'np.copy', (['tr'], {}), '(tr)\n', (868, 872), True, 'import numpy as np\n'), ((1906, 1942), 'numpy.array', 'np.array', (["[coords['x'], coords['y']]"], {}), "([coords['x'], coords['y']])\n", (1914, 1942), True, 'import numpy as np\n')] |
#!/bin/python
# should be started from project base directory
# helper script to regenerate helm chart file: partial of charts/external-dns-management/templates/deployment.yaml
import re
import os
helpFilename = "/tmp/dns-controller-manager-help.txt"
# Build the binary and capture only the option lines of its --help output.
rc = os.system("make build-local && ./dns-controller-manager --help | grep ' --' > {}".format(helpFilename))
if rc != 0:
  exit(rc)
# with-statement closes the handle deterministically (it was leaked before;
# the old code opened the file and never called close()).
with open(helpFilename, "r") as f:
  options = f.read()
os.remove(helpFilename)
def toCamelCase(name):
  """Convert a dashed/dotted option name to the camelCase values.yaml key.

  Provider segments spelled 'Dns'/'Clouddns' are normalized to upper-case
  DNS, e.g. 'alicloud-dns' -> 'alicloudDNS'.
  """
  if not name:
    # Guard: the original indexed name[0] and crashed on empty input.
    return name
  # Avoid shadowing the builtin `str` (the original reused it as a local).
  result = ''.join(x.capitalize() for x in re.split("[.-]", name))
  result = result[0].lower() + result[1:]
  # Each provider mapping exactly once (the original listed googleClouddns
  # twice).
  replacements = (
    ("alicloudDns", "alicloudDNS"),
    ("azureDns", "azureDNS"),
    ("googleClouddns", "googleCloudDNS"),
    ("ingressDns", "ingressDNS"),
    ("serviceDns", "serviceDNS"),
    ("cloudflareDns", "cloudflareDNS"),
    ("infobloxDns", "infobloxDNS"),
  )
  for old, new in replacements:
    result = result.replace(old, new)
  return result
# Option names never exposed through the helm chart values.
excluded = {"name", "help", "identifier", "dry-run"}
# Option-name patterns to skip (cache dirs, blocked zones, remote access).
excludedPattern = [re.compile(".*cache-dir$"), re.compile(".*blocked-zone$"), re.compile(".*remote-access-.+")]
def isExcluded(name):
  """Return True when the option should not appear in the helm chart."""
  if name == "" or name in excluded:
    return True
  return any(prog.match(name) for prog in excludedPattern)
# Emit a deployment.yaml argument snippet for every chart-exposed option.
for line in options.split("\n"):
  m = re.match(r"\s+(?:-[^-]+)?--(\S+)\s", line)
  if not m:
    continue
  name = m.group(1)
  if isExcluded(name):
    continue
  camelCase = toCamelCase(name)
  txt = """  {{- if .Values.configuration.%s }}
      - --%s={{ .Values.configuration.%s }}
      {{- end }}""" % (camelCase, name, camelCase)
  print(txt)
# Chart defaults written inline in the values.yaml skeleton; options absent
# from this mapping are emitted as commented-out placeholders.
defaultValues = {
    "controllers": "all",
    "persistentCache": "false",
    "persistentCacheStorageSize": "1Gi",
    "persistentCacheStorageSizeAlicloud": "20Gi",
    "serverPortHttp": "8080",
    "ttl": 120,
}
print("configuration:")
# Emit the values.yaml skeleton: known defaults inline, the rest commented.
for line in options.split("\n"):
  m = re.match(r"\s+(?:-[^-]+)?--(\S+)\s", line)
  if not m:
    continue
  name = m.group(1)
  if isExcluded(name):
    continue
  camelCase = toCamelCase(name)
  if camelCase in defaultValues:
    txt = "  %s: %s" % (camelCase, defaultValues[camelCase])
  else:
    txt = "# %s:" % camelCase
  print(txt)
| [
"re.split",
"re.compile",
"re.match",
"os.remove"
] | [((433, 456), 'os.remove', 'os.remove', (['helpFilename'], {}), '(helpFilename)\n', (442, 456), False, 'import os\n'), ((1070, 1096), 're.compile', 're.compile', (['""".*cache-dir$"""'], {}), "('.*cache-dir$')\n", (1080, 1096), False, 'import re\n'), ((1098, 1127), 're.compile', 're.compile', (['""".*blocked-zone$"""'], {}), "('.*blocked-zone$')\n", (1108, 1127), False, 'import re\n'), ((1129, 1161), 're.compile', 're.compile', (['""".*remote-access-.+"""'], {}), "('.*remote-access-.+')\n", (1139, 1161), False, 'import re\n'), ((1370, 1414), 're.match', 're.match', (['"""\\\\s+(?:-[^-]+)?--(\\\\S+)\\\\s"""', 'line'], {}), "('\\\\s+(?:-[^-]+)?--(\\\\S+)\\\\s', line)\n", (1378, 1414), False, 'import re\n'), ((1964, 2008), 're.match', 're.match', (['"""\\\\s+(?:-[^-]+)?--(\\\\S+)\\\\s"""', 'line'], {}), "('\\\\s+(?:-[^-]+)?--(\\\\S+)\\\\s', line)\n", (1972, 2008), False, 'import re\n'), ((521, 543), 're.split', 're.split', (['"""[.-]"""', 'name'], {}), "('[.-]', name)\n", (529, 543), False, 'import re\n')] |
from MDP import MDP
import unittest
class MDPTestCase(unittest.TestCase):
    """End-to-end checks of MDP transition-probability estimation."""

    def test_small1(self):
        sequences = [['a', 'a', 'b', 'b', 'c', 'c', 'd', 'd']]
        self.__printInput(sequences)
        mdp = MDP(sequences)
        mdp.run()
        # Resulting transition probabilities as a nested dictionary.
        probs = mdp.getTransitionProbs()
        self.__printOutput(probs)
        expected = {'a': {'b': 1}, 'b': {'c': 1}, 'c': {'d': 1}}
        self.assertEqual(probs, expected)

    def test_small2(self):
        first = ['a', 'a', 'b', 'b', 'c', 'c', 'd', 'd']
        second = ['a', 'b', 'b', 'a', 'a', 'd', 'd', 'b', 'b', 'c', 'a']
        sequences = [first, second]
        self.__printInput(sequences)
        mdp = MDP(sequences)
        mdp.run()
        # Resulting transition probabilities as a nested dictionary.
        probs = mdp.getTransitionProbs()
        self.__printOutput(probs)
        expected = {'a': {'b': 2 / 3, 'd': 1 / 3}, 'b': {'c': 2 / 3, 'a': 1 / 3},
                    'c': {'d': 1 / 2, 'a': 1 / 2}, 'd': {'b': 1}}
        self.assertEqual(probs, expected)

    def __printInput(self, lst):
        # Uncomment to see input while debugging.
        # print("\n......Input: ", lst)
        pass

    def __printOutput(self, o):
        # Uncomment to see output while debugging.
        # print(".....Output:", o)
        pass
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
"unittest.main",
"MDP.MDP"
] | [((1337, 1352), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1350, 1352), False, 'import unittest\n'), ((205, 213), 'MDP.MDP', 'MDP', (['lst'], {}), '(lst)\n', (208, 213), False, 'from MDP import MDP\n'), ((704, 712), 'MDP.MDP', 'MDP', (['lst'], {}), '(lst)\n', (707, 712), False, 'from MDP import MDP\n')] |
# Generated by Django 2.2.10 on 2020-05-02 05:53
from django.db import migrations
class Migration(migrations.Migration):
    # Renames ItemPurchase.supplier_price to ItemPurchase.price.
    # Schema-only rename; no data transformation is performed.

    dependencies = [
        ('purchases', '0008_auto_20200430_1617'),
    ]

    operations = [
        migrations.RenameField(
            model_name='itempurchase',
            old_name='supplier_price',
            new_name='price',
        ),
    ]
| [
"django.db.migrations.RenameField"
] | [((230, 328), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""itempurchase"""', 'old_name': '"""supplier_price"""', 'new_name': '"""price"""'}), "(model_name='itempurchase', old_name='supplier_price',\n new_name='price')\n", (252, 328), False, 'from django.db import migrations\n')] |
import dash
from dash import Output, Input, dcc
from dash import html
from tabs import tab1, tab2
# from tab2_callbacks import tab2_out, upload_prediction, render_graph2
import flask
server = flask.Flask(__name__) # define flask app.server
# Google Fonts stylesheet; Bootstrap entries are kept commented out.
external_stylesheets = [
    {
        "href": "https://fonts.googleapis.com/css2?"
               "family=Lato:wght@400;700&display=swap",
        "rel": "stylesheet",
    },
    # dbc.themes.BOOTSTRAP,
    # "https://cdn.jsdelivr.net/npm/bootstrap@5.1.0/dist/css/bootstrap.min.css"
]
app = dash.Dash(__name__,
                server=server,
                external_stylesheets=external_stylesheets)
app.title = "Tweet the Stocks"
# Page shell: header banner, the two-tab selector, and both tab bodies
# (visibility of tab1/tab2 layouts is toggled by the callbacks below).
app.layout = html.Div(
    children=[
        html.Div(
            children=[
                html.H1(children="Tweet the Stocks", className="header-title"),
                html.P(
                    children="Explore the correlation of stock prices and the related tagged tweets in 2019",
                    className="header-description",
                ),
            ],
            className="header",
        ),
        html.Div(
            children=[
                html.Div(
                    dcc.Tabs(id="tabs", value='tab1', children=[
                        dcc.Tab(label='Historical records', value='tab1', ),
                        dcc.Tab(label='Prediction', value='tab2'),
                    ], colors={
                        "border": "white",
                        "primary": "#e36209",
                        "background": "#fafbfc"
                    })),
            ],
            className="tabs",
        ),
        tab1.layout,
        tab2.layout,
    ]
)
@app.callback(
    Output('tab1', 'style'), Output('tab2', 'style'),
    [Input('tabs', 'value')])
def show_hide_tab(tab):
    """Toggle visibility of the two tab containers from the selected tab."""
    visible = {'display': 'block'}
    hidden = {'display': 'none'}
    if tab == 'tab1':
        return visible, hidden
    elif tab == 'tab2':
        return hidden, visible
@app.callback(Output('popover1', 'children'), Input('import', 'n_clicks'), Input('upload-data', 'contents'), Input('tabs', 'value'))
def hint(clicks, file_content, tab):
    """Show a progress hint once an upload has been submitted on tab 1."""
    show_hint = clicks > 0 and file_content and tab == "tab1"
    return "Calculating tweet sentiment scores..." if show_hint else ""
| [
"flask.Flask",
"dash.Input",
"dash.html.H1",
"dash.Output",
"dash.html.P",
"dash.dcc.Tab",
"dash.Dash"
] | [((195, 216), 'flask.Flask', 'flask.Flask', (['__name__'], {}), '(__name__)\n', (206, 216), False, 'import flask\n'), ((537, 614), 'dash.Dash', 'dash.Dash', (['__name__'], {'server': 'server', 'external_stylesheets': 'external_stylesheets'}), '(__name__, server=server, external_stylesheets=external_stylesheets)\n', (546, 614), False, 'import dash\n'), ((1700, 1723), 'dash.Output', 'Output', (['"""tab1"""', '"""style"""'], {}), "('tab1', 'style')\n", (1706, 1723), False, 'from dash import Output, Input, dcc\n'), ((1725, 1748), 'dash.Output', 'Output', (['"""tab2"""', '"""style"""'], {}), "('tab2', 'style')\n", (1731, 1748), False, 'from dash import Output, Input, dcc\n'), ((1981, 2011), 'dash.Output', 'Output', (['"""popover1"""', '"""children"""'], {}), "('popover1', 'children')\n", (1987, 2011), False, 'from dash import Output, Input, dcc\n'), ((2013, 2040), 'dash.Input', 'Input', (['"""import"""', '"""n_clicks"""'], {}), "('import', 'n_clicks')\n", (2018, 2040), False, 'from dash import Output, Input, dcc\n'), ((2042, 2074), 'dash.Input', 'Input', (['"""upload-data"""', '"""contents"""'], {}), "('upload-data', 'contents')\n", (2047, 2074), False, 'from dash import Output, Input, dcc\n'), ((2075, 2097), 'dash.Input', 'Input', (['"""tabs"""', '"""value"""'], {}), "('tabs', 'value')\n", (2080, 2097), False, 'from dash import Output, Input, dcc\n'), ((1755, 1777), 'dash.Input', 'Input', (['"""tabs"""', '"""value"""'], {}), "('tabs', 'value')\n", (1760, 1777), False, 'from dash import Output, Input, dcc\n'), ((774, 836), 'dash.html.H1', 'html.H1', ([], {'children': '"""Tweet the Stocks"""', 'className': '"""header-title"""'}), "(children='Tweet the Stocks', className='header-title')\n", (781, 836), False, 'from dash import html\n'), ((854, 992), 'dash.html.P', 'html.P', ([], {'children': '"""Explore the correlation of stock prices and the related tagged tweets in 2019"""', 'className': '"""header-description"""'}), "(children=\n 'Explore the correlation of stock 
prices and the related tagged tweets in 2019'\n , className='header-description')\n", (860, 992), False, 'from dash import html\n'), ((1258, 1307), 'dash.dcc.Tab', 'dcc.Tab', ([], {'label': '"""Historical records"""', 'value': '"""tab1"""'}), "(label='Historical records', value='tab1')\n", (1265, 1307), False, 'from dash import Output, Input, dcc\n'), ((1335, 1376), 'dash.dcc.Tab', 'dcc.Tab', ([], {'label': '"""Prediction"""', 'value': '"""tab2"""'}), "(label='Prediction', value='tab2')\n", (1342, 1376), False, 'from dash import Output, Input, dcc\n')] |
from unittest import TestCase
from evaluate import Calculator
# Shared Calculator instance exercised by every test below.
calc = Calculator()
class TestCalculator(TestCase):
def test_evaluate_01(self):
self.assertEqual(calc.evaluate(string='127'), 127)
def test_evaluate_02(self):
self.assertEqual(calc.evaluate(string='2 + 3'), 5)
def test_evaluate_03(self):
self.assertEqual(calc.evaluate(string='2 - 3 - 4'), -5)
def test_evaluate_04(self):
self.assertEqual(calc.evaluate(string='10 * 5 / 2'), 25)
def test_evaluate_05(self):
self.assertEqual(calc.evaluate(string='2 / 2 + 3 * 4 - 6'), 7)
def test_evaluate_06(self):
self.assertEqual(calc.evaluate(string='2 + 3 * 4 / 3 - 6 / 3 * 3 + 8'),
8)
def test_evaluate_07(self):
self.assertEqual(calc.evaluate(string='1.1 + 2.2 + 3.3'), 6.6)
def test_evaluate_08(self):
self.assertEqual(calc.evaluate(string='1.1 * 2.2 * 3.3'), 7.986)
| [
"evaluate.Calculator"
] | [((71, 83), 'evaluate.Calculator', 'Calculator', ([], {}), '()\n', (81, 83), False, 'from evaluate import Calculator\n')] |
import os
import lxml.etree as et
import pandas as pd
import numpy as np
import regex as re
def get_element_text(element):
    """
    Recursively collect an element's text, skipping footnote numbers
    (``SU`` tags) and joining the pieces with single spaces.
    """
    parts = []
    # Footnote-number elements contribute no head text of their own.
    if element.tag != 'SU' and element.text:
        parts.append(element.text)
    for child in element.getchildren():
        child_text = get_element_text(child)
        if child_text:
            parts.append(child_text)
    if element.tail:
        parts.append(element.tail)
    return ' '.join(parts)
def update_header(header, element):
    """
    Maintain the running list of hierarchical section headers.

    The depth comes from the element's SOURCE attribute (e.g. 'HD1', 'HED');
    a trailing 'D' counts as level 0. Returns the updated header list.
    """
    header_text = ''.join(element.itertext())
    level_string = element.get('SOURCE', 'HED')
    new_level = 1 + int(level_string[-1].replace('D', '0'))
    if new_level < len(header):
        # Shallower header: truncate deeper levels and replace this slot.
        return header[:new_level - 1] + [header_text]
    # Deeper (or equal) header: pad in place up to the new depth, then set
    # the last slot to the new header text.
    header += [''] * (new_level - len(header))
    header[-1] = header_text
    return header
def get_ancestor(element, tag):
    """Return the nearest ancestor of ``element`` with the given tag, or None."""
    return next((a for a in element.iterancestors() if a.tag == tag), None)
def parse_cfr_string(s,verbose=False):
    # Parse a CFR reference like "12 CFR 1026" into (title, part) tuples.
    # The first whitespace token is the title; every part captured after
    # "CFR" (digits, roman numerals, or '_') is yielded with that title.
    # Relies on the third-party `regex` module's .captures() to retrieve
    # every hit of a repeated group.
    try:
        title = s.split()[0]
        for part in re.search(r'CFR(.*?(?P<part>(_|\d+|\b[IVXCL]+\b)))+',s,re.IGNORECASE).captures('part'):
            yield (title,part)
    except Exception:
        # Malformed/empty strings (or no match -> AttributeError) fall back
        # to a single (nan, nan) marker.
        if verbose: print('Warning: Failed to parse CFR string "%s"' % s)
        yield (np.nan,np.nan)
def build_lstsub_cfr_map(root,verbose=False):
    """Map PART elements to (title, part) CFR tuples via 'List of Subjects' tables.

    Each LSTSUB element is associated with the PART elements that follow it
    (until the next LSTSUB). When a LSTSUB lists exactly as many CFR
    references as it has following PARTs, they are paired positionally.
    """
    # First associate each lstsub with the appropriate part elements
    lstsub_map = {e:[] for e in root.xpath('.//LSTSUB')}
    lstsub = None
    for e in root.xpath('.//LSTSUB|.//PART'):
        if e.tag == 'LSTSUB':
            lstsub = e
        elif lstsub is not None:
            lstsub_map[lstsub].append(e)
    # Then create a map from part elements to cfr
    cfr_map = {}
    for lstsub,part_elements in lstsub_map.items():
        cfrs = [cfr for e in lstsub.xpath('./CFR')
                for cfr in parse_cfr_string(get_element_text(e),verbose)]
        # Only trust the pairing when the counts line up one-to-one.
        if len(cfrs) == len(part_elements):
            for cfr,part_element in zip(cfrs,part_elements):
                cfr_map[part_element] = cfr
    return cfr_map
def build_header_part_map(root):
    """Map each PART element to the part number found in its first header."""
    part_map = {}
    for part_element in root.xpath('.//PART'):
        # Only the first descendant header is consulted for each part.
        for header_element in part_element.xpath(r'descendant::HD[1]'):
            header_text = get_element_text(header_element)
            for match in re.finditer(r'part\s+(\d+|_)', header_text, re.IGNORECASE):
                part_map[part_element] = match[1]
    return part_map
def build_part_cfr_map(root,verbose=False):
    """
    Build a mapping from part elements to CFR (title,part) tuples.

    If document has one CFR reference and one part element then mapping is trivial.
    If document has multiple CFR references and/or multiple part elements, *try*
    to infer correct CFR for each part element using the "List of Subjects"
    table that comes before a CFR reg part, and part headers.
    """
    doc_cfrs = [cfr for e in root.xpath('.//PREAMB/CFR')
                for cfr in parse_cfr_string(get_element_text(e),verbose)]
    part_elements = root.xpath('.//PART')
    if not doc_cfrs and not part_elements:
        return {}
    elif len(doc_cfrs) == 1 and len(part_elements) == 1:
        # Trivial case with one CFR part and no ambiguity
        return {part_elements[0]:doc_cfrs[0]}
    else:
        # Multiple sections case. Use lstsub and header information to infer CFR.
        lstsub_cfr_map = build_lstsub_cfr_map(root)
        header_part_map = build_header_part_map(root)
        cfr_map = {}
        for e in part_elements:
            # LSTSUB association is preferred; header part numbers are the
            # fallback, with the document-level CFRs used to pick the title.
            if e in lstsub_cfr_map:
                cfr_map[e] = lstsub_cfr_map[e]
                if e in header_part_map and lstsub_cfr_map[e][1] != header_part_map[e]:
                    if verbose: print('Warning: lstsub and header cfr do not agree for %s\nlstsub_cfr_map=%s\nheader_part_map=%s' % (str(e),str(lstsub_cfr_map),str(header_part_map)))
            elif e in header_part_map:
                part = header_part_map[e]
                potential_titles = set(cfr[0] for cfr in doc_cfrs if cfr[1] == part)
                if part == '_':
                    title = '_'
                elif len(potential_titles) == 1:
                    title = list(potential_titles)[0]
                else:
                    # Ambiguous or unknown title for this part number.
                    title = np.nan
                    if verbose: print('Warning: Could not infer title for part element %s\npotential_titles=%s' % (str(e),str(potential_titles)))
                cfr_map[e] = (title,part)
            else:
                cfr_map[e] = (np.nan,np.nan)
                if verbose: print('Warning: Could not infer CFR for part element %s' % str(e))
        return cfr_map
def split_numbering(s):
    """
    Split leading paragraph numbering (e.g. '(a) ', '1. ') off of ``s``.

    Returns (numbering, remainder); numbering is np.nan when ``s`` is not a
    string or carries no recognized leading numbering.
    """
    if type(s) is not str:
        return np.nan, s
    # Bracketed style: one or more "(a)" / "(12)" / "(IV)" groups.
    bracketed = re.match(r'\s*?((\(\s*(\d+|[A-Z]{1,3}|[a-z]{1,3})\s*\))\s*)+', s)
    if bracketed:
        prefix = bracketed.group(0)
        return prefix, s[len(prefix):]
    # Dotted style: "1." or "A." followed by whitespace.
    dotted = re.match(r'\s*((\d+|[A-Z]+)\.)\s', s)
    if dotted:
        prefix = dotted.group(0)
        return prefix, s[len(prefix):]
    return np.nan, s
def clean_paragraph_text(s):
    '''
    Normalize paragraph text to help spacy tokenization: insert a space
    before opening parentheses and pad em-dashes/dashes with spaces.
    '''
    with_paren_space = re.sub(r'(\S)\(', r'\g<1> (', s)
    return re.sub(r'(\S)([\u2014-])(\S)', r'\g<1> \g<2> \g<3>', with_paren_space)
def parse_xml_file(xmlFile, **args):
    """Parse a Federal Register XML file into a paragraph DataFrame."""
    with open(xmlFile, 'rb') as xml_handle:
        parsed_tree = et.parse(xml_handle)
    return parse_reg_xml_tree(parsed_tree, **args)
def parse_reg_xml_tree(tree,nlp=None,extract_numbering=True,verbose=False,
                       split_paragraphs=True):
    """Walk a parsed FR XML tree and return one DataFrame row per paragraph.

    Tracks the current header stack and enclosing PART while iterating in
    document order, tagging each paragraph with CFR info, section metadata,
    and footnote references.
    """
    root = tree.getroot()
    part_cfr_map = build_part_cfr_map(root,verbose)
    paragraphs = []
    header = []
    part_element = None # Need to track part elements because they are not hierarchical
    for element in root.iter():
        if element.tag == 'PART':
            part_element = element
        elif element.tag == 'HD':
            header = update_header(header,element)
        if element.tag in ['P','AMDPAR','HD','EXTRACT','GPOTABLE','SECTION']:
            text = get_element_text(element)
            paragraph = {
                'xml_path':tree.getpath(element),
                'header':tuple(header),
                'legal':False,
                'tag':element.tag
            }
            # REGTEXT ancestry is the strongest signal of legal reg text;
            # otherwise infer from the XML path within the current PART.
            reg_element = get_ancestor(element,'REGTEXT')
            if reg_element is not None:
                paragraph['legal'] = True
                paragraph['cfr_title'] = reg_element.get('TITLE')
                paragraph['cfr_part'] = reg_element.get('PART')
            elif part_element is not None and re.search(r'(SECTION|AMDPAR|EXTRACT|SUBPART)',paragraph['xml_path']):
                paragraph['legal'] = True
                paragraph['cfr_title'],paragraph['cfr_part'] = part_cfr_map[part_element]
            section_element = get_ancestor(element,'SECTION')
            if section_element is not None:
                try:
                    paragraph['cfr_section'] = section_element.xpath('./SECTNO/text()')[0].split()[1]
                except Exception:
                    if verbose: print('Warning: Failed to get section number for section %s' % section_element)
                try:
                    paragraph['section_subject'] = section_element.xpath('.//SUBJECT/text()')[0]
                except Exception:
                    if verbose: print('Warning: Section %s has no subject information' % section_element)
            # Paragraphs inside a FTNT element ARE a footnote; elsewhere any
            # SU children are references to footnotes.
            ftnt_element = get_ancestor(element,'FTNT')
            if ftnt_element is not None:
                try:
                    paragraph['footnote'] = ftnt_element.xpath('.//SU/text()')[0]
                except Exception:
                    paragraph['footnote'] = 0
                    if verbose: print('Warning: Footnote %s has no numbering information' % ftnt_element)
            else:
                paragraph['footnotes'] = element.xpath('./SU/text()')
            """
            Agencies are inconsistent about how they use paragraph formatting:
            -In some documents, XML <p> items correspond to paragraphs
            -In some documents, <p> items contain multiple paragraphs split by newline characters
            For the sake of consistentency, split all paragraphs on newlines by default,
            keeping trailing whitespace with each paragraph
            """
            if split_paragraphs:
                par_texts = [m.group(0) for m in re.finditer(r'\s*.*\n?\s*',text)]
            else:
                par_texts = [text]
            for par_text in par_texts:
                if extract_numbering:
                    paragraph['numbering'],par_text = split_numbering(par_text)
                paragraph['text'] = par_text
                paragraphs.append(paragraph.copy())
    paragraph_df = pd.DataFrame(paragraphs)
    # Ensure dataframe has all columns
    for c in ['cfr_title','cfr_part','section','section_subject','footnote','footnotes']:
        if c not in paragraph_df.columns:
            paragraph_df[c] = np.nan
    # paragraph_df['text'] = paragraph_df['text'].apply(clean_paragraph_text)
    if nlp is not None:
        paragraph_df['doc'] = paragraph_df['text'].apply(nlp)
    return paragraph_df
def clean_html_text(text):
    """Strip FR plain-text boilerplate and normalize characters and layout."""
    substitutions = (
        # Strip header (everything before AGENCY)
        (r'.+(?=AGENCY)', '', re.DOTALL),
        # Strip the trailing FR Doc footer (searching from the end)
        (r'\[(FR )?Doc.+?$', '', re.DOTALL | re.REVERSE),
        # Replace bullets with bullet char
        (r'<bullet>', '•', 0),
        # Replace double-dash with em-dash
        (r'(?<=[^\s-])--(?=[^\s-])', '—', 0),
        # Replace 'Sec.' with §
        (r'(?<=\s)Sec\.', '§', 0),
        # Remove html tags (not worth parsing)
        (r'<\\?.+?>', '', 0),
        # Delete page markers
        (r'\n\s*\[\[Page.+\]\]\s*\n', ' ', re.IGNORECASE),
        # Replace in-paragraph line-breaks with spaces
        (r'[ -]\n(?=\S)', ' ', 0),
        # Convert inline titles to their own lines (starting next line with tab)
        (r'(?<=(^|\n)[^a-z]+:\s)', '\n    ', 0),
    )
    for pattern, replacement, flags in substitutions:
        text = re.sub(pattern, replacement, text, flags=flags)
    return text
def tag_html_paragraph(s):
    """Classify a plain-text FR chunk with the XML-style tag it resembles."""
    if s.startswith('<P>'):
        return 'P'
    if re.match(r'\d\n\S', s):
        return 'AMDPAR'
    if re.match(r'§\s+\d', s):
        return 'SECTION'
    if re.match(r'\*+\s*', s):
        return 'STARS'
    if re.match(r'\S', s):
        # Multi-line chunks are extracts; single-line ones are headers.
        return 'EXTRACT' if '\n' in s.strip() else 'HD'
def parse_footnotes(s):
    """
    Extract footnote markers (backslash-delimited numbers) from ``s``.

    Returns (footnote, footnotes, cleaned): when the paragraph *starts* with
    a marker it is the footnote body itself and its number is ``footnote``;
    otherwise every embedded marker number is collected into ``footnotes``.
    Markers are stripped from the returned text either way.
    """
    # Negative lookbehind keeps a marker that directly follows digits
    # (e.g. fractions) from being treated as a footnote reference.
    footnote_pattern = r'(?<!\d\s*)\\(?P<i>\d+)\\'
    leading_marker = re.match(footnote_pattern, s)
    if leading_marker:
        footnote, footnotes = leading_marker.group('i'), []
    else:
        footnote = np.nan
        footnotes = [m.group('i') for m in re.finditer(footnote_pattern, s)]
    return footnote, footnotes, re.sub(footnote_pattern, '', s)
def line_code(line):
    """
    Classify one text line for table detection:
    ' ' blank, '-' all dashes, 'c' indented continuation, '*' mostly stars,
    'a' mostly alphabetic, '.' anything else (numeric/dotted rows).
    """
    if not line.strip():
        return ' '
    if line.count('-') == len(line):
        return '-'
    if line.startswith(' '):
        return 'c'
    star_count = line.count('*')
    # Integer comparisons equivalent to the 0.5 * len(line) thresholds.
    if 2 * star_count >= len(line):
        return '*'
    alpha_count = sum(1 for ch in line if ch.isalpha())
    if 2 * (alpha_count + star_count) > len(line):
        return 'a'
    return '.'
# ''.join(map(line_code,text.split('\n')))
# print('\n'.join(map(lambda line: line_code(line)+' '+line,text.split('\n'))))
def split_tables(text):
    '''
    Identify tables using pseudo-likelyhood approach.
    Want to create table partitions (start line, end line) such that:
        -Tables are at least 3 lines long
        -Tables start and end with a horizontal line
        -Tables sort common table chars {'-',' ','.'} from other chars
        as completely as possible

    Yields (non_table_text, table_text) pairs that cover the input once.
    '''
    lines = text.split('\n')
    line_codes = ''.join(map(line_code, lines))
    table_matches = list(re.finditer(r'.(-[ c\.]*){2,}', line_codes))
    if not table_matches:
        yield text, ''
        return
    # Track the end of the previous table so each pre_table slice starts
    # where the last one stopped. (The original restarted at line 0 for
    # every match, duplicating earlier content when several tables matched.)
    prev_end = 0
    for table_match in table_matches:
        pre_table = '\n'.join(lines[prev_end:table_match.start()])
        table = '\n'.join(lines[table_match.start():table_match.end()])
        yield pre_table, table
        prev_end = table_match.end()
    if prev_end < len(lines):
        yield '\n'.join(lines[prev_end:]), ''
def extract_html_paragraphs(text,extract_numbering=True):
    """Yield paragraph-info dicts from cleaned FR plain text.

    Splits the text into table / non-table regions, marks tab-indented
    paragraphs with a synthetic <P> tag, classifies each chunk, and tracks
    the running header and legal-text state across chunks.
    """
    header = None
    legal_header = False
    legal = False
    # Split tables
    for text,table in split_tables(text):
        # Mark paragraphs indicated by leading tabs (4 spaces)
        text = re.sub(r'(?<=\n) {4}(?!\s)',' <P>',text)
        # Chunks end at blank lines or immediately before a <P> marker.
        for s in (m.group(0) for m in re.finditer(r'.+?[^\n]($|\n{2}\s*|(?=<P>))',text,flags=re.DOTALL)):
            # Skip dashed lines
            if not re.match(r'^-{5,}\s*$',s):
                tag = tag_html_paragraph(s)
                footnote = np.nan
                footnotes = []
                # Drop dashed lines
                s = re.sub(r'\n-{5,}\n','\n',s)
                if s:
                    if tag == 'P':
                        # Trim tab indentation
                        s = s[3:]
                        # Parse and remove footnote numbers
                        footnote,footnotes,s = parse_footnotes(s)
                    elif tag == 'AMDPAR':
                        # Trim amendment indicator ("0")
                        s = s[2:]
                    elif tag == 'HD':
                        header = s.strip()
                        legal_header = bool(re.match(r'(Part\s\d+|Subpart|§|Appendix)',header,re.IGNORECASE))
                    elif tag == 'SECTION':
                        header = s.strip()
                        legal_header = True
                    # Legal state persists from the latest header; STARS and
                    # amendment/section chunks are always legal text.
                    legal = legal_header or tag in {'AMDPAR','SECTION','STARS'}
                    paragraph_info = {'tag':tag,'text':s,'footnote':footnote,'footnotes':footnotes,'header':header,'legal':legal}
                    if extract_numbering:
                        paragraph_info['numbering'],paragraph_info['text'] = split_numbering(s)
                    yield paragraph_info
        if table:
            yield {'tag':'GPOTABLE','text':table,'header':header,'legal':legal}
def parse_html(s, extract_numbering=True):
    """Parse FR plain-text/HTML content into a paragraph DataFrame."""
    cleaned = clean_html_text(s)
    rows = extract_html_paragraphs(cleaned, extract_numbering=extract_numbering)
    return pd.DataFrame(list(rows))
def parse_html_file(html_filename, extract_numbering=True):
    """Read a UTF-8 FR plain-text file and parse it into a paragraph DataFrame."""
    with open(html_filename, encoding='utf8') as source:
        raw = source.read()
    return parse_html(raw, extract_numbering=extract_numbering)
def extract_frdoc_number(s):
    '''
    This function extracts the document from an FRDOC string. The standard
    format is something like:
        "[FR Doc. 12-1149 Filed 1-18-12; 8:45 am]"
    Where "12-1149" is the document number. However, contents are clearly
    manually entered, because there can be a variety of deviations from this
    format. For example:
        "[FR Doc.12-1149; Filed 1-18-12; 8:45 am]"
        "[FR Doc. 12-1149 1-18-12; 8:45 am]"
        "[JR Doc. 12-1149 Filed 1-18-12; 8:45 am]"
        "[FR Doc. 12- 1149 Filed 1-18-12; 8:45 am]"
        "[FR Doc. 1149 Filed 1-18-12; 8:45 am]"
    Many document numbers also start with "E" or "Z" instead of the first digits
    of the year.
    Reprints and corrections are also sometimes labeled by prepending to the
    document number with something like "C1-", "R1-", or "X"
    This function assumes the document number is located either immediately
    following "FR Doc.", or immediately preceeding "Filed". Document numbers are
    allowed to start with E or Z (as in "E7-1592").
    Sometimes the year component is ommitted ("12-" in the example above).
    In these cases, the function attempts to locate the year from the date,
    assuming it appears just before the time, and prepends it to the document
    number.
    Finally, the document number is standardized by converting to ascii,
    removing all whitespace, and making letters uppercase.
    If the function cannot parse the document number it prints a warning and
    returns None.
    '''
    # Define the general fr doc pattern with up to three parts
    fr_pattern = r'''((?P<part1>[CRX]\d*)[\s-]*)? # Optional prepended part
                     (?P<part2>[EZ]?\d+) # Required initial or middle number
                     (\s?-+\s?(?P<part3>\d*))? # Optional third number
                     '''
    # NOTE(review): `unidecode` is not imported in the visible portion of
    # this module — confirm it is imported elsewhere in the file.
    s = unidecode(s)
    # Case 1: Format is "[FR Doc. #####..."
    m = re.search(fr'FR\sDoc\.?\s*{fr_pattern}',s,flags=re.IGNORECASE | re.VERBOSE)
    if not m or len(m.group(0)) < 3:
        # Case 2: Format is "...###### Filed..."
        m = re.search(fr'[.\s]{fr_pattern};?\s*Fil',s,flags=re.IGNORECASE | re.VERBOSE)
    if m:
        # Rebuild the document number from parts
        d = m.group('part2')
        if m.group('part1'):
            d = m.group('part1') + '-' + d
        if m.group('part3'):
            d = d + '-' + m.group('part3')
        d = unidecode(d)
        d = d.upper()
        return d
    else:
        print(f'Warning: Could not parse document number in "{s}"')
        return None
def standardize_frdoc_number(d):
    """
    The document numbers used in on federalregister.gov are also parsed from
    raw data, and can include errors such as small pieces of text appended to
    the end of the string.
    Document numbers also sometimes have multiple reprentations. For example,
        2005-0034
        05-0034
        5-0034
        E5-0034
        2005-34
    Would presumably all refer to the same document.
    This function standardizes documents numbers by:
        1) Removing any trailing non-numeric characters
        2) Dropping first two digits of the year when 4 digits are present
        3) Dropping leading zeros from the last number
    """
    try:
        # Remove trailing non-numeric chars
        d = re.sub(r'[^0-9]+$','',d)
        # Remove "E" - never seems completely necessary
        d = d.replace('E','')
        # Split into parts
        parts = d.rsplit('-')
        # Clean year. Could be in any part except the last.
        # (Fixed: the original read `for i in range(len(parts)-1) in parts[:-1]:`,
        # which evaluated a membership test and iterated over a bool, raising
        # TypeError — so the year and leading-zero cleanup never ran.)
        for i in range(len(parts) - 1):
            if re.match(r'(19|20?)\d\d',parts[i]):
                parts[i] = re.sub(r'(19|200?)','',parts[i])
                break
        try:
            # Strip leading zeros from the serial number.
            parts[-1] = str(int(parts[-1]))
        except Exception:
            pass
        return '-'.join(parts)
    except Exception:
        # Non-string or otherwise unparseable input: return it unchanged.
        return d
class FrdocResolver():
    """Resolve a scraped document's FR document number against the known index.

    Tries, in order: the exact extracted number, its standardized form, and
    the FR citation (publication date + volume + page range), optionally
    narrowed by substring matching against the raw frdoc string.
    """
    def __init__(self):
        # NOTE(review): load_info_df is not defined in the visible portion of
        # this module — confirm it is imported/defined elsewhere in the file.
        self.info_df = load_info_df(fields=['frdoc_number','publication_date','volume','start_page','end_page'])
        self.info_df['standardized_frdoc'] = self.info_df['frdoc_number'].apply(standardize_frdoc_number)
        # Known document numbers, skipping missing/blank entries.
        self.all_frdocs = set(d for d in self.info_df['frdoc_number'].dropna() if d.strip())
    def __call__(self,doc_info):
        """Return the canonical frdoc_number for doc_info, or None if unresolved."""
        if doc_info['frdoc_string']:
            frdoc_number = extract_frdoc_number(doc_info['frdoc_string'])
            # Search based on extracted frdoc number
            if frdoc_number in self.all_frdocs:
                return frdoc_number
            # Search based on the standardized frdoc number
            standardized_frdoc = standardize_frdoc_number(frdoc_number)
            candidates_df = self.info_df[self.info_df['standardized_frdoc'] == standardized_frdoc]
            if len(candidates_df) == 1:
                return candidates_df['frdoc_number'].values[0]
        # Search based on the publication date, volume and pages (FR citation)
        candidates_df = self.info_df[(self.info_df['publication_date'] == doc_info['publication_date']) \
                                    & (self.info_df['volume'] == doc_info['volume']) \
                                    & (self.info_df['start_page'] == doc_info['start_page']) \
                                    & (self.info_df['end_page'] == doc_info['end_page'])]
        if len(candidates_df) == 1:
            return candidates_df['frdoc_number'].values[0]
        if doc_info['frdoc_string']:
            # Try to refine search by seeing if frdoc is within frdoc_string (need to strip whitespace)
            frdoc_string_nospace = re.sub(r'\s','',doc_info['frdoc_string'])
            candidates_df['frdoc_number_nospace'] = [re.sub(r'\s','',d) for d in candidates_df['frdoc_number']]
            candidates_df['frdoc_match'] = [(d in frdoc_string_nospace) for d in candidates_df['frdoc_number_nospace']]
            candidates_df = candidates_df[candidates_df['frdoc_match']]
            if len(candidates_df) == 1:
                return candidates_df['frdoc_number'].values[0]
        # All strategies failed: report the identifying info and candidates.
        print('Warning: Could not resolve frdoc for document with the following identify info:')
        print(doc_info)
        if len(candidates_df):
            print('Candidates:')
            print(candidates_df)
        else:
            print('Candidates: None')
        return None
| [
"regex.search",
"regex.finditer",
"lxml.etree.parse",
"regex.match",
"regex.sub",
"pandas.DataFrame"
] | [((5550, 5583), 'regex.sub', 're.sub', (['"""(\\\\S)\\\\("""', '"""\\\\g<1> ("""', 's'], {}), "('(\\\\S)\\\\(', '\\\\g<1> (', s)\n", (5556, 5583), True, 'import regex as re\n'), ((5629, 5688), 'regex.sub', 're.sub', (['"""(\\\\S)([\\\\u2014-])(\\\\S)"""', '"""\\\\g<1> \\\\g<2> \\\\g<3>"""', 's'], {}), "('(\\\\S)([\\\\u2014-])(\\\\S)', '\\\\g<1> \\\\g<2> \\\\g<3>', s)\n", (5635, 5688), True, 'import regex as re\n'), ((9228, 9252), 'pandas.DataFrame', 'pd.DataFrame', (['paragraphs'], {}), '(paragraphs)\n', (9240, 9252), True, 'import pandas as pd\n'), ((9723, 9772), 'regex.sub', 're.sub', (['""".+(?=AGENCY)"""', '""""""', 'text'], {'flags': 're.DOTALL'}), "('.+(?=AGENCY)', '', text, flags=re.DOTALL)\n", (9729, 9772), True, 'import regex as re\n'), ((9782, 9848), 'regex.sub', 're.sub', (['"""\\\\[(FR )?Doc.+?$"""', '""""""', 'text'], {'flags': '(re.DOTALL | re.REVERSE)'}), "('\\\\[(FR )?Doc.+?$', '', text, flags=re.DOTALL | re.REVERSE)\n", (9788, 9848), True, 'import regex as re\n'), ((9897, 9926), 'regex.sub', 're.sub', (['"""<bullet>"""', '"""•"""', 'text'], {}), "('<bullet>', '•', text)\n", (9903, 9926), True, 'import regex as re\n'), ((9975, 10021), 'regex.sub', 're.sub', (['"""(?<=[^\\\\s-])--(?=[^\\\\s-])"""', '"""—"""', 'text'], {}), "('(?<=[^\\\\s-])--(?=[^\\\\s-])', '—', text)\n", (9981, 10021), True, 'import regex as re\n'), ((10058, 10093), 'regex.sub', 're.sub', (['"""(?<=\\\\s)Sec\\\\."""', '"""§"""', 'text'], {}), "('(?<=\\\\s)Sec\\\\.', '§', text)\n", (10064, 10093), True, 'import regex as re\n'), ((10146, 10176), 'regex.sub', 're.sub', (['"""<\\\\\\\\?.+?>"""', '""""""', 'text'], {}), "('<\\\\\\\\?.+?>', '', text)\n", (10152, 10176), True, 'import regex as re\n'), ((10295, 10369), 'regex.sub', 're.sub', (['"""\\\\n\\\\s*\\\\[\\\\[Page.+\\\\]\\\\]\\\\s*\\\\n"""', '""" """', 'text'], {'flags': 're.IGNORECASE'}), "('\\\\n\\\\s*\\\\[\\\\[Page.+\\\\]\\\\]\\\\s*\\\\n', ' ', text, flags=re.IGNORECASE)\n", (10301, 10369), True, 'import regex as re\n'), 
((10423, 10458), 'regex.sub', 're.sub', (['"""[ -]\\\\n(?=\\\\S)"""', '""" """', 'text'], {}), "('[ -]\\\\n(?=\\\\S)', ' ', text)\n", (10429, 10458), True, 'import regex as re\n'), ((10545, 10594), 'regex.sub', 're.sub', (['"""(?<=(^|\\\\n)[^a-z]+:\\\\s)"""', '"""\n """', 'text'], {}), "('(?<=(^|\\\\n)[^a-z]+:\\\\s)', '\\n ', text)\n", (10551, 10594), True, 'import regex as re\n'), ((11108, 11137), 'regex.match', 're.match', (['footnote_pattern', 's'], {}), '(footnote_pattern, s)\n', (11116, 11137), True, 'import regex as re\n'), ((11369, 11400), 'regex.sub', 're.sub', (['footnote_pattern', '""""""', 's'], {}), "(footnote_pattern, '', s)\n", (11375, 11400), True, 'import regex as re\n'), ((17091, 17170), 'regex.search', 're.search', (['f"""FR\\\\sDoc\\\\.?\\\\s*{fr_pattern}"""', 's'], {'flags': '(re.IGNORECASE | re.VERBOSE)'}), "(f'FR\\\\sDoc\\\\.?\\\\s*{fr_pattern}', s, flags=re.IGNORECASE | re.VERBOSE)\n", (17100, 17170), True, 'import regex as re\n'), ((4898, 4969), 'regex.match', 're.match', (['"""\\\\s*?((\\\\(\\\\s*(\\\\d+|[A-Z]{1,3}|[a-z]{1,3})\\\\s*\\\\))\\\\s*)+"""', 's'], {}), "('\\\\s*?((\\\\(\\\\s*(\\\\d+|[A-Z]{1,3}|[a-z]{1,3})\\\\s*\\\\))\\\\s*)+', s)\n", (4906, 4969), True, 'import regex as re\n'), ((5154, 5194), 'regex.match', 're.match', (['"""\\\\s*((\\\\d+|[A-Z]+)\\\\.)\\\\s"""', 's'], {}), "('\\\\s*((\\\\d+|[A-Z]+)\\\\.)\\\\s', s)\n", (5162, 5194), True, 'import regex as re\n'), ((5784, 5795), 'lxml.etree.parse', 'et.parse', (['f'], {}), '(f)\n', (5792, 5795), True, 'import lxml.etree as et\n'), ((10694, 10718), 'regex.match', 're.match', (['"""\\\\d\\\\n\\\\S"""', 's'], {}), "('\\\\d\\\\n\\\\S', s)\n", (10702, 10718), True, 'import regex as re\n'), ((12425, 12468), 'regex.finditer', 're.finditer', (['""".(-[ c\\\\.]*){2,}"""', 'line_codes'], {}), "('.(-[ c\\\\.]*){2,}', line_codes)\n", (12436, 12468), True, 'import regex as re\n'), ((13104, 13150), 'regex.sub', 're.sub', (['"""(?<=\\\\n) {4}(?!\\\\s)"""', '""" <P>"""', 'text'], {}), 
"('(?<=\\\\n) {4}(?!\\\\s)', ' <P>', text)\n", (13110, 13150), True, 'import regex as re\n'), ((17266, 17344), 'regex.search', 're.search', (['f"""[.\\\\s]{fr_pattern};?\\\\s*Fil"""', 's'], {'flags': '(re.IGNORECASE | re.VERBOSE)'}), "(f'[.\\\\s]{fr_pattern};?\\\\s*Fil', s, flags=re.IGNORECASE | re.VERBOSE)\n", (17275, 17344), True, 'import regex as re\n'), ((18492, 18517), 'regex.sub', 're.sub', (['"""[^0-9]+$"""', '""""""', 'd'], {}), "('[^0-9]+$', '', d)\n", (18498, 18517), True, 'import regex as re\n'), ((2519, 2568), 'regex.finditer', 're.finditer', (['"""part\\\\s+(\\\\d+|_)"""', 's', 're.IGNORECASE'], {}), "('part\\\\s+(\\\\d+|_)', s, re.IGNORECASE)\n", (2530, 2568), True, 'import regex as re\n'), ((10750, 10773), 'regex.match', 're.match', (['"""§\\\\s+\\\\d"""', 's'], {}), "('§\\\\s+\\\\d', s)\n", (10758, 10773), True, 'import regex as re\n'), ((18790, 18826), 'regex.match', 're.match', (['"""(19|20?)\\\\d\\\\d"""', 'parts[i]'], {}), "('(19|20?)\\\\d\\\\d', parts[i])\n", (18798, 18826), True, 'import regex as re\n'), ((20804, 20847), 'regex.sub', 're.sub', (['"""\\\\s"""', '""""""', "doc_info['frdoc_string']"], {}), "('\\\\s', '', doc_info['frdoc_string'])\n", (20810, 20847), True, 'import regex as re\n'), ((1252, 1325), 'regex.search', 're.search', (['"""CFR(.*?(?P<part>(_|\\\\d+|\\\\b[IVXCL]+\\\\b)))+"""', 's', 're.IGNORECASE'], {}), "('CFR(.*?(?P<part>(_|\\\\d+|\\\\b[IVXCL]+\\\\b)))+', s, re.IGNORECASE)\n", (1261, 1325), True, 'import regex as re\n'), ((10807, 10830), 'regex.match', 're.match', (['"""\\\\*+\\\\s*"""', 's'], {}), "('\\\\*+\\\\s*', s)\n", (10815, 10830), True, 'import regex as re\n'), ((11327, 11359), 'regex.finditer', 're.finditer', (['footnote_pattern', 's'], {}), '(footnote_pattern, s)\n', (11338, 11359), True, 'import regex as re\n'), ((13187, 13256), 'regex.finditer', 're.finditer', (['""".+?[^\\\\n]($|\\\\n{2}\\\\s*|(?=<P>))"""', 'text'], {'flags': 're.DOTALL'}), "('.+?[^\\\\n]($|\\\\n{2}\\\\s*|(?=<P>))', text, flags=re.DOTALL)\n", 
(13198, 13256), True, 'import regex as re\n'), ((13306, 13332), 'regex.match', 're.match', (['"""^-{5,}\\\\s*$"""', 's'], {}), "('^-{5,}\\\\s*$', s)\n", (13314, 13332), True, 'import regex as re\n'), ((13499, 13529), 'regex.sub', 're.sub', (['"""\\\\n-{5,}\\\\n"""', '"""\n"""', 's'], {}), "('\\\\n-{5,}\\\\n', '\\n', s)\n", (13505, 13529), True, 'import regex as re\n'), ((18853, 18886), 'regex.sub', 're.sub', (['"""(19|200?)"""', '""""""', 'parts[i]'], {}), "('(19|200?)', '', parts[i])\n", (18859, 18886), True, 'import regex as re\n'), ((20899, 20919), 'regex.sub', 're.sub', (['"""\\\\s"""', '""""""', 'd'], {}), "('\\\\s', '', d)\n", (20905, 20919), True, 'import regex as re\n'), ((7039, 7107), 'regex.search', 're.search', (['"""(SECTION|AMDPAR|EXTRACT|SUBPART)"""', "paragraph['xml_path']"], {}), "('(SECTION|AMDPAR|EXTRACT|SUBPART)', paragraph['xml_path'])\n", (7048, 7107), True, 'import regex as re\n'), ((10862, 10880), 'regex.match', 're.match', (['"""\\\\S"""', 's'], {}), "('\\\\S', s)\n", (10870, 10880), True, 'import regex as re\n'), ((8863, 8898), 'regex.finditer', 're.finditer', (['"""\\\\s*.*\\\\n?\\\\s*"""', 'text'], {}), "('\\\\s*.*\\\\n?\\\\s*', text)\n", (8874, 8898), True, 'import regex as re\n'), ((14053, 14120), 'regex.match', 're.match', (['"""(Part\\\\s\\\\d+|Subpart|§|Appendix)"""', 'header', 're.IGNORECASE'], {}), "('(Part\\\\s\\\\d+|Subpart|§|Appendix)', header, re.IGNORECASE)\n", (14061, 14120), True, 'import regex as re\n')] |
"""
Script to generate a custom list of stopwords that extend upon existing lists.
"""
import json
import spacy
from urllib.request import urlopen
from itertools import chain
def combine(*lists):
    """Concatenate an arbitrary number of lists into a single flat list."""
    merged = []
    for current in lists:
        merged.extend(current)
    return merged
def get_spacy_lemmas():
    """Download spaCy's English lemma lookup table and return it as a dict."""
    url = "https://raw.githubusercontent.com/explosion/spacy-lookups-data/master/spacy_lookups_data/data/en_lemma_lookup.json"
    # Fetch the raw JSON payload; the context manager closes the connection.
    with urlopen(url) as response:
        payload = response.read()
    return json.loads(payload)
def lookup_verbs(roots, spacy_lemmas):
    """Return every inflected form of each root verb, plus the root itself.

    For each root the result contains all keys of ``spacy_lemmas`` whose
    lemma equals the root (in table order), followed by the root, with the
    per-root groups concatenated in ``roots`` order — identical ordering to
    a per-root scan of the table.

    The reverse index (lemma -> forms) is built in one pass, so the cost is
    O(len(roots) + len(spacy_lemmas)) instead of rescanning the whole lemma
    table once per root.
    """
    # Single pass: group all inflected forms under their lemma.
    forms_by_lemma = {}
    for form, lemma in spacy_lemmas.items():
        forms_by_lemma.setdefault(lemma, []).append(form)
    verblist = []
    for root in roots:
        verblist.extend(forms_by_lemma.get(root, []))
        # The root itself is always included, even with no known forms.
        verblist.append(root)
    return verblist
if __name__ == "__main__":
    # We first get the default spaCy stopword list
    nlp = spacy.blank('en')
    spacy_stopwords = nlp.Defaults.stop_words
    # Full lemma lookup table, fetched over the network.
    spacy_lemmas = get_spacy_lemmas()
    # Create custom lists depending on the class of words seen in the data
    person_titles = ['mr', 'mrs', 'ms', 'dr', 'mr.', 'mrs.', 'ms.', 'dr.', 'e']
    # Contraction fragments left behind by tokenization (e.g. "don't" -> "don").
    broken_words = ['don', 'isn', 'mustn', 'shouldn', 'couldn', 'doesn', 'didn']
    numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '000']
    url_terms = ['http', 'https', 'ref', 'href', 'com', 'src']
    days_of_the_week = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
                        'saturday', 'sunday']
    months_of_the_year = ['january', 'february', 'march', 'april', 'may', 'june', 'july',
                          'august', 'september', 'october', 'november', 'december']
    time_periods = ['minute', 'minutes', 'hour', 'hours', 'day', 'days', 'week', 'weeks',
                    'month', 'months', 'year', 'years']
    time_related = ['yesterday', 'today', 'tomorrow', 'day', 'night', 'morning',
                    'afternoon', 'evening', 'edt', 'est', 'time', 'times']
    common_nouns = ['new', 'york', 'nytimes', 'press', 'news', 'report', 'page', 'user', 'file', 'video', 'pic',
                    'photo', 'online', 'social', 'media', 'group', 'inbox', 'item',
                    'advertisement', 'world', 'store', 'story', 'life', 'family',
                    'people', 'man', 'woman', 'friend', 'friends']
    social_media = ['twitter', 'facebook', 'google', 'gmail', 'video', 'photo', 'image',
                    'user', 'social', 'media', 'page', 'online', 'stream', 'post',
                    'app']
    # Semantically light verbs that carry little topical meaning.
    light_verb_roots = [
        'ask', 'come', 'go', 'know', 'look', 'see', 'talk', 'try', 'use', 'want', 'call', 'click',
        'continue', 'comment', 'do', 'feel', 'find', 'give', 'get', 'have', 'include', 'like', 'live',
        'love', 'make', 'post', 'read', 'say', 'speak', 'send', 'share', 'show', 'sign', 'tag',
        'take', 'tell', 'think', 'update', 'work', 'write'
    ]
    # Convert light verb roots to all their inflected forms using the lemma lookup
    light_verbs_full = lookup_verbs(light_verb_roots, spacy_lemmas)
    # Combine into a single deduplicated set of custom stopwords
    add_stopwords = set(
        combine(
            person_titles, broken_words, numbers, url_terms, days_of_the_week, months_of_the_year,
            time_periods, time_related, common_nouns, social_media, light_verbs_full
        )
    )
    # Combine all stopwords into one list and export to text file
    combined_stopwords = spacy_stopwords.union(add_stopwords)
    stopword_list = sorted(list(combined_stopwords))
    # Write out stopwords to file, one word per line
    with open('custom_stopwords.txt', 'w') as f:
        for word in stopword_list:
            f.write(word + '\n')
    print(f"Exported {len(stopword_list)} words to stopword list.")
| [
"itertools.chain",
"json.loads",
"spacy.blank",
"urllib.request.urlopen"
] | [((547, 565), 'json.loads', 'json.loads', (['lemmas'], {}), '(lemmas)\n', (557, 565), False, 'import json\n'), ((1120, 1137), 'spacy.blank', 'spacy.blank', (['"""en"""'], {}), "('en')\n", (1131, 1137), False, 'import spacy\n'), ((276, 289), 'itertools.chain', 'chain', (['*lists'], {}), '(*lists)\n', (281, 289), False, 'from itertools import chain\n'), ((465, 489), 'urllib.request.urlopen', 'urlopen', (['spacy_lemma_url'], {}), '(spacy_lemma_url)\n', (472, 489), False, 'from urllib.request import urlopen\n')] |
# -*- coding: utf-8 -*-
# @Time : 18/12/10 上午10:27
# @Author : L_zejie
# @Site :
# @File : setup.py.py
# @Software: PyCharm Community Edition
from setuptools import setup, find_packages
# Package metadata, gathered in one place and unpacked into setup() below.
_metadata = dict(
    name="DynamicPool",
    packages=find_packages(),
    version='0.14',
    description="动态任务阻塞线程/进程池",
    author="L_zejie",
    author_email='<EMAIL>',
    url="https://github.com/Lzejie/DynamicPool",
    license="MIT Licence",
    keywords=["Thread Pool", "Dynamic Pool", "Dynamic Thread Pool", "Dynamic Process Pool"],
    classifiers=[],
    install_requires=[],
)

setup(**_metadata)
| [
"setuptools.find_packages"
] | [((242, 257), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (255, 257), False, 'from setuptools import setup, find_packages\n')] |
#!/usr/bin/env py.test
"""
Test SinglyLinkedList class.
"""
import copy
import unittest
from py_alg_dat import singly_linked_list
class TestSinglyLinkedList(unittest.TestCase):
    """
    Test SinglyLinkedList class.

    Exercises the element wrapper (SinglyLinkedListElement) and the list
    API itself: equality, copying, insertion, removal and accessors.
    Conditions are asserted individually (instead of being and-ed into a
    single boolean) so a failure reports exactly which structural
    invariant — head, tail, next-links or length — broke.
    """

    def setUp(self):
        # Shared fixture for the list-level tests: ['b', 'c', 'd'].
        self.list1 = singly_linked_list.SinglyLinkedList()
        self.list1.append('b')
        self.list1.append('c')
        self.list1.append('d')

    ### Begin test of local class SinglyLinkedListElement ###

    def test_singly_linked_list_element_equal(self):
        """
        Test operator (list element) "equal".
        """
        a_list = singly_linked_list.SinglyLinkedList()
        elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', None)
        elem2 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', None)
        self.assertEqual(elem1, elem2)

    def test_singly_linked_list_element_not_equal(self):
        """
        Test operator (list element) "equal" - inverted.
        """
        a_list = singly_linked_list.SinglyLinkedList()
        elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', None)
        elem2 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', None)
        self.assertNotEqual(elem1, elem2)

    def test_singly_linked_list_element_copy_equal(self):
        """
        Test operator (list element) "copy".
        """
        a_list = singly_linked_list.SinglyLinkedList()
        elem = singly_linked_list.SinglyLinkedListElement(a_list, 'a', None)
        e_copy = copy.copy(elem)
        self.assertEqual(elem, e_copy)

    def test_singly_linked_list_element_copy_not_equal(self):
        """
        Test operator (list element) "copy" - inverted.
        """
        a_list = singly_linked_list.SinglyLinkedList()
        elem = singly_linked_list.SinglyLinkedListElement(a_list, 'a', None)
        e_copy = copy.copy(elem)
        # Mutating the original after copying must break equality.
        elem.data = 'aa'
        self.assertNotEqual(elem, e_copy)

    def test_singly_linked_list_element_get_data(self):
        """
        Test method (list element) "get_data".
        """
        a_list = singly_linked_list.SinglyLinkedList()
        elem = singly_linked_list.SinglyLinkedListElement(a_list, 'a', None)
        self.assertEqual('a', elem.get_data())

    def test_singly_linked_list_element_get_next(self):
        """
        Test method (list element) "get_next".
        """
        a_list = singly_linked_list.SinglyLinkedList()
        elem2 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', None)
        elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', elem2)
        self.assertEqual(elem2, elem1.get_next())

    def test_singly_linked_list_element_insert_empty(self):
        """
        Test method (list element) "insert" on an empty list.
        """
        a_list = singly_linked_list.SinglyLinkedList()
        elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', None)
        elem1.insert('b')
        self.assertEqual(a_list.get_head(), a_list[0])
        self.assertEqual(a_list.get_tail(), a_list[0])
        self.assertIsNone(a_list[0].get_next())
        self.assertEqual(1, len(a_list))

    def test_singly_linked_list_element_insert_head(self):
        """
        Test method (list element) "insert" at the head of the list.
        """
        a_list = singly_linked_list.SinglyLinkedList()
        elem2 = singly_linked_list.SinglyLinkedListElement(a_list, 'c', None)
        elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', elem2)
        elem0 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', elem1)
        a_list.append(elem0.get_data())
        a_list.append(elem1.get_data())
        a_list.append(elem2.get_data())
        elem0.insert('aa')
        self.assertEqual(a_list.get_head(), a_list[0])
        self.assertEqual(a_list.get_tail(), a_list[len(a_list) - 1])
        self.assertEqual(elem1, a_list[0].get_next())
        self.assertEqual(3, len(a_list))

    def test_singly_linked_list_element_insert_middle(self):
        """
        Test method (list element) "insert" in the middle of the list.
        Before: [a] -> [b] -> [c] -> [d] -> [e]
        After:  [a] -> [b] -> [cc] -> [d] -> [e]
        """
        a_list = singly_linked_list.SinglyLinkedList()
        elem5 = singly_linked_list.SinglyLinkedListElement(a_list, 'e', None)
        elem4 = singly_linked_list.SinglyLinkedListElement(a_list, 'd', elem5)
        elemx = singly_linked_list.SinglyLinkedListElement(a_list, 'cc', elem4)
        elem3 = singly_linked_list.SinglyLinkedListElement(a_list, 'c', elem4)
        elem2 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', elem3)
        elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', elem2)
        a_list.append(elem1.get_data())
        a_list.append(elem2.get_data())
        a_list.append(elem3.get_data())
        a_list.append(elem4.get_data())
        a_list.append(elem5.get_data())
        elem3.insert('cc')
        self.assertEqual(a_list.get_head(), a_list[0])
        self.assertEqual(a_list.get_tail(), a_list[4])
        self.assertEqual(a_list[0].get_next(), a_list[1])
        self.assertEqual(a_list[1].get_next(), a_list[2])
        self.assertEqual(a_list[2].get_next(), a_list[3])
        self.assertEqual(a_list[3].get_next(), a_list[4])
        self.assertIsNone(a_list[4].get_next())
        self.assertEqual(5, len(a_list))
        self.assertEqual([elem1, elem2, elemx, elem4, elem5],
                         [a_list[i] for i in range(5)])

    def test_singly_linked_list_element_insert_tail(self):
        """
        Test method (list element) "insert" on a single-element list.
        """
        a_list = singly_linked_list.SinglyLinkedList()
        elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', None)
        a_list.append(elem1.get_data())
        elem1.insert('aa')
        self.assertEqual(a_list.get_head(), a_list[0])
        self.assertEqual(a_list.get_tail(), a_list[0])
        self.assertIsNone(a_list[0].get_next())
        self.assertEqual(1, len(a_list))

    def test_singly_linked_list_element_insert_before_first_one(self):
        """
        Insert before the only element of a one-element list.
        Before: [b]          After: [a] -> [b]
        """
        a_list = singly_linked_list.SinglyLinkedList()
        elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', None)
        elemx = singly_linked_list.SinglyLinkedListElement(a_list, 'a', elem1)
        a_list.append(elem1.get_data())
        elem1.insert_before('a')
        self.assertEqual(a_list.get_head(), a_list[0])
        self.assertEqual(a_list.get_tail(), a_list[1])
        self.assertEqual(a_list[0].get_next(), a_list[1])
        self.assertIsNone(a_list[1].get_next())
        self.assertEqual(2, len(a_list))
        self.assertEqual([elemx, elem1], [a_list[0], a_list[1]])

    def test_singly_linked_list_element_insert_before_first_two(self):
        """
        Insert before the first element of a two-element list.
        Before: [b] -> [c]          After: [a] -> [b] -> [c]
        """
        a_list = singly_linked_list.SinglyLinkedList()
        elem2 = singly_linked_list.SinglyLinkedListElement(a_list, 'c', None)
        elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', elem2)
        elemx = singly_linked_list.SinglyLinkedListElement(a_list, 'a', elem1)
        a_list.append(elem1.get_data())
        a_list.append(elem2.get_data())
        elem1.insert_before('a')
        self.assertEqual(a_list.get_head(), a_list[0])
        self.assertEqual(a_list.get_tail(), a_list[2])
        self.assertEqual(a_list[0].get_next(), a_list[1])
        self.assertEqual(a_list[1].get_next(), a_list[2])
        self.assertEqual(3, len(a_list))
        self.assertEqual([elemx, elem1, elem2],
                         [a_list[0], a_list[1], a_list[2]])

    def test_singly_linked_list_element_insert_before_middle(self):
        """
        Insert before an interior element.
        Before: [a] -> [b] -> [d]          After: [a] -> [b] -> [c] -> [d]
        """
        a_list = singly_linked_list.SinglyLinkedList()
        elem2 = singly_linked_list.SinglyLinkedListElement(a_list, 'd', None)
        elemx = singly_linked_list.SinglyLinkedListElement(a_list, 'c', elem2)
        elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', elemx)
        elem0 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', elem1)
        a_list.append(elem0.get_data())
        a_list.append(elem1.get_data())
        a_list.append(elem2.get_data())
        elem2.insert_before('c')
        self.assertEqual(a_list.get_head(), a_list[0])
        self.assertEqual(a_list.get_tail(), a_list[3])
        self.assertEqual(a_list[0].get_next(), a_list[1])
        self.assertEqual(a_list[1].get_next(), a_list[2])
        self.assertEqual(a_list[2].get_next(), a_list[3])
        self.assertIsNone(a_list[3].get_next())
        self.assertEqual(4, len(a_list))
        self.assertEqual([elem0, elem1, elemx, elem2],
                         [a_list[i] for i in range(4)])

    def test_singly_linked_list_element_insert_after_first_one(self):
        """
        Insert after the only element of a one-element list.
        Before: [a]          After: [a] -> [b]
        """
        a_list = singly_linked_list.SinglyLinkedList()
        elem0 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', None)
        elemx = singly_linked_list.SinglyLinkedListElement(a_list, 'b', None)
        a_list.append(elem0.get_data())
        elem0.insert_after('b')
        self.assertEqual(a_list.get_head(), a_list[0])
        self.assertEqual(a_list.get_tail(), a_list[1])
        self.assertEqual(a_list[0].get_next(), a_list[1])
        self.assertIsNone(a_list[1].get_next())
        self.assertEqual(2, len(a_list))
        self.assertEqual([elem0, elemx], [a_list[0], a_list[1]])

    def test_singly_linked_list_element_insert_after_first_two(self):
        """
        Insert after the last element of a two-element list.
        Before: [a] -> [b]          After: [a] -> [b] -> [c]
        """
        a_list = singly_linked_list.SinglyLinkedList()
        elemx = singly_linked_list.SinglyLinkedListElement(a_list, 'c', None)
        elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', elemx)
        elem0 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', elem1)
        a_list.append(elem0.get_data())
        a_list.append(elem1.get_data())
        elem1.insert_after('c')
        self.assertEqual(a_list.get_head(), a_list[0])
        self.assertEqual(a_list.get_tail(), a_list[2])
        self.assertEqual(a_list[0].get_next(), a_list[1])
        self.assertEqual(a_list[1].get_next(), a_list[2])
        self.assertIsNone(a_list[2].get_next())
        self.assertEqual(3, len(a_list))
        self.assertEqual([elem0, elem1, elemx],
                         [a_list[0], a_list[1], a_list[2]])

    def test_singly_linked_list_element_insert_after_middle(self):
        """
        Insert after an interior element.
        Before: [a] -> [b] -> [c]          After: [a] -> [b] -> [c] -> [d]
        """
        a_list = singly_linked_list.SinglyLinkedList()
        elem2 = singly_linked_list.SinglyLinkedListElement(a_list, 'd', None)
        elemx = singly_linked_list.SinglyLinkedListElement(a_list, 'c', elem2)
        elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', elemx)
        elem0 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', elem1)
        a_list.append(elem0.get_data())
        a_list.append(elem1.get_data())
        a_list.append(elem2.get_data())
        elem1.insert_after('c')
        self.assertEqual(a_list.get_head(), a_list[0])
        self.assertEqual(a_list.get_tail(), a_list[3])
        self.assertEqual(a_list[0].get_next(), a_list[1])
        self.assertEqual(a_list[1].get_next(), a_list[2])
        self.assertEqual(a_list[2].get_next(), a_list[3])
        self.assertEqual(4, len(a_list))
        self.assertEqual([elem0, elem1, elemx, elem2],
                         [a_list[i] for i in range(4)])

    def test_singly_linked_list_element_remove_first_one(self):
        """
        Remove the only element of a one-element list.
        Before: [a]          After: []
        """
        a_list = singly_linked_list.SinglyLinkedList()
        elem0 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', None)
        a_list.append(elem0.get_data())
        elem0.remove()
        self.assertEqual(0, len(a_list))

    def test_singly_linked_list_element_remove_first_two(self):
        """
        Remove the first element of a two-element list.
        Before: [a] -> [b]          After: [b]
        """
        a_list = singly_linked_list.SinglyLinkedList()
        elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', None)
        elem0 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', elem1)
        a_list.append(elem0.get_data())
        a_list.append(elem1.get_data())
        elem0.remove()
        self.assertEqual(a_list.get_head(), a_list[0])
        self.assertEqual(a_list.get_tail(), a_list[0])
        self.assertIsNone(elem1.get_next())
        self.assertEqual(1, len(a_list))
        self.assertEqual([elem1], [a_list[0]])

    def test_singly_linked_list_element_remove_middle(self):
        """
        Remove an interior element.
        Before: [a] -> [b] -> [c] -> [d] -> [e]
        After:  [a] -> [b] -> [d] -> [e]
        """
        a_list = singly_linked_list.SinglyLinkedList()
        elem5 = singly_linked_list.SinglyLinkedListElement(a_list, 'e', None)
        elem4 = singly_linked_list.SinglyLinkedListElement(a_list, 'd', elem5)
        elem3 = singly_linked_list.SinglyLinkedListElement(a_list, 'c', elem4)
        elem2 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', elem3)
        elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', elem2)
        a_list.append(elem1.get_data())
        a_list.append(elem2.get_data())
        a_list.append(elem3.get_data())
        a_list.append(elem4.get_data())
        a_list.append(elem5.get_data())
        elem3.remove()
        self.assertEqual(a_list.get_head(), a_list[0])
        self.assertEqual(a_list.get_tail(), a_list[3])
        self.assertEqual(a_list[0].get_next(), a_list[1])
        self.assertEqual(a_list[1].get_next(), a_list[2])
        self.assertEqual(a_list[2].get_next(), a_list[3])
        self.assertIsNone(a_list[3].get_next())
        self.assertEqual(4, len(a_list))
        self.assertEqual([elem1, elem2, elem4, elem5],
                         [a_list[i] for i in range(4)])

    def test_singly_linked_list_element_remove_end(self):
        """
        Remove the last element.
        Before: [a] -> [b] -> [c] -> [d] -> [e]
        After:  [a] -> [b] -> [c] -> [d]
        """
        a_list = singly_linked_list.SinglyLinkedList()
        elem4 = singly_linked_list.SinglyLinkedListElement(a_list, 'e', None)
        elem3 = singly_linked_list.SinglyLinkedListElement(a_list, 'd', elem4)
        elem2 = singly_linked_list.SinglyLinkedListElement(a_list, 'c', elem3)
        elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', elem2)
        elem0 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', elem1)
        a_list.append(elem0.get_data())
        a_list.append(elem1.get_data())
        a_list.append(elem2.get_data())
        a_list.append(elem3.get_data())
        a_list.append(elem4.get_data())
        elem4.remove()
        self.assertEqual(a_list.get_head(), a_list[0])
        self.assertEqual(a_list.get_tail(), a_list[3])
        self.assertEqual(a_list[0].get_next(), a_list[1])
        self.assertEqual(a_list[1].get_next(), a_list[2])
        self.assertEqual(a_list[2].get_next(), a_list[3])
        self.assertIsNone(a_list[3].get_next())
        self.assertEqual(4, len(a_list))
        self.assertEqual([elem0, elem1, elem2, elem3],
                         [a_list[i] for i in range(4)])

    def test_singly_linked_list_element_remove_not_present(self):
        """
        Removing an element that is not in the list must leave the
        list and its pointers untouched.
        Before: [a] -> [b]          After: [a] -> [b]
        """
        a_list = singly_linked_list.SinglyLinkedList()
        elem2 = singly_linked_list.SinglyLinkedListElement(a_list, 'c', None)
        elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', elem2)
        elem0 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', elem1)
        a_list.append(elem0.get_data())
        a_list.append(elem1.get_data())
        elem2.remove()
        self.assertEqual(a_list.get_head(), a_list[0])
        self.assertEqual(a_list.get_tail(), a_list[1])
        self.assertEqual(a_list[0].get_next(), a_list[1])
        self.assertIsNone(a_list[1].get_next())
        self.assertEqual(2, len(a_list))
        self.assertEqual([elem0, elem1], [a_list[0], a_list[1]])

    ### End test of local class SinglyLinkedListElement ###

    ### Begin test of class SinglyLinkedList ###

    def test_singly_linked_list_len(self):
        """
        Test operator "len".
        """
        self.assertEqual(3, len(self.list1))

    def test_singly_linked_list_equal(self):
        """
        Test operator "equal".
        """
        a_list1 = singly_linked_list.SinglyLinkedList()
        a_list2 = singly_linked_list.SinglyLinkedList()
        for item in ('a', 'b', 'c'):
            a_list1.append(item)
            a_list2.append(item)
        self.assertEqual(a_list1, a_list2)

    def test_singly_linked_list_not_equal(self):
        """
        Test operator "equal" - inverted.
        """
        a_list1 = singly_linked_list.SinglyLinkedList()
        a_list2 = singly_linked_list.SinglyLinkedList()
        for item in ('a', 'b'):
            a_list1.append(item)
            a_list2.append(item)
        # Lists differ only in the last element.
        a_list1.append('c')
        a_list2.append('d')
        self.assertNotEqual(a_list1, a_list2)

    def test_singly_linked_list_copy_not_equal(self):
        """
        Test operator "copy" - inverted.
        """
        a_list1 = singly_linked_list.SinglyLinkedList()
        for item in ('a', 'b', 'c'):
            a_list1.append(item)
        a_list2 = copy.copy(a_list1)
        # Mutating the original after copying must break equality.
        a_list1[len(a_list1) - 1] = 'cc'
        self.assertNotEqual(a_list1, a_list2)

    def test_singly_linked_list_copy_equal(self):
        """
        Test operator "copy".
        """
        a_list1 = singly_linked_list.SinglyLinkedList()
        for item in ('a', 'b', 'c'):
            a_list1.append(item)
        a_list2 = copy.copy(a_list1)
        self.assertEqual(a_list1, a_list2)

    def test_singly_linked_list_contains(self):
        """
        Test operator "contains".
        """
        self.assertTrue('b' in self.list1)

    def test_singly_linked_list_contains_not(self):
        """
        Test operator "contains" - inverted.
        """
        self.assertFalse('bb' in self.list1)

    def test_singly_linked_list_get_item(self):
        """
        Test operator "get_item".
        """
        elem = singly_linked_list.SinglyLinkedListElement(
            self.list1, 'b', None)
        self.assertEqual(elem, self.list1[0])

    def test_singly_linked_list_get_item_raise(self):
        """
        Test operator "get_item" - raises exception.
        """
        self.assertRaises(IndexError, lambda: self.list1[10])

    def test_singly_linked_list_get_head(self):
        """
        Test method "get_head".
        """
        self.assertEqual('b', self.list1.get_head().get_data())

    def test_singly_linked_list_get_tail(self):
        """
        Test method "get_tail".
        """
        self.assertEqual('d', self.list1.get_tail().get_data())

    def test_singly_linked_list_is_empty(self):
        """
        Test method "is_empty".
        """
        a_list = singly_linked_list.SinglyLinkedList()
        self.assertTrue(a_list.is_empty())

    def test_singly_linked_list_is_empty_not(self):
        """
        Test method "is_empty" - inverted.
        """
        self.assertFalse(self.list1.is_empty())

    def test_singly_linked_list_clear(self):
        """
        Test method "clear".
        """
        a_list = singly_linked_list.SinglyLinkedList()
        a_list.append('a')
        a_list.clear()
        self.assertTrue(a_list.is_empty())

    def test_singly_linked_list_get_first(self):
        """
        Test method "get_first".
        """
        self.assertEqual('b', self.list1.get_first())

    def test_singly_linked_list_get_last(self):
        """
        Test method "get_last".
        """
        self.assertEqual('d', self.list1.get_last())

    def test_singly_linked_list_prepend(self):
        """
        Test method "prepend".
        """
        self.list1.prepend('a')
        self.assertEqual('a', self.list1.get_first())

    def test_singly_linked_list_insert_at_empty(self):
        """
        Test method "insert_at" on an empty list.
        """
        a_list = singly_linked_list.SinglyLinkedList()
        a_list.insert_at(0, 'b')
        self.assertEqual('b', a_list[0].get_data())

    def test_singly_linked_list_insert_at_head(self):
        """
        Test method "insert_at" at index 0.
        """
        a_list = singly_linked_list.SinglyLinkedList()
        for item in ('a', 'b', 'c'):
            a_list.append(item)
        a_list.insert_at(0, 'aa')
        self.assertEqual('aa', a_list.get_head().data)
        self.assertEqual(a_list[0], a_list.get_head())
        self.assertEqual(a_list[len(a_list) - 1], a_list.get_tail())

    def test_singly_linked_list_insert_at_middle(self):
        """
        Test method "insert_at" at an interior index.
        """
        a_list = singly_linked_list.SinglyLinkedList()
        for item in ('a', 'b', 'c', 'd', 'e'):
            a_list.append(item)
        a_list.insert_at(2, 'cc')
        self.assertEqual('cc', a_list[2].get_data())

    def test_singly_linked_list_insert_at_tail(self):
        """
        Test method "insert_at" at the last index.
        """
        a_list = singly_linked_list.SinglyLinkedList()
        for item in ('a', 'b', 'c'):
            a_list.append(item)
        a_list.insert_at(len(a_list) - 1, 'cc')
        self.assertEqual(a_list[0], a_list.get_head())
        self.assertEqual(a_list[len(a_list) - 1], a_list.get_tail())
        self.assertEqual('cc', a_list.get_tail().data)
        self.assertEqual(3, len(a_list))

    def test_singly_linked_list_insert_before_element(self):
        """
        Test method "insert_before_element".
        """
        a_list = singly_linked_list.SinglyLinkedList()
        for item in ('a', 'b', 'c'):
            a_list.append(item)
        elem2 = a_list[2]
        a_list.insert_before_element('cc', elem2)
        self.assertEqual('cc', a_list[2].get_data())

    def test_singly_linked_list_insert_after_element(self):
        """
        Test method "insert_after_element".
        """
        a_list = singly_linked_list.SinglyLinkedList()
        for item in ('a', 'b', 'c'):
            a_list.append(item)
        elem2 = a_list[2]
        a_list.insert_after_element('cc', elem2)
        self.assertEqual('cc', a_list[3].get_data())

    def test_singly_linked_list_remove(self):
        """
        Test method "remove".
        """
        self.list1.remove('d')
        self.assertEqual('c', self.list1.get_last())

    ### End test of class SinglyLinkedList ###
| [
"copy.copy",
"py_alg_dat.singly_linked_list.SinglyLinkedList",
"py_alg_dat.singly_linked_list.SinglyLinkedListElement"
] | [((275, 312), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (310, 312), False, 'from py_alg_dat import singly_linked_list\n'), ((610, 647), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (645, 647), False, 'from py_alg_dat import singly_linked_list\n'), ((664, 725), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""a"""', 'None'], {}), "(a_list, 'a', None)\n", (706, 725), False, 'from py_alg_dat import singly_linked_list\n'), ((742, 803), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""a"""', 'None'], {}), "(a_list, 'a', None)\n", (784, 803), False, 'from py_alg_dat import singly_linked_list\n'), ((999, 1036), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (1034, 1036), False, 'from py_alg_dat import singly_linked_list\n'), ((1053, 1114), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""a"""', 'None'], {}), "(a_list, 'a', None)\n", (1095, 1114), False, 'from py_alg_dat import singly_linked_list\n'), ((1131, 1192), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""b"""', 'None'], {}), "(a_list, 'b', None)\n", (1173, 1192), False, 'from py_alg_dat import singly_linked_list\n'), ((1380, 1417), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (1415, 1417), False, 'from py_alg_dat import singly_linked_list\n'), ((1433, 1494), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""a"""', 'None'], {}), "(a_list, 'a', None)\n", (1475, 1494), False, 'from py_alg_dat import singly_linked_list\n'), 
((1512, 1527), 'copy.copy', 'copy.copy', (['elem'], {}), '(elem)\n', (1521, 1527), False, 'import copy\n'), ((1727, 1764), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (1762, 1764), False, 'from py_alg_dat import singly_linked_list\n'), ((1780, 1841), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""a"""', 'None'], {}), "(a_list, 'a', None)\n", (1822, 1841), False, 'from py_alg_dat import singly_linked_list\n'), ((1859, 1874), 'copy.copy', 'copy.copy', (['elem'], {}), '(elem)\n', (1868, 1874), False, 'import copy\n'), ((2087, 2124), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (2122, 2124), False, 'from py_alg_dat import singly_linked_list\n'), ((2140, 2201), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""a"""', 'None'], {}), "(a_list, 'a', None)\n", (2182, 2201), False, 'from py_alg_dat import singly_linked_list\n'), ((2394, 2431), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (2429, 2431), False, 'from py_alg_dat import singly_linked_list\n'), ((2448, 2509), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""b"""', 'None'], {}), "(a_list, 'b', None)\n", (2490, 2509), False, 'from py_alg_dat import singly_linked_list\n'), ((2526, 2588), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""a"""', 'elem2'], {}), "(a_list, 'a', elem2)\n", (2568, 2588), False, 'from py_alg_dat import singly_linked_list\n'), ((2786, 2823), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (2821, 2823), False, 'from py_alg_dat import singly_linked_list\n'), ((2840, 2901), 
'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""a"""', 'None'], {}), "(a_list, 'a', None)\n", (2882, 2901), False, 'from py_alg_dat import singly_linked_list\n'), ((3329, 3366), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (3364, 3366), False, 'from py_alg_dat import singly_linked_list\n'), ((3383, 3444), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""c"""', 'None'], {}), "(a_list, 'c', None)\n", (3425, 3444), False, 'from py_alg_dat import singly_linked_list\n'), ((3461, 3523), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""b"""', 'elem2'], {}), "(a_list, 'b', elem2)\n", (3503, 3523), False, 'from py_alg_dat import singly_linked_list\n'), ((3540, 3602), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""a"""', 'elem1'], {}), "(a_list, 'a', elem1)\n", (3582, 3602), False, 'from py_alg_dat import singly_linked_list\n'), ((4168, 4205), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (4203, 4205), False, 'from py_alg_dat import singly_linked_list\n'), ((4222, 4283), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""e"""', 'None'], {}), "(a_list, 'e', None)\n", (4264, 4283), False, 'from py_alg_dat import singly_linked_list\n'), ((4300, 4362), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""d"""', 'elem5'], {}), "(a_list, 'd', elem5)\n", (4342, 4362), False, 'from py_alg_dat import singly_linked_list\n'), ((4379, 4442), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', 
'"""cc"""', 'elem4'], {}), "(a_list, 'cc', elem4)\n", (4421, 4442), False, 'from py_alg_dat import singly_linked_list\n'), ((4459, 4521), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""c"""', 'elem4'], {}), "(a_list, 'c', elem4)\n", (4501, 4521), False, 'from py_alg_dat import singly_linked_list\n'), ((4538, 4600), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""b"""', 'elem3'], {}), "(a_list, 'b', elem3)\n", (4580, 4600), False, 'from py_alg_dat import singly_linked_list\n'), ((4617, 4679), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""a"""', 'elem2'], {}), "(a_list, 'a', elem2)\n", (4659, 4679), False, 'from py_alg_dat import singly_linked_list\n'), ((5866, 5903), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (5901, 5903), False, 'from py_alg_dat import singly_linked_list\n'), ((5920, 5981), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""a"""', 'None'], {}), "(a_list, 'a', None)\n", (5962, 5981), False, 'from py_alg_dat import singly_linked_list\n'), ((6764, 6801), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (6799, 6801), False, 'from py_alg_dat import singly_linked_list\n'), ((6818, 6879), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""b"""', 'None'], {}), "(a_list, 'b', None)\n", (6860, 6879), False, 'from py_alg_dat import singly_linked_list\n'), ((6896, 6958), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""a"""', 'elem1'], {}), "(a_list, 'a', elem1)\n", (6938, 6958), False, 'from py_alg_dat import 
singly_linked_list\n'), ((7977, 8014), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (8012, 8014), False, 'from py_alg_dat import singly_linked_list\n'), ((8031, 8092), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""c"""', 'None'], {}), "(a_list, 'c', None)\n", (8073, 8092), False, 'from py_alg_dat import singly_linked_list\n'), ((8109, 8171), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""b"""', 'elem2'], {}), "(a_list, 'b', elem2)\n", (8151, 8171), False, 'from py_alg_dat import singly_linked_list\n'), ((8188, 8250), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""a"""', 'elem1'], {}), "(a_list, 'a', elem1)\n", (8230, 8250), False, 'from py_alg_dat import singly_linked_list\n'), ((9384, 9421), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (9419, 9421), False, 'from py_alg_dat import singly_linked_list\n'), ((9438, 9499), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""d"""', 'None'], {}), "(a_list, 'd', None)\n", (9480, 9499), False, 'from py_alg_dat import singly_linked_list\n'), ((9516, 9578), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""c"""', 'elem2'], {}), "(a_list, 'c', elem2)\n", (9558, 9578), False, 'from py_alg_dat import singly_linked_list\n'), ((9595, 9657), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""b"""', 'elemx'], {}), "(a_list, 'b', elemx)\n", (9637, 9657), False, 'from py_alg_dat import singly_linked_list\n'), ((9674, 9736), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 
'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""a"""', 'elem1'], {}), "(a_list, 'a', elem1)\n", (9716, 9736), False, 'from py_alg_dat import singly_linked_list\n'), ((11044, 11081), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (11079, 11081), False, 'from py_alg_dat import singly_linked_list\n'), ((11098, 11159), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""a"""', 'None'], {}), "(a_list, 'a', None)\n", (11140, 11159), False, 'from py_alg_dat import singly_linked_list\n'), ((11176, 11237), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""b"""', 'None'], {}), "(a_list, 'b', None)\n", (11218, 11237), False, 'from py_alg_dat import singly_linked_list\n'), ((12255, 12292), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (12290, 12292), False, 'from py_alg_dat import singly_linked_list\n'), ((12309, 12370), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""c"""', 'None'], {}), "(a_list, 'c', None)\n", (12351, 12370), False, 'from py_alg_dat import singly_linked_list\n'), ((12387, 12449), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""b"""', 'elemx'], {}), "(a_list, 'b', elemx)\n", (12429, 12449), False, 'from py_alg_dat import singly_linked_list\n'), ((12466, 12528), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""a"""', 'elem1'], {}), "(a_list, 'a', elem1)\n", (12508, 12528), False, 'from py_alg_dat import singly_linked_list\n'), ((13710, 13747), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (13745, 13747), False, 'from 
py_alg_dat import singly_linked_list\n'), ((13764, 13825), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""d"""', 'None'], {}), "(a_list, 'd', None)\n", (13806, 13825), False, 'from py_alg_dat import singly_linked_list\n'), ((13842, 13904), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""c"""', 'elem2'], {}), "(a_list, 'c', elem2)\n", (13884, 13904), False, 'from py_alg_dat import singly_linked_list\n'), ((13921, 13983), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""b"""', 'elemx'], {}), "(a_list, 'b', elemx)\n", (13963, 13983), False, 'from py_alg_dat import singly_linked_list\n'), ((14000, 14062), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""a"""', 'elem1'], {}), "(a_list, 'a', elem1)\n", (14042, 14062), False, 'from py_alg_dat import singly_linked_list\n'), ((15232, 15269), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (15267, 15269), False, 'from py_alg_dat import singly_linked_list\n'), ((15286, 15347), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""a"""', 'None'], {}), "(a_list, 'a', None)\n", (15328, 15347), False, 'from py_alg_dat import singly_linked_list\n'), ((15933, 15970), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (15968, 15970), False, 'from py_alg_dat import singly_linked_list\n'), ((15987, 16048), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""b"""', 'None'], {}), "(a_list, 'b', None)\n", (16029, 16048), False, 'from py_alg_dat import singly_linked_list\n'), ((16065, 16127), 
'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""a"""', 'elem1'], {}), "(a_list, 'a', elem1)\n", (16107, 16127), False, 'from py_alg_dat import singly_linked_list\n'), ((17042, 17079), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (17077, 17079), False, 'from py_alg_dat import singly_linked_list\n'), ((17096, 17157), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""e"""', 'None'], {}), "(a_list, 'e', None)\n", (17138, 17157), False, 'from py_alg_dat import singly_linked_list\n'), ((17174, 17236), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""d"""', 'elem5'], {}), "(a_list, 'd', elem5)\n", (17216, 17236), False, 'from py_alg_dat import singly_linked_list\n'), ((17253, 17315), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""c"""', 'elem4'], {}), "(a_list, 'c', elem4)\n", (17295, 17315), False, 'from py_alg_dat import singly_linked_list\n'), ((17332, 17394), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""b"""', 'elem3'], {}), "(a_list, 'b', elem3)\n", (17374, 17394), False, 'from py_alg_dat import singly_linked_list\n'), ((17411, 17473), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""a"""', 'elem2'], {}), "(a_list, 'a', elem2)\n", (17453, 17473), False, 'from py_alg_dat import singly_linked_list\n'), ((18844, 18881), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (18879, 18881), False, 'from py_alg_dat import singly_linked_list\n'), ((18898, 18959), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 
'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""e"""', 'None'], {}), "(a_list, 'e', None)\n", (18940, 18959), False, 'from py_alg_dat import singly_linked_list\n'), ((18976, 19038), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""d"""', 'elem4'], {}), "(a_list, 'd', elem4)\n", (19018, 19038), False, 'from py_alg_dat import singly_linked_list\n'), ((19055, 19117), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""c"""', 'elem3'], {}), "(a_list, 'c', elem3)\n", (19097, 19117), False, 'from py_alg_dat import singly_linked_list\n'), ((19134, 19196), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""b"""', 'elem2'], {}), "(a_list, 'b', elem2)\n", (19176, 19196), False, 'from py_alg_dat import singly_linked_list\n'), ((19213, 19275), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""a"""', 'elem1'], {}), "(a_list, 'a', elem1)\n", (19255, 19275), False, 'from py_alg_dat import singly_linked_list\n'), ((20700, 20737), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (20735, 20737), False, 'from py_alg_dat import singly_linked_list\n'), ((20754, 20815), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""c"""', 'None'], {}), "(a_list, 'c', None)\n", (20796, 20815), False, 'from py_alg_dat import singly_linked_list\n'), ((20832, 20894), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""b"""', 'elem2'], {}), "(a_list, 'b', elem2)\n", (20874, 20894), False, 'from py_alg_dat import singly_linked_list\n'), ((20911, 20973), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 
'singly_linked_list.SinglyLinkedListElement', (['a_list', '"""a"""', 'elem1'], {}), "(a_list, 'a', elem1)\n", (20953, 20973), False, 'from py_alg_dat import singly_linked_list\n'), ((21925, 21962), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (21960, 21962), False, 'from py_alg_dat import singly_linked_list\n'), ((21981, 22018), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (22016, 22018), False, 'from py_alg_dat import singly_linked_list\n'), ((22364, 22401), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (22399, 22401), False, 'from py_alg_dat import singly_linked_list\n'), ((22420, 22457), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (22455, 22457), False, 'from py_alg_dat import singly_linked_list\n'), ((22810, 22847), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (22845, 22847), False, 'from py_alg_dat import singly_linked_list\n'), ((22950, 22968), 'copy.copy', 'copy.copy', (['a_list1'], {}), '(a_list1)\n', (22959, 22968), False, 'import copy\n'), ((23179, 23216), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (23214, 23216), False, 'from py_alg_dat import singly_linked_list\n'), ((23319, 23337), 'copy.copy', 'copy.copy', (['a_list1'], {}), '(a_list1)\n', (23328, 23337), False, 'import copy\n'), ((24240, 24305), 'py_alg_dat.singly_linked_list.SinglyLinkedListElement', 'singly_linked_list.SinglyLinkedListElement', (['self.list1', '"""b"""', 'None'], {}), "(self.list1, 'b', None)\n", (24282, 24305), False, 'from py_alg_dat import singly_linked_list\n'), ((25019, 25056), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (25054, 25056), False, 
'from py_alg_dat import singly_linked_list\n'), ((25384, 25421), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (25419, 25421), False, 'from py_alg_dat import singly_linked_list\n'), ((26157, 26194), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (26192, 26194), False, 'from py_alg_dat import singly_linked_list\n'), ((26409, 26446), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (26444, 26446), False, 'from py_alg_dat import singly_linked_list\n'), ((26907, 26944), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (26942, 26944), False, 'from py_alg_dat import singly_linked_list\n'), ((27296, 27333), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (27331, 27333), False, 'from py_alg_dat import singly_linked_list\n'), ((27864, 27901), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (27899, 27901), False, 'from py_alg_dat import singly_linked_list\n'), ((28258, 28295), 'py_alg_dat.singly_linked_list.SinglyLinkedList', 'singly_linked_list.SinglyLinkedList', ([], {}), '()\n', (28293, 28295), False, 'from py_alg_dat import singly_linked_list\n')] |
import urllib.parse
import time
import hashlib
import hmac
from bravado.requests_client import Authenticator
class APIKeyAuthenticator(Authenticator):
    """?api_key authenticator.
    This authenticator adds BitMEX API key support via header.
    :param host: Host to authenticate for.
    :param api_key: API key.
    :param api_secret: API secret.
    """
    def __init__(self, host, api_key, api_secret):
        super(APIKeyAuthenticator, self).__init__(host)
        self.api_key = api_key
        self.api_secret = api_secret
    # Forces this to apply to all requests, except the swagger spec download
    # itself, which must stay unauthenticated.
    def matches(self, url):
        if "swagger.json" in url:
            return False
        return True
    # Add the proper headers via the `expires` scheme.
    def apply(self, r):
        # 5s grace period in case of clock skew
        expires = int(round(time.time()) + 5)
        r.headers['api-expires'] = str(expires)
        r.headers['api-key'] = self.api_key
        # Sign the *prepared* request so the signature covers the final
        # path (including query string) and the serialized body.
        prepared = r.prepare()
        body = prepared.body or ''
        url = prepared.path_url
        # print(json.dumps(r.data, separators=(',',':')))
        r.headers['api-signature'] = self.generate_signature(self.api_secret, r.method, url, expires, body)
        return r
    # Generates an API signature.
    # A signature is HMAC_SHA256(secret, verb + path + nonce + data), hex encoded.
    # Verb must be uppercased, url is relative, nonce must be an increasing 64-bit integer
    # and the data, if present, must be JSON without whitespace between keys.
    #
    # For example, in pseudocode (and in real code below):
    #
    # verb=POST
    # url=/api/v1/order
    # nonce=1416993995705
    # data={"symbol":"XBTZ14","quantity":1,"price":395.01}
    # signature = HEX(HMAC_SHA256(secret, 'POST/api/v1/order1416993995705{"symbol":"XBTZ14","quantity":1,"price":395.01}'))
    def generate_signature(self, secret, verb, url, nonce, data):
        """Generate a request signature compatible with BitMEX.

        :param secret: API secret used as the HMAC key.
        :param verb: Upper-case HTTP method, e.g. 'POST'.
        :param url: Relative request URL; any base is stripped below.
        :param nonce: Expiry timestamp / increasing integer mixed into the message.
        :param data: Request body string ('' when there is no body).
        :return: Hex-encoded HMAC-SHA256 signature.
        """
        # Parse the url so we can remove the base and extract just the path.
        parsedURL = urllib.parse.urlparse(url)
        path = parsedURL.path
        if parsedURL.query:
            path = path + '?' + parsedURL.query
        message = bytes(verb + path + str(nonce) + data, 'utf-8')
        # print("Computing HMAC: %s" % message)
        signature = hmac.new(bytes(secret, 'utf-8'), message, digestmod=hashlib.sha256).hexdigest()
return signature | [
"time.time"
] | [((846, 857), 'time.time', 'time.time', ([], {}), '()\n', (855, 857), False, 'import time\n')] |
from .models import db, User
from m import Router
from m.utils import jsonify
router = Router(prefix='')
@router.route('/', methods=['POST'])
def home(ctx, request):
    # Create a User from the POSTed JSON payload, e.g. {"name": "alice"}.
    name = request.json().get('name')
    user = User(name=name)
    db.session.add(user)
    try:
        db.session.commit()
    except Exception as e:
        # Roll back so the session remains usable after a failed commit.
        # NOTE(review): the error is only printed and no response is returned
        # to the client -- confirm this is the intended behavior.
        print(e)
        db.session.rollback()
@router.route('/{name}', methods=['GET'])
def get(ctx, request):
    # NOTE(review): the route declares a '{name}' path parameter but the value
    # is read from request.args -- confirm that args also exposes path
    # parameters in this framework.
    name = request.args.get('name')
    # 404s with the given message when no user matches the name.
    user = User.query.filter(User.name == name).first_or_404('user {} not exist'.format(name))
    return jsonify(code=200, user=user.dictify())
| [
"m.Router"
] | [((88, 105), 'm.Router', 'Router', ([], {'prefix': '""""""'}), "(prefix='')\n", (94, 105), False, 'from m import Router\n')] |
# -*- coding: utf-8 -*-
import os
import sys
from pyzabbix import ZabbixMetric, ZabbixSender
from lumbermill.BaseThreadedModule import BaseThreadedModule
from lumbermill.utils.Buffers import Buffer
from lumbermill.utils.Decorators import ModuleDocstringParser
from lumbermill.utils.DynamicValues import mapDynamicValue
@ModuleDocstringParser
class Zabbix(BaseThreadedModule):
    """
    Send events to zabbix.
    hostname: Hostname for which the metrics should be stored.
    fields: Event fields to send.
    field_prefix: Prefix to prepend to field names. For e.g. cpu_count field with default prefix, the Zabbix key is lumbermill_cpu_count.
    timestamp_field: Field to provide timestamp. If not provided, current timestamp is used.
    agent_conf: Path to zabbix_agent configuration file. If set to True defaults to /etc/zabbix/zabbix_agentd.conf.
    server: Address of zabbix server. If port differs from default it can be set by appending it, e.g. 127.0.0.1:10052.
    store_interval_in_secs: sending data to es in x seconds intervals.
    batch_size: sending data to es if event count is above, even if store_interval_in_secs is not reached.
    backlog_size: maximum count of events waiting for transmission. Events above count will be dropped.
    Configuration template:
    - output.Zabbix:
       hostname:                        # <type: string; is: required>
       fields:                          # <type: list; is: required>
       field_prefix:                    # <default: "lumbermill_"; type: string; is: optional>
       timestamp_field:                 # <default: "timestamp"; type: string; is: optional>
       agent_conf:                      # <default: True; type: boolean||string; is: optional>
       server:                          # <default: False; type: boolean||string; is: required if agent_conf is False else optional>
       store_interval_in_secs:          # <default: 10; type: integer; is: optional>
       batch_size:                      # <default: 500; type: integer; is: optional>
       backlog_size:                    # <default: 500; type: integer; is: optional>
    """

    module_type = "output"
    """Set module type"""

    def configure(self, configuration):
        """Read the module configuration and create the zabbix sender.

        Uses the zabbix agent configuration file when agent_conf is set,
        otherwise connects directly to the configured server[:port].
        """
        BaseThreadedModule.configure(self, configuration)
        self.hostname = self.getConfigurationValue("hostname")
        self.fields = self.getConfigurationValue("fields")
        self.field_prefix = self.getConfigurationValue("field_prefix")
        self.timestamp_field = self.getConfigurationValue("timestamp_field")
        self.batch_size = self.getConfigurationValue('batch_size')
        self.backlog_size = self.getConfigurationValue('backlog_size')
        self.agent_conf = self.getConfigurationValue("agent_conf")
        if self.agent_conf:
            # agent_conf == True selects the default agent configuration path.
            if self.agent_conf is True:
                self.agent_conf = "/etc/zabbix/zabbix_agentd.conf"
            if not os.path.isfile(self.agent_conf):
                self.logger.error("%s does not point to an existing file." % self.agent_conf)
                self.lumbermill.shutDown()
            self.zabbix_sender = ZabbixSender(use_config=self.agent_conf)
        else:
            # No agent configuration: talk to the server directly.
            # Fix: use the local 'server' variable (the original referenced the
            # never-assigned self.server) and drop the leftover debug log call.
            server = self.getConfigurationValue("server")
            port = 10051
            if ":" in server:
                server, port = server.split(":")
                # Fix: split() yields a string; the sender expects a numeric port.
                port = int(port)
            self.zabbix_sender = ZabbixSender(zabbix_server=server, port=port)
        self.buffer = Buffer(self.getConfigurationValue('batch_size'), self.storeData,
                             self.getConfigurationValue('store_interval_in_secs'),
                             maxsize=self.getConfigurationValue('backlog_size'))

    def getStartMessage(self):
        """Return a human readable start message for logging."""
        if self.agent_conf:
            return "Config: %s. Max buffer size: %d" % (self.agent_conf, self.getConfigurationValue('backlog_size'))
        else:
            return "Server: %s. Max buffer size: %d" % (self.getConfigurationValue("server"), self.getConfigurationValue('backlog_size'))

    def initAfterFork(self):
        """Recreate the buffer after forking, since it holds unpicklable state."""
        BaseThreadedModule.initAfterFork(self)
        self.buffer = Buffer(self.getConfigurationValue('batch_size'), self.storeData,
                             self.getConfigurationValue('store_interval_in_secs'),
                             maxsize=self.getConfigurationValue('backlog_size'))

    def handleEvent(self, event):
        """Queue the event for batched transmission; emits no follow-up events."""
        self.buffer.append(event)
        yield None

    def storeData(self, events):
        """Build a metric packet from the buffered events and send it to zabbix.

        :param events: Iterable of event dicts flushed from the buffer.
        """
        packet = []
        for event in events:
            # Fix: always define timestamp so it cannot be unbound below when
            # timestamp_field is configured empty.
            timestamp = None
            if self.timestamp_field:
                try:
                    timestamp = event[self.timestamp_field]
                except KeyError:
                    timestamp = None
            hostname = mapDynamicValue(self.hostname, mapping_dict=event, use_strftime=True)
            for field_name in self.fields:
                try:
                    packet.append(ZabbixMetric(hostname, "%s%s" % (self.field_prefix, field_name), event[field_name], timestamp))
                except KeyError:
                    # Events missing a configured field are silently skipped.
                    pass
                    #self.logger.warning("Could not send metrics for %s:%s. Field not found." % (hostname, field_name))
        response = self.zabbix_sender.send(packet)
        if response.failed != 0:
            self.logger.warning("%d of %d metrics were not processed correctly." % (response.total-response.processed, response.total))

    def shutDown(self):
        """Flush any buffered events before shutting down."""
        self.buffer.flush()
| [
"pyzabbix.ZabbixMetric",
"lumbermill.utils.DynamicValues.mapDynamicValue",
"os.path.isfile",
"pyzabbix.ZabbixSender",
"lumbermill.BaseThreadedModule.BaseThreadedModule.configure",
"lumbermill.BaseThreadedModule.BaseThreadedModule.initAfterFork"
] | [((2237, 2286), 'lumbermill.BaseThreadedModule.BaseThreadedModule.configure', 'BaseThreadedModule.configure', (['self', 'configuration'], {}), '(self, configuration)\n', (2265, 2286), False, 'from lumbermill.BaseThreadedModule import BaseThreadedModule\n'), ((4084, 4122), 'lumbermill.BaseThreadedModule.BaseThreadedModule.initAfterFork', 'BaseThreadedModule.initAfterFork', (['self'], {}), '(self)\n', (4116, 4122), False, 'from lumbermill.BaseThreadedModule import BaseThreadedModule\n'), ((3119, 3159), 'pyzabbix.ZabbixSender', 'ZabbixSender', ([], {'use_config': 'self.agent_conf'}), '(use_config=self.agent_conf)\n', (3131, 3159), False, 'from pyzabbix import ZabbixMetric, ZabbixSender\n'), ((3420, 3465), 'pyzabbix.ZabbixSender', 'ZabbixSender', ([], {'zabbix_server': 'server', 'port': 'port'}), '(zabbix_server=server, port=port)\n', (3432, 3465), False, 'from pyzabbix import ZabbixMetric, ZabbixSender\n'), ((4756, 4825), 'lumbermill.utils.DynamicValues.mapDynamicValue', 'mapDynamicValue', (['self.hostname'], {'mapping_dict': 'event', 'use_strftime': '(True)'}), '(self.hostname, mapping_dict=event, use_strftime=True)\n', (4771, 4825), False, 'from lumbermill.utils.DynamicValues import mapDynamicValue\n'), ((2916, 2947), 'os.path.isfile', 'os.path.isfile', (['self.agent_conf'], {}), '(self.agent_conf)\n', (2930, 2947), False, 'import os\n'), ((4924, 5023), 'pyzabbix.ZabbixMetric', 'ZabbixMetric', (['hostname', "('%s%s' % (self.field_prefix, field_name))", 'event[field_name]', 'timestamp'], {}), "(hostname, '%s%s' % (self.field_prefix, field_name), event[\n field_name], timestamp)\n", (4936, 5023), False, 'from pyzabbix import ZabbixMetric, ZabbixSender\n')] |
#!/usr/bin/python
#author: zhaofeng-shu33
import numpy as np
from ace_cream import ace_cream
def pearson_correlation(X,Y):
    """Pearson correlation coefficient of X and Y along axis 0.

    Uses the population standard deviation (ddof=0), matching np.std's
    default, so rho = (E[XY] - E[X]E[Y]) / (std(X) * std(Y)).
    """
    covariance = np.mean(X * Y, axis=0) - np.mean(X, axis=0) * np.mean(Y, axis=0)
    spread = np.std(X, axis=0) * np.std(Y, axis=0)
    return covariance / spread
if __name__ == '__main__':
    # Simulate a noisy channel: x is a fair Bernoulli input, and with
    # probability ERROR_PROBABILITY the output y is replaced by an
    # erasure symbol (2); otherwise y copies x.
    N_SIZE = 1000
    ERROR_PROBABILITY = 0.1
    x = np.random.choice([0,1],size=N_SIZE)
    y = np.random.uniform(size=N_SIZE)
    for i in range(len(x)):
        if(y[i] < ERROR_PROBABILITY):
            y[i] = 2
        else:
            y[i] = x[i]
    # Relabel the symbols with arbitrary values to show that ACE recovers
    # the optimal transforms regardless of the labels chosen.
    # (y holds floats; float keys 0.0/1.0/2.0 hash equal to ints 0/1/2.)
    dic_Y = {0:6, 1:8, 2:3}
    dic_X = {0:7, 1:9}
    for i in range(len(y)):
        y[i] = dic_Y[y[i]]
        x[i] = dic_X[x[i]]
    print('rho(x,y)',pearson_correlation(x,y))
    # use fortran ace by 1985 article author
    # cat=[-1,0] marks both the response and the first predictor as categorical
    tx, ty = ace_cream(x, y, cat = [-1,0])
    print('mapped X symbol list: ')
    print(np.unique(tx))
    print('mapped Y symbol list: ')
    print(np.unique(ty))
    # ACE normalizes the transformed variables to zero mean
    print('mean(tx) = %f, std(tx) = %f'%(np.mean(tx), np.std(tx)))
    print('mean(ty) = %f, std(ty) = %f'%(np.mean(ty), np.std(ty)))
    print('rho(tx,ty)',pearson_correlation(tx,ty))
    # matches theoretical result: np.sqrt(1-ERROR_PROBABILITY)
| [
"numpy.mean",
"numpy.unique",
"numpy.random.choice",
"ace_cream.ace_cream",
"numpy.std",
"numpy.random.uniform"
] | [((332, 369), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'size': 'N_SIZE'}), '([0, 1], size=N_SIZE)\n', (348, 369), True, 'import numpy as np\n'), ((376, 406), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'N_SIZE'}), '(size=N_SIZE)\n', (393, 406), True, 'import numpy as np\n'), ((770, 798), 'ace_cream.ace_cream', 'ace_cream', (['x', 'y'], {'cat': '[-1, 0]'}), '(x, y, cat=[-1, 0])\n', (779, 798), False, 'from ace_cream import ace_cream\n'), ((846, 859), 'numpy.unique', 'np.unique', (['tx'], {}), '(tx)\n', (855, 859), True, 'import numpy as np\n'), ((907, 920), 'numpy.unique', 'np.unique', (['ty'], {}), '(ty)\n', (916, 920), True, 'import numpy as np\n'), ((137, 159), 'numpy.mean', 'np.mean', (['(X * Y)'], {'axis': '(0)'}), '(X * Y, axis=0)\n', (144, 159), True, 'import numpy as np\n'), ((207, 224), 'numpy.std', 'np.std', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (213, 224), True, 'import numpy as np\n'), ((229, 246), 'numpy.std', 'np.std', (['Y'], {'axis': '(0)'}), '(Y, axis=0)\n', (235, 246), True, 'import numpy as np\n'), ((159, 177), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (166, 177), True, 'import numpy as np\n'), ((181, 199), 'numpy.mean', 'np.mean', (['Y'], {'axis': '(0)'}), '(Y, axis=0)\n', (188, 199), True, 'import numpy as np\n'), ((964, 975), 'numpy.mean', 'np.mean', (['tx'], {}), '(tx)\n', (971, 975), True, 'import numpy as np\n'), ((977, 987), 'numpy.std', 'np.std', (['tx'], {}), '(tx)\n', (983, 987), True, 'import numpy as np\n'), ((1031, 1042), 'numpy.mean', 'np.mean', (['ty'], {}), '(ty)\n', (1038, 1042), True, 'import numpy as np\n'), ((1044, 1054), 'numpy.std', 'np.std', (['ty'], {}), '(ty)\n', (1050, 1054), True, 'import numpy as np\n')] |
# Generated by Django 3.1.7 on 2021-03-19 15:51
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the ``parser`` field from Track and delete the Point model."""

    dependencies = [("track", "0021_auto_20200915_1528")]

    operations = [
        migrations.RemoveField(model_name="track", name="parser"),
        migrations.DeleteModel(name="Point"),
    ]
| [
"django.db.migrations.DeleteModel",
"django.db.migrations.RemoveField"
] | [((225, 282), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""track"""', 'name': '"""parser"""'}), "(model_name='track', name='parser')\n", (247, 282), False, 'from django.db import migrations\n'), ((327, 363), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""Point"""'}), "(name='Point')\n", (349, 363), False, 'from django.db import migrations\n')] |
'''
Name: color_segmentation.py
Version: 1.0
Summary: Extract plant traits (leaf area, width, height, ) by paralell processing
Author: <NAME>
Author-email: <EMAIL>
Created: 2018-09-29
USAGE:
python3 color_kmeans_vis.py -p /home/suxingliu/plant-image-analysis/sample_test/ -i 01.jpg -m 01_seg.jpg -c 5
'''
#!/usr/bin/python
# import the necessary packages
from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans
import matplotlib.pyplot as plt
import argparse
import utils
import cv2
import numpy as np
import matplotlib.image as mpimg
import pylab as P
import os
def mkdir(path):
    """Create the directory *path* if it does not already exist.

    Leading/trailing whitespace and any trailing path separators are
    stripped first.

    Args:
        path: directory path to create.

    Returns:
        True if the directory was created, False if it already existed.
    """
    # Normalize: drop surrounding whitespace and trailing separators.
    # The original stripped only backslashes; on POSIX the trailing
    # separator is '/', so strip both.
    path = path.strip().rstrip("/\\")
    # check whether the target already exists
    isExists = os.path.exists(path)
    if not isExists:
        # construct the path and folder
        print (path+'folder constructed!')
        # make dir (may still race with a concurrent creator and raise)
        os.makedirs(path)
        return True
    else:
        # if exists, return
        print (path+'path exists!')
        return False
def color_quantization(image, mask):
    """Cluster the masked plant pixels into `args["clusters"]` colors.

    Saves the quantized image, a cluster histogram and a color bar into the
    module-global `save_path` directory; relies on the module-global `args`.

    Args:
        image: BGR image as loaded by cv2.imread.
        mask:  single-channel binary mask selecting the plant pixels.
    """
    #grab image width and height
    (h, w) = image.shape[:2]
    #change the color storage order (cv2 loads BGR; work in RGB)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    #apply the mask to get the segmentation of plant
    masked_image = cv2.bitwise_and(image, image, mask = mask)
    # reshape the image to be a list of pixels
    pixels = masked_image.reshape((masked_image.shape[0] * masked_image.shape[1], 3))
    ############################################################
    #Clustering process
    ###############################################################
    # cluster the pixel intensities (MiniBatch trades accuracy for speed)
    clt = MiniBatchKMeans(n_clusters = args["clusters"])
    #clt = KMeans(n_clusters = args["clusters"])
    clt.fit(pixels)
    #assign labels to each cluster (note: fit_predict re-fits after fit above)
    labels = clt.fit_predict(pixels)
    #obtain the quantized clusters using each label
    quant = clt.cluster_centers_.astype("uint8")[labels]
    # reshape the feature vectors to images
    quant = quant.reshape((h, w, 3))
    image_rec = pixels.reshape((h, w, 3))
    # convert back from RGB to BGR for cv2.imwrite
    quant = cv2.cvtColor(quant, cv2.COLOR_RGB2BGR)
    image_rec = cv2.cvtColor(image_rec, cv2.COLOR_RGB2BGR)
    # display the images and wait for a keypress
    #cv2.imshow("image", np.hstack([image_rec, quant]))
    #cv2.waitKey(0)
    #define result path for labeled images
    result_img_path = save_path + 'cluster_out.png'
    # save color_quantization results
    cv2.imwrite(result_img_path,quant)
    # build a histogram of clusters and then create a figure representing the number of pixels labeled to each color
    hist = utils.centroid_histogram(clt)
    # drop the first cluster, presumably the black background
    # NOTE(review): both branches do the same thing — confirm this is intended
    if (args["mask"] == "None"):
        clt.cluster_centers_ = clt.cluster_centers_[1: len(clt.cluster_centers_)]
    else:
        clt.cluster_centers_ = clt.cluster_centers_[1: len(clt.cluster_centers_)]
    #build a histogram of clusters using center lables
    numLabels = utils.plot_centroid_histogram(save_path,clt)
    #create a figure representing the distribution of each color
    bar = utils.plot_colors(hist, clt.cluster_centers_)
    #save a figure of color bar
    utils.plot_color_bar(save_path, bar)
if __name__ == '__main__':
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-p", "--path", required=True, help="Current directory for image files.")
    ap.add_argument("-i", "--image", required = True, help = "Path to the image")
    ap.add_argument("-m", "--mask", required = True, help = "Path to the mask image", default = "None")
    ap.add_argument("-c", "--clusters", required = True, type = int, help = "# of clusters")
    args = vars(ap.parse_args())
    # setting path for results storage
    current_path = args["path"]
    filename = args["image"]
    image_path = current_path + filename
    # construct result folder named after the image (extension stripped)
    mkpath = current_path + str(filename[0:-4])
    mkdir(mkpath)
    # save_path is read by color_quantization as a module-level global
    # NOTE(review): `global` at module level is a no-op; kept for clarity only
    global save_path
    save_path = mkpath + '/'
    print ("results_folder: " + save_path)
    # load the image
    image = cv2.imread(image_path)
    # set mask path
    mask_path = current_path + args["mask"]
    # load mask image as grayscale
    im_gray = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
    #extract the binary mask (Otsu picks the threshold; 128 is ignored)
    (thresh, mask) = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    color_quantization(image,mask)
    # The block below is dead code kept as a string literal (mask clean-up
    # pipeline); it is not executed.
    '''
    #save mask image
    if (args["mask"] == "None"):
        #read mask image as gray scale
        im_gray = cv2.imread(fig_path_save, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        (thresh, im_bw) = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        #fill samll holes and area along edge of image
        from skimage.segmentation import clear_border
        # remove artifacts connected to image border
        cleared = im_bw.copy()
        im_bw_cleared = clear_border(cleared)
        #from skimage import morphology
        #im_bw_cleared = morphology.remove_small_objects(im_bw, 20000, connectivity=2)
        #remove small holes and objects
        from scipy import ndimage as ndi
        label_objects, num_labels = ndi.label(im_bw_cleared)
        #print num_labels
        sizes = np.bincount(label_objects.ravel())
        mask_sizes = sizes > 500
        mask_sizes[0] = 0
        img_cleaned = mask_sizes[label_objects]
        #change output image type
        from skimage import img_as_ubyte
        img_cleaned = img_as_ubyte(img_cleaned)
        #save output mask image
        fig_name = (str(filename[0:-4]) + '_' +'mask.png')
        fig_path_mask = current_path + fig_name
        cv2.imwrite(fig_path_mask, img_cleaned)
        fig_path_mask = save_path + fig_name
        cv2.imwrite(fig_path_mask, img_cleaned)
    '''
| [
"os.path.exists",
"cv2.imwrite",
"utils.centroid_histogram",
"argparse.ArgumentParser",
"cv2.threshold",
"utils.plot_centroid_histogram",
"sklearn.cluster.MiniBatchKMeans",
"os.makedirs",
"cv2.bitwise_and",
"utils.plot_colors",
"cv2.cvtColor",
"cv2.imread",
"utils.plot_color_bar"
] | [((792, 812), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (806, 812), False, 'import os\n'), ((1261, 1299), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (1273, 1299), False, 'import cv2\n'), ((1377, 1417), 'cv2.bitwise_and', 'cv2.bitwise_and', (['image', 'image'], {'mask': 'mask'}), '(image, image, mask=mask)\n', (1392, 1417), False, 'import cv2\n'), ((1773, 1817), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {'n_clusters': "args['clusters']"}), "(n_clusters=args['clusters'])\n", (1788, 1817), False, 'from sklearn.cluster import MiniBatchKMeans\n'), ((2247, 2285), 'cv2.cvtColor', 'cv2.cvtColor', (['quant', 'cv2.COLOR_RGB2BGR'], {}), '(quant, cv2.COLOR_RGB2BGR)\n', (2259, 2285), False, 'import cv2\n'), ((2302, 2344), 'cv2.cvtColor', 'cv2.cvtColor', (['image_rec', 'cv2.COLOR_RGB2BGR'], {}), '(image_rec, cv2.COLOR_RGB2BGR)\n', (2314, 2344), False, 'import cv2\n'), ((2622, 2657), 'cv2.imwrite', 'cv2.imwrite', (['result_img_path', 'quant'], {}), '(result_img_path, quant)\n', (2633, 2657), False, 'import cv2\n'), ((2786, 2815), 'utils.centroid_histogram', 'utils.centroid_histogram', (['clt'], {}), '(clt)\n', (2810, 2815), False, 'import utils\n'), ((3138, 3183), 'utils.plot_centroid_histogram', 'utils.plot_centroid_histogram', (['save_path', 'clt'], {}), '(save_path, clt)\n', (3167, 3183), False, 'import utils\n'), ((3259, 3304), 'utils.plot_colors', 'utils.plot_colors', (['hist', 'clt.cluster_centers_'], {}), '(hist, clt.cluster_centers_)\n', (3276, 3304), False, 'import utils\n'), ((3343, 3379), 'utils.plot_color_bar', 'utils.plot_color_bar', (['save_path', 'bar'], {}), '(save_path, bar)\n', (3363, 3379), False, 'import utils\n'), ((3496, 3521), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3519, 3521), False, 'import argparse\n'), ((4304, 4326), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (4314, 4326), False, 'import 
cv2\n'), ((4446, 4489), 'cv2.imread', 'cv2.imread', (['mask_path', 'cv2.IMREAD_GRAYSCALE'], {}), '(mask_path, cv2.IMREAD_GRAYSCALE)\n', (4456, 4489), False, 'import cv2\n'), ((4545, 4614), 'cv2.threshold', 'cv2.threshold', (['im_gray', '(128)', '(255)', '(cv2.THRESH_BINARY | cv2.THRESH_OTSU)'], {}), '(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n', (4558, 4614), False, 'import cv2\n'), ((960, 977), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (971, 977), False, 'import os\n')] |
import pandas as pd
import matplotlib.pyplot as plt  # NOTE(review): unused in this snippet
import pickle  # NOTE(review): unused in this snippet
# Load newline-delimited JSON records (one object per line) into a DataFrame
df = pd.read_json('result.json',lines=True)
print(df)
"pandas.read_json"
] | [((72, 111), 'pandas.read_json', 'pd.read_json', (['"""result.json"""'], {'lines': '(True)'}), "('result.json', lines=True)\n", (84, 111), True, 'import pandas as pd\n')] |
import logging
from django.core.exceptions import ImproperlyConfigured
from rest_framework.exceptions import APIException
from rest_framework.views import APIView
from rest_framework.response import Response
from .checks import (
internal_services,
external_services,
)
logger = logging.getLogger('hhs_server.%s' % __name__)
class ServiceUnavailable(APIException):
    """DRF exception that renders a failed health check as HTTP 503."""
    status_code = 503
    default_detail = 'Service temporarily unavailable, try again later.'
    default_code = 'service_unavailable'
class Check(APIView):
    """Base health-check view.

    Subclasses provide ``services``: a sequence of callables, each taking a
    ``v2`` flag and returning truthy when the service is healthy.  A falsy
    result or an unexpected exception yields a 503 via ServiceUnavailable.
    """

    def get(self, request, format=None):
        # keep a handle so the error message is safe even if get_services()
        # itself raises (previously `check` could be unbound -> NameError)
        check = None
        try:
            for check in self.get_services():
                # external_v2 URLs select the v2 variant of the check
                v2 = request.path.endswith('external_v2')
                if not check(v2):
                    raise ServiceUnavailable()
        except ServiceUnavailable:
            raise
        except Exception as e:
            svc = check.__name__ if check is not None else '<unknown>'
            logger.exception("health check raised exception. {reason}".format(reason=e))
            raise ServiceUnavailable(detail="Service temporarily unavailable, try again later. There is an issue with the - {svc}"
                                     " - service check. Reason: {reason}".format(svc=svc, reason=e))
        return Response({'message': 'all\'s well'})

    def get_services(self):
        """Return the configured checks, validating the class configuration."""
        if not hasattr(self, "services"):
            raise ImproperlyConfigured
        if len(self.services) < 1:
            raise ImproperlyConfigured(
                "please specify at least one service to check")
        return self.services
class CheckInternal(Check):
    """Health check running the ``internal_services`` check list."""
    services = internal_services
class CheckExternal(Check):
    """Health check running the ``external_services`` check list."""
    services = external_services
| [
"logging.getLogger",
"rest_framework.response.Response",
"django.core.exceptions.ImproperlyConfigured"
] | [((288, 333), 'logging.getLogger', 'logging.getLogger', (["('hhs_server.%s' % __name__)"], {}), "('hhs_server.%s' % __name__)\n", (305, 333), False, 'import logging\n'), ((1233, 1268), 'rest_framework.response.Response', 'Response', (['{\'message\': "all\'s well"}'], {}), '({\'message\': "all\'s well"})\n', (1241, 1268), False, 'from rest_framework.response import Response\n'), ((1433, 1501), 'django.core.exceptions.ImproperlyConfigured', 'ImproperlyConfigured', (['"""please specify at least one service to check"""'], {}), "('please specify at least one service to check')\n", (1453, 1501), False, 'from django.core.exceptions import ImproperlyConfigured\n')] |
from machine import Pin
import time
from led import LED
# Button wired active-low: with PULL_UP the pin reads 1 when released,
# 0 while pressed.
tube_btn = Pin(21, Pin.IN, Pin.PULL_UP)
sys_led = Pin(25, Pin.OUT)
print('Blinking LED to power check (no LED? Check LED batteries and/or script).')
LED.led_blink(5)
print('Blink code finish - Listening for presses.')
while True:
    # Sample twice 10 ms apart: a 1 -> 0 transition is a press edge,
    # 0 -> 1 is a release edge (also acts as a crude debounce).
    first = tube_btn.value()
    time.sleep(0.01)
    second = tube_btn.value()
    if first and not second:
        print('Button pressed.')
        LED.led_display(2)
    elif not first and second:
        print('Button released.')
| [
"led.LED.led_blink",
"led.LED.led_display",
"time.sleep",
"machine.Pin"
] | [((68, 96), 'machine.Pin', 'Pin', (['(21)', 'Pin.IN', 'Pin.PULL_UP'], {}), '(21, Pin.IN, Pin.PULL_UP)\n', (71, 96), False, 'from machine import Pin\n'), ((108, 124), 'machine.Pin', 'Pin', (['(25)', 'Pin.OUT'], {}), '(25, Pin.OUT)\n', (111, 124), False, 'from machine import Pin\n'), ((208, 224), 'led.LED.led_blink', 'LED.led_blink', (['(5)'], {}), '(5)\n', (221, 224), False, 'from led import LED\n'), ((323, 339), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (333, 339), False, 'import time\n'), ((440, 458), 'led.LED.led_display', 'LED.led_display', (['(2)'], {}), '(2)\n', (455, 458), False, 'from led import LED\n')] |
from nipype.interfaces import utility as util
from nipype.pipeline import engine as pe
from .interfaces import dsi_studio as dsi
import nipype.interfaces.diffusion_toolkit as dtk
from nipype.algorithms.misc import Gunzip
import os.path
def create_pipeline(name="dsi_track", opt="", ensemble=""):
    """Build a nipype workflow running DSI Studio fiber tracking.

    Args:
        name: workflow name (also used as its base output dir).
        opt: comma-separated "key:value" overrides, e.g. "nos:10000";
            malformed entries are reported and skipped.
        ensemble: '' for a single tracking node, or 'angle'/'min_length'
            to fan out a MapNode over that input and merge the tracks.

    Returns:
        The configured nipype Workflow.
    """
    parameters = {'nos': 5000}
    # maps the ensemble choice to the FiberTrack input it iterates over
    ensemble_dict = {'angle': 'angle_thres',
                     'min_length': 'min_length'}
    inputnode = pe.Node(
        interface=util.IdentityInterface(
            fields=["odf", "seed", "angle", "algorithm", "min_length"]),
        name="inputnode")
    if opt is not None:
        opt_list = opt.split(',')
        for o in opt_list:
            try:
                [key, value] = o.split(':')
                parameters[key] = value
            except ValueError:
                print(o + ': irregular format, skipping')
    if ensemble:
        # one tracking run per value of the ensembled parameter
        tckgen = pe.MapNode(dsi.FiberTrack(),
                            name='track', iterfield=ensemble_dict[ensemble])
        gunzip = pe.MapNode(interface=Gunzip(), name="gunzip",
                            iterfield='in_file')
    else:
        tckgen = pe.Node(dsi.FiberTrack(), name='track')
        gunzip = pe.Node(interface=Gunzip(), name="gunzip")
    # number of seeds/streamlines (may have been overridden via opt)
    tckgen.inputs.nos = int(parameters['nos'])
    tckmerge = pe.Node(interface=dtk.TrackMerge(), name="merge")
    output_fields = ["tck"]
    outputnode = pe.Node(
        interface=util.IdentityInterface(fields=output_fields),
        name="outputnode")
    workflow = pe.Workflow(name=name)
    workflow.base_output_dir = name
    workflow.connect([(inputnode, tckgen, [("odf", "in_file"),
                                           ("angle", "angle_thres"),
                                           ("min_length", "min_length")]),
                      (tckgen, gunzip, [("out_file", "in_file")])])
    # only wire the seed image through when one was supplied
    if inputnode.inputs.seed:
        workflow.connect([(inputnode, tckgen, [("seed", "seed_image")])])
    if ensemble:
        # merge the per-parameter tracks into a single output file
        workflow.connect([
            (gunzip, tckmerge, [("out_file", "track_files")]),
            (tckmerge, outputnode, [("track_file", "tck")])
        ])
    else:
        workflow.connect([(gunzip, outputnode, [("out_file", "tck")])])
    return workflow
def get_parent():
    """Name of the upstream workflow stage whose output this stage consumes."""
    parent_stage = "dsi_rec"
    return parent_stage
| [
"nipype.interfaces.diffusion_toolkit.TrackMerge",
"nipype.pipeline.engine.Workflow",
"nipype.interfaces.utility.IdentityInterface",
"nipype.algorithms.misc.Gunzip"
] | [((1522, 1544), 'nipype.pipeline.engine.Workflow', 'pe.Workflow', ([], {'name': 'name'}), '(name=name)\n', (1533, 1544), True, 'from nipype.pipeline import engine as pe\n'), ((468, 554), 'nipype.interfaces.utility.IdentityInterface', 'util.IdentityInterface', ([], {'fields': "['odf', 'seed', 'angle', 'algorithm', 'min_length']"}), "(fields=['odf', 'seed', 'angle', 'algorithm',\n 'min_length'])\n", (490, 554), True, 'from nipype.interfaces import utility as util\n'), ((1328, 1344), 'nipype.interfaces.diffusion_toolkit.TrackMerge', 'dtk.TrackMerge', ([], {}), '()\n', (1342, 1344), True, 'import nipype.interfaces.diffusion_toolkit as dtk\n'), ((1433, 1477), 'nipype.interfaces.utility.IdentityInterface', 'util.IdentityInterface', ([], {'fields': 'output_fields'}), '(fields=output_fields)\n', (1455, 1477), True, 'from nipype.interfaces import utility as util\n'), ((1046, 1054), 'nipype.algorithms.misc.Gunzip', 'Gunzip', ([], {}), '()\n', (1052, 1054), False, 'from nipype.algorithms.misc import Gunzip\n'), ((1222, 1230), 'nipype.algorithms.misc.Gunzip', 'Gunzip', ([], {}), '()\n', (1228, 1230), False, 'from nipype.algorithms.misc import Gunzip\n')] |
import pandas as pd
from phc.easy.ocr.suggestion import (expand_array_column,
expand_medication_administrations,
frame_for_type)
# Fixture: a single OCR suggestion row with one medicationAdministration
# carrying three candidate statuses; expand_array_column flattens the
# "suggestions" list into one row per suggestion.
sample = expand_array_column(
    pd.DataFrame(
        [
            {
                "suggestions": [
                    {
                        "id": "728e79cd-6cd2-421f-9e38-3181200c301",
                        "condition": {
                            "conditionCode": [],
                            "onsetDate": [],
                            "abatementDate": [],
                            "bodySite": [],
                        },
                        "observation": {},
                        "medicationAdministration": {
                            "medicationCode": [
                                {
                                    "value": {
                                        "system": "http://www.nlm.nih.gov/research/umls/rxnorm",
                                        "code": "3640",
                                        "display": "doxycycline",
                                    },
                                    "dataSource": {"source": "comprehend"},
                                    "confidence": 0.996650755405426,
                                    "sourceText": {
                                        "text": "doxycycline",
                                        "location": {
                                            "startIndex": 11,
                                            "endIndex": 22,
                                        },
                                    },
                                }
                            ],
                            "date": [],
                            "endDate": [],
                            "status": [
                                {
                                    "value": "unknown",
                                    "dataSource": {"source": "comprehend"},
                                    "confidence": 0.9,
                                },
                                {
                                    "value": "completed",
                                    "dataSource": {"source": "comprehend"},
                                    "confidence": 0.9,
                                },
                                {
                                    "value": "in-progress",
                                    "dataSource": {"source": "comprehend"},
                                    "confidence": 0.9,
                                },
                            ],
                            "dosage": [
                                {
                                    "value": {
                                        "id": "0",
                                        "strength": None,
                                        "dosage": None,
                                        "duration": None,
                                        "form": None,
                                        "frequencey": None,
                                        "rate": None,
                                        "route": "po",
                                    },
                                    "dataSource": {"source": "comprehend"},
                                    "confidence": 0.996650755405426,
                                    "sourceText": {
                                        "text": "po",
                                        "location": {
                                            "startIndex": 23,
                                            "endIndex": 25,
                                        },
                                    },
                                }
                            ],
                        },
                    }
                ],
                "anchorDate": "2021-02-24T12:58:32.058Z",
                "version": 4,
                "suggestionId": "00022-00007-00001",
            }
        ]
    ),
    key="suggestions",
)
def test_medication_administration_expansion():
    """Each candidate status becomes its own row; every other column repeats."""
    df = expand_medication_administrations(
        frame_for_type(sample, "medicationAdministration")
    )
    # Template row: "status_value" holds a placeholder at its expected column
    # position; the dict merge below overwrites it per status while keeping
    # the original insertion (column) order.
    template = {
        "anchorDate": "2021-02-24T12:58:32.058Z",
        "version": 4,
        "suggestionId": "00022-00007-00001",
        "id": "728e79cd-6cd2-421f-9e38-3181200c301",
        "status_value": None,
        "status_confidence": 0.9,
        "status_dataSource_source": "comprehend",
        "dosage_confidence": 0.996650755405426,
        "dosage_dataSource_source": "comprehend",
        "dosage_value_id": "0",
        "dosage_value_strength": None,
        "dosage_value_dosage": None,
        "dosage_value_duration": None,
        "dosage_value_form": None,
        "dosage_value_frequencey": None,
        "dosage_value_rate": None,
        "dosage_value_route": "po",
        "code_confidence": 0.996650755405426,
        "code_dataSource_source": "comprehend",
        "code_value_system": "http://www.nlm.nih.gov/research/umls/rxnorm",
        "code_value_code": "3640",
        "code_value_display": "doxycycline",
        "dosage_sourceText": "po",
        "code_sourceText": "doxycycline",
        "type": "medicationAdministration",
    }
    expected = pd.DataFrame(
        [
            {**template, "status_value": status}
            for status in ("unknown", "completed", "in-progress")
        ]
    )
    pd.testing.assert_frame_equal(df, expected)
| [
"pandas.DataFrame",
"phc.easy.ocr.suggestion.frame_for_type"
] | [((238, 1501), 'pandas.DataFrame', 'pd.DataFrame', (["[{'suggestions': [{'id': '728e79cd-6cd2-421f-9e38-3181200c301', 'condition':\n {'conditionCode': [], 'onsetDate': [], 'abatementDate': [], 'bodySite':\n []}, 'observation': {}, 'medicationAdministration': {'medicationCode':\n [{'value': {'system': 'http://www.nlm.nih.gov/research/umls/rxnorm',\n 'code': '3640', 'display': 'doxycycline'}, 'dataSource': {'source':\n 'comprehend'}, 'confidence': 0.996650755405426, 'sourceText': {'text':\n 'doxycycline', 'location': {'startIndex': 11, 'endIndex': 22}}}],\n 'date': [], 'endDate': [], 'status': [{'value': 'unknown', 'dataSource':\n {'source': 'comprehend'}, 'confidence': 0.9}, {'value': 'completed',\n 'dataSource': {'source': 'comprehend'}, 'confidence': 0.9}, {'value':\n 'in-progress', 'dataSource': {'source': 'comprehend'}, 'confidence': \n 0.9}], 'dosage': [{'value': {'id': '0', 'strength': None, 'dosage':\n None, 'duration': None, 'form': None, 'frequencey': None, 'rate': None,\n 'route': 'po'}, 'dataSource': {'source': 'comprehend'}, 'confidence': \n 0.996650755405426, 'sourceText': {'text': 'po', 'location': {\n 'startIndex': 23, 'endIndex': 25}}}]}}], 'anchorDate':\n '2021-02-24T12:58:32.058Z', 'version': 4, 'suggestionId':\n '00022-00007-00001'}]"], {}), "([{'suggestions': [{'id': '728e79cd-6cd2-421f-9e38-3181200c301',\n 'condition': {'conditionCode': [], 'onsetDate': [], 'abatementDate': [],\n 'bodySite': []}, 'observation': {}, 'medicationAdministration': {\n 'medicationCode': [{'value': {'system':\n 'http://www.nlm.nih.gov/research/umls/rxnorm', 'code': '3640',\n 'display': 'doxycycline'}, 'dataSource': {'source': 'comprehend'},\n 'confidence': 0.996650755405426, 'sourceText': {'text': 'doxycycline',\n 'location': {'startIndex': 11, 'endIndex': 22}}}], 'date': [],\n 'endDate': [], 'status': [{'value': 'unknown', 'dataSource': {'source':\n 'comprehend'}, 'confidence': 0.9}, {'value': 'completed', 'dataSource':\n {'source': 'comprehend'}, 'confidence': 
0.9}, {'value': 'in-progress',\n 'dataSource': {'source': 'comprehend'}, 'confidence': 0.9}], 'dosage':\n [{'value': {'id': '0', 'strength': None, 'dosage': None, 'duration':\n None, 'form': None, 'frequencey': None, 'rate': None, 'route': 'po'},\n 'dataSource': {'source': 'comprehend'}, 'confidence': 0.996650755405426,\n 'sourceText': {'text': 'po', 'location': {'startIndex': 23, 'endIndex':\n 25}}}]}}], 'anchorDate': '2021-02-24T12:58:32.058Z', 'version': 4,\n 'suggestionId': '00022-00007-00001'}])\n", (250, 1501), True, 'import pandas as pd\n'), ((4195, 4245), 'phc.easy.ocr.suggestion.frame_for_type', 'frame_for_type', (['sample', '"""medicationAdministration"""'], {}), "(sample, 'medicationAdministration')\n", (4209, 4245), False, 'from phc.easy.ocr.suggestion import expand_array_column, expand_medication_administrations, frame_for_type\n'), ((4308, 7056), 'pandas.DataFrame', 'pd.DataFrame', (["[{'anchorDate': '2021-02-24T12:58:32.058Z', 'version': 4, 'suggestionId':\n '00022-00007-00001', 'id': '728e79cd-6cd2-421f-9e38-3181200c301',\n 'status_value': 'unknown', 'status_confidence': 0.9,\n 'status_dataSource_source': 'comprehend', 'dosage_confidence': \n 0.996650755405426, 'dosage_dataSource_source': 'comprehend',\n 'dosage_value_id': '0', 'dosage_value_strength': None,\n 'dosage_value_dosage': None, 'dosage_value_duration': None,\n 'dosage_value_form': None, 'dosage_value_frequencey': None,\n 'dosage_value_rate': None, 'dosage_value_route': 'po',\n 'code_confidence': 0.996650755405426, 'code_dataSource_source':\n 'comprehend', 'code_value_system':\n 'http://www.nlm.nih.gov/research/umls/rxnorm', 'code_value_code':\n '3640', 'code_value_display': 'doxycycline', 'dosage_sourceText': 'po',\n 'code_sourceText': 'doxycycline', 'type': 'medicationAdministration'},\n {'anchorDate': '2021-02-24T12:58:32.058Z', 'version': 4, 'suggestionId':\n '00022-00007-00001', 'id': '728e79cd-6cd2-421f-9e38-3181200c301',\n 'status_value': 'completed', 'status_confidence': 0.9,\n 
'status_dataSource_source': 'comprehend', 'dosage_confidence': \n 0.996650755405426, 'dosage_dataSource_source': 'comprehend',\n 'dosage_value_id': '0', 'dosage_value_strength': None,\n 'dosage_value_dosage': None, 'dosage_value_duration': None,\n 'dosage_value_form': None, 'dosage_value_frequencey': None,\n 'dosage_value_rate': None, 'dosage_value_route': 'po',\n 'code_confidence': 0.996650755405426, 'code_dataSource_source':\n 'comprehend', 'code_value_system':\n 'http://www.nlm.nih.gov/research/umls/rxnorm', 'code_value_code':\n '3640', 'code_value_display': 'doxycycline', 'dosage_sourceText': 'po',\n 'code_sourceText': 'doxycycline', 'type': 'medicationAdministration'},\n {'anchorDate': '2021-02-24T12:58:32.058Z', 'version': 4, 'suggestionId':\n '00022-00007-00001', 'id': '728e79cd-6cd2-421f-9e38-3181200c301',\n 'status_value': 'in-progress', 'status_confidence': 0.9,\n 'status_dataSource_source': 'comprehend', 'dosage_confidence': \n 0.996650755405426, 'dosage_dataSource_source': 'comprehend',\n 'dosage_value_id': '0', 'dosage_value_strength': None,\n 'dosage_value_dosage': None, 'dosage_value_duration': None,\n 'dosage_value_form': None, 'dosage_value_frequencey': None,\n 'dosage_value_rate': None, 'dosage_value_route': 'po',\n 'code_confidence': 0.996650755405426, 'code_dataSource_source':\n 'comprehend', 'code_value_system':\n 'http://www.nlm.nih.gov/research/umls/rxnorm', 'code_value_code':\n '3640', 'code_value_display': 'doxycycline', 'dosage_sourceText': 'po',\n 'code_sourceText': 'doxycycline', 'type': 'medicationAdministration'}]"], {}), "([{'anchorDate': '2021-02-24T12:58:32.058Z', 'version': 4,\n 'suggestionId': '00022-00007-00001', 'id':\n '728e79cd-6cd2-421f-9e38-3181200c301', 'status_value': 'unknown',\n 'status_confidence': 0.9, 'status_dataSource_source': 'comprehend',\n 'dosage_confidence': 0.996650755405426, 'dosage_dataSource_source':\n 'comprehend', 'dosage_value_id': '0', 'dosage_value_strength': None,\n 'dosage_value_dosage': None, 
'dosage_value_duration': None,\n 'dosage_value_form': None, 'dosage_value_frequencey': None,\n 'dosage_value_rate': None, 'dosage_value_route': 'po',\n 'code_confidence': 0.996650755405426, 'code_dataSource_source':\n 'comprehend', 'code_value_system':\n 'http://www.nlm.nih.gov/research/umls/rxnorm', 'code_value_code':\n '3640', 'code_value_display': 'doxycycline', 'dosage_sourceText': 'po',\n 'code_sourceText': 'doxycycline', 'type': 'medicationAdministration'},\n {'anchorDate': '2021-02-24T12:58:32.058Z', 'version': 4, 'suggestionId':\n '00022-00007-00001', 'id': '728e79cd-6cd2-421f-9e38-3181200c301',\n 'status_value': 'completed', 'status_confidence': 0.9,\n 'status_dataSource_source': 'comprehend', 'dosage_confidence': \n 0.996650755405426, 'dosage_dataSource_source': 'comprehend',\n 'dosage_value_id': '0', 'dosage_value_strength': None,\n 'dosage_value_dosage': None, 'dosage_value_duration': None,\n 'dosage_value_form': None, 'dosage_value_frequencey': None,\n 'dosage_value_rate': None, 'dosage_value_route': 'po',\n 'code_confidence': 0.996650755405426, 'code_dataSource_source':\n 'comprehend', 'code_value_system':\n 'http://www.nlm.nih.gov/research/umls/rxnorm', 'code_value_code':\n '3640', 'code_value_display': 'doxycycline', 'dosage_sourceText': 'po',\n 'code_sourceText': 'doxycycline', 'type': 'medicationAdministration'},\n {'anchorDate': '2021-02-24T12:58:32.058Z', 'version': 4, 'suggestionId':\n '00022-00007-00001', 'id': '728e79cd-6cd2-421f-9e38-3181200c301',\n 'status_value': 'in-progress', 'status_confidence': 0.9,\n 'status_dataSource_source': 'comprehend', 'dosage_confidence': \n 0.996650755405426, 'dosage_dataSource_source': 'comprehend',\n 'dosage_value_id': '0', 'dosage_value_strength': None,\n 'dosage_value_dosage': None, 'dosage_value_duration': None,\n 'dosage_value_form': None, 'dosage_value_frequencey': None,\n 'dosage_value_rate': None, 'dosage_value_route': 'po',\n 'code_confidence': 0.996650755405426, 'code_dataSource_source':\n 
'comprehend', 'code_value_system':\n 'http://www.nlm.nih.gov/research/umls/rxnorm', 'code_value_code':\n '3640', 'code_value_display': 'doxycycline', 'dosage_sourceText': 'po',\n 'code_sourceText': 'doxycycline', 'type': 'medicationAdministration'}])\n", (4320, 7056), True, 'import pandas as pd\n')] |
from django.contrib import admin
from .models import Auction, Lot, Bid
class BidAdmin(admin.ModelAdmin):
readonly_fields = (
'user',
'auction',
'bid_amount',
'bid_time',
)
admin.site.register(Auction)
admin.site.register(Lot)
admin.site.register(Bid, BidAdmin)
| [
"django.contrib.admin.site.register"
] | [((223, 251), 'django.contrib.admin.site.register', 'admin.site.register', (['Auction'], {}), '(Auction)\n', (242, 251), False, 'from django.contrib import admin\n'), ((252, 276), 'django.contrib.admin.site.register', 'admin.site.register', (['Lot'], {}), '(Lot)\n', (271, 276), False, 'from django.contrib import admin\n'), ((277, 311), 'django.contrib.admin.site.register', 'admin.site.register', (['Bid', 'BidAdmin'], {}), '(Bid, BidAdmin)\n', (296, 311), False, 'from django.contrib import admin\n')] |
import os
from aim.web.api.utils import APIRouter # wrapper for fastapi.APIRouter
from fastapi.responses import FileResponse
from aim.web.api.projects.project import Project
general_router = APIRouter()
@general_router.get('/static-files/{path:path}/')
async def serve_static_files(path):
    """Serve a pre-built UI asset, preferring a pre-compressed ``.gz`` copy.

    ``path`` comes straight from the URL; it is resolved inside the bundled
    ``ui/build`` directory and clamped back to it, so ``../`` or absolute
    segments can no longer escape the build root (path-traversal fix).
    """
    from aim import web
    base_dir = os.path.join(os.path.dirname(web.__file__), 'ui', 'build')
    static_file_name = os.path.normpath(os.path.join(base_dir, path))
    # Security: never serve a file outside the build directory.
    if not static_file_name.startswith(base_dir + os.sep):
        static_file_name = os.path.join(base_dir, 'index.html')
    compressed_file_name = '{}.gz'.format(static_file_name)
    if os.path.exists(compressed_file_name):
        return FileResponse(compressed_file_name, headers={'Content-Encoding': 'gzip'})
    return FileResponse(static_file_name)
@general_router.get('/static/{exp_name}/{commit_hash}/media/images/{path}/')
async def serve_images(exp_name, commit_hash, path):
    """Serve a tracked image artifact from the repository's media store."""
    project = Project()
    # NOTE(review): these segments come from the URL; a '..' segment is not
    # rejected here -- confirm the router guarantees safe single segments.
    image_file = os.path.join(
        project.repo_path, exp_name, commit_hash,
        'objects', 'media', 'images', path,
    )
    return FileResponse(image_file)
# do not change the placement of this method
# as it also serves as a fallback for wrong url routes
@general_router.get('/{path:path}/')
async def serve_index_html():
    """Catch-all route: serve the SPA entry point (``index.html``).

    Must stay registered last so it also acts as the fallback for
    unknown URLs. Prefers a pre-compressed ``.gz`` copy when present.
    """
    from aim import web
    build_dir = os.path.join(os.path.dirname(web.__file__), 'ui', 'build')
    index_file = os.path.join(build_dir, 'index.html')
    gz_file = '{}.gz'.format(index_file)
    if os.path.exists(gz_file):
        return FileResponse(gz_file, headers={'Content-Encoding': 'gzip'})
    return FileResponse(index_file)
| [
"os.path.exists",
"aim.web.api.projects.project.Project",
"fastapi.responses.FileResponse",
"aim.web.api.utils.APIRouter",
"os.path.join",
"os.path.dirname"
] | [((195, 206), 'aim.web.api.utils.APIRouter', 'APIRouter', ([], {}), '()\n', (204, 206), False, 'from aim.web.api.utils import APIRouter\n'), ((474, 510), 'os.path.exists', 'os.path.exists', (['compressed_file_name'], {}), '(compressed_file_name)\n', (488, 510), False, 'import os\n'), ((611, 641), 'fastapi.responses.FileResponse', 'FileResponse', (['static_file_name'], {}), '(static_file_name)\n', (623, 641), False, 'from fastapi.responses import FileResponse\n'), ((788, 797), 'aim.web.api.projects.project.Project', 'Project', ([], {}), '()\n', (795, 797), False, 'from aim.web.api.projects.project import Project\n'), ((815, 909), 'os.path.join', 'os.path.join', (['project.repo_path', 'exp_name', 'commit_hash', '"""objects"""', '"""media"""', '"""images"""', 'path'], {}), "(project.repo_path, exp_name, commit_hash, 'objects', 'media',\n 'images', path)\n", (827, 909), False, 'import os\n'), ((1007, 1031), 'fastapi.responses.FileResponse', 'FileResponse', (['image_file'], {}), '(image_file)\n', (1019, 1031), False, 'from fastapi.responses import FileResponse\n'), ((1388, 1424), 'os.path.exists', 'os.path.exists', (['compressed_file_name'], {}), '(compressed_file_name)\n', (1402, 1424), False, 'import os\n'), ((1525, 1555), 'fastapi.responses.FileResponse', 'FileResponse', (['static_file_name'], {}), '(static_file_name)\n', (1537, 1555), False, 'from fastapi.responses import FileResponse\n'), ((355, 384), 'os.path.dirname', 'os.path.dirname', (['web.__file__'], {}), '(web.__file__)\n', (370, 384), False, 'import os\n'), ((527, 599), 'fastapi.responses.FileResponse', 'FileResponse', (['compressed_file_name'], {'headers': "{'Content-Encoding': 'gzip'}"}), "(compressed_file_name, headers={'Content-Encoding': 'gzip'})\n", (539, 599), False, 'from fastapi.responses import FileResponse\n'), ((1261, 1290), 'os.path.dirname', 'os.path.dirname', (['web.__file__'], {}), '(web.__file__)\n', (1276, 1290), False, 'import os\n'), ((1441, 1513), 'fastapi.responses.FileResponse', 
'FileResponse', (['compressed_file_name'], {'headers': "{'Content-Encoding': 'gzip'}"}), "(compressed_file_name, headers={'Content-Encoding': 'gzip'})\n", (1453, 1513), False, 'from fastapi.responses import FileResponse\n')] |
import threading
import enum
import os
import sys
import copy
import json
import asyncio
import attr
import uuid
import functools
import datetime
import multiprocessing
from time import monotonic
import time
import copy
from collections import deque
from concurrent.futures import ThreadPoolExecutor
from async_timeout import timeout
from collections import namedtuple
from typing import ( # noqa: F401 pylint: disable=unused-import
Optional, Any, Callable, List, TypeVar, Dict, Coroutine, Set,
TYPE_CHECKING, Awaitable, Iterator)
from os.path import join
dir_path = os.path.dirname(os.path.realpath(__file__))
import merceedge.util as util
import merceedge.util.dt as dt_util
import merceedge.util.id as id_util
import merceedge.util.yaml as yaml_util
import merceedge.util.module as module_util
from merceedge.util.async_util import (
Context,
callback,
is_callback,
run_callback_threadsafe,
run_coroutine_threadsafe,
fire_coroutine_threadsafe,
CALLBACK_TYPE,
T
)
from merceedge.util.signal import async_register_signal_handling
from merceedge.exceptions import (
MerceEdgeError,
ComponentTemplateNotFound
)
from merceedge.const import (
MATCH_ALL,
EVENT_TIME_CHANGED,
EVENT_SERVICE_EXECUTED,
EVENT_CALL_SERVICE,
EVENT_STATE_CHANGED,
EVENT_TIMER_OUT_OF_SYNC,
EVENT_EDGE_STOP,
ATTR_NOW,
ATTR_DATE,
ATTR_TIME,
ATTR_SECONDS,
)
from merceedge.service import ServiceRegistry
from merceedge.providers import ServiceProviderFactory
from merceedge.api_server.models import (
ComponentDBModel,
WireDBModel
)
from merceedge.settings import (
logger_access,
logger_code,
logger_console
)
# Component domain name for this package.
DOMAIN = "merceedge"
# Module-wide logger (configured in merceedge.settings).
_LOGGER = logger_code
class MerceEdge(object):
    """Root object of a Merce Edge node.

    Owns the asyncio event loop and thread-pool executor, the event bus,
    the service registry, and the registries of component templates,
    component instances and wires.
    """
    def __init__(self, user_config):
        self.user_config = user_config
        self.loop = asyncio.get_event_loop()
        executor_opts = {'max_workers': None}  # type: Dict[str, Any]
        if sys.version_info[:2] >= (3, 6):
            executor_opts['thread_name_prefix'] = 'SyncWorker'
        self.executor = ThreadPoolExecutor(**executor_opts)
        self.loop.set_default_executor(self.executor)
        self._pending_tasks = []  # type: list
        self._track_task = True
        self.exit_code = 0
        # _async_stop will set self._stopped (created in async_run)
        # instead of stopping the loop directly.
        self.bus = EventBus(self)
        self.services = ServiceRegistry(self)
        self.component_templates = {}  # key: component template name
        self.components = {}  # key: component id
        self.wires = {}  # key: wire id
        self.wireload_factory = WireLoadFactory(user_config)
    def dyload_component(self, component_config):
        """Dynamically load a new component (not implemented yet)."""
        # TODO
    def start(self):
        """Start.
        Note: This function is only used for testing.
        For regular use, use "await edge.run()".
        """
        # Register the async start
        fire_coroutine_threadsafe(self.async_start(), self.loop)
        # Run forever
        try:
            # Block until stopped
            _LOGGER.info("Starting MerceEdge core loop")
            self.loop.run_forever()
        except KeyboardInterrupt:
            # BUGFIX: logging.Logger.info() does not accept print()'s
            # `flush` keyword; passing it raised TypeError right here
            # during shutdown.
            _LOGGER.info("Attempting graceful shutdown, press Ctrl+C again to exit…")
            # Do not show `asyncio.CancelledError` exceptions during shutdown
            # (a lot of these may be generated, skip this if you prefer to see them)
            def shutdown_exception_handler(loop, context):
                if "exception" not in context \
                        or not isinstance(context["exception"], asyncio.CancelledError):
                    loop.default_exception_handler(context)
            self.loop.set_exception_handler(shutdown_exception_handler)
            # Handle shutdown gracefully by waiting for all tasks to be cancelled
            # NOTE(review): asyncio.Task.all_tasks / its `loop` kwarg were
            # removed in newer Pythons -- confirm the targeted version.
            tasks = asyncio.gather(*asyncio.Task.all_tasks(loop=self.loop), loop=self.loop, return_exceptions=True)
            tasks.add_done_callback(lambda t: self.loop.stop())
            tasks.cancel()
            # Keep the event loop running until it is either destroyed or all
            # tasks have really terminated
            while not tasks.done() and not self.loop.is_closed():
                self.loop.run_forever()
        finally:
            self.loop.close()
        return self.exit_code
    def stop(self):
        """Request a clean shutdown from any thread."""
        fire_coroutine_threadsafe(self.async_stop(), self.loop)
    def load_local_component_templates(self, config_yml_dict):
        """Load component templates from the paths listed in the user config.

        Paths may be absolute (POSIX '/' or Windows drive form) or relative
        to $MERCE_EDGE_HOME/merceedge.
        """
        try:
            component_template_paths = config_yml_dict['component_template']['paths']
            for path in component_template_paths:
                ab_path = ''
                # path[1] == ":" detects a Windows drive path like "C:...".
                if path.startswith('/') or path[1] == ":":
                    ab_path = path
                else:
                    ab_path = os.path.join(os.environ['MERCE_EDGE_HOME'], 'merceedge', path)
                self._load_local_component_templates(ab_path)
        except KeyError:
            raise MerceEdgeError('config.yaml foramt invalide')
    def _load_local_component_templates(self, component_template_path):
        """Read local component templates path, generate component template objects
        """
        template_configs = []
        template_configs += [each for each in os.listdir(component_template_path) if each.endswith('.yaml')]
        for template_config in template_configs:
            com_tmp_yaml = yaml_util.load_yaml(join(component_template_path, template_config))
            # Index templates by their declared component name.
            self.component_templates[com_tmp_yaml['component']['name']] = com_tmp_yaml
    def _generate_component_instance(self, component_template_name, id=None, init_params=None):
        """Instantiate a component from a registered template.

        Virtual templates resolve to a WireLoad subclass via the wireload
        factory; everything else becomes a plain Component.
        Returns the new component, or None if the template is unknown.
        """
        com_tmp_yaml = self.component_templates.get(component_template_name, None)
        if com_tmp_yaml:
            if com_tmp_yaml['component'].get('virtual', False):
                new_com_cls = self.wireload_factory.get_class(com_tmp_yaml['component']['name'])
                new_com = new_com_cls(self, com_tmp_yaml, id, init_params)
            else:
                new_com = Component(self, com_tmp_yaml, id, init_params)
            self.components[new_com.id] = new_com
            return new_com
        else:
            # TODO log a warning: no such component template
            pass
        return None
    def generate_component_instance(self, component_template_name, component_id, init_params=None):
        """Return the component with component_id, creating it from the
        template when it does not exist yet.

        Raises ComponentTemplateNotFound when the template is unknown.
        """
        component = self.components.get(component_id, None)
        if component is None:
            component = self._generate_component_instance(component_template_name, component_id, init_params)
        if component:
            return component
        raise ComponentTemplateNotFound
    async def connect_interface(self,
                                output_component_id, output_name,
                                input_component_id, input_name,
                                output_params={}, input_params={},
                                wire_id=None):
        """Create and connect a wire between an output sink and an input slot.

        Sets per-wire interface parameters, registers the wire, brings up
        both provider sides, and returns the new Wire.
        """
        output_sink = self.components[output_component_id].outputs[output_name]
        input_slot = self.components[input_component_id].inputs[input_name]
        wire = Wire(edge=self, output_sink=output_sink, input_slot=input_slot, id=wire_id)
        wire.set_input_params(output_params)
        wire.set_output_params(input_params)
        self.wires[wire.id] = wire
        await self.components[output_component_id].outputs[output_name].conn_output_sink(output_wire_params=output_params)
        await self.components[input_component_id].inputs[input_name].conn_input_slot(input_wire_params=input_params)
        wire.connect()
        return wire
    def delete_wire(self, wire_id):
        """Disconnect and drop a wire; returns it, or None when unknown."""
        try:
            wire = self.wires[wire_id]
            wire.disconnect()
            del self.wires[wire.id]
            return wire
        except KeyError:
            return None
    def stop_wireload_exec(self):
        """Stop all wireload run loops (not implemented yet)."""
        # TODO
        pass
    def restore_entities_from_db(self):
        """Restore components / wires from the local db on edge start.

        1. Recreate each component from its recorded template, reusing the
           persisted uuid.
        2. Re-connect each recorded wire.
        """
        # Restruct components
        component_db_list = ComponentDBModel.query.all()
        for component_db_record in component_db_list:
            self._generate_component_instance(component_db_record.template_name,
                                              component_db_record.uuid)
        # Restruct wires
        wire_db_list = WireDBModel.query.all()
        for wire_db_record in wire_db_list:
            try:
                output_component_uuid = wire_db_record.output_component_uuid
                input_component_uuid = wire_db_record.input_component_uuid
                output_name = wire_db_record.output_name
                input_name = wire_db_record.input_name
                wire_id = wire_db_record.id
                # BUGFIX: wire_id used to be passed positionally into the
                # output_params slot, and the coroutine was never awaited.
                # Pass it by keyword and schedule the coroutine on the loop.
                self.add_job(self.connect_interface(
                    output_component_uuid, output_name,
                    input_component_uuid, input_name,
                    wire_id=wire_id))
            except KeyError:
                # TODO log a warning about the malformed wire record
                continue
    async def load_formula(self, formula_path):
        """Load a formula YAML: instantiate its components and connect its
        wires. Exits the process on a malformed formula.
        """
        formula_yaml = yaml_util.load_yaml(formula_path)
        try:
            components = formula_yaml['components']
            wires = formula_yaml['wires']
            for component in components:
                self.generate_component_instance(component['template'],
                                                 component['id'],
                                                 component.get('parameters', None))
            for wire in wires:
                # struct components
                output_com = self.components[wire['output_sink']['component_id']]
                input_com = self.components[wire['input_slot']['component_id']]
                # struct wire
                output_name = wire['output_sink']['output']
                input_name = wire['input_slot']['input']
                # wire interface paramaters
                output_params = wire['output_sink'].get('parameters', {})
                input_params = wire['input_slot'].get('parameters', {})
                await self.connect_interface(output_com.id, output_name,
                                             input_com.id, input_name,
                                             output_params, input_params)
        except KeyError as e:
            _LOGGER.error("Load formula error, program exit!: {}".format(e))
            sys.exit(-1)
        except ComponentTemplateNotFound:
            _LOGGER.error(ComponentTemplateNotFound.__str__)
    def add_job(self, target: Callable[..., None], *args: Any) -> None:
        """Add job to the executor pool (thread-safe).
        target: target to call.
        args: parameters for method to call.
        """
        if target is None:
            raise ValueError("Don't call add_job with None")
        self.loop.call_soon_threadsafe(self.async_add_job, target, *args)
    @callback
    def async_add_job(
            self,
            target: Callable[..., Any],
            *args: Any) -> Optional[asyncio.Future]:
        """Add a job from within the event loop.
        This method must be run in the event loop.
        target: target to call.
        args: parameters for method to call.
        """
        task = None
        # Check for partials to properly determine if coroutine function
        check_target = target
        while isinstance(check_target, functools.partial):
            check_target = check_target.func
        if asyncio.iscoroutine(check_target):
            task = self.loop.create_task(target)  # type: ignore
        elif is_callback(check_target):
            self.loop.call_soon(target, *args)
        elif asyncio.iscoroutinefunction(check_target):
            task = self.loop.create_task(target(*args))
        else:
            # Plain callable: run it in the default executor.
            task = self.loop.run_in_executor(  # type: ignore
                None, target, *args)
        # If a task is scheduled
        if self._track_task and task is not None:
            self._pending_tasks.append(task)
        return task
    @callback
    def async_run_job(self, target: Callable[..., None], *args: Any) -> None:
        """Run a job from within the event loop.
        This method must be run in the event loop.
        target: target to call.
        args: parameters for method to call.
        """
        if not asyncio.iscoroutine(target) and is_callback(target):
            target(*args)
        else:
            self.async_add_job(target, *args)
    @callback
    def async_create_task(self, target: Coroutine) -> asyncio.tasks.Task:
        """Create a task from within the eventloop.
        This method must be run in the event loop.
        target: target to call.
        """
        task = self.loop.create_task(target)  # type: asyncio.tasks.Task
        if self._track_task:
            self._pending_tasks.append(task)
        return task
    @callback
    def async_add_executor_job(
            self,
            target: Callable[..., T],
            *args: Any) -> Awaitable[T]:
        """Add an executor job from within the event loop."""
        task = self.loop.run_in_executor(
            None, target, *args)
        # If a task is scheduled
        if self._track_task:
            self._pending_tasks.append(task)
        return task
    @callback
    def async_track_tasks(self) -> None:
        """Track tasks so you can wait for all tasks to be done."""
        self._track_task = True
    @callback
    def async_stop_track_tasks(self) -> None:
        """Stop track tasks so you can't wait for all tasks to be done."""
        self._track_task = False
    def block_till_done(self) -> None:
        """Block till all pending work is done."""
        run_coroutine_threadsafe(
            self.async_block_till_done(), loop=self.loop).result()
    async def async_block_till_done(self) -> None:
        """Block till all pending work is done."""
        # To flush out any call_soon_threadsafe
        await asyncio.sleep(0)
        while self._pending_tasks:
            _LOGGER.debug("async_block_till_done -----")
            pending = [task for task in self._pending_tasks
                       if not task.done()]
            self._pending_tasks.clear()
            _LOGGER.debug(pending)
            if pending:
                _LOGGER.debug('pending')
                await asyncio.wait(pending)
            else:
                _LOGGER.debug('no pending')
                await asyncio.sleep(0)
    async def async_run(self) -> int:
        """ MerceEdge main entry point.
        Start and block until stopped.
        This method is a coroutine.
        """
        # _async_stop will set this instead of stopping the loop
        self._stopped = asyncio.Event()
        await self.async_start()
        async_register_signal_handling(self)
        _LOGGER.debug("self._stopped.wait() start")
        # BUGFIX: debug leftover print() replaced with the module logger.
        _LOGGER.debug("stopped event: %s", self._stopped)
        await self._stopped.wait()
        _LOGGER.debug("self._stopped.wait() stop")
        return self.exit_code
    async def async_start(self) -> None:
        """Finalize startup from inside the event loop.
        This method is a coroutine.
        """
        setattr(self.loop, '_thread_ident', threading.get_ident())
        try:
            # Only block briefly for startup work; don't hang forever.
            self.async_stop_track_tasks()
            with timeout(15):
                await self.async_block_till_done()
        except asyncio.TimeoutError:
            # Something is blocking startup; continue anyway.
            pass
        # Allow automations to set up the start triggers before changing state
        await asyncio.sleep(0)
        _async_create_timer(self)
    async def async_stop(self, exit_code: int = 0, *,
                         force: bool = False) -> None:
        """Stop MerceEdge and shuts down all threads.
        The "force" flag commands async_stop to proceed regardless of
        the edge's current state. You should not set this flag
        unless you're testing.
        This method is a coroutine.
        """
        _LOGGER.debug("Stop all wire load execution...")
        self.stop_wireload_exec()
        self.async_track_tasks()
        self.bus.async_fire(EVENT_EDGE_STOP)
        await self.async_block_till_done()
        self.executor.shutdown()
        _LOGGER.debug('MerceEdge loop stop...')
        self.loop.stop()
    def wireload_emit_output_payload(self, output_name, emit_call, payload):
        """Schedule a wireload's emit coroutine on the loop."""
        self.add_job(emit_call)
class Entity(object):
    """ABC for Merce Edge entity(Component, Interface, etc.)"""
    # NOTE(review): class-level fallbacks. Subclasses that never assign
    # their own `id`/`attrs` share these -- see set_attrs below.
    id = id_util.generte_unique_id()
    attrs = {}
    def load_attrs(self, config):
        # TODO
        raise NotImplementedError
    def get_attrs(self, attr_key):
        """Return the attribute value for attr_key, or None when missing."""
        try:
            return self.attrs.get(attr_key)
        except KeyError as e:
            # dict.get never raises KeyError; kept as defensive logging.
            _LOGGER.error(str(e))
            return None
    def set_attrs(self, _attrs):
        """Merge _attrs into this entity's attributes.

        BUGFIX: copy-on-write. Previously this mutated the class-level
        `attrs` dict, silently sharing attribute updates across every
        Entity instance that had not assigned its own dict.
        """
        if 'attrs' not in self.__dict__:
            self.attrs = dict(self.attrs)
        self.attrs.update(_attrs)
class Component(Entity):
    """ABC for Merce Edge components.

    Builds its Input/Output interfaces from the yaml template's
    'inputs'/'outputs' sections.
    """
    def __init__(self, edge, model_template_config, id=None, init_params=None):
        """
        model_template_config: yaml object
        """
        self.edge = edge
        self.model_template_config = model_template_config
        self.id = id or id_util.generte_unique_id()
        self.inputs = {}
        self.outputs = {}
        self.init_params = init_params or {}
        # init interfaces
        self._init_interfaces()
    @property
    def parameters(self):
        """Component init parameters."""
        return self.init_params
    @parameters.setter
    def parameters(self, params):
        self.init_params = params
    def _init_interfaces(self):
        """initiate inputs & outputs
        """
        inputs = self.model_template_config['component'].get('inputs', None)
        if inputs:
            for _input in inputs:
                self.inputs[_input['name']] = Input(edge=self.edge,
                                                     name=_input['name'],
                                                     component=self,
                                                     attrs=_input['protocol'],
                                                     propreties=_input.get('propreties', None))
        outputs = self.model_template_config['component'].get('outputs', None)
        if outputs:
            for _output in outputs:
                self.outputs[_output['name']] = Output(edge=self.edge,
                                                       name=_output['name'],
                                                       component=self,
                                                       attrs=_output['protocol'],
                                                       propreties=_output.get('propreties', None))
    def get_start_wires_info(self):
        """ Get wires infomation that start from component
        """
        wires = []
        # BUGFIX: self.outputs is a dict; iterating it yields name strings,
        # which have no .output_wires -- iterate the Output objects instead.
        for output in self.outputs.values():
            for wire in output.output_wires.values():
                # TODO
                pass
        return wires
class Interface(Entity):
    """Base class for component interfaces (inputs and outputs).

    Reads its protocol name from the interface attributes; the actual
    transport is handled by a service provider (e.g. MQTT) elsewhere.
    """
    def __init__(self, edge, name, component,
                 attrs=None, propreties=None):
        self.edge = edge
        self.name = name
        self.component = component
        self.propreties = propreties if propreties else {}
        self.attrs = attrs if attrs else {}
        self._set_protocol()
    def _set_protocol(self):
        # Fall back to the edge-internal virtual protocol when the
        # attributes carry no protocol name.
        self.protocol = self.attrs.get('name', 'virtual_interface')
class Output(Interface):
    """Virtual output interface: bridges data from the real world onto
    the edge event bus through its protocol provider.
    """
    def __init__(self, edge, name, component, attrs=None, propreties=None):
        super(Output, self).__init__(edge, name, component, attrs, propreties)
        self.output_wires = {}
        self.data = {}
        # Resolve the protocol provider up front; unknown protocols fail fast.
        self._init_provider()
    def wires_info(self):
        """Return a dict mapping wire id -> wire description."""
        return {wid: w.__repr__() for wid, w in self.output_wires.items()}
    def add_wire(self, wire):
        """Attach a wire to this output."""
        self.output_wires[wire.id] = wire
    def del_wire(self, wire_id):
        """Detach a wire and tell the provider to stop sinking to it."""
        self.provider.disconn_output_sink(self)
        del self.output_wires[wire_id]
    def _init_provider(self):
        """Look up the provider for this interface's protocol."""
        try:
            self.provider = ServiceProviderFactory.get_provider(self.protocol)
            _LOGGER.debug("Output {} load provider {}".format(self.name, self.provider))
        except KeyError:
            _LOGGER.error("Cannot load {} provider".format(self.protocol))
            raise
    async def conn_output_sink(self, output_wire_params={}):
        """Set up the provider and register this output's sink callback."""
        self.edge.add_job(self.provider.async_setup, self.edge, self.attrs)
        await self.provider.conn_output_sink(
            output=self,
            output_wire_params=output_wire_params,
            callback=self.output_sink_callback)
    def output_sink_callback(self, event):
        """Re-fire provider data as a wire-scoped event on the bus.

        Wires subscribe to "wirefire_<component>_<output>" when connected.
        """
        wirefire_event_type = "wirefire_{}_{}".format(self.component.id, self.name)
        self.edge.bus.fire(wirefire_event_type, event.data)
class Input(Interface):
    """Virtual input interface: delivers event-bus data back out to the
    real world through its protocol provider.
    """
    def __init__(self, edge, name, component, attrs=None, propreties=None):
        super(Input, self).__init__(edge, name, component, attrs, propreties)
        self.input_wires = {}
        self._init_provider()
    def wires_info(self):
        """Return a JSON string describing the attached wires."""
        return json.dumps({wid: w.__repr__() for wid, w in self.input_wires.items()})
    def add_wire(self, wire):
        """Attach a wire to this input."""
        self.input_wires[wire.id] = wire
    def del_wire(self, wire_id):
        """Detach a wire."""
        del self.input_wires[wire_id]
    def _init_provider(self):
        """Look up the provider for this interface's protocol."""
        try:
            self.provider = ServiceProviderFactory.get_provider(self.protocol)
        except KeyError:
            # Unknown protocol: propagate to the caller.
            raise
    async def conn_input_slot(self, input_wire_params={}):
        """Set up the provider and connect this input slot."""
        self.edge.add_job(self.provider.async_setup, self.edge, self.attrs)
        await self.provider.conn_input_slot(self, input_wire_params)
    async def emit_data_to_input(self, event):
        """Forward an event payload to the provider for delivery."""
        await self.provider.emit_input_slot(self, event.data)
class State(object):
    """Component State"""
    # Placeholder: component state tracking is not implemented yet.
    # raise NotImplementedError
    # TODO
    pass
class Wire(Entity):
    """Wire: connects one component's Output (sink) to another's Input (slot).

    Naming caveat: internally ``self.input`` holds the upstream Output
    and ``self.output`` the downstream Input; use the ``output_sink`` /
    ``input_slot`` properties to avoid confusion.
    """
    def __init__(self, edge: MerceEdge, output_sink: Output, input_slot: Input, id=None):
        self.edge = edge
        self.id = id or id_util.generte_unique_id()
        # self.input = upstream Output, self.output = downstream Input.
        self.input = output_sink
        self.output = input_slot
        self.input_params = dict()
        self.output_params = dict()
        # Register this wire on both endpoints.
        self.input.add_wire(self)
        self.output.add_wire(self)
    def connect(self):
        # Subscribe the input slot to the output's wire-fire events;
        # the Output fires this event type from output_sink_callback.
        outcom_id = self.output_sink.component.id
        out_name = self.output_sink.name
        wirefire_event_type = "wirefire_{}_{}".format(outcom_id, out_name)
        self.edge.bus.async_listen(wirefire_event_type, self.input_slot.emit_data_to_input)
    def _add_input(self, output_sink: Output):
        output_sink.add_wire(self)
    def _add_output(self, input_slot: Input):
        input_slot.add_wire(self)
    @property
    def output_sink(self):
        # Upstream Output endpoint.
        return self.input
    @property
    def input_slot(self):
        # Downstream Input endpoint.
        return self.output
    def __repr__(self):
        # NOTE(review): returns a dict, violating Python's __repr__
        # contract (must return str) -- repr(wire) raises TypeError.
        # Callers (Input/Output.wires_info) rely on the dict, so the
        # signature is kept; consider renaming to as_dict().
        wire_info = {}
        wire_info["input"] = {"component_id": self.input.component.id,
                              "name": self.input.name}
        wire_info["output"] = {"component_id": self.output.component.id,
                               "name": self.output.name}
        return wire_info
    def set_input_params(self, parameters):
        # Per-wire parameters applied to the upstream Output's attrs.
        self.input_params = parameters
        self.input.set_attrs(parameters)
    def set_output_params(self, parameters):
        # Per-wire parameters applied to the downstream Input's attrs.
        self.output_params = parameters
        self.output.set_attrs(parameters)
    def disconnect(self):
        # Deregister from both endpoints.
        self.input.del_wire(self.id)
        self.output.del_wire(self.id)
class WireLoadFactory:
    """Discovers and indexes WireLoad subclasses on the configured paths."""
    def __init__(self, config):
        """
        config: user configuration (expects config['wireload']['paths'])
        """
        self._classes = {}
        self._load(config['wireload']['paths'])
    def _load(self, paths):
        """Walk each path (relative to this package) and register every
        WireLoad subclass found there.
        """
        for rel_path in paths:
            abs_path = os.path.join(dir_path, rel_path)
            found = module_util.load_modules(abs_path, WireLoad)
            self._classes.update(found)
        _LOGGER.debug("Load wireloads modules: {}".format(self._classes))
    def get_class(self, wireload_name):
        """Return the class registered under wireload_name, or None."""
        return self._classes.get(wireload_name, None)
class WireLoad(Component):
    """Wire load abstract class. Mounted on wire, processing data through wire.
    Filter, Analiysis, Process, etc.
    """
    name = ''
    def __init__(self, edge, model_template_config, component_id=None, init_params=None):
        super(WireLoad, self).__init__(edge, model_template_config, id=component_id, init_params=init_params)
        # NOTE(review): asyncio.Queue's `loop` kwarg was removed in
        # Python 3.10 -- this code assumes an older asyncio. TODO confirm
        # the targeted interpreter version before modernizing.
        self.input_q = asyncio.Queue(maxsize=3, loop=self.edge.loop)
        self.output_q = asyncio.Queue(maxsize=3, loop=self.edge.loop)
        self.is_stop = False
        self.emit_output_call = self.emit_output_payload
    def before_run_setup(self):
        """Need implemented"""
        raise NotImplementedError
    async def put_input_payload(self, payload):
        """Queue an input payload and schedule the processing loop."""
        await self.input_q.put(payload)
        self.edge.add_job(self.run)
    async def put_output_payload(self, output_name, payload):
        """Queue an (output_name, payload) pair and hand it to the edge
        for emission on the bus."""
        await self.output_q.put((output_name, payload))
        self.edge.wireload_emit_output_payload(output_name, self.emit_output_call, payload)
    def process(self, input_payload):
        """Need implemented"""
        raise NotImplementedError
    async def run(self):
        """Consume queued input payloads until is_stop is set."""
        while True:
            if self.is_stop:
                _LOGGER.debug("stop wireload------------")
                break
            input_payload = await self.input_q.get()
            # NOTE(review): process() is declared sync above but awaited
            # here -- subclasses must implement it as a coroutine.
            await self.process(input_payload)
            del input_payload
    async def emit_output_payload(self):
        """Pop one queued (output_name, payload) pair and fire it as a
        virtual-wire event on the bus."""
        output_payload = await self.output_q.get()
        try:
            if output_payload:
                event_type = "{}_{}_{}".format("virtual_wire_event", self.id, output_payload[0])
                self.edge.bus.async_fire(event_type, output_payload[1])
        except KeyError as e:
            # BUGFIX: Logger.warn is a deprecated alias; use warning().
            _LOGGER.warning("Cannot find output: {}".format(e))
class Event(object):
    # pylint: disable=too-few-public-methods
    """Represents an event within the Bus."""
    __slots__ = ['event_type', 'data', 'time_fired', 'context']
    def __init__(self, event_type: str, data: Optional[Dict] = None,
                 time_fired: Optional[int] = None,
                 context: Optional[Context] = None) -> None:
        """Initialize a new event."""
        self.event_type = event_type
        # BUGFIX: default to an empty dict -- as_dict() calls
        # dict(self.data), which raised TypeError whenever an event was
        # fired without a payload (data=None). Matches the upstream
        # home-assistant implementation this class is ported from.
        self.data = data or {}
        self.time_fired = time_fired or dt_util.utcnow()
        self.context = context or Context()
    def as_dict(self) -> Dict:
        """Create a dict representation of this Event."""
        return {
            'event_type': self.event_type,
            'data': dict(self.data),
            'time_fired': self.time_fired,
            'context': self.context.as_dict()
        }
    def __repr__(self) -> str:
        """Return the representation."""
        # pylint: disable=maybe-no-member
        if self.data:
            return "<Event {}: {}>".format(
                self.event_type,
                util.repr_helper(self.data))
        return "<Event {}>".format(self.event_type)
    def __eq__(self, other: Any) -> bool:
        """Return the comparison."""
        return (self.__class__ == other.__class__ and  # type: ignore
                self.event_type == other.event_type and
                self.data == other.data and
                self.time_fired == other.time_fired and
                self.context == other.context)
class EventBus(object):
    """Allows firing of and listening for events.
    NOTE: This part of code references home-assistant and chage a little.
    """
    def __init__(self, edge: MerceEdge) -> None:
        """Initialize a new event bus."""
        # Maps event_type -> list of listener callables.
        self._listeners = {}  # type: Dict[str, List[Callable]]
        self.edge = edge
    @callback
    def async_listeners(self) -> Dict[str, int]:
        """Dict with events and the number of listeners."""
        return {key: len(self._listeners[key])
                for key in self._listeners}
    @property
    def listeners(self) -> Dict[str, int]:
        """Dict with events and the number of listeners.

        Thread-safe variant of async_listeners: hops onto the event loop.
        """
        return run_callback_threadsafe(  # type: ignore
            self.edge.loop, self.async_listeners
        ).result()
    def fire(self, event_type: str, event_data: Optional[Dict] = None,
             context: Optional[Context] = None) -> None:
        """Fire an event."""
        # Thread-safe: defers to async_fire inside the loop.
        self.edge.loop.call_soon_threadsafe(
            self.async_fire, event_type, event_data, context)
    @callback
    def async_fire(self, event_type: str, event_data: Optional[Dict] = None,
                   context: Optional[Context] = None) -> None:
        """Fire an event.
        This method must be run in the event loop
        """
        listeners = self._listeners.get(event_type, [])
        # MATCH_ALL listeners receive every event in addition to the
        # type-specific listeners.
        match_all_listeners = self._listeners.get(MATCH_ALL)
        if (match_all_listeners is not None):
            listeners = match_all_listeners + listeners
        event = Event(event_type, event_data, None, context)
        if not listeners:
            return
        for func in listeners:
            self.edge.async_add_job(func, event)
    def listen(
            self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
        """Listen for all events or events of a specific type.
        To listen to all events specify the constant ``MATCH_ALL``
        as event_type.
        """
        # Thread-safe wrapper around async_listen; returns an unsubscribe
        # callable that is itself safe to call from any thread.
        async_remove_listener = run_callback_threadsafe(
            self.edge.loop, self.async_listen, event_type, listener).result()
        def remove_listener() -> None:
            """Remove the listener."""
            run_callback_threadsafe(
                self.edge.loop, async_remove_listener).result()
        return remove_listener
    @callback
    def async_listen(self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
        """Listen for all events or events of a specific type.
        To listen to all events specify the constant ``MATCH_ALL``
        as event_type.
        This method must be run in the event loop.
        """
        if event_type in self._listeners:
            self._listeners[event_type].append(listener)
        else:
            self._listeners[event_type] = [listener]
        def remove_listener() -> None:
            """Remove the listener."""
            self._async_remove_listener(event_type, listener)
        return remove_listener
    def listen_once(
            self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
        """Listen once for event of a specific type.
        To listen to all events specify the constant ``MATCH_ALL``
        as event_type.
        Returns function to unsubscribe the listener.
        """
        # Thread-safe wrapper around async_listen_once.
        async_remove_listener = run_callback_threadsafe(
            self.edge.loop, self.async_listen_once, event_type, listener,
        ).result()
        def remove_listener() -> None:
            """Remove the listener."""
            run_callback_threadsafe(
                self.edge.loop, async_remove_listener).result()
        return remove_listener
    @callback
    def async_listen_once(
            self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
        """Listen once for event of a specific type.
        To listen to all events specify the constant ``MATCH_ALL``
        as event_type.
        Returns registered listener that can be used with remove_listener.
        This method must be run in the event loop.
        """
        @callback
        def onetime_listener(event: Event) -> None:
            """Remove listener from event bus and then fire listener."""
            if hasattr(onetime_listener, 'run'):
                return
            # Set variable so that we will never run twice.
            # Because the event bus loop might have async_fire queued multiple
            # times, its possible this listener may already be lined up
            # multiple times as well.
            # This will make sure the second time it does nothing.
            setattr(onetime_listener, 'run', True)
            self._async_remove_listener(event_type, onetime_listener)
            self.edge.async_run_job(listener, event)
        return self.async_listen(event_type, onetime_listener)
    @callback
    def _async_remove_listener(
            self, event_type: str, listener: Callable) -> None:
        """Remove a listener of a specific event_type.
        This method must be run in the event loop.
        """
        try:
            self._listeners[event_type].remove(listener)
            # delete event_type list if empty
            if not self._listeners[event_type]:
                self._listeners.pop(event_type)
        except (KeyError, ValueError):
            # KeyError is key event_type listener did not exist
            # ValueError if listener did not exist within event_type
            _LOGGER.warning("Unable to remove unknown listener %s", listener)
    def _async_create_timer(edge) -> None:
        """Create and immediately start a once-per-second timer.

        Fires EVENT_TIME_CHANGED every second and stops on EVENT_EDGE_STOP.
        """
        # Handle of the currently scheduled call_later tick, shared via nonlocal.
        handle = None
        def schedule_tick(now: datetime.datetime) -> None:
            """Schedule a timer tick when the next second rolls around."""
            nonlocal handle
            # Sleep exactly until the next whole second boundary.
            slp_seconds = 1 - (now.microsecond / 10**6)
            target = monotonic() + slp_seconds
            handle = edge.loop.call_later(slp_seconds, fire_time_event, target)
        @callback
        def fire_time_event(target: float) -> None:
            """Fire next time event."""
            now = dt_util.utcnow()
            edge.bus.async_fire(EVENT_TIME_CHANGED,
                                {ATTR_NOW: now})
            # If we are more than a second late, a tick was missed
            late = monotonic() - target
            if late > 1:
                edge.bus.async_fire(EVENT_TIMER_OUT_OF_SYNC,
                                    {ATTR_SECONDS: late})
            schedule_tick(now)
        @callback
        def stop_timer(_: Event) -> None:
            """Stop the timer."""
            if handle is not None:
                handle.cancel()
        edge.bus.async_listen_once(EVENT_EDGE_STOP, stop_timer)
        _LOGGER.info("Timer:starting")
        schedule_tick(dt_util.utcnow()) | [
"merceedge.providers.ServiceProviderFactory.get_provider",
"asyncio.iscoroutinefunction",
"merceedge.api_server.models.ComponentDBModel.query.all",
"merceedge.util.id.generte_unique_id",
"merceedge.util.repr_helper",
"sys.exit",
"merceedge.util.async_util.Context",
"merceedge.util.yaml.load_yaml",
"... | [((593, 619), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (609, 619), False, 'import os\n'), ((18680, 18707), 'merceedge.util.id.generte_unique_id', 'id_util.generte_unique_id', ([], {}), '()\n', (18705, 18707), True, 'import merceedge.util.id as id_util\n'), ((1907, 1931), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1929, 1931), False, 'import asyncio\n'), ((2132, 2167), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {}), '(**executor_opts)\n', (2150, 2167), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((2496, 2517), 'merceedge.service.ServiceRegistry', 'ServiceRegistry', (['self'], {}), '(self)\n', (2511, 2517), False, 'from merceedge.service import ServiceRegistry\n'), ((9194, 9222), 'merceedge.api_server.models.ComponentDBModel.query.all', 'ComponentDBModel.query.all', ([], {}), '()\n', (9220, 9222), False, 'from merceedge.api_server.models import ComponentDBModel, WireDBModel\n'), ((9493, 9516), 'merceedge.api_server.models.WireDBModel.query.all', 'WireDBModel.query.all', ([], {}), '()\n', (9514, 9516), False, 'from merceedge.api_server.models import ComponentDBModel, WireDBModel\n'), ((10275, 10308), 'merceedge.util.yaml.load_yaml', 'yaml_util.load_yaml', (['formula_path'], {}), '(formula_path)\n', (10294, 10308), True, 'import merceedge.util.yaml as yaml_util\n'), ((12750, 12783), 'asyncio.iscoroutine', 'asyncio.iscoroutine', (['check_target'], {}), '(check_target)\n', (12769, 12783), False, 'import asyncio\n'), ((16128, 16143), 'asyncio.Event', 'asyncio.Event', ([], {}), '()\n', (16141, 16143), False, 'import asyncio\n'), ((16186, 16222), 'merceedge.util.signal.async_register_signal_handling', 'async_register_signal_handling', (['self'], {}), '(self)\n', (16216, 16222), False, 'from merceedge.util.signal import async_register_signal_handling\n'), ((24376, 24392), 'json.dumps', 'json.dumps', (['info'], {}), '(info)\n', (24386, 24392), False, 'import 
json\n'), ((28204, 28249), 'asyncio.Queue', 'asyncio.Queue', ([], {'maxsize': '(3)', 'loop': 'self.edge.loop'}), '(maxsize=3, loop=self.edge.loop)\n', (28217, 28249), False, 'import asyncio\n'), ((28274, 28319), 'asyncio.Queue', 'asyncio.Queue', ([], {'maxsize': '(3)', 'loop': 'self.edge.loop'}), '(maxsize=3, loop=self.edge.loop)\n', (28287, 28319), False, 'import asyncio\n'), ((37792, 37808), 'merceedge.util.dt.utcnow', 'dt_util.utcnow', ([], {}), '()\n', (37806, 37808), True, 'import merceedge.util.dt as dt_util\n'), ((38424, 38440), 'merceedge.util.dt.utcnow', 'dt_util.utcnow', ([], {}), '()\n', (38438, 38440), True, 'import merceedge.util.dt as dt_util\n'), ((12863, 12888), 'merceedge.util.async_util.is_callback', 'is_callback', (['check_target'], {}), '(check_target)\n', (12874, 12888), False, 'from merceedge.util.async_util import Context, callback, is_callback, run_callback_threadsafe, run_coroutine_threadsafe, fire_coroutine_threadsafe, CALLBACK_TYPE, T\n'), ((13751, 13770), 'merceedge.util.async_util.is_callback', 'is_callback', (['target'], {}), '(target)\n', (13762, 13770), False, 'from merceedge.util.async_util import Context, callback, is_callback, run_callback_threadsafe, run_coroutine_threadsafe, fire_coroutine_threadsafe, CALLBACK_TYPE, T\n'), ((15361, 15377), 'asyncio.sleep', 'asyncio.sleep', (['(0)'], {}), '(0)\n', (15374, 15377), False, 'import asyncio\n'), ((16672, 16693), 'threading.get_ident', 'threading.get_ident', ([], {}), '()\n', (16691, 16693), False, 'import threading\n'), ((17445, 17461), 'asyncio.sleep', 'asyncio.sleep', (['(0)'], {}), '(0)\n', (17458, 17461), False, 'import asyncio\n'), ((19392, 19419), 'merceedge.util.id.generte_unique_id', 'id_util.generte_unique_id', ([], {}), '()\n', (19417, 19419), True, 'import merceedge.util.id as id_util\n'), ((22770, 22820), 'merceedge.providers.ServiceProviderFactory.get_provider', 'ServiceProviderFactory.get_provider', (['self.protocol'], {}), '(self.protocol)\n', (22805, 22820), False, 
'from merceedge.providers import ServiceProviderFactory\n'), ((24683, 24733), 'merceedge.providers.ServiceProviderFactory.get_provider', 'ServiceProviderFactory.get_provider', (['self.protocol'], {}), '(self.protocol)\n', (24718, 24733), False, 'from merceedge.providers import ServiceProviderFactory\n'), ((25587, 25614), 'merceedge.util.id.generte_unique_id', 'id_util.generte_unique_id', ([], {}), '()\n', (25612, 25614), True, 'import merceedge.util.id as id_util\n'), ((27497, 27525), 'os.path.join', 'os.path.join', (['dir_path', 'path'], {}), '(dir_path, path)\n', (27509, 27525), False, 'import os\n'), ((27548, 27588), 'merceedge.util.module.load_modules', 'module_util.load_modules', (['path', 'WireLoad'], {}), '(path, WireLoad)\n', (27572, 27588), True, 'import merceedge.util.module as module_util\n'), ((30425, 30441), 'merceedge.util.dt.utcnow', 'dt_util.utcnow', ([], {}), '()\n', (30439, 30441), True, 'import merceedge.util.dt as dt_util\n'), ((30476, 30485), 'merceedge.util.async_util.Context', 'Context', ([], {}), '()\n', (30483, 30485), False, 'from merceedge.util.async_util import Context, callback, is_callback, run_callback_threadsafe, run_coroutine_threadsafe, fire_coroutine_threadsafe, CALLBACK_TYPE, T\n'), ((37577, 37588), 'time.monotonic', 'monotonic', ([], {}), '()\n', (37586, 37588), False, 'from time import monotonic\n'), ((37982, 37993), 'time.monotonic', 'monotonic', ([], {}), '()\n', (37991, 37993), False, 'from time import monotonic\n'), ((5265, 5310), 'merceedge.exceptions.MerceEdgeError', 'MerceEdgeError', (['"""config.yaml foramt invalide"""'], {}), "('config.yaml foramt invalide')\n", (5279, 5310), False, 'from merceedge.exceptions import MerceEdgeError, ComponentTemplateNotFound\n'), ((5556, 5591), 'os.listdir', 'os.listdir', (['component_template_path'], {}), '(component_template_path)\n', (5566, 5591), False, 'import os\n'), ((5715, 5761), 'os.path.join', 'join', (['component_template_path', 'template_config'], {}), 
'(component_template_path, template_config)\n', (5719, 5761), False, 'from os.path import join\n'), ((11668, 11680), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (11676, 11680), False, 'import sys\n'), ((12950, 12991), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['check_target'], {}), '(check_target)\n', (12977, 12991), False, 'import asyncio\n'), ((13719, 13746), 'asyncio.iscoroutine', 'asyncio.iscoroutine', (['target'], {}), '(target)\n', (13738, 13746), False, 'import asyncio\n'), ((16889, 16900), 'async_timeout.timeout', 'timeout', (['(15)'], {}), '(15)\n', (16896, 16900), False, 'from async_timeout import timeout\n'), ((31044, 31071), 'merceedge.util.repr_helper', 'util.repr_helper', (['self.data'], {}), '(self.data)\n', (31060, 31071), True, 'import merceedge.util as util\n'), ((32185, 32246), 'merceedge.util.async_util.run_callback_threadsafe', 'run_callback_threadsafe', (['self.edge.loop', 'self.async_listeners'], {}), '(self.edge.loop, self.async_listeners)\n', (32208, 32246), False, 'from merceedge.util.async_util import Context, callback, is_callback, run_callback_threadsafe, run_coroutine_threadsafe, fire_coroutine_threadsafe, CALLBACK_TYPE, T\n'), ((33732, 33817), 'merceedge.util.async_util.run_callback_threadsafe', 'run_callback_threadsafe', (['self.edge.loop', 'self.async_listen', 'event_type', 'listener'], {}), '(self.edge.loop, self.async_listen, event_type, listener\n )\n', (33755, 33817), False, 'from merceedge.util.async_util import Context, callback, is_callback, run_callback_threadsafe, run_coroutine_threadsafe, fire_coroutine_threadsafe, CALLBACK_TYPE, T\n'), ((35043, 35132), 'merceedge.util.async_util.run_callback_threadsafe', 'run_callback_threadsafe', (['self.edge.loop', 'self.async_listen_once', 'event_type', 'listener'], {}), '(self.edge.loop, self.async_listen_once, event_type,\n listener)\n', (35066, 35132), False, 'from merceedge.util.async_util import Context, callback, is_callback, run_callback_threadsafe, 
run_coroutine_threadsafe, fire_coroutine_threadsafe, CALLBACK_TYPE, T\n'), ((5097, 5159), 'os.path.join', 'os.path.join', (["os.environ['MERCE_EDGE_HOME']", '"""merceedge"""', 'path'], {}), "(os.environ['MERCE_EDGE_HOME'], 'merceedge', path)\n", (5109, 5159), False, 'import os\n'), ((15744, 15765), 'asyncio.wait', 'asyncio.wait', (['pending'], {}), '(pending)\n', (15756, 15765), False, 'import asyncio\n'), ((15850, 15866), 'asyncio.sleep', 'asyncio.sleep', (['(0)'], {}), '(0)\n', (15863, 15866), False, 'import asyncio\n'), ((33926, 33988), 'merceedge.util.async_util.run_callback_threadsafe', 'run_callback_threadsafe', (['self.edge.loop', 'async_remove_listener'], {}), '(self.edge.loop, async_remove_listener)\n', (33949, 33988), False, 'from merceedge.util.async_util import Context, callback, is_callback, run_callback_threadsafe, run_coroutine_threadsafe, fire_coroutine_threadsafe, CALLBACK_TYPE, T\n'), ((35252, 35314), 'merceedge.util.async_util.run_callback_threadsafe', 'run_callback_threadsafe', (['self.edge.loop', 'async_remove_listener'], {}), '(self.edge.loop, async_remove_listener)\n', (35275, 35314), False, 'from merceedge.util.async_util import Context, callback, is_callback, run_callback_threadsafe, run_coroutine_threadsafe, fire_coroutine_threadsafe, CALLBACK_TYPE, T\n'), ((4099, 4137), 'asyncio.Task.all_tasks', 'asyncio.Task.all_tasks', ([], {'loop': 'self.loop'}), '(loop=self.loop)\n', (4121, 4137), False, 'import asyncio\n')] |
#!/usr/bin/env python
# Legacy Django management entry point (pre-1.4 project layout).
# NOTE(review): execute_manager was deprecated in Django 1.4 and removed
# in 1.6 -- this script only works on old Django versions.
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory"
        " containing %r. It appears you've customized things.\n"
        "You'll have to run django-admin.py, passing it your settings module.\n"
        "(If the file settings.py does indeed exist,"
        "it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    execute_manager(settings) | [
"django.core.management.execute_manager",
"sys.stderr.write",
"sys.exit"
] | [((524, 549), 'django.core.management.execute_manager', 'execute_manager', (['settings'], {}), '(settings)\n', (539, 549), False, 'from django.core.management import execute_manager\n'), ((167, 472), 'sys.stderr.write', 'sys.stderr.write', (['("""Error: Can\'t find the file \'settings.py\' in the directory containing %r. It appears you\'ve customized things.\nYou\'ll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist,it\'s causing an ImportError somehow.)\n"""\n % __file__)'], {}), '(\n """Error: Can\'t find the file \'settings.py\' in the directory containing %r. It appears you\'ve customized things.\nYou\'ll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist,it\'s causing an ImportError somehow.)\n"""\n % __file__)\n', (183, 472), False, 'import sys\n'), ((483, 494), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (491, 494), False, 'import sys\n')] |
import os
import sys
import shutil
import asyncio
import aioboto3
from glob import glob
from PIL import Image
from fnmatch import fnmatch
from src.secrets import (
SPACES_REGION,
SPACES_BUCKET,
SPACES_PREFIX,
SPACES_ENDPOINT_URL,
SPACES_ACCESS_KEY,
SPACES_SECRET_KEY
)
from src.format import (
get_filename,
get_image_id
)
from src.logger import logger
LOCAL_IMAGES_PATH = sys.path[0]
async def download_file(key, bucket):
    """Download a single S3 object, mirroring its key as the local path.

    Keys ending in '/' denote folder markers: the local directory is
    created instead of downloading anything.
    """
    if key.endswith('/'):
        # Folder marker: just make sure the local directory exists.
        if not os.path.exists(key):
            os.makedirs(key)
    else:
        await bucket.download_file(key, key)
async def download_files(bucket, prefix):
    """Concurrently download every object under *prefix* from *bucket*.

    Local paths mirror the S3 keys relative to the working directory.
    """
    async with aioboto3.resource('s3',
                                 region_name=SPACES_REGION,
                                 endpoint_url=SPACES_ENDPOINT_URL,
                                 aws_access_key_id=SPACES_ACCESS_KEY,
                                 aws_secret_access_key=SPACES_SECRET_KEY) as resource:
        bucket = await resource.Bucket(bucket)
        # One task per listed object; the async comprehension walks the
        # paginated listing lazily.
        tasks = [asyncio.ensure_future(download_file(s3_obj.key, bucket)) async for s3_obj in
                 bucket.objects.filter(Prefix=prefix)]
        await asyncio.gather(*tasks)
async def download_images():
    """Download every image under SPACES_PREFIX from the configured bucket.

    Raises:
        Exception: re-raised after logging when the download fails.
    """
    try:
        await download_files(SPACES_BUCKET, SPACES_PREFIX)
        # Fixed: the original used an f-string with no placeholders (F541).
        logger.info('Images from S3 have been downloaded successfully')
    except Exception as error:
        logger.error(f'Error to download images from S3: {error}')
        raise
async def upload_file(subdir, file, image, bucket):
    """Upload one resized file whose name matches '<height>*.jpg'.

    The S3 key is the file path relative to LOCAL_IMAGES_PATH.
    """
    # Only upload variants produced for this image's target height.
    if not fnmatch(file, f'{image.height}*.jpg'):
        return
    full_path = os.path.join(subdir, file)
    key = full_path[len(LOCAL_IMAGES_PATH) + 1:]
    with open(full_path, 'rb') as data:
        await bucket.put_object(ACL='public-read', Key=key, Body=data,
                                ContentType='image/jpg')
async def upload_files(bucket, prefix, image):
    """Concurrently upload every resized variant of *image* under *prefix*.

    Walks the local prefix directory and schedules one task per file;
    upload_file() itself filters on the '<height>*.jpg' pattern.
    """
    tasks = []
    async with aioboto3.resource('s3',
                                 region_name=SPACES_REGION,
                                 endpoint_url=SPACES_ENDPOINT_URL,
                                 aws_access_key_id=SPACES_ACCESS_KEY,
                                 aws_secret_access_key=SPACES_SECRET_KEY) as resource:
        bucket = await resource.Bucket(bucket)
        for subdir, dirs, files in os.walk(LOCAL_IMAGES_PATH + f'/{prefix}'):
            for file in files:
                tasks.append(asyncio.ensure_future(upload_file(subdir, file, image, bucket)))
        await asyncio.gather(*tasks)
async def upload_images(image):
    """Upload all resized variants of *image* to S3, logging the outcome.

    Raises:
        Exception: re-raised after logging when the upload fails.
    """
    try:
        await upload_files(SPACES_BUCKET, SPACES_PREFIX, image)
        logger.info('Images have been uploaded successfully into S3')
    except Exception as error:
        logger.error(f'Error to upload new images sizes to S3: {error}')
        raise
async def get_local_images():
    """Load every locally stored 720px source image.

    Returns a list of dicts with keys 'content' (PIL image),
    'image_id' and 'filename'.
    NOTE(review): declared async but performs no await; callers must
    still await it.
    """
    pattern = LOCAL_IMAGES_PATH + f'/{SPACES_PREFIX}/*/720*.jpg'
    return [
        {
            "content": Image.open(filename),
            "image_id": get_image_id(filename),
            "filename": get_filename(filename)
        }
        for filename in glob(pattern)
    ]
async def save_local_images(resized_images):
    """Write each resized image into its image-id directory.

    Args:
        resized_images: iterable of dicts with 'content' (PIL image),
            'image_id' and 'filename' keys.

    Raises:
        Exception: re-raised after logging on any save failure.
    """
    try:
        # Fixed: the original enumerate() produced an unused index.
        for new_image in resized_images:
            new_image['content'].save('{}/{}{}/{}{}'.format(LOCAL_IMAGES_PATH, SPACES_PREFIX, new_image['image_id'],
                                                            new_image['content'].height, new_image['filename']))
    except Exception as error:
        logger.error(f'Error to save images in local directories: {error}')
        raise
async def remove_local_images():
    """Delete the local images directory tree, if present.

    NOTE(review): removes the hard-coded 'test' subdirectory, not
    SPACES_PREFIX -- confirm this is intended outside of tests.

    Raises:
        shutil.Error: re-raised after logging when removal fails.
    """
    path = os.path.join(LOCAL_IMAGES_PATH, 'test')
    try:
        if os.path.exists(path):
            shutil.rmtree(path)
        logger.info('Local images directory has been removed successfully')
    except shutil.Error as error:
        logger.error(f'Error to remove local images directory: {error}')
        raise | [
"os.path.exists",
"PIL.Image.open",
"asyncio.gather",
"os.makedirs",
"src.format.get_filename",
"os.walk",
"os.path.join",
"src.format.get_image_id",
"shutil.rmtree",
"src.logger.logger.error",
"fnmatch.fnmatch",
"src.logger.logger.info",
"aioboto3.resource",
"glob.glob"
] | [((1537, 1574), 'fnmatch.fnmatch', 'fnmatch', (['file', 'f"""{image.height}*.jpg"""'], {}), "(file, f'{image.height}*.jpg')\n", (1544, 1574), False, 'from fnmatch import fnmatch\n'), ((2872, 2928), 'glob.glob', 'glob', (["(LOCAL_IMAGES_PATH + f'/{SPACES_PREFIX}/*/720*.jpg')"], {}), "(LOCAL_IMAGES_PATH + f'/{SPACES_PREFIX}/*/720*.jpg')\n", (2876, 2928), False, 'from glob import glob\n'), ((3671, 3710), 'os.path.join', 'os.path.join', (['LOCAL_IMAGES_PATH', '"""test"""'], {}), "(LOCAL_IMAGES_PATH, 'test')\n", (3683, 3710), False, 'import os\n'), ((653, 824), 'aioboto3.resource', 'aioboto3.resource', (['"""s3"""'], {'region_name': 'SPACES_REGION', 'endpoint_url': 'SPACES_ENDPOINT_URL', 'aws_access_key_id': 'SPACES_ACCESS_KEY', 'aws_secret_access_key': 'SPACES_SECRET_KEY'}), "('s3', region_name=SPACES_REGION, endpoint_url=\n SPACES_ENDPOINT_URL, aws_access_key_id=SPACES_ACCESS_KEY,\n aws_secret_access_key=SPACES_SECRET_KEY)\n", (670, 824), False, 'import aioboto3\n'), ((1300, 1364), 'src.logger.logger.info', 'logger.info', (['f"""Images from S3 have been downloaded successfully"""'], {}), "(f'Images from S3 have been downloaded successfully')\n", (1311, 1364), False, 'from src.logger import logger\n'), ((1596, 1622), 'os.path.join', 'os.path.join', (['subdir', 'file'], {}), '(subdir, file)\n', (1608, 1622), False, 'import os\n'), ((1916, 2087), 'aioboto3.resource', 'aioboto3.resource', (['"""s3"""'], {'region_name': 'SPACES_REGION', 'endpoint_url': 'SPACES_ENDPOINT_URL', 'aws_access_key_id': 'SPACES_ACCESS_KEY', 'aws_secret_access_key': 'SPACES_SECRET_KEY'}), "('s3', region_name=SPACES_REGION, endpoint_url=\n SPACES_ENDPOINT_URL, aws_access_key_id=SPACES_ACCESS_KEY,\n aws_secret_access_key=SPACES_SECRET_KEY)\n", (1933, 2087), False, 'import aioboto3\n'), ((2306, 2347), 'os.walk', 'os.walk', (["(LOCAL_IMAGES_PATH + f'/{prefix}')"], {}), "(LOCAL_IMAGES_PATH + f'/{prefix}')\n", (2313, 2347), False, 'import os\n'), ((2625, 2686), 'src.logger.logger.info', 
'logger.info', (['"""Images have been uploaded successfully into S3"""'], {}), "('Images have been uploaded successfully into S3')\n", (2636, 2686), False, 'from src.logger import logger\n'), ((2944, 2964), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (2954, 2964), False, 'from PIL import Image\n'), ((3731, 3751), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3745, 3751), False, 'import os\n'), ((3793, 3860), 'src.logger.logger.info', 'logger.info', (['"""Local images directory has been removed successfully"""'], {}), "('Local images directory has been removed successfully')\n", (3804, 3860), False, 'from src.logger import logger\n'), ((549, 568), 'os.path.exists', 'os.path.exists', (['key'], {}), '(key)\n', (563, 568), False, 'import os\n'), ((578, 594), 'os.makedirs', 'os.makedirs', (['key'], {}), '(key)\n', (589, 594), False, 'import os\n'), ((1171, 1193), 'asyncio.gather', 'asyncio.gather', (['*tasks'], {}), '(*tasks)\n', (1185, 1193), False, 'import asyncio\n'), ((1404, 1462), 'src.logger.logger.error', 'logger.error', (['f"""Error to download images from S3: {error}"""'], {}), "(f'Error to download images from S3: {error}')\n", (1416, 1462), False, 'from src.logger import logger\n'), ((2488, 2510), 'asyncio.gather', 'asyncio.gather', (['*tasks'], {}), '(*tasks)\n', (2502, 2510), False, 'import asyncio\n'), ((2726, 2790), 'src.logger.logger.error', 'logger.error', (['f"""Error to upload new images sizes to S3: {error}"""'], {}), "(f'Error to upload new images sizes to S3: {error}')\n", (2738, 2790), False, 'from src.logger import logger\n'), ((3035, 3057), 'src.format.get_image_id', 'get_image_id', (['filename'], {}), '(filename)\n', (3047, 3057), False, 'from src.format import get_filename, get_image_id\n'), ((3083, 3105), 'src.format.get_filename', 'get_filename', (['filename'], {}), '(filename)\n', (3095, 3105), False, 'from src.format import get_filename, get_image_id\n'), ((3544, 3611), 'src.logger.logger.error', 
'logger.error', (['f"""Error to save images in local directories: {error}"""'], {}), "(f'Error to save images in local directories: {error}')\n", (3556, 3611), False, 'from src.logger import logger\n'), ((3765, 3784), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (3778, 3784), False, 'import shutil\n'), ((3903, 3967), 'src.logger.logger.error', 'logger.error', (['f"""Error to remove local images directory: {error}"""'], {}), "(f'Error to remove local images directory: {error}')\n", (3915, 3967), False, 'from src.logger import logger\n')] |
import re
import operator
from collections import namedtuple
SCHEMA_TYPES = {'str', 'int', 'bool'}
ROWID_KEY = '_rowid'
class Literal(namedtuple('Literal', 'value')):
    """A literal value parsed from query text (bool, int, or quoted str)."""

    @classmethod
    def eval_value(cls, value):
        """Parse *value* (query source text) into a Python object.

        Accepts 'True'/'False', integer literals, and any other literal
        syntax such as quoted strings.

        Raises:
            ValueError: if *value* is not a str or not a valid literal.
        """
        if not isinstance(value, str):
            raise ValueError(f"Parameter {value} must be a str")
        # Security fix: the original used eval(), which executes arbitrary
        # expressions taken from query text.  ast.literal_eval only
        # accepts literal syntax (bools, numbers, strings, containers).
        import ast
        try:
            return ast.literal_eval(value)
        except (ValueError, SyntaxError):
            # Also fixes the 'Paramater' typo in the original message.
            raise ValueError(f'Parameter {value} is not valid')

    def __new__(cls, value):
        evaled_value = cls.eval_value(value)
        return super().__new__(cls, evaled_value)
class Column(namedtuple('Column', 'name')):
    """A reference to a table column by name, used in comparisons."""
class Comparison(namedtuple('Comparison', 'left, op, right')):
    """A binary comparison between Column/Literal operands.

    Bug fixed: the error message for an invalid *right* operand
    previously reported ``self.left`` instead of ``self.right``.
    """

    # Supported comparison operators, keyed by their query syntax.
    ops = {
        '=': operator.eq,
        '!=': operator.ne,
        '>': operator.gt,
        '<': operator.lt,
        '<=': operator.le,
        '>=': operator.ge
    }

    @staticmethod
    def _operand_value(operand, row, side):
        """Resolve an operand to a concrete value.

        Columns are looked up in *row* (re-parsed through Literal);
        Literals yield their stored value directly.
        """
        if type(operand) is Column:
            return Literal(row[operand.name]).value
        if type(operand) is Literal:
            return operand.value
        raise ValueError(f'Invalid {side} value type; {operand}')

    def match(self, row):
        """Return True when *row* satisfies this comparison."""
        left = self._operand_value(self.left, row, 'left')
        right = self._operand_value(self.right, row, 'right')
        return self.ops[self.op](left, right)
class ConditionList(namedtuple('ConditionList', 'comp_type, comparisons')):
    """A boolean combination ('and'/'or') of Comparison objects."""

    # Combinator name -> aggregate function over the comparison results.
    types = {'or': any, 'and': all}

    def match(self, row):
        """Return True when *row* satisfies the combined conditions."""
        if not self.comp_type:
            # No combinator means no conditions: every row matches.
            return True
        combine = self.types[self.comp_type]
        return combine(comparison.match(row) for comparison in self.comparisons)
class CreateDbCmd(namedtuple('CreateDbCmd', 'name')):
    """Create a new database."""

    def execute(self, db_manager):
        """Ask the manager to create database *name*."""
        db_manager.create_db(self.name)
class UseDbCmd(namedtuple('UseDbCmd', 'name')):
    """Switch the manager's current database."""

    def execute(self, db_manager):
        """Ask the manager to select database *name*."""
        db_manager.use_db(self.name)
class DeleteDbCmd(namedtuple('DeleteDbCmd', 'name')):
    """Delete an existing database."""

    def execute(self, db_manager):
        """Ask the manager to delete database *name*."""
        db_manager.delete_db(self.name)
class CreateTableCmd(namedtuple('CreateTableCmd', 'name, schema')):
    """Create a table with the given column schema."""

    def validate(self):
        """Reject any column type outside SCHEMA_TYPES."""
        unknown_types = set(self.schema.values()) - SCHEMA_TYPES
        if unknown_types:
            raise CommandError(f'Only schema accepted types are {SCHEMA_TYPES}')

    def execute(self, db_manager):
        """Validate the schema, then create the table."""
        self.validate()
        db_manager.create_table(name=self.name, schema=self.schema)
class DeleteTableCmd(namedtuple('DeleteTableCmd', 'name')):
    """Delete an existing table."""

    def execute(self, db_manager):
        """Ask the manager to drop table *name*."""
        db_manager.delete_table(name=self.name)
class AddColumnCmd(namedtuple('AddColumnCmd', 'name, col_type, col_name')):
    """Add a typed column to an existing table."""

    def validate(self, db_manager):
        """Reject duplicate column names and unknown column types."""
        schema = db_manager.get_table_schema(table_name=self.name)
        if self.col_name in schema:
            raise CommandError(f'{self.col_name} col is already existing')
        if self.col_type not in SCHEMA_TYPES:
            raise CommandError(f'Only schema accepted types are {SCHEMA_TYPES}')

    def execute(self, db_manager):
        """Validate, then add the column."""
        self.validate(db_manager)
        db_manager.add_column(name=self.name,
                              col_name=self.col_name, col_type=self.col_type)
class DelColumnCmd(namedtuple('DelColumnCmd', 'name, col_name')):
    """Drop a column from an existing table."""

    def validate(self, db_manager):
        """Require the column to exist before dropping it."""
        schema = db_manager.get_table_schema(table_name=self.name)
        if self.col_name not in schema:
            raise CommandError(f'Col {self.col_name} does not exist')

    def execute(self, db_manager):
        """Validate, then drop the column."""
        self.validate(db_manager)
        db_manager.del_column(name=self.name, col_name=self.col_name)
def validate_cmd_row_values(schema={}, row={}):
    """Check that every value in *row* parses to its column's declared type.

    Args:
        schema: mapping of column name -> type name ('str'|'int'|'bool').
        row: mapping of column name -> raw value text.

    Raises:
        CommandError: when a value does not match its column type.
    """
    # Fixed: the original used eval(schema[...]) to resolve the type name;
    # an explicit map covers exactly SCHEMA_TYPES without code execution.
    type_by_name = {'str': str, 'int': int, 'bool': bool}
    for col_name, col_val in row.items():
        lit_val = Literal(col_val)
        needed_col_type = type_by_name[schema[col_name]]
        # NOTE(review): isinstance(True, int) is True, so a bool value
        # passes an 'int' column -- same behavior as the original.
        if not isinstance(lit_val.value, needed_col_type):
            raise CommandError(f'Col\'s {col_name} value {col_val} has to be {schema[col_name]}')
class InsertCmd(namedtuple('InsertCmd', 'table, row')):
    """Insert a single row into a table."""

    def validate(self, db_manager):
        """Require the row to carry exactly the table's columns, typed correctly."""
        schema = db_manager.get_table_schema(table_name=self.table)
        if self.row.keys() != schema.keys():
            raise CommandError(f'Schema {schema.keys()} is mandatory')
        validate_cmd_row_values(schema=schema, row=self.row)

    def execute(self, db_manager):
        """Validate, then store the row."""
        self.validate(db_manager)
        db_manager.insert_row(table=self.table, row=self.row)
def validate_cmd_conditions_list(schema={}, conditions_list=[]):
    """Check every comparison's column and literal against *schema*.

    Raises:
        CommandError: when a condition references an unknown column or
            compares it with a value of the wrong type.
    """
    type_by_name = {'str': str, 'int': int, 'bool': bool}
    for comparison in conditions_list.comparisons:
        col = comparison.left
        lit = comparison.right
        # Bug fixed: the original resolved schema[col.name] BEFORE checking
        # membership, so an unknown column raised KeyError instead of the
        # intended CommandError.  Also replaces eval() with a type map.
        if col.name not in schema:
            raise CommandError(f'Col {col.name} in conditions does not exist in schema')
        needed_col_type = type_by_name[schema[col.name]]
        if not isinstance(lit.value, needed_col_type):
            raise CommandError(f'Col\'s {col.name} value {lit.value} has to be {schema[col.name]}')
class QueryCmd(namedtuple('QueryCmd', 'table, projection, conditions_list')):
    """Project and filter rows of a table; execute() is a generator."""

    def validate(self, db_manager):
        """Check projection columns against the schema and validate conditions."""
        schema = db_manager.get_table_schema(table_name=self.table)
        if self.projection[0] != '*':
            unknown = set(self.projection) - set(schema.keys())
            if unknown:
                raise CommandError(f'Query projection is enforced by schema; Only {schema.keys()} or * are allowed')
        validate_cmd_conditions_list(schema=schema,
                                     conditions_list=self.conditions_list)

    def execute(self, db_manager):
        """Yield matching rows; each result carries its ROWID_KEY."""
        self.validate(db_manager)
        star_proj = len(self.projection) == 1 and self.projection[0] == '*'
        for row in db_manager.scan_rows(table=self.table):
            if not self.conditions_list.match(row):
                continue
            result_row = {ROWID_KEY: row[ROWID_KEY]}
            del row[ROWID_KEY]
            for key, val in row.items():
                # '*' keeps every column; otherwise only projected ones.
                if star_proj or key in self.projection:
                    result_row[key] = Literal(val).value
            yield result_row
class DeleteCmd(namedtuple('DeleteCmd', 'table, conditions_list')):
    """Delete every row matching the condition list."""

    def validate(self, db_manager):
        """Validate the conditions against the table schema."""
        schema = db_manager.get_table_schema(table_name=self.table)
        validate_cmd_conditions_list(schema, self.conditions_list)

    def execute(self, db_manager):
        """Validate, then delete all matching rows by rowid."""
        self.validate(db_manager)
        for row in db_manager.scan_rows(table=self.table):
            if self.conditions_list.match(row):
                # Consistency fix: use the shared ROWID_KEY constant
                # instead of the raw '_rowid' literal (as QueryCmd does).
                db_manager.delete_row(table=self.table, rowid=row[ROWID_KEY])
class UpdateCmd(namedtuple('UpdateCmd', 'table, values, conditions_list')):
    """Update matching rows with new column values."""

    def validate(self, db_manager):
        """Validate both the new values and the conditions against the schema."""
        schema = db_manager.get_table_schema(table_name=self.table)
        validate_cmd_row_values(schema=schema, row=self.values)
        validate_cmd_conditions_list(schema=schema,
                                     conditions_list=self.conditions_list)

    def execute(self, db_manager):
        """Validate, then rewrite every matching row."""
        self.validate(db_manager)
        for row in db_manager.scan_rows(table=self.table):
            if self.conditions_list.match(row):
                # Consistency fix: use the shared ROWID_KEY constant
                # instead of the raw '_rowid' literal (as QueryCmd does).
                db_manager.update_row(table=self.table,
                                      rowid=row[ROWID_KEY], new_row=self.values)
class FromCsvCmd(namedtuple('FromCsvCmd', 'csv_path')):
    """Import data from a CSV file."""

    def execute(self, db_manager):
        """Delegate the import to the database manager."""
        db_manager.from_csv(csv_path=self.csv_path)
class ToCsvCmd(namedtuple('ToCsvCmd', 'csv_path')):
    """Export data to a CSV file."""

    def execute(self, db_manager):
        """Delegate the export to the database manager."""
        db_manager.to_csv(csv_path=self.csv_path)
class SchemaCmd(namedtuple('SchemaCmd', 'table_name')):
    """Fetch the schema of a table.

    Bug fixed: the namedtuple typename was 'FromCsvCmd' (copy-paste),
    which produced a misleading repr; it now matches the class name.
    """

    def execute(self, db_manager):
        """Return the schema dict for *table_name*."""
        return db_manager.get_table_schema(self.table_name)
class TablesCmd(namedtuple('TablesCmd', 'db_name')):
    """List the tables of a database; execute() is a generator."""

    def execute(self, db_manager):
        """Yield each table name reported by the manager."""
        for table in db_manager.get_tables(db_name=self.db_name):
            yield table
class DbCmd(namedtuple('DbCmd', '')):
    """Report the manager's currently selected database."""

    def validate(self, db_manager):
        """No preconditions for showing the current database."""
        pass

    def execute(self, db_manager):
        """Return the name of the current database."""
        self.validate(db_manager)
        return db_manager.get_current_db()
class CommandError(Exception):
    """Raised when a query cannot be parsed or fails validation."""

    def __init__(self, message):
        # Zero-argument super() is equivalent in Python 3.
        super().__init__(message)
class QueryParser(object):
re_db_create = re.compile(r'^create\s+sdb\s+(?P<name>\w+);$')
re_db_use = re.compile(r'^use\s+sdb\s+(?P<name>\w+);$')
re_db_delete = re.compile(r'^delete\s+sdb\s+(?P<name>\w+);$')
re_table_create_main = re.compile(r'^create\s+table\s+(?P<name>\w+)\s+columns\s+(?P<columns>((int|str|bool):(\w+)\s?)+);$')
re_table_create_col = re.compile(r'(int|str|bool):(\w+)')
re_table_delete = re.compile(r'^delete\s+table\s+(?P<name>\w+);$')
re_table_add_column = re.compile(r'^change\s+table\s+(?P<name>\w+)\s+add\s+column\s+(?P<col_type>int|str|bool):(?P<col_name>\w+);$')
re_table_del_column = re.compile(r'^change\s+table\s+(?P<name>\w+)\s+del\s+column\s+(?P<col_name>\w+);$')
re_table_insert_main = re.compile(r'^insert\s+into\s+(?P<table_name>\w+)\s+values\s+(?P<values>(\w+=(True|False|\d+?|\"(\w|[\/\<\>:`~.,?!@;\'#$%\^&*\-_+=\[\{\]\}\\\|()\ ])*?\")\s?)+?);$')
re_table_values = re.compile(r'(\w+)=(True|False|(\d+)|\"([A-Za-z0-9\/\<\>\:\`\~\.\,\?\!\@\;\'\#\$\%\^\&\*\-\_\+\=\[\{\]\}\\\|\(\)\ ])*?\")')
re_where_conditions = re.compile(r'(?P<col_name>\w+?)(?P<op>=|!=|<|>|<=|>=)(?P<value>(\d+)|(True|False)|\"([A-Za-z0-9\/\<\>\:\`\~\.\,\?\!\@\;\'\#\$\%\^\&\*\-\_\+\=\[\{\]\}\\\|\(\)\ ])*?\")')
re_table_scan_rows = re.compile(r'^query\s+(?P<projection>\*|(\w+\,?)+?)\s+(?P<table_name>\w+)(\s+where\s+op:(?P<op>or|and)\s+conditions\s+(?P<conditions>((\w+?)(=|!=|<|>|<=|>=)((\d+?)|(True|False)|\"([A-Za-z0-9\/\<\>\:\`\~\.\,\?\!\@\;\'\#\$\%\^\&\*\-\_\+\=\[\{\]\}\\\|\(\)\ ])*?\")(\s+)?)+))?;$')
re_table_update_rows = re.compile(r'^update\s+(?P<table_name>\w+)\s+set\s+(?P<setters>(((\w+)=(True|False|(\d+)|\"([A-Za-z0-9\/\<\>\:\`\~\.\,\?\!\@\;\'\#\$\%\^\&\*\-\_\+\=\[\{\]\}\\\|\(\)\ ])*?\"))\s?)+)(\s+where\s+op:(?P<op>or|and)\s+conditions\s+(?P<conditions>((\w+?)(=|!=|<|>|<=|>=)((\d+?)|(True|False)|\"([A-Za-z0-9\/\<\>\:\`\~\.\,\?\!\@\;\'\#\$\%\^\&\*\-\_\+\=\[\{\]\}\\\|\(\)\ ])*?\")(\s+)?)+))?;$')
re_table_delete_rows = re.compile(r'^delete\s+in\s+(?P<table_name>\w+)(\s+where\s+op:(?P<op>or|and)\s+conditions\s+(?P<conditions>((\w+?)(=|!=|<|>|<=|>=)((\d+?)|(True|False)|\"([A-Za-z0-9\/\<\>\:\`\~\.\,\?\!\@\;\'\#\$\%\^\&\*\-\_\+\=\[\{\]\}\\\|\(\)\ ])*?\")(\s+)?)+))?;$')
re_from_csv = re.compile(r'^from\s+csv\s+(?P<csv_path>[^ ]+?\.csv)\s*?;$')
re_to_csv = re.compile(r'^to\s+csv\s+(?P<csv_path>[^ ]+?\.csv)\s*?;$')
re_schema = re.compile(r'^schema\s+(?P<table_name>\w+)\s*?;$')
re_tables = re.compile(r'^tables\s+(?P<db_name>\w+)\s*?;$')
re_db = re.compile(r'^db\s*?;$')
def __init__(self):
pass
def _get_parse_methods(self):
for meth_name in dir(self.__class__):
meth = getattr(self.__class__, meth_name)
if meth_name.startswith('_parse') and callable(meth):
yield meth
def parse(self, query):
for meth in self._get_parse_methods():
rv = meth(self, query)
if rv is not None:
return rv
raise CommandError('No command matches; fix or retry (another) query')
def _parse_db_create(self, query):
result = self.re_db_create.fullmatch(query)
if not result:
return
return CreateDbCmd(name=result.group('name'))
def _parse_db_use(self, query):
result = self.re_db_use.fullmatch(query)
if not result:
return
return UseDbCmd(name=result.group('name'))
def _parse_db_delete(self, query):
result = self.re_db_delete.fullmatch(query)
if not result:
return
return DeleteDbCmd(name=result.group('name'))
    def _parse_table_create(self, query):
        """Parse 'create table <name> columns <type>:<col> ...' into CreateTableCmd, else None."""
        result_main = self.re_table_create_main.fullmatch(query)
        if not result_main:
            return
        name = result_main.group('name')
        columns_str = result_main.group('columns')
        result_cols = self.re_table_create_col.findall(columns_str)
        if not result_cols:
            return
        # findall yields (type, name) pairs; invert into name -> type mapping.
        schema = {col_name:col_type for col_type, col_name in result_cols}
        return CreateTableCmd(name=name, schema=schema)
def _parse_table_delete(self, query):
result = self.re_table_delete.fullmatch(query)
if not result:
return
return DeleteTableCmd(name=result.group('name'))
def _parse_add_column(self, query):
result = self.re_table_add_column.fullmatch(query)
if not result:
return
name = result.group('name')
col_type = result.group('col_type')
col_name = result.group('col_name')
return AddColumnCmd(name=name, col_type=col_type, col_name=col_name)
def _parse_del_column(self, query):
    " Build a DelColumnCmd from 'change table <name> del column <col>;'. "
    match = self.re_table_del_column.fullmatch(query)
    if match is None:
        return None
    return DelColumnCmd(name=match.group('name'),
                        col_name=match.group('col_name'))
def _parse_insert_row(self, query):
    " Build an InsertCmd from 'insert into <table> values <col=value ...>;'. "
    match = self.re_table_insert_main.fullmatch(query)
    if match is None:
        return None
    value_tuples = self.re_table_values.findall(match.group('values'))
    if not value_tuples:
        return None
    # Each findall tuple is (col_name, col_value, <inner groups...>).
    row = dict((col, value) for col, value, _, _ in value_tuples)
    return InsertCmd(table=match.group('table_name'), row=row)
def _parse_scan_rows(self, query):
    " Build a QueryCmd from 'query <projection> <table> [where ...];', else None. "
    match = self.re_table_scan_rows.fullmatch(query)
    if match is None:
        return None
    where_clause = match.group('conditions')
    # No where-clause -> an empty condition list with a blank combinator.
    conditions = ConditionList('', [])
    if where_clause:
        comparisons = []
        for left, op, right, _, _, _ in self.re_where_conditions.findall(where_clause):
            comparisons.append(Comparison(Column(left), op, Literal(right)))
        conditions = ConditionList(match.group('op'), comparisons)
    return QueryCmd(table=match.group('table_name'),
                    projection=match.group('projection').split(','),
                    conditions_list=conditions)
def _parse_table_update_rows(self, query):
    " Build an UpdateCmd from 'update <table> set <col=value ...> [where ...];'. "
    match = self.re_table_update_rows.fullmatch(query)
    if match is None:
        return None
    setter_tuples = self.re_table_values.findall(match.group('setters'))
    if not setter_tuples:
        return None
    where_clause = match.group('conditions')
    # No where-clause -> an empty condition list with a blank combinator.
    conditions = ConditionList('', [])
    if where_clause:
        comparisons = [Comparison(Column(left), op, Literal(right))
                       for left, op, right, _, _, _
                       in self.re_where_conditions.findall(where_clause)]
        conditions = ConditionList(match.group('op'), comparisons)
    new_values = dict((col, value) for col, value, _, _ in setter_tuples)
    return UpdateCmd(table=match.group('table_name'), values=new_values,
                     conditions_list=conditions)
def _parse_table_delete_rows(self, query):
    " Build a DeleteCmd from 'delete in <table> [where ...];', else None. "
    match = self.re_table_delete_rows.fullmatch(query)
    if match is None:
        return None
    where_clause = match.group('conditions')
    # No where-clause -> an empty condition list with a blank combinator.
    conditions = ConditionList('', [])
    if where_clause:
        comparisons = [Comparison(Column(left), op, Literal(right))
                       for left, op, right, _, _, _
                       in self.re_where_conditions.findall(where_clause)]
        conditions = ConditionList(match.group('op'), comparisons)
    return DeleteCmd(table=match.group('table_name'), conditions_list=conditions)
def _parse_tables(self, query):
    " Build a TablesCmd from a 'tables <db_name>;' statement, else None. "
    match = self.re_tables.fullmatch(query)
    if match is None:
        return None
    return TablesCmd(db_name=match.group('db_name'))
def _parse_db(self, query):
    " Build a DbCmd from a 'db;' statement, else None. "
    if self.re_db.fullmatch(query) is None:
        return None
    return DbCmd()
def _parse_from_csv(self, query):
    " Build a FromCsvCmd from a 'from csv <path>.csv;' statement, else None. "
    match = self.re_from_csv.fullmatch(query)
    if match is None:
        return None
    return FromCsvCmd(csv_path=match.group('csv_path'))
def _parse_to_csv(self, query):
    " Build a ToCsvCmd from a 'to csv <path>.csv;' statement, else None. "
    match = self.re_to_csv.fullmatch(query)
    if match is None:
        return None
    return ToCsvCmd(csv_path=match.group('csv_path'))
def _parse_schema(self, query):
    " Build a SchemaCmd from a 'schema <table_name>;' statement, else None. "
    result = self.re_schema.fullmatch(query)
    if not result:
        return
    # NOTE(review): the trailing "| [" below is residue from the dataset dump's
    # column separator, not Python source.
    return SchemaCmd(table_name=result.group('table_name')) | [
"collections.namedtuple",
"re.compile"
] | [((137, 167), 'collections.namedtuple', 'namedtuple', (['"""Literal"""', '"""value"""'], {}), "('Literal', 'value')\n", (147, 167), False, 'from collections import namedtuple\n'), ((789, 817), 'collections.namedtuple', 'namedtuple', (['"""Column"""', '"""name"""'], {}), "('Column', 'name')\n", (799, 817), False, 'from collections import namedtuple\n'), ((847, 890), 'collections.namedtuple', 'namedtuple', (['"""Comparison"""', '"""left, op, right"""'], {}), "('Comparison', 'left, op, right')\n", (857, 890), False, 'from collections import namedtuple\n'), ((1676, 1729), 'collections.namedtuple', 'namedtuple', (['"""ConditionList"""', '"""comp_type, comparisons"""'], {}), "('ConditionList', 'comp_type, comparisons')\n", (1686, 1729), False, 'from collections import namedtuple\n'), ((2006, 2039), 'collections.namedtuple', 'namedtuple', (['"""CreateDbCmd"""', '"""name"""'], {}), "('CreateDbCmd', 'name')\n", (2016, 2039), False, 'from collections import namedtuple\n'), ((2133, 2163), 'collections.namedtuple', 'namedtuple', (['"""UseDbCmd"""', '"""name"""'], {}), "('UseDbCmd', 'name')\n", (2143, 2163), False, 'from collections import namedtuple\n'), ((2257, 2290), 'collections.namedtuple', 'namedtuple', (['"""DeleteDbCmd"""', '"""name"""'], {}), "('DeleteDbCmd', 'name')\n", (2267, 2290), False, 'from collections import namedtuple\n'), ((2390, 2434), 'collections.namedtuple', 'namedtuple', (['"""CreateTableCmd"""', '"""name, schema"""'], {}), "('CreateTableCmd', 'name, schema')\n", (2400, 2434), False, 'from collections import namedtuple\n'), ((2745, 2781), 'collections.namedtuple', 'namedtuple', (['"""DeleteTableCmd"""', '"""name"""'], {}), "('DeleteTableCmd', 'name')\n", (2755, 2781), False, 'from collections import namedtuple\n'), ((2887, 2941), 'collections.namedtuple', 'namedtuple', (['"""AddColumnCmd"""', '"""name, col_type, col_name"""'], {}), "('AddColumnCmd', 'name, col_type, col_name')\n", (2897, 2941), False, 'from collections import namedtuple\n'), ((3499, 
3543), 'collections.namedtuple', 'namedtuple', (['"""DelColumnCmd"""', '"""name, col_name"""'], {}), "('DelColumnCmd', 'name, col_name')\n", (3509, 3543), False, 'from collections import namedtuple\n'), ((4248, 4285), 'collections.namedtuple', 'namedtuple', (['"""InsertCmd"""', '"""table, row"""'], {}), "('InsertCmd', 'table, row')\n", (4258, 4285), False, 'from collections import namedtuple\n'), ((5225, 5285), 'collections.namedtuple', 'namedtuple', (['"""QueryCmd"""', '"""table, projection, conditions_list"""'], {}), "('QueryCmd', 'table, projection, conditions_list')\n", (5235, 5285), False, 'from collections import namedtuple\n'), ((6428, 6477), 'collections.namedtuple', 'namedtuple', (['"""DeleteCmd"""', '"""table, conditions_list"""'], {}), "('DeleteCmd', 'table, conditions_list')\n", (6438, 6477), False, 'from collections import namedtuple\n'), ((6922, 6979), 'collections.namedtuple', 'namedtuple', (['"""UpdateCmd"""', '"""table, values, conditions_list"""'], {}), "('UpdateCmd', 'table, values, conditions_list')\n", (6932, 6979), False, 'from collections import namedtuple\n'), ((7608, 7644), 'collections.namedtuple', 'namedtuple', (['"""FromCsvCmd"""', '"""csv_path"""'], {}), "('FromCsvCmd', 'csv_path')\n", (7618, 7644), False, 'from collections import namedtuple\n'), ((7750, 7784), 'collections.namedtuple', 'namedtuple', (['"""ToCsvCmd"""', '"""csv_path"""'], {}), "('ToCsvCmd', 'csv_path')\n", (7760, 7784), False, 'from collections import namedtuple\n'), ((7889, 7927), 'collections.namedtuple', 'namedtuple', (['"""FromCsvCmd"""', '"""table_name"""'], {}), "('FromCsvCmd', 'table_name')\n", (7899, 7927), False, 'from collections import namedtuple\n'), ((8066, 8100), 'collections.namedtuple', 'namedtuple', (['"""TablesCmd"""', '"""db_name"""'], {}), "('TablesCmd', 'db_name')\n", (8076, 8100), False, 'from collections import namedtuple\n'), ((8214, 8237), 'collections.namedtuple', 'namedtuple', (['"""DbCmd"""', '""""""'], {}), "('DbCmd', '')\n", (8224, 8237), 
False, 'from collections import namedtuple\n'), ((8637, 8685), 're.compile', 're.compile', (['"""^create\\\\s+sdb\\\\s+(?P<name>\\\\w+);$"""'], {}), "('^create\\\\s+sdb\\\\s+(?P<name>\\\\w+);$')\n", (8647, 8685), False, 'import re\n'), ((8700, 8745), 're.compile', 're.compile', (['"""^use\\\\s+sdb\\\\s+(?P<name>\\\\w+);$"""'], {}), "('^use\\\\s+sdb\\\\s+(?P<name>\\\\w+);$')\n", (8710, 8745), False, 'import re\n'), ((8763, 8811), 're.compile', 're.compile', (['"""^delete\\\\s+sdb\\\\s+(?P<name>\\\\w+);$"""'], {}), "('^delete\\\\s+sdb\\\\s+(?P<name>\\\\w+);$')\n", (8773, 8811), False, 'import re\n'), ((8837, 8953), 're.compile', 're.compile', (['"""^create\\\\s+table\\\\s+(?P<name>\\\\w+)\\\\s+columns\\\\s+(?P<columns>((int|str|bool):(\\\\w+)\\\\s?)+);$"""'], {}), "(\n '^create\\\\s+table\\\\s+(?P<name>\\\\w+)\\\\s+columns\\\\s+(?P<columns>((int|str|bool):(\\\\w+)\\\\s?)+);$'\n )\n", (8847, 8953), False, 'import re\n'), ((8964, 8999), 're.compile', 're.compile', (['"""(int|str|bool):(\\\\w+)"""'], {}), "('(int|str|bool):(\\\\w+)')\n", (8974, 8999), False, 'import re\n'), ((9022, 9072), 're.compile', 're.compile', (['"""^delete\\\\s+table\\\\s+(?P<name>\\\\w+);$"""'], {}), "('^delete\\\\s+table\\\\s+(?P<name>\\\\w+);$')\n", (9032, 9072), False, 'import re\n'), ((9097, 9223), 're.compile', 're.compile', (['"""^change\\\\s+table\\\\s+(?P<name>\\\\w+)\\\\s+add\\\\s+column\\\\s+(?P<col_type>int|str|bool):(?P<col_name>\\\\w+);$"""'], {}), "(\n '^change\\\\s+table\\\\s+(?P<name>\\\\w+)\\\\s+add\\\\s+column\\\\s+(?P<col_type>int|str|bool):(?P<col_name>\\\\w+);$'\n )\n", (9107, 9223), False, 'import re\n'), ((9234, 9333), 're.compile', 're.compile', (['"""^change\\\\s+table\\\\s+(?P<name>\\\\w+)\\\\s+del\\\\s+column\\\\s+(?P<col_name>\\\\w+);$"""'], {}), "(\n '^change\\\\s+table\\\\s+(?P<name>\\\\w+)\\\\s+del\\\\s+column\\\\s+(?P<col_name>\\\\w+);$'\n )\n", (9244, 9333), False, 'import re\n'), ((9345, 9544), 're.compile', 're.compile', 
(['"""^insert\\\\s+into\\\\s+(?P<table_name>\\\\w+)\\\\s+values\\\\s+(?P<values>(\\\\w+=(True|False|\\\\d+?|\\\\"(\\\\w|[\\\\/\\\\<\\\\>:`~.,?!@;\\\\\'#$%\\\\^&*\\\\-_+=\\\\[\\\\{\\\\]\\\\}\\\\\\\\\\\\|()\\\\ ])*?\\\\")\\\\s?)+?);$"""'], {}), '(\n \'^insert\\\\s+into\\\\s+(?P<table_name>\\\\w+)\\\\s+values\\\\s+(?P<values>(\\\\w+=(True|False|\\\\d+?|\\\\"(\\\\w|[\\\\/\\\\<\\\\>:`~.,?!@;\\\\\\\'#$%\\\\^&*\\\\-_+=\\\\[\\\\{\\\\]\\\\}\\\\\\\\\\\\|()\\\\ ])*?\\\\")\\\\s?)+?);$\'\n )\n', (9355, 9544), False, 'import re\n'), ((9532, 9702), 're.compile', 're.compile', (['"""(\\\\w+)=(True|False|(\\\\d+)|\\\\"([A-Za-z0-9\\\\/\\\\<\\\\>\\\\:\\\\`\\\\~\\\\.\\\\,\\\\?\\\\!\\\\@\\\\;\\\\\'\\\\#\\\\$\\\\%\\\\^\\\\&\\\\*\\\\-\\\\_\\\\+\\\\=\\\\[\\\\{\\\\]\\\\}\\\\\\\\\\\\|\\\\(\\\\)\\\\ ])*?\\\\")"""'], {}), '(\n \'(\\\\w+)=(True|False|(\\\\d+)|\\\\"([A-Za-z0-9\\\\/\\\\<\\\\>\\\\:\\\\`\\\\~\\\\.\\\\,\\\\?\\\\!\\\\@\\\\;\\\\\\\'\\\\#\\\\$\\\\%\\\\^\\\\&\\\\*\\\\-\\\\_\\\\+\\\\=\\\\[\\\\{\\\\]\\\\}\\\\\\\\\\\\|\\\\(\\\\)\\\\ ])*?\\\\")\'\n )\n', (9542, 9702), False, 'import re\n'), ((9682, 9897), 're.compile', 're.compile', (['"""(?P<col_name>\\\\w+?)(?P<op>=|!=|<|>|<=|>=)(?P<value>(\\\\d+)|(True|False)|\\\\"([A-Za-z0-9\\\\/\\\\<\\\\>\\\\:\\\\`\\\\~\\\\.\\\\,\\\\?\\\\!\\\\@\\\\;\\\\\'\\\\#\\\\$\\\\%\\\\^\\\\&\\\\*\\\\-\\\\_\\\\+\\\\=\\\\[\\\\{\\\\]\\\\}\\\\\\\\\\\\|\\\\(\\\\)\\\\ ])*?\\\\")"""'], {}), '(\n \'(?P<col_name>\\\\w+?)(?P<op>=|!=|<|>|<=|>=)(?P<value>(\\\\d+)|(True|False)|\\\\"([A-Za-z0-9\\\\/\\\\<\\\\>\\\\:\\\\`\\\\~\\\\.\\\\,\\\\?\\\\!\\\\@\\\\;\\\\\\\'\\\\#\\\\$\\\\%\\\\^\\\\&\\\\*\\\\-\\\\_\\\\+\\\\=\\\\[\\\\{\\\\]\\\\}\\\\\\\\\\\\|\\\\(\\\\)\\\\ ])*?\\\\")\'\n )\n', (9692, 9897), False, 'import re\n'), ((9876, 10210), 're.compile', 're.compile', 
(['"""^query\\\\s+(?P<projection>\\\\*|(\\\\w+\\\\,?)+?)\\\\s+(?P<table_name>\\\\w+)(\\\\s+where\\\\s+op:(?P<op>or|and)\\\\s+conditions\\\\s+(?P<conditions>((\\\\w+?)(=|!=|<|>|<=|>=)((\\\\d+?)|(True|False)|\\\\"([A-Za-z0-9\\\\/\\\\<\\\\>\\\\:\\\\`\\\\~\\\\.\\\\,\\\\?\\\\!\\\\@\\\\;\\\\\'\\\\#\\\\$\\\\%\\\\^\\\\&\\\\*\\\\-\\\\_\\\\+\\\\=\\\\[\\\\{\\\\]\\\\}\\\\\\\\\\\\|\\\\(\\\\)\\\\ ])*?\\\\")(\\\\s+)?)+))?;$"""'], {}), '(\n \'^query\\\\s+(?P<projection>\\\\*|(\\\\w+\\\\,?)+?)\\\\s+(?P<table_name>\\\\w+)(\\\\s+where\\\\s+op:(?P<op>or|and)\\\\s+conditions\\\\s+(?P<conditions>((\\\\w+?)(=|!=|<|>|<=|>=)((\\\\d+?)|(True|False)|\\\\"([A-Za-z0-9\\\\/\\\\<\\\\>\\\\:\\\\`\\\\~\\\\.\\\\,\\\\?\\\\!\\\\@\\\\;\\\\\\\'\\\\#\\\\$\\\\%\\\\^\\\\&\\\\*\\\\-\\\\_\\\\+\\\\=\\\\[\\\\{\\\\]\\\\}\\\\\\\\\\\\|\\\\(\\\\)\\\\ ])*?\\\\")(\\\\s+)?)+))?;$\'\n )\n', (9886, 10210), False, 'import re\n'), ((10180, 10658), 're.compile', 're.compile', (['"""^update\\\\s+(?P<table_name>\\\\w+)\\\\s+set\\\\s+(?P<setters>(((\\\\w+)=(True|False|(\\\\d+)|\\\\"([A-Za-z0-9\\\\/\\\\<\\\\>\\\\:\\\\`\\\\~\\\\.\\\\,\\\\?\\\\!\\\\@\\\\;\\\\\'\\\\#\\\\$\\\\%\\\\^\\\\&\\\\*\\\\-\\\\_\\\\+\\\\=\\\\[\\\\{\\\\]\\\\}\\\\\\\\\\\\|\\\\(\\\\)\\\\ ])*?\\\\"))\\\\s?)+)(\\\\s+where\\\\s+op:(?P<op>or|and)\\\\s+conditions\\\\s+(?P<conditions>((\\\\w+?)(=|!=|<|>|<=|>=)((\\\\d+?)|(True|False)|\\\\"([A-Za-z0-9\\\\/\\\\<\\\\>\\\\:\\\\`\\\\~\\\\.\\\\,\\\\?\\\\!\\\\@\\\\;\\\\\'\\\\#\\\\$\\\\%\\\\^\\\\&\\\\*\\\\-\\\\_\\\\+\\\\=\\\\[\\\\{\\\\]\\\\}\\\\\\\\\\\\|\\\\(\\\\)\\\\ ])*?\\\\")(\\\\s+)?)+))?;$"""'], {}), '(\n \'^update\\\\s+(?P<table_name>\\\\w+)\\\\s+set\\\\s+(?P<setters>(((\\\\w+)=(True|False|(\\\\d+)|\\\\"([A-Za-z0-9\\\\/\\\\<\\\\>\\\\:\\\\`\\\\~\\\\.\\\\,\\\\?\\\\!\\\\@\\\\;\\\\\\\'\\\\#\\\\$\\\\%\\\\^\\\\&\\\\*\\\\-\\\\_\\\\+\\\\=\\\\[\\\\{\\\\]\\\\}\\\\\\\\\\\\|\\\\(\\\\)\\\\ 
])*?\\\\"))\\\\s?)+)(\\\\s+where\\\\s+op:(?P<op>or|and)\\\\s+conditions\\\\s+(?P<conditions>((\\\\w+?)(=|!=|<|>|<=|>=)((\\\\d+?)|(True|False)|\\\\"([A-Za-z0-9\\\\/\\\\<\\\\>\\\\:\\\\`\\\\~\\\\.\\\\,\\\\?\\\\!\\\\@\\\\;\\\\\\\'\\\\#\\\\$\\\\%\\\\^\\\\&\\\\*\\\\-\\\\_\\\\+\\\\=\\\\[\\\\{\\\\]\\\\}\\\\\\\\\\\\|\\\\(\\\\)\\\\ ])*?\\\\")(\\\\s+)?)+))?;$\'\n )\n', (10190, 10658), False, 'import re\n'), ((10591, 10896), 're.compile', 're.compile', (['"""^delete\\\\s+in\\\\s+(?P<table_name>\\\\w+)(\\\\s+where\\\\s+op:(?P<op>or|and)\\\\s+conditions\\\\s+(?P<conditions>((\\\\w+?)(=|!=|<|>|<=|>=)((\\\\d+?)|(True|False)|\\\\"([A-Za-z0-9\\\\/\\\\<\\\\>\\\\:\\\\`\\\\~\\\\.\\\\,\\\\?\\\\!\\\\@\\\\;\\\\\'\\\\#\\\\$\\\\%\\\\^\\\\&\\\\*\\\\-\\\\_\\\\+\\\\=\\\\[\\\\{\\\\]\\\\}\\\\\\\\\\\\|\\\\(\\\\)\\\\ ])*?\\\\")(\\\\s+)?)+))?;$"""'], {}), '(\n \'^delete\\\\s+in\\\\s+(?P<table_name>\\\\w+)(\\\\s+where\\\\s+op:(?P<op>or|and)\\\\s+conditions\\\\s+(?P<conditions>((\\\\w+?)(=|!=|<|>|<=|>=)((\\\\d+?)|(True|False)|\\\\"([A-Za-z0-9\\\\/\\\\<\\\\>\\\\:\\\\`\\\\~\\\\.\\\\,\\\\?\\\\!\\\\@\\\\;\\\\\\\'\\\\#\\\\$\\\\%\\\\^\\\\&\\\\*\\\\-\\\\_\\\\+\\\\=\\\\[\\\\{\\\\]\\\\}\\\\\\\\\\\\|\\\\(\\\\)\\\\ ])*?\\\\")(\\\\s+)?)+))?;$\'\n )\n', (10601, 10896), False, 'import re\n'), ((10860, 10923), 're.compile', 're.compile', (['"""^from\\\\s+csv\\\\s+(?P<csv_path>[^ ]+?\\\\.csv)\\\\s*?;$"""'], {}), "('^from\\\\s+csv\\\\s+(?P<csv_path>[^ ]+?\\\\.csv)\\\\s*?;$')\n", (10870, 10923), False, 'import re\n'), ((10937, 10998), 're.compile', 're.compile', (['"""^to\\\\s+csv\\\\s+(?P<csv_path>[^ ]+?\\\\.csv)\\\\s*?;$"""'], {}), "('^to\\\\s+csv\\\\s+(?P<csv_path>[^ ]+?\\\\.csv)\\\\s*?;$')\n", (10947, 10998), False, 'import re\n'), ((11012, 11064), 're.compile', 're.compile', (['"""^schema\\\\s+(?P<table_name>\\\\w+)\\\\s*?;$"""'], {}), "('^schema\\\\s+(?P<table_name>\\\\w+)\\\\s*?;$')\n", (11022, 11064), False, 'import re\n'), ((11079, 11128), 're.compile', 're.compile', 
(['"""^tables\\\\s+(?P<db_name>\\\\w+)\\\\s*?;$"""'], {}), "('^tables\\\\s+(?P<db_name>\\\\w+)\\\\s*?;$')\n", (11089, 11128), False, 'import re\n'), ((11139, 11163), 're.compile', 're.compile', (['"""^db\\\\s*?;$"""'], {}), "('^db\\\\s*?;$')\n", (11149, 11163), False, 'import re\n')] |
"""
A module for calculating the relationship (or distance) between 2 strings.
Namely:
- edit_distance()
- needleman_wunsch()
- align()
- coverage()
"""
from typing import Callable, Tuple, List
from enum import IntEnum
from operator import itemgetter
from functools import lru_cache
import unittest
class BackTrack(IntEnum):
    " Used by align() for backtracking. "
    DELETE = 1      # came from the cell above: consume a reference char (gap in target)
    INSERT = 2      # came from the cell to the left: consume a target char (gap in reference)
    SUBSTITUTE = 3  # came from the diagonal: match or mismatch
    UNASSIGNED = 4  # boundary/initial marker: no direction assigned yet
# (Score, backtrack direction)
MatrixElement = Tuple[float, BackTrack]
# A score matrix indexed as matrix[row][col] -> (score, direction).
Matrix = List[List[MatrixElement]]
def print_matrix(mat: Matrix) -> None:
    " Pretty print an alignment matrix. "
    # Single-character glyph per backtrack direction; "?" for anything else.
    glyphs = {
        BackTrack.DELETE: "d",
        BackTrack.INSERT: "i",
        BackTrack.SUBSTITUTE: "m",
    }
    rows: List[List[str]] = [[] for _ in mat[0]]
    for column in mat:
        for row_idx, (score, direction) in enumerate(column):
            rows[row_idx].append(
                "(" + str(score) + ", " + glyphs.get(direction, "?") + ")"
            )
    print("\n".join(" ".join(row) for row in rows) + "\n")
@lru_cache(maxsize=None)
def edit_distance(reference: str, target: str) -> Matrix:
    """ Computes the edit distance matrix between a and b. """
    n_rows = len(reference) + 1
    n_cols = len(target) + 1
    matrix = [[(0.0, BackTrack.UNASSIGNED)] * n_cols for _ in range(n_rows)]
    # Boundary: deleting i reference chars / inserting j target chars costs i / j.
    for row in range(1, n_rows):
        matrix[row][0] = (row, BackTrack.DELETE)
    for col in range(1, n_cols):
        matrix[0][col] = (col, BackTrack.INSERT)
    for col in range(1, n_cols):
        for row in range(1, n_rows):
            mismatch = 0 if reference[row - 1] == target[col - 1] else 1
            # Candidate order matters for min() tie-breaking: diagonal wins ties.
            candidates = [
                (matrix[row - 1][col - 1][0] + mismatch, BackTrack.SUBSTITUTE),
                (matrix[row][col - 1][0] + 1, BackTrack.INSERT),
                (matrix[row - 1][col][0] + 1, BackTrack.DELETE),
            ]
            matrix[row][col] = min(candidates, key=itemgetter(0))
    return matrix
@lru_cache(maxsize=None)
def needleman_wunsch(reference: str, target: str) -> Matrix:
    """ Computes the Needleman-Wunsch matrix between a and b. """
    # Affine-style gap parameters: opening a gap costs gap_open, continuing an
    # existing gap in the same direction costs gap_extend per position.
    gap_open = 2
    gap_extend = 0.1  # Expected length is 10
    def gap_penalty(gaps: List[MatrixElement], direction: BackTrack) -> float:
        # Walk the trail backwards: add gap_extend while the previous cells
        # continue a gap in the same direction, then add the score of the first
        # cell that breaks the run and stop.
        penalty = float(gap_open)
        for gap in gaps[::-1]:
            if gap[1] == direction:
                penalty += gap_extend
            else:
                penalty += gap[0]
                break
        return penalty
    rows = len(reference) + 1
    cols = len(target) + 1
    dist = [[(0.0, BackTrack.UNASSIGNED) for _ in range(cols)] for _ in range(rows)]
    # Boundary rows/columns: pure gap runs (gap_open is subtracted so the very
    # first boundary cell only pays the extension cost).
    for i in range(1, rows):
        boundaryrow = [dist[r][0] for r in range(0, i)]
        dist[i][0] = (
            gap_penalty(boundaryrow, BackTrack.DELETE) - gap_open,
            BackTrack.DELETE,
        )
    for j in range(1, cols):
        dist[0][j] = (
            gap_penalty(dist[0][:j], BackTrack.INSERT) - gap_open,
            BackTrack.INSERT,
        )
    for col in range(1, cols):
        for row in range(1, rows):
            insert_penalty = gap_penalty(
                [dist[row][c] for c in range(col)], BackTrack.INSERT
            )
            delete_penalty = gap_penalty(
                [dist[r][col] for r in range(row)], BackTrack.DELETE
            )
            # min() tie-breaking prefers INSERT, then DELETE, then SUBSTITUTE
            # because of the candidate order.
            dist[row][col] = min(
                [
                    (insert_penalty, BackTrack.INSERT),
                    (delete_penalty, BackTrack.DELETE),
                    (
                        dist[row - 1][col - 1][0]
                        + (0 if reference[row - 1] == target[col - 1] else 1),
                        BackTrack.SUBSTITUTE,
                    ),
                ],
                key=itemgetter(0),
            )
    return dist
def align(
    reference: str, target: str, scoringfn: Callable[[str, str], Matrix]
) -> Tuple[str, str]:
    """ Compute the alignment between a and b using a provided scoring function. """
    matrix = scoringfn(reference, target)
    ref_parts: List[str] = []
    tgt_parts: List[str] = []
    row, col = len(reference), len(target)
    # Follow the backtrack pointers from the bottom-right corner to (0, 0),
    # collecting the aligned characters in reverse order.
    while (row, col) != (0, 0):
        step = matrix[row][col][1]
        if step == BackTrack.SUBSTITUTE:
            row -= 1
            col -= 1
            ref_parts.append(reference[row])
            tgt_parts.append(target[col])
        elif step == BackTrack.INSERT:
            col -= 1
            ref_parts.append("-")
            tgt_parts.append(target[col])
        elif step == BackTrack.DELETE:
            row -= 1
            ref_parts.append(reference[row])
            tgt_parts.append("-")
    return ("".join(reversed(ref_parts)), "".join(reversed(tgt_parts)))
@lru_cache(maxsize=None)
def coverage(reference: str, target: str) -> int:
    """
    The number of substitutions in an alignment.
    >>> coverage("---ATGGC", "GTTA-GGG")
    4
    """
    total = 0
    for ref_char, tgt_char in zip(reference, target):
        # A position counts only when neither side is a gap.
        if ref_char != "-" and tgt_char != "-":
            total += 1
    return total
####### TESTING ########
class TestDistance(unittest.TestCase):
    " Unit tests functions in this file. "
    def test_coverage(self):
        " Unit tests for the coverage() function. "
        self.assertEqual(coverage("", ""), 0)
        self.assertEqual(coverage("A", "A"), 1)
        self.assertEqual(coverage("A", "G"), 1)
        self.assertEqual(coverage("-A", "AA"), 1)
        self.assertEqual(coverage("A-", "AA"), 1)
        self.assertEqual(coverage("AA", "-A"), 1)
        self.assertEqual(coverage("AA", "A-"), 1)
        self.assertEqual(coverage("A-A", "AAA"), 2)
        self.assertEqual(coverage("AAA", "A-A"), 2)
    def test_align_edit_distance(self):
        " Unit tests for align() using edit_distance(). "
        self.assertEqual(align("", "", edit_distance), ("", ""))
        self.assertEqual(align("A", "A", edit_distance), ("A", "A"))
        self.assertEqual(align("AB", "A", edit_distance), ("AB", "A-"))
        self.assertEqual(align("AB", "B", edit_distance), ("AB", "-B"))
        self.assertEqual(align("A", "AB", edit_distance), ("A-", "AB"))
        self.assertEqual(align("B", "AB", edit_distance), ("-B", "AB"))
        self.assertEqual(align("AB", "CD", edit_distance), ("AB", "CD"))
    def test_align_needleman_wunsch(self):
        " Unit tests for align() using needleman_wunsch(). "
        # Note: the two scorers place gaps differently for ("AB", "A").
        self.assertEqual(align("", "", needleman_wunsch), ("", ""))
        self.assertEqual(align("A", "A", needleman_wunsch), ("A", "A"))
        self.assertEqual(align("AB", "A", needleman_wunsch), ("AB", "-A"))
        self.assertEqual(align("AB", "B", needleman_wunsch), ("AB", "-B"))
        self.assertEqual(align("A", "AB", needleman_wunsch), ("-A", "AB"))
        self.assertEqual(align("B", "AB", needleman_wunsch), ("-B", "AB"))
        self.assertEqual(align("AB", "CD", needleman_wunsch), ("AB", "CD"))
if __name__ == "__main__":
    import doctest
    # Run the embedded doctest examples first, then the unittest suite.
    doctest.testmod()
    unittest.main()
| [
"unittest.main",
"functools.lru_cache",
"doctest.testmod",
"operator.itemgetter"
] | [((1194, 1217), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (1203, 1217), False, 'from functools import lru_cache\n'), ((2137, 2160), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (2146, 2160), False, 'from functools import lru_cache\n'), ((4814, 4837), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (4823, 4837), False, 'from functools import lru_cache\n'), ((6988, 7005), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (7003, 7005), False, 'import doctest\n'), ((7011, 7026), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7024, 7026), False, 'import unittest\n'), ((2102, 2115), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (2112, 2115), False, 'from operator import itemgetter\n'), ((3928, 3941), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (3938, 3941), False, 'from operator import itemgetter\n')] |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import math
import datetime
import time
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
class Indicators():
    """
    Per-year performance indicators for a backtest result frame.

    The input DataFrame must carry at least the columns 'tradeDate' and
    'open', plus per-strategy columns named 'asset<k>', 'pnl<k>',
    'turnover<k>', 'position<k>' and 'transaction<k>' for every k in
    *params*.

    NOTE(review): this class uses DataFrame.ix, which was removed in
    pandas 1.0 -- it requires an old pandas release.
    """
    def __init__(self, dataframe, params = []):
        # NOTE(review): mutable default argument (params=[]) -- shared across
        # calls if ever mutated.
        self.dataframe = dataframe
        self.params = params
        # Daily simple return computed from consecutive 'open' prices.
        self.dataframe['return'] = 0
        for i in range(1,len(dataframe['return'])):
            #http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
            dataframe.loc[i,'return'] = (self.dataframe.loc[i,'open']-self.dataframe.loc[i-1,'open'])/self.dataframe.loc[i-1,'open']
        self.Return = dataframe['return']
        self.dataframe['time'] = dataframe['tradeDate']
        # Cumulative return normalised to the first 'open' price.
        self.dataframe['cumulative_return'] = self.dataframe['open']
        self.dataframe['cumulative_return'] = self.dataframe['cumulative_return']/self.dataframe.loc[0,'open']
        self.dataframe['cumulative_return'] = dataframe['cumulative_return']#*1000000
        self.dataframe.index = pd.to_datetime(dataframe['tradeDate'])#!!!!!
        # Slice the frame into one sub-frame per calendar year, keyed by "YYYY".
        self.year_slice = {}
        i = 0
        y = time.strptime(self.dataframe['time'].iat[0],"%Y-%m-%d").tm_year
        for j in range(1,len(self.dataframe)):
            if y != time.strptime(self.dataframe['time'].iat[j],"%Y-%m-%d").tm_year:
                self.year_slice[str(y)] = dataframe[i:j-1]
                y = time.strptime(self.dataframe['time'].iat[j],"%Y-%m-%d").tm_year
                i = j
        self.year_slice[str(y)] = dataframe[i:]
    ### Annualised return
    def annual_return(self,asset,year):
        """ Geometric annualised return of column *asset* within *year*. """
        R = self.year_slice[year][asset].iat[-1]/self.year_slice[year][asset].iat[0]
        t1 = time.strptime(self.year_slice[year]['time'].iat[0],"%Y-%m-%d")
        t2 = time.strptime(self.year_slice[year]['time'].iat[-1],"%Y-%m-%d")
        d1 = datetime.datetime(t1.tm_year, t1.tm_mon, t1.tm_mday)
        # NOTE(review): t1.tm_year is reused for d2 -- presumably both dates fall
        # in the same calendar year; confirm for slices spanning a year end.
        d2 = datetime.datetime(t1.tm_year, t2.tm_mon, t2.tm_mday)
        n = (d2-d1).days
        n = n/244  # 244 trading days assumed per year
        # print('The annual return for %s in %s is %f' %(asset,year,math.pow(R, 1/n)-1))
        return math.pow(R, 1/n)-1
    ### Maximum drawdown
    def max_draw(self,asset,year):
        """ Largest peak-to-trough decline of *asset* in *year*, as a positive fraction. """
        # Running maximum of the asset curve; 'retreat' is the relative drop from it.
        self.year_slice[year]['max'] = 0
        self.year_slice[year].ix[0,'max'] = self.year_slice[year].ix[0,asset]#loc, iloc, and ix
        for i in range(1, len(self.year_slice[year][asset])):
            if self.year_slice[year].ix[i, asset] > self.year_slice[year].ix[i-1, 'max']:
                self.year_slice[year].ix[i, 'max'] = self.year_slice[year].ix[i, asset]
            else:
                self.year_slice[year].ix[i, 'max'] = self.year_slice[year].ix[i-1, 'max']
        self.year_slice[year]['retreat']=(self.year_slice[year][asset]- self.year_slice[year]['max'])/self.year_slice[year]['max']
        print('The max draw for %s in %s is %f' %(asset,year,abs(min(self.year_slice[year]['retreat']))))
        return abs(min(self.year_slice[year]['retreat']))
    ### Volatility
    def volatility(self,asset,year):
        """ Standard deviation of *asset* scaled by sqrt(244 / number of days). """
        print('The volatility for %s in %s is %f' %(asset,year,np.std(self.year_slice[year][asset])*math.sqrt(244/len(self.year_slice[year][asset]))))
        return np.std(self.year_slice[year][asset])*math.sqrt(244/len(self.year_slice[year][asset]))
    ### Sharpe ratio
    def sharp(self, asset,no_risk_R,year):
        """ (annualised return - *no_risk_R*) over rescaled volatility; 1e-10 guards against /0. """
        # NOTE(review): volatility() already includes a sqrt(244/len) factor, so
        # the extra sqrt here rescales it a second time -- confirm intended.
        print('The Sharp Ratio for %s in %s is %.7f' %(asset,year,(self.annual_return(asset,year)-no_risk_R)/(self.volatility(asset,year)*math.sqrt(244/len(self.year_slice[year][asset]))+1e-10)))
        return (self.annual_return(asset,year)-no_risk_R)/(self.volatility(asset,year)*math.sqrt(244/len(self.year_slice[year][asset]))+1e-10)
    ### Calmar ratio
    def calmar(self,asset,year):
        """ Annualised return divided by maximum drawdown. """
        print('The Calmar Ratio for %s in %s is %f' %(asset,year,self.annual_return(asset,year)/self.max_draw(asset,year)))
        return self.annual_return(asset,year)/self.max_draw(asset,year)
    ### Daily win ratio
    def daily_win_ratio(self,asset,year):
        """ Fraction of days with positive pnl for the strategy behind *asset*. """
        # Conditional selection on the frame must be written as
        # self.dataframe[self.dataframe[asset] > 0][asset], not
        # self.dataframe[asset][self.dataframe[asset] > 0].
        #!!
        pnl = asset.replace('asset','pnl')
        n1 = len(self.year_slice[year][self.year_slice[year][pnl] > 0][pnl])
        n2 = len(self.year_slice[year][pnl])
        print('The daily win ratio for %s in %s is %f' %(asset,year,n1/n2))
        return n1/n2
    ### Daily profit/loss ratio
    def win_lose_ratio(self,asset,year):
        """ Sum of daily gains over sum of daily losses (absolute values). """
        # NOTE(review): the print below reports the 'retreat' column (set by
        # max_draw), not the value actually returned -- looks like a copy/paste.
        self.year_slice[year]['dif'] = self.year_slice[year][asset] - self.year_slice[year][asset].shift(1)
        print('The win lose ratio for %s in %s is %f' %(asset,year,abs(min(self.year_slice[year]['retreat']))))
        return abs(sum(self.year_slice[year][self.year_slice[year]['dif']>0]['dif']))/abs(sum(self.year_slice[year][self.year_slice[year]['dif']<0]['dif']))
    ### Worst drawdown interval
    def worst_draw_interval(self,asset,year):
        """ Return (trough date, preceding peak date) of the worst drawdown. """
        self.year_slice[year]['max'] = 0
        self.year_slice[year].ix[0,'max'] = self.year_slice[year].ix[0,asset]
        # 'max_time' tracks the date on which the running maximum was reached.
        self.year_slice[year]['max_time'] = self.year_slice[year]['time']
        for i in range(1, len(self.year_slice[year][asset])):
            if self.year_slice[year].ix[i, asset] > self.year_slice[year].ix[i-1, 'max']:
                self.year_slice[year].ix[i, 'max'] = self.year_slice[year].ix[i, asset]
            else:
                self.year_slice[year].ix[i, 'max'] = self.year_slice[year].ix[i-1, 'max']
                self.year_slice[year].ix[i, 'max_time'] = self.year_slice[year].ix[i-1, 'max_time']
        self.year_slice[year]['retreat']=(self.year_slice[year][asset]- self.year_slice[year]['max'])/self.year_slice[year]['max']
        max_draw = min(self.year_slice[year]['retreat'])
        data = self.year_slice[year][self.year_slice[year]['retreat'] == max_draw]
        t1 = data['tradeDate']#
        t2 = data['max_time']
        #print('The worst draw interval for %s in %s is %s %s' %(asset,year,str(t1),str(t2)))
        return t1,t2
    ### Total turnover
    def total_turnover(self,asset,year):
        """ Sum of the strategy's daily turnover within *year*. """
        turnover = asset.replace('asset','turnover')
        print('The total turnover for %s in %s is %f' %(asset,year,sum(self.year_slice[year][turnover])))
        return sum(self.year_slice[year][turnover])
    ### Average daily turnover
    def average_daily_turnover(self,asset,year):
        """ Total turnover divided by the number of calendar days in the slice. """
        t1 = time.strptime(self.year_slice[year]['time'].iat[0],"%Y-%m-%d")
        t2 = time.strptime(self.year_slice[year]['time'].iat[-1],"%Y-%m-%d")
        d1 = datetime.datetime(t1.tm_year, t1.tm_mon, t1.tm_mday)
        # NOTE(review): t1.tm_year reused for d2, same caveat as annual_return().
        d2 = datetime.datetime(t1.tm_year, t2.tm_mon, t2.tm_mday)
        n = (d2-d1).days
        print('The average daily turnover for %s in %s is %f' %(asset,year,self.total_turnover(asset,year)/n))
        return self.total_turnover(asset,year)/n
    ### Average daily position
    def average_daily_position(self,asset,year):
        """ Mean of the strategy's daily position column within *year*. """
        position = asset.replace('asset','position')
        print('The average daily position for %s in %s is %f' %(asset,year,self.year_slice[year][position].mean()))
        return self.year_slice[year][position].mean()
    ### Average return per trade
    def minor_average_return(self,asset,year):
        """ Mean position value over the days with a non-zero position. """
        position = asset.replace('asset','position')
        sum_pos = sum(self.year_slice[year][self.year_slice[year][position]!=0][position])
        num = len(self.year_slice[year][self.year_slice[year][position]!=0][position])
        print('The minor average return for %s in %s is %f' %(asset,year,sum_pos/num))
        return sum_pos/num
    def write_indicators_concat(self,path):
        """ Compute every indicator for every (year, asset) pair and write one CSV to *path*. """
        frames = []
        for items in self.year_slice:
            temp_data = []
            temp_index = []
            for k in self.params:
                # One row of indicator values per asset column.
                x = [items,
                     self.annual_return('asset'+ str(k),items),
                     self.max_draw('asset'+ str(k),items),
                     self.volatility('asset'+ str(k),items),
                     self.sharp('asset'+ str(k),0,items),
                     self.calmar('asset'+ str(k),items),
                     self.daily_win_ratio('asset'+ str(k),items),
                     self.win_lose_ratio('asset'+ str(k),items),
                     self.total_turnover('asset'+ str(k),items),
                     self.average_daily_turnover('asset'+ str(k),items),
                     self.average_daily_position('asset'+ str(k),items),
                     self.minor_average_return('asset'+ str(k),items)]
                temp_data.append(x)
                temp_index.append('asset'+ str(k))
            DataFrame = pd.DataFrame(temp_data,index=temp_index,columns=['year','annual_return', 'max_draw', 'volatility', 'sharp','calmar','daily_win_ratio','win_lose_ratio','total_turnover','average_daily_turnover','average_daily_position','minor_average_return'])
            frames.append(DataFrame)
        DataFrame = pd.concat(frames)
        DataFrame.to_csv(path_or_buf=path)
    def plot_figure(self,asset_num):
        """ Plot asset curve + cumulative return, transactions and pnl for strategy *asset_num*. """
        # NOTE(review): t1/t2/d1/d2 are computed but never used in this method.
        t1 = time.strptime(self.dataframe['time'].iat[0],"%Y-%m-%d")
        t2 = time.strptime(self.dataframe['time'].iat[-1],"%Y-%m-%d")
        d1 = datetime.datetime(t1.tm_year, t1.tm_mon, t1.tm_mday)
        d2 = datetime.datetime(t1.tm_year, t2.tm_mon, t2.tm_mday)
        plt.figure()
        plt.subplots_adjust(hspace=1, wspace=1)
        plt.subplot(3,1,1)
        self.dataframe['asset'+ str(asset_num)].plot(legend = True)
        self.dataframe['cumulative_return'].plot(x=None, y=None, kind='line', ax=None, subplots=False, sharex=None, sharey=False, layout=None, figsize=None, use_index=True, title=None, grid=None, legend=True, style=None, logx=False, logy=False, loglog=False, xticks=None, yticks=None, xlim=None, ylim=None, rot=None, fontsize=None, colormap=None, table=False, yerr=None, xerr=None, secondary_y=False, sort_columns=False)
        plt.subplot(3,1,2)
        f2 = plt.bar(range(len(self.dataframe['transaction'+ str(asset_num)])), self.dataframe['transaction'+ str(asset_num)].tolist(),tick_label= None,label='transaction'+ str(asset_num))
        plt.legend((f2,),('transaction'+ str(asset_num),))
        plt.subplot(3,1,3)
        f3 = plt.bar(range(len(self.dataframe['pnl'+ str(asset_num)])),self.dataframe['pnl'+ str(asset_num)].tolist(),label='pnl'+ str(asset_num))
        plt.legend((f3,),('pnl'+ str(asset_num),))
        plt.show()
if __name__=='__main__':
    # Indicators expects a DataFrame, not a path string: passing the path
    # directly would raise a TypeError as soon as __init__ does
    # dataframe['return'], so load the CSV first.
    data = pd.read_csv('/Users/zhubaobao/Documents/Quant/ZXJT/total3.csv')
    indicators = Indicators(data, [5,10,20])
    #indicators.write_indicators_concat('/Users/zhubaobao/Documents/Quant/ZXJT/write_indicators.csv')
    indicators.plot_figure(10)
| [
"datetime.datetime",
"matplotlib.pyplot.subplots_adjust",
"time.strptime",
"math.pow",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.figure",
"numpy.std",
"pandas.DataFrame",
"pandas.concat",
"warnings.filterwarnings",
"pandas.to_datetime",
"matplotlib.pyplot.show"
] | [((151, 184), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (174, 184), False, 'import warnings\n'), ((1033, 1071), 'pandas.to_datetime', 'pd.to_datetime', (["dataframe['tradeDate']"], {}), "(dataframe['tradeDate'])\n", (1047, 1071), True, 'import pandas as pd\n'), ((1704, 1767), 'time.strptime', 'time.strptime', (["self.year_slice[year]['time'].iat[0]", '"""%Y-%m-%d"""'], {}), "(self.year_slice[year]['time'].iat[0], '%Y-%m-%d')\n", (1717, 1767), False, 'import time\n'), ((1780, 1844), 'time.strptime', 'time.strptime', (["self.year_slice[year]['time'].iat[-1]", '"""%Y-%m-%d"""'], {}), "(self.year_slice[year]['time'].iat[-1], '%Y-%m-%d')\n", (1793, 1844), False, 'import time\n'), ((1857, 1909), 'datetime.datetime', 'datetime.datetime', (['t1.tm_year', 't1.tm_mon', 't1.tm_mday'], {}), '(t1.tm_year, t1.tm_mon, t1.tm_mday)\n', (1874, 1909), False, 'import datetime\n'), ((1923, 1975), 'datetime.datetime', 'datetime.datetime', (['t1.tm_year', 't2.tm_mon', 't2.tm_mday'], {}), '(t1.tm_year, t2.tm_mon, t2.tm_mday)\n', (1940, 1975), False, 'import datetime\n'), ((6236, 6299), 'time.strptime', 'time.strptime', (["self.year_slice[year]['time'].iat[0]", '"""%Y-%m-%d"""'], {}), "(self.year_slice[year]['time'].iat[0], '%Y-%m-%d')\n", (6249, 6299), False, 'import time\n'), ((6312, 6376), 'time.strptime', 'time.strptime', (["self.year_slice[year]['time'].iat[-1]", '"""%Y-%m-%d"""'], {}), "(self.year_slice[year]['time'].iat[-1], '%Y-%m-%d')\n", (6325, 6376), False, 'import time\n'), ((6389, 6441), 'datetime.datetime', 'datetime.datetime', (['t1.tm_year', 't1.tm_mon', 't1.tm_mday'], {}), '(t1.tm_year, t1.tm_mon, t1.tm_mday)\n', (6406, 6441), False, 'import datetime\n'), ((6455, 6507), 'datetime.datetime', 'datetime.datetime', (['t1.tm_year', 't2.tm_mon', 't2.tm_mday'], {}), '(t1.tm_year, t2.tm_mon, t2.tm_mday)\n', (6472, 6507), False, 'import datetime\n'), ((8663, 8680), 'pandas.concat', 'pd.concat', (['frames'], {}), '(frames)\n', 
(8672, 8680), True, 'import pandas as pd\n'), ((8775, 8831), 'time.strptime', 'time.strptime', (["self.dataframe['time'].iat[0]", '"""%Y-%m-%d"""'], {}), "(self.dataframe['time'].iat[0], '%Y-%m-%d')\n", (8788, 8831), False, 'import time\n'), ((8844, 8901), 'time.strptime', 'time.strptime', (["self.dataframe['time'].iat[-1]", '"""%Y-%m-%d"""'], {}), "(self.dataframe['time'].iat[-1], '%Y-%m-%d')\n", (8857, 8901), False, 'import time\n'), ((8914, 8966), 'datetime.datetime', 'datetime.datetime', (['t1.tm_year', 't1.tm_mon', 't1.tm_mday'], {}), '(t1.tm_year, t1.tm_mon, t1.tm_mday)\n', (8931, 8966), False, 'import datetime\n'), ((8980, 9032), 'datetime.datetime', 'datetime.datetime', (['t1.tm_year', 't2.tm_mon', 't2.tm_mday'], {}), '(t1.tm_year, t2.tm_mon, t2.tm_mday)\n', (8997, 9032), False, 'import datetime\n'), ((9041, 9053), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9051, 9053), True, 'import matplotlib.pyplot as plt\n'), ((9062, 9101), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(1)', 'wspace': '(1)'}), '(hspace=1, wspace=1)\n', (9081, 9101), True, 'import matplotlib.pyplot as plt\n'), ((9111, 9131), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (9122, 9131), True, 'import matplotlib.pyplot as plt\n'), ((9628, 9648), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (9639, 9648), True, 'import matplotlib.pyplot as plt\n'), ((9904, 9924), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (9915, 9924), True, 'import matplotlib.pyplot as plt\n'), ((10130, 10140), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10138, 10140), True, 'import matplotlib.pyplot as plt\n'), ((1147, 1203), 'time.strptime', 'time.strptime', (["self.dataframe['time'].iat[0]", '"""%Y-%m-%d"""'], {}), "(self.dataframe['time'].iat[0], '%Y-%m-%d')\n", (1160, 1203), False, 'import time\n'), ((2122, 2140), 'math.pow', 
'math.pow', (['R', '(1 / n)'], {}), '(R, 1 / n)\n', (2130, 2140), False, 'import math\n'), ((3176, 3212), 'numpy.std', 'np.std', (['self.year_slice[year][asset]'], {}), '(self.year_slice[year][asset])\n', (3182, 3212), True, 'import numpy as np\n'), ((8363, 8627), 'pandas.DataFrame', 'pd.DataFrame', (['temp_data'], {'index': 'temp_index', 'columns': "['year', 'annual_return', 'max_draw', 'volatility', 'sharp', 'calmar',\n 'daily_win_ratio', 'win_lose_ratio', 'total_turnover',\n 'average_daily_turnover', 'average_daily_position', 'minor_average_return']"}), "(temp_data, index=temp_index, columns=['year', 'annual_return',\n 'max_draw', 'volatility', 'sharp', 'calmar', 'daily_win_ratio',\n 'win_lose_ratio', 'total_turnover', 'average_daily_turnover',\n 'average_daily_position', 'minor_average_return'])\n", (8375, 8627), True, 'import pandas as pd\n'), ((1278, 1334), 'time.strptime', 'time.strptime', (["self.dataframe['time'].iat[j]", '"""%Y-%m-%d"""'], {}), "(self.dataframe['time'].iat[j], '%Y-%m-%d')\n", (1291, 1334), False, 'import time\n'), ((1422, 1478), 'time.strptime', 'time.strptime', (["self.dataframe['time'].iat[j]", '"""%Y-%m-%d"""'], {}), "(self.dataframe['time'].iat[j], '%Y-%m-%d')\n", (1435, 1478), False, 'import time\n'), ((3073, 3109), 'numpy.std', 'np.std', (['self.year_slice[year][asset]'], {}), '(self.year_slice[year][asset])\n', (3079, 3109), True, 'import numpy as np\n')] |
# Copyright 2021 The OpenBytes Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from concurrent.futures import as_completed, ThreadPoolExecutor
from typing import Any, Dict, List
import requests
from tqdm import tqdm
from odi.client.storage.storage import Storage
class _S3Obj:
def __init__(self, name: str, path: str, size: int, url: str) -> None:
self._name = name
self._path = path
self._size = size
self._url = url
@property
def name(self) -> str:
return self._name
@property
def path(self) -> str:
return self._path
@property
def size(self) -> int:
return self._size
@property
def url(self) -> str:
return self._url
class S3(Storage):
    """Storage backend that downloads dataset objects from S3-style URLs."""

    # Cap the pool size: the original used one thread per object, which does
    # not scale to datasets with thousands of files.
    _MAX_WORKERS = 32

    @classmethod
    def _load_obj(cls, data: List[Dict[str, Any]]) -> List[_S3Obj]:
        """Convert raw metadata dicts into ``_S3Obj`` records."""
        return [
            _S3Obj(name=d["name"], path=d["fullPath"], size=d["size"], url=d["url"])
            for d in data
        ]

    def upload(self) -> Any:
        """Uploading is not implemented yet."""
        pass

    def download(self, data: List[Dict[str, Any]], path="") -> Any:
        """Download every object described by *data* in parallel with a progress bar.

        :param data: list of object metadata dicts ("name", "fullPath", "size", "url").
        :param path: kept for interface compatibility; currently unused.
        :raises requests.HTTPError: if any object's download returns an error status.
        """
        objs = self._load_obj(data)
        # _size_convert is provided by the Storage base class (not visible here).
        size = self._size_convert(size=sum(o.size for o in objs), origin="b", target="mb")
        total = len(objs)
        print(f"Total: {total}, {size}MB.")
        if not objs:
            # ThreadPoolExecutor(max_workers=0) raises ValueError; nothing to do anyway.
            return
        with tqdm(total=total) as pbar:
            with ThreadPoolExecutor(max_workers=min(total, self._MAX_WORKERS)) as executor:
                futures = [executor.submit(self._download_obj, obj) for obj in objs]
                for future in as_completed(futures):
                    # .result() re-raises any download error instead of dropping it.
                    future.result()
                    pbar.update(1)

    @classmethod
    def _download_obj(cls, obj: _S3Obj) -> None:
        """Stream one object to ``obj.path``, creating parent directories as needed."""
        parent = os.path.dirname(obj.path)
        if parent:
            # dirname is '' for bare filenames (makedirs('') would raise);
            # exist_ok avoids the check-then-create race of the original code.
            os.makedirs(parent, exist_ok=True)
        with requests.get(obj.url, stream=True) as r:
            # Fail loudly instead of silently writing an HTML error page to disk.
            r.raise_for_status()
            with open(obj.path, "wb") as file:
                for chunk in r.iter_content(chunk_size=1024):
                    file.write(chunk)
| [
"concurrent.futures.ThreadPoolExecutor",
"tqdm.tqdm",
"requests.get",
"concurrent.futures.as_completed",
"os.path.dirname"
] | [((1887, 1904), 'tqdm.tqdm', 'tqdm', ([], {'total': 'total'}), '(total=total)\n', (1891, 1904), False, 'from tqdm import tqdm\n'), ((2389, 2423), 'requests.get', 'requests.get', (['obj.url'], {'stream': '(True)'}), '(obj.url, stream=True)\n', (2401, 2423), False, 'import requests\n'), ((1931, 1968), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'total'}), '(max_workers=total)\n', (1949, 1968), False, 'from concurrent.futures import as_completed, ThreadPoolExecutor\n'), ((2097, 2118), 'concurrent.futures.as_completed', 'as_completed', (['futures'], {}), '(futures)\n', (2109, 2118), False, 'from concurrent.futures import as_completed, ThreadPoolExecutor\n'), ((2297, 2322), 'os.path.dirname', 'os.path.dirname', (['obj.path'], {}), '(obj.path)\n', (2312, 2322), False, 'import os\n'), ((2349, 2374), 'os.path.dirname', 'os.path.dirname', (['obj.path'], {}), '(obj.path)\n', (2364, 2374), False, 'import os\n')] |
"""
Name : c14_11_rainbow_callMaxOn2_viaSimulation.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : <NAME>
Date : 6/6/2017
email : <EMAIL>
<EMAIL>
"""
import scipy as sp
from scipy import zeros, sqrt, shape
#
sp.random.seed(123) # fix our random numbers
s1=100. # stock price 1
s2=95. # stock price 2
k=102.0 # exercise price
T=8./12. # maturity in years
r=0.08 # risk-free rate
rho=0.75 # correlation between 2
sigma1=0.15 # volatility for stock 1
sigma2=0.20 # volatility for stock 1
nSteps=100. # number of steps
nSimulation=1000 # number of simulations
#
# step 1: generate correlated random number
dt =T/nSteps
call = sp.zeros([nSimulation], dtype=float)
x = range(0, int(nSteps), 1)
#
# step 2: call call prices
for j in range(0, nSimulation):
x1=sp.random.normal(size=nSimulation)
x2=sp.random.normal(size=nSimulation)
y1=x1
y2=rho*x1+sp.sqrt(1-rho**2)*x2
sT1=s1
sT2=s2
for i in x[:-1]:
e1=y1[i]
e2=y2[i]
sT1*=sp.exp((r-0.5*sigma1**2)*dt+sigma1*e1*sqrt(dt))
sT2*=sp.exp((r-0.5*sigma2**2)*dt+sigma2*e2*sqrt(dt))
minOf2=min(sT1,sT2)
call[j]=max(minOf2-k,0)
#
# Step 3: summation and discount back
call=sp.mean(call)*sp.exp(-r*T)
print('Rainbow call on minimum of 2 assets = ', round(call,3))
| [
"scipy.sqrt",
"scipy.exp",
"scipy.zeros",
"scipy.random.normal",
"scipy.mean",
"scipy.random.seed"
] | [((287, 306), 'scipy.random.seed', 'sp.random.seed', (['(123)'], {}), '(123)\n', (301, 306), True, 'import scipy as sp\n'), ((815, 851), 'scipy.zeros', 'sp.zeros', (['[nSimulation]'], {'dtype': 'float'}), '([nSimulation], dtype=float)\n', (823, 851), True, 'import scipy as sp\n'), ((953, 987), 'scipy.random.normal', 'sp.random.normal', ([], {'size': 'nSimulation'}), '(size=nSimulation)\n', (969, 987), True, 'import scipy as sp\n'), ((995, 1029), 'scipy.random.normal', 'sp.random.normal', ([], {'size': 'nSimulation'}), '(size=nSimulation)\n', (1011, 1029), True, 'import scipy as sp\n'), ((1385, 1398), 'scipy.mean', 'sp.mean', (['call'], {}), '(call)\n', (1392, 1398), True, 'import scipy as sp\n'), ((1399, 1413), 'scipy.exp', 'sp.exp', (['(-r * T)'], {}), '(-r * T)\n', (1405, 1413), True, 'import scipy as sp\n'), ((1054, 1075), 'scipy.sqrt', 'sp.sqrt', (['(1 - rho ** 2)'], {}), '(1 - rho ** 2)\n', (1061, 1075), True, 'import scipy as sp\n'), ((1205, 1213), 'scipy.sqrt', 'sqrt', (['dt'], {}), '(dt)\n', (1209, 1213), False, 'from scipy import zeros, sqrt, shape\n'), ((1267, 1275), 'scipy.sqrt', 'sqrt', (['dt'], {}), '(dt)\n', (1271, 1275), False, 'from scipy import zeros, sqrt, shape\n')] |
import os
from os.path import join
# ---------------------------------------------------------------------------
# Global configuration for the FATE fuzzing experiments.
# Flags are grouped by research question / fuzzer; invalid flag combinations
# are rejected at import time by the `raise ValueError(...)` guards below.
# ---------------------------------------------------------------------------
INVESTIGATE = False  # Records coverages and saves them. Generates a plot in the end. Do not use with automate.
TEST_OUTSIDE_FUZZER = False  # Runs FATE as standalone (1+1) EA
BLACKBOX = True and TEST_OUTSIDE_FUZZER  # Disables white-box information such as thresholds and feat imp.
FORCE_DEFAULT_EPSILON = True or TEST_OUTSIDE_FUZZER  # Runs all datasets with the default epsilon
FORCE_DEFAULT_MUTATION_CHANCE = False or TEST_OUTSIDE_FUZZER  # Runs all datasets with the default mutation chance
LIMIT_TIME = True  # If false, run 10 times as long
############ FATE Standalone ############
CROSSOVER_CHANCE = 0.001  # Chance that crossover occurs
CROSSOVER_RANDOM_CHANCE = 1.0  # Actual chance for crossover with random features is
# CROSSOVER_CHANCE * CROSSOVER_RANDOM_CHANCE (= 0.001 with these defaults)
NUM_RUNS = 100000000  # Unlimited. Change for smaller amount of runs
POPULATION_SIZE = 1  # Population size.
############ RQ 1 defaults ############
MEASURE_EXEC_P_S = True  # Parse the number of executions per second.
ALLOW_FLOAT_MIS_CLASSIFICATION = True  # If True, do not filter mis-classifications from the produced AE
CONSISTENT_DRAWS = True  # Seeds random with 0, to create consistent check-set draws
FUZZ_ONE_POINT_PER_INSTANCE = True  # compile to generic fuzz target and fuzz per point
USE_CUSTOM_MUTATOR = True  # If False, use the standard mutator of LibFuzzer
USE_CROSSOVER = True and USE_CUSTOM_MUTATOR  # Combines mutation with crossover (split at random location)
USE_GAUSSIAN = True  # Gaussian vs random uniform mutation
USE_PROBABILITY_STEPS_SPECIAL = True  # Proba descent based on small proba diff between 2nd class predicted
PROBA_LIMIT_WITHIN_EPSILON = True  # Only save seeds if within epsilon
WRITE_AE_ONLY_IF_BETTER_OUTSIDE_BRANCHES = True  # Saves execution time
ALWAYS_OPTIMIZE = True  # Otherwise only optimize small files
MUTATE_DEPTH = 7 if TEST_OUTSIDE_FUZZER else 5  # The maximum number of consecutive mutations per seed for LibFuzzer
DEFAULT_EPSILON = 0.1 if TEST_OUTSIDE_FUZZER else 0.2  # Default epsilon
DEFAULT_MUTATE_CHANCE = 0.5 if TEST_OUTSIDE_FUZZER else 0.1  # Chance that a single features is mutated
FUZZER = 'libFuzzer'
# FUZZER = 'AFL++'
# FUZZER = 'honggfuzz'
# FUZZER = 'AFLGo'
FUZZERS = ['libFuzzer', 'AFL++', 'AFLGo', 'honggfuzz']
if FUZZER not in FUZZERS:
    raise ValueError(f'Fuzzer {FUZZER} not recognised, should be one of [{", ".join(FUZZERS)}]')
if FUZZER == 'honggfuzz' and USE_CUSTOM_MUTATOR:
    raise ValueError('Honggfuzz and custom mutator is not supported')
############ RQ 2 defaults ############
AE_MUTATE_TOWARDS_VICTIM = True  # If AE, mutate values only towards victim point.
MUTATE_BIGGEST_CHANCE = 0.5  # When an AE is found, the chance to only mutate all biggest difference fs towards victim
ALSO_MUTATE_BIGGEST = True  # Always mutate all features > the biggest l-inf distance - 0.01. Only with FUZZ_ONE
# These alter the chance that a feature is mutated
BIAS_MUTATE_BIG_DIFFS = True
USE_THRESHOLDS_FOR_MUTATION = True and not BLACKBOX  # move to optimal boundary value after drawing from mutation dist
# Fuzzes for each datapoint with and without AE init
DOUBLE_FUZZ_WITH_AE = True and not (TEST_OUTSIDE_FUZZER or INVESTIGATE)
USE_FEATURE_IMPORTANCE = True and not BLACKBOX  # prioritize more important features for mutation
INITIALIZE_WITH_POINT_IN_BETWEEN = True and DOUBLE_FUZZ_WITH_AE
INITIALIZE_WITH_EXTRA_POINTS_IN_BETWEEN = True and INITIALIZE_WITH_POINT_IN_BETWEEN
if TEST_OUTSIDE_FUZZER and (not FUZZ_ONE_POINT_PER_INSTANCE):
    raise ValueError('Test outside fuzzer conflicting options')
if TEST_OUTSIDE_FUZZER and DOUBLE_FUZZ_WITH_AE and (POPULATION_SIZE < 2 or CROSSOVER_RANDOM_CHANCE > 0.99):
    raise ValueError('Test outside fuzzer double fuzz configuration problem')
############ RQ 1.2 defaults ############
FILTER_BAD_AE = True  # If True, discards all AE that are worse than FAILURE_THRES
FUZZ_ONLY_COV_FOR_FOREST = False  # Only insert coverage-guidance for the lines that belong to the Forest
FUZZ_ONLY_COV_FOR_CHECK = True  # Only insert coverage-guidance for the lines that belong to the objective function
FUZZ_WITHOUT_COVERAGE_GUIDANCE = False  # If True, baseline: removes almost all coverage guidance (except TestOneInput)
if FUZZER == 'AFL++' and FUZZ_WITHOUT_COVERAGE_GUIDANCE:
    raise ValueError('AFL++ crashes because the fuzzer name cannot be set with the -n (no instrument) option')
############ Objective function settings ############
COMBINE_DISTANCE_AND_PROBABILITY = False  # distance = distance + probability
USE_PROBABILITY_STEPS = False  # probability steps in the check function ELSE branch
PROBA_SPECIAL_ALWAYS = False
PROBA_SPECIAL_START_STEP = 0.2
PROBA_SPECIAL_STEP_SIZE = 0.01
WRITE_AE_ALWAYS_IN_IF = False  # Slower option for the objective function
if USE_PROBABILITY_STEPS and USE_PROBABILITY_STEPS_SPECIAL:
    raise ValueError('Select at most one type of probability step')
if WRITE_AE_ALWAYS_IN_IF and WRITE_AE_ONLY_IF_BETTER_OUTSIDE_BRANCHES:
    raise ValueError('Only one write_X can be used on the settings')
############ Fuzzer settings ############
NEVER_OPTIMIZE = False
FORCE_ENTROPIC = False  # libfuzzer. Experimental. Enables entropic power schedule.
NO_ENTROPIC = False
FOCUS_FUNCTION = "0"  # focus_function 0 Experimental. Fuzzing will focus on inputs that trigger calls
# to this function. If -focus_function=auto and -data_flow_trace is used, libFuzzer will choose the
# focus functions automatically.
if sum([FUZZ_WITHOUT_COVERAGE_GUIDANCE, FUZZ_ONLY_COV_FOR_CHECK, FUZZ_ONLY_COV_FOR_FOREST]) > 1:
    raise ValueError('Only one coverage guidance option can be used at the same time')
if NEVER_OPTIMIZE and ALWAYS_OPTIMIZE:
    raise ValueError('Conflicting optimize options')
############ AFL settings ############
# TIME_NO_NEW_COV = 10
IS_AE_CHANCE = 0.5  # Because we cannot access the fuzzer logic in the mutator
NUM_CYCLES_IN_LOOP = 1000  # Number of consecutive iterations after which we start with a clean sheet
AFL_USE_DICT = True and not USE_CUSTOM_MUTATOR
AFL_USE_CMP_LOG = False and not USE_CUSTOM_MUTATOR
ENABLE_DETERMINISTIC = False
SKIP_DETERMINISTIC = False
# see docs/power_schedules.md
AFL_SCHEDULE = None  # one of fast(default, use None), explore, exploit, seek, rare, mmopt, coe, lin, quad
# AFL generic
AFL_MUTATE_FILENAME = "afl_mutation.cc"
AFL_OUTPUT_DIR = "afl_out"
# AFL++
AFLPP_DICT_PATH = join(os.getcwd(), 'afl_dict')
AFLPP_TEMPLATE_PATH = "templates/aflpp.jinja2"
MUTATE_TEMPLATE_PATH = "templates/mutate.jinja2"
AFLPP_COMPILER_PATH = "afl-clang-lto++"
# AFLPP_COMPILER_PATH = "afl-clang-fast++"
# AFLGo
AFL_GO_COMPILER_PATH = "/home/cas/AFLGo/afl-clang-fast++"
AFL_GO_FUZZ_PATH = "/home/cas/AFLGo/afl-fuzz"
AFL_GO_GEN_DIST_PATH = "/home/cas/AFLGo/scripts/gen_distance_fast.py"
AFL_GO_TARGETS_FILE = 'BBtargets.txt'
AFLGO_TEMPLATE_PATH = "templates/aflgo.jinja2"
############ honggfuzz settings ############
HONG_COMPILER_PATH = "/home/cas/honggfuzz/hfuzz_cc/hfuzz-clang++"
HONG_FUZZER_PATH = "/home/cas/honggfuzz/honggfuzz"
HONG_OUTPUT_DIR = "hongg_out"
############ Mutation settings ############
MINIMIZE_THRESHOLD_LIST = False  # Removes all thresholds within 0.0001 from each other
IS_AE_FAKE = False  # Fakes the model query if the current input is an AE
USE_WAS_AE = False  # Saves the result of the last known model query
STEEP_CURVE = False  # If True, square the draw from the gaussian distribution, such that smaller draws are more likely
# feature importance is calculated by its occurrence
FEATURE_IMPORTANCE_BASED_ON_OCCURRENCE = False and USE_FEATURE_IMPORTANCE
MUTATE_LESS_WHEN_CLOSER = False  # When True, multiplies mutation with largest diff between fuzzed and victim.
# as splitting threshold in the forest. Cannot be true together with AE_MUTATE_TOWARDS_VICTIM
# AE_CHECK_IN_MUTATE: whether the custom mutator needs to know if the current
# seed is already an AE (required by any of the mutation strategies below).
AE_CHECK_IN_MUTATE = (ALSO_MUTATE_BIGGEST or BIAS_MUTATE_BIG_DIFFS or USE_THRESHOLDS_FOR_MUTATION or
                      AE_MUTATE_TOWARDS_VICTIM or MUTATE_LESS_WHEN_CLOSER) and FUZZ_ONE_POINT_PER_INSTANCE \
                     and FUZZER != 'AFL++'
if MUTATE_LESS_WHEN_CLOSER and AE_MUTATE_TOWARDS_VICTIM:
    raise ValueError('Mutate less and AE mutate towards original cannot be used together')
############ AE init ############
# k-ANN structure
ANN_TREES = 10  # the amount of trees for the "annoy" lookup
K_ANN = 10  # how many nearest neighbours to find
NO_SEED_INIT = False  # When True, each run is only seeded with all-0 features. No input is not possible, because
# The custom mutator would otherwise break.
INITIALIZE_WITH_AE = False  # use ANN to seed with K_ANN closest data-points from other classes
INITIALIZE_WITH_AVG_OPPOSITE = False  # For binary-classification: seed with average member of the other class
INITIALIZE_WITH_POINT_IN_BETWEEN = INITIALIZE_WITH_POINT_IN_BETWEEN or \
                                   (True and INITIALIZE_WITH_AE)
INITIALIZE_WITH_EXTRA_POINTS_IN_BETWEEN = INITIALIZE_WITH_EXTRA_POINTS_IN_BETWEEN or \
                                          (True and INITIALIZE_WITH_POINT_IN_BETWEEN)
INITIALIZE_WITH_FULL_TRAIN_SET = False  # Put all instances of other class from test set in corpus.
if INITIALIZE_WITH_FULL_TRAIN_SET and (INITIALIZE_WITH_AE or DOUBLE_FUZZ_WITH_AE):
    raise ValueError('INITIALIZE_WITH_FULL_TRAIN_SET cannot be used with INITIALIZE_WITH_AE or DOUBLE_FUZZ_WITH_AE')
if sum([INITIALIZE_WITH_AE, INITIALIZE_WITH_AVG_OPPOSITE, INITIALIZE_WITH_FULL_TRAIN_SET]) > 1:
    raise ValueError('Conflicting initialize options')
############ Testing ############
DEBUG = False  # If True, shows output and runs 1 sample with 1 thread only.
MEASURE_COVERAGE = False  # Measure coverage through instrumentation, costs exec/s
SKIP_COMPILATION = False
COMPILE_ONLY = False
PRINT_NUMBER_OF_LEAVES = False  # Estimate for model size
INVESTIGATE_WITH_SCATTER = False and INVESTIGATE  # Shows a scatter plot instead of a line plot when INVESTIGATE
NUM_INVESTIGATE_RUNS = 5  # The number of repetitions for creating plots.
FAILURE_THRES = 0.9  # See FILTER_BAD_AE
SHOW_OUTPUT = False or DEBUG  # Shows fuzzer output
CREATE_LOOKUP = False or INITIALIZE_WITH_AE or INITIALIZE_WITH_AVG_OPPOSITE or INVESTIGATE \
                or INITIALIZE_WITH_FULL_TRAIN_SET or DOUBLE_FUZZ_WITH_AE
if DEBUG and MEASURE_EXEC_P_S:
    raise ValueError('Debug and measure exec/s cannot be used at the same time')
if INVESTIGATE and DOUBLE_FUZZ_WITH_AE:
    raise ValueError('Double fuzz together with investigate should not be used.')
NUM_DEBUG = 1
NUM_THREADS = 10 if not DEBUG else NUM_DEBUG  # Number of simultaneous fuzzing instances, but is also
# Used for training the ensembles, the MILP attack and the lt-attack (Zhang)
NUM_ADV_SUPER_QUICK = 10  # The number of victims to attack for runs with the -qq flag.
NUM_ADV_QUICK = 50  # The number of victims to attack for runs with the -q flag.
NUM_ADV_CHECKS = 500 if not DEBUG else NUM_DEBUG  # number of adversarial victims
MAX_POINTS_LOOKUP = 5000  # The AE lookup will be created over this amount of training samples maximum
DEFAULT_TIME_PER_POINT = 1  # The default fuzzing time per datapoint
MODEL_TYPES = ['RF', 'GB']  # the identifiers of the model types (Random Forest, Gradient Boosting)
DISTANCE_NORMS = ['l_0', 'l_1', 'l_2', 'l_inf']
DISTANCE_NORM = 'l_inf'  # the norm to calculate the distance in the fuzzer
if DISTANCE_NORM not in DISTANCE_NORMS:
    raise ValueError(f'Norm {DISTANCE_NORM} not recognised, should be one of [{", ".join(DISTANCE_NORMS)}]')
DISTANCE_STEPS = [round(0.005 * i, 3) for i in reversed(range(1, 201))]  # [1.0, 0.995, ..., 0.005]
# DISTANCE_STEPS = [round(0.001 * i, 3) for i in reversed(range(1, 1001))]   # [1.0, 0.999, ..., 0.001]
# DISTANCE_STEPS = [round(0.01 * i, 2) for i in reversed(range(1, 101))]  # [1.0, 0.99, ..., 0.01]
# DISTANCE_STEPS = [round(0.1 * i, 1) for i in reversed(range(1, 11))]  # [1.0, 0.99, ..., 0.01]
# DISTANCE_STEPS = [0.8, 0.7, 0.6] \
#                  + [round(0.01 * i, 2) for i in reversed(range(11, 51))] \
#                  + [round(0.001 * i, 3) for i in reversed(range(1, 101))]  # Decreasing
# DISTANCE_STEPS = [0.8, 0.7] \
#                  + [round(0.01 * i, 2) for i in reversed(range(25, 70))] \
#                  + [round(0.001 * i, 3) for i in reversed(range(20, 250))] \
#                  + [round(0.0001 * i, 4) for i in reversed(range(1, 200))]  # Decreasing very small
DISTANCE_STEPS.append(0.000001)
PROBABILITY_STEPS = [round(0.01 * i, 2) for i in reversed(range(1, 51))]  # [1.0, 0.99, ..., 0.01]
# PROBABILITY_STEPS = [0.8, 0.7, 0.6] \
#                     + [round(0.01 * i, 2) for i in reversed(range(1, 51))]  # [0.8, 0.7, ..., 0.5, 0.49...]
# PROBABILITY_STEPS = [round(0.5 + 0.05 * i, 2) for i in reversed(range(1, 11))] \
#                     + [round(0.2 + 0.01 * i, 2) for i in reversed(range(1, 31))] \
#                     + [round(0.005 * i, 3) for i in reversed(range(1, 41))]
# Directories, all relative to main folder (code)
CHECK_DIR = "python/.CHECK"
IMAGE_DIR = 'python/img'
RESULTS_DIR = "python/.RESULTS"
COVERAGES_DIR = "python/.COVERAGES"
MODEL_DIR = "python/models"
JSON_DIR = join(MODEL_DIR, 'json')
DATA_DIR = "python/data"
LIB_SVM_DIR = join(DATA_DIR, 'libsvm')
OPEN_ML_DIR = join(DATA_DIR, 'openml')
ZHANG_DATA_DIR = join(DATA_DIR, 'zhang')
CORPUS_DIR = ".GENERATED_CORPUS"
ADV_DIR = ".ADVERSARIAL_EXAMPLES"
ZHANG_CONFIG_DIR = ".ZHANG_CONFIGS"
# Files
NUM_FEATURES_PATH = ".num_features"
LIBFUZZER_TEMPLATE_PATH = "templates/libfuzzer.jinja2"
OUTPUT_FILE = "fuzzme.cc"
# WARNING, run with --reload (once for each dataset) after changing these.
DEFAULT_LEARNING_RATE_GB = 0.1
TEST_FRACTION = 0.2
NUM_SAMPLES = 2500  # For synthetic datasets
# Better not change these
SMALL_PERTURBATION_THRESHOLD = 0.00001
THRESHOLD_DIGITS = 7
BYTES_PER_FEATURE = 8  # float = 4, double = 8
TIME_PRECISION = 4
def get_num_adv(): return NUM_ADV_CHECKS
| [
"os.path.join",
"os.getcwd"
] | [((13058, 13081), 'os.path.join', 'join', (['MODEL_DIR', '"""json"""'], {}), "(MODEL_DIR, 'json')\n", (13062, 13081), False, 'from os.path import join\n'), ((13121, 13145), 'os.path.join', 'join', (['DATA_DIR', '"""libsvm"""'], {}), "(DATA_DIR, 'libsvm')\n", (13125, 13145), False, 'from os.path import join\n'), ((13160, 13184), 'os.path.join', 'join', (['DATA_DIR', '"""openml"""'], {}), "(DATA_DIR, 'openml')\n", (13164, 13184), False, 'from os.path import join\n'), ((13202, 13225), 'os.path.join', 'join', (['DATA_DIR', '"""zhang"""'], {}), "(DATA_DIR, 'zhang')\n", (13206, 13225), False, 'from os.path import join\n'), ((6405, 6416), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6414, 6416), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring,unused-import,reimported
import io
import json
import pytest # type: ignore
import xmllint_map_html.cli as cli
import xmllint_map_html.xmllint_map_html as xmh
def test_main_ok_minimal(capsys):
    """The CLI returns 0 and prints an empty report for a single empty argument."""
    argv = ['']
    expected_report = ''
    assert cli.main(argv=argv) == 0
    captured_out, _err = capsys.readouterr()
    assert captured_out.strip() == expected_report.strip()
| [
"xmllint_map_html.cli.main"
] | [((308, 326), 'xmllint_map_html.cli.main', 'cli.main', ([], {'argv': 'job'}), '(argv=job)\n', (316, 326), True, 'import xmllint_map_html.cli as cli\n')] |
"""Test is_boundary_not_x_monotone method in WeightedPointBoundary."""
# Standard
from typing import List, Any
from random import randint
# Models
from voronoi_diagrams.models import (
WeightedSite,
WeightedPointBisector,
WeightedPointBoundary,
)
# Math
from decimal import Decimal
class TestWeightedPointBoundaryIsBoundaryConcaveToY:
    """Test WeightedPointBoundary.is_boundary_not_x_monotone for several site layouts."""

    @staticmethod
    def _make_boundaries(p: WeightedSite, q: WeightedSite):
        """Build the (plus, minus) boundaries of the bisector between sites p and q.

        Extracted because all three tests repeated the same set-up verbatim.
        """
        bisector = WeightedPointBisector(sites=(p, q))
        boundary_plus = WeightedPointBoundary(bisector=bisector, sign=True)
        boundary_minus = WeightedPointBoundary(bisector=bisector, sign=False)
        return boundary_plus, boundary_minus

    def test_with_concave_to_y_boundary(self):
        """Test with a boundary that is concave to y."""
        p = WeightedSite(Decimal(-20), Decimal(10), Decimal(2))
        # q is the one in the top.
        q = WeightedSite(Decimal(-5), Decimal(10), Decimal(7))
        boundary_plus, boundary_minus = self._make_boundaries(p, q)
        assert not boundary_plus.is_boundary_not_x_monotone()
        assert boundary_minus.is_boundary_not_x_monotone()

    def test_with_normal_boundary(self):
        """Test with a boundary that is not concave to y."""
        p = WeightedSite(Decimal(-20), Decimal(10), Decimal(2))
        # q is the one in the top.
        q = WeightedSite(Decimal(-8), Decimal(18), Decimal(7))
        boundary_plus, boundary_minus = self._make_boundaries(p, q)
        assert not boundary_plus.is_boundary_not_x_monotone()
        assert not boundary_minus.is_boundary_not_x_monotone()

    def test_with_stopped_boundary(self):
        """Test with a boundary that is not concave to y."""
        p = WeightedSite(Decimal(-20), Decimal(10), Decimal(2))
        # q is the one in the top.
        q = WeightedSite(Decimal(-5), Decimal(15), Decimal(7))
        boundary_plus, boundary_minus = self._make_boundaries(p, q)
        assert not boundary_plus.is_boundary_not_x_monotone()
        assert not boundary_minus.is_boundary_not_x_monotone()
| [
"voronoi_diagrams.models.WeightedPointBisector",
"voronoi_diagrams.models.WeightedPointBoundary",
"decimal.Decimal"
] | [((661, 696), 'voronoi_diagrams.models.WeightedPointBisector', 'WeightedPointBisector', ([], {'sites': '(p, q)'}), '(sites=(p, q))\n', (682, 696), False, 'from voronoi_diagrams.models import WeightedSite, WeightedPointBisector, WeightedPointBoundary\n'), ((721, 772), 'voronoi_diagrams.models.WeightedPointBoundary', 'WeightedPointBoundary', ([], {'bisector': 'bisector', 'sign': '(True)'}), '(bisector=bisector, sign=True)\n', (742, 772), False, 'from voronoi_diagrams.models import WeightedSite, WeightedPointBisector, WeightedPointBoundary\n'), ((798, 850), 'voronoi_diagrams.models.WeightedPointBoundary', 'WeightedPointBoundary', ([], {'bisector': 'bisector', 'sign': '(False)'}), '(bisector=bisector, sign=False)\n', (819, 850), False, 'from voronoi_diagrams.models import WeightedSite, WeightedPointBisector, WeightedPointBoundary\n'), ((1256, 1291), 'voronoi_diagrams.models.WeightedPointBisector', 'WeightedPointBisector', ([], {'sites': '(p, q)'}), '(sites=(p, q))\n', (1277, 1291), False, 'from voronoi_diagrams.models import WeightedSite, WeightedPointBisector, WeightedPointBoundary\n'), ((1316, 1367), 'voronoi_diagrams.models.WeightedPointBoundary', 'WeightedPointBoundary', ([], {'bisector': 'bisector', 'sign': '(True)'}), '(bisector=bisector, sign=True)\n', (1337, 1367), False, 'from voronoi_diagrams.models import WeightedSite, WeightedPointBisector, WeightedPointBoundary\n'), ((1393, 1445), 'voronoi_diagrams.models.WeightedPointBoundary', 'WeightedPointBoundary', ([], {'bisector': 'bisector', 'sign': '(False)'}), '(bisector=bisector, sign=False)\n', (1414, 1445), False, 'from voronoi_diagrams.models import WeightedSite, WeightedPointBisector, WeightedPointBoundary\n'), ((1856, 1891), 'voronoi_diagrams.models.WeightedPointBisector', 'WeightedPointBisector', ([], {'sites': '(p, q)'}), '(sites=(p, q))\n', (1877, 1891), False, 'from voronoi_diagrams.models import WeightedSite, WeightedPointBisector, WeightedPointBoundary\n'), ((1916, 1967), 
'voronoi_diagrams.models.WeightedPointBoundary', 'WeightedPointBoundary', ([], {'bisector': 'bisector', 'sign': '(True)'}), '(bisector=bisector, sign=True)\n', (1937, 1967), False, 'from voronoi_diagrams.models import WeightedSite, WeightedPointBisector, WeightedPointBoundary\n'), ((1993, 2045), 'voronoi_diagrams.models.WeightedPointBoundary', 'WeightedPointBoundary', ([], {'bisector': 'bisector', 'sign': '(False)'}), '(bisector=bisector, sign=False)\n', (2014, 2045), False, 'from voronoi_diagrams.models import WeightedSite, WeightedPointBisector, WeightedPointBoundary\n'), ((505, 517), 'decimal.Decimal', 'Decimal', (['(-20)'], {}), '(-20)\n', (512, 517), False, 'from decimal import Decimal\n'), ((519, 530), 'decimal.Decimal', 'Decimal', (['(10)'], {}), '(10)\n', (526, 530), False, 'from decimal import Decimal\n'), ((532, 542), 'decimal.Decimal', 'Decimal', (['(2)'], {}), '(2)\n', (539, 542), False, 'from decimal import Decimal\n'), ((604, 615), 'decimal.Decimal', 'Decimal', (['(-5)'], {}), '(-5)\n', (611, 615), False, 'from decimal import Decimal\n'), ((617, 628), 'decimal.Decimal', 'Decimal', (['(10)'], {}), '(10)\n', (624, 628), False, 'from decimal import Decimal\n'), ((630, 640), 'decimal.Decimal', 'Decimal', (['(7)'], {}), '(7)\n', (637, 640), False, 'from decimal import Decimal\n'), ((1100, 1112), 'decimal.Decimal', 'Decimal', (['(-20)'], {}), '(-20)\n', (1107, 1112), False, 'from decimal import Decimal\n'), ((1114, 1125), 'decimal.Decimal', 'Decimal', (['(10)'], {}), '(10)\n', (1121, 1125), False, 'from decimal import Decimal\n'), ((1127, 1137), 'decimal.Decimal', 'Decimal', (['(2)'], {}), '(2)\n', (1134, 1137), False, 'from decimal import Decimal\n'), ((1199, 1210), 'decimal.Decimal', 'Decimal', (['(-8)'], {}), '(-8)\n', (1206, 1210), False, 'from decimal import Decimal\n'), ((1212, 1223), 'decimal.Decimal', 'Decimal', (['(18)'], {}), '(18)\n', (1219, 1223), False, 'from decimal import Decimal\n'), ((1225, 1235), 'decimal.Decimal', 'Decimal', (['(7)'], 
{}), '(7)\n', (1232, 1235), False, 'from decimal import Decimal\n'), ((1700, 1712), 'decimal.Decimal', 'Decimal', (['(-20)'], {}), '(-20)\n', (1707, 1712), False, 'from decimal import Decimal\n'), ((1714, 1725), 'decimal.Decimal', 'Decimal', (['(10)'], {}), '(10)\n', (1721, 1725), False, 'from decimal import Decimal\n'), ((1727, 1737), 'decimal.Decimal', 'Decimal', (['(2)'], {}), '(2)\n', (1734, 1737), False, 'from decimal import Decimal\n'), ((1799, 1810), 'decimal.Decimal', 'Decimal', (['(-5)'], {}), '(-5)\n', (1806, 1810), False, 'from decimal import Decimal\n'), ((1812, 1823), 'decimal.Decimal', 'Decimal', (['(15)'], {}), '(15)\n', (1819, 1823), False, 'from decimal import Decimal\n'), ((1825, 1835), 'decimal.Decimal', 'Decimal', (['(7)'], {}), '(7)\n', (1832, 1835), False, 'from decimal import Decimal\n')] |
from europython import hello
hello("Alisa")  # call the package's hello entry point with the name "Alisa"
| [
"europython.hello"
] | [((29, 43), 'europython.hello', 'hello', (['"""Alisa"""'], {}), "('Alisa')\n", (34, 43), False, 'from europython import hello\n')] |
#!/usr/bin/env python3
import sys
import csv
import datetime
import math
from tabulate import tabulate
import scipy.stats as st
from tqdm import tqdm
import numpy as np
np.seterr(all='ignore')
def isfloat(val):
    """Return True if *val* parses as a float and is not NaN.

    Accepts anything ``float()`` accepts (numeric strings, ints, ...).
    NaN is rejected because the downstream statistics cannot handle it.
    """
    try:
        val = float(val)
    except (TypeError, ValueError):
        # Not numeric at all (e.g. arbitrary text or None).  The original used a
        # bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
        return False
    return not math.isnan(val)
class Describe:
def __init__(self, filename):
self.filename = filename
self.content = []
self.listed = {}
self.mean = {}
self.count = {}
self.columns = []
self.min = {}
self.max = {}
self.std = {}
self.Q25 = {}
self.Q50 = {}
self.Q75 = {}
self.iqr = {}
self.range = {}
self.best_dist = {}
self.dist_params = {}
self.dist_pval = {}
    def ReadFile(self):
        """Parse the CSV file into self.content (row dicts) and self.listed (column lists).

        Numeric fields are stored as floats; a 'Birthday' column in
        'YYYY-MM-DD' form is converted to a POSIX timestamp.  The 'Index'
        column and all other non-numeric fields are dropped.
        """
        with open(self.filename, 'r') as file:
            coco = csv.DictReader(file)
            for row in coco:
                # The index column carries no statistical information.
                del row['Index']
                newrow = {}
                for k, v in row.items():
                    if isfloat(v):
                        newrow[k] = float(v)
                        if k not in self.listed.keys():
                            self.listed[k] = [float(v)]
                        else:
                            self.listed[k] += [float(v)]
                    elif k == 'Birthday':
                        # Convert 'YYYY-MM-DD' to a timestamp so it can be
                        # treated as just another numeric column.
                        split = v.split('-')
                        year, month, day = int(split[0]), int(split[1]), int(split[2])
                        newrow[k] = datetime.datetime(year, month, day, 0, 0).timestamp()
                        if k not in self.listed.keys():
                            self.listed[k] = [newrow[k]]
                        else:
                            self.listed[k] += [newrow[k]]
                self.content += [newrow]
def FilterNumerics(self):
for k, v in self.content[0].items():
try:
float(v)
self.columns += [k]
self.mean[k] = 0
self.count[k] = 0
self.std[k] = 0
self.min[k] = 0
self.max[k] = 0
except:
pass
def GetCount(self):
for x in self.content:
for k, v in x.items():
self.count[k] += 1
def GetMean(self):
for x in self.content:
for k, v in x.items():
self.mean[k] += v / self.count[k]
def GetStd(self):
for x in self.content:
for k, v in x.items():
self.std[k] += (v - self.mean[k]) ** 2 / self.count[k]
for k, v in self.std.items():
self.std[k] = math.sqrt(self.std[k])
def GetQMinMax(self):
for k in self.listed.keys():
self.listed[k] = sorted(self.listed[k])
if self.listed[k] != []:
self.min[k] = self.listed[k][0]
self.max[k] = self.listed[k][-1]
self.range[k] = self.max[k] - self.min[k]
else:
continue
L25 = (self.count[k] + 1) * 0.25
L50 = (self.count[k] + 1) * 0.5
L75 = (self.count[k] + 1) * 0.75
try:
P25 = self.listed[k][int(L25)] + (L25 - int(L25)) * (self.listed[k][int(L25) + 1] - self.listed[k][int(L25)])
P50 = self.listed[k][int(L50)] + (L50 - int(L50)) * (self.listed[k][int(L50) + 1] - self.listed[k][int(L25)])
P75 = self.listed[k][int(L75)] + (L75 - int(L75)) * (self.listed[k][int(L75) + 1] - self.listed[k][int(L25)])
except:
P25 = self.listed[k][0]
P50 = self.listed[k][0]
P75 = self.listed[k][0]
self.Q25[k] = P25
self.Q50[k] = P50
self.Q75[k] = P75
self.iqr[k] = P75 - P25
def get_best_distribution(self):
dist_names = ["norm", "exponweib", "weibull_max", "weibull_min", "pareto", "genextreme"]
dist_results = []
params = {}
with tqdm(total=len(self.listed.keys()) * len(dist_names)) as tq:
for k in self.listed.keys():
for dist_name in dist_names:
dist = getattr(st, dist_name)
param = dist.fit(self.listed[k])
params[dist_name] = param
# Applying the Kolmogorov-Smirnov test
D, p = st.kstest(self.listed[k], dist_name, args=param)
dist_results.append((dist_name, p))
tq.update(1)
# select the best fitted distribution
best_dist, best_p = (max(dist_results, key=lambda item: item[1]))
self.best_dist[k] = best_dist
self.dist_params[k] = params[dist_name]
self.dist_pval[k] = best_p
def Describe(self):
self.GetCount()
self.GetMean()
self.GetStd()
self.GetQMinMax()
if len(sys.argv) > 2 and sys.argv[2] == "-dist":
self.get_best_distribution()
def Print(self):
self.columns = sorted(self.columns)
if len(sys.argv) > 2 and sys.argv[2] == "-dist":
i = 0
for k, v in self.best_dist.items():
self.columns[i] += '\n(' + v + ')'
i += 1
self.mean = {k: v for k, v in sorted(self.mean.items(), key=lambda item: item[0])}
self.count = {k: v for k, v in sorted(self.count.items(), key=lambda item: item[0])}
self.min = {k: v for k, v in sorted(self.min.items(), key=lambda item: item[0])}
self.max = {k: v for k, v in sorted(self.max.items(), key=lambda item: item[0])}
self.std = {k: v for k, v in sorted(self.std.items(), key=lambda item: item[0])}
self.Q25 = {k: v for k, v in sorted(self.Q25.items(), key=lambda item: item[0])}
self.Q50 = {k: v for k, v in sorted(self.Q50.items(), key=lambda item: item[0])}
self.Q75 = {k: v for k, v in sorted(self.Q75.items(), key=lambda item: item[0])}
self.iqr = {k: v for k, v in sorted(self.iqr.items(), key=lambda item: item[0])}
self.range = {k: v for k, v in sorted(self.range.items(), key=lambda item: item[0])}
self.best_dist = {k: v for k, v in sorted(self.best_dist.items(), key=lambda item: item[0])}
columns = [''] + self.columns
print(tabulate([
['Count'] + list(self.count.values()),
['Mean'] + list(self.mean.values()),
['Std'] + list(self.std.values()),
['Min'] + list(self.min.values()),
['25%'] + list(self.Q25.values()),
['50%'] + list(self.Q50.values()),
['75%'] + list(self.Q75.values()),
['Max'] + list(self.max.values()),
['IQR'] + list(self.iqr.values()),
['Range'] + list(self.range.values())], headers=columns, tablefmt='plain', floatfmt=".6f"))
#print(tabulate([
# ['Distribution'] + list(self.best_dist.values())], headers=columns, tablefmt='plain', floatfmt=".6f"))
def ConvertBirthday(self):
start = datetime.datetime.fromtimestamp(0)
self.mean['Birthday'] = datetime.datetime.fromtimestamp(self.mean['Birthday']).strftime('%Y-%m-%d')
self.std['Birthday'] = str((datetime.datetime.fromtimestamp(self.std['Birthday']) - start).days) + '(d)'
self.min['Birthday'] = datetime.datetime.fromtimestamp(self.min['Birthday']).strftime('%Y-%m-%d')
self.max['Birthday'] = datetime.datetime.fromtimestamp(self.max['Birthday']).strftime('%Y-%m-%d')
self.Q25['Birthday'] = datetime.datetime.fromtimestamp(self.Q25['Birthday']).strftime('%Y-%m-%d')
self.Q50['Birthday'] = datetime.datetime.fromtimestamp(self.Q50['Birthday']).strftime('%Y-%m-%d')
self.Q75['Birthday'] = datetime.datetime.fromtimestamp(self.Q75['Birthday']).strftime('%Y-%m-%d')
self.iqr['Birthday'] = str((datetime.datetime.fromtimestamp(self.iqr['Birthday']) - start).days) + '(d)'
self.range['Birthday'] = str((datetime.datetime.fromtimestamp(self.range['Birthday']) - start).days) + '(d)'
pass
def __call__(self):
self.ReadFile()
self.FilterNumerics()
self.Describe()
self.ConvertBirthday()
self.Print()
def main():
    """Entry point: run the Describe analyzer on the CSV named in argv[1]."""
    analyzer = Describe(sys.argv[1])
    analyzer()
def CheckArgs():
    """Validate command-line arguments.

    Prints a usage message to stderr and exits with status 1 when the
    dataset path is missing (previously `exit()` reported success with
    exit code 0 and the usage went to stdout).
    """
    if len(sys.argv) < 2:
        print(f"Usage: {__file__} <dataset_name.csv> <flags>", file=sys.stderr)
        sys.exit(1)
# Script entry point: validate CLI arguments, then run the analysis.
if __name__ == '__main__':
    CheckArgs()
    main()
| [
"datetime.datetime",
"csv.DictReader",
"datetime.datetime.fromtimestamp",
"scipy.stats.kstest",
"math.sqrt",
"numpy.seterr",
"math.isnan"
] | [((175, 198), 'numpy.seterr', 'np.seterr', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (184, 198), True, 'import numpy as np\n'), ((263, 278), 'math.isnan', 'math.isnan', (['val'], {}), '(val)\n', (273, 278), False, 'import math\n'), ((7153, 7187), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['(0)'], {}), '(0)\n', (7184, 7187), False, 'import datetime\n'), ((923, 943), 'csv.DictReader', 'csv.DictReader', (['file'], {}), '(file)\n', (937, 943), False, 'import csv\n'), ((2712, 2734), 'math.sqrt', 'math.sqrt', (['self.std[k]'], {}), '(self.std[k])\n', (2721, 2734), False, 'import math\n'), ((7220, 7274), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["self.mean['Birthday']"], {}), "(self.mean['Birthday'])\n", (7251, 7274), False, 'import datetime\n'), ((7440, 7493), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["self.min['Birthday']"], {}), "(self.min['Birthday'])\n", (7471, 7493), False, 'import datetime\n'), ((7546, 7599), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["self.max['Birthday']"], {}), "(self.max['Birthday'])\n", (7577, 7599), False, 'import datetime\n'), ((7652, 7705), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["self.Q25['Birthday']"], {}), "(self.Q25['Birthday'])\n", (7683, 7705), False, 'import datetime\n'), ((7758, 7811), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["self.Q50['Birthday']"], {}), "(self.Q50['Birthday'])\n", (7789, 7811), False, 'import datetime\n'), ((7864, 7917), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["self.Q75['Birthday']"], {}), "(self.Q75['Birthday'])\n", (7895, 7917), False, 'import datetime\n'), ((4459, 4507), 'scipy.stats.kstest', 'st.kstest', (['self.listed[k]', 'dist_name'], {'args': 'param'}), '(self.listed[k], dist_name, args=param)\n', (4468, 4507), True, 'import scipy.stats as st\n'), ((7332, 7385), 
'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["self.std['Birthday']"], {}), "(self.std['Birthday'])\n", (7363, 7385), False, 'import datetime\n'), ((7975, 8028), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["self.iqr['Birthday']"], {}), "(self.iqr['Birthday'])\n", (8006, 8028), False, 'import datetime\n'), ((8090, 8145), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["self.range['Birthday']"], {}), "(self.range['Birthday'])\n", (8121, 8145), False, 'import datetime\n'), ((1564, 1605), 'datetime.datetime', 'datetime.datetime', (['year', 'month', 'day', '(0)', '(0)'], {}), '(year, month, day, 0, 0)\n', (1581, 1605), False, 'import datetime\n')] |
import os
import shutil
from modulefinder import ModuleFinder
def main():
    """Bundle index.py, notifier.py and their site-packages dependencies
    into notify-github-release.zip.

    NOTE(review): the zip name suggests a deployment package for a
    GitHub-release notifier (e.g. a serverless function) — confirm.
    """
    # Start from a clean staging directory.
    temp_dir = "package_temp"
    if os.path.exists(temp_dir):
        shutil.rmtree(temp_dir)
    os.makedirs(temp_dir)
    # Copy the two entry scripts into the staging directory.
    for py in ["index.py", "notifier.py"]:
        src, dst = py, os.path.join(temp_dir, py)
        print("copy '%s' to '%s'" % (src, dst))
        shutil.copy(src, dst)
    # Statically analyse index.py to discover the modules it imports.
    print("analysing modules ...")
    finder = ModuleFinder()
    finder.run_script("index.py")
    module_paths = set()
    for name, mod in finder.modules.items():
        # Only bundle third-party packages installed under site-packages.
        if mod.__path__ and "site-packages" in mod.__path__[0]:
            path = mod.__path__[0]
            # Climb up to the top-level package directory directly under
            # site-packages (subpackages report their own deeper path).
            while os.path.basename(os.path.dirname(path)) != "site-packages":
                path = os.path.dirname(path)
            if path not in module_paths:
                src, dst = path, os.path.join(temp_dir, os.path.basename(path))
                print("copy '%s' from '%s' to '%s'" % (name, src, dst))
                shutil.copytree(src, dst, ignore=shutil.ignore_patterns("__pycache__", "*.pyc"))
                module_paths.add(path)
    # Zip the staging directory, replacing any previous archive.
    zip_file = "notify-github-release"
    print("zipping %s to %s.zip ..." % (temp_dir, zip_file))
    if os.path.exists(zip_file + ".zip"):
        os.remove(zip_file + ".zip")
    shutil.make_archive(zip_file, 'zip', temp_dir)
    # Clean up the staging directory.
    if os.path.exists(temp_dir):
        shutil.rmtree(temp_dir)
    print("done")
print("done")
# Script entry point.
if __name__ == '__main__':
    main()
| [
"os.path.exists",
"modulefinder.ModuleFinder",
"shutil.make_archive",
"os.makedirs",
"shutil.ignore_patterns",
"os.path.join",
"os.path.dirname",
"os.path.basename",
"shutil.copy",
"shutil.rmtree",
"os.remove"
] | [((113, 137), 'os.path.exists', 'os.path.exists', (['temp_dir'], {}), '(temp_dir)\n', (127, 137), False, 'import os\n'), ((175, 196), 'os.makedirs', 'os.makedirs', (['temp_dir'], {}), '(temp_dir)\n', (186, 196), False, 'import os\n'), ((418, 432), 'modulefinder.ModuleFinder', 'ModuleFinder', ([], {}), '()\n', (430, 432), False, 'from modulefinder import ModuleFinder\n'), ((1197, 1230), 'os.path.exists', 'os.path.exists', (["(zip_file + '.zip')"], {}), "(zip_file + '.zip')\n", (1211, 1230), False, 'import os\n'), ((1273, 1319), 'shutil.make_archive', 'shutil.make_archive', (['zip_file', '"""zip"""', 'temp_dir'], {}), "(zip_file, 'zip', temp_dir)\n", (1292, 1319), False, 'import shutil\n'), ((1328, 1352), 'os.path.exists', 'os.path.exists', (['temp_dir'], {}), '(temp_dir)\n', (1342, 1352), False, 'import os\n'), ((147, 170), 'shutil.rmtree', 'shutil.rmtree', (['temp_dir'], {}), '(temp_dir)\n', (160, 170), False, 'import shutil\n'), ((347, 368), 'shutil.copy', 'shutil.copy', (['src', 'dst'], {}), '(src, dst)\n', (358, 368), False, 'import shutil\n'), ((1240, 1268), 'os.remove', 'os.remove', (["(zip_file + '.zip')"], {}), "(zip_file + '.zip')\n", (1249, 1268), False, 'import os\n'), ((1362, 1385), 'shutil.rmtree', 'shutil.rmtree', (['temp_dir'], {}), '(temp_dir)\n', (1375, 1385), False, 'import shutil\n'), ((264, 290), 'os.path.join', 'os.path.join', (['temp_dir', 'py'], {}), '(temp_dir, py)\n', (276, 290), False, 'import os\n'), ((738, 759), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (753, 759), False, 'import os\n'), ((672, 693), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (687, 693), False, 'import os\n'), ((857, 879), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (873, 879), False, 'import os\n'), ((1002, 1048), 'shutil.ignore_patterns', 'shutil.ignore_patterns', (['"""__pycache__"""', '"""*.pyc"""'], {}), "('__pycache__', '*.pyc')\n", (1024, 1048), False, 'import shutil\n')] |
import numpy as np
def flip_axis(x_in, axis):
    """Flip every sample of the batch *x_in* along the sample axis *axis*.

    Axis 0 of ``x_in`` is the batch dimension, so this is equivalent to
    reversing axis ``axis + 1`` of the full array.  Shape and dtype are
    preserved.
    """
    flipped = np.empty_like(x_in)
    for idx, sample in enumerate(x_in):
        moved = np.asarray(sample).swapaxes(axis, 0)
        flipped[idx] = moved[::-1, ...].swapaxes(0, axis)
    return flipped
def flip_axis_fra(x, flipping_axis):
    """Reverse *x* along *flipping_axis* via a transpose round-trip.

    The flipped axis is moved to the front, reversed, then moved back.
    """
    order = [flipping_axis] + [ax for ax in range(x.ndim) if ax != flipping_axis]
    restore = [order.index(ax) for ax in range(x.ndim)]
    reversed_front = x.transpose(order)[::-1, ...]
    return reversed_front.transpose(restore)
# Self-test: flipping each sample on axis-1 terms (flip_axis) must match
# flipping the whole batch array (flip_axis_fra) for every non-batch axis.
if __name__ == '__main__':
    aa = np.random.random((10, 2, 3, 4))  # b, *, *, *
    for axis in [1, 2, 3]:
        print('Testing channel in axis {}'.format(axis))
        mm = flip_axis(aa.copy(), axis-1)
        ff = flip_axis_fra(aa.copy(), axis)
        assert np.array_equal(mm, ff)
        print('Test passed!')
| [
"numpy.random.random",
"numpy.zeros",
"numpy.array_equal",
"numpy.asarray"
] | [((60, 98), 'numpy.zeros', 'np.zeros', (['x_in.shape'], {'dtype': 'x_in.dtype'}), '(x_in.shape, dtype=x_in.dtype)\n', (68, 98), True, 'import numpy as np\n'), ((614, 645), 'numpy.random.random', 'np.random.random', (['(10, 2, 3, 4)'], {}), '((10, 2, 3, 4))\n', (630, 645), True, 'import numpy as np\n'), ((845, 867), 'numpy.array_equal', 'np.array_equal', (['mm', 'ff'], {}), '(mm, ff)\n', (859, 867), True, 'import numpy as np\n'), ((144, 157), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (154, 157), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from apiclient.discovery import build
from apiclient import errors
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
def _format_topic(project, topic):
return 'projects/%s/topics/%s' % (project, topic)
class PubSubHook(GoogleCloudBaseHook):
    """Hook for accessing Google Pub/Sub.
    The GCP project against which actions are applied is determined by
    the project embedded in the Connection referenced by gcp_conn_id.
    """
    def __init__(self,
                 gcp_conn_id='google_cloud_default',
                 delegate_to=None):
        super(PubSubHook, self).__init__(gcp_conn_id, delegate_to=delegate_to)
    def get_conn(self):
        """Returns a Pub/Sub service object.
        :rtype: apiclient.discovery.Resource
        """
        http_authorized = self._authorize()
        return build('pubsub', 'v1', http=http_authorized)
    def publish(self, project, topic, messages):
        """Publishes messages to a Pub/Sub topic.
        :param project: the GCP project name or ID in which to publish
        :type project: string
        :param topic: the Pub/Sub topic to which to publish; do not
            include the 'projects/{project}/topics/' prefix.
        :type topic: string
        :param messages: messages to publish; if the data field in a
            message is set, it should already be base64 encoded.
        :type messages: list of PubSub messages; see
            http://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage
        :raises Exception: if the publish request fails
        """
        body = {'messages': messages}
        full_topic = _format_topic(project, topic)
        request = self.get_conn().projects().topics().publish(
            topic=full_topic, body=body)
        try:
            request.execute()
        except errors.HttpError as e:
            raise Exception('Error publishing to topic %s' % full_topic, e)
    def create_topic(self, project, topic, fail_if_exists=False):
        """Creates a Pub/Sub topic, if it does not already exist.
        :param project: the GCP project name or ID in which to create
            the topic
        :type project: string
        :param topic: the Pub/Sub topic name to create; do not
            include the 'projects/{project}/topics/' prefix.
        :type topic: string
        :param fail_if_exists: if set, raise an exception if the topic
            already exists
        :type fail_if_exists: bool
        """
        service = self.get_conn()
        full_topic = _format_topic(project, topic)
        try:
            service.projects().topics().create(
                name=full_topic, body={}).execute()
        except errors.HttpError as e:
            # Status code 409 indicates that the topic already exists.
            # When fail_if_exists is False this is treated as success
            # (nothing is raised); any other HTTP error is wrapped below.
            if str(e.resp['status']) == '409':
                if fail_if_exists:
                    raise Exception(
                        'Error creating topic. Topic already exists: %s'
                        % full_topic)
            else:
                raise Exception('Error creating topic %s' % full_topic, e)
| [
"apiclient.discovery.build"
] | [((1410, 1453), 'apiclient.discovery.build', 'build', (['"""pubsub"""', '"""v1"""'], {'http': 'http_authorized'}), "('pubsub', 'v1', http=http_authorized)\n", (1415, 1453), False, 'from apiclient.discovery import build\n')] |
""" Defines the Note repository """
import random
import string
import time
import bcrypt
from sqlalchemy.orm import load_only
from werkzeug.exceptions import Forbidden, UnprocessableEntity
from models import Note
class NoteRepository:
    """Repository of CRUD operations for Note rows, scoped to a user."""

    @staticmethod
    def create(user, notebook_id, title, content):
        """Create a new note for *user* and return its transformed dict.

        :raises UnprocessableEntity: if the title exceeds 32 characters.
        """
        if len(title) > 32:
            # BUG FIX: the exception instance was previously *returned*
            # instead of raised, so the length limit was never enforced.
            raise UnprocessableEntity(description="NOTE_TITLE_MAX_LENGTH")
        current_time = int(time.time())
        note = Note(
            notebook_id=notebook_id,
            title=title,
            content=content,
            user_id=user.id,
            created_at=current_time,
            updated_at=current_time
        )
        note.save()
        return note.transform()

    @staticmethod
    def update(user, id, title, content):
        """Update the user's note identified by *id*.

        :raises UnprocessableEntity: if the title exceeds 32 characters or
            the note does not exist / belongs to another user.
        """
        if len(title) > 32:
            # BUG FIX: was `return UnprocessableEntity(...)` — see create().
            raise UnprocessableEntity(description="NOTE_TITLE_MAX_LENGTH")
        current_time = int(time.time())
        note = Note.query.filter_by(id=id, user_id=user.id).first()
        if not note:
            raise UnprocessableEntity(description="NOTE_NOT_FOUND")
        note.title = title
        note.content = content
        note.updated_at = current_time
        note.save()
        return note.transform()

    @staticmethod
    def delete(user, id):
        """Delete the user's note identified by *id*.

        :raises UnprocessableEntity: if the note does not exist / belongs
            to another user.
        """
        note = Note.query.filter_by(id=id, user_id=user.id).first()
        if not note:
            raise UnprocessableEntity(description="NOTE_NOT_FOUND")
        note.delete()
        return 200
| [
"models.Note",
"models.Note.query.filter_by",
"time.time",
"werkzeug.exceptions.UnprocessableEntity"
] | [((505, 635), 'models.Note', 'Note', ([], {'notebook_id': 'notebook_id', 'title': 'title', 'content': 'content', 'user_id': 'user.id', 'created_at': 'current_time', 'updated_at': 'current_time'}), '(notebook_id=notebook_id, title=title, content=content, user_id=user.id,\n created_at=current_time, updated_at=current_time)\n', (509, 635), False, 'from models import Note\n'), ((392, 448), 'werkzeug.exceptions.UnprocessableEntity', 'UnprocessableEntity', ([], {'description': '"""NOTE_TITLE_MAX_LENGTH"""'}), "(description='NOTE_TITLE_MAX_LENGTH')\n", (411, 448), False, 'from werkzeug.exceptions import Forbidden, UnprocessableEntity\n'), ((477, 488), 'time.time', 'time.time', ([], {}), '()\n', (486, 488), False, 'import time\n'), ((921, 977), 'werkzeug.exceptions.UnprocessableEntity', 'UnprocessableEntity', ([], {'description': '"""NOTE_TITLE_MAX_LENGTH"""'}), "(description='NOTE_TITLE_MAX_LENGTH')\n", (940, 977), False, 'from werkzeug.exceptions import Forbidden, UnprocessableEntity\n'), ((1006, 1017), 'time.time', 'time.time', ([], {}), '()\n', (1015, 1017), False, 'import time\n'), ((1127, 1176), 'werkzeug.exceptions.UnprocessableEntity', 'UnprocessableEntity', ([], {'description': '"""NOTE_NOT_FOUND"""'}), "(description='NOTE_NOT_FOUND')\n", (1146, 1176), False, 'from werkzeug.exceptions import Forbidden, UnprocessableEntity\n'), ((1534, 1583), 'werkzeug.exceptions.UnprocessableEntity', 'UnprocessableEntity', ([], {'description': '"""NOTE_NOT_FOUND"""'}), "(description='NOTE_NOT_FOUND')\n", (1553, 1583), False, 'from werkzeug.exceptions import Forbidden, UnprocessableEntity\n'), ((1034, 1078), 'models.Note.query.filter_by', 'Note.query.filter_by', ([], {'id': 'id', 'user_id': 'user.id'}), '(id=id, user_id=user.id)\n', (1054, 1078), False, 'from models import Note\n'), ((1441, 1485), 'models.Note.query.filter_by', 'Note.query.filter_by', ([], {'id': 'id', 'user_id': 'user.id'}), '(id=id, user_id=user.id)\n', (1461, 1485), False, 'from models import Note\n')] |
'''tzinfo timezone information for Africa/Ndjamena.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Ndjamena(DstTzInfo):
    '''Africa/Ndjamena timezone definition. See datetime.tzinfo for details'''
    zone = 'Africa/Ndjamena'
    # UTC instants of each offset change; each entry pairs positionally with
    # the corresponding _transition_info entry below.
    _utc_transition_times = [
d(1,1,1,0,0,0),
d(1911,12,31,22,59,48),
d(1979,10,13,23,0,0),
d(1980,3,7,22,0,0),
        ]
    # (utcoffset seconds, dst seconds, tzname) for each transition above:
    # LMT, then WAT (UTC+1), a WAST DST interval (UTC+2), and back to WAT.
    _transition_info = [
i(3600,0,'LMT'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
        ]
# Replace the class with a singleton instance, as pytz zone modules do.
Ndjamena = Ndjamena()
| [
"pytz.tzinfo.memorized_ttinfo",
"pytz.tzinfo.memorized_datetime"
] | [((351, 370), 'pytz.tzinfo.memorized_datetime', 'd', (['(1)', '(1)', '(1)', '(0)', '(0)', '(0)'], {}), '(1, 1, 1, 0, 0, 0)\n', (352, 370), True, 'from pytz.tzinfo import memorized_datetime as d\n'), ((367, 394), 'pytz.tzinfo.memorized_datetime', 'd', (['(1911)', '(12)', '(31)', '(22)', '(59)', '(48)'], {}), '(1911, 12, 31, 22, 59, 48)\n', (368, 394), True, 'from pytz.tzinfo import memorized_datetime as d\n'), ((391, 416), 'pytz.tzinfo.memorized_datetime', 'd', (['(1979)', '(10)', '(13)', '(23)', '(0)', '(0)'], {}), '(1979, 10, 13, 23, 0, 0)\n', (392, 416), True, 'from pytz.tzinfo import memorized_datetime as d\n'), ((413, 436), 'pytz.tzinfo.memorized_datetime', 'd', (['(1980)', '(3)', '(7)', '(22)', '(0)', '(0)'], {}), '(1980, 3, 7, 22, 0, 0)\n', (414, 436), True, 'from pytz.tzinfo import memorized_datetime as d\n'), ((469, 486), 'pytz.tzinfo.memorized_ttinfo', 'i', (['(3600)', '(0)', '"""LMT"""'], {}), "(3600, 0, 'LMT')\n", (470, 486), True, 'from pytz.tzinfo import memorized_ttinfo as i\n'), ((486, 503), 'pytz.tzinfo.memorized_ttinfo', 'i', (['(3600)', '(0)', '"""WAT"""'], {}), "(3600, 0, 'WAT')\n", (487, 503), True, 'from pytz.tzinfo import memorized_ttinfo as i\n'), ((503, 524), 'pytz.tzinfo.memorized_ttinfo', 'i', (['(7200)', '(3600)', '"""WAST"""'], {}), "(7200, 3600, 'WAST')\n", (504, 524), True, 'from pytz.tzinfo import memorized_ttinfo as i\n'), ((524, 541), 'pytz.tzinfo.memorized_ttinfo', 'i', (['(3600)', '(0)', '"""WAT"""'], {}), "(3600, 0, 'WAT')\n", (525, 541), True, 'from pytz.tzinfo import memorized_ttinfo as i\n')] |
import logging
import os
import shutil
import tempfile
from git import Repo
from .ast_analysis import _get_all_names, _get_all_func_names, _generate_trees
from .ntlk_analysis import _get_verbs_from_function_name, _get_nouns_from_function_name
from .utils import _get_count_most_common, _get_converted_names, _convert_tpls_to_lst
# Log to a file rather than stdout so log lines don't mix with the report.
logging.basicConfig(
    filename='code_analyzer.log',
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%d-%b-%y %H:%M:%S',
    level=logging.INFO)
class CodeAnalyzer:
    """Code analyzer main class.

    Walks one or more project directories (or a freshly cloned GitHub
    repository), parses up to ``len_filenames`` Python files and reports
    the ``top_size`` most common identifier words according to the chosen
    ``lookup`` mode ('verb', 'noun', 'funcname' or 'localvarname').
    """

    def __init__(
            self,
            path='C:\\',
            lookup='verb',
            projects=('', ),
            top_size=10,
            len_filenames=100,
            github_path=None,
    ):
        logging.info("Program started.")
        self.path = path
        self.github_path = github_path
        self.lookup = lookup
        self.projects = projects
        self.top_size = top_size
        self.len_filenames = len_filenames
        self.words = []

    def _get_filenames(self, path):
        """
        Get up to ``self.len_filenames`` Python filenames from path.
        :param path: path
        :return: list
        """
        filenames = []
        for dirname, dirs, files in os.walk(path, topdown=True):
            for file in files:
                # BUG FIX: the cap is now checked *before* appending, and the
                # outer loop is stopped as well.  Previously the inner break
                # only fired on an exact `==` match after appending, so once
                # the walk moved to another directory the count overshot the
                # limit and could grow without bound.
                if len(filenames) >= self.len_filenames:
                    break
                if file.endswith('.py'):
                    filenames.append(os.path.join(dirname, file))
            if len(filenames) >= self.len_filenames:
                break
        logging.info(f"Path is: {path}.")
        logging.info(f"Total {len(filenames)} files.")
        return filenames

    def _get_trees(self, path, with_filenames=False, with_file_content=False):
        """
        Returns a lazy generator of ast objects, one per discovered file.
        :param path: path
        :return: generator of ast objects
        """
        filenames = self._get_filenames(path)
        trees = (_generate_trees(filename, with_filenames,
                                 with_file_content)[0]
                 for filename in filenames)
        logging.info("Trees generated.")
        return trees

    def _get_top_verbs_in_path(self, path):
        """
        Returns a list of tuples with words and his counts.
        :param path: path
        :return: list of tuples with words and his counts
        """
        trees = self._get_trees(path)
        fncs = _get_converted_names(trees, _get_all_func_names)
        verbs = (_get_verbs_from_function_name(function_name)
                 for function_name in fncs)
        converted_verbs = _convert_tpls_to_lst(verbs)
        return converted_verbs

    def _get_top_nouns_in_path(self, path):
        """
        Returns a list of tuples with words and his counts.
        :param path: path
        :return: list of tuples with words and his counts
        """
        trees = self._get_trees(path)
        fncs = _get_converted_names(trees, _get_all_func_names)
        nouns = (_get_nouns_from_function_name(function_name)
                 for function_name in fncs)
        converted_nouns = _convert_tpls_to_lst(nouns)
        return converted_nouns

    def _get_all_words_in_path(self, path):
        """
        Returns a list of tuples with words and his counts.
        :param path: path
        :return: list of tuples with words and his counts
        """
        trees = self._get_trees(path)
        function_names = _get_converted_names(trees, _get_all_names)
        # Split snake_case names into their component words.
        all_words_in_path = ((word for word in function_name.split('_')
                              if word) for function_name in function_names)
        converted_all_words_in_path = _convert_tpls_to_lst(all_words_in_path)
        return converted_all_words_in_path

    def _get_top_functions_names_in_path(self, path):
        """
        Returns a list of function names found under path.
        :param path: path
        :return: list of function names
        """
        trees = self._get_trees(path)
        fncs = _get_converted_names(trees, _get_all_func_names)
        return fncs

    def _parse_lookup_args(self, path_):
        """
        Run the selected lookup over every configured project directory,
        accumulating the results in ``self.words``.
        :param path_: path
        :return: None
        """
        # verb - show statistics of the most common words by verbs
        # noun - show statistics on the most frequent words by nouns
        # funcname - show statistics of the most common words function names
        # localvarname - show statistics of the most common
        # words names of local variables inside functions
        lookups_functions = {
            'verb': self._get_top_verbs_in_path,
            'noun': self._get_top_nouns_in_path,
            'funcname': self._get_top_functions_names_in_path,
            'localvarname': self._get_all_words_in_path,
        }
        for project in self.projects:
            path = os.path.join(path_, project)
            function_for_lookup = lookups_functions.get(self.lookup)
            self.words += function_for_lookup(path)

    def parse(self):
        """
        Returns a list of tuples with words and his counts.
        :return: list of tuples with words and his counts
        """
        if self.github_path:
            # Clone the remote repository into a temp dir and analyse that.
            tmpdir = tempfile.mkdtemp()
            logging.info(f'Created temporary directory: {tmpdir}.')
            Repo.clone_from(self.github_path, tmpdir)
            self._parse_lookup_args(tmpdir)
            top_words = _get_count_most_common(self.words, self.top_size)
            try:
                shutil.rmtree(tmpdir)
            except PermissionError:
                # Best effort cleanup; on Windows the clone may still hold
                # read-only files.
                logging.info(
                    'Can\'t deleting temp directory. Access is denied.')
            logging.info('Done!')
            return [] if len(top_words) == 0 else top_words
        else:
            self._parse_lookup_args(self.path)
            top_words = _get_count_most_common(self.words, self.top_size)
            logging.info("Done!")
            return [] if len(top_words) == 0 else top_words
| [
"logging.basicConfig",
"git.Repo.clone_from",
"os.path.join",
"tempfile.mkdtemp",
"shutil.rmtree",
"logging.info",
"os.walk"
] | [((332, 492), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""code_analyzer.log"""', 'format': '"""%(asctime)s - %(levelname)s - %(message)s"""', 'datefmt': '"""%d-%b-%y %H:%M:%S"""', 'level': 'logging.INFO'}), "(filename='code_analyzer.log', format=\n '%(asctime)s - %(levelname)s - %(message)s', datefmt=\n '%d-%b-%y %H:%M:%S', level=logging.INFO)\n", (351, 492), False, 'import logging\n'), ((777, 809), 'logging.info', 'logging.info', (['"""Program started."""'], {}), "('Program started.')\n", (789, 809), False, 'import logging\n'), ((1237, 1264), 'os.walk', 'os.walk', (['path'], {'topdown': '(True)'}), '(path, topdown=True)\n', (1244, 1264), False, 'import os\n'), ((1503, 1536), 'logging.info', 'logging.info', (['f"""Path is: {path}."""'], {}), "(f'Path is: {path}.')\n", (1515, 1536), False, 'import logging\n'), ((2035, 2067), 'logging.info', 'logging.info', (['"""Trees generated."""'], {}), "('Trees generated.')\n", (2047, 2067), False, 'import logging\n'), ((4847, 4875), 'os.path.join', 'os.path.join', (['path_', 'project'], {}), '(path_, project)\n', (4859, 4875), False, 'import os\n'), ((5211, 5229), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (5227, 5229), False, 'import tempfile\n'), ((5242, 5297), 'logging.info', 'logging.info', (['f"""Created temporary directory: {tmpdir}."""'], {}), "(f'Created temporary directory: {tmpdir}.')\n", (5254, 5297), False, 'import logging\n'), ((5310, 5351), 'git.Repo.clone_from', 'Repo.clone_from', (['self.github_path', 'tmpdir'], {}), '(self.github_path, tmpdir)\n', (5325, 5351), False, 'from git import Repo\n'), ((5677, 5698), 'logging.info', 'logging.info', (['"""Done!"""'], {}), "('Done!')\n", (5689, 5698), False, 'import logging\n'), ((5906, 5927), 'logging.info', 'logging.info', (['"""Done!"""'], {}), "('Done!')\n", (5918, 5927), False, 'import logging\n'), ((5503, 5524), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {}), '(tmpdir)\n', (5516, 5524), False, 'import shutil\n'), 
((5577, 5642), 'logging.info', 'logging.info', (['"""Can\'t deleting temp directory. Access is denied."""'], {}), '("Can\'t deleting temp directory. Access is denied.")\n', (5589, 5642), False, 'import logging\n'), ((1375, 1402), 'os.path.join', 'os.path.join', (['dirname', 'file'], {}), '(dirname, file)\n', (1387, 1402), False, 'import os\n')] |
from flask_app.config.mysqlconnection import connectToMySQL
from flask import flash
from base64 import b64encode
class Pet:
    """Active-record style model for rows of the `pets` table."""

    db = 'pawfosterfamily'

    def __init__(self,data):
        """Hydrate a Pet instance from a database row dict."""
        self.id = data['id']
        self.img = data['img']
        self.name = data['name']
        self.age = data['age']
        self.foster_time_needed = data['foster_time_needed']
        self.foster_grade = data['foster_grade']
        self.description = data['description']
        self.shelter_id = data['shelter_id']
        self.created_at = data['created_at']
        self.updated_at = data['updated_at']

    @classmethod
    def save(cls,data):
        """Insert a new pet row from *data*; returns the query result (new id)."""
        query = 'INSERT INTO pets (img, name, age, foster_time_needed, foster_grade, description, shelter_id) VALUES (%(img)s, %(name)s, %(age)s, %(foster_time_needed)s, %(foster_grade)s, %(description)s, %(shelter_id)s);'
        return connectToMySQL(cls.db).query_db(query, data)

    @classmethod
    def get_by_shelter(cls, data):
        """Return all pets belonging to the shelter in data['shelter_id']."""
        query = 'SELECT * FROM pets WHERE shelter_id = %(shelter_id)s;'
        results = connectToMySQL(cls.db).query_db(query, data)
        all_pets = []
        for row in results:
            all_pets.append(cls(row))
        return all_pets

    @classmethod
    def get_one(cls,data):
        """Return the single pet identified by data['id']."""
        query = 'SELECT * FROM pets WHERE id = %(id)s;'
        results = connectToMySQL(cls.db).query_db(query,data)
        return cls(results[0])

    @classmethod
    def get_all_available(cls):
        """Return every pet with no APPROVED foster application."""
        query = 'SELECT * FROM pets WHERE (SELECT count(*) from applications where applications.pet_id = pets.id AND applications.status = "APPROVED") = 0;'
        results = connectToMySQL(cls.db).query_db(query)
        all_pets = []
        for row in results:
            all_pets.append(cls(row))
        return all_pets

    @classmethod
    def destroy(cls,data):
        """Delete the pet identified by data['id']."""
        query = 'DELETE FROM pets WHERE id = %(id)s;'
        return connectToMySQL(cls.db).query_db(query,data)

    @classmethod
    def pets_for_user(cls,data):
        """Return all pets whose user_id matches data['id'].

        NOTE(review): the INSERT in save() writes no user_id column —
        confirm this query against the actual `pets` schema.
        """
        query = 'SELECT * FROM pets WHERE user_id = %(id)s;'
        return connectToMySQL(cls.db).query_db(query,data)

    @classmethod
    def get_by_foster(cls, foster_id):
        """Return all pets with an APPROVED application from *foster_id*."""
        query = 'SELECT * FROM pets JOIN applications on applications.pet_id = pets.id where applications.status = "APPROVED" and applications.foster_id = %(foster_id)s;'
        results = connectToMySQL(cls.db).query_db(query, { 'foster_id': foster_id })
        all_pets = []
        for row in results:
            all_pets.append(cls(row))
        return all_pets

    @staticmethod
    def validate_pet(pet):
        """Validate pet form data; flashes one message per failed rule."""
        is_valid = True
        if len(pet['name']) == 0:
            is_valid = False
            flash("Name is required","pet")
        if len(pet['foster_time_needed']) == 0:
            # BUG FIX: the check was `len(...) < 0`, which a length can never
            # satisfy, so an empty foster time always passed validation.
            is_valid = False
            flash("Foster time needed is required","pet")
        return is_valid
"flask_app.config.mysqlconnection.connectToMySQL",
"flask.flash"
] | [((2917, 2949), 'flask.flash', 'flash', (['"""Name is required"""', '"""pet"""'], {}), "('Name is required', 'pet')\n", (2922, 2949), False, 'from flask import flash\n'), ((3037, 3083), 'flask.flash', 'flash', (['"""Foster time needed is required"""', '"""pet"""'], {}), "('Foster time needed is required', 'pet')\n", (3042, 3083), False, 'from flask import flash\n'), ((1103, 1125), 'flask_app.config.mysqlconnection.connectToMySQL', 'connectToMySQL', (['cls.db'], {}), '(cls.db)\n', (1117, 1125), False, 'from flask_app.config.mysqlconnection import connectToMySQL\n'), ((1291, 1313), 'flask_app.config.mysqlconnection.connectToMySQL', 'connectToMySQL', (['cls.db'], {}), '(cls.db)\n', (1305, 1313), False, 'from flask_app.config.mysqlconnection import connectToMySQL\n'), ((1567, 1589), 'flask_app.config.mysqlconnection.connectToMySQL', 'connectToMySQL', (['cls.db'], {}), '(cls.db)\n', (1581, 1589), False, 'from flask_app.config.mysqlconnection import connectToMySQL\n'), ((1867, 1889), 'flask_app.config.mysqlconnection.connectToMySQL', 'connectToMySQL', (['cls.db'], {}), '(cls.db)\n', (1881, 1889), False, 'from flask_app.config.mysqlconnection import connectToMySQL\n'), ((2132, 2154), 'flask_app.config.mysqlconnection.connectToMySQL', 'connectToMySQL', (['cls.db'], {}), '(cls.db)\n', (2146, 2154), False, 'from flask_app.config.mysqlconnection import connectToMySQL\n'), ((2303, 2325), 'flask_app.config.mysqlconnection.connectToMySQL', 'connectToMySQL', (['cls.db'], {}), '(cls.db)\n', (2317, 2325), False, 'from flask_app.config.mysqlconnection import connectToMySQL\n'), ((2593, 2615), 'flask_app.config.mysqlconnection.connectToMySQL', 'connectToMySQL', (['cls.db'], {}), '(cls.db)\n', (2607, 2615), False, 'from flask_app.config.mysqlconnection import connectToMySQL\n')] |
import wallycore as wally
from . import exceptions
from gaservices.utils import h2b
wordlist_ = wally.bip39_get_wordlist('en')
wordlist = [wally.bip39_get_word(wordlist_, i) for i in range(2048)]
def seed_from_mnemonic(mnemonic_or_hex_seed):
"""Return seed, mnemonic given an input string
mnemonic_or_hex_seed can either be:
- A mnemonic
- A hex seed, with an 'X' at the end, which needs to be stripped
seed will always be returned, mnemonic may be None if a seed was passed
"""
if mnemonic_or_hex_seed.endswith('X'):
mnemonic = None
seed = h2b(mnemonic_or_hex_seed[:-1])
else:
mnemonic = mnemonic_or_hex_seed
written, seed = wally.bip39_mnemonic_to_seed512(mnemonic_or_hex_seed, None)
assert written == wally.BIP39_SEED_LEN_512
assert len(seed) == wally.BIP39_SEED_LEN_512
return seed, mnemonic
def wallet_from_mnemonic(mnemonic_or_hex_seed, ver=wally.BIP32_VER_MAIN_PRIVATE):
"""Generate a BIP32 HD Master Key (wallet) from a mnemonic phrase or a hex seed"""
seed, mnemonic = seed_from_mnemonic(mnemonic_or_hex_seed)
return wally.bip32_key_from_seed(seed, ver, wally.BIP32_FLAG_SKIP_HASH)
def _decrypt_mnemonic(mnemonic, password):
"""Decrypt a 27 word encrypted mnemonic to a 24 word mnemonic"""
mnemonic = ' '.join(mnemonic.split())
entropy = bytearray(wally.BIP39_ENTROPY_LEN_288)
assert wally.bip39_mnemonic_to_bytes(None, mnemonic, entropy) == len(entropy)
salt, encrypted = entropy[32:], entropy[:32]
derived = bytearray(64)
wally.scrypt(password.encode('utf-8'), salt, 16384, 8, 8, derived)
key, decrypted = derived[32:], bytearray(32)
wally.aes(key, encrypted, wally.AES_FLAG_DECRYPT, decrypted)
for i in range(len(decrypted)):
decrypted[i] ^= derived[i]
if wally.sha256d(decrypted)[:4] != salt:
raise exceptions.InvalidMnemonicOrPasswordError('Incorrect password')
return wally.bip39_mnemonic_from_bytes(None, decrypted)
def check_mnemonic_or_hex_seed(mnemonic):
"""Raise an error if mnemonic/hex seed is invalid"""
if ' ' not in mnemonic:
if mnemonic.endswith('X'):
# mnemonic is the hex seed
return
msg = 'Mnemonic words must be separated by spaces, hex seed must end with X'
raise exceptions.InvalidMnemonicOrPasswordError(msg)
for word in mnemonic.split():
if word not in wordlist:
msg = 'Invalid word: {}'.format(word)
raise exceptions.InvalidMnemonicOrPasswordError(msg)
try:
wally.bip39_mnemonic_validate(None, mnemonic)
except ValueError:
raise exceptions.InvalidMnemonicOrPasswordError('Invalid mnemonic checksum')
| [
"wallycore.bip39_mnemonic_validate",
"wallycore.bip39_mnemonic_from_bytes",
"wallycore.bip32_key_from_seed",
"wallycore.sha256d",
"wallycore.bip39_get_wordlist",
"wallycore.bip39_mnemonic_to_seed512",
"wallycore.bip39_mnemonic_to_bytes",
"wallycore.bip39_get_word",
"gaservices.utils.h2b",
"wallyco... | [((98, 128), 'wallycore.bip39_get_wordlist', 'wally.bip39_get_wordlist', (['"""en"""'], {}), "('en')\n", (122, 128), True, 'import wallycore as wally\n'), ((141, 175), 'wallycore.bip39_get_word', 'wally.bip39_get_word', (['wordlist_', 'i'], {}), '(wordlist_, i)\n', (161, 175), True, 'import wallycore as wally\n'), ((1127, 1191), 'wallycore.bip32_key_from_seed', 'wally.bip32_key_from_seed', (['seed', 'ver', 'wally.BIP32_FLAG_SKIP_HASH'], {}), '(seed, ver, wally.BIP32_FLAG_SKIP_HASH)\n', (1152, 1191), True, 'import wallycore as wally\n'), ((1684, 1744), 'wallycore.aes', 'wally.aes', (['key', 'encrypted', 'wally.AES_FLAG_DECRYPT', 'decrypted'], {}), '(key, encrypted, wally.AES_FLAG_DECRYPT, decrypted)\n', (1693, 1744), True, 'import wallycore as wally\n'), ((1950, 1998), 'wallycore.bip39_mnemonic_from_bytes', 'wally.bip39_mnemonic_from_bytes', (['None', 'decrypted'], {}), '(None, decrypted)\n', (1981, 1998), True, 'import wallycore as wally\n'), ((591, 621), 'gaservices.utils.h2b', 'h2b', (['mnemonic_or_hex_seed[:-1]'], {}), '(mnemonic_or_hex_seed[:-1])\n', (594, 621), False, 'from gaservices.utils import h2b\n'), ((696, 755), 'wallycore.bip39_mnemonic_to_seed512', 'wally.bip39_mnemonic_to_seed512', (['mnemonic_or_hex_seed', 'None'], {}), '(mnemonic_or_hex_seed, None)\n', (727, 755), True, 'import wallycore as wally\n'), ((1412, 1466), 'wallycore.bip39_mnemonic_to_bytes', 'wally.bip39_mnemonic_to_bytes', (['None', 'mnemonic', 'entropy'], {}), '(None, mnemonic, entropy)\n', (1441, 1466), True, 'import wallycore as wally\n'), ((2568, 2613), 'wallycore.bip39_mnemonic_validate', 'wally.bip39_mnemonic_validate', (['None', 'mnemonic'], {}), '(None, mnemonic)\n', (2597, 2613), True, 'import wallycore as wally\n'), ((1823, 1847), 'wallycore.sha256d', 'wally.sha256d', (['decrypted'], {}), '(decrypted)\n', (1836, 1847), True, 'import wallycore as wally\n')] |
def elastic_rate(
hv,
hs,
v,
s,
rho,
mu,
nx,
dx,
order,
t,
y,
r0,
r1,
tau0_1,
tau0_2,
tauN_1,
tauN_2,
type_0,
forcing,
):
# we compute rates that will be used for Runge-Kutta time-stepping
#
import first_derivative_sbp_operators
import numpy as np
import boundarycondition
V = np.zeros((nx, 1))
S = np.zeros((nx, 1))
Vt = np.zeros((nx, 1))
St = np.zeros((nx, 1))
Vx = np.zeros((nx, 1))
Sx = np.zeros((nx, 1))
mms(V, S, Vt, St, Vx, Sx, y, t, type_0)
# initialize arrays for computing derivatives
vx = np.zeros((nx, 1))
sx = np.zeros((nx, 1))
# compute first derivatives for velocity and stress fields
first_derivative_sbp_operators.dx(vx, v, nx, dx, order)
first_derivative_sbp_operators.dx(sx, s, nx, dx, order)
# compute the elastic rates
hv[:, :] = (1.0 / rho) * sx + forcing * (Vt - (1.0 / rho) * Sx)
hs[:, :] = mu * vx + forcing * (St - mu * Vx)
# impose boundary conditions using penalty: SAT
impose_bc(
hv,
hs,
v,
s,
rho,
mu,
nx,
dx,
order,
forcing * V,
forcing * S,
r0,
r1,
tau0_1,
tau0_2,
tauN_1,
tauN_2,
)
def advection_rate(hv, v, nx, dx, order, t, y, tau):
# we compute rates that will be used for Runge-Kutta time-stepping
#
import first_derivative_sbp_operators
import numpy as np
# initialize arrays for computing derivatives
vx = np.zeros((nx, 1))
# compute first derivatives of the advected field v
first_derivative_sbp_operators.dx(vx, v, nx, dx, order)
# compute rates
hv[:, :] = -vx
# impose boundary conditions using penalty: SAT
# penalty weights
h11 = np.zeros((1, 1))
penaltyweight(h11, dx, order)
V0 = np.zeros((1, 1))
# boundary forcing
g(V0, t)
# print(Vn)
# penalize boundaries with the SAT terms
hv[0, :] = hv[0, :] - tau / h11 * (v[0, :] - V0)
def impose_bc(
hv,
hs,
v,
s,
rho,
mu,
nx,
dx,
order,
V,
S,
r0,
r1,
tau0_1,
tau0_2,
tauN_1,
tauN_2,
):
# impose boundary conditions
import numpy as np
import boundarycondition
# penalty weights
h11 = np.zeros((1, 1))
penaltyweight(h11, dx, order)
mv = np.zeros((1, 1))
ms = np.zeros((1, 1))
pv = np.zeros((1, 1))
ps = np.zeros((1, 1))
v0 = v[0, :]
s0 = s[0, :]
vn = v[nx - 1, :]
sn = s[nx - 1, :]
# boundary forcing
V0 = V[0, :]
S0 = S[0, :]
Vn = V[nx - 1, :]
Sn = S[nx - 1, :]
# compute SAT terms
boundarycondition.bcm(mv, ms, v0, s0, V0, S0, rho, mu, r0)
boundarycondition.bcp(pv, ps, vn, sn, Vn, Sn, rho, mu, r1)
# penalize boundaries with the SAT terms
hv[0, :] = hv[0, :] - tau0_1 / h11 * mv
hs[0, :] = hs[0, :] - tau0_2 / h11 * ms
hv[nx - 1, :] = hv[nx - 1, :] - tauN_1 / h11 * pv
def mms(V, S, V_t, S_t, V_x, S_x, y, t, type_0):
import numpy as np
if type_0 in ("Gaussian"):
delta = 0.015 * (y[-1, 0] - y[0, 0])
cs = 3.464
rho = 2.6702
Zs = rho * cs
x0 = 0.5 * (y[-1, 0] - y[0, 0])
V[:, :] = (
1
/ np.sqrt(2.0 * np.pi * delta ** 2)
* 0.5
* (
np.exp(-(y + cs * (t) - x0) ** 2 / (2.0 * delta ** 2))
+ np.exp(-(y - cs * (t) - x0) ** 2 / (2.0 * delta ** 2))
)
)
S[:, :] = (
1
/ np.sqrt(2.0 * np.pi * delta ** 2)
* 0.5
* Zs
* (
np.exp(-(y + cs * (t) - x0) ** 2 / (2.0 * delta ** 2))
- np.exp(-(y - cs * (t) - x0) ** 2 / (2.0 * delta ** 2))
)
)
V_t[:, :] = 0
S_t[:, :] = 0
V_x[:, :] = 0
S_x[:, :] = 0
if type_0 in ("Sinusoidal"):
delta = y[-1, 0] - y[0, 0]
ny = 20.5 / delta * np.pi
nt = 2.5 * np.pi
fs = 9.33
V[:, :] = np.cos(nt * t) * np.sin(ny * y + fs)
S[:, :] = ny * np.sin(nt * t) * np.cos(ny * y - fs)
V_t[:, :] = -nt * np.sin(nt * t) * np.sin(ny * y + fs)
S_t[:, :] = nt * ny * np.cos(nt * t) * np.cos(ny * y - fs)
V_x[:, :] = ny * np.cos(nt * t) * np.cos(ny * y + fs)
S_x[:, :] = -ny * ny * np.sin(nt * t) * np.sin(ny * y - fs)
def g(V, t):
import numpy as np
V[:, :] = 0.0
if t <= 1.0 and t >= 0.0:
V[:, :] = (np.sin(np.pi * t)) ** 4
def penaltyweight(h11, dx, order):
if order == 2:
h11[:] = 0.5 * dx
if order == 4:
h11[:] = (17.0 / 48.0) * dx
if order == 6:
h11[:] = 13649.0 / 43200.0 * dx
| [
"boundarycondition.bcm",
"numpy.sqrt",
"first_derivative_sbp_operators.dx",
"boundarycondition.bcp",
"numpy.exp",
"numpy.zeros",
"numpy.cos",
"numpy.sin"
] | [((378, 395), 'numpy.zeros', 'np.zeros', (['(nx, 1)'], {}), '((nx, 1))\n', (386, 395), True, 'import numpy as np\n'), ((404, 421), 'numpy.zeros', 'np.zeros', (['(nx, 1)'], {}), '((nx, 1))\n', (412, 421), True, 'import numpy as np\n'), ((431, 448), 'numpy.zeros', 'np.zeros', (['(nx, 1)'], {}), '((nx, 1))\n', (439, 448), True, 'import numpy as np\n'), ((458, 475), 'numpy.zeros', 'np.zeros', (['(nx, 1)'], {}), '((nx, 1))\n', (466, 475), True, 'import numpy as np\n'), ((485, 502), 'numpy.zeros', 'np.zeros', (['(nx, 1)'], {}), '((nx, 1))\n', (493, 502), True, 'import numpy as np\n'), ((512, 529), 'numpy.zeros', 'np.zeros', (['(nx, 1)'], {}), '((nx, 1))\n', (520, 529), True, 'import numpy as np\n'), ((635, 652), 'numpy.zeros', 'np.zeros', (['(nx, 1)'], {}), '((nx, 1))\n', (643, 652), True, 'import numpy as np\n'), ((662, 679), 'numpy.zeros', 'np.zeros', (['(nx, 1)'], {}), '((nx, 1))\n', (670, 679), True, 'import numpy as np\n'), ((748, 803), 'first_derivative_sbp_operators.dx', 'first_derivative_sbp_operators.dx', (['vx', 'v', 'nx', 'dx', 'order'], {}), '(vx, v, nx, dx, order)\n', (781, 803), False, 'import first_derivative_sbp_operators\n'), ((808, 863), 'first_derivative_sbp_operators.dx', 'first_derivative_sbp_operators.dx', (['sx', 's', 'nx', 'dx', 'order'], {}), '(sx, s, nx, dx, order)\n', (841, 863), False, 'import first_derivative_sbp_operators\n'), ((1586, 1603), 'numpy.zeros', 'np.zeros', (['(nx, 1)'], {}), '((nx, 1))\n', (1594, 1603), True, 'import numpy as np\n'), ((1665, 1720), 'first_derivative_sbp_operators.dx', 'first_derivative_sbp_operators.dx', (['vx', 'v', 'nx', 'dx', 'order'], {}), '(vx, v, nx, dx, order)\n', (1698, 1720), False, 'import first_derivative_sbp_operators\n'), ((1847, 1863), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (1855, 1863), True, 'import numpy as np\n'), ((1908, 1924), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (1916, 1924), True, 'import numpy as np\n'), ((2367, 2383), 'numpy.zeros', 'np.zeros', 
(['(1, 1)'], {}), '((1, 1))\n', (2375, 2383), True, 'import numpy as np\n'), ((2428, 2444), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (2436, 2444), True, 'import numpy as np\n'), ((2454, 2470), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (2462, 2470), True, 'import numpy as np\n'), ((2481, 2497), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (2489, 2497), True, 'import numpy as np\n'), ((2507, 2523), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (2515, 2523), True, 'import numpy as np\n'), ((2736, 2794), 'boundarycondition.bcm', 'boundarycondition.bcm', (['mv', 'ms', 'v0', 's0', 'V0', 'S0', 'rho', 'mu', 'r0'], {}), '(mv, ms, v0, s0, V0, S0, rho, mu, r0)\n', (2757, 2794), False, 'import boundarycondition\n'), ((2799, 2857), 'boundarycondition.bcp', 'boundarycondition.bcp', (['pv', 'ps', 'vn', 'sn', 'Vn', 'Sn', 'rho', 'mu', 'r1'], {}), '(pv, ps, vn, sn, Vn, Sn, rho, mu, r1)\n', (2820, 2857), False, 'import boundarycondition\n'), ((4148, 4162), 'numpy.cos', 'np.cos', (['(nt * t)'], {}), '(nt * t)\n', (4154, 4162), True, 'import numpy as np\n'), ((4165, 4184), 'numpy.sin', 'np.sin', (['(ny * y + fs)'], {}), '(ny * y + fs)\n', (4171, 4184), True, 'import numpy as np\n'), ((4226, 4245), 'numpy.cos', 'np.cos', (['(ny * y - fs)'], {}), '(ny * y - fs)\n', (4232, 4245), True, 'import numpy as np\n'), ((4290, 4309), 'numpy.sin', 'np.sin', (['(ny * y + fs)'], {}), '(ny * y + fs)\n', (4296, 4309), True, 'import numpy as np\n'), ((4357, 4376), 'numpy.cos', 'np.cos', (['(ny * y - fs)'], {}), '(ny * y - fs)\n', (4363, 4376), True, 'import numpy as np\n'), ((4420, 4439), 'numpy.cos', 'np.cos', (['(ny * y + fs)'], {}), '(ny * y + fs)\n', (4426, 4439), True, 'import numpy as np\n'), ((4488, 4507), 'numpy.sin', 'np.sin', (['(ny * y - fs)'], {}), '(ny * y - fs)\n', (4494, 4507), True, 'import numpy as np\n'), ((4616, 4633), 'numpy.sin', 'np.sin', (['(np.pi * t)'], {}), '(np.pi * t)\n', (4622, 4633), True, 'import numpy as 
np\n'), ((3438, 3490), 'numpy.exp', 'np.exp', (['(-(y + cs * t - x0) ** 2 / (2.0 * delta ** 2))'], {}), '(-(y + cs * t - x0) ** 2 / (2.0 * delta ** 2))\n', (3444, 3490), True, 'import numpy as np\n'), ((3511, 3563), 'numpy.exp', 'np.exp', (['(-(y - cs * t - x0) ** 2 / (2.0 * delta ** 2))'], {}), '(-(y - cs * t - x0) ** 2 / (2.0 * delta ** 2))\n', (3517, 3563), True, 'import numpy as np\n'), ((3740, 3792), 'numpy.exp', 'np.exp', (['(-(y + cs * t - x0) ** 2 / (2.0 * delta ** 2))'], {}), '(-(y + cs * t - x0) ** 2 / (2.0 * delta ** 2))\n', (3746, 3792), True, 'import numpy as np\n'), ((3813, 3865), 'numpy.exp', 'np.exp', (['(-(y - cs * t - x0) ** 2 / (2.0 * delta ** 2))'], {}), '(-(y - cs * t - x0) ** 2 / (2.0 * delta ** 2))\n', (3819, 3865), True, 'import numpy as np\n'), ((4209, 4223), 'numpy.sin', 'np.sin', (['(nt * t)'], {}), '(nt * t)\n', (4215, 4223), True, 'import numpy as np\n'), ((4273, 4287), 'numpy.sin', 'np.sin', (['(nt * t)'], {}), '(nt * t)\n', (4279, 4287), True, 'import numpy as np\n'), ((4340, 4354), 'numpy.cos', 'np.cos', (['(nt * t)'], {}), '(nt * t)\n', (4346, 4354), True, 'import numpy as np\n'), ((4403, 4417), 'numpy.cos', 'np.cos', (['(nt * t)'], {}), '(nt * t)\n', (4409, 4417), True, 'import numpy as np\n'), ((4471, 4485), 'numpy.sin', 'np.sin', (['(nt * t)'], {}), '(nt * t)\n', (4477, 4485), True, 'import numpy as np\n'), ((3354, 3387), 'numpy.sqrt', 'np.sqrt', (['(2.0 * np.pi * delta ** 2)'], {}), '(2.0 * np.pi * delta ** 2)\n', (3361, 3387), True, 'import numpy as np\n'), ((3639, 3672), 'numpy.sqrt', 'np.sqrt', (['(2.0 * np.pi * delta ** 2)'], {}), '(2.0 * np.pi * delta ** 2)\n', (3646, 3672), True, 'import numpy as np\n')] |
import datetime
import json
import os
from unittest import mock
from django.conf import settings
from django.core.files.storage import default_storage as storage
from freezegun import freeze_time
from waffle.testutils import override_switch
from olympia.amo.tests import addon_factory, TestCase, user_factory
from olympia.blocklist.cron import upload_mlbf_to_kinto
from olympia.blocklist.mlbf import MLBF
from olympia.blocklist.models import Block
from olympia.blocklist.tasks import MLBF_TIME_CONFIG_KEY
from olympia.lib.kinto import KintoServer
from olympia.zadmin.models import get_config, set_config
class TestUploadToKinto(TestCase):
def setUp(self):
addon_factory()
self.block = Block.objects.create(
addon=addon_factory(
file_kw={'is_signed': True, 'is_webextension': True}),
updated_by=user_factory())
@freeze_time('2020-01-01 12:34:56')
@override_switch('blocklist_mlbf_submit', active=True)
@mock.patch.object(KintoServer, 'publish_attachment')
def test_upload_mlbf_to_kinto(self, publish_mock):
upload_mlbf_to_kinto()
generation_time = int(
datetime.datetime(2020, 1, 1, 12, 34, 56).timestamp() * 1000)
publish_mock.assert_called_with(
{'key_format': MLBF.KEY_FORMAT,
'generation_time': generation_time},
('filter.bin', mock.ANY, 'application/octet-stream'))
assert (
get_config(MLBF_TIME_CONFIG_KEY, json_value=True) ==
generation_time)
mlfb_path = os.path.join(
settings.MLBF_STORAGE_PATH, str(generation_time), 'filter')
assert os.path.exists(mlfb_path)
assert os.path.getsize(mlfb_path)
blocked_path = os.path.join(
settings.MLBF_STORAGE_PATH, str(generation_time), 'blocked.json')
assert os.path.exists(blocked_path)
assert os.path.getsize(blocked_path)
not_blocked_path = os.path.join(
settings.MLBF_STORAGE_PATH, str(generation_time),
'notblocked.json')
assert os.path.exists(not_blocked_path)
assert os.path.getsize(not_blocked_path)
@freeze_time('2020-01-01 12:34:56')
@override_switch('blocklist_mlbf_submit', active=True)
@mock.patch.object(KintoServer, 'publish_attachment')
def test_stash_file(self, publish_mock):
set_config(MLBF_TIME_CONFIG_KEY, 123456, json_value=True)
prev_blocked_path = os.path.join(
settings.MLBF_STORAGE_PATH, '123456', 'blocked.json')
with storage.open(prev_blocked_path, 'w') as blocked_file:
json.dump(['madeup@guid:123'], blocked_file)
upload_mlbf_to_kinto()
generation_time = int(
datetime.datetime(2020, 1, 1, 12, 34, 56).timestamp() * 1000)
stash_path = os.path.join(
settings.MLBF_STORAGE_PATH, str(generation_time), 'stash.json')
assert os.path.exists(stash_path)
assert os.path.getsize(stash_path)
with open(stash_path) as stash_file:
blocked_guid = (
f'{self.block.guid}:'
f'{self.block.addon.current_version.version}')
assert json.load(stash_file) == {
'blocked': [blocked_guid],
'unblocked': ['madeup@guid:123']}
@override_switch('blocklist_mlbf_submit', active=False)
@mock.patch.object(KintoServer, 'publish_attachment')
def test_waffle_off_disables_publishing(self, publish_mock):
upload_mlbf_to_kinto()
publish_mock.assert_not_called()
assert not get_config(MLBF_TIME_CONFIG_KEY)
@freeze_time('2020-01-01 12:34:56')
@override_switch('blocklist_mlbf_submit', active=True)
@mock.patch.object(KintoServer, 'publish_attachment')
def test_no_need_for_new_mlbf(self, publish_mock):
# This was the last time the mlbf was generated
last_time = int(
datetime.datetime(2020, 1, 1, 12, 34, 1).timestamp() * 1000)
# And the Block was modified just before so would be included
self.block.update(modified=datetime.datetime(2020, 1, 1, 12, 34, 0))
set_config(MLBF_TIME_CONFIG_KEY, last_time, json_value=True)
upload_mlbf_to_kinto()
# So no need for a new bloomfilter
publish_mock.assert_not_called()
# But if we add a new Block a new filter is needed
addon_factory()
Block.objects.create(
addon=addon_factory(
file_kw={'is_signed': True, 'is_webextension': True}),
updated_by=user_factory())
upload_mlbf_to_kinto()
publish_mock.assert_called_once()
assert (
get_config(MLBF_TIME_CONFIG_KEY, json_value=True) ==
int(datetime.datetime(2020, 1, 1, 12, 34, 56).timestamp() * 1000))
| [
"datetime.datetime",
"olympia.blocklist.cron.upload_mlbf_to_kinto",
"os.path.exists",
"os.path.getsize",
"olympia.zadmin.models.set_config",
"olympia.amo.tests.user_factory",
"os.path.join",
"waffle.testutils.override_switch",
"olympia.amo.tests.addon_factory",
"olympia.zadmin.models.get_config",
... | [((881, 915), 'freezegun.freeze_time', 'freeze_time', (['"""2020-01-01 12:34:56"""'], {}), "('2020-01-01 12:34:56')\n", (892, 915), False, 'from freezegun import freeze_time\n'), ((921, 974), 'waffle.testutils.override_switch', 'override_switch', (['"""blocklist_mlbf_submit"""'], {'active': '(True)'}), "('blocklist_mlbf_submit', active=True)\n", (936, 974), False, 'from waffle.testutils import override_switch\n'), ((980, 1032), 'unittest.mock.patch.object', 'mock.patch.object', (['KintoServer', '"""publish_attachment"""'], {}), "(KintoServer, 'publish_attachment')\n", (997, 1032), False, 'from unittest import mock\n'), ((2170, 2204), 'freezegun.freeze_time', 'freeze_time', (['"""2020-01-01 12:34:56"""'], {}), "('2020-01-01 12:34:56')\n", (2181, 2204), False, 'from freezegun import freeze_time\n'), ((2210, 2263), 'waffle.testutils.override_switch', 'override_switch', (['"""blocklist_mlbf_submit"""'], {'active': '(True)'}), "('blocklist_mlbf_submit', active=True)\n", (2225, 2263), False, 'from waffle.testutils import override_switch\n'), ((2269, 2321), 'unittest.mock.patch.object', 'mock.patch.object', (['KintoServer', '"""publish_attachment"""'], {}), "(KintoServer, 'publish_attachment')\n", (2286, 2321), False, 'from unittest import mock\n'), ((3320, 3374), 'waffle.testutils.override_switch', 'override_switch', (['"""blocklist_mlbf_submit"""'], {'active': '(False)'}), "('blocklist_mlbf_submit', active=False)\n", (3335, 3374), False, 'from waffle.testutils import override_switch\n'), ((3380, 3432), 'unittest.mock.patch.object', 'mock.patch.object', (['KintoServer', '"""publish_attachment"""'], {}), "(KintoServer, 'publish_attachment')\n", (3397, 3432), False, 'from unittest import mock\n'), ((3629, 3663), 'freezegun.freeze_time', 'freeze_time', (['"""2020-01-01 12:34:56"""'], {}), "('2020-01-01 12:34:56')\n", (3640, 3663), False, 'from freezegun import freeze_time\n'), ((3669, 3722), 'waffle.testutils.override_switch', 'override_switch', 
(['"""blocklist_mlbf_submit"""'], {'active': '(True)'}), "('blocklist_mlbf_submit', active=True)\n", (3684, 3722), False, 'from waffle.testutils import override_switch\n'), ((3728, 3780), 'unittest.mock.patch.object', 'mock.patch.object', (['KintoServer', '"""publish_attachment"""'], {}), "(KintoServer, 'publish_attachment')\n", (3745, 3780), False, 'from unittest import mock\n'), ((673, 688), 'olympia.amo.tests.addon_factory', 'addon_factory', ([], {}), '()\n', (686, 688), False, 'from olympia.amo.tests import addon_factory, TestCase, user_factory\n'), ((1096, 1118), 'olympia.blocklist.cron.upload_mlbf_to_kinto', 'upload_mlbf_to_kinto', ([], {}), '()\n', (1116, 1118), False, 'from olympia.blocklist.cron import upload_mlbf_to_kinto\n'), ((1659, 1684), 'os.path.exists', 'os.path.exists', (['mlfb_path'], {}), '(mlfb_path)\n', (1673, 1684), False, 'import os\n'), ((1700, 1726), 'os.path.getsize', 'os.path.getsize', (['mlfb_path'], {}), '(mlfb_path)\n', (1715, 1726), False, 'import os\n'), ((1858, 1886), 'os.path.exists', 'os.path.exists', (['blocked_path'], {}), '(blocked_path)\n', (1872, 1886), False, 'import os\n'), ((1902, 1931), 'os.path.getsize', 'os.path.getsize', (['blocked_path'], {}), '(blocked_path)\n', (1917, 1931), False, 'import os\n'), ((2082, 2114), 'os.path.exists', 'os.path.exists', (['not_blocked_path'], {}), '(not_blocked_path)\n', (2096, 2114), False, 'import os\n'), ((2130, 2163), 'os.path.getsize', 'os.path.getsize', (['not_blocked_path'], {}), '(not_blocked_path)\n', (2145, 2163), False, 'import os\n'), ((2375, 2432), 'olympia.zadmin.models.set_config', 'set_config', (['MLBF_TIME_CONFIG_KEY', '(123456)'], {'json_value': '(True)'}), '(MLBF_TIME_CONFIG_KEY, 123456, json_value=True)\n', (2385, 2432), False, 'from olympia.zadmin.models import get_config, set_config\n'), ((2461, 2527), 'os.path.join', 'os.path.join', (['settings.MLBF_STORAGE_PATH', '"""123456"""', '"""blocked.json"""'], {}), "(settings.MLBF_STORAGE_PATH, '123456', 'blocked.json')\n", 
(2473, 2527), False, 'import os\n'), ((2674, 2696), 'olympia.blocklist.cron.upload_mlbf_to_kinto', 'upload_mlbf_to_kinto', ([], {}), '()\n', (2694, 2696), False, 'from olympia.blocklist.cron import upload_mlbf_to_kinto\n'), ((2930, 2956), 'os.path.exists', 'os.path.exists', (['stash_path'], {}), '(stash_path)\n', (2944, 2956), False, 'import os\n'), ((2972, 2999), 'os.path.getsize', 'os.path.getsize', (['stash_path'], {}), '(stash_path)\n', (2987, 2999), False, 'import os\n'), ((3506, 3528), 'olympia.blocklist.cron.upload_mlbf_to_kinto', 'upload_mlbf_to_kinto', ([], {}), '()\n', (3526, 3528), False, 'from olympia.blocklist.cron import upload_mlbf_to_kinto\n'), ((4145, 4205), 'olympia.zadmin.models.set_config', 'set_config', (['MLBF_TIME_CONFIG_KEY', 'last_time'], {'json_value': '(True)'}), '(MLBF_TIME_CONFIG_KEY, last_time, json_value=True)\n', (4155, 4205), False, 'from olympia.zadmin.models import get_config, set_config\n'), ((4214, 4236), 'olympia.blocklist.cron.upload_mlbf_to_kinto', 'upload_mlbf_to_kinto', ([], {}), '()\n', (4234, 4236), False, 'from olympia.blocklist.cron import upload_mlbf_to_kinto\n'), ((4389, 4404), 'olympia.amo.tests.addon_factory', 'addon_factory', ([], {}), '()\n', (4402, 4404), False, 'from olympia.amo.tests import addon_factory, TestCase, user_factory\n'), ((4586, 4608), 'olympia.blocklist.cron.upload_mlbf_to_kinto', 'upload_mlbf_to_kinto', ([], {}), '()\n', (4606, 4608), False, 'from olympia.blocklist.cron import upload_mlbf_to_kinto\n'), ((1455, 1504), 'olympia.zadmin.models.get_config', 'get_config', (['MLBF_TIME_CONFIG_KEY'], {'json_value': '(True)'}), '(MLBF_TIME_CONFIG_KEY, json_value=True)\n', (1465, 1504), False, 'from olympia.zadmin.models import get_config, set_config\n'), ((2554, 2590), 'django.core.files.storage.default_storage.open', 'storage.open', (['prev_blocked_path', '"""w"""'], {}), "(prev_blocked_path, 'w')\n", (2566, 2590), True, 'from django.core.files.storage import default_storage as storage\n'), ((2620, 2664), 
'json.dump', 'json.dump', (["['madeup@guid:123']", 'blocked_file'], {}), "(['madeup@guid:123'], blocked_file)\n", (2629, 2664), False, 'import json\n'), ((3590, 3622), 'olympia.zadmin.models.get_config', 'get_config', (['MLBF_TIME_CONFIG_KEY'], {}), '(MLBF_TIME_CONFIG_KEY)\n', (3600, 3622), False, 'from olympia.zadmin.models import get_config, set_config\n'), ((4680, 4729), 'olympia.zadmin.models.get_config', 'get_config', (['MLBF_TIME_CONFIG_KEY'], {'json_value': '(True)'}), '(MLBF_TIME_CONFIG_KEY, json_value=True)\n', (4690, 4729), False, 'from olympia.zadmin.models import get_config, set_config\n'), ((750, 817), 'olympia.amo.tests.addon_factory', 'addon_factory', ([], {'file_kw': "{'is_signed': True, 'is_webextension': True}"}), "(file_kw={'is_signed': True, 'is_webextension': True})\n", (763, 817), False, 'from olympia.amo.tests import addon_factory, TestCase, user_factory\n'), ((859, 873), 'olympia.amo.tests.user_factory', 'user_factory', ([], {}), '()\n', (871, 873), False, 'from olympia.amo.tests import addon_factory, TestCase, user_factory\n'), ((3194, 3215), 'json.load', 'json.load', (['stash_file'], {}), '(stash_file)\n', (3203, 3215), False, 'import json\n'), ((4095, 4135), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(1)', '(1)', '(12)', '(34)', '(0)'], {}), '(2020, 1, 1, 12, 34, 0)\n', (4112, 4135), False, 'import datetime\n'), ((4453, 4520), 'olympia.amo.tests.addon_factory', 'addon_factory', ([], {'file_kw': "{'is_signed': True, 'is_webextension': True}"}), "(file_kw={'is_signed': True, 'is_webextension': True})\n", (4466, 4520), False, 'from olympia.amo.tests import addon_factory, TestCase, user_factory\n'), ((4562, 4576), 'olympia.amo.tests.user_factory', 'user_factory', ([], {}), '()\n', (4574, 4576), False, 'from olympia.amo.tests import addon_factory, TestCase, user_factory\n'), ((1163, 1204), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(1)', '(1)', '(12)', '(34)', '(56)'], {}), '(2020, 1, 1, 12, 34, 56)\n', (1180, 1204), 
False, 'import datetime\n'), ((2741, 2782), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(1)', '(1)', '(12)', '(34)', '(56)'], {}), '(2020, 1, 1, 12, 34, 56)\n', (2758, 2782), False, 'import datetime\n'), ((3929, 3969), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(1)', '(1)', '(12)', '(34)', '(1)'], {}), '(2020, 1, 1, 12, 34, 1)\n', (3946, 3969), False, 'import datetime\n'), ((4749, 4790), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(1)', '(1)', '(12)', '(34)', '(56)'], {}), '(2020, 1, 1, 12, 34, 56)\n', (4766, 4790), False, 'import datetime\n')] |
import numpy as np
import pandas as pd
import pytest
from etna.datasets import TSDataset
from etna.datasets import generate_ar_df
from etna.datasets import generate_const_df
from etna.datasets import generate_periodic_df
from etna.metrics import R2
from etna.models import LinearPerSegmentModel
from etna.transforms import FilterFeaturesTransform
from etna.transforms.encoders.categorical import LabelEncoderTransform
from etna.transforms.encoders.categorical import OneHotEncoderTransform
@pytest.fixture
def two_df_with_new_values():
    """Return two wide-format dataframes with identical timestamps/segments/targets.

    The frames differ only in ``regressor_0``: the second one contains category
    values (9 for segment_0, 0 for segment_1) that are absent from the first,
    which lets tests exercise handling of previously unseen categories.
    """
    timestamps = list(pd.date_range(start="2021-01-01", end="2021-01-03")) * 2
    segments = ["segment_0"] * 3 + ["segment_1"] * 3
    targets = [1, 2, 3, 4, 5, 6]

    def build(regressor_values):
        # Assemble a long-format frame and convert it to etna's wide format.
        frame = pd.DataFrame(
            {
                "timestamp": timestamps,
                "segment": segments,
                "regressor_0": regressor_values,
                "target": targets,
            }
        )
        return TSDataset.to_dataset(frame)

    return build([5, 8, 5, 9, 5, 9]), build([5, 8, 9, 5, 0, 0])
@pytest.fixture
def df_for_ohe_encoding():
    """Return a dataset with four categorical regressors and expected one-hot answers.

    The expected answers cover ``regressor_0`` .. ``regressor_2``; each answer frame
    carries indicator columns named ``test_<i>`` (one per category, dtype "category").
    """
    target_df = TSDataset.to_dataset(generate_ar_df(10, start_time="2021-01-01", n_segments=1))
    regressors = pd.DataFrame(
        {
            "timestamp": pd.date_range(start="2021-01-01", end="2021-01-12"),
            "regressor_0": [5, 8, 5, 8, 5, 8, 5, 8, 5, 8, 5, 8],
            "regressor_1": [9, 5, 9, 5, 9, 5, 9, 5, 9, 5, 9, 5],
            "regressor_2": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            "regressor_3": [1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7],
        }
    )
    regressors["segment"] = "segment_0"
    tsdataset = TSDataset(df=target_df, freq="D", df_exog=TSDataset.to_dataset(regressors))

    def expected(column, categories):
        # Build the expected frame: one float indicator per category, cast to "category".
        answer = tsdataset.df.copy()["segment_0"]
        for index, category in enumerate(categories):
            indicator = answer[column].apply(lambda x: float(x == category))
            answer[f"test_{index}"] = indicator.astype("category")
        return answer

    answers = (
        expected("regressor_0", [5, 8]),
        expected("regressor_1", [5, 9]),
        expected("regressor_2", [0]),
    )
    return tsdataset.df, answers
@pytest.fixture
def df_for_label_encoding():
    """Return a dataset with four categorical regressors and expected label-encoded answers.

    The expected answers cover ``regressor_0`` .. ``regressor_2``; each answer frame
    carries a single encoded column named ``test`` with dtype "category".
    """
    target_df = TSDataset.to_dataset(generate_ar_df(10, start_time="2021-01-01", n_segments=1))
    regressors = pd.DataFrame(
        {
            "timestamp": pd.date_range(start="2021-01-01", end="2021-01-12"),
            "regressor_0": [5, 8, 5, 8, 5, 8, 5, 8, 5, 8, 5, 8],
            "regressor_1": [9, 5, 9, 5, 9, 5, 9, 5, 9, 5, 9, 5],
            "regressor_2": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            "regressor_3": [1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7],
        }
    )
    regressors["segment"] = "segment_0"
    tsdataset = TSDataset(df=target_df, freq="D", df_exog=TSDataset.to_dataset(regressors))

    def expected(column, positive_value):
        # The encoded column is 1.0 where the regressor equals `positive_value`, else 0.0.
        answer = tsdataset.df.copy()["segment_0"]
        answer["test"] = answer[column].apply(lambda x: float(x == positive_value)).astype("category")
        return answer

    answers = (
        expected("regressor_0", 8),
        expected("regressor_1", 9),
        expected("regressor_2", 1),
    )
    return tsdataset.df, answers
@pytest.fixture
def df_for_naming():
    """Return a wide dataframe whose exogenous columns ("regressor_1", "2") mix naming styles."""
    target_df = generate_ar_df(10, start_time="2021-01-01", n_segments=1)
    exog = generate_periodic_df(12, start_time="2021-01-01", scale=10, period=2, n_segments=2)
    # Pivot the two periodic segments into two exogenous columns for segment_0.
    exog = exog.pivot(index="timestamp", columns="segment").reset_index()
    exog.columns = ["timestamp"] + ["regressor_1", "2"]
    exog["segment"] = "segment_0"
    tsdataset = TSDataset(
        df=TSDataset.to_dataset(target_df),
        freq="D",
        df_exog=TSDataset.to_dataset(exog),
    )
    return tsdataset.df
def test_label_encoder_simple(df_for_label_encoding):
    """Test that LabelEncoderTransform works correct in a simple cases."""
    df, answers = df_for_label_encoding
    for i in range(3):
        le = LabelEncoderTransform(in_column=f"regressor_{i}", out_column="test")
        le.fit(df)
        # Transform once and reuse the result; the original ran transform twice per iteration.
        transformed = le.transform(df)["segment_0"]
        cols = transformed.columns
        assert transformed[cols].equals(answers[i][cols])
def test_ohe_encoder_simple(df_for_ohe_encoding):
    """Test that OneHotEncoderTransform works correct in a simple case."""
    df, answers = df_for_ohe_encoding
    for i in range(3):
        ohe = OneHotEncoderTransform(in_column=f"regressor_{i}", out_column="test")
        ohe.fit(df)
        # Transform once and reuse the result; the original ran transform twice per iteration.
        transformed = ohe.transform(df)["segment_0"]
        cols = transformed.columns
        assert transformed[cols].equals(answers[i][cols])
def test_value_error_label_encoder(df_for_label_encoding):
    """Test LabelEncoderTransform with wrong strategy."""
    df, _ = df_for_label_encoding
    # The deliberately misspelled strategy name must be rejected with a ValueError.
    with pytest.raises(ValueError, match="The strategy"):
        transform = LabelEncoderTransform(in_column="target", strategy="new_vlue")
        transform.fit(df)
        transform.transform(df)
@pytest.mark.parametrize(
    "strategy, expected_values",
    [
        ("new_value", np.array([[5, 0, 1, 5, 0, 4], [8, 1, 2, 0, -1, 5], [9, -1, 3, 0, -1, 6]])),
        ("none", np.array([[5, 0, 1, 5, 0, 4], [8, 1, 2, 0, np.nan, 5], [9, np.nan, 3, 0, np.nan, 6]])),
        ("mean", np.array([[5, 0, 1, 5, 0, 4], [8, 1, 2, 0, 0, 5], [9, 0.5, 3, 0, 0, 6]])),
    ],
)
def test_new_value_label_encoder(two_df_with_new_values, strategy, expected_values):
    """Test LabelEncoderTransform correct works with unknown values."""
    # Fit on the first frame, then transform the second, which contains
    # categories unseen during fit. Per the parametrized expectations,
    # unknowns become -1 ("new_value"), NaN ("none"), or the mean of the
    # known encodings, e.g. 0.5 ("mean").
    df1, df2 = two_df_with_new_values
    le = LabelEncoderTransform(in_column="regressor_0", strategy=strategy)
    le.fit(df1)
    np.testing.assert_array_almost_equal(le.transform(df2).values, expected_values)
def test_new_value_ohe_encoder(two_df_with_new_values):
    """Test OneHotEncoderTransform correct works with unknown values."""
    train_df, test_df = two_df_with_new_values
    transform = OneHotEncoderTransform(in_column="regressor_0", out_column="targets")
    transform.fit(train_df)
    # Per the expected matrix, categories unseen during fit get all-zero
    # one-hot columns (last row).
    expected_values = np.array(
        [
            [5.0, 1.0, 1.0, 0.0, 5.0, 4.0, 1.0, 0.0],
            [8.0, 2.0, 0.0, 1.0, 0.0, 5.0, 0.0, 0.0],
            [9.0, 3.0, 0.0, 0.0, 0.0, 6.0, 0.0, 0.0],
        ]
    )
    np.testing.assert_array_almost_equal(transform.transform(test_df).values, expected_values)
def test_naming_ohe_encoder(two_df_with_new_values):
    """Test OneHotEncoderTransform gives the correct columns."""
    df1, df2 = two_df_with_new_values
    ohe = OneHotEncoderTransform(in_column="regressor_0", out_column="targets")
    ohe.fit(df1)
    segments = ["segment_0", "segment_1"]
    features = ["target", "targets_0", "targets_1", "regressor_0"]
    # Set comprehension instead of set([...]) over a list comprehension (C403 idiom).
    expected_columns = {(segment, feature) for segment in segments for feature in features}
    assert expected_columns == set(ohe.transform(df2).columns.values)
@pytest.mark.parametrize(
    "in_column, prefix",
    [("2", ""), ("regressor_1", "regressor_")],
)
def test_naming_ohe_encoder_no_out_column(df_for_naming, in_column, prefix):
    """Test OneHotEncoderTransform gives the correct columns with no out_column."""
    df = df_for_naming
    ohe = OneHotEncoderTransform(in_column=in_column)
    ohe.fit(df)
    # repr(ohe) is the idiomatic spelling of str(ohe.__repr__()) — __repr__ already returns str.
    answer = set(list(df["segment_0"].columns) + [prefix + repr(ohe) + "_0", prefix + repr(ohe) + "_1"])
    assert answer == set(ohe.transform(df)["segment_0"].columns.values)
@pytest.mark.parametrize(
    "in_column, prefix",
    [("2", ""), ("regressor_1", "regressor_")],
)
def test_naming_label_encoder_no_out_column(df_for_naming, in_column, prefix):
    """Test LabelEncoderTransform gives the correct columns with no out_column."""
    df = df_for_naming
    le = LabelEncoderTransform(in_column=in_column)
    le.fit(df)
    # repr(le) is the idiomatic spelling of str(le.__repr__()) — __repr__ already returns str.
    answer = set(list(df["segment_0"].columns) + [prefix + repr(le)])
    assert answer == set(le.transform(df)["segment_0"].columns.values)
@pytest.fixture
def ts_for_ohe_sanity():
    """Build a dataset whose target is a noisy square of a single periodic regressor."""
    target_df = generate_const_df(periods=100, start_time="2021-01-01", scale=0, n_segments=1)
    exog = generate_periodic_df(periods=120, start_time="2021-01-01", scale=10, period=4, n_segments=1)
    exog = exog.pivot(index="timestamp", columns="segment").reset_index()
    exog.columns = ["timestamp"] + [f"regressor_{i}" for i in range(1)]
    exog["segment"] = "segment_0"
    target_df = TSDataset.to_dataset(target_df)
    exog = TSDataset.to_dataset(exog)
    rng = np.random.default_rng(12345)

    def noisy_square(x):
        # Deterministic noise thanks to the seeded generator above.
        return x ** 2 + rng.normal(0, 0.01)

    target_df["segment_0", "target"] = exog["segment_0"]["regressor_0"][:100].apply(noisy_square)
    return TSDataset(df=target_df, freq="D", df_exog=exog)
def test_ohe_sanity(ts_for_ohe_sanity):
    """Test for correct work in the full forecasting pipeline."""
    horizon = 10
    train_ts, test_ts = ts_for_ohe_sanity.train_test_split(test_size=horizon)
    # One-hot encode the regressor, then drop the raw categorical column.
    transforms = [
        OneHotEncoderTransform(in_column="regressor_0"),
        FilterFeaturesTransform(exclude=["regressor_0"]),
    ]
    train_ts.fit_transform(transforms)
    model = LinearPerSegmentModel()
    model.fit(train_ts)
    forecast_ts = model.forecast(train_ts.make_future(horizon))
    # The fit should be nearly perfect, i.e. R2 within 1e-5 of 1.
    assert 1 - R2()(test_ts, forecast_ts)["segment_0"] < 1e-5
| [
"etna.transforms.encoders.categorical.LabelEncoderTransform",
"etna.datasets.TSDataset.to_dataset",
"etna.datasets.generate_periodic_df",
"numpy.random.default_rng",
"etna.metrics.R2",
"etna.datasets.TSDataset",
"etna.models.LinearPerSegmentModel",
"etna.transforms.FilterFeaturesTransform",
"etna.da... | [((8503, 8595), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""in_column, prefix"""', "[('2', ''), ('regressor_1', 'regressor_')]"], {}), "('in_column, prefix', [('2', ''), ('regressor_1',\n 'regressor_')])\n", (8526, 8595), False, 'import pytest\n'), ((9071, 9163), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""in_column, prefix"""', "[('2', ''), ('regressor_1', 'regressor_')]"], {}), "('in_column, prefix', [('2', ''), ('regressor_1',\n 'regressor_')])\n", (9094, 9163), False, 'import pytest\n'), ((1409, 1466), 'etna.datasets.generate_ar_df', 'generate_ar_df', (['(10)'], {'start_time': '"""2021-01-01"""', 'n_segments': '(1)'}), "(10, start_time='2021-01-01', n_segments=1)\n", (1423, 1466), False, 'from etna.datasets import generate_ar_df\n'), ((1821, 1836), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (1833, 1836), True, 'import pandas as pd\n'), ((1901, 1937), 'etna.datasets.TSDataset.to_dataset', 'TSDataset.to_dataset', (['df_to_forecast'], {}), '(df_to_forecast)\n', (1921, 1937), False, 'from etna.datasets import TSDataset\n'), ((1958, 1993), 'etna.datasets.TSDataset.to_dataset', 'TSDataset.to_dataset', (['df_regressors'], {}), '(df_regressors)\n', (1978, 1993), False, 'from etna.datasets import TSDataset\n'), ((2010, 2071), 'etna.datasets.TSDataset', 'TSDataset', ([], {'df': 'df_to_forecast', 'freq': '"""D"""', 'df_exog': 'df_regressors'}), "(df=df_to_forecast, freq='D', df_exog=df_regressors)\n", (2019, 2071), False, 'from etna.datasets import TSDataset\n'), ((3397, 3454), 'etna.datasets.generate_ar_df', 'generate_ar_df', (['(10)'], {'start_time': '"""2021-01-01"""', 'n_segments': '(1)'}), "(10, start_time='2021-01-01', n_segments=1)\n", (3411, 3454), False, 'from etna.datasets import generate_ar_df\n'), ((3809, 3824), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (3821, 3824), True, 'import pandas as pd\n'), ((3889, 3925), 'etna.datasets.TSDataset.to_dataset', 'TSDataset.to_dataset', 
(['df_to_forecast'], {}), '(df_to_forecast)\n', (3909, 3925), False, 'from etna.datasets import TSDataset\n'), ((3946, 3981), 'etna.datasets.TSDataset.to_dataset', 'TSDataset.to_dataset', (['df_regressors'], {}), '(df_regressors)\n', (3966, 3981), False, 'from etna.datasets import TSDataset\n'), ((3998, 4059), 'etna.datasets.TSDataset', 'TSDataset', ([], {'df': 'df_to_forecast', 'freq': '"""D"""', 'df_exog': 'df_regressors'}), "(df=df_to_forecast, freq='D', df_exog=df_regressors)\n", (4007, 4059), False, 'from etna.datasets import TSDataset\n'), ((4969, 5026), 'etna.datasets.generate_ar_df', 'generate_ar_df', (['(10)'], {'start_time': '"""2021-01-01"""', 'n_segments': '(1)'}), "(10, start_time='2021-01-01', n_segments=1)\n", (4983, 5026), False, 'from etna.datasets import generate_ar_df\n'), ((5047, 5134), 'etna.datasets.generate_periodic_df', 'generate_periodic_df', (['(12)'], {'start_time': '"""2021-01-01"""', 'scale': '(10)', 'period': '(2)', 'n_segments': '(2)'}), "(12, start_time='2021-01-01', scale=10, period=2,\n n_segments=2)\n", (5067, 5134), False, 'from etna.datasets import generate_periodic_df\n'), ((5352, 5388), 'etna.datasets.TSDataset.to_dataset', 'TSDataset.to_dataset', (['df_to_forecast'], {}), '(df_to_forecast)\n', (5372, 5388), False, 'from etna.datasets import TSDataset\n'), ((5409, 5444), 'etna.datasets.TSDataset.to_dataset', 'TSDataset.to_dataset', (['df_regressors'], {}), '(df_regressors)\n', (5429, 5444), False, 'from etna.datasets import TSDataset\n'), ((5461, 5522), 'etna.datasets.TSDataset', 'TSDataset', ([], {'df': 'df_to_forecast', 'freq': '"""D"""', 'df_exog': 'df_regressors'}), "(df=df_to_forecast, freq='D', df_exog=df_regressors)\n", (5470, 5522), False, 'from etna.datasets import TSDataset\n'), ((7300, 7365), 'etna.transforms.encoders.categorical.LabelEncoderTransform', 'LabelEncoderTransform', ([], {'in_column': '"""regressor_0"""', 'strategy': 'strategy'}), "(in_column='regressor_0', strategy=strategy)\n", (7321, 7365), False, 
'from etna.transforms.encoders.categorical import LabelEncoderTransform\n'), ((7619, 7760), 'numpy.array', 'np.array', (['[[5.0, 1.0, 1.0, 0.0, 5.0, 4.0, 1.0, 0.0], [8.0, 2.0, 0.0, 1.0, 0.0, 5.0, \n 0.0, 0.0], [9.0, 3.0, 0.0, 0.0, 0.0, 6.0, 0.0, 0.0]]'], {}), '([[5.0, 1.0, 1.0, 0.0, 5.0, 4.0, 1.0, 0.0], [8.0, 2.0, 0.0, 1.0, \n 0.0, 5.0, 0.0, 0.0], [9.0, 3.0, 0.0, 0.0, 0.0, 6.0, 0.0, 0.0]])\n', (7627, 7760), True, 'import numpy as np\n'), ((7865, 7934), 'etna.transforms.encoders.categorical.OneHotEncoderTransform', 'OneHotEncoderTransform', ([], {'in_column': '"""regressor_0"""', 'out_column': '"""targets"""'}), "(in_column='regressor_0', out_column='targets')\n", (7887, 7934), False, 'from etna.transforms.encoders.categorical import OneHotEncoderTransform\n'), ((8205, 8274), 'etna.transforms.encoders.categorical.OneHotEncoderTransform', 'OneHotEncoderTransform', ([], {'in_column': '"""regressor_0"""', 'out_column': '"""targets"""'}), "(in_column='regressor_0', out_column='targets')\n", (8227, 8274), False, 'from etna.transforms.encoders.categorical import OneHotEncoderTransform\n'), ((8797, 8840), 'etna.transforms.encoders.categorical.OneHotEncoderTransform', 'OneHotEncoderTransform', ([], {'in_column': 'in_column'}), '(in_column=in_column)\n', (8819, 8840), False, 'from etna.transforms.encoders.categorical import OneHotEncoderTransform\n'), ((9365, 9407), 'etna.transforms.encoders.categorical.LabelEncoderTransform', 'LabelEncoderTransform', ([], {'in_column': 'in_column'}), '(in_column=in_column)\n', (9386, 9407), False, 'from etna.transforms.encoders.categorical import LabelEncoderTransform\n'), ((9638, 9716), 'etna.datasets.generate_const_df', 'generate_const_df', ([], {'periods': '(100)', 'start_time': '"""2021-01-01"""', 'scale': '(0)', 'n_segments': '(1)'}), "(periods=100, start_time='2021-01-01', scale=0, n_segments=1)\n", (9655, 9716), False, 'from etna.datasets import generate_const_df\n'), ((9737, 9834), 'etna.datasets.generate_periodic_df', 
'generate_periodic_df', ([], {'periods': '(120)', 'start_time': '"""2021-01-01"""', 'scale': '(10)', 'period': '(4)', 'n_segments': '(1)'}), "(periods=120, start_time='2021-01-01', scale=10, period\n =4, n_segments=1)\n", (9757, 9834), False, 'from etna.datasets import generate_periodic_df\n'), ((10067, 10103), 'etna.datasets.TSDataset.to_dataset', 'TSDataset.to_dataset', (['df_to_forecast'], {}), '(df_to_forecast)\n', (10087, 10103), False, 'from etna.datasets import TSDataset\n'), ((10124, 10159), 'etna.datasets.TSDataset.to_dataset', 'TSDataset.to_dataset', (['df_regressors'], {}), '(df_regressors)\n', (10144, 10159), False, 'from etna.datasets import TSDataset\n'), ((10170, 10198), 'numpy.random.default_rng', 'np.random.default_rng', (['(12345)'], {}), '(12345)\n', (10191, 10198), True, 'import numpy as np\n'), ((10369, 10430), 'etna.datasets.TSDataset', 'TSDataset', ([], {'df': 'df_to_forecast', 'freq': '"""D"""', 'df_exog': 'df_regressors'}), "(df=df_to_forecast, freq='D', df_exog=df_regressors)\n", (10378, 10430), False, 'from etna.datasets import TSDataset\n'), ((10658, 10705), 'etna.transforms.encoders.categorical.OneHotEncoderTransform', 'OneHotEncoderTransform', ([], {'in_column': '"""regressor_0"""'}), "(in_column='regressor_0')\n", (10680, 10705), False, 'from etna.transforms.encoders.categorical import OneHotEncoderTransform\n'), ((10717, 10765), 'etna.transforms.FilterFeaturesTransform', 'FilterFeaturesTransform', ([], {'exclude': "['regressor_0']"}), "(exclude=['regressor_0'])\n", (10740, 10765), False, 'from etna.transforms import FilterFeaturesTransform\n'), ((10818, 10841), 'etna.models.LinearPerSegmentModel', 'LinearPerSegmentModel', ([], {}), '()\n', (10839, 10841), False, 'from etna.models import LinearPerSegmentModel\n'), ((10965, 10969), 'etna.metrics.R2', 'R2', ([], {}), '()\n', (10967, 10969), False, 'from etna.metrics import R2\n'), ((914, 929), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (926, 929), True, 'import pandas as 
pd\n'), ((1306, 1321), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (1318, 1321), True, 'import pandas as pd\n'), ((1498, 1549), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2021-01-01"""', 'end': '"""2021-01-12"""'}), "(start='2021-01-01', end='2021-01-12')\n", (1511, 1549), True, 'import pandas as pd\n'), ((3486, 3537), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2021-01-01"""', 'end': '"""2021-01-12"""'}), "(start='2021-01-01', end='2021-01-12')\n", (3499, 3537), True, 'import pandas as pd\n'), ((5754, 5822), 'etna.transforms.encoders.categorical.LabelEncoderTransform', 'LabelEncoderTransform', ([], {'in_column': 'f"""regressor_{i}"""', 'out_column': '"""test"""'}), "(in_column=f'regressor_{i}', out_column='test')\n", (5775, 5822), False, 'from etna.transforms.encoders.categorical import LabelEncoderTransform\n'), ((6173, 6242), 'etna.transforms.encoders.categorical.OneHotEncoderTransform', 'OneHotEncoderTransform', ([], {'in_column': 'f"""regressor_{i}"""', 'out_column': '"""test"""'}), "(in_column=f'regressor_{i}', out_column='test')\n", (6195, 6242), False, 'from etna.transforms.encoders.categorical import OneHotEncoderTransform\n'), ((6556, 6603), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""The strategy"""'}), "(ValueError, match='The strategy')\n", (6569, 6603), False, 'import pytest\n'), ((6618, 6680), 'etna.transforms.encoders.categorical.LabelEncoderTransform', 'LabelEncoderTransform', ([], {'in_column': '"""target"""', 'strategy': '"""new_vlue"""'}), "(in_column='target', strategy='new_vlue')\n", (6639, 6680), False, 'from etna.transforms.encoders.categorical import LabelEncoderTransform\n'), ((6814, 6887), 'numpy.array', 'np.array', (['[[5, 0, 1, 5, 0, 4], [8, 1, 2, 0, -1, 5], [9, -1, 3, 0, -1, 6]]'], {}), '([[5, 0, 1, 5, 0, 4], [8, 1, 2, 0, -1, 5], [9, -1, 3, 0, -1, 6]])\n', (6822, 6887), True, 'import numpy as np\n'), ((6907, 6997), 'numpy.array', 'np.array', (['[[5, 0, 1, 5, 0, 4], [8, 1, 2, 
0, np.nan, 5], [9, np.nan, 3, 0, np.nan, 6]]'], {}), '([[5, 0, 1, 5, 0, 4], [8, 1, 2, 0, np.nan, 5], [9, np.nan, 3, 0, np\n .nan, 6]])\n', (6915, 6997), True, 'import numpy as np\n'), ((7012, 7084), 'numpy.array', 'np.array', (['[[5, 0, 1, 5, 0, 4], [8, 1, 2, 0, 0, 5], [9, 0.5, 3, 0, 0, 6]]'], {}), '([[5, 0, 1, 5, 0, 4], [8, 1, 2, 0, 0, 5], [9, 0.5, 3, 0, 0, 6]])\n', (7020, 7084), True, 'import numpy as np\n'), ((575, 626), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2021-01-01"""', 'end': '"""2021-01-03"""'}), "(start='2021-01-01', end='2021-01-03')\n", (588, 626), True, 'import pandas as pd\n'), ((643, 694), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2021-01-01"""', 'end': '"""2021-01-03"""'}), "(start='2021-01-01', end='2021-01-03')\n", (656, 694), True, 'import pandas as pd\n'), ((967, 1018), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2021-01-01"""', 'end': '"""2021-01-03"""'}), "(start='2021-01-01', end='2021-01-03')\n", (980, 1018), True, 'import pandas as pd\n'), ((1035, 1086), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2021-01-01"""', 'end': '"""2021-01-03"""'}), "(start='2021-01-01', end='2021-01-03')\n", (1048, 1086), True, 'import pandas as pd\n')] |