hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c3756b3498489f11af3f11fbb569f02701ee7d2 | 1,413 | py | Python | tests/test_param_count.py | DenXX/fvcore | 4b91cf092f4f5d379b2c93398780a3b5755e7179 | [
"Apache-2.0"
] | null | null | null | tests/test_param_count.py | DenXX/fvcore | 4b91cf092f4f5d379b2c93398780a3b5755e7179 | [
"Apache-2.0"
] | null | null | null | tests/test_param_count.py | DenXX/fvcore | 4b91cf092f4f5d379b2c93398780a3b5755e7179 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import unittest
from torch import nn
from fvcore.nn.parameter_count import parameter_count, parameter_count_table
class NetWithReuse(nn.Module):
    """Two 100-channel 3x3 convolutions; optionally ties conv2's kernel to conv1's."""

    def __init__(self, reuse: bool = False) -> None:
        super().__init__()
        conv1, conv2 = nn.Conv2d(100, 100, 3), nn.Conv2d(100, 100, 3)
        if reuse:
            # Tie the kernels so both layers reference one Parameter object.
            conv2.weight = conv1.weight  # pyre-ignore
        self.conv1 = conv1
        self.conv2 = conv2
class NetWithDupPrefix(nn.Module):
    """Module whose child name ``conv1`` is a string prefix of ``conv111``."""

    def __init__(self) -> None:
        super().__init__()
        # Register in the same order as attribute assignment would.
        for name in ("conv1", "conv111"):
            setattr(self, name, nn.Conv2d(100, 100, 3))
class TestParamCount(unittest.TestCase):
    def test_param(self) -> None:
        """Total and per-submodule counts for two independent convolutions."""
        net = NetWithReuse()
        count = parameter_count(net)
        # BUG FIX: assertTrue(a, b) treats ``b`` as a failure *message* and
        # passes for any truthy ``a`` -- the old assertions never compared
        # against the expected counts. Use assertEqual to actually check them.
        self.assertEqual(count[""], 180200)
        self.assertEqual(count["conv2"], 90100)

    def test_param_with_reuse(self) -> None:
        """A shared kernel is counted once; conv2 contributes only its bias."""
        net = NetWithReuse(reuse=True)
        count = parameter_count(net)
        self.assertEqual(count[""], 90200)
        self.assertEqual(count["conv2"], 100)

    def test_param_with_same_prefix(self) -> None:
        """``conv1`` being a string prefix of ``conv111`` must not duplicate rows."""
        net = NetWithDupPrefix()
        table = parameter_count_table(net)
        c = ["conv111.weight" in line for line in table.split("\n")]
        self.assertEqual(
            sum(c), 1
        )  # it only appears once, despite being a prefix of conv1
| 30.717391 | 76 | 0.635527 |
import unittest
from torch import nn
from fvcore.nn.parameter_count import parameter_count, parameter_count_table
class NetWithReuse(nn.Module):
    """Pair of identical convolutions with optional weight tying."""

    def __init__(self, reuse: bool = False) -> None:
        super().__init__()
        self.conv1 = nn.Conv2d(100, 100, 3)
        self.conv2 = nn.Conv2d(100, 100, 3)
        if not reuse:
            return
        # Share a single kernel Parameter between the two layers.
        self.conv2.weight = self.conv1.weight
class NetWithDupPrefix(nn.Module):
    """One child name (``conv1``) is a prefix of another (``conv111``)."""

    def __init__(self) -> None:
        super().__init__()
        self.conv1, self.conv111 = (nn.Conv2d(100, 100, 3) for _ in range(2))
class TestParamCount(unittest.TestCase):
    def test_param(self) -> None:
        """Total and per-submodule counts for two independent convolutions."""
        net = NetWithReuse()
        count = parameter_count(net)
        # BUG FIX: assertTrue(a, b) treats ``b`` as a failure message and
        # passes for any truthy ``a``; assertEqual actually checks the value.
        self.assertEqual(count[""], 180200)
        self.assertEqual(count["conv2"], 90100)

    def test_param_with_reuse(self) -> None:
        """A shared kernel is counted once; conv2 contributes only its bias."""
        net = NetWithReuse(reuse=True)
        count = parameter_count(net)
        self.assertEqual(count[""], 90200)
        self.assertEqual(count["conv2"], 100)

    def test_param_with_same_prefix(self) -> None:
        """``conv1`` being a string prefix of ``conv111`` must not duplicate rows."""
        net = NetWithDupPrefix()
        table = parameter_count_table(net)
        c = ["conv111.weight" in line for line in table.split("\n")]
        # the entry only appears once, despite being a prefix of conv1
        self.assertEqual(
            sum(c), 1
        )
| true | true |
1c375718258e5aba8e221c8493b54ed4e2309394 | 4,848 | py | Python | style_transfer/train.py | fredericgo/rl_morph_pytorch | 743cd82d82c16c8d52e5265b6cc5cdf490cb8945 | [
"MIT"
] | null | null | null | style_transfer/train.py | fredericgo/rl_morph_pytorch | 743cd82d82c16c8d52e5265b6cc5cdf490cb8945 | [
"MIT"
] | null | null | null | style_transfer/train.py | fredericgo/rl_morph_pytorch | 743cd82d82c16c8d52e5265b6cc5cdf490cb8945 | [
"MIT"
] | null | null | null | import argparse
import datetime
import gym
import numpy as np
import itertools
import sys
sys.path.insert(0, '..')
from torch.utils.tensorboard import SummaryWriter
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.optim import Adam
from torch.utils.data import DataLoader, ConcatDataset
from style_transfer.replay_memory_dataset import ReplayMemoryDataset
from style_transfer.skeleton_template_dataset import SkeletonTemplateDataset
from style_transfer.skeleton_encoder import SkeletonEncoder
from style_transfer.motion_encoder import MotionEncoder
from style_transfer.motion_decoder import MotionDecoder
from style_transfer.ae import AE
import envs
parser = argparse.ArgumentParser(description='PyTorch Soft Actor-Critic Args')
parser.add_argument('--env1-name', default="ant",
help='Mujoco Gym environment (default: HalfCheetah-v2)')
parser.add_argument('--env2-name', default="ant3",
help='Mujoco Gym environment (default: HalfCheetah-v2)')
parser.add_argument('--agent_memory1', default='data/ant.memory',
help='Path for saved replay memory')
parser.add_argument('--agent_memory2', default='data/ant3.memory',
help='Path for saved replay memory')
parser.add_argument('--hidden_dim', type=int, default=256,
help='MLP hidden dimension')
parser.add_argument('--latent_dim', type=int, default=64,
help='Encoder latent dimension')
parser.add_argument('--seed', type=int, default=123456, metavar='N',
help='random seed (default: 123456)')
parser.add_argument('--lr', type=float, default=5e-4, metavar='N',
help='random seed (default: 123456)')
parser.add_argument('--epochs', type=int, default=2000, metavar='N',
help='random seed (default: 123456)')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='random seed (default: 123456)')
parser.add_argument('--checkpoint_interval', type=int, default=10,
help='checkpoint training model every # steps')
parser.add_argument('--cuda', action="store_true",
help='run on CUDA (default: False)')
args = parser.parse_args()
device = torch.device("cuda" if args.cuda else "cpu")
env = envs.load(args.env1_name)
env.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
dataset1 = ReplayMemoryDataset(args.agent_memory1)
dataset2 = ReplayMemoryDataset(args.agent_memory2)
combined_dataset = ConcatDataset([dataset1, dataset2])
s1 = dataset1[0][0].size(0)
s2 = dataset2[0][0].size(0)
skeleton_dataset = SkeletonTemplateDataset([s1, s2])
MAX_LEN = 27
def collate_and_pad(batch):
    """Collate (state, ...) tuples into a zero-padded (B, MAX_LEN) tensor on ``device``.

    Each sample is a 5-tuple whose first element is a 1-D state vector; states
    shorter than MAX_LEN are right-padded with zeros.
    """
    padded = batch[0][0].new_full((len(batch), MAX_LEN), 0.)
    for row, (state, _, _, _, _) in enumerate(batch):
        padded[row, : state.size(0), ...] = state
    return padded.to(device=device)
state_size = env.observation_space.shape[0]
model = AE(state_size, state_size, args.hidden_dim, args.latent_dim).to(device=device)
#Tesnorboard
datetime_st = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
log_dir = f'runs/{datetime_st}_StyleAE'
writer = SummaryWriter(log_dir)
dataloader = DataLoader(combined_dataset, batch_size=args.batch_size,
collate_fn=collate_and_pad, drop_last=True,
shuffle=True, num_workers=2)
skeleton_loader = DataLoader(skeleton_dataset, batch_size=args.batch_size, num_workers=0)
skeleton_iter = iter(itertools.cycle(skeleton_loader))
def style_trasfer_loss(f, x, s, x_hat):
    """Sum of a content term (full encoding of x_hat vs x under skeleton s)
    and a style term (skeleton encodings of x_hat vs s), both as summed
    L2 norms over the last dimension."""
    content_delta = f(x_hat, s) - f(x, s)
    style_delta = f.skeleton_encoder(x_hat) - f.skeleton_encoder(s)
    content_term = torch.norm(content_delta, p=2, dim=-1).sum()
    style_term = torch.norm(style_delta, p=2, dim=-1).sum()
    return content_term + style_term
optimizer = Adam(model.parameters(), lr=args.lr)
print("Start training StyleAE...")
model.train()
epoch = 0
for epoch in range(args.epochs):
    overall_loss = 0.0
    n_samples = 0  # samples actually processed this epoch
    for batch_idx, x in enumerate(dataloader):
        s = next(skeleton_iter)
        optimizer.zero_grad()
        x_hat = model(x, s)
        loss = style_trasfer_loss(model.f,
                                  x, s, x_hat)
        overall_loss += loss.item()
        # drop_last=True guarantees every yielded batch is full-size
        n_samples += args.batch_size
        loss.backward()
        optimizer.step()
    # BUG FIX: the old denominator ``batch_idx * args.batch_size`` dropped the
    # final batch (enumerate starts at 0) and raised ZeroDivisionError when the
    # loader yielded exactly one batch. Count processed samples instead.
    avg_loss = overall_loss / max(n_samples, 1)
    writer.add_scalar('loss', avg_loss, epoch)
    print(f"\tEpoch {epoch + 1} completed!\t Average Loss: {avg_loss}")
    if epoch % args.checkpoint_interval == 0:
        model.save_model(log_dir)
        print("----------------------------------------")
        print(f"Save Model: {epoch} epoch.")
print("----------------------------------------") | 35.647059 | 89 | 0.669142 | import argparse
import datetime
import gym
import numpy as np
import itertools
import sys
sys.path.insert(0, '..')
from torch.utils.tensorboard import SummaryWriter
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.optim import Adam
from torch.utils.data import DataLoader, ConcatDataset
from style_transfer.replay_memory_dataset import ReplayMemoryDataset
from style_transfer.skeleton_template_dataset import SkeletonTemplateDataset
from style_transfer.skeleton_encoder import SkeletonEncoder
from style_transfer.motion_encoder import MotionEncoder
from style_transfer.motion_decoder import MotionDecoder
from style_transfer.ae import AE
import envs
parser = argparse.ArgumentParser(description='PyTorch Soft Actor-Critic Args')
parser.add_argument('--env1-name', default="ant",
help='Mujoco Gym environment (default: HalfCheetah-v2)')
parser.add_argument('--env2-name', default="ant3",
help='Mujoco Gym environment (default: HalfCheetah-v2)')
parser.add_argument('--agent_memory1', default='data/ant.memory',
help='Path for saved replay memory')
parser.add_argument('--agent_memory2', default='data/ant3.memory',
help='Path for saved replay memory')
parser.add_argument('--hidden_dim', type=int, default=256,
help='MLP hidden dimension')
parser.add_argument('--latent_dim', type=int, default=64,
help='Encoder latent dimension')
parser.add_argument('--seed', type=int, default=123456, metavar='N',
help='random seed (default: 123456)')
parser.add_argument('--lr', type=float, default=5e-4, metavar='N',
help='random seed (default: 123456)')
parser.add_argument('--epochs', type=int, default=2000, metavar='N',
help='random seed (default: 123456)')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='random seed (default: 123456)')
parser.add_argument('--checkpoint_interval', type=int, default=10,
help='checkpoint training model every # steps')
parser.add_argument('--cuda', action="store_true",
help='run on CUDA (default: False)')
args = parser.parse_args()
device = torch.device("cuda" if args.cuda else "cpu")
env = envs.load(args.env1_name)
env.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
dataset1 = ReplayMemoryDataset(args.agent_memory1)
dataset2 = ReplayMemoryDataset(args.agent_memory2)
combined_dataset = ConcatDataset([dataset1, dataset2])
s1 = dataset1[0][0].size(0)
s2 = dataset2[0][0].size(0)
skeleton_dataset = SkeletonTemplateDataset([s1, s2])
MAX_LEN = 27
def collate_and_pad(batch):
    """Collate (state, ...) tuples into a zero-padded (B, MAX_LEN) tensor on ``device``."""
    padded = batch[0][0].new_full((len(batch), MAX_LEN), 0.)
    for row, (state, _, _, _, _) in enumerate(batch):
        # right-pad each state vector with zeros up to MAX_LEN
        padded[row, : state.size(0), ...] = state
    return padded.to(device=device)
state_size = env.observation_space.shape[0]
model = AE(state_size, state_size, args.hidden_dim, args.latent_dim).to(device=device)
datetime_st = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
log_dir = f'runs/{datetime_st}_StyleAE'
writer = SummaryWriter(log_dir)
dataloader = DataLoader(combined_dataset, batch_size=args.batch_size,
collate_fn=collate_and_pad, drop_last=True,
shuffle=True, num_workers=2)
skeleton_loader = DataLoader(skeleton_dataset, batch_size=args.batch_size, num_workers=0)
skeleton_iter = iter(itertools.cycle(skeleton_loader))
def style_trasfer_loss(f, x, s, x_hat):
    """Content term (full encodings) plus style term (skeleton encodings),
    each a summed L2 norm over the last dimension."""
    content = torch.norm(f(x_hat, s) - f(x, s), p=2, dim=-1).sum()
    style = torch.norm(
        f.skeleton_encoder(x_hat) - f.skeleton_encoder(s), p=2, dim=-1
    ).sum()
    return content + style
optimizer = Adam(model.parameters(), lr=args.lr)
print("Start training StyleAE...")
model.train()
epoch = 0
for epoch in range(args.epochs):
    overall_loss = 0.0
    n_samples = 0  # samples actually processed this epoch
    for batch_idx, x in enumerate(dataloader):
        s = next(skeleton_iter)
        optimizer.zero_grad()
        x_hat = model(x, s)
        loss = style_trasfer_loss(model.f,
                                  x, s, x_hat)
        overall_loss += loss.item()
        # drop_last=True guarantees every yielded batch is full-size
        n_samples += args.batch_size
        loss.backward()
        optimizer.step()
    # BUG FIX: ``batch_idx * args.batch_size`` under-counted by one batch and
    # raised ZeroDivisionError for a single-batch loader; divide by the true
    # number of processed samples instead.
    avg_loss = overall_loss / max(n_samples, 1)
    writer.add_scalar('loss', avg_loss, epoch)
    print(f"\tEpoch {epoch + 1} completed!\t Average Loss: {avg_loss}")
    if epoch % args.checkpoint_interval == 0:
        model.save_model(log_dir)
        print("----------------------------------------")
        print(f"Save Model: {epoch} epoch.")
print("----------------------------------------") | true | true |
1c37572a567b9775579ee8e4f79a0542c64b9868 | 9,784 | py | Python | optim/fd_optim_lbfgs_mod_distributed.py | slowlightx/peps-torch | 3f94e2ac32e79cbdadf572c89e57ae8e17d4e012 | [
"MIT"
] | 33 | 2020-04-22T23:11:25.000Z | 2022-03-27T09:11:29.000Z | optim/fd_optim_lbfgs_mod_distributed.py | jurajHasik/tn-torch | bc5068b2026e670a2795fc3fc060a3313bc1e3fb | [
"MIT"
] | 4 | 2021-06-09T14:57:50.000Z | 2021-11-29T14:46:08.000Z | optim/fd_optim_lbfgs_mod_distributed.py | jurajHasik/tn-torch | bc5068b2026e670a2795fc3fc060a3313bc1e3fb | [
"MIT"
] | 8 | 2020-07-12T11:42:49.000Z | 2022-02-09T07:34:23.000Z | import copy
import time
import json
import logging
log = logging.getLogger(__name__)
import torch
from optim import lbfgs_modified
import config as cfg
def store_checkpoint(checkpoint_file, state, optimizer, current_epoch, current_loss,
                     verbosity=0):
    r"""
    :param checkpoint_file: destination file for the serialized checkpoint
    :param state: ipeps wavefunction
    :param optimizer: optimizer whose state is to be preserved
    :param current_epoch: epoch at which the checkpoint is taken
    :param current_loss: loss value at ``current_epoch``
    :param verbosity: if positive, print the checkpoint location
    :type checkpoint_file: str or Path
    :type state: IPEPS
    :type optimizer: torch.optim.Optimizer
    :type current_epoch: int
    :type current_loss: float
    :type verbosity: int

    Serialize the current state of the optimization into ``checkpoint_file``.
    """
    payload = {
        'epoch': current_epoch,
        'loss': current_loss,
        'parameters': state.get_checkpoint(),
        'optimizer_state_dict': optimizer.state_dict(),
    }
    torch.save(payload, checkpoint_file)
    if verbosity > 0:
        print(checkpoint_file)
def optimize_state(state, ctm_env_init, loss_fn, grad_fn,
    obs_fn=None, post_proc=None,
    main_args=cfg.main_args, opt_args=cfg.opt_args,ctm_args=cfg.ctm_args,
    global_args=cfg.global_args):
    r"""
    :param state: initial wavefunction
    :param ctm_env_init: initial environment corresponding to ``state``
    :param loss_fn: loss function
    :param grad_fn: function evaluating the gradient of ``loss_fn`` with respect
                    to the parameters of ``state``. It is called under
                    ``torch.no_grad`` and is therefore expected to compute the
                    gradients explicitly (e.g. finite differences) and return
                    them as a dict keyed like ``state.coeffs``.
    :param obs_fn: optional function evaluating observables after each loss evaluation
    :param post_proc: optional post-processing invoked at the end of each epoch
    :param main_args: parsed command line arguments
    :param opt_args: optimization configuration
    :param ctm_args: CTM algorithm configuration
    :param global_args: global configuration
    :type state: IPEPS
    :type ctm_env_init: ENV
    :type loss_fn: function(IPEPS,ENV,dict)->torch.tensor
    :type main_args: argparse.Namespace
    :type opt_args: OPTARGS
    :type ctm_args: CTMARGS
    :type global_args: GLOBALARGS

    Optimizes initial wavefunction ``state`` with respect to ``loss_fn`` using
    the modified LBFGS optimizer. The main parameters influencing the
    optimization process are given in :py:class:`config.OPTARGS`. Checkpoints
    are written to ``<out_prefix>_checkpoint.p`` and the best state seen so far
    to ``<out_prefix>_state.json``.
    """
    verbosity = opt_args.verbosity_opt_epoch
    checkpoint_file = main_args.out_prefix+"_checkpoint.p"
    outputstatefile= main_args.out_prefix+"_state.json"
    # loss/line-search histories plus best-so-far trackers, shared with closures
    t_data = dict({"loss": [], "min_loss": 1.0e+16, "loss_ls": [], "min_loss_ls": 1.0e+16})
    # one-element list so the closures below can rebind the current environment
    current_env=[ctm_env_init]
    context= dict({"ctm_args":ctm_args, "opt_args":opt_args, "loss_history": t_data})
    epoch= 0
    parameters= state.get_parameters()
    for A in parameters: A.requires_grad_(True)
    optimizer = lbfgs_modified.LBFGS_MOD(parameters, max_iter=opt_args.max_iter_per_epoch, \
        lr=opt_args.lr, tolerance_grad=opt_args.tolerance_grad, \
        tolerance_change=opt_args.tolerance_change, history_size=opt_args.history_size, \
        line_search_fn=opt_args.line_search, line_search_eps=opt_args.line_search_tol)
    # load and/or modify optimizer state from checkpoint
    if main_args.opt_resume is not None:
        print(f"INFO: resuming from check point. resume = {main_args.opt_resume}")
        checkpoint = torch.load(main_args.opt_resume)
        epoch0 = checkpoint["epoch"]
        loss0 = checkpoint["loss"]
        cp_state_dict= checkpoint["optimizer_state_dict"]
        cp_opt_params= cp_state_dict["param_groups"][0]
        cp_opt_history= cp_state_dict["state"][cp_opt_params["params"][0]]
        if main_args.opt_resume_override_params:
            # command-line options take precedence over the checkpointed ones
            cp_opt_params["lr"] = opt_args.lr
            cp_opt_params["max_iter"] = opt_args.max_iter_per_epoch
            cp_opt_params["tolerance_grad"] = opt_args.tolerance_grad
            cp_opt_params["tolerance_change"] = opt_args.tolerance_change
            # resize stored old_dirs, old_stps, ro, al to new history size
            cp_history_size= cp_opt_params["history_size"]
            cp_opt_params["history_size"] = opt_args.history_size
            if opt_args.history_size < cp_history_size:
                if len(cp_opt_history["old_dirs"]) > opt_args.history_size:
                    # keep only the most recent entries of the LBFGS memory
                    cp_opt_history["old_dirs"]= cp_opt_history["old_dirs"][-opt_args.history_size:]
                    cp_opt_history["old_stps"]= cp_opt_history["old_stps"][-opt_args.history_size:]
                cp_ro_filtered= list(filter(None,cp_opt_history["ro"]))
                cp_al_filtered= list(filter(None,cp_opt_history["al"]))
                if len(cp_ro_filtered) > opt_args.history_size:
                    cp_opt_history["ro"]= cp_ro_filtered[-opt_args.history_size:]
                    cp_opt_history["al"]= cp_al_filtered[-opt_args.history_size:]
                else:
                    # pad back to history_size with None placeholders
                    cp_opt_history["ro"]= cp_ro_filtered + [None for i in range(opt_args.history_size-len(cp_ro_filtered))]
                    cp_opt_history["al"]= cp_al_filtered + [None for i in range(opt_args.history_size-len(cp_ro_filtered))]
        cp_state_dict["param_groups"][0]= cp_opt_params
        cp_state_dict["state"][cp_opt_params["params"][0]]= cp_opt_history
        optimizer.load_state_dict(cp_state_dict)
        print(f"checkpoint.loss = {loss0}")
    #@profile
    def closure(linesearching=False):
        # Loss + explicit-gradient closure handed to the LBFGS optimizer.
        context["line_search"]=linesearching
        # 0) evaluate loss
        optimizer.zero_grad()
        with torch.no_grad():
            loss, ctm_env, history, timings= loss_fn(state, current_env[0], context)
        # 1) record loss and store current state if the loss improves
        if linesearching:
            t_data["loss_ls"].append(loss.item())
            if t_data["min_loss_ls"] > t_data["loss_ls"][-1]:
                t_data["min_loss_ls"]= t_data["loss_ls"][-1]
        else:
            t_data["loss"].append(loss.item())
            if t_data["min_loss"] > t_data["loss"][-1]:
                t_data["min_loss"]= t_data["loss"][-1]
                state.write_to_file(outputstatefile, normalize=True)
        # 2) log CTM metrics for debugging
        if opt_args.opt_logging:
            log.info({"history_length": len(history['log']), "history": history['log'],
                "final_multiplets": history["final_multiplets"]})
            log_entry=dict({"id": epoch, "loss": t_data["loss"][-1], "timings": timings})
            if linesearching:
                log_entry["LS"]=len(t_data["loss_ls"])
                log_entry["loss"]=t_data["loss_ls"]
            log.info(json.dumps(log_entry))
        # 3) compute desired observables
        if obs_fn is not None:
            obs_fn(state, ctm_env, context)
        # 4) evaluate gradient explicitly (outside autograd) and attach to params
        t_grad0= time.perf_counter()
        with torch.no_grad():
            grad= grad_fn(state, ctm_env, context, loss)
            for k in state.coeffs.keys():
                state.coeffs[k].grad= grad[k]
        t_grad1= time.perf_counter()
        # 5) log grad metrics
        if opt_args.opt_logging:
            log_entry=dict({"id": epoch, "t_grad": t_grad1-t_grad0 })
            if linesearching: log_entry["LS"]=len(t_data["loss_ls"])
            log.info(json.dumps(log_entry))
        # 6) detach current environment from autograd graph
        current_env[0] = ctm_env.detach().clone()
        return loss
    # closure for derivative-free line search. This closure
    # is to be called within torch.no_grad context
    @torch.no_grad()
    def closure_linesearch(linesearching):
        context["line_search"]=linesearching
        # 1) evaluate loss, optionally with a line-search-specific CTM setup
        loc_opt_args= copy.deepcopy(opt_args)
        loc_opt_args.opt_ctm_reinit= opt_args.line_search_ctm_reinit
        loc_ctm_args= copy.deepcopy(ctm_args)
        # TODO check if we are optimizing C4v symmetric ansatz
        if opt_args.line_search_svd_method != 'DEFAULT':
            loc_ctm_args.projector_svd_method= opt_args.line_search_svd_method
        loc_context= dict({"ctm_args":loc_ctm_args, "opt_args":loc_opt_args, \
            "loss_history": t_data, "line_search": True})
        loss, ctm_env, history, timings = loss_fn(state, current_env[0],\
            loc_context)
        # 2) record line-search loss and best-so-far
        t_data["loss_ls"].append(loss.item())
        if t_data["min_loss_ls"] > t_data["loss_ls"][-1]:
            t_data["min_loss_ls"]= t_data["loss_ls"][-1]
        # 3) log CTM metrics for debugging
        if opt_args.opt_logging:
            log.info({"history_length": len(history['log']), "history": history['log'],
                "final_multiplets": history["final_multiplets"]})
            log_entry=dict({"id": epoch, "LS": len(t_data["loss_ls"]), \
                "loss": t_data["loss_ls"], "timings": timings})
            log.info(json.dumps(log_entry))
        # 4) compute desired observables
        if obs_fn is not None:
            obs_fn(state, ctm_env, context)
        current_env[0]= ctm_env
        return loss
    for epoch in range(main_args.opt_max_iter):
        # checkpoint the optimizer
        # checkpointing before step, guarantees the correspondence between the wavefunction
        # and the last computed value of loss t_data["loss"][-1]
        if epoch>0:
            store_checkpoint(checkpoint_file, state, optimizer, epoch, t_data["loss"][-1])
        # After execution closure ``current_env`` **IS NOT** corresponding to ``state``, since
        # the ``state`` on-site tensors have been modified by gradient.
        optimizer.step_2c(closure, closure_linesearch)
        # reset line search history
        t_data["loss_ls"]=[]
        t_data["min_loss_ls"]=1.0e+16
        if post_proc is not None:
            post_proc(state, current_env[0], context)
    # optimization is over, store the last checkpoint
    store_checkpoint(checkpoint_file, state, optimizer, \
main_args.opt_max_iter, t_data["loss"][-1]) | 44.072072 | 119 | 0.657093 | import copy
import time
import json
import logging
log = logging.getLogger(__name__)
import torch
from optim import lbfgs_modified
import config as cfg
def store_checkpoint(checkpoint_file, state, optimizer, current_epoch, current_loss,
                     verbosity=0):
    """Serialize epoch, loss, wavefunction and optimizer state to ``checkpoint_file``."""
    checkpoint = dict(
        epoch=current_epoch,
        loss=current_loss,
        parameters=state.get_checkpoint(),
        optimizer_state_dict=optimizer.state_dict(),
    )
    torch.save(checkpoint, checkpoint_file)
    if verbosity > 0:
        print(checkpoint_file)
def optimize_state(state, ctm_env_init, loss_fn, grad_fn,
    obs_fn=None, post_proc=None,
    main_args=cfg.main_args, opt_args=cfg.opt_args,ctm_args=cfg.ctm_args,
    global_args=cfg.global_args):
    r"""
    Optimize wavefunction ``state`` with respect to ``loss_fn`` using the
    modified LBFGS optimizer. Gradients are supplied explicitly by ``grad_fn``
    (evaluated under ``torch.no_grad``), not by autograd. Checkpoints are
    written to ``<out_prefix>_checkpoint.p``; the best state seen so far is
    written to ``<out_prefix>_state.json``. Key options come from
    :py:class:`config.OPTARGS`.
    """
    verbosity = opt_args.verbosity_opt_epoch
    checkpoint_file = main_args.out_prefix+"_checkpoint.p"
    outputstatefile= main_args.out_prefix+"_state.json"
    # loss/line-search histories plus best-so-far trackers, shared with closures
    t_data = dict({"loss": [], "min_loss": 1.0e+16, "loss_ls": [], "min_loss_ls": 1.0e+16})
    # one-element list so the closures below can rebind the current environment
    current_env=[ctm_env_init]
    context= dict({"ctm_args":ctm_args, "opt_args":opt_args, "loss_history": t_data})
    epoch= 0
    parameters= state.get_parameters()
    for A in parameters: A.requires_grad_(True)
    optimizer = lbfgs_modified.LBFGS_MOD(parameters, max_iter=opt_args.max_iter_per_epoch, \
        lr=opt_args.lr, tolerance_grad=opt_args.tolerance_grad, \
        tolerance_change=opt_args.tolerance_change, history_size=opt_args.history_size, \
        line_search_fn=opt_args.line_search, line_search_eps=opt_args.line_search_tol)
    # resume: load optimizer state from checkpoint, optionally overriding its
    # hyper-parameters and resizing the stored LBFGS memory
    if main_args.opt_resume is not None:
        print(f"INFO: resuming from check point. resume = {main_args.opt_resume}")
        checkpoint = torch.load(main_args.opt_resume)
        epoch0 = checkpoint["epoch"]
        loss0 = checkpoint["loss"]
        cp_state_dict= checkpoint["optimizer_state_dict"]
        cp_opt_params= cp_state_dict["param_groups"][0]
        cp_opt_history= cp_state_dict["state"][cp_opt_params["params"][0]]
        if main_args.opt_resume_override_params:
            cp_opt_params["lr"] = opt_args.lr
            cp_opt_params["max_iter"] = opt_args.max_iter_per_epoch
            cp_opt_params["tolerance_grad"] = opt_args.tolerance_grad
            cp_opt_params["tolerance_change"] = opt_args.tolerance_change
            # resize stored old_dirs, old_stps, ro, al to the new history size
            cp_history_size= cp_opt_params["history_size"]
            cp_opt_params["history_size"] = opt_args.history_size
            if opt_args.history_size < cp_history_size:
                if len(cp_opt_history["old_dirs"]) > opt_args.history_size:
                    cp_opt_history["old_dirs"]= cp_opt_history["old_dirs"][-opt_args.history_size:]
                    cp_opt_history["old_stps"]= cp_opt_history["old_stps"][-opt_args.history_size:]
                cp_ro_filtered= list(filter(None,cp_opt_history["ro"]))
                cp_al_filtered= list(filter(None,cp_opt_history["al"]))
                if len(cp_ro_filtered) > opt_args.history_size:
                    cp_opt_history["ro"]= cp_ro_filtered[-opt_args.history_size:]
                    cp_opt_history["al"]= cp_al_filtered[-opt_args.history_size:]
                else:
                    # pad back to history_size with None placeholders
                    cp_opt_history["ro"]= cp_ro_filtered + [None for i in range(opt_args.history_size-len(cp_ro_filtered))]
                    cp_opt_history["al"]= cp_al_filtered + [None for i in range(opt_args.history_size-len(cp_ro_filtered))]
        cp_state_dict["param_groups"][0]= cp_opt_params
        cp_state_dict["state"][cp_opt_params["params"][0]]= cp_opt_history
        optimizer.load_state_dict(cp_state_dict)
        print(f"checkpoint.loss = {loss0}")
    # loss + explicit-gradient closure handed to the LBFGS optimizer
    def closure(linesearching=False):
        context["line_search"]=linesearching
        optimizer.zero_grad()
        with torch.no_grad():
            loss, ctm_env, history, timings= loss_fn(state, current_env[0], context)
        # record loss; persist the state whenever a new best loss is reached
        if linesearching:
            t_data["loss_ls"].append(loss.item())
            if t_data["min_loss_ls"] > t_data["loss_ls"][-1]:
                t_data["min_loss_ls"]= t_data["loss_ls"][-1]
        else:
            t_data["loss"].append(loss.item())
            if t_data["min_loss"] > t_data["loss"][-1]:
                t_data["min_loss"]= t_data["loss"][-1]
                state.write_to_file(outputstatefile, normalize=True)
        if opt_args.opt_logging:
            log.info({"history_length": len(history['log']), "history": history['log'],
                "final_multiplets": history["final_multiplets"]})
            log_entry=dict({"id": epoch, "loss": t_data["loss"][-1], "timings": timings})
            if linesearching:
                log_entry["LS"]=len(t_data["loss_ls"])
                log_entry["loss"]=t_data["loss_ls"]
            log.info(json.dumps(log_entry))
        if obs_fn is not None:
            obs_fn(state, ctm_env, context)
        # compute gradient explicitly (finite differences) and attach to params
        t_grad0= time.perf_counter()
        with torch.no_grad():
            grad= grad_fn(state, ctm_env, context, loss)
            for k in state.coeffs.keys():
                state.coeffs[k].grad= grad[k]
        t_grad1= time.perf_counter()
        if opt_args.opt_logging:
            log_entry=dict({"id": epoch, "t_grad": t_grad1-t_grad0 })
            if linesearching: log_entry["LS"]=len(t_data["loss_ls"])
            log.info(json.dumps(log_entry))
        # detach current environment from the autograd graph
        current_env[0] = ctm_env.detach().clone()
        return loss
    # derivative-free line-search closure; runs entirely under torch.no_grad
    @torch.no_grad()
    def closure_linesearch(linesearching):
        context["line_search"]=linesearching
        loc_opt_args= copy.deepcopy(opt_args)
        loc_opt_args.opt_ctm_reinit= opt_args.line_search_ctm_reinit
        loc_ctm_args= copy.deepcopy(ctm_args)
        if opt_args.line_search_svd_method != 'DEFAULT':
            loc_ctm_args.projector_svd_method= opt_args.line_search_svd_method
        loc_context= dict({"ctm_args":loc_ctm_args, "opt_args":loc_opt_args, \
            "loss_history": t_data, "line_search": True})
        loss, ctm_env, history, timings = loss_fn(state, current_env[0],\
            loc_context)
        t_data["loss_ls"].append(loss.item())
        if t_data["min_loss_ls"] > t_data["loss_ls"][-1]:
            t_data["min_loss_ls"]= t_data["loss_ls"][-1]
        if opt_args.opt_logging:
            log.info({"history_length": len(history['log']), "history": history['log'],
                "final_multiplets": history["final_multiplets"]})
            log_entry=dict({"id": epoch, "LS": len(t_data["loss_ls"]), \
                "loss": t_data["loss_ls"], "timings": timings})
            log.info(json.dumps(log_entry))
        if obs_fn is not None:
            obs_fn(state, ctm_env, context)
        current_env[0]= ctm_env
        return loss
    # main epoch loop: checkpoint-before-step keeps the saved wavefunction in
    # sync with the last recorded loss value
    for epoch in range(main_args.opt_max_iter):
        if epoch>0:
            store_checkpoint(checkpoint_file, state, optimizer, epoch, t_data["loss"][-1])
        optimizer.step_2c(closure, closure_linesearch)
        # reset line-search history for the next epoch
        t_data["loss_ls"]=[]
        t_data["min_loss_ls"]=1.0e+16
        if post_proc is not None:
            post_proc(state, current_env[0], context)
    # optimization is over: store the final checkpoint
    store_checkpoint(checkpoint_file, state, optimizer, \
main_args.opt_max_iter, t_data["loss"][-1]) | true | true |
1c37577a5f6a0f5b0a4b1b452367c16321d3926a | 2,162 | py | Python | chatterbox/migrations/0002_data.py | blitzagency/django-chatterbox | 7bf17444f8308aa12b6718bd62ee1344021c21aa | [
"MIT"
] | 8 | 2015-03-10T20:03:09.000Z | 2018-06-14T23:03:58.000Z | chatterbox/migrations/0002_data.py | blitzagency/django-chatterbox | 7bf17444f8308aa12b6718bd62ee1344021c21aa | [
"MIT"
] | 3 | 2015-07-14T22:44:47.000Z | 2020-06-05T23:43:05.000Z | chatterbox/migrations/0002_data.py | blitzagency/django-chatterbox | 7bf17444f8308aa12b6718bd62ee1344021c21aa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def populate_collectors(apps):
    """Seed the chatterbox Service and Collector tables with the built-in drivers."""
    Service = apps.get_model('chatterbox', 'Service')
    Collector = apps.get_model('chatterbox', 'Collector')

    # (label, key, driver) for every supported service, in creation order.
    service_specs = [
        ('Facebook', 'facebook', 'chatterbox.drivers.facebook.Facebook'),
        ('Instagram', 'instagram', 'chatterbox.drivers.instagram.Instagram'),
        ('Twitter', 'twitter', 'chatterbox.drivers.twitter.Twitter'),
        ('YouTube', 'youtube', 'chatterbox.drivers.youtube.YouTube'),
    ]
    services = {}
    for label, key, driver in service_specs:
        svc = Service(label=label, key=key, driver=driver)
        svc.save()
        services[key] = svc

    # (label, service key, driver) for every collector, in creation order.
    collector_specs = [
        ('Facebook User Wall', 'facebook',
         'chatterbox.collectors.facebook.FacebookWall'),
        ('Instagram Tag Search', 'instagram',
         'chatterbox.collectors.instagram.InstagramSearch'),
        ('Instagram User Media', 'instagram',
         'chatterbox.collectors.instagram.InstagramWall'),
        ('Twitter Tag Search', 'twitter',
         'chatterbox.collectors.twitter.TwitterTagSearch'),
        ('YouTube Search', 'youtube',
         'chatterbox.collectors.youtube.YouTubeSearch'),
        ('YouTube User Videos', 'youtube',
         'chatterbox.collectors.youtube.YouTubeUser'),
    ]
    for label, key, driver in collector_specs:
        col = Collector(label=label, service=services[key], driver=driver)
        col.save()
def populate(apps, schema_editor):
    # RunPython entry point; ``schema_editor`` is required by the API but unused.
    populate_collectors(apps)
class Migration(migrations.Migration):
    # Data migration seeding built-in services/collectors; depends on auth and
    # the initial chatterbox schema.
    dependencies = [
        ('auth', '0001_initial'),
        ('chatterbox', '0001_initial'),
    ]
    operations = [
        # Forward-only: no reverse function is provided for RunPython.
        migrations.RunPython(populate),
    ]
| 28.077922 | 77 | 0.580019 |
from __future__ import unicode_literals
from django.db import migrations
def populate_collectors(apps):
    """Create the built-in Service rows, then the Collector rows tied to them."""
    Service = apps.get_model('chatterbox', 'Service')
    Collector = apps.get_model('chatterbox', 'Collector')

    def add_service(label, key, driver):
        obj = Service(label=label, key=key, driver=driver)
        obj.save()
        return obj

    fb = add_service('Facebook', 'facebook',
                     'chatterbox.drivers.facebook.Facebook')
    ig = add_service('Instagram', 'instagram',
                     'chatterbox.drivers.instagram.Instagram')
    tw = add_service('Twitter', 'twitter',
                     'chatterbox.drivers.twitter.Twitter')
    yt = add_service('YouTube', 'youtube',
                     'chatterbox.drivers.youtube.YouTube')

    # One collector row per (label, owning service, driver) triple.
    for label, service, driver in (
        ('Facebook User Wall', fb,
         'chatterbox.collectors.facebook.FacebookWall'),
        ('Instagram Tag Search', ig,
         'chatterbox.collectors.instagram.InstagramSearch'),
        ('Instagram User Media', ig,
         'chatterbox.collectors.instagram.InstagramWall'),
        ('Twitter Tag Search', tw,
         'chatterbox.collectors.twitter.TwitterTagSearch'),
        ('YouTube Search', yt,
         'chatterbox.collectors.youtube.YouTubeSearch'),
        ('YouTube User Videos', yt,
         'chatterbox.collectors.youtube.YouTubeUser'),
    ):
        Collector(label=label, service=service, driver=driver).save()
def populate(apps, schema_editor):
    # RunPython hook; ``schema_editor`` is required by the API but unused.
    populate_collectors(apps)
class Migration(migrations.Migration):
    # Data migration seeding built-in services/collectors; forward-only
    # (no reverse function supplied to RunPython).
    dependencies = [
        ('auth', '0001_initial'),
        ('chatterbox', '0001_initial'),
    ]
    operations = [
        migrations.RunPython(populate),
    ]
| true | true |
1c37579c0a84e141fc7080b5cf9806f91db521c0 | 598 | py | Python | tests/gis_tests/inspectapp/models.py | imjvdn/scratch-game-1 | 5dffd79f17e0b66d3d2e57262749311aca28e850 | [
"PSF-2.0",
"BSD-3-Clause"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | tests/gis_tests/inspectapp/models.py | imjvdn/scratch-game-1 | 5dffd79f17e0b66d3d2e57262749311aca28e850 | [
"PSF-2.0",
"BSD-3-Clause"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | tests/gis_tests/inspectapp/models.py | imjvdn/scratch-game-1 | 5dffd79f17e0b66d3d2e57262749311aca28e850 | [
"PSF-2.0",
"BSD-3-Clause"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | from django.contrib.gis.db import models
class AllOGRFields(models.Model):
    """Model declaring one field per basic scalar type plus two geometry
    columns; presumably used to exercise OGR-based model inspection — the
    name suggests it, TODO confirm against the test that imports it."""
    f_decimal = models.FloatField()
    f_float = models.FloatField()
    f_int = models.IntegerField()
    f_char = models.CharField(max_length=10)
    f_date = models.DateField()
    f_datetime = models.DateTimeField()
    f_time = models.TimeField()
    geom = models.PolygonField()
    point = models.PointField()
class Fields3D(models.Model):
    """Model whose geometry columns all use three dimensions (dim=3);
    ``pointg`` additionally uses the geography column type."""
    point = models.PointField(dim=3)
    pointg = models.PointField(dim=3, geography=True)
    line = models.LineStringField(dim=3)
    poly = models.PolygonField(dim=3)
| 27.181818 | 53 | 0.707358 | from django.contrib.gis.db import models
class AllOGRFields(models.Model):
f_decimal = models.FloatField()
f_float = models.FloatField()
f_int = models.IntegerField()
f_char = models.CharField(max_length=10)
f_date = models.DateField()
f_datetime = models.DateTimeField()
f_time = models.TimeField()
geom = models.PolygonField()
point = models.PointField()
class Fields3D(models.Model):
point = models.PointField(dim=3)
pointg = models.PointField(dim=3, geography=True)
line = models.LineStringField(dim=3)
poly = models.PolygonField(dim=3)
| true | true |
1c37589d145a5085b1d5059184b437797db2d06e | 337 | py | Python | mysite/myapp/migrations/0016_remove_gallerymodel_image.py | wiparraguirre/WI-Construction | 22e3f5c615bffda6f9c1681d1ca00b0918362126 | [
"MIT"
] | null | null | null | mysite/myapp/migrations/0016_remove_gallerymodel_image.py | wiparraguirre/WI-Construction | 22e3f5c615bffda6f9c1681d1ca00b0918362126 | [
"MIT"
] | null | null | null | mysite/myapp/migrations/0016_remove_gallerymodel_image.py | wiparraguirre/WI-Construction | 22e3f5c615bffda6f9c1681d1ca00b0918362126 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.7 on 2022-05-18 04:31
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('myapp', '0015_alter_gallerymodel_image'),
]
operations = [
migrations.RemoveField(
model_name='gallerymodel',
name='image',
),
]
| 18.722222 | 51 | 0.602374 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('myapp', '0015_alter_gallerymodel_image'),
]
operations = [
migrations.RemoveField(
model_name='gallerymodel',
name='image',
),
]
| true | true |
1c37591beb7002f607bb4418787325b43a751b34 | 893 | py | Python | setup.py | bbilly1/ryd-client | 10b9b6f0fe0ba5d022375eed301f25ab51c31109 | [
"MIT"
] | 1 | 2021-12-24T19:46:02.000Z | 2021-12-24T19:46:02.000Z | setup.py | bbilly1/ryd-client | 10b9b6f0fe0ba5d022375eed301f25ab51c31109 | [
"MIT"
] | null | null | null | setup.py | bbilly1/ryd-client | 10b9b6f0fe0ba5d022375eed301f25ab51c31109 | [
"MIT"
] | null | null | null | """setup file with project metadata"""
import setuptools
# The long description shown on PyPI is sourced from the short README.
with open("README_SHORT.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
# Distribution metadata for ryd-client (returnyoutubedislike.com API client).
setuptools.setup(
    name="ryd-client",
    version="0.0.3",
    author="Simon",
    author_email="simobilleter@gmail.com",
    description="api client for returnyoutubedislike.com",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/bbilly1/ryd-client",
    project_urls={
        "Bug Tracker": "https://github.com/bbilly1/ryd-client/issues",
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    package_dir={"ryd_client": "ryd_client"},
    packages=setuptools.find_packages(),
    python_requires=">=3.6",
    install_requires=["requests"],
)
| 29.766667 | 70 | 0.662934 |
import setuptools
with open("README_SHORT.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="ryd-client",
version="0.0.3",
author="Simon",
author_email="simobilleter@gmail.com",
description="api client for returnyoutubedislike.com",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/bbilly1/ryd-client",
project_urls={
"Bug Tracker": "https://github.com/bbilly1/ryd-client/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_dir={"ryd_client": "ryd_client"},
packages=setuptools.find_packages(),
python_requires=">=3.6",
install_requires=["requests"],
)
| true | true |
1c3759df5a38cc9eec92e29506b100742f627706 | 953 | py | Python | Constellations/get_brightest_stars.py | PatD123/Polar-Constellation | 86f54ae2028a4f351b9f1a056aa3166f49541679 | [
"MIT"
] | null | null | null | Constellations/get_brightest_stars.py | PatD123/Polar-Constellation | 86f54ae2028a4f351b9f1a056aa3166f49541679 | [
"MIT"
] | null | null | null | Constellations/get_brightest_stars.py | PatD123/Polar-Constellation | 86f54ae2028a4f351b9f1a056aa3166f49541679 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup as soup
from urllib.request import urlopen as uReq
import re, json
# Getting the page
# Fetch and parse the page listing constellations and their brightest stars.
URL = "https://www.astronomytrek.com/star-constellations-brightest-stars/"
uClient = uReq(url=URL)  # NOTE(review): no timeout or HTTP-error handling; network failures raise here
page_html = uClient.read()
page_soup = soup(page_html, "html.parser")
# Output file: one star name per line, closed at the end of the script.
stars_file = open("brightest_stars.txt", 'w')
def find_space(star):
    """Return the index of the space immediately before a "(" in *star*.

    Used to strip a trailing parenthesised annotation, e.g.
    ``"Sirius (Alpha Canis Majoris)"`` -> 6, so ``star[:6] == "Sirius"``.
    Returns None when no ``" ("`` separator exists; callers then slice with
    ``[0:None]``, which keeps the whole string.
    """
    # str.find replaces the original index scan, which raised IndexError
    # (star[i + 1] past the end) whenever the string ended in a space.
    idx = star.find(" (")
    return idx if idx != -1 else None
# Walk every table row; presumably column 5 holds the brightest-star name
# and column 4 is the fallback when column 5 has no plain string — TODO
# confirm against the page markup.
brightest_uncleaned = page_soup.find_all("tr")
for html in brightest_uncleaned:
    col_4 = html.contents[4].contents[0]
    col_5 = html.contents[5].string
    if col_5 is not None:
        idx = find_space(col_5)
        col_5 = col_5[0:idx]  # idx may be None -> keep the whole string
        if col_5 == "Brightest Star": continue  # skip the header row
        stars_file.write(col_5 + "\n")
    else:
        idx = find_space(col_4)
        col_4 = col_4[0:idx]
        stars_file.write(col_4 + "\n")
stars_file.close()
from urllib.request import urlopen as uReq
import re, json
URL = "https://www.astronomytrek.com/star-constellations-brightest-stars/"
uClient = uReq(url=URL)
page_html = uClient.read()
page_soup = soup(page_html, "html.parser")
stars_file = open("brightest_stars.txt", 'w')
def find_space(star):
for i in range(0, len(star)):
if star[i] == " " and star[i + 1] == "(":
return i
brightest_uncleaned = page_soup.find_all("tr")
for html in brightest_uncleaned:
col_4 = html.contents[4].contents[0]
col_5 = html.contents[5].string
if col_5 is not None:
idx = find_space(col_5)
col_5 = col_5[0:idx]
if col_5 == "Brightest Star": continue
stars_file.write(col_5 + "\n")
else:
idx = find_space(col_4)
col_4 = col_4[0:idx]
stars_file.write(col_4 + "\n")
stars_file.close() | true | true |
1c375a895060c17d5bfde9430e870a0e67b39870 | 3,247 | py | Python | parser.py | Anzurna/litels | a42bdea5839c2e35d49737310cb535a955b852a7 | [
"MIT"
] | null | null | null | parser.py | Anzurna/litels | a42bdea5839c2e35d49737310cb535a955b852a7 | [
"MIT"
] | null | null | null | parser.py | Anzurna/litels | a42bdea5839c2e35d49737310cb535a955b852a7 | [
"MIT"
] | null | null | null | import gzip
import json
import io
import re
import mmap
import os
import warc
import glob
from pprint import pprint
class Litels:
    """Scan Common Crawl WET archives for captures whose URI matches the
    sites listed in ``website_list.txt``.

    Matching records with payloads larger than 1000 bytes are accumulated in
    ``self.data["web_records"]``; after six records have been collected from
    an archive, the accumulated data is dumped to ``data.txt`` as JSON and
    scanning of that archive stops.
    """

    def __init__(self):
        # Accumulated output: list of {header, length, content} dicts.
        self.data = {"web_records": []}

    def extract_info(self, file, pattern):
        """Read WARC/WET archive *file*, collecting records whose
        ``WARC-Target-URI`` matches the compiled regex *pattern*."""
        collected = 0  # records collected so far from this archive
        with warc.open(file, 'r') as f:
            for record in f:
                if record.header['warc-type'] == 'warcinfo':
                    continue  # archive-level metadata, not a capture
                if not pattern.match(record.header['warc-target-uri']):
                    continue
                length = int(record.header['content-length'])
                if length <= 1000:
                    continue  # skip tiny payloads
                text = record.payload.read()
                self.data['web_records'].append({
                    'header': record.header['warc-target-uri'],
                    'length': length,
                    'content': text.decode(),
                })
                print(text.decode())
                collected += 1
                print(collected)
                if collected > 5:
                    # Bug fix: the original did outfile.write(self.data),
                    # which raises TypeError because write() needs a string;
                    # serialize the dict as JSON instead (as the original's
                    # commented-out json.dump line intended).
                    with open('data.txt', 'w', encoding="utf-8") as outfile:
                        json.dump(self.data, outfile)
                    break

    def open_and_parse_file(self):
        """For each site in website_list.txt, scan every wet_files/*.wet.gz."""
        with open('website_list.txt', 'r') as website_list:
            for site in website_list.readlines():
                site = site.strip('\n')
                # Bug fix: the original hard-coded washingtonpost.com in the
                # regex despite reading the site list; interpolate the site
                # (escaped, so URL dots match literally) so the list is used.
                regex = rf'[\s\S]*{re.escape(site)}[\s\S]*'
                pattern = re.compile(regex)
                print(regex)
                for wet_file in glob.glob("wet_files/*.wet.gz"):
                    self.extract_info(wet_file, pattern)
#http://www.washingtonpost.com/local/crime/prince-georges-cop-suspended-after-dui-charge/2013/10/26/2b3253c8-3e6d-11e3-b7ba-503fb5822c3e_story.html[\s\S]*
#url = record.header.get('http://1023blakefm.com/pay-to-promote-facebook-posts-dollars-and-sense/', None)
# if not url:
# continue
# text = record.payload.read()
# print(url)
# print(text)
# with open('text.txt', 'r') as foil:
# for line in foil:
# print(line)
# pattern = re.compile(r'WARC-Type[\s\S]*microsoft.com[\s\S]*WARC-Type')
# with open('text.txt', 'r', encoding="utf-8") as f:
# for line in f:
# for match in re.finditer(pattern, line):
# print(match)
# for match in matches:
# print(match)
# raw_string = r"{}".format(string)
# https://stackoverflow.com/questions/18707338/print-raw-string-from-variable-not-getting-the-answers
# Script entry point: scan all configured archives for the listed sites.
if __name__ == "__main__":
    var = Litels()
    var.open_and_parse_file()
import json
import io
import re
import mmap
import os
import warc
import glob
from pprint import pprint
class Litels:
def __init__(self):
self.data = {}
self.data["web_records"] = []
def extract_info(self, file, pattern):
text = ''
k = 0
with warc.open(file, 'r') as f:
for record in f:
if (record.header['warc-type'] != 'warcinfo'):
if pattern.match(record.header['warc-target-uri']):
if (int(record.header['content-length']) > 1000):
text = record.payload.read()
self.data['web_records'].append({'header': record.header['warc-target-uri'],
'length': int(record.header['content-length']),
'content' : text.decode() })
print(text.decode())
k += 1
print(k)
if k > 5:
with open('data.txt', 'w', encoding="utf-8") as outfile:
outfile.write(self.data)
break
def open_and_parse_file(self):
with open('website_list.txt', 'r') as website_list:
for site in website_list.readlines():
site = site.strip('\n')
regex = rf'[\s\S]*http://www.washingtonpost.com/[\s\S]*'
pattern = re.compile(regex)
print(regex)
for wet_file in (glob.glob("wet_files/*.wet.gz")):
self.extract_info(wet_file, pattern)
if __name__ == "__main__":
var = Litels()
var.open_and_parse_file() | true | true |
1c375bb4f7bb541cc9dbfaf5556a99dd143fb8c9 | 19,110 | py | Python | airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams.py | luizgribeiro/airbyte | 71a96f5417b678c39b34e2e92234d8a51529e086 | [
"MIT"
] | 2 | 2021-08-04T03:17:38.000Z | 2021-11-15T10:16:08.000Z | airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams.py | luizgribeiro/airbyte | 71a96f5417b678c39b34e2e92234d8a51529e086 | [
"MIT"
] | 52 | 2021-06-11T12:39:05.000Z | 2022-03-30T04:59:35.000Z | airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams.py | luizgribeiro/airbyte | 71a96f5417b678c39b34e2e92234d8a51529e086 | [
"MIT"
] | 2 | 2021-12-14T17:15:40.000Z | 2021-12-14T17:18:03.000Z | #
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import time
import urllib.parse as urlparse
from abc import ABC
from collections import deque
from datetime import datetime
from typing import Any, Iterable, Iterator, List, Mapping, MutableMapping, Optional, Sequence
import backoff
import pendulum
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.core import package_name_from_class
from airbyte_cdk.sources.utils.schema_helpers import ResourceSchemaLoader
from cached_property import cached_property
from facebook_business.adobjects.adreportrun import AdReportRun
from facebook_business.api import FacebookAdsApiBatch, FacebookRequest, FacebookResponse
from facebook_business.exceptions import FacebookRequestError
from source_facebook_marketing.api import API
from .common import FacebookAPIException, JobTimeoutException, batch, deep_merge, retry_pattern
backoff_policy = retry_pattern(backoff.expo, FacebookRequestError, max_tries=5, factor=5)
def remove_params_from_url(url, params):
    """Return *url* with the query parameters named in *params* removed.

    Parameters not listed in *params* are kept in their original order and
    original encoding; the rest of the URL (scheme, netloc, path, fragment)
    is untouched.

    :param url: URL string to clean
    :param params: iterable of query-parameter names to drop
    """
    parsed_url = urlparse.urlparse(url)
    # Keep each surviving "key=value" chunk verbatim rather than re-joining
    # key and value. maxsplit=1 fixes the original's ValueError on values
    # that contain "=", on valueless parameters, and (via the `q and` guard)
    # on URLs with an empty query string.
    res_query = [q for q in parsed_url.query.split("&") if q and q.split("=", 1)[0] not in params]
    parse_result = parsed_url._replace(query="&".join(res_query))
    return urlparse.urlunparse(parse_result)
class FBMarketingStream(Stream, ABC):
    """Base stream class for Facebook Marketing entity streams.

    Holds the shared ``API`` client and implements common plumbing:
    page size, optional inclusion of deleted/archived records, batched
    request execution, and per-record field expansion via ``api_get``.
    """

    # Facebook object id serves as the primary key for all entity streams.
    primary_key = "id"
    # Graph API page size ("limit" request parameter).
    page_size = 100

    # Subclasses set enable_deleted=True when the entity supports the
    # delivery_info status filter; include_deleted is ignored otherwise.
    enable_deleted = False
    # Prefix used to build filter field names, e.g. "ad.delivery_info".
    entity_prefix = None

    def __init__(self, api: API, include_deleted: bool = False, **kwargs):
        super().__init__(**kwargs)
        self._api = api
        # Only honour the include_deleted option on streams that support it.
        self._include_deleted = include_deleted if self.enable_deleted else False

    @cached_property
    def fields(self) -> List[str]:
        """List of fields that we want to query, for now just all properties from stream's schema"""
        return list(self.get_json_schema().get("properties", {}).keys())

    @backoff_policy
    def execute_in_batch(self, requests: Iterable[FacebookRequest]) -> Sequence[MutableMapping[str, Any]]:
        """Execute list of requests in batches, returning the parsed JSON of each response."""
        records = []

        def success(response: FacebookResponse):
            records.append(response.json())

        def failure(response: FacebookResponse):
            # Raise the per-request error; the whole call is then retried by
            # the surrounding backoff policy.
            raise response.error()

        api_batch: FacebookAdsApiBatch = self._api.api.new_batch()
        for request in requests:
            api_batch.add_request(request, success=success, failure=failure)
        # execute() returns a new batch holding any requests that failed.
        retry_batch = api_batch.execute()
        if retry_batch:
            raise FacebookAPIException(f"Batch has failed {len(retry_batch)} requests")

        return records

    def read_records(
        self,
        sync_mode: SyncMode,
        cursor_field: List[str] = None,
        stream_slice: Mapping[str, Any] = None,
        stream_state: Mapping[str, Any] = None,
    ) -> Iterable[Mapping[str, Any]]:
        """Main read method used by CDK: list entities, then expand each to full field set."""
        for record in self._read_records(params=self.request_params(stream_state=stream_state)):
            yield self._extend_record(record, fields=self.fields)

    def _read_records(self, params: Mapping[str, Any]) -> Iterable:
        """Wrapper around query to backoff errors.
        We have default implementation because we still can override read_records so this method is not mandatory.
        """
        return []

    @backoff_policy
    def _extend_record(self, obj: Any, **kwargs):
        """Wrapper around api_get to backoff errors"""
        return obj.api_get(**kwargs).export_all_data()

    def request_params(self, **kwargs) -> MutableMapping[str, Any]:
        """Parameters that should be passed to query_records method"""
        params = {"limit": self.page_size}

        if self._include_deleted:
            params.update(self._filter_all_statuses())

        return params

    def _filter_all_statuses(self) -> MutableMapping[str, Any]:
        """Filter that covers all possible statuses thus including deleted/archived records"""
        filt_values = [
            "active",
            "archived",
            "completed",
            "limited",
            "not_delivering",
            "deleted",
            "not_published",
            "pending_review",
            "permanently_deleted",
            "recently_completed",
            "recently_rejected",
            "rejected",
            "scheduled",
            "inactive",
        ]

        return {
            "filtering": [
                {"field": f"{self.entity_prefix}.delivery_info", "operator": "IN", "value": filt_values},
            ],
        }
class FBMarketingIncrementalStream(FBMarketingStream, ABC):
    """Incremental base: tracks ``updated_time`` and filters requests by it."""

    cursor_field = "updated_time"

    def __init__(self, start_date: datetime, **kwargs):
        super().__init__(**kwargs)
        self._start_date = pendulum.instance(start_date)

    def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]):
        """Update stream state from latest record"""
        # If include_deleted was just turned on, older (deleted) records may
        # appear; in that case trust the record's cursor rather than the max.
        potentially_new_records_in_the_past = self._include_deleted and not current_stream_state.get("include_deleted", False)
        record_value = latest_record[self.cursor_field]
        state_value = current_stream_state.get(self.cursor_field) or record_value
        max_cursor = max(pendulum.parse(state_value), pendulum.parse(record_value))
        if potentially_new_records_in_the_past:
            max_cursor = record_value

        return {
            self.cursor_field: str(max_cursor),
            "include_deleted": self._include_deleted,
        }

    def request_params(self, stream_state: Mapping[str, Any], **kwargs) -> MutableMapping[str, Any]:
        """Include state filter"""
        params = super().request_params(**kwargs)
        params = deep_merge(params, self._state_filter(stream_state=stream_state or {}))
        return params

    def _state_filter(self, stream_state: Mapping[str, Any]) -> Mapping[str, Any]:
        """Additional filters associated with state if any set"""
        state_value = stream_state.get(self.cursor_field)
        filter_value = self._start_date if not state_value else pendulum.parse(state_value)

        # Re-sync from start_date when include_deleted was newly enabled, so
        # previously-excluded deleted records are picked up.
        potentially_new_records_in_the_past = self._include_deleted and not stream_state.get("include_deleted", False)
        if potentially_new_records_in_the_past:
            self.logger.info(f"Ignoring bookmark for {self.name} because of enabled `include_deleted` option")
            filter_value = self._start_date

        return {
            "filtering": [
                {
                    "field": f"{self.entity_prefix}.{self.cursor_field}",
                    "operator": "GREATER_THAN",
                    "value": filter_value.int_timestamp,
                },
            ],
        }
class AdCreatives(FBMarketingStream):
    """AdCreative is append only stream
    doc: https://developers.facebook.com/docs/marketing-api/reference/ad-creative
    """

    entity_prefix = "adcreative"
    # Number of requests bundled into each Facebook batch call.
    batch_size = 50

    def read_records(
        self,
        sync_mode: SyncMode,
        cursor_field: List[str] = None,
        stream_slice: Mapping[str, Any] = None,
        stream_state: Mapping[str, Any] = None,
    ) -> Iterable[Mapping[str, Any]]:
        """Read records using batch API"""
        # List creatives first, then fetch the full field set for each in
        # pending (batched) mode.
        records = self._read_records(params=self.request_params(stream_state=stream_state))
        requests = [record.api_get(fields=self.fields, pending=True) for record in records]
        for requests_batch in batch(requests, size=self.batch_size):
            for record in self.execute_in_batch(requests_batch):
                yield self.clear_urls(record)

    @staticmethod
    def clear_urls(record: MutableMapping[str, Any]) -> MutableMapping[str, Any]:
        """Some URLs has random values, these values doesn't affect validity of URLs, but breaks SAT"""
        thumbnail_url = record.get("thumbnail_url")
        if thumbnail_url:
            # _nc_hash and d are volatile cache-busting params; strip them so
            # output is stable across syncs.
            record["thumbnail_url"] = remove_params_from_url(thumbnail_url, ["_nc_hash", "d"])
        return record

    @backoff_policy
    def _read_records(self, params: Mapping[str, Any]) -> Iterator:
        return self._api.account.get_ad_creatives(params=params)
class Ads(FBMarketingIncrementalStream):
    """doc: https://developers.facebook.com/docs/marketing-api/reference/adgroup"""

    entity_prefix = "ad"
    enable_deleted = True

    @backoff_policy
    def _read_records(self, params: Mapping[str, Any]):
        # Only the cursor field is requested here; full records are fetched
        # later by the base class via _extend_record.
        return self._api.account.get_ads(params=params, fields=[self.cursor_field])
class AdSets(FBMarketingIncrementalStream):
    """doc: https://developers.facebook.com/docs/marketing-api/reference/ad-campaign"""

    entity_prefix = "adset"
    enable_deleted = True

    @backoff_policy
    def _read_records(self, params: Mapping[str, Any]):
        return self._api.account.get_ad_sets(params=params)
class Campaigns(FBMarketingIncrementalStream):
    """doc: https://developers.facebook.com/docs/marketing-api/reference/ad-campaign-group"""

    entity_prefix = "campaign"
    enable_deleted = True

    @backoff_policy
    def _read_records(self, params: Mapping[str, Any]):
        return self._api.account.get_campaigns(params=params)
class AdsInsights(FBMarketingIncrementalStream):
    """doc: https://developers.facebook.com/docs/marketing-api/insights

    Runs asynchronous AdReportRun jobs per date range (one slice per job),
    polling each to completion before yielding its rows.
    """

    cursor_field = "date_start"
    primary_key = None

    ALL_ACTION_ATTRIBUTION_WINDOWS = [
        "1d_click",
        "7d_click",
        "28d_click",
        "1d_view",
        "7d_view",
        "28d_view",
    ]

    ALL_ACTION_BREAKDOWNS = [
        "action_type",
        "action_target_id",
        "action_destination",
    ]

    # Polling limits for async insight jobs.
    MAX_WAIT_TO_START = pendulum.duration(minutes=5)
    MAX_WAIT_TO_FINISH = pendulum.duration(minutes=30)
    MAX_ASYNC_SLEEP = pendulum.duration(minutes=5)
    # Maximum number of report jobs scheduled concurrently.
    MAX_ASYNC_JOBS = 3
    # Facebook keeps insights data for roughly 37 months.
    INSIGHTS_RETENTION_PERIOD = pendulum.duration(days=37 * 30)

    action_breakdowns = ALL_ACTION_BREAKDOWNS
    level = "ad"
    action_attribution_windows = ALL_ACTION_ATTRIBUTION_WINDOWS
    time_increment = 1

    breakdowns = []

    def __init__(self, buffer_days, days_per_job, **kwargs):
        super().__init__(**kwargs)
        # Look this many days before the saved cursor to pick up late updates.
        self.lookback_window = pendulum.duration(days=buffer_days)
        self._days_per_job = days_per_job

    def read_records(
        self,
        sync_mode: SyncMode,
        cursor_field: List[str] = None,
        stream_slice: Mapping[str, Any] = None,
        stream_state: Mapping[str, Any] = None,
    ) -> Iterable[Mapping[str, Any]]:
        """Waits for current job to finish (slice) and yield its result"""
        result = self.wait_for_job(stream_slice["job"])
        # because we query `lookback_window` days before actual cursor we might get records older then cursor

        for obj in result.get_result():
            yield obj.export_all_data()

    def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]:
        """Slice by date periods and schedule async job for each period, run at most MAX_ASYNC_JOBS jobs at the same time.
        This solution for Async was chosen because:
        1. we should commit state after each successful job
        2. we should run as many job as possible before checking for result
        3. we shouldn't proceed to consumption of the next job before previous succeed
        """
        stream_state = stream_state or {}
        running_jobs = deque()
        date_ranges = list(self._date_ranges(stream_state=stream_state))
        for params in date_ranges:
            params = deep_merge(params, self.request_params(stream_state=stream_state))
            job = self._create_insights_job(params)
            running_jobs.append(job)
            if len(running_jobs) >= self.MAX_ASYNC_JOBS:
                yield {"job": running_jobs.popleft()}

        while running_jobs:
            yield {"job": running_jobs.popleft()}

    @backoff_policy
    def wait_for_job(self, job) -> AdReportRun:
        """Poll *job* until completion, with exponentially growing sleep.

        Raises JobTimeoutException when the job fails, is skipped, does not
        start within MAX_WAIT_TO_START, or does not finish within
        MAX_WAIT_TO_FINISH.
        """
        factor = 2
        start_time = pendulum.now()
        sleep_seconds = factor
        while True:
            job = job.api_get()
            job_progress_pct = job["async_percent_completion"]
            job_id = job["report_run_id"]
            self.logger.info(f"ReportRunId {job_id} is {job_progress_pct}% complete")
            runtime = pendulum.now() - start_time

            if job["async_status"] == "Job Completed":
                return job
            elif job["async_status"] == "Job Failed":
                raise JobTimeoutException(f"AdReportRun {job} failed after {runtime.in_seconds()} seconds.")
            elif job["async_status"] == "Job Skipped":
                raise JobTimeoutException(f"AdReportRun {job} skipped after {runtime.in_seconds()} seconds.")

            if runtime > self.MAX_WAIT_TO_START and job_progress_pct == 0:
                raise JobTimeoutException(
                    f"AdReportRun {job} did not start after {runtime.in_seconds()} seconds."
                    f" This is an intermittent error which may be fixed by retrying the job. Aborting."
                )
            elif runtime > self.MAX_WAIT_TO_FINISH:
                raise JobTimeoutException(
                    f"AdReportRun {job} did not finish after {runtime.in_seconds()} seconds."
                    f" This is an intermittent error which may be fixed by retrying the job. Aborting."
                )
            self.logger.info(f"Sleeping {sleep_seconds} seconds while waiting for AdReportRun: {job_id} to complete")
            time.sleep(sleep_seconds)
            if sleep_seconds < self.MAX_ASYNC_SLEEP.in_seconds():
                sleep_seconds *= factor

    def request_params(self, stream_state: Mapping[str, Any], **kwargs) -> MutableMapping[str, Any]:
        params = super().request_params(stream_state=stream_state, **kwargs)
        params = deep_merge(
            params,
            {
                "level": self.level,
                "action_breakdowns": self.action_breakdowns,
                "breakdowns": self.breakdowns,
                "fields": self.fields,
                "time_increment": self.time_increment,
                "action_attribution_windows": self.action_attribution_windows,
            },
        )

        return params

    def _state_filter(self, stream_state: Mapping[str, Any]) -> Mapping[str, Any]:
        """Works differently for insights, so remove it"""
        return {}

    def get_json_schema(self) -> Mapping[str, Any]:
        """Add fields from breakdowns to the stream schema
        :return: A dict of the JSON schema representing this stream.
        """
        schema = ResourceSchemaLoader(package_name_from_class(self.__class__)).get_schema("ads_insights")
        schema["properties"].update(self._schema_for_breakdowns())
        return schema

    @cached_property
    def fields(self) -> List[str]:
        """List of fields that we want to query, for now just all properties from stream's schema"""
        schema = ResourceSchemaLoader(package_name_from_class(self.__class__)).get_schema("ads_insights")
        return list(schema.get("properties", {}).keys())

    def _schema_for_breakdowns(self) -> Mapping[str, Any]:
        """Breakdown fields and their type"""
        schemas = {
            "age": {"type": ["null", "integer", "string"]},
            "gender": {"type": ["null", "string"]},
            "country": {"type": ["null", "string"]},
            "dma": {"type": ["null", "string"]},
            "region": {"type": ["null", "string"]},
            "impression_device": {"type": ["null", "string"]},
            "placement": {"type": ["null", "string"]},
            "platform_position": {"type": ["null", "string"]},
            "publisher_platform": {"type": ["null", "string"]},
        }
        breakdowns = self.breakdowns[:]
        # Facebook returns a synthetic "placement" field alongside
        # platform_position breakdowns, so add it to the schema too.
        if "platform_position" in breakdowns:
            breakdowns.append("placement")

        # Bug fix: iterate the extended local copy. The original iterated
        # self.breakdowns, so the "placement" entry appended above never
        # reached the schema.
        return {breakdown: schemas[breakdown] for breakdown in breakdowns}

    def _date_ranges(self, stream_state: Mapping[str, Any]) -> Iterator[dict]:
        """Iterate over period between start_date/state and now

        Notes: Facebook freezes insight data 28 days after it was generated, which means that all data
            from the past 28 days may have changed since we last emitted it, so we retrieve it again.
        """
        state_value = stream_state.get(self.cursor_field)
        if state_value:
            start_date = pendulum.parse(state_value) - self.lookback_window
        else:
            start_date = self._start_date
        end_date = pendulum.now()
        # Never request data older than Facebook's retention window.
        start_date = max(end_date - self.INSIGHTS_RETENTION_PERIOD, start_date)

        for since in pendulum.period(start_date, end_date).range("days", self._days_per_job):
            until = min(since.add(days=self._days_per_job - 1), end_date)  # -1 because time_range is inclusive
            yield {
                "time_range": {"since": since.to_date_string(), "until": until.to_date_string()},
            }

    @backoff_policy
    def _create_insights_job(self, params) -> AdReportRun:
        job = self._api.account.get_insights(params=params, is_async=True)
        job_id = job["report_run_id"]
        time_range = params["time_range"]
        self.logger.info(f"Created AdReportRun: {job_id} to sync insights {time_range} with breakdown {self.breakdowns}")
        return job
class AdsInsightsAgeAndGender(AdsInsights):
    """Insights report broken down by age and gender."""
    breakdowns = ["age", "gender"]


class AdsInsightsCountry(AdsInsights):
    """Insights report broken down by country."""
    breakdowns = ["country"]


class AdsInsightsRegion(AdsInsights):
    """Insights report broken down by region."""
    breakdowns = ["region"]


class AdsInsightsDma(AdsInsights):
    """Insights report broken down by designated market area (DMA)."""
    breakdowns = ["dma"]


class AdsInsightsPlatformAndDevice(AdsInsights):
    """Insights report broken down by platform, position and device."""
    breakdowns = ["publisher_platform", "platform_position", "impression_device"]
    action_breakdowns = ["action_type"]  # FB Async Job fails for unknown reason if we set other breakdowns
| 39.647303 | 126 | 0.660858 |
import time
import urllib.parse as urlparse
from abc import ABC
from collections import deque
from datetime import datetime
from typing import Any, Iterable, Iterator, List, Mapping, MutableMapping, Optional, Sequence
import backoff
import pendulum
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.core import package_name_from_class
from airbyte_cdk.sources.utils.schema_helpers import ResourceSchemaLoader
from cached_property import cached_property
from facebook_business.adobjects.adreportrun import AdReportRun
from facebook_business.api import FacebookAdsApiBatch, FacebookRequest, FacebookResponse
from facebook_business.exceptions import FacebookRequestError
from source_facebook_marketing.api import API
from .common import FacebookAPIException, JobTimeoutException, batch, deep_merge, retry_pattern
backoff_policy = retry_pattern(backoff.expo, FacebookRequestError, max_tries=5, factor=5)
def remove_params_from_url(url, params):
parsed_url = urlparse.urlparse(url)
res_query = []
for q in parsed_url.query.split("&"):
key, value = q.split("=")
if key not in params:
res_query.append(f"{key}={value}")
parse_result = parsed_url._replace(query="&".join(res_query))
return urlparse.urlunparse(parse_result)
class FBMarketingStream(Stream, ABC):
primary_key = "id"
page_size = 100
enable_deleted = False
entity_prefix = None
def __init__(self, api: API, include_deleted: bool = False, **kwargs):
super().__init__(**kwargs)
self._api = api
self._include_deleted = include_deleted if self.enable_deleted else False
@cached_property
def fields(self) -> List[str]:
return list(self.get_json_schema().get("properties", {}).keys())
@backoff_policy
def execute_in_batch(self, requests: Iterable[FacebookRequest]) -> Sequence[MutableMapping[str, Any]]:
records = []
def success(response: FacebookResponse):
records.append(response.json())
def failure(response: FacebookResponse):
raise response.error()
api_batch: FacebookAdsApiBatch = self._api.api.new_batch()
for request in requests:
api_batch.add_request(request, success=success, failure=failure)
retry_batch = api_batch.execute()
if retry_batch:
raise FacebookAPIException(f"Batch has failed {len(retry_batch)} requests")
return records
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
for record in self._read_records(params=self.request_params(stream_state=stream_state)):
yield self._extend_record(record, fields=self.fields)
def _read_records(self, params: Mapping[str, Any]) -> Iterable:
return []
@backoff_policy
def _extend_record(self, obj: Any, **kwargs):
return obj.api_get(**kwargs).export_all_data()
def request_params(self, **kwargs) -> MutableMapping[str, Any]:
params = {"limit": self.page_size}
if self._include_deleted:
params.update(self._filter_all_statuses())
return params
def _filter_all_statuses(self) -> MutableMapping[str, Any]:
filt_values = [
"active",
"archived",
"completed",
"limited",
"not_delivering",
"deleted",
"not_published",
"pending_review",
"permanently_deleted",
"recently_completed",
"recently_rejected",
"rejected",
"scheduled",
"inactive",
]
return {
"filtering": [
{"field": f"{self.entity_prefix}.delivery_info", "operator": "IN", "value": filt_values},
],
}
class FBMarketingIncrementalStream(FBMarketingStream, ABC):
    """Base class for incremental FB Marketing streams, bookmarked on `updated_time`."""

    cursor_field = "updated_time"
    def __init__(self, start_date: datetime, **kwargs):
        super().__init__(**kwargs)
        # Normalize to a pendulum datetime for later parsing/comparison.
        self._start_date = pendulum.instance(start_date)
    def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]):
        """Merge the latest record into the stream state.

        If `include_deleted` was just switched on (state says it was off),
        previously-invisible deleted records may reappear in the past, so the
        bookmark is reset to the latest record's value instead of the max of
        state and record.
        """
        potentially_new_records_in_the_past = self._include_deleted and not current_stream_state.get("include_deleted", False)
        record_value = latest_record[self.cursor_field]
        state_value = current_stream_state.get(self.cursor_field) or record_value
        max_cursor = max(pendulum.parse(state_value), pendulum.parse(record_value))
        if potentially_new_records_in_the_past:
            # record_value is kept as the raw string here; str() below normalizes both cases.
            max_cursor = record_value
        return {
            self.cursor_field: str(max_cursor),
            "include_deleted": self._include_deleted,
        }
    def request_params(self, stream_state: Mapping[str, Any], **kwargs) -> MutableMapping[str, Any]:
        """Base params plus a cursor filter derived from the stream state."""
        params = super().request_params(**kwargs)
        params = deep_merge(params, self._state_filter(stream_state=stream_state or {}))
        return params
    def _state_filter(self, stream_state: Mapping[str, Any]) -> Mapping[str, Any]:
        """Return a `filtering` clause selecting records updated after the bookmark."""
        state_value = stream_state.get(self.cursor_field)
        filter_value = self._start_date if not state_value else pendulum.parse(state_value)
        potentially_new_records_in_the_past = self._include_deleted and not stream_state.get("include_deleted", False)
        if potentially_new_records_in_the_past:
            # include_deleted was just enabled: restart from start_date so
            # previously-invisible (deleted) records are picked up.
            self.logger.info(f"Ignoring bookmark for {self.name} because of enabled `include_deleted` option")
            filter_value = self._start_date
        return {
            "filtering": [
                {
                    "field": f"{self.entity_prefix}.{self.cursor_field}",
                    "operator": "GREATER_THAN",
                    "value": filter_value.int_timestamp,
                },
            ],
        }
class AdCreatives(FBMarketingStream):
    """Full-refresh stream of ad creatives, fetched via batched Graph API requests."""

    entity_prefix = "adcreative"
    # Number of pending requests bundled into a single Graph API batch call.
    batch_size = 50
    def read_records(
        self,
        sync_mode: SyncMode,
        cursor_field: List[str] = None,
        stream_slice: Mapping[str, Any] = None,
        stream_state: Mapping[str, Any] = None,
    ) -> Iterable[Mapping[str, Any]]:
        """List creatives, then fetch the requested fields for each in batches."""
        records = self._read_records(params=self.request_params(stream_state=stream_state))
        # pending=True defers execution so the per-record reads can be bundled
        # into batch calls below (FB SDK convention).
        requests = [record.api_get(fields=self.fields, pending=True) for record in records]
        for requests_batch in batch(requests, size=self.batch_size):
            for record in self.execute_in_batch(requests_batch):
                yield self.clear_urls(record)
    @staticmethod
    def clear_urls(record: MutableMapping[str, Any]) -> MutableMapping[str, Any]:
        """Strip volatile cache-busting params from thumbnail URLs so records are stable across syncs."""
        thumbnail_url = record.get("thumbnail_url")
        if thumbnail_url:
            record["thumbnail_url"] = remove_params_from_url(thumbnail_url, ["_nc_hash", "d"])
        return record
    @backoff_policy
    def _read_records(self, params: Mapping[str, Any]) -> Iterator:
        """Issue the creatives listing call (retried via backoff_policy)."""
        return self._api.account.get_ad_creatives(params=params)
class Ads(FBMarketingIncrementalStream):
    """Incremental stream over the account's ads."""

    entity_prefix = "ad"
    # NOTE(review): the base classes visible here read `self._include_deleted`;
    # `enable_deleted` looks like a class-level capability switch consumed
    # elsewhere — confirm against the stream constructor.
    enable_deleted = True
    @backoff_policy
    def _read_records(self, params: Mapping[str, Any]):
        # Only the cursor field is requested here; full field expansion happens
        # later via the base class's _extend_record.
        return self._api.account.get_ads(params=params, fields=[self.cursor_field])
class AdSets(FBMarketingIncrementalStream):
    """Incremental stream over the account's ad sets."""

    entity_prefix = "adset"
    enable_deleted = True
    @backoff_policy
    def _read_records(self, params: Mapping[str, Any]):
        # Listing call only; fields are expanded later by the base class.
        return self._api.account.get_ad_sets(params=params)
class Campaigns(FBMarketingIncrementalStream):
    """Incremental stream over the account's campaigns."""

    entity_prefix = "campaign"
    enable_deleted = True
    @backoff_policy
    def _read_records(self, params: Mapping[str, Any]):
        # Listing call only; fields are expanded later by the base class.
        return self._api.account.get_campaigns(params=params)
class AdsInsights(FBMarketingIncrementalStream):
    """Ad-level insights stream backed by asynchronous report jobs.

    Each stream slice corresponds to one asynchronous AdReportRun on the
    Facebook side: slices are produced by `stream_slices` (which throttles the
    number of in-flight jobs) and consumed by `read_records`, which blocks
    until the slice's job finishes.
    """

    cursor_field = "date_start"
    primary_key = None

    ALL_ACTION_ATTRIBUTION_WINDOWS = [
        "1d_click",
        "7d_click",
        "28d_click",
        "1d_view",
        "7d_view",
        "28d_view",
    ]

    ALL_ACTION_BREAKDOWNS = [
        "action_type",
        "action_target_id",
        "action_destination",
    ]

    # Limits for polling the asynchronous report jobs.
    MAX_WAIT_TO_START = pendulum.duration(minutes=5)
    MAX_WAIT_TO_FINISH = pendulum.duration(minutes=30)
    MAX_ASYNC_SLEEP = pendulum.duration(minutes=5)
    # Maximum number of report jobs kept in flight at once.
    MAX_ASYNC_JOBS = 3
    # Insights older than roughly 37 months cannot be requested from the API.
    INSIGHTS_RETENTION_PERIOD = pendulum.duration(days=37 * 30)

    action_breakdowns = ALL_ACTION_BREAKDOWNS
    level = "ad"
    action_attribution_windows = ALL_ACTION_ATTRIBUTION_WINDOWS
    time_increment = 1
    # Subclasses override this with the breakdown dimensions they report on.
    breakdowns = []

    def __init__(self, buffer_days, days_per_job, **kwargs):
        """:param buffer_days: days to look back from the bookmark on each sync
        :param days_per_job: date-range width of a single async report job
        """
        super().__init__(**kwargs)
        self.lookback_window = pendulum.duration(days=buffer_days)
        self._days_per_job = days_per_job

    def read_records(
        self,
        sync_mode: SyncMode,
        cursor_field: List[str] = None,
        stream_slice: Mapping[str, Any] = None,
        stream_state: Mapping[str, Any] = None,
    ) -> Iterable[Mapping[str, Any]]:
        """Wait for the slice's async job to complete, then yield its rows."""
        result = self.wait_for_job(stream_slice["job"])
        # The job result is an iterable of FB SDK objects; export them to plain dicts.
        for obj in result.get_result():
            yield obj.export_all_data()

    def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]:
        """Create one async report job per date range.

        At most MAX_ASYNC_JOBS jobs are queued ahead of consumption: once the
        deque is full, a slice is yielded (blocking in the consumer) before the
        next job is created.
        """
        stream_state = stream_state or {}
        running_jobs = deque()
        date_ranges = list(self._date_ranges(stream_state=stream_state))
        for params in date_ranges:
            params = deep_merge(params, self.request_params(stream_state=stream_state))
            job = self._create_insights_job(params)
            running_jobs.append(job)
            if len(running_jobs) >= self.MAX_ASYNC_JOBS:
                yield {"job": running_jobs.popleft()}
        while running_jobs:
            yield {"job": running_jobs.popleft()}

    @backoff_policy
    def wait_for_job(self, job) -> AdReportRun:
        """Poll an AdReportRun with exponential backoff until it completes.

        Raises:
            JobTimeoutException: if the job fails, is skipped, does not start
                within MAX_WAIT_TO_START, or does not finish within
                MAX_WAIT_TO_FINISH.
        """
        factor = 2
        start_time = pendulum.now()
        sleep_seconds = factor
        while True:
            job = job.api_get()
            job_progress_pct = job["async_percent_completion"]
            job_id = job["report_run_id"]
            self.logger.info(f"ReportRunId {job_id} is {job_progress_pct}% complete")
            runtime = pendulum.now() - start_time
            if job["async_status"] == "Job Completed":
                return job
            elif job["async_status"] == "Job Failed":
                raise JobTimeoutException(f"AdReportRun {job} failed after {runtime.in_seconds()} seconds.")
            elif job["async_status"] == "Job Skipped":
                raise JobTimeoutException(f"AdReportRun {job} skipped after {runtime.in_seconds()} seconds.")
            if runtime > self.MAX_WAIT_TO_START and job_progress_pct == 0:
                raise JobTimeoutException(
                    f"AdReportRun {job} did not start after {runtime.in_seconds()} seconds."
                    f" This is an intermittent error which may be fixed by retrying the job. Aborting."
                )
            elif runtime > self.MAX_WAIT_TO_FINISH:
                raise JobTimeoutException(
                    f"AdReportRun {job} did not finish after {runtime.in_seconds()} seconds."
                    f" This is an intermittent error which may be fixed by retrying the job. Aborting."
                )
            self.logger.info(f"Sleeping {sleep_seconds} seconds while waiting for AdReportRun: {job_id} to complete")
            time.sleep(sleep_seconds)
            # Exponential backoff, capped at MAX_ASYNC_SLEEP.
            if sleep_seconds < self.MAX_ASYNC_SLEEP.in_seconds():
                sleep_seconds *= factor

    def request_params(self, stream_state: Mapping[str, Any], **kwargs) -> MutableMapping[str, Any]:
        """Add the insights-specific reporting options to the base parameters."""
        params = super().request_params(stream_state=stream_state, **kwargs)
        params = deep_merge(
            params,
            {
                "level": self.level,
                "action_breakdowns": self.action_breakdowns,
                "breakdowns": self.breakdowns,
                "fields": self.fields,
                "time_increment": self.time_increment,
                "action_attribution_windows": self.action_attribution_windows,
            },
        )
        return params

    def _state_filter(self, stream_state: Mapping[str, Any]) -> Mapping[str, Any]:
        """Insights are sliced by an explicit time_range, so no cursor filter is applied."""
        return {}

    def get_json_schema(self) -> Mapping[str, Any]:
        """Return the stream schema, extended with the configured breakdown fields."""
        schema = ResourceSchemaLoader(package_name_from_class(self.__class__)).get_schema("ads_insights")
        schema["properties"].update(self._schema_for_breakdowns())
        return schema

    @cached_property
    def fields(self) -> List[str]:
        """Fields requested from the API: every property declared in the schema."""
        schema = ResourceSchemaLoader(package_name_from_class(self.__class__)).get_schema("ads_insights")
        return list(schema.get("properties", {}).keys())

    def _schema_for_breakdowns(self) -> Mapping[str, Any]:
        """Return JSON-schema property definitions for the active breakdowns."""
        schemas = {
            "age": {"type": ["null", "integer", "string"]},
            "gender": {"type": ["null", "string"]},
            "country": {"type": ["null", "string"]},
            "dma": {"type": ["null", "string"]},
            "region": {"type": ["null", "string"]},
            "impression_device": {"type": ["null", "string"]},
            "placement": {"type": ["null", "string"]},
            "platform_position": {"type": ["null", "string"]},
            "publisher_platform": {"type": ["null", "string"]},
        }
        breakdowns = self.breakdowns[:]
        if "platform_position" in breakdowns:
            # Responses with platform_position also carry a derived "placement" field.
            breakdowns.append("placement")
        # Bug fix: iterate the extended local list instead of self.breakdowns;
        # previously the "placement" entry appended above was silently dropped.
        return {breakdown: schemas[breakdown] for breakdown in breakdowns}

    def _date_ranges(self, stream_state: Mapping[str, Any]) -> Iterator[dict]:
        """Yield time_range params covering [bookmark - lookback, now] in chunks
        of _days_per_job days, clamped to the API's retention period."""
        state_value = stream_state.get(self.cursor_field)
        if state_value:
            start_date = pendulum.parse(state_value) - self.lookback_window
        else:
            start_date = self._start_date
        end_date = pendulum.now()
        start_date = max(end_date - self.INSIGHTS_RETENTION_PERIOD, start_date)
        for since in pendulum.period(start_date, end_date).range("days", self._days_per_job):
            until = min(since.add(days=self._days_per_job - 1), end_date)
            yield {
                "time_range": {"since": since.to_date_string(), "until": until.to_date_string()},
            }

    @backoff_policy
    def _create_insights_job(self, params) -> AdReportRun:
        """Kick off an asynchronous insights report and return its AdReportRun handle."""
        job = self._api.account.get_insights(params=params, is_async=True)
        job_id = job["report_run_id"]
        time_range = params["time_range"]
        self.logger.info(f"Created AdReportRun: {job_id} to sync insights {time_range} with breakdown {self.breakdowns}")
        return job
class AdsInsightsAgeAndGender(AdsInsights):
    """Insights broken down by age and gender."""
    breakdowns = ["age", "gender"]
class AdsInsightsCountry(AdsInsights):
    """Insights broken down by country."""
    breakdowns = ["country"]
class AdsInsightsRegion(AdsInsights):
    """Insights broken down by region."""
    breakdowns = ["region"]
class AdsInsightsDma(AdsInsights):
    """Insights broken down by designated market area (DMA)."""
    breakdowns = ["dma"]
class AdsInsightsPlatformAndDevice(AdsInsights):
    """Insights broken down by publisher platform, position and device."""
    breakdowns = ["publisher_platform", "platform_position", "impression_device"]
    # Narrower than the default ALL_ACTION_BREAKDOWNS used by the base class.
    action_breakdowns = ["action_type"]
| true | true |
1c375c2700db6553177987a2096c557093b61a2d | 706 | py | Python | google/ads/googleads/v6/services/services/hotel_performance_view_service/__init__.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v6/services/services/hotel_performance_view_service/__init__.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v6/services/services/hotel_performance_view_service/__init__.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import HotelPerformanceViewServiceClient

# Re-export the generated service client as this package's only public symbol.
__all__ = ("HotelPerformanceViewServiceClient",)
| 33.619048 | 74 | 0.760623 |
from .client import HotelPerformanceViewServiceClient

# Re-export the generated service client as this package's only public symbol.
__all__ = ("HotelPerformanceViewServiceClient",)
| true | true |
1c375ca98b8a2794402212dff0ce2cddb2d4c5cd | 5,077 | py | Python | perfkitbenchmarker/linux_packages/openmpi.py | Nowasky/PerfKitBenchmarker | cfa88e269eb373780910896ed4bdc8db09469753 | [
"Apache-2.0"
] | 3 | 2018-04-28T13:06:14.000Z | 2020-06-09T02:39:44.000Z | perfkitbenchmarker/linux_packages/openmpi.py | Nowasky/PerfKitBenchmarker | cfa88e269eb373780910896ed4bdc8db09469753 | [
"Apache-2.0"
] | 1 | 2021-09-09T07:43:25.000Z | 2021-09-09T10:47:56.000Z | perfkitbenchmarker/linux_packages/openmpi.py | Nowasky/PerfKitBenchmarker | cfa88e269eb373780910896ed4bdc8db09469753 | [
"Apache-2.0"
] | 6 | 2019-06-11T18:59:57.000Z | 2021-03-02T19:14:42.000Z | # Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing OpenMPI installation and cleanup functions."""
import posixpath
import re
from absl import flags
from perfkitbenchmarker import linux_packages
FLAGS = flags.FLAGS
# Which OpenMPI release to build from source; empty string skips installation.
flags.DEFINE_string('openmpi_version', '3.1.2',
                    'OpenMPI version to install, such as 3.1.2 and 4.0.2.'
                    'Set to empty to ignore the intallation of OpenMPI.')
flags.DEFINE_bool('openmpi_enable_shared', False,
                  'Whether openmpi should build shared libraries '
                  'in addition to static ones.')
flags.DEFINE_bool('openmpi_with_cuda_support', False,
                  'Compile with CUDA support')
# Extra arguments appended verbatim to ./configure.
flags.DEFINE_string('openmpi_configs', None,
                    'command line options to be provided to ./configure for'
                    'OpenMPI compilation')
# Base URL for official release tarballs.
MPI_URL_BASE = 'https://download.open-mpi.org/release/open-mpi'
# Package-manager arguments used to purge a distro-provided OpenMPI first.
REMOVE_MPI_CMD = 'autoremove -y libopenmpi-dev openmpi-bin openmpi-common'
class MpirunParseOutputError(Exception):
  """Raised when `mpirun --version` output cannot be parsed."""
  pass
def GetMpiVersion(vm):
  """Get the MPI version on the vm, based on mpirun.

  Args:
    vm: the virtual machine to query

  Returns:
    A string containing the active MPI version, or None if mpirun could not
    be found.

  Raises:
    MpirunParseOutputError: if mpirun produced output without a parseable
      version string.
  """
  stdout, _ = vm.RemoteCommand('mpirun --version',
                               ignore_failure=True,
                               suppress_warning=True)
  if not stdout.strip():
    # mpirun is not installed (or printed nothing).
    return None
  # Matches e.g. "mpirun (Open MPI) 3.1.2".
  match = re.search(r'MPI\) (\S+)', stdout)
  if match is None:
    # Explicit check instead of the previous bare `except:`, which silently
    # swallowed unrelated errors.
    raise MpirunParseOutputError('Unable to parse mpirun version output')
  return match.group(1)
def _Install(vm):
  """Installs the OpenMPI package on the VM."""
  version_to_install = FLAGS.openmpi_version
  if not version_to_install:
    # Installation explicitly disabled via --openmpi_version=''.
    return
  current_version = GetMpiVersion(vm)
  if current_version == version_to_install:
    # Requested version already active; nothing to do.
    return
  # Derive the "major.minor" directory component, e.g. '3.1' from '3.1.2'.
  first_dot_pos = version_to_install.find('.')
  second_dot_pos = version_to_install.find('.', first_dot_pos + 1)
  major_version = version_to_install[0:second_dot_pos]
  mpi_tar = ('openmpi-{version}.tar.gz'.format(version=version_to_install))
  mpi_url = ('{mpi_url_base}/v{major_version}/{mpi_tar}'.format(
      mpi_url_base=MPI_URL_BASE, major_version=major_version, mpi_tar=mpi_tar))
  install_dir = posixpath.join(
      linux_packages.INSTALL_DIR,
      'openmpi-{version}'.format(version=version_to_install))
  # Toolchain needed to compile from source.
  vm.Install('build_tools')
  vm.Install('wget')
  vm.RemoteCommand('wget %s -P %s' % (mpi_url, install_dir))
  vm.RemoteCommand('cd %s && tar xvfz %s' % (install_dir, mpi_tar))
  make_jobs = vm.NumCpusForBenchmark()
  config_options = []
  config_options.append('--enable-static')
  config_options.append('--prefix=/usr')
  config_options.append('--enable-shared' if FLAGS.openmpi_enable_shared
                        else '--disable-shared')
  if FLAGS.openmpi_with_cuda_support:
    config_options.append('--with-cuda=/usr/local/cuda-{version}/'
                          .format(version=FLAGS.cuda_toolkit_version))
    config_options.append('--with-cuda-libdir=/usr/local/cuda-{version}/lib64/'
                          .format(version=FLAGS.cuda_toolkit_version))
  if FLAGS.openmpi_configs:
    # User-supplied extra ./configure arguments, passed through verbatim.
    config_options.append(FLAGS.openmpi_configs)
  config_cmd = './configure {}'.format(' '.join(config_options))
  # Robust variant: the compile can outlast a normal SSH command timeout.
  vm.RobustRemoteCommand(
      'cd %s/openmpi-%s && %s && make -j %s && sudo make install' %
      (install_dir, version_to_install, config_cmd, make_jobs))
def GetMpiDir():
  """Returns the installation directory of OpenMPI."""
  # Must mirror the path chosen by _Install when unpacking the tarball.
  return posixpath.join(
      linux_packages.INSTALL_DIR,
      'openmpi-{version}'.format(version=FLAGS.openmpi_version))
def YumInstall(vm):
  """Installs the OpenMPI package on the VM."""
  if FLAGS.openmpi_version:
    # Drop any distro-provided OpenMPI first so the source build wins.
    vm.RobustRemoteCommand(
        'sudo yum {}'.format(REMOVE_MPI_CMD), ignore_failure=True)
    _Install(vm)
def AptInstall(vm):
  """Installs the OpenMPI package on the VM."""
  if not FLAGS.openmpi_version:
    return
  # Drop any distro-provided OpenMPI first so the source build wins.
  vm.RobustRemoteCommand('sudo apt-get ' + REMOVE_MPI_CMD, ignore_failure=True)
  _Install(vm)
def _Uninstall(vm):
  """Uninstalls the OpenMPI package on the VM."""
  # The source tree keeps its own uninstall target.
  vm.RemoteCommand('cd ' + GetMpiDir() + ' && sudo make uninstall')
def YumUninstall(vm):
  """Uninstalls the OpenMPI package on the VM."""
  # yum and apt share the same source-build uninstall path.
  _Uninstall(vm)
def AptUninstall(vm):
  """Uninstalls the OpenMPI package on the VM."""
  # yum and apt share the same source-build uninstall path.
  _Uninstall(vm)
| 32.967532 | 79 | 0.694899 |
import posixpath
import re
from absl import flags
from perfkitbenchmarker import linux_packages
FLAGS = flags.FLAGS
# Which OpenMPI release to build from source; empty string skips installation.
flags.DEFINE_string('openmpi_version', '3.1.2',
                    'OpenMPI version to install, such as 3.1.2 and 4.0.2.'
                    'Set to empty to ignore the intallation of OpenMPI.')
flags.DEFINE_bool('openmpi_enable_shared', False,
                  'Whether openmpi should build shared libraries '
                  'in addition to static ones.')
flags.DEFINE_bool('openmpi_with_cuda_support', False,
                  'Compile with CUDA support')
# Extra arguments appended verbatim to ./configure.
flags.DEFINE_string('openmpi_configs', None,
                    'command line options to be provided to ./configure for'
                    'OpenMPI compilation')
# Base URL for official release tarballs.
MPI_URL_BASE = 'https://download.open-mpi.org/release/open-mpi'
# Package-manager arguments used to purge a distro-provided OpenMPI first.
REMOVE_MPI_CMD = 'autoremove -y libopenmpi-dev openmpi-bin openmpi-common'
class MpirunParseOutputError(Exception):
  """Raised when `mpirun --version` output cannot be parsed."""
  pass
def GetMpiVersion(vm):
  """Get the MPI version on the vm, based on mpirun.

  Args:
    vm: the virtual machine to query

  Returns:
    A string containing the active MPI version, or None if mpirun could not
    be found.

  Raises:
    MpirunParseOutputError: if mpirun produced output without a parseable
      version string.
  """
  stdout, _ = vm.RemoteCommand('mpirun --version',
                               ignore_failure=True,
                               suppress_warning=True)
  if not stdout.strip():
    # mpirun is not installed (or printed nothing).
    return None
  # Matches e.g. "mpirun (Open MPI) 3.1.2".
  match = re.search(r'MPI\) (\S+)', stdout)
  if match is None:
    # Explicit check instead of the previous bare `except:`, which silently
    # swallowed unrelated errors.
    raise MpirunParseOutputError('Unable to parse mpirun version output')
  return match.group(1)
def _Install(vm):
  """Installs the OpenMPI package on the VM by compiling from source."""
  version_to_install = FLAGS.openmpi_version
  if not version_to_install:
    # Installation explicitly disabled via --openmpi_version=''.
    return
  current_version = GetMpiVersion(vm)
  if current_version == version_to_install:
    # Requested version already active; nothing to do.
    return
  # Derive the "major.minor" directory component, e.g. '3.1' from '3.1.2'.
  first_dot_pos = version_to_install.find('.')
  second_dot_pos = version_to_install.find('.', first_dot_pos + 1)
  major_version = version_to_install[0:second_dot_pos]
  mpi_tar = ('openmpi-{version}.tar.gz'.format(version=version_to_install))
  mpi_url = ('{mpi_url_base}/v{major_version}/{mpi_tar}'.format(
      mpi_url_base=MPI_URL_BASE, major_version=major_version, mpi_tar=mpi_tar))
  install_dir = posixpath.join(
      linux_packages.INSTALL_DIR,
      'openmpi-{version}'.format(version=version_to_install))
  # Toolchain needed to compile from source.
  vm.Install('build_tools')
  vm.Install('wget')
  vm.RemoteCommand('wget %s -P %s' % (mpi_url, install_dir))
  vm.RemoteCommand('cd %s && tar xvfz %s' % (install_dir, mpi_tar))
  make_jobs = vm.NumCpusForBenchmark()
  config_options = []
  config_options.append('--enable-static')
  config_options.append('--prefix=/usr')
  config_options.append('--enable-shared' if FLAGS.openmpi_enable_shared
                        else '--disable-shared')
  if FLAGS.openmpi_with_cuda_support:
    config_options.append('--with-cuda=/usr/local/cuda-{version}/'
                          .format(version=FLAGS.cuda_toolkit_version))
    config_options.append('--with-cuda-libdir=/usr/local/cuda-{version}/lib64/'
                          .format(version=FLAGS.cuda_toolkit_version))
  if FLAGS.openmpi_configs:
    # User-supplied extra ./configure arguments, passed through verbatim.
    config_options.append(FLAGS.openmpi_configs)
  config_cmd = './configure {}'.format(' '.join(config_options))
  # Robust variant: the compile can outlast a normal SSH command timeout.
  vm.RobustRemoteCommand(
      'cd %s/openmpi-%s && %s && make -j %s && sudo make install' %
      (install_dir, version_to_install, config_cmd, make_jobs))
def GetMpiDir():
  """Returns the installation directory of OpenMPI."""
  # Must mirror the path chosen by _Install when unpacking the tarball.
  mpi_dir = posixpath.join(
      linux_packages.INSTALL_DIR,
      'openmpi-{version}'.format(version=FLAGS.openmpi_version))
  return mpi_dir
def YumInstall(vm):
  """Installs the OpenMPI package on the VM (yum-based distros)."""
  if not FLAGS.openmpi_version:
    return
  # Remove any distro-provided OpenMPI so the source build takes precedence.
  vm.RobustRemoteCommand(
      'sudo yum {}'.format(REMOVE_MPI_CMD), ignore_failure=True)
  _Install(vm)
def AptInstall(vm):
  """Installs the OpenMPI package on the VM (apt-based distros)."""
  if not FLAGS.openmpi_version:
    return
  # Remove any distro-provided OpenMPI so the source build takes precedence.
  vm.RobustRemoteCommand(
      'sudo apt-get {}'.format(REMOVE_MPI_CMD), ignore_failure=True)
  _Install(vm)
def _Uninstall(vm):
  """Uninstalls OpenMPI via the source tree's `make uninstall` target."""
  vm.RemoteCommand('cd {0} && sudo make uninstall'.format(GetMpiDir()))
def YumUninstall(vm):
  """Uninstalls the OpenMPI package on the VM."""
  # yum and apt share the same source-build uninstall path.
  _Uninstall(vm)
def AptUninstall(vm):
  """Uninstalls the OpenMPI package on the VM."""
  # yum and apt share the same source-build uninstall path.
  _Uninstall(vm)
| true | true |
1c37613ee06eb74f96f1ffe86addd256d2965ae2 | 976 | py | Python | src/elasticizefiles/extractors/metadata.py | pierluigi-failla/elasticize_files | 2530d74f1b56344ee73ca113bcb2870566a565a0 | [
"MIT"
] | null | null | null | src/elasticizefiles/extractors/metadata.py | pierluigi-failla/elasticize_files | 2530d74f1b56344ee73ca113bcb2870566a565a0 | [
"MIT"
] | null | null | null | src/elasticizefiles/extractors/metadata.py | pierluigi-failla/elasticize_files | 2530d74f1b56344ee73ca113bcb2870566a565a0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created by Pierluigi on 2020-02-04
project: elasticizefiles
"""
import logging
from elasticizefiles.base import Extractor
class ExtractExif(Extractor):
    """Extractor that reads embedded metadata (EXIF and similar) via hachoir."""

    def __init__(self):
        Extractor.__init__(self)

    def extract(self, filename):
        """Return a dict of metadata for *filename*; empty dict on parse failure.

        Raises an Exception with an install hint if hachoir is not available.
        """
        try:
            from hachoir.metadata import extractMetadata
            from hachoir.parser import createParser
        except ImportError as e:
            # Narrowed from a bare `except Exception` so only a genuinely
            # missing dependency triggers the install hint; chaining keeps
            # the root cause visible.
            raise Exception('module `hachoir` is not installed, try `pip install -U hachoir`') from e
        metadata = {}
        try:
            parser = createParser(filename)
            metadata = extractMetadata(parser).exportDictionary()['Metadata']
        except Exception as e:
            # Include the offending filename instead of the literal "(unknown)".
            logging.warning(f'exception extracting metadata from {filename}. {e}')
        return metadata

    def mapping(self):
        """Mapping varies too much by file type, so leave it to Elastic's dynamic mapping."""
        return None
| 28.705882 | 94 | 0.639344 |
import logging
from elasticizefiles.base import Extractor
class ExtractExif(Extractor):
    """Extractor that reads embedded metadata (EXIF and similar) via hachoir."""

    def __init__(self):
        Extractor.__init__(self)

    def extract(self, filename):
        """Return a dict of metadata for *filename*; empty dict on parse failure.

        Raises an Exception with an install hint if hachoir is not available.
        """
        try:
            from hachoir.metadata import extractMetadata
            from hachoir.parser import createParser
        except ImportError as e:
            # Narrowed from a bare `except Exception` so only a genuinely
            # missing dependency triggers the install hint; chaining keeps
            # the root cause visible.
            raise Exception('module `hachoir` is not installed, try `pip install -U hachoir`') from e
        metadata = {}
        try:
            parser = createParser(filename)
            metadata = extractMetadata(parser).exportDictionary()['Metadata']
        except Exception as e:
            # Include the offending filename instead of the literal "(unknown)".
            logging.warning(f'exception extracting metadata from {filename}. {e}')
        return metadata

    def mapping(self):
        """Mapping varies too much by file type, so leave it to Elastic's dynamic mapping."""
        return None
return None
| true | true |
1c3761696c14bc564c49dbdee8fad2a15923d854 | 10,602 | py | Python | PuzzleSolver/solver/solver.py | cloudrave/logic-puzzle-generator | 33d65da0776f1ef074c461a8599f71f6c3d192ea | [
"MIT"
] | 1 | 2021-11-26T07:21:07.000Z | 2021-11-26T07:21:07.000Z | PuzzleSolver/solver/solver.py | cloudrave/logic-puzzle-generator | 33d65da0776f1ef074c461a8599f71f6c3d192ea | [
"MIT"
] | null | null | null | PuzzleSolver/solver/solver.py | cloudrave/logic-puzzle-generator | 33d65da0776f1ef074c461a8599f71f6c3d192ea | [
"MIT"
] | null | null | null | from package.puzzle_generator import *
def main():
p1 = Puzzle({
'A': [
Biconditional(
DisjunctiveStatement( # uncertain
IsOfType('B', Knight),
IsOfType('C', Knight),
IsOfType('B', Monk),
IsOfType('C', Monk),
),
IsOfType('E', Knave),
),
IsOfType('A', Monk),
],
'B': [
CountOfTypes(Knight, Knave, operator.eq),
IsSameAs('A', 'B'),
],
'C': [
Biconditional(
IsOfType('C', Monk),
IsSameAs('B', 'D'),
),
ConjunctiveStatement(
IsOfType('A', Knight),
IsOfType('E', Knight),
)
],
'D': [
IsOfType('D', Monk),
IfConnective(
Not(IsOfType('A', Monk)),
IsOfType('B', Knight),
),
],
'E': IfConnective(
IsOfType('D', Knave),
CountOfType(Monk, 2, operator.eq),
),
})
a = AllTheSame()
b = Honesty('A', 'E', operator.gt)
c = IsSameAs('C', 'A')
p2 = Puzzle({
'A': a,
'B': b,
'C': c,
'D': DisjunctiveStatement(
ConjunctiveStatement(a, b),
ConjunctiveStatement(a, c),
ConjunctiveStatement(b, c),
),
'E': IsOfType('E', Knave),
})
p4 = Puzzle({
'A': [
CountOfType(Knight, 2, operator.le),
CountOfType(Knave, 2, operator.lt),
],
'B': [
Honesty('B', 'A', operator.eq),
CountOfType(Knave, 1, operator.ge),
],
'C': [
IsOfType('B', Monk),
DisjunctiveStatement(
IsOfType('D', Monk),
IsOfType('E', Monk),
),
],
'D': Biconditional(
IsOfType('D', Monk),
IsOfType('E', Knave),
),
'E': Biconditional(
IsOfType('E', Monk),
IsOfType('A', Knight),
),
})
p5 = Puzzle({
'A': [
CountOfType(Knight, 3, operator.eq),
IsOfType('B', Knight),
],
'B': [
CountOfType(Monk, 1, operator.ge),
Not(IsOfType('A', Knight)),
],
'C': [
CountOfType(Knave, 0, operator.eq),
CountOfType(Monk, 2, operator.ge),
],
'D': [
ExclusiveOrConnective(
IsOfType('D', Knight),
IsOfType('B', Monk),
),
Honesty('B', 'D', operator.lt),
],
'E': CountOfType(Knave, 1, operator.eq),
'F': CountOfType(Knight, 2, operator.le), # uncertain
})
def remainder_by_2_equals(a, b):
return operator.mod(a, 2) == b
p6 = Puzzle({
'A': ConjunctiveStatement(
IsOfType('B', Knight),
IsOfType('C', Knight),
),
'B': [
CountOfType(Knight, 0, remainder_by_2_equals),
IsOfType('A', Knave),
],
'C': [
Honesty('C', 'A', operator.gt),
Honesty('B', 'A', operator.gt),
],
})
p8 = Puzzle({
'Karen': [
IfConnective(
IsOfType('Thomas', Knave),
Honesty('Karen', 'Perry', operator.gt),
),
Not(IsSameAs('Perry', 'Thomas')),
],
'Perry': [
IfConnective(
CountOfType(Monk, 1, operator.ge),
CountOfType(Knight, 1, remainder_by_2_equals),
),
CountOfTypes(Knave, Knight, operator.gt),
],
'Thomas': IfConnective(
CountOfType(Knave, 0, remainder_by_2_equals),
Not(IsOfType('Thomas', Knave)),
),
})
c1 = IsSameAs('A', 'E')
p9 = Puzzle({
'A': [
Biconditional(
IsOfType('A', Monk),
CountOfType(Monk, 0, remainder_by_2_equals),
),
],
'B': [
Biconditional(
IsOfType('A', Knight),
CountOfType(Knight, 0, remainder_by_2_equals),
),
Honesty('C', 'A', operator.gt),
],
'C': [
c1,
Honesty('A', 'B', operator.gt),
],
'D': [
c1,
IfConnective(
IsOfType('E', Knave),
IsOfType('A', Knave),
),
],
'E': [
Biconditional(
IsOfType('B', Knave),
CountOfType(Knave, 0, remainder_by_2_equals),
),
IfConnective(
IsOfType('A', Knight),
IsOfType('D', Monk),
),
],
})
p13 = Puzzle({
'A': Biconditional(
Honesty('A', 'D', operator.gt),
Honesty('D', 'C', operator.gt),
),
'B': IsOfType('D', Knight),
'C': IfConnective(
Honesty('A', 'C', operator.gt),
CountOfType(Knave, 1, remainder_by_2_equals)
),
'D': ConjunctiveStatement(
Not(IsSameAs('D', 'B')),
Not(IsOfType('B', Monk)),
),
})
p14 = Puzzle({
'Ned': CountOfType(Knight, 0, remainder_by_2_equals),
'Chandler': Honesty('Zoe', 'Chandler', operator.ge),
'Zoe': CountOfType(Knight, 1, remainder_by_2_equals),
'Ewa': Honesty('Ewa', 'Zoe', operator.gt),
})
p18 = Puzzle({
'A': CountOfType(Monk, 0, operator.eq),
'B': [
ConjunctiveStatement(
IfConnective(
IsOfType('B', Knight),
CountOfType(Knight, 1, operator.eq),
),
IfConnective(
IsOfType('B', Monk),
CountOfType(Monk, 1, operator.eq),
),
IfConnective(
IsOfType('B', Knave),
CountOfType(Knave, 1, operator.eq),
),
),
Not(IsOfType('D', Monk)),
],
'C': CountOfType(Knight, 0, operator.eq),
'D': DisjunctiveStatement(
IsOfType('A', Monk),
IsOfType('D', Knave),
)
})
p19 = Puzzle({
'A': [
Honesty('C', 'B', operator.gt),
IfConnective(
Honesty('B', 'A', operator.gt),
IsOfType('B', Monk),
),
Honesty('A', 'C', operator.gt),
],
'B': [
Honesty('B', 'A', operator.gt),
Honesty('A', 'C', operator.gt),
Not(IsOfType('C', Knave)),
],
'C': [
Honesty('A', 'B', operator.gt),
Not(Honesty('B', 'A', operator.gt)),
],
})
p20 = Puzzle({
'A': [
CountOfType(Knave, 2, operator.eq),
Not(IsOfType('B', Knave)),
],
'B': [
CountOfType(Knight, 2, operator.eq),
],
'C': [
Honesty('B', 'A', operator.gt),
IsOfType('A', Knight),
]
})
p22 = Puzzle({
'Deb': IfConnective(
IsOfType('Deb', Knight),
CountOfType(Knave, 1, operator.eq), # uncertain "exactly"?
),
'Jeb': IfConnective(
Not(IsOfType('Jeb', Monk)),
IsOfType('Bob', Monk)
),
'Rob': IfConnective(
IsOfType('Rob', Monk),
CountOfType(Knave, 3, operator.eq)
),
'Bob': [
IfConnective(
IsOfType('Bob', Knave),
IsSameAs('Deb', 'Rob')
),
CountOfType(Knave, 3, operator.eq), # uncertain "exactly"?
],
})
p23 = Puzzle({
'A': [
Biconditional(
IsOfType('B', Knight),
IsOfType('C', Knight)
),
IsOfType('C', Knave),
],
'B': [
Biconditional(
IsOfType('A', Knight),
IsOfType('C', Monk)
),
],
'C': [
Biconditional(
IsOfType('A', Knave),
IsOfType('D', Knight),
),
IsOfType('B', Monk),
],
'D': [
Biconditional(
IsOfType('A', Knave),
IsOfType('B', Knave),
),
],
})
p24 = Puzzle({
'A': [
Honesty('B', 'C', operator.gt),
IsOfType('C', Knave),
],
'B': [
Honesty('C', 'A', operator.gt),
SumOfTypes((Knave, Knight), 2, operator.eq),
],
'C': [
IsSameAs('C', 'B'),
],
})
p25 = Puzzle({
'A': [
IsOfType('A', Knight),
CountOfType(Knave, 0, remainder_by_2_equals),
],
'B': [
IsOfType('C', Knight),
CountOfType(Monk, 0, operator.eq),
],
'C': [
CountOfType(Knight, 1, operator.eq),
Biconditional(
IsOfType('C', Knight),
IsOfType('A', Knave)
),
],
})
p26 = Puzzle({
'Antoine': [
Biconditional(
IsOfType('Bernardo', Knight),
IsOfType('Antoine', Knave),
),
CountOfType(Monk, 1, operator.ge),
],
'Bernardo': CountOfType(Knight, 1, remainder_by_2_equals),
'Campbell': ConjunctiveStatement(
Not(IsOfType('Campbell', Monk)),
IsOfType('Antoine', Monk),
)
})
b1 = Not(IsSameAs('E', 'B'))
e = IsOfType('A', Knight)
p27 = Puzzle({
'A': [
Biconditional(
Not(b1),
Honesty('D', 'A', operator.eq),
),
CountOfType(Monk, 0, operator.eq),
],
'B': [
b1,
CountOfType(Knave, 2, operator.ge),
],
'C': [
DisjunctiveStatement(
IsOfType('D', Knight),
CountOfType(Monk, 0, operator.eq),
),
Not(e),
],
'D': [
IfConnective(
Not(IsSameAs('D', 'B')),
IsOfType('E', Knave)
),
],
'E': [
e,
],
})
p27.print_puzzle_with_solutions()
# p.print_puzzle_statistics()
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
| 25.92176 | 71 | 0.396718 | from package.puzzle_generator import *
def main():
p1 = Puzzle({
'A': [
Biconditional(
DisjunctiveStatement(
IsOfType('B', Knight),
IsOfType('C', Knight),
IsOfType('B', Monk),
IsOfType('C', Monk),
),
IsOfType('E', Knave),
),
IsOfType('A', Monk),
],
'B': [
CountOfTypes(Knight, Knave, operator.eq),
IsSameAs('A', 'B'),
],
'C': [
Biconditional(
IsOfType('C', Monk),
IsSameAs('B', 'D'),
),
ConjunctiveStatement(
IsOfType('A', Knight),
IsOfType('E', Knight),
)
],
'D': [
IsOfType('D', Monk),
IfConnective(
Not(IsOfType('A', Monk)),
IsOfType('B', Knight),
),
],
'E': IfConnective(
IsOfType('D', Knave),
CountOfType(Monk, 2, operator.eq),
),
})
a = AllTheSame()
b = Honesty('A', 'E', operator.gt)
c = IsSameAs('C', 'A')
p2 = Puzzle({
'A': a,
'B': b,
'C': c,
'D': DisjunctiveStatement(
ConjunctiveStatement(a, b),
ConjunctiveStatement(a, c),
ConjunctiveStatement(b, c),
),
'E': IsOfType('E', Knave),
})
p4 = Puzzle({
'A': [
CountOfType(Knight, 2, operator.le),
CountOfType(Knave, 2, operator.lt),
],
'B': [
Honesty('B', 'A', operator.eq),
CountOfType(Knave, 1, operator.ge),
],
'C': [
IsOfType('B', Monk),
DisjunctiveStatement(
IsOfType('D', Monk),
IsOfType('E', Monk),
),
],
'D': Biconditional(
IsOfType('D', Monk),
IsOfType('E', Knave),
),
'E': Biconditional(
IsOfType('E', Monk),
IsOfType('A', Knight),
),
})
p5 = Puzzle({
'A': [
CountOfType(Knight, 3, operator.eq),
IsOfType('B', Knight),
],
'B': [
CountOfType(Monk, 1, operator.ge),
Not(IsOfType('A', Knight)),
],
'C': [
CountOfType(Knave, 0, operator.eq),
CountOfType(Monk, 2, operator.ge),
],
'D': [
ExclusiveOrConnective(
IsOfType('D', Knight),
IsOfType('B', Monk),
),
Honesty('B', 'D', operator.lt),
],
'E': CountOfType(Knave, 1, operator.eq),
'F': CountOfType(Knight, 2, operator.le),
})
def remainder_by_2_equals(a, b):
return operator.mod(a, 2) == b
p6 = Puzzle({
'A': ConjunctiveStatement(
IsOfType('B', Knight),
IsOfType('C', Knight),
),
'B': [
CountOfType(Knight, 0, remainder_by_2_equals),
IsOfType('A', Knave),
],
'C': [
Honesty('C', 'A', operator.gt),
Honesty('B', 'A', operator.gt),
],
})
p8 = Puzzle({
'Karen': [
IfConnective(
IsOfType('Thomas', Knave),
Honesty('Karen', 'Perry', operator.gt),
),
Not(IsSameAs('Perry', 'Thomas')),
],
'Perry': [
IfConnective(
CountOfType(Monk, 1, operator.ge),
CountOfType(Knight, 1, remainder_by_2_equals),
),
CountOfTypes(Knave, Knight, operator.gt),
],
'Thomas': IfConnective(
CountOfType(Knave, 0, remainder_by_2_equals),
Not(IsOfType('Thomas', Knave)),
),
})
c1 = IsSameAs('A', 'E')
p9 = Puzzle({
'A': [
Biconditional(
IsOfType('A', Monk),
CountOfType(Monk, 0, remainder_by_2_equals),
),
],
'B': [
Biconditional(
IsOfType('A', Knight),
CountOfType(Knight, 0, remainder_by_2_equals),
),
Honesty('C', 'A', operator.gt),
],
'C': [
c1,
Honesty('A', 'B', operator.gt),
],
'D': [
c1,
IfConnective(
IsOfType('E', Knave),
IsOfType('A', Knave),
),
],
'E': [
Biconditional(
IsOfType('B', Knave),
CountOfType(Knave, 0, remainder_by_2_equals),
),
IfConnective(
IsOfType('A', Knight),
IsOfType('D', Monk),
),
],
})
p13 = Puzzle({
'A': Biconditional(
Honesty('A', 'D', operator.gt),
Honesty('D', 'C', operator.gt),
),
'B': IsOfType('D', Knight),
'C': IfConnective(
Honesty('A', 'C', operator.gt),
CountOfType(Knave, 1, remainder_by_2_equals)
),
'D': ConjunctiveStatement(
Not(IsSameAs('D', 'B')),
Not(IsOfType('B', Monk)),
),
})
p14 = Puzzle({
'Ned': CountOfType(Knight, 0, remainder_by_2_equals),
'Chandler': Honesty('Zoe', 'Chandler', operator.ge),
'Zoe': CountOfType(Knight, 1, remainder_by_2_equals),
'Ewa': Honesty('Ewa', 'Zoe', operator.gt),
})
p18 = Puzzle({
'A': CountOfType(Monk, 0, operator.eq),
'B': [
ConjunctiveStatement(
IfConnective(
IsOfType('B', Knight),
CountOfType(Knight, 1, operator.eq),
),
IfConnective(
IsOfType('B', Monk),
CountOfType(Monk, 1, operator.eq),
),
IfConnective(
IsOfType('B', Knave),
CountOfType(Knave, 1, operator.eq),
),
),
Not(IsOfType('D', Monk)),
],
'C': CountOfType(Knight, 0, operator.eq),
'D': DisjunctiveStatement(
IsOfType('A', Monk),
IsOfType('D', Knave),
)
})
p19 = Puzzle({
'A': [
Honesty('C', 'B', operator.gt),
IfConnective(
Honesty('B', 'A', operator.gt),
IsOfType('B', Monk),
),
Honesty('A', 'C', operator.gt),
],
'B': [
Honesty('B', 'A', operator.gt),
Honesty('A', 'C', operator.gt),
Not(IsOfType('C', Knave)),
],
'C': [
Honesty('A', 'B', operator.gt),
Not(Honesty('B', 'A', operator.gt)),
],
})
p20 = Puzzle({
'A': [
CountOfType(Knave, 2, operator.eq),
Not(IsOfType('B', Knave)),
],
'B': [
CountOfType(Knight, 2, operator.eq),
],
'C': [
Honesty('B', 'A', operator.gt),
IsOfType('A', Knight),
]
})
p22 = Puzzle({
'Deb': IfConnective(
IsOfType('Deb', Knight),
CountOfType(Knave, 1, operator.eq),
),
'Jeb': IfConnective(
Not(IsOfType('Jeb', Monk)),
IsOfType('Bob', Monk)
),
'Rob': IfConnective(
IsOfType('Rob', Monk),
CountOfType(Knave, 3, operator.eq)
),
'Bob': [
IfConnective(
IsOfType('Bob', Knave),
IsSameAs('Deb', 'Rob')
),
CountOfType(Knave, 3, operator.eq),
],
})
p23 = Puzzle({
'A': [
Biconditional(
IsOfType('B', Knight),
IsOfType('C', Knight)
),
IsOfType('C', Knave),
],
'B': [
Biconditional(
IsOfType('A', Knight),
IsOfType('C', Monk)
),
],
'C': [
Biconditional(
IsOfType('A', Knave),
IsOfType('D', Knight),
),
IsOfType('B', Monk),
],
'D': [
Biconditional(
IsOfType('A', Knave),
IsOfType('B', Knave),
),
],
})
p24 = Puzzle({
'A': [
Honesty('B', 'C', operator.gt),
IsOfType('C', Knave),
],
'B': [
Honesty('C', 'A', operator.gt),
SumOfTypes((Knave, Knight), 2, operator.eq),
],
'C': [
IsSameAs('C', 'B'),
],
})
p25 = Puzzle({
'A': [
IsOfType('A', Knight),
CountOfType(Knave, 0, remainder_by_2_equals),
],
'B': [
IsOfType('C', Knight),
CountOfType(Monk, 0, operator.eq),
],
'C': [
CountOfType(Knight, 1, operator.eq),
Biconditional(
IsOfType('C', Knight),
IsOfType('A', Knave)
),
],
})
p26 = Puzzle({
'Antoine': [
Biconditional(
IsOfType('Bernardo', Knight),
IsOfType('Antoine', Knave),
),
CountOfType(Monk, 1, operator.ge),
],
'Bernardo': CountOfType(Knight, 1, remainder_by_2_equals),
'Campbell': ConjunctiveStatement(
Not(IsOfType('Campbell', Monk)),
IsOfType('Antoine', Monk),
)
})
b1 = Not(IsSameAs('E', 'B'))
e = IsOfType('A', Knight)
p27 = Puzzle({
'A': [
Biconditional(
Not(b1),
Honesty('D', 'A', operator.eq),
),
CountOfType(Monk, 0, operator.eq),
],
'B': [
b1,
CountOfType(Knave, 2, operator.ge),
],
'C': [
DisjunctiveStatement(
IsOfType('D', Knight),
CountOfType(Monk, 0, operator.eq),
),
Not(e),
],
'D': [
IfConnective(
Not(IsSameAs('D', 'B')),
IsOfType('E', Knave)
),
],
'E': [
e,
],
})
p27.print_puzzle_with_solutions()
if __name__ == '__main__':
main()
| true | true |
1c376169d3a20849d3b51fcf8972870fa9a4658b | 807 | py | Python | checkov/terraform/checks/resource/azure/AzureInstanceExtensions.py | jamesholland-uk/checkov | d73fd4bd7096d48ab3434a92a177bcc55605460a | [
"Apache-2.0"
] | 1 | 2021-02-13T15:24:42.000Z | 2021-02-13T15:24:42.000Z | checkov/terraform/checks/resource/azure/AzureInstanceExtensions.py | jamesholland-uk/checkov | d73fd4bd7096d48ab3434a92a177bcc55605460a | [
"Apache-2.0"
] | 7 | 2021-04-12T06:54:07.000Z | 2022-03-21T14:04:14.000Z | checkov/terraform/checks/resource/azure/AzureInstanceExtensions.py | jamesholland-uk/checkov | d73fd4bd7096d48ab3434a92a177bcc55605460a | [
"Apache-2.0"
] | 1 | 2021-12-16T03:09:55.000Z | 2021-12-16T03:09:55.000Z | from typing import Any
from checkov.common.models.enums import CheckCategories
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
class AzureInstanceExtensions(BaseResourceValueCheck):
    """CKV_AZURE_50: ensure Azure VMs do not allow extension operations."""

    def __init__(self) -> None:
        super().__init__(
            name="Ensure Virtual Machine Extensions are not Installed",
            id="CKV_AZURE_50",
            categories=[CheckCategories.GENERAL_SECURITY],
            supported_resources=["azurerm_linux_virtual_machine", "azurerm_windows_virtual_machine"],
        )

    def get_inspected_key(self) -> str:
        """Terraform attribute whose value decides the check outcome."""
        return "allow_extension_operations"

    def get_expected_value(self) -> Any:
        """Pass only when extension operations are explicitly disabled."""
        return False


check = AzureInstanceExtensions()
| 35.086957 | 106 | 0.763321 | from typing import Any
from checkov.common.models.enums import CheckCategories
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
class AzureInstanceExtensions(BaseResourceValueCheck):
def __init__(self) -> None:
name = "Ensure Virtual Machine Extensions are not Installed"
id = "CKV_AZURE_50"
supported_resources = ["azurerm_linux_virtual_machine", "azurerm_windows_virtual_machine"]
categories = [CheckCategories.GENERAL_SECURITY]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self) -> str:
return "allow_extension_operations"
def get_expected_value(self) -> Any:
return False
check = AzureInstanceExtensions()
| true | true |
1c3761753b08197305fbfa0a1376c13ef72b4aed | 505 | py | Python | note/models.py | ehomeshasha/easydata | 0c599cc34d18b8865e06b15bbb96aa58612dfde2 | [
"MIT"
] | 1 | 2018-03-16T09:56:23.000Z | 2018-03-16T09:56:23.000Z | note/models.py | ehomeshasha/easydata | 0c599cc34d18b8865e06b15bbb96aa58612dfde2 | [
"MIT"
] | null | null | null | note/models.py | ehomeshasha/easydata | 0c599cc34d18b8865e06b15bbb96aa58612dfde2 | [
"MIT"
] | null | null | null | from django.db import models
from easydata.db.mysql.fields import C_SmallIntegerField, C_AutoField, C_IntegerField
class Note(models.Model):
    # Surrogate primary key (custom auto field, up to 8 digits).
    id = C_AutoField(max_length=8, primary_key=True)
    # Author's user id; default 0 presumably means "no user" -- TODO confirm.
    uid = C_IntegerField(max_length=11, default=0)
    # Denormalized author name so notes render without a user-table lookup.
    username = models.CharField(max_length=30)
    # Note body.
    content = models.TextField()
    date_create = models.DateTimeField('date created')
    date_update = models.DateTimeField('date updated')
    # Manual sort weight; ordering direction is decided by the queries.
    displayorder = C_SmallIntegerField(max_length=5, default=0)
from easydata.db.mysql.fields import C_SmallIntegerField, C_AutoField, C_IntegerField
class Note(models.Model):
id = C_AutoField(max_length=8, primary_key=True)
uid = C_IntegerField(max_length=11, default=0)
username = models.CharField(max_length=30)
content = models.TextField()
date_create = models.DateTimeField('date created')
date_update = models.DateTimeField('date updated')
displayorder = C_SmallIntegerField(max_length=5, default=0)
| true | true |
1c37620533c2a68f63b0cbb3e74a99b8a8283d60 | 842 | py | Python | tests/test_schema_utils.py | ITISFoundation/aiohttp_apiset | c12d05aabadbd6ee9f82e4f002908c2c08be44b7 | [
"Apache-2.0"
] | null | null | null | tests/test_schema_utils.py | ITISFoundation/aiohttp_apiset | c12d05aabadbd6ee9f82e4f002908c2c08be44b7 | [
"Apache-2.0"
] | null | null | null | tests/test_schema_utils.py | ITISFoundation/aiohttp_apiset | c12d05aabadbd6ee9f82e4f002908c2c08be44b7 | [
"Apache-2.0"
] | null | null | null | import pytest
from aiohttp_apiset.swagger.loader import deref
from aiohttp_apiset.swagger.operations import OperationIdMapping
data = {
'a': {
'b': [
{'$ref': '#/definitions/G'},
3,
]
}
}
spec = {
'definitions': {
'F': 1,
'G': {'$ref': '#/definitions/F'}
}
}
def test_deref():
    """deref must return a new structure with every $ref (chains too) resolved."""
    resolved = deref(data, spec)
    assert resolved is not data
    expected = {
        'a': {
            'b': [
                1,
                3,
            ]
        }
    }
    assert resolved == expected
def test_operation_id1():
    """A valid importable dotted path yields a truthy mapping."""
    mapping = OperationIdMapping('math.sin')
    assert mapping
def test_operation_id2():
    """Descending past a non-module attribute must raise ImportError."""
    with pytest.raises(ImportError):
        OperationIdMapping('math.sin.3')
def test_operation_id3():
    """A path that is not even a dotted name must raise ValueError."""
    with pytest.raises(ValueError):
        OperationIdMapping('3')
| 17.183673 | 64 | 0.535629 | import pytest
from aiohttp_apiset.swagger.loader import deref
from aiohttp_apiset.swagger.operations import OperationIdMapping
data = {
'a': {
'b': [
{'$ref': '#/definitions/G'},
3,
]
}
}
spec = {
'definitions': {
'F': 1,
'G': {'$ref': '#/definitions/F'}
}
}
def test_deref():
deref_data = deref(data, spec)
assert deref_data is not data
assert deref_data == {
'a': {
'b': [
1,
3,
]
}
}
def test_operation_id1():
opmap = OperationIdMapping('math.sin')
assert opmap
def test_operation_id2():
with pytest.raises(ImportError):
OperationIdMapping('math.sin.3')
def test_operation_id3():
with pytest.raises(ValueError):
OperationIdMapping('3')
| true | true |
1c3762176513f80d2a2a93e3ec440f78cf0fe0ef | 6,282 | py | Python | pkgs/bokeh-0.11.1-py27_0/Examples/bokeh/plotting/server/fourier_animated.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | pkgs/bokeh-0.11.1-py27_0/Examples/bokeh/plotting/server/fourier_animated.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | pkgs/bokeh-0.11.1-py27_0/Examples/bokeh/plotting/server/fourier_animated.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # You must first run "bokeh serve" to view this example
#
# Example inspired by:
#
# https://www.youtube.com/watch?v=LznjC4Lo7lE
from __future__ import division
from collections import OrderedDict
from math import pi
import numpy as np
from bokeh.client import push_session
from bokeh.driving import repeat
from bokeh.io import vplot
from bokeh.models.sources import ColumnDataSource as CDS
from bokeh.plotting import figure, curdoc
N = 100
newx = x = np.linspace(0, 2*pi, N)
shift = 2.2
base_x = x + shift
period = pi/2
palette = ['#08519c', '#3182bd', '#6baed6', '#bdd7e7']
def new_source():
    """Create the four empty ColumnDataSources that animate one harmonic."""
    return {key: CDS() for key in ('curve', 'lines', 'circle_point', 'circleds')}
def create_circle_glyphs(p, color, sources):
    # Draw one harmonic's glyphs: the circle outline (as many 1px circles),
    # the rotating point on it, and its radius line.
    p.circle('x', 'y', size=1., line_color=color, color=None, source=sources['circleds'])
    p.circle('x', 'y', size=5, line_color=color, color=color, source=sources['circle_point'])
    p.line('radius_x', 'radius_y', line_color=color, color=color, alpha=0.5, source=sources['lines'])
def create_plot(foos, title='', r = 1, y_range=None, period = pi/2, cfoos=None):
    """Build the main Fourier plot: chained harmonic circles plus the summed
    square-wave approximation.

    ``foos``/``cfoos`` are the sin/cos partials; each circle is centred on the
    tip of the previous harmonic's rotating point.  Returns the figure and the
    per-harmonic data-source dicts.  ``r`` and ``period`` are kept for
    interface compatibility (``r`` is unused here).
    """
    if y_range is None:
        y_range = [-2, 2]
    # create new figure
    p = figure(title=title, width=800, height=300, x_range=[-2.5, 9], y_range=y_range)
    p.xgrid.bounds = (-2, 2)
    p.xaxis.bounds = (-2, 2)
    _sources = []
    cx, cy = 0, 0
    for i, foo in enumerate(foos):
        sources = new_source()
        get_new_sources(x, foo, sources, cfoos[i], cx, cy, i == 0)
        cp = sources['circle_point'].data
        cx, cy = cp['x'][0], cp['y'][0]
        if i == 0:
            # compute the full fourier eq
            full_y = sum([foo(x) for foo in foos])
            # replace the foo curve with the full fourier eq
            sources['curve'] = CDS(dict(x=x, base_x=base_x, y=full_y))
            # draw the line
            p.line('base_x', 'y', color="orange", line_width=2, source=sources['curve'],
                   legend="4sin(x)/pi + 4sin(3x)/3pi + 4sin(5x)/5pi + 4sin(7x)/7pi")
        if i == len(foos) - 1:
            # if it's the last foo let's draw a circle on the head of the curve
            sources['floating_point'] = CDS({'x': [shift], 'y': [cy]})
            # BUG FIX: color was the literal string "palette[i]" (not a valid
            # color spec); use the palette entry like every sibling call does.
            p.line('line_x', 'line_y', color=palette[i], line_width=2, source=sources['lines'])
            p.circle('x', 'y', size=10, line_color=palette[i], color=palette[i], source=sources['floating_point'])
        # draw the circle, radius and circle point related to foo domain
        create_circle_glyphs(p, palette[i], sources)
        _sources.append(sources)
    return p, _sources
def get_new_sources(xs, foo, sources, cfoo, cx=0, cy=0, compute_curve = True):
    # Refresh all data sources for one harmonic.  (cx, cy) is the centre of
    # this harmonic's circle, i.e. the rotating point of the previous one.
    if compute_curve:
        ys = foo(xs)
        sources['curve'].data = dict(x=xs, base_x=base_x, y=ys)
    # Circle radius is the partial's amplitude at ``period``.
    r = foo(period)
    # Current point on the circle; local x/y deliberately shadow the globals.
    y = foo(xs[0]) + cy
    x = cfoo(xs[0]) + cx
    sources['lines'].data = {
        'line_x': [x, shift], 'line_y': [y, y],
        'radius_x': [0, x], 'radius_y': [0, y]
    }
    sources['circle_point'].data = {'x': [x], 'y': [y], 'r': [r]}
    # Full circle outline around the centre, N points.
    sources['circleds'].data=dict(
        x = cx + np.cos(np.linspace(0, 2*pi, N)) * r,
        y = cy + np.sin(np.linspace(0, 2*pi, N)) * r,
    )
def update_sources(sources, foos, newx, ind, cfoos):
    # Advance the main plot one frame: chain every harmonic's circle off the
    # previous tip, recompute the summed curve on harmonic 0, and pin the
    # floating point of the last harmonic at x == shift.  ``ind`` is unused.
    cx, cy = 0, 0
    for i, foo in enumerate(foos):
        get_new_sources(newx, foo, sources[i], cfoos[i], cx, cy,
                        compute_curve = i != 0)
        if i == 0:
            # Harmonic 0's curve slot holds the full Fourier sum (see create_plot).
            full_y = sum([foo(newx) for foo in foos])
            sources[i]['curve'].data = dict(x=newx, base_x=base_x, y=full_y)
        cp = sources[i]['circle_point'].data
        cx, cy = cp['x'][0], cp['y'][0]
        if i == len(foos)-1:
            sources[i]['floating_point'].data['x'] = [shift]
            sources[i]['floating_point'].data['y'] = [cy]
def update_centric_sources(sources, foos, newx, ind, cfoos):
    """Refresh every per-harmonic source of the centric plot (``ind`` unused)."""
    for idx, harmonic in enumerate(foos):
        get_new_sources(newx, harmonic, sources[idx], cfoos[idx])
def create_centric_plot(foos, title='', r = 1, y_range=(-2, 2), period = pi/2, cfoos=None):
    # Build the lower plot: every harmonic circle centred at the origin, each
    # with its own curve and legend entry.  ``r`` and ``period`` are unused.
    p = figure(title=title, width=800, height=300, x_range=[-1.5, 10.5], y_range=y_range)
    p.xgrid.bounds = (-2, 2)
    p.xaxis.bounds = (-2, 2)
    _sources = []
    for i, foo in enumerate(foos):
        sources = new_source()
        get_new_sources(x, foo, sources, cfoos[i])
        _sources.append(sources)
        # Legend text for the (2i+1)-th harmonic.
        if i:
            legend = "4sin(%(c)sx)/%(c)spi" % {'c': i*2+1}
        else:
            legend = "4sin(x)/pi"
        p.line('base_x','y', color=palette[i], line_width=2, source=sources['curve'])
        p.line('line_x', 'line_y', color=palette[i], line_width=2,
               source=sources['lines'], legend=legend)
        create_circle_glyphs(p, palette[i], sources)
    return p, _sources
# create the series partials
# Sine partials of the square wave: 4*sin((2k+1)x)/((2k+1)*pi).
f1 = lambda x: (4*np.sin(x))/pi
f2 = lambda x: (4*np.sin(3*x))/(3*pi)
f3 = lambda x: (4*np.sin(5*x))/(5*pi)
f4 = lambda x: (4*np.sin(7*x))/(7*pi)
# Cosine counterparts: x-coordinate of the rotating point on each circle.
cf1 = lambda x: (4*np.cos(x))/pi
cf2 = lambda x: (4*np.cos(3*x))/(3*pi)
cf3 = lambda x: (4*np.cos(5*x))/(5*pi)
cf4 = lambda x: (4*np.cos(7*x))/(7*pi)
fourier = OrderedDict(
    fourier_4 = {
        'f': lambda x: f1(x) + f2(x) + f3(x) + f4(x),
        'fs': [f1, f2, f3, f4],
        'cfs': [cf1, cf2, cf3, cf4]
    },
)
for k, p in fourier.items():
    p['plot'], p['sources'] = create_plot(
        p['fs'], 'Fourier (Sum of the first 4 Harmonic Circles)', r = p['f'](period), cfoos = p['cfs']
    )
for k, p in fourier.items():
    p['cplot'], p['csources'] = create_centric_plot(
        p['fs'], 'Fourier First 4 Harmonics & Harmonic Circles', r = p['f'](period), cfoos = p['cfs']
    )
layout = vplot(*[f['plot'] for f in fourier.values()] + [f['cplot'] for f in fourier.values()])
# open a session to keep our local document in sync with server
session = push_session(curdoc())
@repeat(range(N))
def cb(gind):
    # One animation frame: drop the oldest x sample, append a new one so the
    # window slides by 2*pi/N, then refresh every data source.
    global newx
    oldx = np.delete(newx, 0)
    newx = np.hstack([oldx, [oldx[-1] + 2*pi/N]])
    for k, p in fourier.items():
        update_sources(p['sources'], p['fs'], newx, gind, p['cfs'])
        update_centric_sources(p['csources'], p['fs'], newx, gind, p['cfs'])
curdoc().add_periodic_callback(cb, 100)
session.show(layout) # open the document in a browser
session.loop_until_closed() # run forever
| 33.593583 | 114 | 0.586597 |
from __future__ import division
from collections import OrderedDict
from math import pi
import numpy as np
from bokeh.client import push_session
from bokeh.driving import repeat
from bokeh.io import vplot
from bokeh.models.sources import ColumnDataSource as CDS
from bokeh.plotting import figure, curdoc
N = 100
newx = x = np.linspace(0, 2*pi, N)
shift = 2.2
base_x = x + shift
period = pi/2
palette = ['#08519c', '#3182bd', '#6baed6', '#bdd7e7']
def new_source():
return dict(curve=CDS(), lines=CDS(), circle_point=CDS(), circleds=CDS())
def create_circle_glyphs(p, color, sources):
p.circle('x', 'y', size=1., line_color=color, color=None, source=sources['circleds'])
p.circle('x', 'y', size=5, line_color=color, color=color, source=sources['circle_point'])
p.line('radius_x', 'radius_y', line_color=color, color=color, alpha=0.5, source=sources['lines'])
def create_plot(foos, title='', r = 1, y_range=None, period = pi/2, cfoos=None):
if y_range is None:
y_range=[-2, 2]
p = figure(title=title, width=800, height=300, x_range=[-2.5, 9], y_range=y_range)
p.xgrid.bounds = (-2, 2)
p.xaxis.bounds = (-2, 2)
_sources = []
cx, cy = 0, 0
for i, foo in enumerate(foos):
sources = new_source()
get_new_sources(x, foo, sources, cfoos[i], cx, cy, i==0)
cp = sources['circle_point'].data
cx, cy = cp['x'][0], cp['y'][0]
if i==0:
full_y = sum([foo(x) for foo in foos])
sources['curve'] = CDS(dict(x=x, base_x=base_x, y=full_y))
p.line('base_x','y', color="orange", line_width=2, source=sources['curve'],
legend="4sin(x)/pi + 4sin(3x)/3pi + 4sin(5x)/5pi + 4sin(7x)/7pi")
if i==len(foos)-1:
sources['floating_point'] = CDS({'x':[shift], 'y': [cy]})
p.line('line_x', 'line_y', color="palette[i]", line_width=2, source=sources['lines'])
p.circle('x', 'y', size=10, line_color=palette[i], color=palette[i], source=sources['floating_point'])
create_circle_glyphs(p, palette[i], sources)
_sources.append(sources)
return p, _sources
def get_new_sources(xs, foo, sources, cfoo, cx=0, cy=0, compute_curve = True):
if compute_curve:
ys = foo(xs)
sources['curve'].data = dict(x=xs, base_x=base_x, y=ys)
r = foo(period)
y = foo(xs[0]) + cy
x = cfoo(xs[0]) + cx
sources['lines'].data = {
'line_x': [x, shift], 'line_y': [y, y],
'radius_x': [0, x], 'radius_y': [0, y]
}
sources['circle_point'].data = {'x': [x], 'y': [y], 'r': [r]}
sources['circleds'].data=dict(
x = cx + np.cos(np.linspace(0, 2*pi, N)) * r,
y = cy + np.sin(np.linspace(0, 2*pi, N)) * r,
)
def update_sources(sources, foos, newx, ind, cfoos):
cx, cy = 0, 0
for i, foo in enumerate(foos):
get_new_sources(newx, foo, sources[i], cfoos[i], cx, cy,
compute_curve = i != 0)
if i == 0:
full_y = sum([foo(newx) for foo in foos])
sources[i]['curve'].data = dict(x=newx, base_x=base_x, y=full_y)
cp = sources[i]['circle_point'].data
cx, cy = cp['x'][0], cp['y'][0]
if i == len(foos)-1:
sources[i]['floating_point'].data['x'] = [shift]
sources[i]['floating_point'].data['y'] = [cy]
def update_centric_sources(sources, foos, newx, ind, cfoos):
for i, foo in enumerate(foos):
get_new_sources(newx, foo, sources[i], cfoos[i])
def create_centric_plot(foos, title='', r = 1, y_range=(-2, 2), period = pi/2, cfoos=None):
p = figure(title=title, width=800, height=300, x_range=[-1.5, 10.5], y_range=y_range)
p.xgrid.bounds = (-2, 2)
p.xaxis.bounds = (-2, 2)
_sources = []
for i, foo in enumerate(foos):
sources = new_source()
get_new_sources(x, foo, sources, cfoos[i])
_sources.append(sources)
if i:
legend = "4sin(%(c)sx)/%(c)spi" % {'c': i*2+1}
else:
legend = "4sin(x)/pi"
p.line('base_x','y', color=palette[i], line_width=2, source=sources['curve'])
p.line('line_x', 'line_y', color=palette[i], line_width=2,
source=sources['lines'], legend=legend)
create_circle_glyphs(p, palette[i], sources)
return p, _sources
f1 = lambda x: (4*np.sin(x))/pi
f2 = lambda x: (4*np.sin(3*x))/(3*pi)
f3 = lambda x: (4*np.sin(5*x))/(5*pi)
f4 = lambda x: (4*np.sin(7*x))/(7*pi)
cf1 = lambda x: (4*np.cos(x))/pi
cf2 = lambda x: (4*np.cos(3*x))/(3*pi)
cf3 = lambda x: (4*np.cos(5*x))/(5*pi)
cf4 = lambda x: (4*np.cos(7*x))/(7*pi)
fourier = OrderedDict(
fourier_4 = {
'f': lambda x: f1(x) + f2(x) + f3(x) + f4(x),
'fs': [f1, f2, f3, f4],
'cfs': [cf1, cf2, cf3, cf4]
},
)
for k, p in fourier.items():
p['plot'], p['sources'] = create_plot(
p['fs'], 'Fourier (Sum of the first 4 Harmonic Circles)', r = p['f'](period), cfoos = p['cfs']
)
for k, p in fourier.items():
p['cplot'], p['csources'] = create_centric_plot(
p['fs'], 'Fourier First 4 Harmonics & Harmonic Circles', r = p['f'](period), cfoos = p['cfs']
)
layout = vplot(*[f['plot'] for f in fourier.values()] + [f['cplot'] for f in fourier.values()])
session = push_session(curdoc())
@repeat(range(N))
def cb(gind):
global newx
oldx = np.delete(newx, 0)
newx = np.hstack([oldx, [oldx[-1] + 2*pi/N]])
for k, p in fourier.items():
update_sources(p['sources'], p['fs'], newx, gind, p['cfs'])
update_centric_sources(p['csources'], p['fs'], newx, gind, p['cfs'])
curdoc().add_periodic_callback(cb, 100)
session.show(layout)
session.loop_until_closed()
| true | true |
1c37621aafb0e31028386e7910f15bba4ec73003 | 44 | py | Python | Src/Escher/tools/mesh_interpolate/__init__.py | sanjeevmk/GLASS | 91c0954eab87d25d4866fea5c338f79fbca4f79e | [
"MIT"
] | 2 | 2022-03-22T17:36:14.000Z | 2022-03-27T05:03:39.000Z | Src/Escher/tools/mesh_interpolate/__init__.py | sanjeevmk/glass | 91c0954eab87d25d4866fea5c338f79fbca4f79e | [
"MIT"
] | null | null | null | Src/Escher/tools/mesh_interpolate/__init__.py | sanjeevmk/glass | 91c0954eab87d25d4866fea5c338f79fbca4f79e | [
"MIT"
] | null | null | null | from .main import deformation_interpolation
| 22 | 43 | 0.886364 | from .main import deformation_interpolation
| true | true |
1c376246832b44019681cbf48ef845f33c434d11 | 12,945 | py | Python | schemas/tests/test_defs/test_base.py | polyaxon/schemas | e0742a80a0e6c5d1439d15ceb03de1e149331594 | [
"Apache-2.0"
] | 7 | 2017-09-24T15:34:17.000Z | 2020-02-14T19:54:08.000Z | schemas/tests/test_defs/test_base.py | polyaxon/schemas | e0742a80a0e6c5d1439d15ceb03de1e149331594 | [
"Apache-2.0"
] | 53 | 2017-10-16T14:43:15.000Z | 2020-07-01T18:11:11.000Z | schemas/tests/test_defs/test_base.py | polyaxon/schemas | e0742a80a0e6c5d1439d15ceb03de1e149331594 | [
"Apache-2.0"
] | 15 | 2017-10-03T22:03:38.000Z | 2021-12-03T07:11:45.000Z | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from unittest import TestCase
from marshmallow import Schema, ValidationError, fields
from polyaxon_schemas.base import BaseConfig, BaseOneOfSchema, BaseSchema
# Error text marshmallow emits for a missing required field.
REQUIRED_ERROR = u"Missing data for required field."
class FooSchema(BaseSchema):
    # Fixture schema tagged "foo": one required string field.
    value = fields.String(required=True)
    @staticmethod
    def schema_config():
        # Config class this schema deserializes into.
        return FooConfig
class FooConfig(BaseConfig):
    """Fixture config holding one string ``value``; equality is by value."""

    SCHEMA = FooSchema
    IDENTIFIER = "foo"

    def __init__(self, value=None):
        self.value = value

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.value == other.value
class BarSchema(BaseSchema):
    # Fixture schema tagged "bar": one required integer field.
    value = fields.Integer(required=True)
    @staticmethod
    def schema_config():
        # Config class this schema deserializes into.
        return BarConfig
class BarConfig(BaseConfig):
    """Fixture config holding one integer ``value``; equality is by value."""

    SCHEMA = BarSchema
    IDENTIFIER = "bar"

    def __init__(self, value=None):
        self.value = value

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.value == other.value
class BazSchema(BaseSchema):
    # Fixture schema tagged "baz": two required fields (used in partial tests).
    value1 = fields.Integer(required=True)
    value2 = fields.String(required=True)
    @staticmethod
    def schema_config():
        # Config class this schema deserializes into.
        return BazConfig
class BazConfig(BaseConfig):
    """Fixture config with two values; equality compares both fields."""

    SCHEMA = BazSchema
    IDENTIFIER = "baz"

    def __init__(self, value1=None, value2=None):
        self.value1 = value1
        self.value2 = value2

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return (self.value1, self.value2) == (other.value1, other.value2)
class EmptySchema(BaseSchema):
    # Fixture schema with no fields; only the "type" tag round-trips.
    @staticmethod
    def schema_config():
        return EmptyConfig
class EmptyConfig(BaseConfig):
    # Field-less fixture config paired with EmptySchema.
    SCHEMA = EmptySchema
    IDENTIFIER = "empty"
class MySchema(BaseOneOfSchema):
    # Registry dispatching the "type" field to the matching fixture schema.
    SCHEMAS = {
        "foo": FooSchema,
        "bar": BarSchema,
        "baz": BazSchema,
        "empty": EmptySchema,
    }
@pytest.mark.schemas_mark
class TestOneOfSchema(TestCase):
    """Exercises BaseOneOfSchema: dump/load/validate dispatch on the 'type' key,
    partial loading, nesting, and custom type-field configuration."""

    def test_dump(self):
        foo_result = MySchema().dump(FooConfig("hello"))
        assert {"type": "foo", "value": "hello"} == foo_result

        bar_result = MySchema().dump(BarConfig(123))
        assert {"type": "bar", "value": 123} == bar_result

    def test_dump_many(self):
        result = MySchema().dump([FooConfig("hello"), BarConfig(123)], many=True)
        assert [
            {"type": "foo", "value": "hello"},
            {"type": "bar", "value": 123},
        ] == result

    def test_dump_many_in_constructor(self):
        result = MySchema(many=True).dump([FooConfig("hello"), BarConfig(123)])
        assert [
            {"type": "foo", "value": "hello"},
            {"type": "bar", "value": 123},
        ] == result

    def test_dump_with_empty_keeps_type(self):
        result = MySchema().dump(EmptyConfig())
        assert {"type": "empty"} == result

    def test_load(self):
        foo_result = MySchema().load({"type": "foo", "value": "world"})
        assert FooConfig("world") == foo_result

        bar_result = MySchema().load({"type": "bar", "value": 456})
        assert BarConfig(456) == bar_result

    def test_load_many(self):
        result = MySchema().load(
            [{"type": "foo", "value": "hello world!"}, {"type": "bar", "value": 123}],
            many=True,
        )
        # BUG FIX: the original `assert X, Y == result` parsed the second
        # operand as the assert *message*, so only FooConfig's truthiness was
        # checked.  Assert the actual list equality instead.
        assert [FooConfig("hello world!"), BarConfig(123)] == result

    def test_load_many_in_constructor(self):
        result = MySchema(many=True).load(
            [{"type": "foo", "value": "hello world!"}, {"type": "bar", "value": 123}]
        )
        # BUG FIX: same comma-in-assert pitfall as in test_load_many.
        assert [FooConfig("hello world!"), BarConfig(123)] == result

    def test_load_removes_type_field(self):
        # Capture the data each concrete schema receives to prove the "type"
        # tag is stripped before delegation.
        class Nonlocal:
            data = None

        class MySchema(Schema):
            def load(self, data, *args, **kwargs):
                Nonlocal.data = data
                return super().load(data, *args, **kwargs)

        class FooSchema(MySchema):
            foo = fields.String(required=True)

        class BarSchema(MySchema):
            bar = fields.Integer(required=True)

        class TestSchema(BaseOneOfSchema):
            SCHEMAS = {"foo": FooSchema, "bar": BarSchema}

        TestSchema().load({"type": "foo", "foo": "hello"})
        assert "type" not in Nonlocal.data

        TestSchema().load({"type": "bar", "bar": 123})
        assert "type" not in Nonlocal.data

    def test_load_keeps_type_field(self):
        # With TYPE_FIELD_REMOVE = False the tag must reach the inner schema.
        class Nonlocal:
            data = None
            type = None

        class MySchema(Schema):
            def load(self, data, *args, **kwargs):
                Nonlocal.data = data
                return super().load(data, *args, **kwargs)

        class FooSchema(MySchema):
            foo = fields.String(required=True)

        class BarSchema(MySchema):
            bar = fields.Integer(required=True)

        class TestSchema(BaseOneOfSchema):
            TYPE_FIELD_REMOVE = False
            SCHEMAS = {"foo": FooSchema, "bar": BarSchema}

        TestSchema(unknown="exclude").load({"type": "foo", "foo": "hello"})
        assert Nonlocal.data["type"] == "foo"

        TestSchema(unknown="exclude").load({"type": "bar", "bar": 123})
        assert Nonlocal.data["type"] == "bar"

    def test_load_non_dict(self):
        with self.assertRaises(ValidationError):
            MySchema().load(123)

        with self.assertRaises(ValidationError):
            MySchema().load("foo")

    def test_load_errors_no_type(self):
        with self.assertRaises(ValidationError):
            MySchema().load({"value": "foo"})

    def test_load_errors_field_error(self):
        with self.assertRaises(ValidationError):
            MySchema().load({"type": "foo"})

    def test_load_errors_strict(self):
        with self.assertRaises(ValidationError):
            MySchema().load({"type": "foo"})

    def test_load_many_errors_are_indexed_by_object_position(self):
        with self.assertRaises(ValidationError):
            MySchema().load([{"type": "foo"}, {"type": "bar", "value": 123}], many=True)

    def test_load_many_errors_strict(self):
        with self.assertRaises(ValidationError):
            MySchema().load(
                [
                    {"type": "foo", "value": "hello world!"},
                    {"type": "foo"},
                    {"type": "bar", "value": 123},
                    {"type": "bar", "value": "hello"},
                ],
                many=True,
            )

    def test_load_partial_specific(self):
        result = MySchema().load({"type": "foo"}, partial=("value", "value2"))
        assert FooConfig() == result

        result = MySchema().load(
            {"type": "baz", "value1": 123}, partial=("value", "value2")
        )
        assert BazConfig(value1=123) == result

    def test_load_partial_any(self):
        result = MySchema().load({"type": "foo"}, partial=True)
        assert FooConfig() == result

        result = MySchema().load({"type": "baz", "value1": 123}, partial=True)
        assert BazConfig(value1=123) == result

        result = MySchema().load({"type": "baz", "value2": "hello"}, partial=True)
        assert BazConfig(value2="hello") == result

    def test_load_partial_specific_in_constructor(self):
        result = MySchema(partial=("value", "value2")).load({"type": "foo"})
        assert FooConfig() == result

        result = MySchema(partial=("value", "value2")).load(
            {"type": "baz", "value1": 123}
        )
        assert BazConfig(value1=123) == result

    def test_load_partial_any_in_constructor(self):
        result = MySchema(partial=True).load({"type": "foo"})
        assert FooConfig() == result

        result = MySchema(partial=True).load({"type": "baz", "value1": 123})
        assert BazConfig(value1=123) == result

        result = MySchema(partial=True).load({"type": "baz", "value2": "hello"})
        assert BazConfig(value2="hello") == result

    def test_validate(self):
        # (An exactly-duplicated assertion line was removed.)
        assert {} == MySchema().validate({"type": "foo", "value": "123"})
        assert {0: {"value": [REQUIRED_ERROR]}} == MySchema().validate({"type": "bar"})

    def test_validate_many(self):
        errors = MySchema().validate(
            [{"type": "foo", "value": "123"}, {"type": "bar", "value": 123}], many=True
        )
        assert {} == errors

        # (An exactly-duplicated validate/assert pair was removed.)
        errors = MySchema().validate([{"value": "123"}, {"type": "bar"}], many=True)
        assert {0: {"type": [REQUIRED_ERROR]}, 1: {"value": [REQUIRED_ERROR]}} == errors

    def test_validate_many_in_constructor(self):
        errors = MySchema(many=True).validate(
            [{"type": "foo", "value": "123"}, {"type": "bar", "value": 123}]
        )
        assert {} == errors

        errors = MySchema(many=True).validate([{"value": "123"}, {"type": "bar"}])
        assert {0: {"type": [REQUIRED_ERROR]}, 1: {"value": [REQUIRED_ERROR]}} == errors

    def test_validate_partial_specific(self):
        errors = MySchema().validate({"type": "foo"}, partial=("value", "value2"))
        assert {} == errors

        errors = MySchema().validate(
            {"type": "baz", "value1": 123}, partial=("value", "value2")
        )
        assert {} == errors

    def test_validate_partial_any(self):
        errors = MySchema().validate({"type": "foo"}, partial=True)
        assert {} == errors

        errors = MySchema().validate({"type": "baz", "value1": 123}, partial=True)
        assert {} == errors

        errors = MySchema().validate({"type": "baz", "value2": "hello"}, partial=True)
        assert {} == errors

    def test_validate_partial_specific_in_constructor(self):
        errors = MySchema(partial=("value", "value2")).validate({"type": "foo"})
        assert {} == errors

        errors = MySchema(partial=("value", "value2")).validate(
            {"type": "baz", "value1": 123}
        )
        assert {} == errors

    def test_validate_partial_any_in_constructor(self):
        errors = MySchema(partial=True).validate({"type": "foo"})
        assert {} == errors

        errors = MySchema(partial=True).validate({"type": "baz", "value1": 123})
        assert {} == errors

        errors = MySchema(partial=True).validate({"type": "baz", "value2": "hello"})
        assert {} == errors

    def test_using_as_nested_schema(self):
        class SchemaWithList(Schema):
            items = fields.List(fields.Nested(MySchema))

        schema = SchemaWithList()
        result = schema.load(
            {
                "items": [
                    {"type": "foo", "value": "hello world!"},
                    {"type": "bar", "value": 123},
                ]
            }
        )
        assert {"items": [FooConfig("hello world!"), BarConfig(123)]} == result

        with self.assertRaises(ValidationError):
            schema.load(
                {"items": [{"type": "foo", "value": "hello world!"}, {"value": 123}]}
            )

    def test_using_as_nested_schema_with_many(self):
        class SchemaWithMany(Schema):
            items = fields.Nested(MySchema, many=True)

        schema = SchemaWithMany()
        result = schema.load(
            {
                "items": [
                    {"type": "foo", "value": "hello world!"},
                    {"type": "bar", "value": 123},
                ]
            }
        )
        assert {"items": [FooConfig("hello world!"), BarConfig(123)]} == result

        with self.assertRaises(ValidationError):
            schema.load(
                {"items": [{"type": "foo", "value": "hello world!"}, {"value": 123}]}
            )

    def test_using_custom_type_field(self):
        class MyCustomTypeFieldSchema(MySchema):
            TYPE_FIELD = "object_type"

        schema = MyCustomTypeFieldSchema()
        data = [FooConfig("hello"), BarConfig(111)]
        marshalled = schema.dump(data, many=True)
        assert [
            {"object_type": "foo", "value": "hello"},
            {"object_type": "bar", "value": 111},
        ] == marshalled

        unmarshalled = schema.load(marshalled, many=True)
        assert data == unmarshalled
| 32.443609 | 88 | 0.575435 |
import pytest
from unittest import TestCase
from marshmallow import Schema, ValidationError, fields
from polyaxon_schemas.base import BaseConfig, BaseOneOfSchema, BaseSchema
REQUIRED_ERROR = u"Missing data for required field."
class FooSchema(BaseSchema):
value = fields.String(required=True)
@staticmethod
def schema_config():
return FooConfig
class FooConfig(BaseConfig):
SCHEMA = FooSchema
IDENTIFIER = "foo"
def __init__(self, value=None):
self.value = value
def __eq__(self, other):
return isinstance(other, self.__class__) and self.value == other.value
class BarSchema(BaseSchema):
value = fields.Integer(required=True)
@staticmethod
def schema_config():
return BarConfig
class BarConfig(BaseConfig):
SCHEMA = BarSchema
IDENTIFIER = "bar"
def __init__(self, value=None):
self.value = value
def __eq__(self, other):
return isinstance(other, self.__class__) and self.value == other.value
class BazSchema(BaseSchema):
value1 = fields.Integer(required=True)
value2 = fields.String(required=True)
@staticmethod
def schema_config():
return BazConfig
class BazConfig(BaseConfig):
SCHEMA = BazSchema
IDENTIFIER = "baz"
def __init__(self, value1=None, value2=None):
self.value1 = value1
self.value2 = value2
def __eq__(self, other):
return (
isinstance(other, self.__class__)
and self.value1 == other.value1
and self.value2 == other.value2
)
class EmptySchema(BaseSchema):
@staticmethod
def schema_config():
return EmptyConfig
class EmptyConfig(BaseConfig):
SCHEMA = EmptySchema
IDENTIFIER = "empty"
class MySchema(BaseOneOfSchema):
SCHEMAS = {
"foo": FooSchema,
"bar": BarSchema,
"baz": BazSchema,
"empty": EmptySchema,
}
@pytest.mark.schemas_mark
class TestOneOfSchema(TestCase):
def test_dump(self):
foo_result = MySchema().dump(FooConfig("hello"))
assert {"type": "foo", "value": "hello"} == foo_result
bar_result = MySchema().dump(BarConfig(123))
assert {"type": "bar", "value": 123} == bar_result
def test_dump_many(self):
result = MySchema().dump([FooConfig("hello"), BarConfig(123)], many=True)
assert [
{"type": "foo", "value": "hello"},
{"type": "bar", "value": 123},
] == result
def test_dump_many_in_constructor(self):
result = MySchema(many=True).dump([FooConfig("hello"), BarConfig(123)])
assert [
{"type": "foo", "value": "hello"},
{"type": "bar", "value": 123},
] == result
def test_dump_with_empty_keeps_type(self):
result = MySchema().dump(EmptyConfig())
assert {"type": "empty"} == result
def test_load(self):
foo_result = MySchema().load({"type": "foo", "value": "world"})
assert FooConfig("world") == foo_result
bar_result = MySchema().load({"type": "bar", "value": 456})
assert BarConfig(456) == bar_result
def test_load_many(self):
result = MySchema().load(
[{"type": "foo", "value": "hello world!"}, {"type": "bar", "value": 123}],
many=True,
)
assert FooConfig("hello world!"), BarConfig(123) == result
def test_load_many_in_constructor(self):
result = MySchema(many=True).load(
[{"type": "foo", "value": "hello world!"}, {"type": "bar", "value": 123}]
)
assert FooConfig("hello world!"), BarConfig(123) == result
def test_load_removes_type_field(self):
class Nonlocal:
data = None
class MySchema(Schema):
def load(self, data, *args, **kwargs):
Nonlocal.data = data
return super().load(data, *args, **kwargs)
class FooSchema(MySchema):
foo = fields.String(required=True)
class BarSchema(MySchema):
bar = fields.Integer(required=True)
class TestSchema(BaseOneOfSchema):
SCHEMAS = {"foo": FooSchema, "bar": BarSchema}
TestSchema().load({"type": "foo", "foo": "hello"})
assert "type" not in Nonlocal.data
TestSchema().load({"type": "bar", "bar": 123})
assert "type" not in Nonlocal.data
def test_load_keeps_type_field(self):
class Nonlocal:
data = None
type = None
class MySchema(Schema):
def load(self, data, *args, **kwargs):
Nonlocal.data = data
return super().load(data, *args, **kwargs)
class FooSchema(MySchema):
foo = fields.String(required=True)
class BarSchema(MySchema):
bar = fields.Integer(required=True)
class TestSchema(BaseOneOfSchema):
TYPE_FIELD_REMOVE = False
SCHEMAS = {"foo": FooSchema, "bar": BarSchema}
TestSchema(unknown="exclude").load({"type": "foo", "foo": "hello"})
assert Nonlocal.data["type"] == "foo"
TestSchema(unknown="exclude").load({"type": "bar", "bar": 123})
assert Nonlocal.data["type"] == "bar"
    def test_load_non_dict(self):
        """Scalar payloads cannot be dispatched and must raise."""
        with self.assertRaises(ValidationError):
            MySchema().load(123)
        with self.assertRaises(ValidationError):
            MySchema().load("foo")
    def test_load_errors_no_type(self):
        """A mapping without the discriminator key must raise."""
        with self.assertRaises(ValidationError):
            MySchema().load({"value": "foo"})
    def test_load_errors_field_error(self):
        """Field-level errors from the sub-schema propagate (value missing)."""
        with self.assertRaises(ValidationError):
            MySchema().load({"type": "foo"})
    def test_load_errors_strict(self):
        # NOTE(review): identical body to test_load_errors_field_error; the
        # name suggests a legacy strict-mode variant -- confirm before removal.
        with self.assertRaises(ValidationError):
            MySchema().load({"type": "foo"})
    def test_load_many_errors_are_indexed_by_object_position(self):
        """With many=True a single invalid item fails the whole load."""
        with self.assertRaises(ValidationError):
            MySchema().load([{"type": "foo"}, {"type": "bar", "value": 123}], many=True)
    def test_load_many_errors_strict(self):
        """Multiple invalid items (missing and wrong-typed values) all raise."""
        with self.assertRaises(ValidationError):
            MySchema().load(
                [
                    {"type": "foo", "value": "hello world!"},
                    {"type": "foo"},
                    {"type": "bar", "value": 123},
                    {"type": "bar", "value": "hello"},
                ],
                many=True,
            )
    def test_load_partial_specific(self):
        """partial=(fields...) skips required-checks for the listed fields."""
        result = MySchema().load({"type": "foo"}, partial=("value", "value2"))
        assert FooConfig() == result
        result = MySchema().load(
            {"type": "baz", "value1": 123}, partial=("value", "value2")
        )
        assert BazConfig(value1=123) == result
    def test_load_partial_any(self):
        """partial=True skips required-checks for every field."""
        result = MySchema().load({"type": "foo"}, partial=True)
        assert FooConfig() == result
        result = MySchema().load({"type": "baz", "value1": 123}, partial=True)
        assert BazConfig(value1=123) == result
        result = MySchema().load({"type": "baz", "value2": "hello"}, partial=True)
        assert BazConfig(value2="hello") == result
    def test_load_partial_specific_in_constructor(self):
        """partial given to __init__ behaves like partial given to load()."""
        result = MySchema(partial=("value", "value2")).load({"type": "foo"})
        assert FooConfig() == result
        result = MySchema(partial=("value", "value2")).load(
            {"type": "baz", "value1": 123}
        )
        assert BazConfig(value1=123) == result
    def test_load_partial_any_in_constructor(self):
        """partial=True in __init__ behaves like partial=True in load()."""
        result = MySchema(partial=True).load({"type": "foo"})
        assert FooConfig() == result
        result = MySchema(partial=True).load({"type": "baz", "value1": 123})
        assert BazConfig(value1=123) == result
        result = MySchema(partial=True).load({"type": "baz", "value2": "hello"})
        assert BazConfig(value2="hello") == result
def test_validate(self):
assert {} == MySchema().validate({"type": "foo", "value": "123"})
assert {0: {"value": [REQUIRED_ERROR]}} == MySchema().validate({"type": "bar"})
assert {0: {"value": [REQUIRED_ERROR]}} == MySchema().validate({"type": "bar"})
def test_validate_many(self):
errors = MySchema().validate(
[{"type": "foo", "value": "123"}, {"type": "bar", "value": 123}], many=True
)
assert {} == errors
errors = MySchema().validate([{"value": "123"}, {"type": "bar"}], many=True)
assert {0: {"type": [REQUIRED_ERROR]}, 1: {"value": [REQUIRED_ERROR]}} == errors
errors = MySchema().validate([{"value": "123"}, {"type": "bar"}], many=True)
assert {0: {"type": [REQUIRED_ERROR]}, 1: {"value": [REQUIRED_ERROR]}} == errors
    def test_validate_many_in_constructor(self):
        """many=True in the constructor validates each item separately."""
        errors = MySchema(many=True).validate(
            [{"type": "foo", "value": "123"}, {"type": "bar", "value": 123}]
        )
        assert {} == errors
        errors = MySchema(many=True).validate([{"value": "123"}, {"type": "bar"}])
        assert {0: {"type": [REQUIRED_ERROR]}, 1: {"value": [REQUIRED_ERROR]}} == errors
    def test_validate_partial_specific(self):
        """partial=(fields...) suppresses missing-field errors in validate()."""
        errors = MySchema().validate({"type": "foo"}, partial=("value", "value2"))
        assert {} == errors
        errors = MySchema().validate(
            {"type": "baz", "value1": 123}, partial=("value", "value2")
        )
        assert {} == errors
    def test_validate_partial_any(self):
        """partial=True suppresses all missing-field errors in validate()."""
        errors = MySchema().validate({"type": "foo"}, partial=True)
        assert {} == errors
        errors = MySchema().validate({"type": "baz", "value1": 123}, partial=True)
        assert {} == errors
        errors = MySchema().validate({"type": "baz", "value2": "hello"}, partial=True)
        assert {} == errors
    def test_validate_partial_specific_in_constructor(self):
        """partial given to __init__ applies to validate() as well."""
        errors = MySchema(partial=("value", "value2")).validate({"type": "foo"})
        assert {} == errors
        errors = MySchema(partial=("value", "value2")).validate(
            {"type": "baz", "value1": 123}
        )
        assert {} == errors
    def test_validate_partial_any_in_constructor(self):
        """partial=True given to __init__ applies to validate() as well."""
        errors = MySchema(partial=True).validate({"type": "foo"})
        assert {} == errors
        errors = MySchema(partial=True).validate({"type": "baz", "value1": 123})
        assert {} == errors
        errors = MySchema(partial=True).validate({"type": "baz", "value2": "hello"})
        assert {} == errors
    def test_using_as_nested_schema(self):
        """The one-of schema composes inside fields.List(fields.Nested(...))."""
        class SchemaWithList(Schema):
            items = fields.List(fields.Nested(MySchema))
        schema = SchemaWithList()
        result = schema.load(
            {
                "items": [
                    {"type": "foo", "value": "hello world!"},
                    {"type": "bar", "value": 123},
                ]
            }
        )
        assert {"items": [FooConfig("hello world!"), BarConfig(123)]} == result
        # An item missing the discriminator must fail the whole load.
        with self.assertRaises(ValidationError):
            schema.load(
                {"items": [{"type": "foo", "value": "hello world!"}, {"value": 123}]}
            )
    def test_using_as_nested_schema_with_many(self):
        """The one-of schema composes as fields.Nested(..., many=True)."""
        class SchemaWithMany(Schema):
            items = fields.Nested(MySchema, many=True)
        schema = SchemaWithMany()
        result = schema.load(
            {
                "items": [
                    {"type": "foo", "value": "hello world!"},
                    {"type": "bar", "value": 123},
                ]
            }
        )
        assert {"items": [FooConfig("hello world!"), BarConfig(123)]} == result
        # An item missing the discriminator must fail the whole load.
        with self.assertRaises(ValidationError):
            schema.load(
                {"items": [{"type": "foo", "value": "hello world!"}, {"value": 123}]}
            )
def test_using_custom_type_field(self):
class MyCustomTypeFieldSchema(MySchema):
TYPE_FIELD = "object_type"
schema = MyCustomTypeFieldSchema()
data = [FooConfig("hello"), BarConfig(111)]
marshalled = schema.dump(data, many=True)
assert [
{"object_type": "foo", "value": "hello"},
{"object_type": "bar", "value": 111},
] == marshalled
unmarshalled = schema.load(marshalled, many=True)
assert data == unmarshalled
| true | true |
1c3764399482c80bed57bf544783a90cda22efad | 2,832 | py | Python | aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/ListExecutionPlanInstancesRequest.py | leafcoder/aliyun-openapi-python-sdk | 26b441ab37a5cda804de475fd5284bab699443f1 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/ListExecutionPlanInstancesRequest.py | leafcoder/aliyun-openapi-python-sdk | 26b441ab37a5cda804de475fd5284bab699443f1 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/ListExecutionPlanInstancesRequest.py | leafcoder/aliyun-openapi-python-sdk | 26b441ab37a5cda804de475fd5284bab699443f1 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkemr.endpoint import endpoint_data
class ListExecutionPlanInstancesRequest(RpcRequest):
	"""RPC request for the EMR ``ListExecutionPlanInstances`` API (2016-04-08).

	Follows the generated aliyun-python-sdk style: each query parameter is
	mirrored by a get_X/set_X accessor pair over the request's query params.
	"""
	def __init__(self):
		RpcRequest.__init__(self, 'Emr', '2016-04-08', 'ListExecutionPlanInstances','emr')
		self.set_method('POST')
		# Endpoint resolution data is attached only when the installed SDK
		# core declares the corresponding attributes.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
	def get_OnlyLastInstance(self):
		return self.get_query_params().get('OnlyLastInstance')
	def set_OnlyLastInstance(self,OnlyLastInstance):
		self.add_query_param('OnlyLastInstance',OnlyLastInstance)
	def get_ResourceOwnerId(self):
		return self.get_query_params().get('ResourceOwnerId')
	def set_ResourceOwnerId(self,ResourceOwnerId):
		self.add_query_param('ResourceOwnerId',ResourceOwnerId)
	def get_ExecutionPlanIdLists(self):
		# NOTE(review): accessor name is plural but the stored key is singular
		# ('ExecutionPlanIdList') -- generated naming, kept as-is.
		return self.get_query_params().get('ExecutionPlanIdList')
	def set_ExecutionPlanIdLists(self, ExecutionPlanIdLists):
		# Repeated values are flattened to ExecutionPlanIdList.1, .2, ...
		# (1-based indices); None entries are skipped.
		for depth1 in range(len(ExecutionPlanIdLists)):
			if ExecutionPlanIdLists[depth1] is not None:
				self.add_query_param('ExecutionPlanIdList.' + str(depth1 + 1) , ExecutionPlanIdLists[depth1])
	def get_StatusLists(self):
		return self.get_query_params().get('StatusList')
	def set_StatusLists(self, StatusLists):
		# Same 1-based flattening as set_ExecutionPlanIdLists.
		for depth1 in range(len(StatusLists)):
			if StatusLists[depth1] is not None:
				self.add_query_param('StatusList.' + str(depth1 + 1) , StatusLists[depth1])
	def get_IsDesc(self):
		return self.get_query_params().get('IsDesc')
	def set_IsDesc(self,IsDesc):
		self.add_query_param('IsDesc',IsDesc)
	def get_PageNumber(self):
		return self.get_query_params().get('PageNumber')
	def set_PageNumber(self,PageNumber):
		self.add_query_param('PageNumber',PageNumber)
	def get_PageSize(self):
		return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize) | 36.307692 | 98 | 0.764831 |
from aliyunsdkcore.request import RpcRequest
from aliyunsdkemr.endpoint import endpoint_data
class ListExecutionPlanInstancesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Emr', '2016-04-08', 'ListExecutionPlanInstances','emr')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_OnlyLastInstance(self):
return self.get_query_params().get('OnlyLastInstance')
def set_OnlyLastInstance(self,OnlyLastInstance):
self.add_query_param('OnlyLastInstance',OnlyLastInstance)
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ExecutionPlanIdLists(self):
return self.get_query_params().get('ExecutionPlanIdList')
def set_ExecutionPlanIdLists(self, ExecutionPlanIdLists):
for depth1 in range(len(ExecutionPlanIdLists)):
if ExecutionPlanIdLists[depth1] is not None:
self.add_query_param('ExecutionPlanIdList.' + str(depth1 + 1) , ExecutionPlanIdLists[depth1])
def get_StatusLists(self):
return self.get_query_params().get('StatusList')
def set_StatusLists(self, StatusLists):
for depth1 in range(len(StatusLists)):
if StatusLists[depth1] is not None:
self.add_query_param('StatusList.' + str(depth1 + 1) , StatusLists[depth1])
def get_IsDesc(self):
return self.get_query_params().get('IsDesc')
def set_IsDesc(self,IsDesc):
self.add_query_param('IsDesc',IsDesc)
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize) | true | true |
1c37648a13789a415e017f2ba4c9aeb2b2f87007 | 11,687 | py | Python | user/views.py | Samurai-XHe/myblog | c9e182b84c3cb06b3207e7359f0a4d352c28d043 | [
"MIT"
] | 1 | 2018-09-25T09:11:17.000Z | 2018-09-25T09:11:17.000Z | user/views.py | Samurai-XHe/myblog | c9e182b84c3cb06b3207e7359f0a4d352c28d043 | [
"MIT"
] | null | null | null | user/views.py | Samurai-XHe/myblog | c9e182b84c3cb06b3207e7359f0a4d352c28d043 | [
"MIT"
] | null | null | null | import random,time, string,re
from django.shortcuts import render, redirect
from django.contrib import auth
from django.contrib.auth.models import User
from django.urls import reverse
from django.http import JsonResponse
from django.core.mail import send_mail
from .forms import LoginForm, RegisterForm, ChangeNickNameForm, BindEmailForm
from .forms import ChangePassWordFormk, ForgetPasswordForm, ChangeForgetPasswordForm
from .models import Profile
def login(request):
    """Log a user in; on success redirect to ?from= or the blog list."""
    if request.method != 'POST':
        # Plain GET: show an unbound form.
        return render(request, 'user/login.html', {'login_form': LoginForm()})
    login_form = LoginForm(request.POST)
    if login_form.is_valid():
        auth.login(request, login_form.cleaned_data['user'])
        return redirect(request.GET.get('from', reverse('blog_list')))
    # Invalid credentials: re-render with the bound form (shows errors).
    return render(request, 'user/login.html', {'login_form': login_form})
def login_for_modal(request):
    """AJAX login endpoint; returns JSON {'status': 'SUCCESS' | 'ERROR'}."""
    form = LoginForm(request.POST)
    if form.is_valid():
        auth.login(request, form.cleaned_data['user'])
        status = 'SUCCESS'
    else:
        status = 'ERROR'
    return JsonResponse({'status': status})
def user_info(request):
    """Render the account-details page for the current user."""
    return render(request, 'user/user_info.html')
def logout(request):
    """Log the user out, then redirect to ?from= or the blog list."""
    auth.logout(request)
    return redirect(request.GET.get('from', reverse('blog_list')))
def register(request):
    """Create a new account, log the user in, and redirect back.

    ``RegisterForm`` is expected to validate the email verification code
    stored in the session (it receives ``request``) -- see register_code.
    """
    redirect_to = request.GET.get('from', reverse('blog_list'))
    if request.method == 'POST':
        register_form = RegisterForm(request.POST, request=request)
        if register_form.is_valid():
            username = register_form.cleaned_data['username']
            password = register_form.cleaned_data['password']
            email = register_form.cleaned_data['email']
            # Persist the new account.
            User.objects.create_user(username=username, password=password, email=email)
            # Authenticate (attaches the auth backend) and log in immediately.
            user = auth.authenticate(username=username, password=password)
            auth.login(request, user)
            # BUG FIX: redirect() treats extra positional args as reverse()
            # arguments; the stray '/' served no purpose and is removed.
            return redirect(redirect_to)
    else:
        register_form = RegisterForm()
    context = {}
    context['form'] = register_form
    context['page_title'] = '欢迎注册'
    context['form_title'] = '欢迎注册'
    context['submit_text'] = '注册'
    context['return_back'] = redirect_to
    return render(request, 'user/register.html', context)
def register_code(request):
    """Email a verification code for registration.

    The original body was a byte-for-byte duplicate of
    send_verification_code; this endpoint is kept for the registration URL
    but delegates to the single implementation to avoid drift.
    """
    return send_verification_code(request)
def change_nickname(request):
    """Update the logged-in user's nickname via ChangeNickNameForm."""
    redirect_to = request.GET.get('from', reverse('blog_list'))
    if request.method == 'POST':
        form = ChangeNickNameForm(request.POST, user=request.user)
        if form.is_valid():
            nickname_new = form.cleaned_data['nickname_new']
            # The Profile row is created lazily the first time it is needed.
            profile, created = Profile.objects.get_or_create(user=request.user)
            profile.nickname = nickname_new
            profile.save()
            return redirect(redirect_to)
        # Invalid POST falls through and re-renders with the bound form.
    else:
        form = ChangeNickNameForm()
    context = {}
    context['form'] = form
    context['page_title'] = '修改昵称'
    context['form_title'] = '修改昵称'
    context['submit_text'] = '修改'
    context['return_back'] = redirect_to
    return render(request,'form.html', context)
def bind_email(request):
    """Bind a verified email address to the logged-in user.

    BindEmailForm receives ``request`` and is presumed to check the
    verification code stored in the session by send_verification_code
    (TODO confirm in forms.py); on success the bookkeeping keys are cleared.
    """
    redirect_to = request.GET.get('from', reverse('blog_list'))
    if request.method == 'POST':
        form = BindEmailForm(request.POST, request=request)
        if form.is_valid():
            email = form.cleaned_data['email']
            request.user.email = email
            request.user.save()
            # Invalidate the one-time code and the throttle bookkeeping.
            del request.session[email]
            del request.session['email']
            del request.session['send_code_time']
            return redirect(redirect_to)
    else:
        form = BindEmailForm()
    context = {}
    context['form'] = form
    context['page_title'] = '绑定邮箱'
    context['form_title'] = '绑定邮箱'
    context['submit_text'] = '绑定'
    context['return_back'] = redirect_to
    return render(request, 'user/bind_email.html', context)
def send_verification_code(request):
    """AJAX endpoint: email a 4-character verification code for email binding.

    Expects ``?email=...`` and returns JSON ``{'status', 'code'?, 'message'}``.
    Error codes: 401 empty input, 400 malformed address, 402 address already
    in use, 403 throttled (at most one code every 30 s per session).
    """
    # Default is the literal string 'None' (not the None object) when the
    # parameter is entirely absent -- it then fails the regex check below.
    email = request.GET.get('email', 'None')
    data = {}
    if email == '':
        data['status'] = 'ERROR'
        data['code'] = '401'
        data['message'] = '邮箱不能为空'
    elif not re.search(r'^\w+([-+.]\w+)*@\w+([-.]\w+)*\.\w+([-.]\w+)*$', email):
        data['status'] = 'ERROR'
        data['code'] = '400'
        data['message'] = '请输入正确的邮箱地址'
    else:
        if User.objects.filter(email=email).exists():
            data['status'] = 'ERROR'
            data['code'] = '402'
            data['message'] = '该邮箱已被使用,请换一个邮箱地址'
        else:
            # 4 distinct characters (random.sample draws without replacement).
            code = ''.join(random.sample(string.ascii_letters + string.digits, 4))
            now = int(time.time())
            send_code_time = request.session.get('send_code_time', 0)
            if now - send_code_time < 30:
                data['status'] = 'ERROR'
                data['code'] = '403'
                data['message'] = '您操作太频繁了'
            else:
                # The code is stored in the session keyed by the address itself.
                request.session[email] = code
                request.session['send_code_time'] = now
                request.session['email'] = email
                send_mail(
                    '绑定邮箱',
                    '您的验证码:%s' % code,
                    '847834358@qq.com',
                    [email],
                    fail_silently=False,
                )
                data['status'] = 'SUCCESS'
                data['message'] = '发送成功'
    return JsonResponse(data)
def change_password(request):
    """Change the current user's password, then send them to the login page.

    ChangePassWordFormk [sic] receives the current user and is expected to
    verify the old password; it returns the target user in cleaned_data.
    """
    redirect_to = request.GET.get('from', reverse('blog_list'))
    if request.method == 'POST':
        form = ChangePassWordFormk(request.POST, user=request.user)
        if form.is_valid():
            new_password = form.cleaned_data['new_password']
            user = form.cleaned_data['user']
            user.set_password(new_password)
            user.save()
            # Re-authentication is required after a password change.
            return redirect(reverse('user:login'))
    else:
        form = ChangePassWordFormk()
    context = {}
    context['form'] = form
    context['page_title'] = '修改密码'
    context['form_title'] = '修改密码'
    context['submit_text'] = '修改'
    context['return_back'] = redirect_to
    return render(request, 'user/change_password.html', context)
def forget_password(request):
    """Step 1 of password reset: verify identity via an emailed code.

    On success the verification bookkeeping is cleared; note the 'email'
    session key (set by send_verification_code_forget) is deliberately kept
    for the follow-up change_forget_password view.
    """
    redirect_to = request.GET.get('from', reverse('blog_list'))
    context = {}
    if request.method == 'POST':
        form = ForgetPasswordForm(request.POST, request=request)
        if form.is_valid():
            email = form.cleaned_data['email']
            del request.session[email]
            del request.session['send_code_time']
            del request.session['username_or_email']
            return redirect(reverse('user:change_forget_password'))
    else:
        form = ForgetPasswordForm()
    context['form'] = form
    context['page_title'] = '忘记密码'
    context['form_title'] = '找回密码'
    context['submit_text'] = '提交'
    context['return_back'] = redirect_to
    return render(request, 'user/forget_password.html', context)
def send_verification_code_forget(request):
    """AJAX endpoint: email a password-reset code.

    Accepts either a username or an email address in ``?username_or_email=``.
    Returns JSON ``{'status', 'code'?, 'message'}``; error codes: 401 empty
    input, 402 unknown account, 403 throttled (one code per 30 s per session).
    """
    username_or_email = request.GET.get('username_or_email', 'None')
    data = {}
    if username_or_email == '':
        data['status'] = 'ERROR'
        data['code'] = '401'
        data['message'] = '用户名或邮箱地址不能为空'
        return JsonResponse(data)
    # Resolve the input to the account's email address.  The original code
    # duplicated the entire throttle/send logic in both branches; unifying
    # here preserves the exact session keys and recipients of both paths.
    if User.objects.filter(email=username_or_email).exists():
        email = username_or_email
    elif User.objects.filter(username=username_or_email).exists():
        email = User.objects.get(username=username_or_email).email
    else:
        data['status'] = 'ERROR'
        data['code'] = '402'
        data['message'] = '您输入的用户名或邮箱地址不存在'
        return JsonResponse(data)
    now = int(time.time())
    send_code_time = request.session.get('send_code_time', 0)
    if now - send_code_time < 30:
        data['status'] = 'ERROR'
        data['code'] = '403'
        data['message'] = '您操作太频繁了'
    else:
        # 4 distinct characters; stored in the session keyed by the address.
        code = ''.join(random.sample(string.ascii_letters + string.digits, 4))
        request.session[email] = code
        request.session['send_code_time'] = now
        request.session['username_or_email'] = username_or_email
        request.session['email'] = email
        send_mail(
            '找回密码',
            '您的验证码:%s' % code,
            '847834358@qq.com',
            [email],
            fail_silently=False,
        )
        data['status'] = 'SUCCESS'
        data['message'] = '发送成功'
    return JsonResponse(data)
def change_forget_password(request):
    """Step 2 of password reset: set a new password.

    Requires the 'email' session key left by the verified reset flow;
    without it the user is bounced back to the blog list.
    """
    context = {}
    if request.session.get('email', '') != '':
        if request.method == 'POST':
            form = ChangeForgetPasswordForm(request.POST)
            if form.is_valid():
                email = request.session['email']
                # BUG FIX: the session token was previously deleted before
                # validation, so one invalid submission destroyed the reset
                # state and retries silently redirected away.  Consume it
                # only once the reset actually succeeds.
                del request.session['email']
                new_password = form.cleaned_data['new_password']
                user = User.objects.get(email=email)
                user.set_password(new_password)
                user.save()
                return redirect(reverse('user:login'))
        else:
            form = ChangeForgetPasswordForm()
        context['form'] = form
        context['page_title'] = '重置密码'
        context['form_title'] = '重置密码'
        context['submit_text'] = '提交'
        return render(request, 'user/change_forget_password.html', context)
    else:
        return redirect(reverse('blog_list'))
| 36.182663 | 98 | 0.564816 | import random,time, string,re
from django.shortcuts import render, redirect
from django.contrib import auth
from django.contrib.auth.models import User
from django.urls import reverse
from django.http import JsonResponse
from django.core.mail import send_mail
from .forms import LoginForm, RegisterForm, ChangeNickNameForm, BindEmailForm
from .forms import ChangePassWordFormk, ForgetPasswordForm, ChangeForgetPasswordForm
from .models import Profile
def login(request):
if request.method == 'POST':
login_form = LoginForm(request.POST)
if login_form.is_valid():
user = login_form.cleaned_data['user']
auth.login(request,user)
return redirect(request.GET.get('from', reverse('blog_list')))
else:
login_form = LoginForm()
context = {}
context['login_form'] = login_form
return render(request, 'user/login.html', context)
def login_for_modal(request):
login_form = LoginForm(request.POST)
data = {}
if login_form.is_valid():
user = login_form.cleaned_data['user']
auth.login(request, user)
data['status'] = 'SUCCESS'
else:
data['status'] = 'ERROR'
return JsonResponse(data)
def user_info(request):
return render(request, 'user/user_info.html')
def logout(request):
auth.logout(request)
return redirect(request.GET.get('from', reverse('blog_list')))
def register(request):
redirect_to = request.GET.get('from', reverse('blog_list'))
if request.method == 'POST':
register_form = RegisterForm(request.POST, request=request)
if register_form.is_valid():
username = register_form.cleaned_data['username']
password = register_form.cleaned_data['password']
email = register_form.cleaned_data['email']
new_user = User.objects.create_user(username=username, password=password, email=email)
user = auth.authenticate(username=username, password=password)
auth.login(request, user)
return redirect(redirect_to, '/')
else:
register_form = RegisterForm()
context = {}
context['form'] = register_form
context['page_title'] = '欢迎注册'
context['form_title'] = '欢迎注册'
context['submit_text'] = '注册'
context['return_back'] = redirect_to
return render(request, 'user/register.html', context)
def register_code(request):
email = request.GET.get('email', 'None')
data = {}
if email == '':
data['status'] = 'ERROR'
data['code'] = '401'
data['message'] = '邮箱不能为空'
elif not re.search(r'^\w+([-+.]\w+)*@\w+([-.]\w+)*\.\w+([-.]\w+)*$', email):
data['status'] = 'ERROR'
data['code'] = '400'
data['message'] = '请输入正确的邮箱地址'
else:
if User.objects.filter(email=email).exists():
data['status'] = 'ERROR'
data['code'] = '402'
data['message'] = '该邮箱已被使用,请换一个邮箱地址'
else:
code = ''.join(random.sample(string.ascii_letters + string.digits, 4))
now = int(time.time())
send_code_time = request.session.get('send_code_time', 0)
if now - send_code_time < 30:
data['status'] = 'ERROR'
data['code'] = '403'
data['message'] = '您操作太频繁了'
else:
request.session[email] = code
request.session['send_code_time'] = now
request.session['email'] = email
send_mail(
'绑定邮箱',
'您的验证码:%s' % code,
'847834358@qq.com',
[email],
fail_silently=False,
)
data['status'] = 'SUCCESS'
data['message'] = '发送成功'
return JsonResponse(data)
def change_nickname(request):
redirect_to = request.GET.get('from', reverse('blog_list'))
if request.method == 'POST':
form = ChangeNickNameForm(request.POST, user=request.user)
if form.is_valid():
nickname_new = form.cleaned_data['nickname_new']
profile, created = Profile.objects.get_or_create(user=request.user)
profile.nickname = nickname_new
profile.save()
return redirect(redirect_to)
else:
form = ChangeNickNameForm()
context = {}
context['form'] = form
context['page_title'] = '修改昵称'
context['form_title'] = '修改昵称'
context['submit_text'] = '修改'
context['return_back'] = redirect_to
return render(request,'form.html', context)
def bind_email(request):
redirect_to = request.GET.get('from', reverse('blog_list'))
if request.method == 'POST':
form = BindEmailForm(request.POST, request=request)
if form.is_valid():
email = form.cleaned_data['email']
request.user.email = email
request.user.save()
del request.session[email]
del request.session['email']
del request.session['send_code_time']
return redirect(redirect_to)
else:
form = BindEmailForm()
context = {}
context['form'] = form
context['page_title'] = '绑定邮箱'
context['form_title'] = '绑定邮箱'
context['submit_text'] = '绑定'
context['return_back'] = redirect_to
return render(request, 'user/bind_email.html', context)
def send_verification_code(request):
email = request.GET.get('email', 'None')
data = {}
if email == '':
data['status'] = 'ERROR'
data['code'] = '401'
data['message'] = '邮箱不能为空'
elif not re.search(r'^\w+([-+.]\w+)*@\w+([-.]\w+)*\.\w+([-.]\w+)*$', email):
data['status'] = 'ERROR'
data['code'] = '400'
data['message'] = '请输入正确的邮箱地址'
else:
if User.objects.filter(email=email).exists():
data['status'] = 'ERROR'
data['code'] = '402'
data['message'] = '该邮箱已被使用,请换一个邮箱地址'
else:
code = ''.join(random.sample(string.ascii_letters + string.digits, 4))
now = int(time.time())
send_code_time = request.session.get('send_code_time', 0)
if now - send_code_time < 30:
data['status'] = 'ERROR'
data['code'] = '403'
data['message'] = '您操作太频繁了'
else:
request.session[email] = code
request.session['send_code_time'] = now
request.session['email'] = email
send_mail(
'绑定邮箱',
'您的验证码:%s' % code,
'847834358@qq.com',
[email],
fail_silently=False,
)
data['status'] = 'SUCCESS'
data['message'] = '发送成功'
return JsonResponse(data)
def change_password(request):
redirect_to = request.GET.get('from', reverse('blog_list'))
if request.method == 'POST':
form = ChangePassWordFormk(request.POST, user=request.user)
if form.is_valid():
new_password = form.cleaned_data['new_password']
user = form.cleaned_data['user']
user.set_password(new_password)
user.save()
return redirect(reverse('user:login'))
else:
form = ChangePassWordFormk()
context = {}
context['form'] = form
context['page_title'] = '修改密码'
context['form_title'] = '修改密码'
context['submit_text'] = '修改'
context['return_back'] = redirect_to
return render(request, 'user/change_password.html', context)
def forget_password(request):
redirect_to = request.GET.get('from', reverse('blog_list'))
context = {}
if request.method == 'POST':
form = ForgetPasswordForm(request.POST, request=request)
if form.is_valid():
email = form.cleaned_data['email']
del request.session[email]
del request.session['send_code_time']
del request.session['username_or_email']
return redirect(reverse('user:change_forget_password'))
else:
form = ForgetPasswordForm()
context['form'] = form
context['page_title'] = '忘记密码'
context['form_title'] = '找回密码'
context['submit_text'] = '提交'
context['return_back'] = redirect_to
return render(request, 'user/forget_password.html', context)
def send_verification_code_forget(request):
username_or_email = request.GET.get('username_or_email', 'None')
data = {}
if username_or_email == '':
data['status'] = 'ERROR'
data['code'] = '401'
data['message'] = '用户名或邮箱地址不能为空'
elif not User.objects.filter(email=username_or_email).exists():
if not User.objects.filter(username=username_or_email).exists():
data['status'] = 'ERROR'
data['code'] = '402'
data['message'] = '您输入的用户名或邮箱地址不存在'
else:
email = User.objects.get(username=username_or_email).email
code = ''.join(random.sample(string.ascii_letters + string.digits, 4))
now = int(time.time())
send_code_time = request.session.get('send_code_time', 0)
if now - send_code_time < 30:
data['status'] = 'ERROR'
data['code'] = '403'
data['message'] = '您操作太频繁了'
else:
request.session[email] = code
request.session['send_code_time'] = now
request.session['username_or_email'] = username_or_email
request.session['email'] = email
send_mail(
'找回密码',
'您的验证码:%s' % code,
'847834358@qq.com',
[email],
fail_silently=False,
)
data['status'] = 'SUCCESS'
data['message'] = '发送成功'
else:
code = ''.join(random.sample(string.ascii_letters + string.digits, 4))
now = int(time.time())
send_code_time = request.session.get('send_code_time', 0)
if now - send_code_time < 30:
data['status'] = 'ERROR'
data['code'] = '403'
data['message'] = '您操作太频繁了'
else:
request.session[username_or_email] = code
request.session['send_code_time'] = now
request.session['username_or_email'] = username_or_email
request.session['email'] = username_or_email
send_mail(
'找回密码',
'您的验证码:%s' % code,
'847834358@qq.com',
[username_or_email],
fail_silently=False,
)
data['status'] = 'SUCCESS'
data['message'] = '发送成功'
return JsonResponse(data)
def change_forget_password(request):
context ={}
if request.session.get('email', '') != '':
if request.method == 'POST':
email = request.session['email']
del request.session['email']
form = ChangeForgetPasswordForm(request.POST)
if form.is_valid():
new_password = form.cleaned_data['new_password']
user = User.objects.get(email=email)
user.set_password(new_password)
user.save()
return redirect(reverse('user:login'))
else:
form = ChangeForgetPasswordForm()
context['form'] = form
context['page_title'] = '重置密码'
context['form_title'] = '重置密码'
context['submit_text'] = '提交'
return render(request, 'user/change_forget_password.html', context)
else:
return redirect(reverse('blog_list'))
| true | true |
1c376569d6eef2f1a3ba85061b8c447783559e60 | 275 | py | Python | tests/utils/process.py | paweljasinski/ironclad | c37d08910dfd0cb531668e5218684130eee4e925 | [
"PSF-2.0"
] | 58 | 2015-03-02T15:13:45.000Z | 2021-07-31T16:10:13.000Z | tests/utils/process.py | paweljasinski/ironclad | c37d08910dfd0cb531668e5218684130eee4e925 | [
"PSF-2.0"
] | 4 | 2015-01-02T11:45:46.000Z | 2022-01-17T14:45:33.000Z | tests/utils/process.py | paweljasinski/ironclad | c37d08910dfd0cb531668e5218684130eee4e925 | [
"PSF-2.0"
] | 11 | 2015-01-22T11:56:32.000Z | 2020-06-02T01:40:58.000Z |
import os
def spawn(executable, *args, **kwargs):
cwd = kwargs.get('cwd')
oldCwd = os.getcwd()
if cwd:
os.chdir(cwd)
try:
result = os.spawnl(os.P_WAIT, executable, executable, *args)
finally:
os.chdir(oldCwd)
return result
| 17.1875 | 68 | 0.585455 |
import os
def spawn(executable, *args, **kwargs):
cwd = kwargs.get('cwd')
oldCwd = os.getcwd()
if cwd:
os.chdir(cwd)
try:
result = os.spawnl(os.P_WAIT, executable, executable, *args)
finally:
os.chdir(oldCwd)
return result
| true | true |
1c376586d982d9f8e652e29f7a8066a066c47dfc | 1,133 | py | Python | sql_orm/database.py | santomet/OpenSupportTool | 9be84d2b3ab8418e9ffa9ac603e6d6dc3de4cf07 | [
"MIT"
] | null | null | null | sql_orm/database.py | santomet/OpenSupportTool | 9be84d2b3ab8418e9ffa9ac603e6d6dc3de4cf07 | [
"MIT"
] | null | null | null | sql_orm/database.py | santomet/OpenSupportTool | 9be84d2b3ab8418e9ffa9ac603e6d6dc3de4cf07 | [
"MIT"
] | null | null | null | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# The default is with SQLite
SQLALCHEMY_DATABASE_URL = "sqlite:///./sql_app.db"
engine = create_engine(
SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False} # these connect_args only for SQLite
)
# MySQL example (you need apt install python3-mysqldb)
# Note that some (most of the free ones) providers limit the length of row to 767 bytes. We need more than that!
# Also MySQL often does not support VARCHAR with dynamic size of
# SQLALCHEMY_DATABASE_URL = "mysql://user:pass@db4free.net/db"
#
# engine = create_engine(
# SQLALCHEMY_DATABASE_URL
# )
# Example with Postgres (you need apt install python3-psycopg2)
# SQLALCHEMY_DATABASE_URL = "postgresql://user:pass@db.fi.muni.cz:5432/pgdb"
# engine = create_engine(
# SQLALCHEMY_DATABASE_URL
# )
# ....
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
# Dependency
def get_db():
    """Yield a database session and guarantee it is closed afterwards.

    Opens one session per call (one per request when used as a dependency)
    and closes it even if the consumer raises.
    """
    session = SessionLocal()
    try:
        yield session
    finally:
        session.close()
| 27.634146 | 112 | 0.74669 | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# SQLite database file stored next to the application.
SQLALCHEMY_DATABASE_URL = "sqlite:///./sql_app.db"
engine = create_engine(
    # check_same_thread=False is only needed for SQLite, which otherwise
    # restricts a connection to the thread that created it.
    SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
)
# Factory for per-request database sessions.
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
# Base class for all ORM models.
Base = declarative_base()
def get_db():
    """Yield a database session, closing it when the caller is done."""
    db = SessionLocal()
    try:
        yield db
    finally:
        # Always release the session, even if the consumer raised.
        db.close()
| true | true |
1c37665fcd25bae8d136d03448b80439ad84bf88 | 5,475 | py | Python | qiskit/algorithms/optimizers/nft.py | Roshan-Thomas/qiskit-terra | 77219b5c7b7146b1545c5e5190739b36f4064b2f | [
"Apache-2.0"
] | 1,599 | 2018-07-10T10:59:12.000Z | 2022-03-31T23:56:25.000Z | qiskit/algorithms/optimizers/nft.py | Roshan-Thomas/qiskit-terra | 77219b5c7b7146b1545c5e5190739b36f4064b2f | [
"Apache-2.0"
] | 5,244 | 2018-07-10T06:20:13.000Z | 2022-03-31T22:18:48.000Z | qiskit/algorithms/optimizers/nft.py | Roshan-Thomas/qiskit-terra | 77219b5c7b7146b1545c5e5190739b36f4064b2f | [
"Apache-2.0"
] | 1,409 | 2018-07-10T02:16:12.000Z | 2022-03-31T09:01:32.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Nakanishi-Fujii-Todo algorithm."""
from typing import Optional
import numpy as np
from scipy.optimize import OptimizeResult
from .scipy_optimizer import SciPyOptimizer
class NFT(SciPyOptimizer):
    """
    Nakanishi-Fujii-Todo algorithm.

    See https://arxiv.org/abs/1903.12166
    """

    _OPTIONS = ["maxiter", "maxfev", "disp", "reset_interval"]

    def __init__(
        self,
        maxiter: Optional[int] = None,
        maxfev: int = 1024,
        disp: bool = False,
        reset_interval: int = 32,
        options: Optional[dict] = None,
        **kwargs,
    ) -> None:
        """
        Built on the scipy framework; see
        https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html.

        Args:
            maxiter: Maximum number of iterations to perform.
            maxfev: Maximum number of function evaluations to perform.
            disp: disp
            reset_interval: The minimum estimates directly once
                in ``reset_interval`` times.
            options: A dictionary of solver options.
            kwargs: additional kwargs for scipy.optimize.minimize.

        Notes:
            The objective function must satisfy the three conditions
            given in Ref. [1].

        References:
            .. [1] K. M. Nakanishi, K. Fujii, and S. Todo. 2019.
                Sequential minimal optimization for quantum-classical hybrid algorithms.
                arXiv preprint arXiv:1903.12166.
        """
        if options is None:
            options = {}
        # Copy the solver-specific keyword arguments into the options dict
        # that SciPyOptimizer forwards to the minimizer.
        given = {
            "maxiter": maxiter,
            "maxfev": maxfev,
            "disp": disp,
            "reset_interval": reset_interval,
        }
        for name in self._OPTIONS:
            options[name] = given[name]
        super().__init__(method=nakanishi_fujii_todo, options=options, **kwargs)
# pylint: disable=invalid-name
def nakanishi_fujii_todo(
    fun, x0, args=(), maxiter=None, maxfev=1024, reset_interval=32, eps=1e-32, callback=None, **_
):
    """
    Minimize ``fun`` with the sequential single-parameter optimizer of
    Nakanishi, Fujii and Todo [1].

    Args:
        fun (callable): objective ``f(x, *args)``; it must satisfy the three
            conditions written in Ref. [1] (sinusoidal in each parameter).
        x0 (ndarray): initial guess, shape (n,).
        args (tuple, optional): extra arguments passed to the objective.
        maxiter (int): maximum number of single-parameter updates
            (``None`` means unbounded). Default: None.
        maxfev (int): maximum number of objective evaluations. Default: 1024.
        reset_interval (int): re-evaluate the objective directly (instead of
            reusing the analytic estimate) every ``reset_interval`` updates.
            Default: 32.
        eps (float): guard value avoiding division by zero in the arctan.
        callback (callable, optional): called with a copy of the parameters
            after each update.
        **_: ignored; accepted for minimizer-interface compatibility.

    Returns:
        OptimizeResult: result with ``x``, ``fun``, ``nit``, ``nfev`` and
        ``success`` attributes.

    References:
        .. [1] K. M. Nakanishi, K. Fujii, and S. Todo. 2019.
            Sequential minimal optimization for quantum-classical hybrid algorithms.
            arXiv preprint arXiv:1903.12166.
    """
    x0 = np.asarray(x0)
    carried_z0 = None  # analytically predicted f(x0), reused to save evaluations
    iteration = 0
    n_evals = 0

    while True:
        param_idx = iteration % x0.size

        # Periodically discard the carried estimate so numerical drift
        # cannot accumulate.
        if reset_interval > 0 and iteration % reset_interval == 0:
            carried_z0 = None

        if carried_z0 is None:
            z0 = fun(np.copy(x0), *args)
            n_evals += 1
        else:
            z0 = carried_z0

        probe = np.copy(x0)
        probe[param_idx] = x0[param_idx] + np.pi / 2
        z1 = fun(probe, *args)
        n_evals += 1

        probe = np.copy(x0)
        probe[param_idx] = x0[param_idx] - np.pi / 2
        z3 = fun(probe, *args)
        n_evals += 1

        z2 = z1 + z3 - z0
        # Fit the sinusoid a*cos(theta - b) + c through the sampled points
        # and jump directly to its minimum along this parameter.
        c = (z1 + z3) / 2
        a = np.sqrt((z0 - z2) ** 2 + (z1 - z3) ** 2) / 2
        b = np.arctan((z1 - z3) / ((z0 - z2) + eps * (z0 == z2))) + x0[param_idx]
        b += 0.5 * np.pi + 0.5 * np.pi * np.sign((z0 - z2) + eps * (z0 == z2))

        x0[param_idx] = b
        carried_z0 = c - a  # value of the fitted sinusoid at its minimum
        iteration += 1

        if callback is not None:
            callback(np.copy(x0))

        if maxfev is not None and n_evals >= maxfev:
            break
        if maxiter is not None and iteration >= maxiter:
            break

    return OptimizeResult(
        fun=fun(np.copy(x0), *args), x=x0, nit=iteration, nfev=n_evals, success=(iteration > 1)
    )
| 32.205882 | 97 | 0.581735 |
from typing import Optional
import numpy as np
from scipy.optimize import OptimizeResult
from .scipy_optimizer import SciPyOptimizer
class NFT(SciPyOptimizer):
    """
    Nakanishi-Fujii-Todo algorithm.

    See https://arxiv.org/abs/1903.12166
    """
    _OPTIONS = ["maxiter", "maxfev", "disp", "reset_interval"]
    def __init__(
        self,
        maxiter: Optional[int] = None,
        maxfev: int = 1024,
        disp: bool = False,
        reset_interval: int = 32,
        options: Optional[dict] = None,
        **kwargs,
    ) -> None:
        """
        Args:
            maxiter: Maximum number of iterations to perform.
            maxfev: Maximum number of function evaluations to perform.
            disp: disp
            reset_interval: The minimum estimates directly once
                in ``reset_interval`` times.
            options: A dictionary of solver options.
            kwargs: additional kwargs for scipy.optimize.minimize.
        """
        if options is None:
            options = {}
        # Pick the solver-specific keyword arguments out of locals() and
        # fold them into the options dict forwarded to the minimizer.
        for k, v in list(locals().items()):
            if k in self._OPTIONS:
                options[k] = v
        super().__init__(method=nakanishi_fujii_todo, options=options, **kwargs)
def nakanishi_fujii_todo(
    fun, x0, args=(), maxiter=None, maxfev=1024, reset_interval=32, eps=1e-32, callback=None, **_
):
    """
    Minimize ``fun`` with the sequential single-parameter algorithm of
    Nakanishi, Fujii and Todo (arXiv:1903.12166).

    ``fun`` must satisfy the conditions of that paper (sinusoidal in each
    single parameter); each update then jumps to the exact 1-D minimum.
    Returns a scipy ``OptimizeResult`` with ``x``, ``fun``, ``nit``,
    ``nfev`` and ``success``.
    """
    x0 = np.asarray(x0)
    recycle_z0 = None  # analytically predicted f(x0), reused to save evaluations
    niter = 0
    funcalls = 0
    while True:
        idx = niter % x0.size
        # Periodically drop the carried estimate so numerical drift
        # cannot accumulate.
        if reset_interval > 0:
            if niter % reset_interval == 0:
                recycle_z0 = None
        if recycle_z0 is None:
            z0 = fun(np.copy(x0), *args)
            funcalls += 1
        else:
            z0 = recycle_z0
        p = np.copy(x0)
        p[idx] = x0[idx] + np.pi / 2
        z1 = fun(p, *args)
        funcalls += 1
        p = np.copy(x0)
        p[idx] = x0[idx] - np.pi / 2
        z3 = fun(p, *args)
        funcalls += 1
        z2 = z1 + z3 - z0
        # Fit the sinusoid a*cos(theta - b) + c through the sampled points
        # and move this parameter straight to the fitted minimum.
        c = (z1 + z3) / 2
        a = np.sqrt((z0 - z2) ** 2 + (z1 - z3) ** 2) / 2
        b = np.arctan((z1 - z3) / ((z0 - z2) + eps * (z0 == z2))) + x0[idx]
        b += 0.5 * np.pi + 0.5 * np.pi * np.sign((z0 - z2) + eps * (z0 == z2))
        x0[idx] = b
        recycle_z0 = c - a  # value of the fitted sinusoid at its minimum
        niter += 1
        if callback is not None:
            callback(np.copy(x0))
        if maxfev is not None:
            if funcalls >= maxfev:
                break
        if maxiter is not None:
            if niter >= maxiter:
                break
    return OptimizeResult(
        fun=fun(np.copy(x0), *args), x=x0, nit=niter, nfev=funcalls, success=(niter > 1)
    )
| true | true |
1c37683682256fd47f868e979c971a1b402d935b | 10,956 | py | Python | sysinv/sysinv/sysinv/sysinv/api/controllers/v1/interface_datanetwork.py | etaivan/stx-config | 281e1f110973f96e077645fb01f67b646fc253cc | [
"Apache-2.0"
] | null | null | null | sysinv/sysinv/sysinv/sysinv/api/controllers/v1/interface_datanetwork.py | etaivan/stx-config | 281e1f110973f96e077645fb01f67b646fc253cc | [
"Apache-2.0"
] | null | null | null | sysinv/sysinv/sysinv/sysinv/api/controllers/v1/interface_datanetwork.py | etaivan/stx-config | 281e1f110973f96e077645fb01f67b646fc253cc | [
"Apache-2.0"
] | 1 | 2021-01-05T16:24:58.000Z | 2021-01-05T16:24:58.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 UnitedStack Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import uuid
import wsme
import pecan
from pecan import rest
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from sysinv.api.controllers.v1 import base
from sysinv.api.controllers.v1 import collection
from sysinv.api.controllers.v1 import types
from sysinv.api.controllers.v1 import utils
from sysinv.common import utils as cutils
from sysinv.common import constants
from sysinv.common import exception
from sysinv.openstack.common.gettextutils import _
from sysinv import objects
class InterfaceDataNetwork(base.APIBase):
    """API representation of an interface-to-datanetwork assignment."""
    id = int
    "Unique ID for this interface data network"
    uuid = types.uuid
    "Unique UUID for this interface data network"
    forihostid = int
    "The ID of the host the interface data network belongs to"
    interface_uuid = types.uuid
    "Unique UUID of the parent interface"
    ifname = wtypes.text
    "User defined name of the interface"
    datanetwork_id = int
    "Unique ID of the parent datanetwork"
    datanetwork_uuid = types.uuid
    "Unique UUID of the parent datanetwork"
    datanetwork_name = wtypes.text
    "User defined name of the datanetwork"
    network_type = wtypes.text
    "Represents the type for the datanetwork"
    def __init__(self, **kwargs):
        """Populate the declared API attributes from the RPC object fields."""
        self.fields = objects.interface_datanetwork.fields.keys()
        for k in self.fields:
            # Skip DB fields that are not exposed as API attributes.
            if not hasattr(self, k):
                continue
            setattr(self, k, kwargs.get(k, wtypes.Unset))
    @classmethod
    def convert_with_links(cls, rpc_interface_datanetwork, expand=True):
        """Build an API object from an RPC object.

        When ``expand`` is False only the summary fields are kept.
        """
        interface_datanetwork = InterfaceDataNetwork(
            **rpc_interface_datanetwork.as_dict())
        if not expand:
            interface_datanetwork.unset_fields_except([
                'forihostid', 'id', 'uuid', 'interface_uuid', 'ifname',
                'datanetwork_id', 'datanetwork_uuid',
                'datanetwork_name', 'network_type'
            ])
        return interface_datanetwork
class InterfaceDataNetworkCollection(collection.Collection):
    """API representation of a collection of interface-datanetwork assignments."""
    interface_datanetworks = [InterfaceDataNetwork]
    "A list containing Interface Data Network objects"
    def __init__(self, **kwargs):
        self._type = 'interface_datanetworks'
    @classmethod
    def convert_with_links(cls, rpc_interface_datanetwork, limit, url=None,
                           expand=False, **kwargs):
        """Build a paged API collection from a list of RPC objects."""
        collection = InterfaceDataNetworkCollection()
        collection.interface_datanetworks = [
            InterfaceDataNetwork.convert_with_links(p, expand)
            for p in rpc_interface_datanetwork]
        # 'next' carries the pagination link for the following page, if any.
        collection.next = collection.get_next(limit, url=url, **kwargs)
        return collection
LOCK_NAME = 'InterfaceDataNetworkController'  # serializes post()/delete() handling
class InterfaceDataNetworkController(rest.RestController):
    """REST controller for interface-to-datanetwork assignment resources."""
    def __init__(self, parent=None):
        # Name of the parent resource this controller is mounted under
        # ("ihosts" or "iinterfaces"); selects the query used by get_all().
        self._parent = parent
    def _create_interface_datanetwork(self, interface_datanetwork):
        """Validate and persist a new interface-datanetwork assignment."""
        interface_datanetwork_dict = interface_datanetwork.as_dict()
        interface_datanetwork_dict['uuid'] = str(uuid.uuid4())
        # Remove UUIDs from dict to be replaced with IDs
        interface_uuid = interface_datanetwork_dict.pop('interface_uuid')
        datanetwork_uuid = interface_datanetwork_dict.pop('datanetwork_uuid')
        interface_id = self._get_interface_id(interface_uuid)
        try:
            datanetwork_obj = \
                pecan.request.dbapi.datanetwork_get(datanetwork_uuid)
        except exception.DataNetworkNotFound:
            msg = _("DataNetwork with uuid '%s' does not exist. " %
                    datanetwork_uuid)
            raise wsme.exc.ClientSideError(msg)
        datanetwork_id = datanetwork_obj['id']
        interface_datanetwork_dict['interface_id'] = interface_id
        interface_datanetwork_dict['datanetwork_id'] = datanetwork_id
        interface_obj = pecan.request.dbapi.iinterface_get(interface_uuid)
        # Semantic checks: host must be locked, interface class/MTU must be
        # compatible, and the assignment must not already exist.
        self._check_host(interface_obj.ihost_uuid)
        self._check_interface_class(interface_obj)
        self._check_interface_mtu(interface_obj, datanetwork_obj)
        self._check_duplicate_interface_datanetwork(interface_datanetwork_dict)
        result = pecan.request.dbapi.interface_datanetwork_create(
            interface_datanetwork_dict)
        return InterfaceDataNetwork.convert_with_links(result)
    def _get_interface_datanetwork_collection(
            self, parent_uuid=None, marker=None, limit=None, sort_key=None,
            sort_dir=None, expand=False, resource_url=None):
        """Return a paged collection, scoped by the parent resource type."""
        limit = utils.validate_limit(limit)
        sort_dir = utils.validate_sort_dir(sort_dir)
        marker_obj = None
        if marker:
            marker_obj = objects.interface_datanetwork.get_by_uuid(
                pecan.request.context, marker)
        if self._parent == "ihosts":
            interface_datanetworks = \
                pecan.request.dbapi.interface_datanetwork_get_by_host(
                    parent_uuid,
                    limit=limit, marker=marker_obj,
                    sort_key=sort_key, sort_dir=sort_dir)
        elif self._parent == "iinterfaces":
            interface_datanetworks = \
                pecan.request.dbapi.interface_datanetwork_get_by_interface(
                    parent_uuid, limit=limit, marker=marker_obj,
                    sort_key=sort_key, sort_dir=sort_dir)
        else:
            interface_datanetworks = \
                pecan.request.dbapi.interface_datanetwork_get_all(
                    limit=limit, marker=marker_obj,
                    sort_key=sort_key, sort_dir=sort_dir)
        return InterfaceDataNetworkCollection.convert_with_links(
            interface_datanetworks, limit, url=resource_url, expand=expand,
            sort_key=sort_key, sort_dir=sort_dir)
    @staticmethod
    def _get_one(interface_datanetwork_uuid):
        """Fetch a single assignment by UUID and convert it for the API."""
        rpc_interface_datanetwork = objects.interface_datanetwork.get_by_uuid(
            pecan.request.context, interface_datanetwork_uuid)
        return InterfaceDataNetwork.convert_with_links(
            rpc_interface_datanetwork)
    @staticmethod
    def _check_interface_class(interface_obj):
        """Ensure the interface class allows datanetwork assignment.

        An unset/none class is auto-promoted to the data class; any class
        other than data is rejected.
        """
        if (not interface_obj.ifclass or
                interface_obj.ifclass == constants.INTERFACE_CLASS_NONE):
            values = {'ifclass': constants.INTERFACE_CLASS_DATA}
            pecan.request.dbapi.iinterface_update(interface_obj.uuid, values)
            return
        else:
            # Allow ifclass data to assign another; disallow other ifclass
            if interface_obj.ifclass != constants.INTERFACE_CLASS_DATA:
                msg = _("An interface with interface class '%s' "
                        "cannot assign datanetworks." %
                        interface_obj.ifclass)
                raise wsme.exc.ClientSideError(msg)
    @staticmethod
    def _check_host(host_uuid):
        """Reject the operation unless the owning host is administratively locked."""
        host = pecan.request.dbapi.ihost_get(host_uuid)
        if host.administrative != constants.ADMIN_LOCKED:
            msg = _("Operation Rejected: Host '%s' is adminstrative '%s' " %
                    (host.hostname, host.administrative))
            raise wsme.exc.ClientSideError(msg)
    @staticmethod
    def _check_interface_mtu(interface_obj, datanetwork_obj):
        """Verify the interface MTU covers the datanetwork MTU (+VXLAN overhead)."""
        if datanetwork_obj.network_type == constants.DATANETWORK_TYPE_VXLAN:
            overhead = constants.VXLAN_MTU_OVERHEAD
        else:
            overhead = 0
        if interface_obj.imtu < datanetwork_obj.mtu + overhead:
            msg = _("The interface MTU %s must be larger than the '%s' "
                    "datanetwork MTU requirement." %
                    (interface_obj.imtu, datanetwork_obj.mtu))
            raise wsme.exc.ClientSideError(msg)
    @staticmethod
    def _query_interface_datanetwork(interface_datanetwork):
        """Look up an existing assignment; return None when it does not exist."""
        try:
            result = pecan.request.dbapi.interface_datanetwork_query(
                interface_datanetwork)
        except exception.InterfaceDataNetworkNotFoundByKeys:
            return None
        return result
    def _check_duplicate_interface_datanetwork(self, interface_datanetwork):
        """Raise a client error when the same assignment already exists."""
        result = self._query_interface_datanetwork(interface_datanetwork)
        if not result:
            return
        msg = _("Interface '%s' assignment with Data Network '%s' "
                "already exists."
                % (interface_datanetwork['interface_id'],
                   interface_datanetwork['datanetwork_id']))
        raise wsme.exc.ClientSideError(msg)
    @staticmethod
    def _get_interface_id(interface_uuid):
        """Resolve an interface UUID to its database ID."""
        interface = pecan.request.dbapi.iinterface_get(interface_uuid)
        return interface['id']
    @staticmethod
    def _get_datanetwork_id_and_type(datanetwork_uuid):
        """Resolve a datanetwork UUID to its (id, network_type) pair."""
        datanetwork = pecan.request.dbapi.datanetwork_get(datanetwork_uuid)
        return datanetwork['id'], datanetwork['network_type']
    @wsme_pecan.wsexpose(InterfaceDataNetwork, types.uuid)
    def get_one(self, interface_datanetwork_uuid):
        """Retrieve one interface-datanetwork assignment."""
        return self._get_one(interface_datanetwork_uuid)
    @wsme_pecan.wsexpose(InterfaceDataNetworkCollection,
                         wtypes.text, types.uuid, int,
                         wtypes.text, wtypes.text)
    def get_all(self, parent_uuid=None, marker=None,
                limit=None, sort_key='id', sort_dir='asc'):
        """Retrieve a (paged) list of interface-datanetwork assignments."""
        return self._get_interface_datanetwork_collection(
            parent_uuid, marker, limit, sort_key, sort_dir)
    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(InterfaceDataNetwork, body=InterfaceDataNetwork)
    def post(self, interface_datanetwork):
        """Create a new interface-datanetwork assignment."""
        return self._create_interface_datanetwork(interface_datanetwork)
    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(None, types.uuid, status_code=204)
    def delete(self, interface_datanetwork_uuid):
        """Delete an assignment; allowed only while its host is locked."""
        ifdn_obj = pecan.request.dbapi.interface_datanetwork_get(
            interface_datanetwork_uuid)
        interface_obj = pecan.request.dbapi.iinterface_get(
            ifdn_obj.interface_uuid)
        self._check_host(interface_obj.ihost_uuid)
        pecan.request.dbapi.interface_datanetwork_destroy(
            interface_datanetwork_uuid)
| 38.174216 | 79 | 0.683552 |
import uuid
import wsme
import pecan
from pecan import rest
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from sysinv.api.controllers.v1 import base
from sysinv.api.controllers.v1 import collection
from sysinv.api.controllers.v1 import types
from sysinv.api.controllers.v1 import utils
from sysinv.common import utils as cutils
from sysinv.common import constants
from sysinv.common import exception
from sysinv.openstack.common.gettextutils import _
from sysinv import objects
class InterfaceDataNetwork(base.APIBase):
    """API representation of an interface-to-datanetwork assignment."""
    # Unique ID for this interface-datanetwork assignment
    id = int
    # Unique UUID for this assignment
    uuid = types.uuid
    # ID of the host the parent interface belongs to
    forihostid = int
    # UUID of the parent interface
    interface_uuid = types.uuid
    # User-defined name of the parent interface
    ifname = wtypes.text
    # ID of the assigned datanetwork
    datanetwork_id = int
    # UUID of the assigned datanetwork
    datanetwork_uuid = types.uuid
    # User-defined name of the assigned datanetwork
    datanetwork_name = wtypes.text
    # Type of the assigned datanetwork
    network_type = wtypes.text
    def __init__(self, **kwargs):
        """Populate the declared API attributes from the RPC object fields."""
        self.fields = objects.interface_datanetwork.fields.keys()
        for k in self.fields:
            # Skip DB fields that are not exposed as API attributes.
            if not hasattr(self, k):
                continue
            setattr(self, k, kwargs.get(k, wtypes.Unset))
    @classmethod
    def convert_with_links(cls, rpc_interface_datanetwork, expand=True):
        """Build an API object from an RPC object.

        When ``expand`` is False only the summary fields are kept.
        """
        interface_datanetwork = InterfaceDataNetwork(
            **rpc_interface_datanetwork.as_dict())
        if not expand:
            interface_datanetwork.unset_fields_except([
                'forihostid', 'id', 'uuid', 'interface_uuid', 'ifname',
                'datanetwork_id', 'datanetwork_uuid',
                'datanetwork_name', 'network_type'
            ])
        return interface_datanetwork
class InterfaceDataNetworkCollection(collection.Collection):
    """API representation of a collection of interface-datanetwork assignments."""
    # List of InterfaceDataNetwork objects in this page of results
    interface_datanetworks = [InterfaceDataNetwork]
    def __init__(self, **kwargs):
        self._type = 'interface_datanetworks'
    @classmethod
    def convert_with_links(cls, rpc_interface_datanetwork, limit, url=None,
                           expand=False, **kwargs):
        """Build a paged API collection from a list of RPC objects."""
        collection = InterfaceDataNetworkCollection()
        collection.interface_datanetworks = [
            InterfaceDataNetwork.convert_with_links(p, expand)
            for p in rpc_interface_datanetwork]
        # 'next' carries the pagination link for the following page, if any.
        collection.next = collection.get_next(limit, url=url, **kwargs)
        return collection
LOCK_NAME = 'InterfaceDataNetworkController'  # serializes post()/delete() handling
class InterfaceDataNetworkController(rest.RestController):
    """REST controller for interface-to-datanetwork assignment resources."""
    def __init__(self, parent=None):
        # Parent mount point ("ihosts" or "iinterfaces"); scopes get_all().
        self._parent = parent
    def _create_interface_datanetwork(self, interface_datanetwork):
        """Validate and persist a new interface-datanetwork assignment."""
        interface_datanetwork_dict = interface_datanetwork.as_dict()
        interface_datanetwork_dict['uuid'] = str(uuid.uuid4())
        # Replace the UUIDs supplied by the client with database IDs.
        interface_uuid = interface_datanetwork_dict.pop('interface_uuid')
        datanetwork_uuid = interface_datanetwork_dict.pop('datanetwork_uuid')
        interface_id = self._get_interface_id(interface_uuid)
        try:
            datanetwork_obj = \
                pecan.request.dbapi.datanetwork_get(datanetwork_uuid)
        except exception.DataNetworkNotFound:
            msg = _("DataNetwork with uuid '%s' does not exist. " %
                    datanetwork_uuid)
            raise wsme.exc.ClientSideError(msg)
        datanetwork_id = datanetwork_obj['id']
        interface_datanetwork_dict['interface_id'] = interface_id
        interface_datanetwork_dict['datanetwork_id'] = datanetwork_id
        interface_obj = pecan.request.dbapi.iinterface_get(interface_uuid)
        # Semantic checks: locked host, compatible class/MTU, no duplicate.
        self._check_host(interface_obj.ihost_uuid)
        self._check_interface_class(interface_obj)
        self._check_interface_mtu(interface_obj, datanetwork_obj)
        self._check_duplicate_interface_datanetwork(interface_datanetwork_dict)
        result = pecan.request.dbapi.interface_datanetwork_create(
            interface_datanetwork_dict)
        return InterfaceDataNetwork.convert_with_links(result)
    def _get_interface_datanetwork_collection(
            self, parent_uuid=None, marker=None, limit=None, sort_key=None,
            sort_dir=None, expand=False, resource_url=None):
        """Return a paged collection, scoped by the parent resource type."""
        limit = utils.validate_limit(limit)
        sort_dir = utils.validate_sort_dir(sort_dir)
        marker_obj = None
        if marker:
            marker_obj = objects.interface_datanetwork.get_by_uuid(
                pecan.request.context, marker)
        if self._parent == "ihosts":
            interface_datanetworks = \
                pecan.request.dbapi.interface_datanetwork_get_by_host(
                    parent_uuid,
                    limit=limit, marker=marker_obj,
                    sort_key=sort_key, sort_dir=sort_dir)
        elif self._parent == "iinterfaces":
            interface_datanetworks = \
                pecan.request.dbapi.interface_datanetwork_get_by_interface(
                    parent_uuid, limit=limit, marker=marker_obj,
                    sort_key=sort_key, sort_dir=sort_dir)
        else:
            interface_datanetworks = \
                pecan.request.dbapi.interface_datanetwork_get_all(
                    limit=limit, marker=marker_obj,
                    sort_key=sort_key, sort_dir=sort_dir)
        return InterfaceDataNetworkCollection.convert_with_links(
            interface_datanetworks, limit, url=resource_url, expand=expand,
            sort_key=sort_key, sort_dir=sort_dir)
    @staticmethod
    def _get_one(interface_datanetwork_uuid):
        """Fetch a single assignment by UUID and convert it for the API."""
        rpc_interface_datanetwork = objects.interface_datanetwork.get_by_uuid(
            pecan.request.context, interface_datanetwork_uuid)
        return InterfaceDataNetwork.convert_with_links(
            rpc_interface_datanetwork)
    @staticmethod
    def _check_interface_class(interface_obj):
        """Ensure the interface class allows datanetwork assignment.

        An unset/none class is auto-promoted to the data class; any class
        other than data is rejected.
        """
        if (not interface_obj.ifclass or
                interface_obj.ifclass == constants.INTERFACE_CLASS_NONE):
            values = {'ifclass': constants.INTERFACE_CLASS_DATA}
            pecan.request.dbapi.iinterface_update(interface_obj.uuid, values)
            return
        else:
            # Only the data interface class may carry datanetworks.
            if interface_obj.ifclass != constants.INTERFACE_CLASS_DATA:
                msg = _("An interface with interface class '%s' "
                        "cannot assign datanetworks." %
                        interface_obj.ifclass)
                raise wsme.exc.ClientSideError(msg)
    @staticmethod
    def _check_host(host_uuid):
        """Reject the operation unless the owning host is administratively locked."""
        host = pecan.request.dbapi.ihost_get(host_uuid)
        if host.administrative != constants.ADMIN_LOCKED:
            msg = _("Operation Rejected: Host '%s' is adminstrative '%s' " %
                    (host.hostname, host.administrative))
            raise wsme.exc.ClientSideError(msg)
    @staticmethod
    def _check_interface_mtu(interface_obj, datanetwork_obj):
        """Verify the interface MTU covers the datanetwork MTU (+VXLAN overhead)."""
        if datanetwork_obj.network_type == constants.DATANETWORK_TYPE_VXLAN:
            overhead = constants.VXLAN_MTU_OVERHEAD
        else:
            overhead = 0
        if interface_obj.imtu < datanetwork_obj.mtu + overhead:
            msg = _("The interface MTU %s must be larger than the '%s' "
                    "datanetwork MTU requirement." %
                    (interface_obj.imtu, datanetwork_obj.mtu))
            raise wsme.exc.ClientSideError(msg)
    @staticmethod
    def _query_interface_datanetwork(interface_datanetwork):
        """Look up an existing assignment; return None when it does not exist."""
        try:
            result = pecan.request.dbapi.interface_datanetwork_query(
                interface_datanetwork)
        except exception.InterfaceDataNetworkNotFoundByKeys:
            return None
        return result
    def _check_duplicate_interface_datanetwork(self, interface_datanetwork):
        """Raise a client error when the same assignment already exists."""
        result = self._query_interface_datanetwork(interface_datanetwork)
        if not result:
            return
        msg = _("Interface '%s' assignment with Data Network '%s' "
                "already exists."
                % (interface_datanetwork['interface_id'],
                   interface_datanetwork['datanetwork_id']))
        raise wsme.exc.ClientSideError(msg)
    @staticmethod
    def _get_interface_id(interface_uuid):
        """Resolve an interface UUID to its database ID."""
        interface = pecan.request.dbapi.iinterface_get(interface_uuid)
        return interface['id']
    @staticmethod
    def _get_datanetwork_id_and_type(datanetwork_uuid):
        """Resolve a datanetwork UUID to its (id, network_type) pair."""
        datanetwork = pecan.request.dbapi.datanetwork_get(datanetwork_uuid)
        return datanetwork['id'], datanetwork['network_type']
    @wsme_pecan.wsexpose(InterfaceDataNetwork, types.uuid)
    def get_one(self, interface_datanetwork_uuid):
        """Retrieve one interface-datanetwork assignment."""
        return self._get_one(interface_datanetwork_uuid)
    @wsme_pecan.wsexpose(InterfaceDataNetworkCollection,
                         wtypes.text, types.uuid, int,
                         wtypes.text, wtypes.text)
    def get_all(self, parent_uuid=None, marker=None,
                limit=None, sort_key='id', sort_dir='asc'):
        """Retrieve a (paged) list of interface-datanetwork assignments."""
        return self._get_interface_datanetwork_collection(
            parent_uuid, marker, limit, sort_key, sort_dir)
    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(InterfaceDataNetwork, body=InterfaceDataNetwork)
    def post(self, interface_datanetwork):
        """Create a new interface-datanetwork assignment."""
        return self._create_interface_datanetwork(interface_datanetwork)
    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(None, types.uuid, status_code=204)
    def delete(self, interface_datanetwork_uuid):
        """Delete an assignment; allowed only while its host is locked."""
        ifdn_obj = pecan.request.dbapi.interface_datanetwork_get(
            interface_datanetwork_uuid)
        interface_obj = pecan.request.dbapi.iinterface_get(
            ifdn_obj.interface_uuid)
        self._check_host(interface_obj.ihost_uuid)
        pecan.request.dbapi.interface_datanetwork_destroy(
            interface_datanetwork_uuid)
| true | true |
1c37683969b46b880d6a04272843917e691b430e | 1,855 | py | Python | tests/components/config/test_init.py | milaq/home-assistant | c32300a3868aceb1c4f2ba5a17f69d6ba9651baa | [
"Apache-2.0"
] | null | null | null | tests/components/config/test_init.py | milaq/home-assistant | c32300a3868aceb1c4f2ba5a17f69d6ba9651baa | [
"Apache-2.0"
] | null | null | null | tests/components/config/test_init.py | milaq/home-assistant | c32300a3868aceb1c4f2ba5a17f69d6ba9651baa | [
"Apache-2.0"
] | 2 | 2018-06-03T11:14:44.000Z | 2018-11-04T18:18:12.000Z | """Test config init."""
import asyncio
from unittest.mock import patch
import pytest
from homeassistant.const import EVENT_COMPONENT_LOADED
from homeassistant.bootstrap import async_setup_component, ATTR_COMPONENT
from homeassistant.components import config
from tests.common import mock_http_component, mock_coro, mock_component
@pytest.fixture(autouse=True)
def stub_http(hass):
    """Stub the HTTP component for every test in this module."""
    mock_http_component(hass)
@asyncio.coroutine
def test_config_setup(hass, loop):
    """Test that the config component sets up successfully."""
    yield from async_setup_component(hass, 'config', {})
    assert 'config' in hass.config.components
@asyncio.coroutine
def test_load_on_demand_already_loaded(hass, test_client):
    """Test an on-demand config panel loads when its component is already set up."""
    mock_component(hass, 'zwave')
    with patch.object(config, 'SECTIONS', []), \
            patch.object(config, 'ON_DEMAND', ['zwave']), \
            patch('homeassistant.components.config.zwave.async_setup') as stp:
        stp.return_value = mock_coro(True)
        yield from async_setup_component(hass, 'config', {})
    yield from hass.async_block_till_done()
    assert 'config.zwave' in hass.config.components
    assert stp.called
@asyncio.coroutine
def test_load_on_demand_on_load(hass, test_client):
    """Test an on-demand config panel loads when its component loads later."""
    with patch.object(config, 'SECTIONS', []), \
            patch.object(config, 'ON_DEMAND', ['zwave']):
        yield from async_setup_component(hass, 'config', {})
    # Not loaded yet: the zwave component has not been set up.
    assert 'config.zwave' not in hass.config.components
    with patch('homeassistant.components.config.zwave.async_setup') as stp:
        stp.return_value = mock_coro(True)
        hass.bus.async_fire(EVENT_COMPONENT_LOADED, {ATTR_COMPONENT: 'zwave'})
        yield from hass.async_block_till_done()
    assert 'config.zwave' in hass.config.components
    assert stp.called
| 30.916667 | 78 | 0.721294 | import asyncio
from unittest.mock import patch
import pytest
from homeassistant.const import EVENT_COMPONENT_LOADED
from homeassistant.bootstrap import async_setup_component, ATTR_COMPONENT
from homeassistant.components import config
from tests.common import mock_http_component, mock_coro, mock_component
@pytest.fixture(autouse=True)
def stub_http(hass):
    """Stub the HTTP component so tests need no real HTTP server."""
    mock_http_component(hass)
@asyncio.coroutine
def test_config_setup(hass, loop):
    """Test that the config component sets up successfully."""
    yield from async_setup_component(hass, 'config', {})
    assert 'config' in hass.config.components
@asyncio.coroutine
def test_load_on_demand_already_loaded(hass, test_client):
    """Test an on-demand config panel loads when its component is already set up."""
    mock_component(hass, 'zwave')
    with patch.object(config, 'SECTIONS', []), \
            patch.object(config, 'ON_DEMAND', ['zwave']), \
            patch('homeassistant.components.config.zwave.async_setup') as stp:
        stp.return_value = mock_coro(True)
        yield from async_setup_component(hass, 'config', {})
    yield from hass.async_block_till_done()
    assert 'config.zwave' in hass.config.components
    assert stp.called
@asyncio.coroutine
def test_load_on_demand_on_load(hass, test_client):
    """Test an on-demand config panel loads when its component loads later."""
    with patch.object(config, 'SECTIONS', []), \
            patch.object(config, 'ON_DEMAND', ['zwave']):
        yield from async_setup_component(hass, 'config', {})
    # Not loaded yet: the zwave component has not been set up.
    assert 'config.zwave' not in hass.config.components
    with patch('homeassistant.components.config.zwave.async_setup') as stp:
        stp.return_value = mock_coro(True)
        hass.bus.async_fire(EVENT_COMPONENT_LOADED, {ATTR_COMPONENT: 'zwave'})
        yield from hass.async_block_till_done()
    assert 'config.zwave' in hass.config.components
    assert stp.called
| true | true |
1c376971c33a5b04365014688e29dbe6af0fb22f | 1,609 | py | Python | monero_glue/xmr/sub/creds.py | ph4r05/monero-agent | 0bac0e6f33142b2bb885565bfd1ef8ac04559280 | [
"MIT"
] | 20 | 2018-04-05T22:06:10.000Z | 2021-09-18T10:43:44.000Z | monero_glue/xmr/sub/creds.py | ph4r05/monero-agent | 0bac0e6f33142b2bb885565bfd1ef8ac04559280 | [
"MIT"
] | null | null | null | monero_glue/xmr/sub/creds.py | ph4r05/monero-agent | 0bac0e6f33142b2bb885565bfd1ef8ac04559280 | [
"MIT"
] | 5 | 2018-08-06T15:06:04.000Z | 2021-07-16T01:58:43.000Z | from monero_glue.xmr import crypto
from monero_glue.xmr.sub.addr import encode_addr
from monero_glue.xmr.sub.xmr_net import NetworkTypes, net_version
from typing import Optional
from monero_glue.xmr.crypto import Ge25519, Sc25519
class AccountCreds(object):
    """
    Stores an account's private keys, derived public keys and address.
    """

    def __init__(
        self,
        view_key_private: Optional[Sc25519] = None,
        spend_key_private: Optional[Sc25519] = None,
        view_key_public: Optional[Ge25519] = None,
        spend_key_public: Optional[Ge25519] = None,
        address=None,
        network_type=NetworkTypes.MAINNET,
    ):
        self.view_key_private = view_key_private
        self.spend_key_private = spend_key_private
        self.view_key_public = view_key_public
        self.spend_key_public = spend_key_public
        self.address = address
        self.network_type = network_type
        # Multisig key material; empty for a plain (non-multisig) wallet.
        self.multisig_keys = []

    @classmethod
    def new_wallet(
        cls, priv_view_key, priv_spend_key, network_type=NetworkTypes.MAINNET
    ):
        """Derive the public keys and address from the private keys and build creds."""
        pub_view = crypto.scalarmult_base(priv_view_key)
        pub_spend = crypto.scalarmult_base(priv_spend_key)
        wallet_address = encode_addr(
            net_version(network_type),
            crypto.encodepoint(pub_spend),
            crypto.encodepoint(pub_view),
        )
        return cls(
            view_key_private=priv_view_key,
            spend_key_private=priv_spend_key,
            view_key_public=pub_view,
            spend_key_public=pub_spend,
            address=wallet_address,
            network_type=network_type,
        )
| 32.836735 | 77 | 0.67371 | from monero_glue.xmr import crypto
from monero_glue.xmr.sub.addr import encode_addr
from monero_glue.xmr.sub.xmr_net import NetworkTypes, net_version
from typing import Optional
from monero_glue.xmr.crypto import Ge25519, Sc25519
class AccountCreds(object):
    """Stores an account's private keys, derived public keys and address."""
    def __init__(
        self,
        view_key_private: Optional[Sc25519] = None,
        spend_key_private: Optional[Sc25519] = None,
        view_key_public: Optional[Ge25519] = None,
        spend_key_public: Optional[Ge25519] = None,
        address=None,
        network_type=NetworkTypes.MAINNET,
    ):
        self.view_key_private = view_key_private
        self.view_key_public = view_key_public
        self.spend_key_private = spend_key_private
        self.spend_key_public = spend_key_public
        self.address = address
        self.network_type = network_type
        # Multisig key material; empty for a plain (non-multisig) wallet.
        self.multisig_keys = []
    @classmethod
    def new_wallet(
        cls, priv_view_key, priv_spend_key, network_type=NetworkTypes.MAINNET
    ):
        """Derive the public keys and address from the private keys and build creds."""
        pub_view_key = crypto.scalarmult_base(priv_view_key)
        pub_spend_key = crypto.scalarmult_base(priv_spend_key)
        addr = encode_addr(
            net_version(network_type),
            crypto.encodepoint(pub_spend_key),
            crypto.encodepoint(pub_view_key),
        )
        return cls(
            view_key_private=priv_view_key,
            spend_key_private=priv_spend_key,
            view_key_public=pub_view_key,
            spend_key_public=pub_spend_key,
            address=addr,
            network_type=network_type,
        )
| true | true |
1c3769b750e9604dea47316cfeafb47fc3de7e9c | 468 | py | Python | doorbot/worker.py | masom/doorbot-api-python | daad4bd279661bee268ae2584769f09e4a132982 | [
"MIT"
] | null | null | null | doorbot/worker.py | masom/doorbot-api-python | daad4bd279661bee268ae2584769f09e4a132982 | [
"MIT"
] | null | null | null | doorbot/worker.py | masom/doorbot-api-python | daad4bd279661bee268ae2584769f09e4a132982 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from flask import Flask
from .db import db
from celery import Celery
worker = Flask(__name__)
worker.config.from_pyfile('../config.py')
db.init_app(worker)
celery = Celery(__name__)
celery.conf.update(
CELERY_IMPORTS=worker.config.get('CELERY_IMPORTS', ('doorbot.tasks',)),
BROKER_URL=worker.config.get('CELERY_BROKER_URL'),
CELERY_ACCEPT_CONTENT=['json'],
CELERY_TASK_SERIALIZER='json',
CELERY_RESULT_SERIALIZER='json'
)
| 26 | 75 | 0.735043 |
from flask import Flask
from .db import db
from celery import Celery
worker = Flask(__name__)
worker.config.from_pyfile('../config.py')
db.init_app(worker)
celery = Celery(__name__)
celery.conf.update(
CELERY_IMPORTS=worker.config.get('CELERY_IMPORTS', ('doorbot.tasks',)),
BROKER_URL=worker.config.get('CELERY_BROKER_URL'),
CELERY_ACCEPT_CONTENT=['json'],
CELERY_TASK_SERIALIZER='json',
CELERY_RESULT_SERIALIZER='json'
)
| true | true |
1c376a2f136c794225944f9fc835de442fc0d8d5 | 897 | py | Python | tests/test_driving_cycles.py | vishalbelsare/carculator | 44516a5f3e7f7f42f0d0d7a5c2bd5af3d17d0fd4 | [
"BSD-3-Clause"
] | 32 | 2019-11-05T03:46:56.000Z | 2022-01-10T09:34:20.000Z | tests/test_driving_cycles.py | vishalbelsare/carculator | 44516a5f3e7f7f42f0d0d7a5c2bd5af3d17d0fd4 | [
"BSD-3-Clause"
] | 17 | 2019-08-05T15:46:43.000Z | 2022-03-08T16:57:55.000Z | tests/test_driving_cycles.py | vishalbelsare/carculator | 44516a5f3e7f7f42f0d0d7a5c2bd5af3d17d0fd4 | [
"BSD-3-Clause"
] | 8 | 2019-09-26T08:33:44.000Z | 2021-07-17T12:41:26.000Z | import numpy as np
import pytest
from carculator.driving_cycles import get_standard_driving_cycle
def test_cycle_retrieval_default():
dc = get_standard_driving_cycle()
assert isinstance(dc, np.ndarray)
assert dc.sum() == 83744.6
def test_cycle_retrieval_wltc():
dc = get_standard_driving_cycle("WLTC")
assert isinstance(dc, np.ndarray)
assert dc.sum() == 83744.6
def test_cycle_retrieval_nedc():
dc = get_standard_driving_cycle("NEDC")
assert isinstance(dc, np.ndarray)
assert dc.sum() == 39353.0
def test_cycle_retrieval_cadc():
dc = get_standard_driving_cycle("CADC")
assert isinstance(dc, np.ndarray)
assert dc.sum() == 186074.2
def test_missing_cycle():
with pytest.raises(SystemExit) as wrapped_error:
get_standard_driving_cycle("Foo")
assert wrapped_error.type == SystemExit
assert wrapped_error.value.code == 1
| 24.916667 | 64 | 0.731327 | import numpy as np
import pytest
from carculator.driving_cycles import get_standard_driving_cycle
def test_cycle_retrieval_default():
dc = get_standard_driving_cycle()
assert isinstance(dc, np.ndarray)
assert dc.sum() == 83744.6
def test_cycle_retrieval_wltc():
dc = get_standard_driving_cycle("WLTC")
assert isinstance(dc, np.ndarray)
assert dc.sum() == 83744.6
def test_cycle_retrieval_nedc():
dc = get_standard_driving_cycle("NEDC")
assert isinstance(dc, np.ndarray)
assert dc.sum() == 39353.0
def test_cycle_retrieval_cadc():
dc = get_standard_driving_cycle("CADC")
assert isinstance(dc, np.ndarray)
assert dc.sum() == 186074.2
def test_missing_cycle():
with pytest.raises(SystemExit) as wrapped_error:
get_standard_driving_cycle("Foo")
assert wrapped_error.type == SystemExit
assert wrapped_error.value.code == 1
| true | true |
1c376a4f2c15dbc72953409f6e73a7d2393354e7 | 9,027 | py | Python | ravenframework/Optimizers/mutators/mutators.py | dgarrett622/raven | f36cc108f7500b0e2717df4832b69b801b43960d | [
"Apache-2.0"
] | null | null | null | ravenframework/Optimizers/mutators/mutators.py | dgarrett622/raven | f36cc108f7500b0e2717df4832b69b801b43960d | [
"Apache-2.0"
] | null | null | null | ravenframework/Optimizers/mutators/mutators.py | dgarrett622/raven | f36cc108f7500b0e2717df4832b69b801b43960d | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implementation of mutators for Mutation process of Genetic Algorithm
currently the implemented mutation algorithms are:
1. swapMutator
2. scrambleMutator
3. bitFlipMutator
4. inversionMutator
Created June,16,2020
@authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi
"""
import numpy as np
import xarray as xr
from operator import itemgetter
from ...utils import utils, randomUtils
def swapMutator(offSprings, distDict, **kwargs):
"""
This method performs the swap mutator. For each child, two genes are sampled and switched
E.g.:
child=[a,b,c,d,e] --> b and d are selected --> child = [a,d,c,b,e]
@ In, offSprings, xr.DataArray, children resulting from the crossover process
@ In, kwargs, dict, dictionary of parameters for this mutation method:
locs, list, the 2 locations of the genes to be swapped
mutationProb, float, probability that governs the mutation process, i.e., if prob < random number, then the mutation will occur
variables, list, variables names.
@ Out, children, xr.DataArray, the mutated chromosome, i.e., the child.
"""
if kwargs['locs'] == None:
locs = list(set(randomUtils.randomChoice(list(np.arange(offSprings.data.shape[1])),size=2,replace=False)))
loc1 = locs[0]
loc2 = locs[1]
else:
loc1 = kwargs['locs'][0]
loc2 = kwargs['locs'][1]
# initializing children
children = xr.DataArray(np.zeros((np.shape(offSprings))),
dims=['chromosome','Gene'],
coords={'chromosome': np.arange(np.shape(offSprings)[0]),
'Gene':kwargs['variables']})
for i in range(np.shape(offSprings)[0]):
children[i] = offSprings[i]
## TODO What happens if loc1 or 2 is out of range?! should we raise an error?
if randomUtils.random(dim=1,samples=1)<=kwargs['mutationProb']:
# convert loc1 and loc2 in terms on cdf values
cdf1 = distDict[offSprings.coords['Gene'].values[loc1]].cdf(float(offSprings[i,loc1].values))
cdf2 = distDict[offSprings.coords['Gene'].values[loc2]].cdf(float(offSprings[i,loc2].values))
children[i,loc1] = distDict[offSprings.coords['Gene'].values[loc1]].ppf(cdf2)
children[i,loc2] = distDict[offSprings.coords['Gene'].values[loc2]].ppf(cdf1)
return children
# @profile
def scrambleMutator(offSprings, distDict, **kwargs):
"""
This method performs the scramble mutator. For each child, a subset of genes is chosen
and their values are shuffled randomly.
@ In, offSprings, xr.DataArray, offsprings after crossover
@ In, kwargs, dict, dictionary of parameters for this mutation method:
chromosome, numpy.array, the chromosome that will mutate to the new child
locs, list, the locations of the genes to be randomly scrambled
mutationProb, float, probability that governs the mutation process, i.e., if prob < random number, then the mutation will occur
variables, list, variables names.
@ Out, child, np.array, the mutated chromosome, i.e., the child.
"""
locs = kwargs['locs']
if locs == None:
nLocs = randomUtils.randomIntegers(0,offSprings.sizes['Gene']-1,None)
locs=[]
for i in range(nLocs):
l = randomUtils.randomIntegers(0,offSprings.sizes['Gene']-1,None)
locs.append(l)
locs = list(set(locs))
# initializing children
children = xr.DataArray(np.zeros((np.shape(offSprings))),
dims=['chromosome','Gene'],
coords={'chromosome': np.arange(np.shape(offSprings)[0]),
'Gene':kwargs['variables']})
for i in range(np.shape(offSprings)[0]):
for j in range(np.shape(offSprings)[1]):
children[i,j] = distDict[offSprings[i].coords['Gene'].values[j]].cdf(float(offSprings[i,j].values))
for i in range(np.shape(offSprings)[0]):
children[i] = offSprings[i]
for ind,element in enumerate(locs):
if randomUtils.random(dim=1,samples=1)< kwargs['mutationProb']:
children[i,locs[0]:locs[-1]+1] = randomUtils.randomPermutation(list(offSprings.data[i,locs[0]:locs[-1]+1]),None)
for i in range(np.shape(offSprings)[0]):
for j in range(np.shape(offSprings)[1]):
children[i,j] = distDict[offSprings.coords['Gene'].values[j]].ppf(children[i,j])
return children
def bitFlipMutator(offSprings,**kwargs):
"""
This method is designed to flip a single gene in each chromosome with probability = mutationProb.
E.g. gene at location loc is flipped from current value to newValue
The gene to be flipped is completely random.
The new value of the flipped gene is is completely random.
@ In, offSprings, xr.DataArray, children resulting from the crossover process
@ In, kwargs, dict, dictionary of parameters for this mutation method:
mutationProb, float, probability that governs the mutation process, i.e., if prob < random number, then the mutation will occur
@ Out, offSprings, xr.DataArray, children resulting from the crossover process
"""
for child in offSprings:
# the mutation is performed for each child independently
if randomUtils.random(dim=1,samples=1)<kwargs['mutationProb']:
# sample gene location to be flipped: i.e., determine loc
chromosomeSize = child.values.shape[0]
loc = randomUtils.randomIntegers(0, chromosomeSize, caller=None, engine=None)
##############
# sample value: i.e., determine newValue
if kwargs['sampleRange']=='local':
rangeValues = list(set(offSprings[:,loc].values))
else: #kwargs['sampleRange']=='global'
rangeValues = offSprings.values.ravel().tolist()
rangeValues.pop(child.values[loc])
newValuePos = randomUtils.randomIntegers(0, len(rangeValues), caller=None, engine=None)
newValue = rangeValues[newValuePos]
##############
# gene at location loc is flipped from current value to newValue
child.values[loc] = newValue
return offSprings
def inversionMutator(offSprings, distDict, **kwargs):
"""
This method is designed mirror a sequence of genes in each chromosome with probability = mutationProb.
The sequence of genes to be mirrored is completely random.
E.g. given chromosome C = [0,1,2,3,4,5,6,7,8,9] and sampled locL=2 locU=6;
New chromosome C' = [0,1,6,5,4,3,2,7,8,9]
@ In, offSprings, xr.DataArray, children resulting from the crossover process
@ In, kwargs, dict, dictionary of parameters for this mutation method:
mutationProb, float, probability that governs the mutation process, i.e., if prob < random number, then the mutation will occur
@ Out, offSprings, xr.DataArray, children resulting from the crossover process
"""
for child in offSprings:
# the mutation is performed for each child independently
if randomUtils.random(dim=1,samples=1)<kwargs['mutationProb']:
# sample gene locations: i.e., determine loc1 and loc2
locRangeList = list(range(0,child.values.shape[0]))
index1 = randomUtils.randomIntegers(0, len(locRangeList), caller=None, engine=None)
loc1 = locRangeList[index1]
locRangeList.pop(loc1)
index2 = randomUtils.randomIntegers(0, len(locRangeList), caller=None, engine=None)
loc2 = locRangeList[index2]
if loc1>loc2:
locL=loc2
locU=loc1
elif loc1<loc2:
locL=loc1
locU=loc2
##############
# select sequence to be mirrored and mirror it
seq=child.values[locL:locU+1]
for elem in seq:
elem = distDict[child.coords['Gene'].values[elem]].cdf(float(child[elem].values))
mirrSeq = seq[::-1]
for elem in mirrSeq:
elem = distDict[child.coords['Gene'].values[elem]].ppf(elem)
##############
# insert mirrored sequence into child
child.values[locL:locU+1]=mirrSeq
return offSprings
__mutators = {}
__mutators['swapMutator'] = swapMutator
__mutators['scrambleMutator'] = scrambleMutator
__mutators['bitFlipMutator'] = bitFlipMutator
__mutators['inversionMutator'] = inversionMutator
def returnInstance(cls, name):
"""
Method designed to return class instance:
@ In, cls, class type
@ In, name, string, name of class
@ Out, __crossovers[name], instance of class
"""
if name not in __mutators:
cls.raiseAnError (IOError, "{} MECHANISM NOT IMPLEMENTED!!!!!".format(name))
return __mutators[name]
| 44.910448 | 137 | 0.680403 |
import numpy as np
import xarray as xr
from operator import itemgetter
from ...utils import utils, randomUtils
def swapMutator(offSprings, distDict, **kwargs):
if kwargs['locs'] == None:
locs = list(set(randomUtils.randomChoice(list(np.arange(offSprings.data.shape[1])),size=2,replace=False)))
loc1 = locs[0]
loc2 = locs[1]
else:
loc1 = kwargs['locs'][0]
loc2 = kwargs['locs'][1]
children = xr.DataArray(np.zeros((np.shape(offSprings))),
dims=['chromosome','Gene'],
coords={'chromosome': np.arange(np.shape(offSprings)[0]),
'Gene':kwargs['variables']})
for i in range(np.shape(offSprings)[0]):
children[i] = offSprings[i]
cdf1 = distDict[offSprings.coords['Gene'].values[loc1]].cdf(float(offSprings[i,loc1].values))
cdf2 = distDict[offSprings.coords['Gene'].values[loc2]].cdf(float(offSprings[i,loc2].values))
children[i,loc1] = distDict[offSprings.coords['Gene'].values[loc1]].ppf(cdf2)
children[i,loc2] = distDict[offSprings.coords['Gene'].values[loc2]].ppf(cdf1)
return children
def scrambleMutator(offSprings, distDict, **kwargs):
locs = kwargs['locs']
if locs == None:
nLocs = randomUtils.randomIntegers(0,offSprings.sizes['Gene']-1,None)
locs=[]
for i in range(nLocs):
l = randomUtils.randomIntegers(0,offSprings.sizes['Gene']-1,None)
locs.append(l)
locs = list(set(locs))
children = xr.DataArray(np.zeros((np.shape(offSprings))),
dims=['chromosome','Gene'],
coords={'chromosome': np.arange(np.shape(offSprings)[0]),
'Gene':kwargs['variables']})
for i in range(np.shape(offSprings)[0]):
for j in range(np.shape(offSprings)[1]):
children[i,j] = distDict[offSprings[i].coords['Gene'].values[j]].cdf(float(offSprings[i,j].values))
for i in range(np.shape(offSprings)[0]):
children[i] = offSprings[i]
for ind,element in enumerate(locs):
if randomUtils.random(dim=1,samples=1)< kwargs['mutationProb']:
children[i,locs[0]:locs[-1]+1] = randomUtils.randomPermutation(list(offSprings.data[i,locs[0]:locs[-1]+1]),None)
for i in range(np.shape(offSprings)[0]):
for j in range(np.shape(offSprings)[1]):
children[i,j] = distDict[offSprings.coords['Gene'].values[j]].ppf(children[i,j])
return children
def bitFlipMutator(offSprings,**kwargs):
for child in offSprings:
if randomUtils.random(dim=1,samples=1)<kwargs['mutationProb']:
chromosomeSize = child.values.shape[0]
loc = randomUtils.randomIntegers(0, chromosomeSize, caller=None, engine=None)
:,loc].values))
else:
rangeValues = offSprings.values.ravel().tolist()
rangeValues.pop(child.values[loc])
newValuePos = randomUtils.randomIntegers(0, len(rangeValues), caller=None, engine=None)
newValue = rangeValues[newValuePos]
rings, distDict, **kwargs):
for child in offSprings:
if randomUtils.random(dim=1,samples=1)<kwargs['mutationProb']:
locRangeList = list(range(0,child.values.shape[0]))
index1 = randomUtils.randomIntegers(0, len(locRangeList), caller=None, engine=None)
loc1 = locRangeList[index1]
locRangeList.pop(loc1)
index2 = randomUtils.randomIntegers(0, len(locRangeList), caller=None, engine=None)
loc2 = locRangeList[index2]
if loc1>loc2:
locL=loc2
locU=loc1
elif loc1<loc2:
locL=loc1
locU=loc2
child.coords['Gene'].values[elem]].cdf(float(child[elem].values))
mirrSeq = seq[::-1]
for elem in mirrSeq:
elem = distDict[child.coords['Gene'].values[elem]].ppf(elem)
ators['swapMutator'] = swapMutator
__mutators['scrambleMutator'] = scrambleMutator
__mutators['bitFlipMutator'] = bitFlipMutator
__mutators['inversionMutator'] = inversionMutator
def returnInstance(cls, name):
if name not in __mutators:
cls.raiseAnError (IOError, "{} MECHANISM NOT IMPLEMENTED!!!!!".format(name))
return __mutators[name]
| true | true |
1c376ac09b79fcdf39fdae28583da0a291718718 | 2,728 | py | Python | volatility/framework/symbols/windows/__init__.py | dl9rdz/volatility3 | 9d9cdfb7d43b98662089503fdb85f103d551b543 | [
"Linux-OpenIB"
] | null | null | null | volatility/framework/symbols/windows/__init__.py | dl9rdz/volatility3 | 9d9cdfb7d43b98662089503fdb85f103d551b543 | [
"Linux-OpenIB"
] | null | null | null | volatility/framework/symbols/windows/__init__.py | dl9rdz/volatility3 | 9d9cdfb7d43b98662089503fdb85f103d551b543 | [
"Linux-OpenIB"
] | null | null | null | # This file is Copyright 2019 Volatility Foundation and licensed under the Volatility Software License 1.0
# which is available at https://www.volatilityfoundation.org/license/vsl-v1.0
#
import volatility.framework.symbols.windows.extensions.pool
from volatility.framework.symbols import intermed
from volatility.framework.symbols.windows import extensions
from volatility.framework.symbols.windows.extensions import registry, pool
class WindowsKernelIntermedSymbols(intermed.IntermediateSymbolTable):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
# Set-up windows specific types
self.set_type_class('_ETHREAD', extensions.ETHREAD)
self.set_type_class('_LIST_ENTRY', extensions.LIST_ENTRY)
self.set_type_class('_EPROCESS', extensions.EPROCESS)
self.set_type_class('_UNICODE_STRING', extensions.UNICODE_STRING)
self.set_type_class('_EX_FAST_REF', extensions.EX_FAST_REF)
self.set_type_class('_OBJECT_HEADER', pool.OBJECT_HEADER)
self.set_type_class('_FILE_OBJECT', extensions.FILE_OBJECT)
self.set_type_class('_DEVICE_OBJECT', extensions.DEVICE_OBJECT)
self.set_type_class('_CM_KEY_BODY', registry.CM_KEY_BODY)
self.set_type_class('_CMHIVE', registry.CMHIVE)
self.set_type_class('_CM_KEY_NODE', registry.CM_KEY_NODE)
self.set_type_class('_CM_KEY_VALUE', registry.CM_KEY_VALUE)
self.set_type_class('_HMAP_ENTRY', registry.HMAP_ENTRY)
self.set_type_class('_MMVAD_SHORT', extensions.MMVAD_SHORT)
self.set_type_class('_MMVAD', extensions.MMVAD)
self.set_type_class('_KSYSTEM_TIME', extensions.KSYSTEM_TIME)
self.set_type_class('_KMUTANT', extensions.KMUTANT)
self.set_type_class('_DRIVER_OBJECT', extensions.DRIVER_OBJECT)
self.set_type_class('_OBJECT_SYMBOLIC_LINK', extensions.OBJECT_SYMBOLIC_LINK)
self.set_type_class('_POOL_TRACKER_BIG_PAGES', pool.POOL_TRACKER_BIG_PAGES)
# This doesn't exist in very specific versions of windows
try:
self.set_type_class('_POOL_HEADER', pool.POOL_HEADER)
except ValueError:
pass
# these don't exist in windows XP
try:
self.set_type_class('_MMADDRESS_NODE', extensions.MMVAD_SHORT)
except ValueError:
pass
# these were introduced starting in windows 8
try:
self.set_type_class('_MM_AVL_NODE', extensions.MMVAD_SHORT)
except ValueError:
pass
# these were introduced starting in windows 7
try:
self.set_type_class('_RTL_BALANCED_NODE', extensions.MMVAD_SHORT)
except ValueError:
pass
| 45.466667 | 106 | 0.719575 |
import volatility.framework.symbols.windows.extensions.pool
from volatility.framework.symbols import intermed
from volatility.framework.symbols.windows import extensions
from volatility.framework.symbols.windows.extensions import registry, pool
class WindowsKernelIntermedSymbols(intermed.IntermediateSymbolTable):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.set_type_class('_ETHREAD', extensions.ETHREAD)
self.set_type_class('_LIST_ENTRY', extensions.LIST_ENTRY)
self.set_type_class('_EPROCESS', extensions.EPROCESS)
self.set_type_class('_UNICODE_STRING', extensions.UNICODE_STRING)
self.set_type_class('_EX_FAST_REF', extensions.EX_FAST_REF)
self.set_type_class('_OBJECT_HEADER', pool.OBJECT_HEADER)
self.set_type_class('_FILE_OBJECT', extensions.FILE_OBJECT)
self.set_type_class('_DEVICE_OBJECT', extensions.DEVICE_OBJECT)
self.set_type_class('_CM_KEY_BODY', registry.CM_KEY_BODY)
self.set_type_class('_CMHIVE', registry.CMHIVE)
self.set_type_class('_CM_KEY_NODE', registry.CM_KEY_NODE)
self.set_type_class('_CM_KEY_VALUE', registry.CM_KEY_VALUE)
self.set_type_class('_HMAP_ENTRY', registry.HMAP_ENTRY)
self.set_type_class('_MMVAD_SHORT', extensions.MMVAD_SHORT)
self.set_type_class('_MMVAD', extensions.MMVAD)
self.set_type_class('_KSYSTEM_TIME', extensions.KSYSTEM_TIME)
self.set_type_class('_KMUTANT', extensions.KMUTANT)
self.set_type_class('_DRIVER_OBJECT', extensions.DRIVER_OBJECT)
self.set_type_class('_OBJECT_SYMBOLIC_LINK', extensions.OBJECT_SYMBOLIC_LINK)
self.set_type_class('_POOL_TRACKER_BIG_PAGES', pool.POOL_TRACKER_BIG_PAGES)
try:
self.set_type_class('_POOL_HEADER', pool.POOL_HEADER)
except ValueError:
pass
# these don't exist in windows XP
try:
self.set_type_class('_MMADDRESS_NODE', extensions.MMVAD_SHORT)
except ValueError:
pass
try:
self.set_type_class('_MM_AVL_NODE', extensions.MMVAD_SHORT)
except ValueError:
pass
try:
self.set_type_class('_RTL_BALANCED_NODE', extensions.MMVAD_SHORT)
except ValueError:
pass
| true | true |
1c376b1c918a5e3eb46ec3ca502103cd459b1fc8 | 603 | py | Python | Chapter04/bookmarks/account/authentication.py | prathmesh-jagtap/Django-4-by-example | 8a9418f746117c1637db0900182e8f4454cdff5e | [
"MIT"
] | 1 | 2022-02-08T09:43:23.000Z | 2022-02-08T09:43:23.000Z | Chapter04/bookmarks/account/authentication.py | prathmesh-jagtap/Django-4-by-example | 8a9418f746117c1637db0900182e8f4454cdff5e | [
"MIT"
] | null | null | null | Chapter04/bookmarks/account/authentication.py | prathmesh-jagtap/Django-4-by-example | 8a9418f746117c1637db0900182e8f4454cdff5e | [
"MIT"
] | null | null | null | from django.contrib.auth.models import User
class EmailAuthBackend():
"""
Authenticate using an e-mail address.
"""
def authenticate(self, request, username=None, password=None):
try:
user = User.objects.get(email=username)
if user.check_password(password):
return user
return None
except (User.DoesNotExist, User.MultipleObjectsReturned):
return None
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
| 27.409091 | 66 | 0.606965 | from django.contrib.auth.models import User
class EmailAuthBackend():
def authenticate(self, request, username=None, password=None):
try:
user = User.objects.get(email=username)
if user.check_password(password):
return user
return None
except (User.DoesNotExist, User.MultipleObjectsReturned):
return None
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
| true | true |
1c376b75a8c23b3a74357826c14c4f507f3c5a3e | 4,606 | py | Python | demo/hello/app.py | HorizonFTT/Flask | ee18c8aa9447a0c4f9c58e286233ce345dcd7127 | [
"MIT"
] | 1 | 2020-01-03T02:58:26.000Z | 2020-01-03T02:58:26.000Z | demo/hello/app.py | HorizonFTT/Flask | ee18c8aa9447a0c4f9c58e286233ce345dcd7127 | [
"MIT"
] | null | null | null | demo/hello/app.py | HorizonFTT/Flask | ee18c8aa9447a0c4f9c58e286233ce345dcd7127 | [
"MIT"
] | null | null | null | import click
import os
from flask import (
Flask,
redirect,
url_for,
jsonify,
make_response,
session,
request,
render_template,
Markup,
flash,
)
from urllib.parse import urlparse, urljoin
from jinja2.utils import generate_lorem_ipsum
app = Flask(__name__)
app.secret_key = os.getenv('SECRET_KEY', 'secret string')
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
colors = ['blue', 'white', 'red']
@app.cli.command()
def say_hello():
click.echo('Hello, Human!')
@app.route('/greet', defaults={'name': 'Programmer'})
@app.route('/greet/<name>')
def greet(name):
return f'<h1>Hello, {name}!</h1>'
@app.route('/hello')
def hello():
name = request.args.get('name', 'Flask')
if name is None:
name = request.cookies.get('name', 'Human')
response = f'<h1>Hello, {name}!</h1>'
if 'logged_in' in session:
response += '[Authenticated]'
else:
response += '[Not Authenticated]'
return response
@app.route('/goBack/<int:year>')
def go_back(year):
return f'<p>Welcome to {(2020 - year)}!</p>'
@app.route(f'/colors/<any({str(colors)[1:-1]}):color>')
def three_colors(color):
return '<p>Love is patient and kind. Love is not jealous or boastful or proud or rude.</p>'
@app.route('/fuck')
def fuck():
return redirect(url_for('hello'))
@app.route('/json')
def json():
return jsonify(name='Grey Li', gender='male')
@app.route('/set/<name>')
def set_cookie(name):
response = make_response(redirect(url_for('hello')))
response.set_cookie('name', name)
return response
@app.route('/login')
def login():
session['logged_in'] = True
return redirect(url_for('hello'))
@app.route('/logout')
def logout():
if 'logged_in' in session:
session.pop('logged_in')
return redirect(url_for('hello'))
@app.route('/foo')
def foo():
r = f'<h1>Foo page</h1><a href="{url_for("do_something", next=request.full_path)}">Do something and redirect</a>'
return r
@app.route('/bar')
def bar():
return f'<h1>Bar page</h1><a href="{url_for("do_something", next=request.full_path)}">Do something and redirect</a>'
@app.route('/do_something')
def do_something():
# do something here
return redirect_back()
def is_safe_url(target):
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ('http', 'https') and \
ref_url.netloc == test_url.netloc
def redirect_back(default='hello', **kwargs):
for target in request.args.get('next'), request.referrer:
if not target:
continue
if is_safe_url(target):
return redirect(target)
return redirect(url_for(default, **kwargs))
@app.route('/post')
def show_post():
post_body = generate_lorem_ipsum(n=2)
return '''
<h1>A very long post</h1>
<div class="body">%s</div>
<button id="load">Load More</button>
<script src="https://code.jquery.com/jquery-3.3.1.min.js"></script>
<script type="text/javascript">
$(function() {
$('#load').click(function() {
$.ajax({
url: '/more',
type: 'get',
success: function(data){
$('.body').append(data);
}
})
})
})
</script>''' % post_body
@app.route('/more')
def load_post():
return generate_lorem_ipsum(n=1)
user = {
'username': 'Grey Li',
'bio': 'A boy who loves movies and music.',
}
movies = [
{'name': 'My Neighbor Totoro', 'year': '1988'},
{'name': 'Three Colours trilogy', 'year': '1993'},
{'name': 'Forrest Gump', 'year': '1994'},
{'name': 'Perfect Blue', 'year': '1997'},
{'name': 'The Matrix', 'year': '1999'},
{'name': 'Memento', 'year': '2000'},
{'name': 'The Bucket list', 'year': '2007'},
{'name': 'Black Swan', 'year': '2010'},
{'name': 'Gone Girl', 'year': '2014'},
{'name': 'CoCo', 'year': '2017'},
]
@app.route('/watchlist')
def watchlist():
return render_template('watchlist.html', user=user, movies=movies)
@app.route('/')
def index():
return render_template('index.html')
@app.context_processor
def inject_info():
foo = 'I am foo.'
return dict(foo=foo) # equal to: return {'foo': foo}
@app.template_global()
def bar():
return 'I am bar.'
@app.template_filter()
def musical(s):
return s + Markup(' ♫')
@app.template_test()
def baz(n):
if n == 'baz':
return True
return False
@app.route('/flash')
def just_flash():
flash('I am flash, who is looking for me?')
return redirect(url_for('index'))
| 22.359223 | 120 | 0.614199 | import click
import os
from flask import (
Flask,
redirect,
url_for,
jsonify,
make_response,
session,
request,
render_template,
Markup,
flash,
)
from urllib.parse import urlparse, urljoin
from jinja2.utils import generate_lorem_ipsum
app = Flask(__name__)
app.secret_key = os.getenv('SECRET_KEY', 'secret string')
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
colors = ['blue', 'white', 'red']
@app.cli.command()
def say_hello():
click.echo('Hello, Human!')
@app.route('/greet', defaults={'name': 'Programmer'})
@app.route('/greet/<name>')
def greet(name):
return f'<h1>Hello, {name}!</h1>'
@app.route('/hello')
def hello():
name = request.args.get('name', 'Flask')
if name is None:
name = request.cookies.get('name', 'Human')
response = f'<h1>Hello, {name}!</h1>'
if 'logged_in' in session:
response += '[Authenticated]'
else:
response += '[Not Authenticated]'
return response
@app.route('/goBack/<int:year>')
def go_back(year):
return f'<p>Welcome to {(2020 - year)}!</p>'
@app.route(f'/colors/<any({str(colors)[1:-1]}):color>')
def three_colors(color):
return '<p>Love is patient and kind. Love is not jealous or boastful or proud or rude.</p>'
@app.route('/fuck')
def fuck():
return redirect(url_for('hello'))
@app.route('/json')
def json():
return jsonify(name='Grey Li', gender='male')
@app.route('/set/<name>')
def set_cookie(name):
response = make_response(redirect(url_for('hello')))
response.set_cookie('name', name)
return response
@app.route('/login')
def login():
session['logged_in'] = True
return redirect(url_for('hello'))
@app.route('/logout')
def logout():
if 'logged_in' in session:
session.pop('logged_in')
return redirect(url_for('hello'))
@app.route('/foo')
def foo():
r = f'<h1>Foo page</h1><a href="{url_for("do_something", next=request.full_path)}">Do something and redirect</a>'
return r
@app.route('/bar')
def bar():
return f'<h1>Bar page</h1><a href="{url_for("do_something", next=request.full_path)}">Do something and redirect</a>'
@app.route('/do_something')
def do_something():
return redirect_back()
def is_safe_url(target):
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ('http', 'https') and \
ref_url.netloc == test_url.netloc
def redirect_back(default='hello', **kwargs):
for target in request.args.get('next'), request.referrer:
if not target:
continue
if is_safe_url(target):
return redirect(target)
return redirect(url_for(default, **kwargs))
@app.route('/post')
def show_post():
post_body = generate_lorem_ipsum(n=2)
return '''
<h1>A very long post</h1>
<div class="body">%s</div>
<button id="load">Load More</button>
<script src="https://code.jquery.com/jquery-3.3.1.min.js"></script>
<script type="text/javascript">
$(function() {
$('#load').click(function() {
$.ajax({
url: '/more',
type: 'get',
success: function(data){
$('.body').append(data);
}
})
})
})
</script>''' % post_body
@app.route('/more')
def load_post():
return generate_lorem_ipsum(n=1)
user = {
'username': 'Grey Li',
'bio': 'A boy who loves movies and music.',
}
movies = [
{'name': 'My Neighbor Totoro', 'year': '1988'},
{'name': 'Three Colours trilogy', 'year': '1993'},
{'name': 'Forrest Gump', 'year': '1994'},
{'name': 'Perfect Blue', 'year': '1997'},
{'name': 'The Matrix', 'year': '1999'},
{'name': 'Memento', 'year': '2000'},
{'name': 'The Bucket list', 'year': '2007'},
{'name': 'Black Swan', 'year': '2010'},
{'name': 'Gone Girl', 'year': '2014'},
{'name': 'CoCo', 'year': '2017'},
]
@app.route('/watchlist')
def watchlist():
return render_template('watchlist.html', user=user, movies=movies)
@app.route('/')
def index():
return render_template('index.html')
@app.context_processor
def inject_info():
foo = 'I am foo.'
return dict(foo=foo)
@app.template_global()
def bar():
return 'I am bar.'
@app.template_filter()
def musical(s):
return s + Markup(' ♫')
@app.template_test()
def baz(n):
if n == 'baz':
return True
return False
@app.route('/flash')
def just_flash():
flash('I am flash, who is looking for me?')
return redirect(url_for('index'))
| true | true |
1c376bcbeaeb04be53a3e08fa90e2bdb91dc0548 | 540 | py | Python | Prepare_val_data.py | YiLunLee/VRDL_HW4 | 3cc236ad1829745f2402e862cbfbe316f0574b8c | [
"MIT"
] | null | null | null | Prepare_val_data.py | YiLunLee/VRDL_HW4 | 3cc236ad1829745f2402e862cbfbe316f0574b8c | [
"MIT"
] | null | null | null | Prepare_val_data.py | YiLunLee/VRDL_HW4 | 3cc236ad1829745f2402e862cbfbe316f0574b8c | [
"MIT"
] | null | null | null | import os
import random
HR_path = './vrdl_data/val/HR_x3'
LR_path = './vrdl_data/val/LR_x3'
images = os.listdir(LR_path)
new_HR_path = './vrdl_data/vals_light/HR_x3s'
new_LR_path = './vrdl_data/vals_light/LR_x3s'
os.makedirs(new_HR_path, exist_ok=True)
os.makedirs(new_LR_path, exist_ok=True)
samples = random.sample(images, 30)
for img in samples:
os.system('cp {} {}'.format(os.path.join(HR_path, img), os.path.join(new_HR_path, img)))
os.system('cp {} {}'.format(os.path.join(LR_path, img), os.path.join(new_LR_path, img))) | 38.571429 | 96 | 0.722222 | import os
import random
HR_path = './vrdl_data/val/HR_x3'
LR_path = './vrdl_data/val/LR_x3'
images = os.listdir(LR_path)
new_HR_path = './vrdl_data/vals_light/HR_x3s'
new_LR_path = './vrdl_data/vals_light/LR_x3s'
os.makedirs(new_HR_path, exist_ok=True)
os.makedirs(new_LR_path, exist_ok=True)
samples = random.sample(images, 30)
for img in samples:
os.system('cp {} {}'.format(os.path.join(HR_path, img), os.path.join(new_HR_path, img)))
os.system('cp {} {}'.format(os.path.join(LR_path, img), os.path.join(new_LR_path, img))) | true | true |
1c376bf35755fbf88f43baf291fc29b2f88c8a20 | 2,375 | py | Python | gupview/Secondary_Scripts/Flouroscence.py | BboyTian/gupview | 6ef6693f8b58d224a89e2963bcd4d44312e957de | [
"MIT"
] | null | null | null | gupview/Secondary_Scripts/Flouroscence.py | BboyTian/gupview | 6ef6693f8b58d224a89e2963bcd4d44312e957de | [
"MIT"
] | null | null | null | gupview/Secondary_Scripts/Flouroscence.py | BboyTian/gupview | 6ef6693f8b58d224a89e2963bcd4d44312e957de | [
"MIT"
] | 1 | 2021-09-29T04:06:33.000Z | 2021-09-29T04:06:33.000Z | #########
#Imports#
#########
# Python Basics
from decimal import Decimal
# Graph Plotting
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.figure import Figure
# Image process
import numpy as np
import PIL
from .Masks import rectMask_func
# Parameters
import Parameters as para
###########
#Operation#
###########
class Flouro:
def __init__(self, plotsize, cropsize):
figsize = int(plotsize/80)
self.halfcropsize = int(cropsize/2)
# image to be processed
self.img = None
self.count = 0
#intialising figure
self.fig = Figure(figsize=(figsize, figsize), dpi=100)
self.ax = self.fig.add_subplot(111)
self.ax.set_ylim(para.ylim)
self.ax.set_xlim([self.count-para.xlim,self.count])
self.count_ara = np.array([])
self.flour_ara = np.array([])
self.flour_plot = self.ax.plot(self.count_ara, self.flour_ara)
def get_plot(self, image, cropLoc, cropdimension, xlim, flourSum_res):
halfcropsize_x, halfcropsize_y = int(cropdimension[0] / 2), int(cropdimension[1] / 2)
# Obtaining crop image
cropImage = image[cropLoc[1]-halfcropsize_y : cropLoc[1]+halfcropsize_y,
cropLoc[0]-halfcropsize_x : cropLoc[0]+halfcropsize_x]
flour = np.sum(cropImage)
# appending new values
self.count_ara = np.append(self.count_ara, self.count)
self.count += 1
self.flour_ara = np.append(self.flour_ara, flour)
# deleting beyond the limit
if len(self.count_ara) > xlim:
self.count_ara = self.count_ara[-xlim:]
self.flour_ara = self.flour_ara[-xlim:]
self.flour_plot[0].remove()
# updating plot
self.flour_plot = self.ax.plot(self.count_ara, self.flour_ara, 'o', color='C0')
self.ax.set_xlim([self.count-xlim,self.count])
self.fig.canvas.draw()
self.fig.canvas.flush_events()
# updating display number
flourSum_res.configure(text='%.5E' % Decimal(str(flour)))
def get_feed(self, array, cropLoc, cropdimension, width, height):
image = PIL.Image.fromarray(array)
# Obtaining Feed Image
feed_image = rectMask_func(image, cropLoc, cropdimension)
feed_image = feed_image.resize((width, height), PIL.Image.NEAREST)
return feed_image
| 28.27381 | 93 | 0.635368 | g")
from matplotlib.figure import Figure
import numpy as np
import PIL
from .Masks import rectMask_func
import Parameters as para
halfcropsize = int(cropsize/2)
self.img = None
self.count = 0
self.fig = Figure(figsize=(figsize, figsize), dpi=100)
self.ax = self.fig.add_subplot(111)
self.ax.set_ylim(para.ylim)
self.ax.set_xlim([self.count-para.xlim,self.count])
self.count_ara = np.array([])
self.flour_ara = np.array([])
self.flour_plot = self.ax.plot(self.count_ara, self.flour_ara)
def get_plot(self, image, cropLoc, cropdimension, xlim, flourSum_res):
halfcropsize_x, halfcropsize_y = int(cropdimension[0] / 2), int(cropdimension[1] / 2)
cropImage = image[cropLoc[1]-halfcropsize_y : cropLoc[1]+halfcropsize_y,
cropLoc[0]-halfcropsize_x : cropLoc[0]+halfcropsize_x]
flour = np.sum(cropImage)
self.count_ara = np.append(self.count_ara, self.count)
self.count += 1
self.flour_ara = np.append(self.flour_ara, flour)
if len(self.count_ara) > xlim:
self.count_ara = self.count_ara[-xlim:]
self.flour_ara = self.flour_ara[-xlim:]
self.flour_plot[0].remove()
self.flour_plot = self.ax.plot(self.count_ara, self.flour_ara, 'o', color='C0')
self.ax.set_xlim([self.count-xlim,self.count])
self.fig.canvas.draw()
self.fig.canvas.flush_events()
flourSum_res.configure(text='%.5E' % Decimal(str(flour)))
def get_feed(self, array, cropLoc, cropdimension, width, height):
image = PIL.Image.fromarray(array)
feed_image = rectMask_func(image, cropLoc, cropdimension)
feed_image = feed_image.resize((width, height), PIL.Image.NEAREST)
return feed_image
| true | true |
1c376c2c22243977c9d2bd5fc88a83d0caa3b4eb | 3,454 | py | Python | src/ui/turn/resourceinfo.py | szarta/stars-reborn | 61a7847b027e2efd6a26a5d8c276e18210833d0c | [
"MIT"
] | null | null | null | src/ui/turn/resourceinfo.py | szarta/stars-reborn | 61a7847b027e2efd6a26a5d8c276e18210833d0c | [
"MIT"
] | null | null | null | src/ui/turn/resourceinfo.py | szarta/stars-reborn | 61a7847b027e2efd6a26a5d8c276e18210833d0c | [
"MIT"
] | null | null | null | """
resourceinfo.py
The widget for displaying planet resource info.
:author: Brandon Arrendondo
:license: MIT, see LICENSE.txt for more details.
"""
from PySide.QtGui import QWidget
from PySide.QtGui import QBoxLayout
from PySide.QtGui import QLabel
from PySide.QtGui import QFrame
from PySide.QtGui import QPixmap
from PySide.QtCore import Qt
from src.model.enumerations import ResourcePaths
from src.model.enumerations import NeverSeenPlanet
class PlanetInfo(QWidget):
def __init__(self, planet, race):
super(PlanetInfo, self).__init__()
self.race = race
main_layout = QBoxLayout(QBoxLayout.TopToBottom)
self.planet_name = QLabel()
self.planet_name.setFrameStyle(QFrame.Panel | QFrame.Raised)
self.planet_name.setAlignment(Qt.AlignCenter)
main_layout.addWidget(self.planet_name)
self.planet_details = QWidget()
planet_details_layout = QBoxLayout(QBoxLayout.TopToBottom)
first_pane = QBoxLayout(QBoxLayout.LeftToRight)
self.planet_value = QLabel()
self.report_age = QLabel()
info_box = QBoxLayout(QBoxLayout.TopToBottom)
info_box.addWidget(self.planet_value)
info_box.addWidget(self.report_age)
self.population = QLabel()
first_pane.addLayout(info_box)
first_pane.addStretch(1)
first_pane.addWidget(self.population)
planet_details_layout.addLayout(first_pane)
self.planet_details.setLayout(planet_details_layout)
main_layout.addWidget(self.planet_details)
self.unknown_planet_label = QLabel()
self.unknown_planet_label.setAlignment(Qt.AlignCenter)
self.unknown_planet_label.setPixmap(
QPixmap(ResourcePaths.UnknownPlanetPath))
main_layout.addWidget(self.unknown_planet_label)
self.update_planet(planet)
self.setLayout(main_layout)
def update_planet(self, planet):
self.target_planet = planet
summary_text = '<font size="10pt">{0} Summary</font>'.format(
self.target_planet.name)
self.planet_name.setText(summary_text)
if(planet.years_since == NeverSeenPlanet):
self.unknown_planet_label.show()
self.planet_details.hide()
else:
value = self.target_planet.value
color = "red"
if(value > 0):
color = "green"
val_txt = '<font size="8pt">Value: </font>'
val_txt += '<font size="8pt" color="{0}">{1!s}%</font>'.format(
color, value)
self.planet_value.setText(val_txt)
since = ""
if(self.target_planet.years_since > 0):
since = '<font size="8pt" color="red">{0}</font>'.format(
'Report is {0} years old'.format(
self.target_planet.years_since))
else:
since = '<font size="8pt">{0}</font>'.format(
"Report is current")
self.report_age.setText(since)
pop = ""
if(self.target_planet.population == 0):
pop = '<font size="8pt">Uninhabited</font>'
else:
pop = '<font size="8pt">Population: {:,}</font>'.format(
self.target_planet.population)
self.population.setText(pop)
self.unknown_planet_label.hide()
self.planet_details.show()
| 32.584906 | 75 | 0.624493 | from PySide.QtGui import QWidget
from PySide.QtGui import QBoxLayout
from PySide.QtGui import QLabel
from PySide.QtGui import QFrame
from PySide.QtGui import QPixmap
from PySide.QtCore import Qt
from src.model.enumerations import ResourcePaths
from src.model.enumerations import NeverSeenPlanet
class PlanetInfo(QWidget):
def __init__(self, planet, race):
super(PlanetInfo, self).__init__()
self.race = race
main_layout = QBoxLayout(QBoxLayout.TopToBottom)
self.planet_name = QLabel()
self.planet_name.setFrameStyle(QFrame.Panel | QFrame.Raised)
self.planet_name.setAlignment(Qt.AlignCenter)
main_layout.addWidget(self.planet_name)
self.planet_details = QWidget()
planet_details_layout = QBoxLayout(QBoxLayout.TopToBottom)
first_pane = QBoxLayout(QBoxLayout.LeftToRight)
self.planet_value = QLabel()
self.report_age = QLabel()
info_box = QBoxLayout(QBoxLayout.TopToBottom)
info_box.addWidget(self.planet_value)
info_box.addWidget(self.report_age)
self.population = QLabel()
first_pane.addLayout(info_box)
first_pane.addStretch(1)
first_pane.addWidget(self.population)
planet_details_layout.addLayout(first_pane)
self.planet_details.setLayout(planet_details_layout)
main_layout.addWidget(self.planet_details)
self.unknown_planet_label = QLabel()
self.unknown_planet_label.setAlignment(Qt.AlignCenter)
self.unknown_planet_label.setPixmap(
QPixmap(ResourcePaths.UnknownPlanetPath))
main_layout.addWidget(self.unknown_planet_label)
self.update_planet(planet)
self.setLayout(main_layout)
def update_planet(self, planet):
self.target_planet = planet
summary_text = '<font size="10pt">{0} Summary</font>'.format(
self.target_planet.name)
self.planet_name.setText(summary_text)
if(planet.years_since == NeverSeenPlanet):
self.unknown_planet_label.show()
self.planet_details.hide()
else:
value = self.target_planet.value
color = "red"
if(value > 0):
color = "green"
val_txt = '<font size="8pt">Value: </font>'
val_txt += '<font size="8pt" color="{0}">{1!s}%</font>'.format(
color, value)
self.planet_value.setText(val_txt)
since = ""
if(self.target_planet.years_since > 0):
since = '<font size="8pt" color="red">{0}</font>'.format(
'Report is {0} years old'.format(
self.target_planet.years_since))
else:
since = '<font size="8pt">{0}</font>'.format(
"Report is current")
self.report_age.setText(since)
pop = ""
if(self.target_planet.population == 0):
pop = '<font size="8pt">Uninhabited</font>'
else:
pop = '<font size="8pt">Population: {:,}</font>'.format(
self.target_planet.population)
self.population.setText(pop)
self.unknown_planet_label.hide()
self.planet_details.show()
| true | true |
1c376c302110ee92628db9f2b9059b89382814c7 | 270 | py | Python | waroeng_bebek_selamet/waroeng_bebek_selamet/doctype/transaksi_pembayaran_line/transaksi_pembayaran_line.py | rifkisetyantto/frappe-digital-order | e1a9729a7d449ce7ef98a703d6a8d721fd5b5c5b | [
"MIT"
] | null | null | null | waroeng_bebek_selamet/waroeng_bebek_selamet/doctype/transaksi_pembayaran_line/transaksi_pembayaran_line.py | rifkisetyantto/frappe-digital-order | e1a9729a7d449ce7ef98a703d6a8d721fd5b5c5b | [
"MIT"
] | null | null | null | waroeng_bebek_selamet/waroeng_bebek_selamet/doctype/transaksi_pembayaran_line/transaksi_pembayaran_line.py | rifkisetyantto/frappe-digital-order | e1a9729a7d449ce7ef98a703d6a8d721fd5b5c5b | [
"MIT"
] | 2 | 2019-10-29T17:03:16.000Z | 2019-10-30T08:20:19.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Kelompok 6 and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class TransaksiPembayaranLine(Document):
pass
| 24.545455 | 49 | 0.788889 |
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class TransaksiPembayaranLine(Document):
pass
| true | true |
1c376e09821b633ae0074cdc19887f5321bd9147 | 92 | py | Python | dji_asdk_to_python/mission_control/timeline_element_feedback.py | msanchezc/dji-asdk-to-python | cf3e56691524624314a28f5ebc6f3f59cbd4d8cb | [
"BSD-3-Clause"
] | 7 | 2020-11-02T16:31:28.000Z | 2021-11-09T21:32:44.000Z | dji_asdk_to_python/mission_control/timeline_element_feedback.py | msanchezc/dji-asdk-to-python | cf3e56691524624314a28f5ebc6f3f59cbd4d8cb | [
"BSD-3-Clause"
] | 64 | 2020-09-03T04:32:39.000Z | 2022-02-21T20:30:16.000Z | dji_asdk_to_python/mission_control/timeline_element_feedback.py | PSBPOSAS/dji-asdk-to-python | 39fd29e172249656ce9f6e7b6273eeff6790d8c1 | [
"BSD-3-Clause"
] | 4 | 2020-09-16T19:07:30.000Z | 2022-02-21T04:48:10.000Z | class TimelineElementFeedback:
def __init__(self, app_ip):
self.app_ip = app_ip
| 23 | 31 | 0.706522 | class TimelineElementFeedback:
def __init__(self, app_ip):
self.app_ip = app_ip
| true | true |
1c376eb6a89356b525641162329a18c774d6d155 | 340 | py | Python | src/demo_tzbtc/types/tzbtc/parameter/transfer.py | pravin-d/dipdup-py | 934703e1d9ade2f5c798e9da79dc6f2deb0a7a24 | [
"MIT"
] | 39 | 2021-04-13T10:53:27.000Z | 2022-02-11T00:53:44.000Z | src/demo_tzbtc/types/tzbtc/parameter/transfer.py | pravin-d/dipdup-py | 934703e1d9ade2f5c798e9da79dc6f2deb0a7a24 | [
"MIT"
] | 113 | 2021-06-01T18:16:42.000Z | 2022-03-28T06:12:58.000Z | src/demo_tzbtc/types/tzbtc/parameter/transfer.py | pravin-d/dipdup-py | 934703e1d9ade2f5c798e9da79dc6f2deb0a7a24 | [
"MIT"
] | 16 | 2021-05-26T07:04:40.000Z | 2022-03-29T06:50:25.000Z | # generated by datamodel-codegen:
# filename: transfer.json
from __future__ import annotations
from pydantic import BaseModel
from pydantic import Extra
from pydantic import Field
class TransferParameter(BaseModel):
class Config:
extra = Extra.forbid
from_: str = Field(..., alias='from')
to: str
value: str
| 18.888889 | 41 | 0.717647 |
from __future__ import annotations
from pydantic import BaseModel
from pydantic import Extra
from pydantic import Field
class TransferParameter(BaseModel):
class Config:
extra = Extra.forbid
from_: str = Field(..., alias='from')
to: str
value: str
| true | true |
1c376ee3e2ce66e1ce2ab29d9c05e500a4a0c0b5 | 10,325 | py | Python | pymips.py | gwangmin/PyMIPS | d598a92dc9b90b32fae621396a794590d8899a11 | [
"MIT"
] | null | null | null | pymips.py | gwangmin/PyMIPS | d598a92dc9b90b32fae621396a794590d8899a11 | [
"MIT"
] | null | null | null | pymips.py | gwangmin/PyMIPS | d598a92dc9b90b32fae621396a794590d8899a11 | [
"MIT"
] | null | null | null | '''
PyMIPS
'''
# registers
# decimal
ZERO = 0
AT = 1
V0, V1 = 2, 3
A0, A1, A2, A3 = range(4, 8)
T0, T1, T2, T3, T4, T5, T6, T7 = range(8, 16)
S0, S1, S2, S3, S4, S5, S6, S7 = range(16, 24)
T8, T9 = 24, 25
K0, K1 = 26, 27
GP, SP, FP, RA = range(28, 32)
# utils
def handle_err(f, msg):
'''
Error handler
'''
return '[Error] ' + f.__name__ + ': ' + msg
def ones_complement(bits):
'''
Return 1's complement
'''
ones = ''
for bit in bits:
if bit == '0':
ones += '1'
else:
ones += '0'
return ones
def twos_complement(bits):
'''
Return 2's complement
'''
_len = len(bits)
ones = ones_complement(bits)
result = bin(int('0b' + ones, 2) + 1)[2:]
if len(result) > _len:
# if out of range
l = len(result) - _len
result = result[l:]
return result
def dec_to_bit(dec, _len):
'''
Convert decimal to binary str(no prefix).
dec: decimal int
_len: bit(s) length
'''
if str(dec)[0] != '-':
# positive
bit = bin(dec)[2:]
return bit_ext(bit, _len, sign=False)
else:
# negative
_abs = bin(abs(dec))[2:]
_abs = bit_ext(_abs, _len, sign=False)
return twos_complement(_abs)
def bit_to_dec(bit, signed=False):
'''
Convert bit(s) to dec
signed: signed or unsigned? default unsigned
'''
if (bit[0] == '0') or (signed == False):
# positive or unsigned
return int('0b' + bit, 2)
else:
# negative
n = '-' + str(int('0b' + twos_complement(bit), 2))
return int(n)
def bit_ext(bit, _len, sign=False):
'''
Bit extension
bit: bit str
_len: length
sign: sign ext or zero ext. default zero ext.
'''
bit = str(bit)
if sign == False:
pad = '0'
else:
pad = bit[0]
l = _len - len(bit)
if 0 < l:
bit = pad * l + bit
elif l == 0:
pass
elif l < 0:
return handle_err(bit_ext, 'out of range')
return bit
def hex_to_bit(_hex):
'''
Hex to bit(s)
'''
bit = ''
for h in _hex:
b = bin(int('0x' + h, 16))[2:]
bit += bit_ext(b, 4, sign=False)
return bit
def bit_to_hex(bit):
'''
Bit(s) to hex
'''
_hex = ''
for i in range(len(bit) // 4):
si = 0 + (4*i)
_hex += hex(int('0b' + bit[si:si+4], 2))[2:]
return _hex
def hex_to_dec(_hex, signed=True):
'''
Hex to decimal
signed: signed or unsigned. default signed.
'''
bit = hex_to_bit(_hex)
return bit_to_dec(bit, signed=signed)
def dec_to_hex(dec, _len):
'''
Decimal to hex
_len: hex length
'''
bit = dec_to_bit(dec, _len * 4)
return bit_to_hex(bit)
# instructions
class MIPSInstruction:
def __init__(self):
self.inst_b, self.inst_h = None, None
def encode_hex(self):
# 최종 인스트럭션을 8자리 16진수로(접두사 없이)
h = hex(int('0b' + self.inst_b, 2))[2:]
self.inst_h = ('0' * (8 - len(h))) + h
# decode
class RType(MIPSInstruction):
'''
R type instruction composer
op(6) | rs(5) | rt(5) | rd(5) | shamt(5) | funct(6)
'''
def __init__(self):
'''
Initialize all fields to None
'''
self.op, self.op_b = None, None
self.rs, self.rs_b = None, None
self.rt, self.rt_b = None, None
self.rd, self.rd_b = None, None
self.shamt, self.shamt_b = None, None
self.funct, self.funct_b = None, None
self.inst_b, self.inst_h = None, None
def fill_dec(self, op, rs, rt, rd, shamt, funct):
'''
Fill fields with decimal
all arg: decimal
'''
self.op = op
self.rs = rs
self.rt = rt
self.rd = rd
self.shamt = shamt
self.funct = funct
self.fill_with_dec()
def fill_bit(self, op, rs, rt, rd, shamt, signed, funct):
'''
Fill fields with bit(s)
all arg: bit(s)
signed: shamt is signed?
'''
self.op_b = op
self.rs_b = rs
self.rt_b = rt
self.rd_b = rd
self.shamt_b = shamt
self.funct_b = funct
self.fill_with_bit(signed)
def fill_with_dec(self):
'''
Encode decimal fields to bit(s)
'''
self.op_b = dec_to_bit(self.op, 6)
self.rs_b = dec_to_bit(self.rs, 5)
self.rt_b = dec_to_bit(self.rt, 5)
self.rd_b = dec_to_bit(self.rd, 5)
self.shamt_b = dec_to_bit(self.shamt, 5)
self.funct_b = dec_to_bit(self.funct, 6)
def fill_with_bit(self, signed):
'''
Fill decimal fields with bit(s)
'''
self.op = bit_to_dec(self.op_b, signed=False)
self.rs = bit_to_dec(self.rs_b, signed=False)
self.rt = bit_to_dec(self.rt_b, signed=False)
self.rd = bit_to_dec(self.rd_b, signed=False)
self.shamt = bit_to_dec(self.shamt_b, signed=signed)
self.funct = bit_to_dec(self.funct_b, signed=False)
def encode(self):
'''
Compose (binary/hex)instruction with binary fields.
'''
# 최종 인스트럭션을 2진수로(접두사 없이)
self.inst_b = self.op_b + self.rs_b + self.rt_b + self.rd_b + self.shamt_b + self.funct_b
# 최종 인스트럭션을 8자리 16진수로(접두사 없이)
self.encode_hex()
def decode_hex(self, signed):
'''
Decode hex instruction
signed: shamt is signed?
'''
_hex = self.inst_h
self.inst_b = hex_to_bit(_hex)
self.op_b = self.inst_b[:6]
self.rs_b = self.inst_b[6:6+5]
self.rt_b = self.inst_b[11:11+5]
self.rd_b = self.inst_b[16:16+5]
self.shamt_b = self.inst_b[21:21+5]
self.funct_b = self.inst_b[26:26+6]
self.op = bit_to_dec(self.op_b, signed=False)
self.rs = bit_to_dec(self.rs_b, signed=False)
self.rt = bit_to_dec(self.rt_b, signed=False)
self.rd = bit_to_dec(self.rd_b, signed=False)
self.shamt = bit_to_dec(self.shamt_b, signed=signed)
self.funct = bit_to_dec(self.funct_b, signed=False)
# decode
class IType(MIPSInstruction):
'''
I type instruction composer
op(6) | rs(5) | rt(5) | immediate(16)
'''
def __init__(self):
'''
Initialize all fields to None
'''
self.op, self.op_b = None, None
self.rs, self.rs_b = None, None
self.rt, self.rt_b = None, None
self.im, self.im_b = None, None
self.inst_b, self.inst_h = None, None
def fill_dec(self, op, rs, rt, im):
'''
Fill fields with decimal
all arg: decimal
'''
self.op = op
self.rs = rs
self.rt = rt
self.im = im
self.fill_with_dec()
def fill_bit(self, op, rs, rt, im, signed):
'''
Fill fields with bit(s)
all arg: bit(s)
signed: im is signed?
'''
self.op_b = op
self.rs_b = rs
self.rt_b = rt
self.im_b = im
self.fill_with_bit(signed)
def fill_with_dec(self):
'''
Encode decimal fields to bit(s)
'''
self.op_b = dec_to_bit(self.op, 6)
self.rs_b = dec_to_bit(self.rs, 5)
self.rt_b = dec_to_bit(self.rt, 5)
self.im_b = dec_to_bit(self.im, 16)
def fill_with_bit(self, signed):
'''
Fill decimal fields with bit(s)
'''
self.op = bit_to_dec(self.op_b, signed=False)
self.rs = bit_to_dec(self.rs_b, signed=False)
self.rt = bit_to_dec(self.rt_b, signed=False)
self.im = bit_to_dec(self.im_b, signed=signed)
def encode(self):
'''
Compose (binary/hex)instruction with binary fields.
'''
# 최종 인스트럭션을 2진수로(접두사 없이)
self.inst_b = self.op_b + self.rs_b + self.rt_b + self.im_b
# 최종 인스트럭션을 8자리 16진수로(접두사 없이)
self.encode_hex()
def decode_hex(self, signed):
'''
Decode hex instruction
signed: im is signed?
'''
_hex = self.inst_h
self.inst_b = hex_to_bit(_hex)
self.op_b = self.inst_b[:6]
self.rs_b = self.inst_b[6:6+5]
self.rt_b = self.inst_b[11:11+5]
self.im_b = self.inst_b[16:]
self.op = bit_to_dec(self.op_b, signed=False)
self.rs = bit_to_dec(self.rs_b, signed=False)
self.rt = bit_to_dec(self.rt_b, signed=False)
self.im = bit_to_dec(self.im_b, signed=signed)
# decode
class JType(MIPSInstruction):
'''
J type instruction composer
op(6) | addr(26)
'''
def __init__(self):
'''
Initialize all fields to None
'''
self.op, self.op_b = None, None
self.addr, self.addr_b = None, None
self.inst_b, self.inst_h = None, None
def fill_dec(self, op, addr):
'''
Fill fields with decimal
all arg: decimal
'''
self.op = op
self.addr = addr
self.fill_with_dec()
def fill_bit(self, op, addr):
'''
Fill fields with bit(s)
all arg: bit(s)
'''
self.op_b = op
self.addr_b = addr
self.fill_with_bit()
def fill_with_dec(self):
'''
Encode decimal fields to bit(s)
'''
self.op_b = dec_to_bit(self.op, 6)
self.addr_b = dec_to_bit(self.addr, 26)
def fill_with_bit(self):
'''
Fill decimal fields with bit(s)
'''
self.op = bit_to_dec(self.op_b, signed=False)
self.addr = bit_to_dec(self.addr_b, signed=False)
def encode(self):
'''
Compose (binary/hex)instruction with binary fields.
'''
# 최종 인스트럭션을 2진수로(접두사 없이)
self.inst_b = self.op_b + self.addr_b
# 최종 인스트럭션을 8자리 16진수로(접두사 없이)
self.encode_hex()
def decode_hex(self):
'''
Decode hex instruction
'''
_hex = self.inst_h
self.inst_b = hex_to_bit(_hex)
self.op_b = self.inst_b[:6]
self.addr_b = self.inst_b[6:]
self.op = bit_to_dec(self.op_b, signed=False)
self.addr = bit_to_dec(self.addr_b, signed=False)
if __name__ == "__main__":
i = IType()
i.fill_dec(bit_to_dec('001000'), T5, T4, -2)
i.encode()
print(i.inst_h)
| 24.524941 | 97 | 0.538402 |
ZERO = 0
AT = 1
V0, V1 = 2, 3
A0, A1, A2, A3 = range(4, 8)
T0, T1, T2, T3, T4, T5, T6, T7 = range(8, 16)
S0, S1, S2, S3, S4, S5, S6, S7 = range(16, 24)
T8, T9 = 24, 25
K0, K1 = 26, 27
GP, SP, FP, RA = range(28, 32)
def handle_err(f, msg):
return '[Error] ' + f.__name__ + ': ' + msg
def ones_complement(bits):
ones = ''
for bit in bits:
if bit == '0':
ones += '1'
else:
ones += '0'
return ones
def twos_complement(bits):
_len = len(bits)
ones = ones_complement(bits)
result = bin(int('0b' + ones, 2) + 1)[2:]
if len(result) > _len:
l = len(result) - _len
result = result[l:]
return result
def dec_to_bit(dec, _len):
if str(dec)[0] != '-':
bit = bin(dec)[2:]
return bit_ext(bit, _len, sign=False)
else:
_abs = bin(abs(dec))[2:]
_abs = bit_ext(_abs, _len, sign=False)
return twos_complement(_abs)
def bit_to_dec(bit, signed=False):
if (bit[0] == '0') or (signed == False):
return int('0b' + bit, 2)
else:
n = '-' + str(int('0b' + twos_complement(bit), 2))
return int(n)
def bit_ext(bit, _len, sign=False):
bit = str(bit)
if sign == False:
pad = '0'
else:
pad = bit[0]
l = _len - len(bit)
if 0 < l:
bit = pad * l + bit
elif l == 0:
pass
elif l < 0:
return handle_err(bit_ext, 'out of range')
return bit
def hex_to_bit(_hex):
bit = ''
for h in _hex:
b = bin(int('0x' + h, 16))[2:]
bit += bit_ext(b, 4, sign=False)
return bit
def bit_to_hex(bit):
_hex = ''
for i in range(len(bit) // 4):
si = 0 + (4*i)
_hex += hex(int('0b' + bit[si:si+4], 2))[2:]
return _hex
def hex_to_dec(_hex, signed=True):
bit = hex_to_bit(_hex)
return bit_to_dec(bit, signed=signed)
def dec_to_hex(dec, _len):
bit = dec_to_bit(dec, _len * 4)
return bit_to_hex(bit)
class MIPSInstruction:
def __init__(self):
self.inst_b, self.inst_h = None, None
def encode_hex(self):
h = hex(int('0b' + self.inst_b, 2))[2:]
self.inst_h = ('0' * (8 - len(h))) + h
class RType(MIPSInstruction):
def __init__(self):
self.op, self.op_b = None, None
self.rs, self.rs_b = None, None
self.rt, self.rt_b = None, None
self.rd, self.rd_b = None, None
self.shamt, self.shamt_b = None, None
self.funct, self.funct_b = None, None
self.inst_b, self.inst_h = None, None
def fill_dec(self, op, rs, rt, rd, shamt, funct):
self.op = op
self.rs = rs
self.rt = rt
self.rd = rd
self.shamt = shamt
self.funct = funct
self.fill_with_dec()
def fill_bit(self, op, rs, rt, rd, shamt, signed, funct):
self.op_b = op
self.rs_b = rs
self.rt_b = rt
self.rd_b = rd
self.shamt_b = shamt
self.funct_b = funct
self.fill_with_bit(signed)
def fill_with_dec(self):
self.op_b = dec_to_bit(self.op, 6)
self.rs_b = dec_to_bit(self.rs, 5)
self.rt_b = dec_to_bit(self.rt, 5)
self.rd_b = dec_to_bit(self.rd, 5)
self.shamt_b = dec_to_bit(self.shamt, 5)
self.funct_b = dec_to_bit(self.funct, 6)
def fill_with_bit(self, signed):
self.op = bit_to_dec(self.op_b, signed=False)
self.rs = bit_to_dec(self.rs_b, signed=False)
self.rt = bit_to_dec(self.rt_b, signed=False)
self.rd = bit_to_dec(self.rd_b, signed=False)
self.shamt = bit_to_dec(self.shamt_b, signed=signed)
self.funct = bit_to_dec(self.funct_b, signed=False)
def encode(self):
self.inst_b = self.op_b + self.rs_b + self.rt_b + self.rd_b + self.shamt_b + self.funct_b
self.encode_hex()
def decode_hex(self, signed):
_hex = self.inst_h
self.inst_b = hex_to_bit(_hex)
self.op_b = self.inst_b[:6]
self.rs_b = self.inst_b[6:6+5]
self.rt_b = self.inst_b[11:11+5]
self.rd_b = self.inst_b[16:16+5]
self.shamt_b = self.inst_b[21:21+5]
self.funct_b = self.inst_b[26:26+6]
self.op = bit_to_dec(self.op_b, signed=False)
self.rs = bit_to_dec(self.rs_b, signed=False)
self.rt = bit_to_dec(self.rt_b, signed=False)
self.rd = bit_to_dec(self.rd_b, signed=False)
self.shamt = bit_to_dec(self.shamt_b, signed=signed)
self.funct = bit_to_dec(self.funct_b, signed=False)
class IType(MIPSInstruction):
def __init__(self):
self.op, self.op_b = None, None
self.rs, self.rs_b = None, None
self.rt, self.rt_b = None, None
self.im, self.im_b = None, None
self.inst_b, self.inst_h = None, None
def fill_dec(self, op, rs, rt, im):
self.op = op
self.rs = rs
self.rt = rt
self.im = im
self.fill_with_dec()
def fill_bit(self, op, rs, rt, im, signed):
self.op_b = op
self.rs_b = rs
self.rt_b = rt
self.im_b = im
self.fill_with_bit(signed)
def fill_with_dec(self):
self.op_b = dec_to_bit(self.op, 6)
self.rs_b = dec_to_bit(self.rs, 5)
self.rt_b = dec_to_bit(self.rt, 5)
self.im_b = dec_to_bit(self.im, 16)
def fill_with_bit(self, signed):
self.op = bit_to_dec(self.op_b, signed=False)
self.rs = bit_to_dec(self.rs_b, signed=False)
self.rt = bit_to_dec(self.rt_b, signed=False)
self.im = bit_to_dec(self.im_b, signed=signed)
def encode(self):
self.inst_b = self.op_b + self.rs_b + self.rt_b + self.im_b
self.encode_hex()
def decode_hex(self, signed):
_hex = self.inst_h
self.inst_b = hex_to_bit(_hex)
self.op_b = self.inst_b[:6]
self.rs_b = self.inst_b[6:6+5]
self.rt_b = self.inst_b[11:11+5]
self.im_b = self.inst_b[16:]
self.op = bit_to_dec(self.op_b, signed=False)
self.rs = bit_to_dec(self.rs_b, signed=False)
self.rt = bit_to_dec(self.rt_b, signed=False)
self.im = bit_to_dec(self.im_b, signed=signed)
class JType(MIPSInstruction):
def __init__(self):
self.op, self.op_b = None, None
self.addr, self.addr_b = None, None
self.inst_b, self.inst_h = None, None
def fill_dec(self, op, addr):
self.op = op
self.addr = addr
self.fill_with_dec()
def fill_bit(self, op, addr):
self.op_b = op
self.addr_b = addr
self.fill_with_bit()
def fill_with_dec(self):
self.op_b = dec_to_bit(self.op, 6)
self.addr_b = dec_to_bit(self.addr, 26)
def fill_with_bit(self):
self.op = bit_to_dec(self.op_b, signed=False)
self.addr = bit_to_dec(self.addr_b, signed=False)
def encode(self):
self.inst_b = self.op_b + self.addr_b
self.encode_hex()
def decode_hex(self):
_hex = self.inst_h
self.inst_b = hex_to_bit(_hex)
self.op_b = self.inst_b[:6]
self.addr_b = self.inst_b[6:]
self.op = bit_to_dec(self.op_b, signed=False)
self.addr = bit_to_dec(self.addr_b, signed=False)
if __name__ == "__main__":
i = IType()
i.fill_dec(bit_to_dec('001000'), T5, T4, -2)
i.encode()
print(i.inst_h)
| true | true |
1c376f6d10c34346adf7e1b076c9671d1cbe0001 | 162 | py | Python | plt/__init__.py | chicham/plt | f23e29c13ad9a5ed69abc3688f6716589180fba0 | [
"MIT"
] | 1 | 2018-03-08T02:46:06.000Z | 2018-03-08T02:46:06.000Z | plt/__init__.py | chicham/plt | f23e29c13ad9a5ed69abc3688f6716589180fba0 | [
"MIT"
] | null | null | null | plt/__init__.py | chicham/plt | f23e29c13ad9a5ed69abc3688f6716589180fba0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Top-level package for plot."""
__author__ = """Hicham Randrianarivo"""
__email__ = 'h.randrianarivo@qwant.com'
__version__ = '0.1.0'
| 20.25 | 39 | 0.654321 |
__author__ = """Hicham Randrianarivo"""
__email__ = 'h.randrianarivo@qwant.com'
__version__ = '0.1.0'
| true | true |
1c376f81aced6badadb0eef4cc5b48f457ae1ce5 | 334 | py | Python | iotbx/command_line/reflection_file_converter.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | 2 | 2021-03-18T12:31:57.000Z | 2022-03-14T06:27:06.000Z | iotbx/command_line/reflection_file_converter.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | null | null | null | iotbx/command_line/reflection_file_converter.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | 1 | 2021-03-26T12:52:30.000Z | 2021-03-26T12:52:30.000Z | from __future__ import absolute_import, division, print_function
# LIBTBX_SET_DISPATCHER_NAME phenix.reflection_file_converter
from iotbx import reflection_file_converter
import sys
def run():
try:
reflection_file_converter.run(args=sys.argv[1:])
except RuntimeError as e:
print(e)
if (__name__ == "__main__"):
run()
| 22.266667 | 64 | 0.778443 | from __future__ import absolute_import, division, print_function
from iotbx import reflection_file_converter
import sys
def run():
try:
reflection_file_converter.run(args=sys.argv[1:])
except RuntimeError as e:
print(e)
if (__name__ == "__main__"):
run()
| true | true |
1c376fa5f02c462267fc408c9460d667c29e8341 | 2,495 | py | Python | STAP/STAP_imcorr.py | rscalzo/subpipe | 641067a65810ad4acafcc75e7b09cb65712f40f1 | [
"BSD-3-Clause"
] | null | null | null | STAP/STAP_imcorr.py | rscalzo/subpipe | 641067a65810ad4acafcc75e7b09cb65712f40f1 | [
"BSD-3-Clause"
] | null | null | null | STAP/STAP_imcorr.py | rscalzo/subpipe | 641067a65810ad4acafcc75e7b09cb65712f40f1 | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python
"""
Apply flat fielding and/or fringe correction
Syntax: STAP_imcorr.py imname outname [-ff FFNAME] [-fr FRNAME] [-noampjoin]
[-timeout TIMEOUT]
Inputs:
imname: filename of input image including full path
outname: output filename of corrected image
Optional inputs:
ffname: filename of flat field including full path
frname: filename of fringe pattern
noampjoin: if flag set, no alignment between left and right halves of CCD
timeout: maximum running time (in seconds) allowed for each step
default is 20s, with polling interval fixed to 1 s
Description:
Apply flat fielding on input image if a flat field is supplied.
Apply fringe correction on input image if a fringe pattern is supplied.
Align the right half of CCD to the left, unless noampjoin flag is set.
Write the corrected image to outname.
Option to change timeout.
Exit status:
0, successful
1, see specific returned error message
2, syntax error
Specifications:
External system call:
imarith
Python function:
ampjoin
Memory requriements: 32M x 2 + bits
Scratch disk space requirements: none
Typical wall clock time needed: 3 s
Config files needed: none
Enviroment variables: none if imarith is in the path
"""
import argparse
import sys
import pyfits
parser = argparse.ArgumentParser(description='Apply flat fielding and/or fringe correction.')
parser.add_argument('imname',
help='filename of input image')
parser.add_argument('outname',
help='filename of output image')
parser.add_argument('-ff',dest='ffname',
help='filename of flat field')
parser.add_argument('-fr',dest='frname',
help='filename of fring pattern')
parser.add_argument('-noampjoin',action="store_const",const=True,default=False,
help='if set, two halves of CCD are not aligned')
parser.add_argument('-timeout',type=int, default=60,
help='maximum running time allowed for each correction (default: %(default)s)')
# RS 2011/04/28: added log file option to pass to STAP_callexternal
parser.add_argument('-log',default=None,
help='optional log file (default: write to stdout)')
args = parser.parse_args()
if args.noampjoin is True and args.ffname is None and args.frname is None:
sys.exit("No action to be done on the input image")
def ampjoin():
pass
| 35.140845 | 99 | 0.692184 |
import argparse
import sys
import pyfits
parser = argparse.ArgumentParser(description='Apply flat fielding and/or fringe correction.')
parser.add_argument('imname',
help='filename of input image')
parser.add_argument('outname',
help='filename of output image')
parser.add_argument('-ff',dest='ffname',
help='filename of flat field')
parser.add_argument('-fr',dest='frname',
help='filename of fring pattern')
parser.add_argument('-noampjoin',action="store_const",const=True,default=False,
help='if set, two halves of CCD are not aligned')
parser.add_argument('-timeout',type=int, default=60,
help='maximum running time allowed for each correction (default: %(default)s)')
parser.add_argument('-log',default=None,
help='optional log file (default: write to stdout)')
args = parser.parse_args()
if args.noampjoin is True and args.ffname is None and args.frname is None:
sys.exit("No action to be done on the input image")
def ampjoin():
pass
| true | true |
1c3772af5821e21cb24aff095fc34b95781b7cca | 3,151 | py | Python | django_react_bot/django_react_bot/settings.py | codedak/Django-React-ChatBot | 0b5da30ebad3a751cc18f7af494df001a539e7ec | [
"CC0-1.0"
] | null | null | null | django_react_bot/django_react_bot/settings.py | codedak/Django-React-ChatBot | 0b5da30ebad3a751cc18f7af494df001a539e7ec | [
"CC0-1.0"
] | null | null | null | django_react_bot/django_react_bot/settings.py | codedak/Django-React-ChatBot | 0b5da30ebad3a751cc18f7af494df001a539e7ec | [
"CC0-1.0"
] | null | null | null | """
Django settings for django_react_bot project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'nfxo3yr-c5bj+s28o7y06ckfd+1-92_9^kxoguk26gyx8kd6iv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["127.0.0.1"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'frontend'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_react_bot.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_react_bot.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| 25.827869 | 91 | 0.700413 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'nfxo3yr-c5bj+s28o7y06ckfd+1-92_9^kxoguk26gyx8kd6iv'
DEBUG = True
ALLOWED_HOSTS = ["127.0.0.1"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'frontend'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_react_bot.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_react_bot.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| true | true |
1c3772bf171cb279a20e72ebd32e1208866b8b32 | 7,397 | py | Python | tests/element_test.py | arunpersaud/becquerel | 5f2aa2e00a62e022c061e4343117e3b0365b2a45 | [
"BSD-3-Clause-LBNL"
] | null | null | null | tests/element_test.py | arunpersaud/becquerel | 5f2aa2e00a62e022c061e4343117e3b0365b2a45 | [
"BSD-3-Clause-LBNL"
] | null | null | null | tests/element_test.py | arunpersaud/becquerel | 5f2aa2e00a62e022c061e4343117e3b0365b2a45 | [
"BSD-3-Clause-LBNL"
] | null | null | null | """Test Element class."""
from becquerel.tools import element
import pytest
class TestElementFunctions(object):
"""Test Element functions."""
def test_validated_z_good(self):
"""Test validated_z................................................"""
for z1, sym1, name1, mass1 in element._Z_SYMBOL_NAME_MASS:
assert element.validated_z(z1) == z1
def test_validated_z_exception(self):
"""Test validated_z(119) raises ElementZError......................"""
with pytest.raises(element.ElementZError):
element.validated_z(119)
def test_validated_symbol_good(self):
"""Test validated_symbol..........................................."""
for z1, sym1, name1, mass1 in element._Z_SYMBOL_NAME_MASS:
for sym2 in [sym1, sym1.lower(), sym1.upper()]:
assert element.validated_symbol(sym2) == sym1
def test_validated_symbol_exception(self):
"""Test validated_symbol('Xz') raises ElementSymbolError..........."""
with pytest.raises(element.ElementSymbolError):
element.validated_symbol("Xz")
def test_validated_name_good(self):
"""Test validated_name............................................."""
for z1, sym1, name1, mass1 in element._Z_SYMBOL_NAME_MASS:
for name2 in [name1, name1.lower(), name1.upper()]:
assert element.validated_name(name2) == name1
def test_validated_name_exception(self):
"""Test validated_name('Xzzzzz') raises ElementNameError..........."""
with pytest.raises(element.ElementNameError):
element.validated_name("Xzzzzz")
def test_validated_name_aluminum(self):
"""Test validated_name('Aluminum') returns 'Aluminum'.............."""
name1 = "Aluminum"
for name2 in [name1, name1.lower(), name1.upper()]:
assert element.validated_name(name2) == "Aluminum"
def test_validated_name_aluminium(self):
"""Test validated_name('Aluminium') returns 'Aluminum'............."""
name1 = "Aluminium"
for name2 in [name1, name1.lower(), name1.upper()]:
assert element.validated_name(name2) == "Aluminum"
def test_validated_name_cesium(self):
"""Test validated_name('Cesium') returns 'Cesium'.................."""
name1 = "Cesium"
for name2 in [name1, name1.lower(), name1.upper()]:
assert element.validated_name(name2) == "Cesium"
def test_validated_name_caesium(self):
"""Test validated_name('Caesium') returns 'Cesium'................."""
name1 = "Caesium"
for name2 in [name1, name1.lower(), name1.upper()]:
assert element.validated_name(name2) == "Cesium"
def test_element_z(self):
"""Test element_z.................................................."""
for z1, sym1, name1, mass1 in element._Z_SYMBOL_NAME_MASS:
for sym2 in [sym1, sym1.lower(), sym1.upper()]:
assert element.element_z(sym2) == z1
for name2 in [name1, name1.lower(), name1.upper()]:
assert element.element_z(name2) == z1
def test_element_z_exception(self):
"""Test element_z with bad input raises ElementZError.............."""
for z1, sym1, name1, mass1 in element._Z_SYMBOL_NAME_MASS:
with pytest.raises(element.ElementZError):
element.element_z(z1)
def test_element_symbol(self):
"""Test element_symbol............................................."""
for z1, sym1, name1, mass1 in element._Z_SYMBOL_NAME_MASS:
assert element.element_symbol(z1) == sym1
for name2 in [name1, name1.lower(), name1.upper()]:
assert element.element_symbol(name2) == sym1
def test_element_symbol_exception(self):
"""Test element_symbol with bad input raises ElementSymbolError...."""
for z1, sym1, name1, mass1 in element._Z_SYMBOL_NAME_MASS:
with pytest.raises(element.ElementSymbolError):
element.element_symbol(sym1)
def test_element_name(self):
"""Test element_name..............................................."""
for z1, sym1, name1, mass1 in element._Z_SYMBOL_NAME_MASS:
assert element.element_name(z1) == name1
for sym2 in [sym1, sym1.lower(), sym1.upper()]:
assert element.element_name(sym2) == name1
def test_element_name_exception(self):
"""Test element_name with bad input raises ElementNameError........"""
for z1, sym1, name1, mass1 in element._Z_SYMBOL_NAME_MASS:
with pytest.raises(element.ElementNameError):
element.element_name(name1)
@pytest.mark.parametrize(
"z, sym, name",
[
(1, "H", "Hydrogen"),
(2, "He", "Helium"),
(13, "Al", "Aluminum"),
(19, "K", "Potassium"),
(32, "Ge", "Germanium"),
(70, "Yb", "Ytterbium"),
(92, "U", "Uranium"),
(118, "Og", "Oganesson"),
],
)
def test_element(z, sym, name):
"""Run instantiation tests for various elements.
Instantiate for element symbol and name, in mixed case, upper case,
and lower case. Also by Z as both integer and string.
"""
args = [name, name.lower(), name.upper()]
args.extend([sym, sym.lower(), sym.upper()])
args.extend([z, str(z)])
print(args)
for arg in args:
print("")
print("arg: ", arg)
elem = element.Element(arg)
print(elem)
assert elem.Z == z
assert elem.symbol == sym
assert elem.name == name
class TestElementInitExceptions(object):
"""Test Element class throws exceptions."""
def test_bad_arg_symbol(self):
"""Test Element init with a bad symbol raises ElementError........."""
with pytest.raises(element.ElementError):
element.Element("Xx")
def test_bad_arg_name(self):
"""Test Element init with a bad name raises ElementError..........."""
with pytest.raises(element.ElementError):
element.Element("Xirconium")
def test_bad_arg_z(self):
"""Test Element init with a bad Z raises ElementError.............."""
with pytest.raises(element.ElementError):
element.Element(0)
class TestElementsEqual(object):
"""Test Element class equality."""
def test_h(self):
"""Test Element equality: H........................................"""
assert element.Element("H") == element.Element(1)
def test_og(self):
"""Test Element equality: Og......................................."""
assert element.Element("Og") == element.Element(118)
def test_bad(self):
"""Test Element equality: H != 0..................................."""
with pytest.raises(element.ElementError):
elem = element.Element("H")
elem == 0
class TestElementStrFormat(object):
"""Test Element class string formatting."""
def test_h(self):
"""Test Element string formatting: H..............................."""
assert "{:%n (%s) %z}".format(element.Element("H")) == "Hydrogen (H) 1"
def test_og(self):
"""Test Element string formatting: Og.............................."""
assert "{:%n (%s) %z}".format(element.Element("Og")) == "Oganesson (Og) 118"
| 39.345745 | 84 | 0.568609 |
from becquerel.tools import element
import pytest
class TestElementFunctions(object):
def test_validated_z_good(self):
for z1, sym1, name1, mass1 in element._Z_SYMBOL_NAME_MASS:
assert element.validated_z(z1) == z1
def test_validated_z_exception(self):
with pytest.raises(element.ElementZError):
element.validated_z(119)
def test_validated_symbol_good(self):
for z1, sym1, name1, mass1 in element._Z_SYMBOL_NAME_MASS:
for sym2 in [sym1, sym1.lower(), sym1.upper()]:
assert element.validated_symbol(sym2) == sym1
def test_validated_symbol_exception(self):
with pytest.raises(element.ElementSymbolError):
element.validated_symbol("Xz")
def test_validated_name_good(self):
for z1, sym1, name1, mass1 in element._Z_SYMBOL_NAME_MASS:
for name2 in [name1, name1.lower(), name1.upper()]:
assert element.validated_name(name2) == name1
def test_validated_name_exception(self):
with pytest.raises(element.ElementNameError):
element.validated_name("Xzzzzz")
def test_validated_name_aluminum(self):
name1 = "Aluminum"
for name2 in [name1, name1.lower(), name1.upper()]:
assert element.validated_name(name2) == "Aluminum"
def test_validated_name_aluminium(self):
name1 = "Aluminium"
for name2 in [name1, name1.lower(), name1.upper()]:
assert element.validated_name(name2) == "Aluminum"
def test_validated_name_cesium(self):
name1 = "Cesium"
for name2 in [name1, name1.lower(), name1.upper()]:
assert element.validated_name(name2) == "Cesium"
def test_validated_name_caesium(self):
name1 = "Caesium"
for name2 in [name1, name1.lower(), name1.upper()]:
assert element.validated_name(name2) == "Cesium"
def test_element_z(self):
for z1, sym1, name1, mass1 in element._Z_SYMBOL_NAME_MASS:
for sym2 in [sym1, sym1.lower(), sym1.upper()]:
assert element.element_z(sym2) == z1
for name2 in [name1, name1.lower(), name1.upper()]:
assert element.element_z(name2) == z1
def test_element_z_exception(self):
for z1, sym1, name1, mass1 in element._Z_SYMBOL_NAME_MASS:
with pytest.raises(element.ElementZError):
element.element_z(z1)
def test_element_symbol(self):
for z1, sym1, name1, mass1 in element._Z_SYMBOL_NAME_MASS:
assert element.element_symbol(z1) == sym1
for name2 in [name1, name1.lower(), name1.upper()]:
assert element.element_symbol(name2) == sym1
def test_element_symbol_exception(self):
for z1, sym1, name1, mass1 in element._Z_SYMBOL_NAME_MASS:
with pytest.raises(element.ElementSymbolError):
element.element_symbol(sym1)
def test_element_name(self):
for z1, sym1, name1, mass1 in element._Z_SYMBOL_NAME_MASS:
assert element.element_name(z1) == name1
for sym2 in [sym1, sym1.lower(), sym1.upper()]:
assert element.element_name(sym2) == name1
def test_element_name_exception(self):
for z1, sym1, name1, mass1 in element._Z_SYMBOL_NAME_MASS:
with pytest.raises(element.ElementNameError):
element.element_name(name1)
@pytest.mark.parametrize(
"z, sym, name",
[
(1, "H", "Hydrogen"),
(2, "He", "Helium"),
(13, "Al", "Aluminum"),
(19, "K", "Potassium"),
(32, "Ge", "Germanium"),
(70, "Yb", "Ytterbium"),
(92, "U", "Uranium"),
(118, "Og", "Oganesson"),
],
)
def test_element(z, sym, name):
args = [name, name.lower(), name.upper()]
args.extend([sym, sym.lower(), sym.upper()])
args.extend([z, str(z)])
print(args)
for arg in args:
print("")
print("arg: ", arg)
elem = element.Element(arg)
print(elem)
assert elem.Z == z
assert elem.symbol == sym
assert elem.name == name
class TestElementInitExceptions(object):
def test_bad_arg_symbol(self):
with pytest.raises(element.ElementError):
element.Element("Xx")
def test_bad_arg_name(self):
with pytest.raises(element.ElementError):
element.Element("Xirconium")
def test_bad_arg_z(self):
with pytest.raises(element.ElementError):
element.Element(0)
class TestElementsEqual(object):
def test_h(self):
assert element.Element("H") == element.Element(1)
def test_og(self):
assert element.Element("Og") == element.Element(118)
def test_bad(self):
with pytest.raises(element.ElementError):
elem = element.Element("H")
elem == 0
class TestElementStrFormat(object):
def test_h(self):
assert "{:%n (%s) %z}".format(element.Element("H")) == "Hydrogen (H) 1"
def test_og(self):
assert "{:%n (%s) %z}".format(element.Element("Og")) == "Oganesson (Og) 118"
| true | true |
1c37746fe71c84e31d57f848afd8b7c55214ccbc | 3,265 | py | Python | examples/field_mixin_example.py | ZSD-tim/dayu_widgets | 31c2530bdc4161d9311574d9850c2e9471e53072 | [
"MIT"
] | 157 | 2019-03-10T05:55:21.000Z | 2022-03-31T09:07:00.000Z | examples/field_mixin_example.py | ZSD-tim/dayu_widgets | 31c2530bdc4161d9311574d9850c2e9471e53072 | [
"MIT"
] | 16 | 2019-07-15T11:30:53.000Z | 2021-12-16T14:17:59.000Z | examples/field_mixin_example.py | ZSD-tim/dayu_widgets | 31c2530bdc4161d9311574d9850c2e9471e53072 | [
"MIT"
] | 56 | 2019-06-19T03:35:27.000Z | 2022-03-22T08:07:32.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2019.3
# Email : muyanru345@163.com
###################################################################
from dayu_widgets.field_mixin import MFieldMixin
from dayu_widgets.label import MLabel
from dayu_widgets.line_edit import MLineEdit
from dayu_widgets.push_button import MPushButton
from dayu_widgets.qt import *
class FieldMixinExample(QWidget, MFieldMixin):
def __init__(self, parent=None):
super(FieldMixinExample, self).__init__(parent)
self.register_field('my_name', 'xiaoming')
self.register_field('thumbnail_path', '')
self.register_field('is_enable', True)
self.register_field('status', 'waiting')
self.register_field('str_enable', self.computed_str_enable)
self.register_field('thumbnail_pix_map', self.computed_thumbnail_pix_map)
self.register_field('email', self.computed_email)
name2_label = MLabel()
email_label = MLabel()
thumbnail_label = MLabel()
enable_button = MPushButton().primary()
self.bind('my_name', name2_label, 'dayu_text')
self.bind('email', email_label, 'dayu_text')
self.bind('is_enable', enable_button, 'enabled')
self.bind('thumbnail_pix_map', thumbnail_label, 'pixmap')
self.bind('str_enable', enable_button, 'text')
button = MPushButton(text='Change Data').primary()
button.clicked.connect(self.slot_change_data)
main_lay = QGridLayout()
main_lay.addWidget(MLabel('Avatar:'), 0, 0)
main_lay.addWidget(thumbnail_label, 0, 1)
main_lay.addWidget(MLabel('Name:'), 1, 0)
main_lay.addWidget(self.bind('my_name', MLineEdit(), 'text', signal='textEdited'), 1, 1)
main_lay.addWidget(MLabel('Email:'), 2, 0)
main_lay.addWidget(email_label, 2, 1)
main_lay.addWidget(MLabel('Enabled:'), 3, 0)
main_lay.addWidget(enable_button, 3, 1)
# for index, i in enumerate(self.field('my_name')):
# main_lay.addRow('name{}:'.format(index), self.bind('my_name', QLabel(), 'text', index=index))
main_lay.addWidget(button, 4, 1)
temp_lay = QVBoxLayout()
temp_lay.addLayout(main_lay)
temp_lay.addStretch()
self.setLayout(temp_lay)
def computed_str_enable(self):
return 'Enabled' if self.field('is_enable') else 'Disabled'
def computed_thumbnail_pix_map(self):
return MPixmap(self.field('thumbnail_path'))
def computed_email(self):
return '{}@phenom-films.com'.format(self.field('my_name'))
def slot_change_data(self):
import random
self.set_field('my_name', random.choice(['xiaoming', 'xiaohua', 'xiaohong', 'hahaha', 'lalalala']))
self.set_field('thumbnail_path', 'app-{}.png'.format(random.choice(['maya', 'nuke', 'houdini'])))
self.set_field('is_enable', bool(random.randint(0, 1)))
self.set_field('status', 'haha')
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
test = FieldMixinExample()
from dayu_widgets import dayu_theme
dayu_theme.apply(test)
test.show()
sys.exit(app.exec_())
| 39.337349 | 107 | 0.637366 | true | true | |
1c3774f46ae2fc55a60f74c65a9126aab48b4517 | 2,445 | py | Python | Benchmarks/benchmark-5.7.py | wangyonghong/RabbitMQ-in-Depth | 56a35c6359d500b7597daf1bb2185b4c451a572c | [
"BSD-3-Clause"
] | 111 | 2015-01-06T20:26:31.000Z | 2022-03-14T13:17:12.000Z | Benchmarks/benchmark-5.7.py | wangyonghong/RabbitMQ-in-Depth | 56a35c6359d500b7597daf1bb2185b4c451a572c | [
"BSD-3-Clause"
] | 4 | 2018-06-15T20:35:36.000Z | 2021-01-13T16:03:40.000Z | Benchmarks/benchmark-5.7.py | wangyonghong/RabbitMQ-in-Depth | 56a35c6359d500b7597daf1bb2185b4c451a572c | [
"BSD-3-Clause"
] | 43 | 2015-04-18T13:44:01.000Z | 2022-03-14T13:17:13.000Z | import logging
import pika
import sys
import time
from pika.adapters import tornado_connection
LOGGER = logging.getLogger(__name__)
QOS = int(sys.argv[1]) if len(sys.argv) == 2 else None
ROUTING_KEY = 'benchmark_qos_%s' % QOS
PROPERTIES = pika.BasicProperties(content_type='text/plain', delivery_mode=1)
ITERATIONS = 100000
channel = None
consumer_tag = None
received = 0
start_time = None
with open('lorem.txt') as handle:
BODY = handle.read()
def on_basic_cancel(_frame_unused):
connection.close()
def on_message(channel, method_frame, header_unused, body_unused):
global received
channel.basic_ack(method_frame.delivery_tag)
received += 1
if received == ITERATIONS:
total_time = time.time() - start_time
velocity = float(ITERATIONS / total_time)
LOGGER.info('Consumed %.2f messages/sec @ %s QoS in %.2f seconds',
velocity, 'unset' if QOS is None else QOS, total_time)
channel.basic_cancel(on_basic_cancel, consumer_tag)
def on_basic_qosok(_frame_unsued):
global consumer_tag, start_time
LOGGER.info('Starting consumer')
start_time = time.time()
consumer_tag = channel.basic_consume(on_message, ROUTING_KEY)
def on_queue_declared(_frame_unused):
LOGGER.info('Queue declared, publishing %i messages', ITERATIONS)
for iteration in range(0, ITERATIONS):
channel.basic_publish('', ROUTING_KEY, BODY[:2048], PROPERTIES)
if QOS is not None:
channel.basic_qos(callback=on_basic_qosok, prefetch_count=QOS)
else:
on_basic_qosok(None)
def on_channel_open(channel_opened):
global channel
LOGGER.info('Channel opened')
channel = channel_opened
channel.queue_declare(on_queue_declared, ROUTING_KEY,
auto_delete=True, durable=True, exclusive=True)
def on_open(connection):
LOGGER.info('Connection opened')
connection.channel(on_channel_open)
logging.basicConfig(level=logging.INFO)
LOGGER.info('Starting benchmark with QoS %s', QOS)
parameters = pika.URLParameters('amqp://guest:guest@localhost:5672/%2F')
connection = tornado_connection.TornadoConnection(parameters=parameters,
on_open_callback=on_open,
stop_ioloop_on_close=True)
try:
connection.ioloop.start()
except KeyboardInterrupt:
connection.close()
connection.ioloop.start()
| 28.103448 | 77 | 0.699387 | import logging
import pika
import sys
import time
from pika.adapters import tornado_connection
LOGGER = logging.getLogger(__name__)
QOS = int(sys.argv[1]) if len(sys.argv) == 2 else None
ROUTING_KEY = 'benchmark_qos_%s' % QOS
PROPERTIES = pika.BasicProperties(content_type='text/plain', delivery_mode=1)
ITERATIONS = 100000
channel = None
consumer_tag = None
received = 0
start_time = None
with open('lorem.txt') as handle:
BODY = handle.read()
def on_basic_cancel(_frame_unused):
connection.close()
def on_message(channel, method_frame, header_unused, body_unused):
global received
channel.basic_ack(method_frame.delivery_tag)
received += 1
if received == ITERATIONS:
total_time = time.time() - start_time
velocity = float(ITERATIONS / total_time)
LOGGER.info('Consumed %.2f messages/sec @ %s QoS in %.2f seconds',
velocity, 'unset' if QOS is None else QOS, total_time)
channel.basic_cancel(on_basic_cancel, consumer_tag)
def on_basic_qosok(_frame_unsued):
global consumer_tag, start_time
LOGGER.info('Starting consumer')
start_time = time.time()
consumer_tag = channel.basic_consume(on_message, ROUTING_KEY)
def on_queue_declared(_frame_unused):
LOGGER.info('Queue declared, publishing %i messages', ITERATIONS)
for iteration in range(0, ITERATIONS):
channel.basic_publish('', ROUTING_KEY, BODY[:2048], PROPERTIES)
if QOS is not None:
channel.basic_qos(callback=on_basic_qosok, prefetch_count=QOS)
else:
on_basic_qosok(None)
def on_channel_open(channel_opened):
global channel
LOGGER.info('Channel opened')
channel = channel_opened
channel.queue_declare(on_queue_declared, ROUTING_KEY,
auto_delete=True, durable=True, exclusive=True)
def on_open(connection):
LOGGER.info('Connection opened')
connection.channel(on_channel_open)
logging.basicConfig(level=logging.INFO)
LOGGER.info('Starting benchmark with QoS %s', QOS)
parameters = pika.URLParameters('amqp://guest:guest@localhost:5672/%2F')
connection = tornado_connection.TornadoConnection(parameters=parameters,
on_open_callback=on_open,
stop_ioloop_on_close=True)
try:
connection.ioloop.start()
except KeyboardInterrupt:
connection.close()
connection.ioloop.start()
| true | true |
1c37753db0c226b586f85e6accb6d327b27976d0 | 2,687 | py | Python | i3pystatus/alsa.py | rampage644/i3pystatus | b9846936e187cd80f15928e93ad6318755f84285 | [
"MIT"
] | null | null | null | i3pystatus/alsa.py | rampage644/i3pystatus | b9846936e187cd80f15928e93ad6318755f84285 | [
"MIT"
] | null | null | null | i3pystatus/alsa.py | rampage644/i3pystatus | b9846936e187cd80f15928e93ad6318755f84285 | [
"MIT"
] | null | null | null | from alsaaudio import Mixer, ALSAAudioError
from i3pystatus import IntervalModule
class ALSA(IntervalModule):
"""
Shows volume of ALSA mixer. You can also use this for inputs, btw.
Requires pyalsaaudio
.. rubric:: Available formatters
* `{volume}` — the current volume in percent
* `{muted}` — the value of one of the `muted` or `unmuted` settings
* `{card}` — the associated soundcard
* `{mixer}` — the associated ALSA mixer
"""
interval = 1
settings = (
"format",
("format_muted", "optional format string to use when muted"),
("mixer", "ALSA mixer"),
("mixer_id", "ALSA mixer id"),
("card", "ALSA sound card"),
("increment", "integer percentage of max volume to in/decrement volume on mousewheel"),
"muted", "unmuted",
"color_muted", "color",
"channel"
)
muted = "M"
unmuted = ""
color_muted = "#AAAAAA"
color = "#FFFFFF"
format = "♪: {volume}"
format_muted = None
mixer = "Master"
mixer_id = 0
card = 0
channel = 0
increment = 5
alsamixer = None
has_mute = True
def init(self):
self.create_mixer()
try:
self.alsamixer.getmute()
except ALSAAudioError:
self.has_mute = False
self.fdict = {
"card": self.alsamixer.cardname(),
"mixer": self.mixer,
}
def create_mixer(self):
self.alsamixer = Mixer(
control=self.mixer, id=self.mixer_id, cardindex=self.card)
def run(self):
self.create_mixer()
muted = False
if self.has_mute:
muted = self.alsamixer.getmute()[self.channel] == 1
self.fdict["volume"] = self.alsamixer.getvolume()[self.channel]
self.fdict["muted"] = self.muted if muted else self.unmuted
if muted and self.format_muted is not None:
output_format = self.format_muted
else:
output_format = self.format
self.output = {
"full_text": output_format.format(**self.fdict),
"color": self.color_muted if muted else self.color,
}
def on_leftclick(self):
self.on_rightclick()
def on_rightclick(self):
if self.has_mute:
muted = self.alsamixer.getmute()[self.channel]
self.alsamixer.setmute(not muted)
def on_upscroll(self):
vol = self.alsamixer.getvolume()[self.channel]
self.alsamixer.setvolume(min(100, vol + self.increment))
def on_downscroll(self):
vol = self.alsamixer.getvolume()[self.channel]
self.alsamixer.setvolume(max(0, vol - self.increment))
| 26.87 | 95 | 0.589877 | from alsaaudio import Mixer, ALSAAudioError
from i3pystatus import IntervalModule
class ALSA(IntervalModule):
interval = 1
settings = (
"format",
("format_muted", "optional format string to use when muted"),
("mixer", "ALSA mixer"),
("mixer_id", "ALSA mixer id"),
("card", "ALSA sound card"),
("increment", "integer percentage of max volume to in/decrement volume on mousewheel"),
"muted", "unmuted",
"color_muted", "color",
"channel"
)
muted = "M"
unmuted = ""
color_muted = "#AAAAAA"
color = "#FFFFFF"
format = "♪: {volume}"
format_muted = None
mixer = "Master"
mixer_id = 0
card = 0
channel = 0
increment = 5
alsamixer = None
has_mute = True
def init(self):
self.create_mixer()
try:
self.alsamixer.getmute()
except ALSAAudioError:
self.has_mute = False
self.fdict = {
"card": self.alsamixer.cardname(),
"mixer": self.mixer,
}
def create_mixer(self):
self.alsamixer = Mixer(
control=self.mixer, id=self.mixer_id, cardindex=self.card)
def run(self):
self.create_mixer()
muted = False
if self.has_mute:
muted = self.alsamixer.getmute()[self.channel] == 1
self.fdict["volume"] = self.alsamixer.getvolume()[self.channel]
self.fdict["muted"] = self.muted if muted else self.unmuted
if muted and self.format_muted is not None:
output_format = self.format_muted
else:
output_format = self.format
self.output = {
"full_text": output_format.format(**self.fdict),
"color": self.color_muted if muted else self.color,
}
def on_leftclick(self):
self.on_rightclick()
def on_rightclick(self):
if self.has_mute:
muted = self.alsamixer.getmute()[self.channel]
self.alsamixer.setmute(not muted)
def on_upscroll(self):
vol = self.alsamixer.getvolume()[self.channel]
self.alsamixer.setvolume(min(100, vol + self.increment))
def on_downscroll(self):
vol = self.alsamixer.getvolume()[self.channel]
self.alsamixer.setvolume(max(0, vol - self.increment))
| true | true |
1c37760711e3974012d6cdd128c7ad441e28bdfc | 3,284 | py | Python | tests/model_fields/test_datetimefield.py | ni-ning/django | 2e7ba6057cfc82a15a22b6021cd60cf307152e2d | [
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 61,676 | 2015-01-01T00:05:13.000Z | 2022-03-31T20:37:54.000Z | tests/model_fields/test_datetimefield.py | ni-ning/django | 2e7ba6057cfc82a15a22b6021cd60cf307152e2d | [
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 8,884 | 2015-01-01T00:12:05.000Z | 2022-03-31T19:53:11.000Z | tests/model_fields/test_datetimefield.py | mustafa0x/django | d7394cfa13a4d1a02356e3a83e10ec100fbb9948 | [
"BSD-3-Clause",
"0BSD"
] | 33,143 | 2015-01-01T02:04:52.000Z | 2022-03-31T19:42:46.000Z | import datetime
from django.db import models
from django.test import (
SimpleTestCase, TestCase, override_settings, skipUnlessDBFeature,
)
from django.test.utils import requires_tz_support
from django.utils import timezone
from .models import DateTimeModel
class DateTimeFieldTests(TestCase):
    """Round-trip and lookup behaviour of date/time model fields."""

    def test_datetimefield_to_python_microseconds(self):
        """DateTimeField.to_python() supports microseconds."""
        field = models.DateTimeField()
        self.assertEqual(
            field.to_python('2001-01-02 03:04:05.000006'),
            datetime.datetime(2001, 1, 2, 3, 4, 5, 6),
        )
        self.assertEqual(
            field.to_python('2001-01-02 03:04:05.999999'),
            datetime.datetime(2001, 1, 2, 3, 4, 5, 999999),
        )

    def test_timefield_to_python_microseconds(self):
        """TimeField.to_python() supports microseconds."""
        field = models.TimeField()
        self.assertEqual(field.to_python('01:02:03.000004'), datetime.time(1, 2, 3, 4))
        self.assertEqual(field.to_python('01:02:03.999999'), datetime.time(1, 2, 3, 999999))

    def test_datetimes_save_completely(self):
        """date, datetime, and time values round-trip through the database."""
        date_value = datetime.date(2014, 3, 12)
        datetime_value = datetime.datetime(2014, 3, 12, 21, 22, 23, 240000)
        time_value = datetime.time(21, 22, 23, 240000)
        DateTimeModel.objects.create(d=date_value, dt=datetime_value, t=time_value)
        obj = DateTimeModel.objects.first()
        self.assertTrue(obj)
        self.assertEqual(obj.d, date_value)
        self.assertEqual(obj.dt, datetime_value)
        self.assertEqual(obj.t, time_value)

    @override_settings(USE_TZ=False)
    def test_lookup_date_without_use_tz(self):
        """dt__date matches only rows whose datetime falls on the given date."""
        lookup_date = datetime.date(2014, 3, 12)
        matching_dt = datetime.datetime(2014, 3, 12, 21, 22, 23, 240000)
        other_dt = datetime.datetime(2014, 3, 11, 21, 22, 23, 240000)
        time_value = datetime.time(21, 22, 23, 240000)
        match = DateTimeModel.objects.create(d=lookup_date, dt=matching_dt, t=time_value)
        # A second row whose datetime lies on the previous day must not match.
        DateTimeModel.objects.create(d=lookup_date, dt=other_dt, t=time_value)
        self.assertEqual(match, DateTimeModel.objects.get(dt__date=lookup_date))

    @requires_tz_support
    @skipUnlessDBFeature('has_zoneinfo_database')
    @override_settings(USE_TZ=True, TIME_ZONE='America/Vancouver')
    def test_lookup_date_with_use_tz(self):
        """dt__date is evaluated in the currently active time zone."""
        lookup_date = datetime.date(2014, 3, 12)
        tz = timezone.get_current_timezone()
        early_dt = datetime.datetime(2014, 3, 12, 10, 22, 23, 240000, tzinfo=tz)
        late_dt = datetime.datetime(2014, 3, 12, 21, 22, 23, 240000, tzinfo=tz)
        time_value = datetime.time(21, 22, 23, 240000)
        first = DateTimeModel.objects.create(d=lookup_date, dt=early_dt, t=time_value)
        second = DateTimeModel.objects.create(d=lookup_date, dt=late_dt, t=time_value)
        # In the Vancouver zone both rows fall on the lookup date.
        self.assertCountEqual(
            DateTimeModel.objects.filter(dt__date=lookup_date),
            [first, second],
        )
        with self.settings(TIME_ZONE='UTC'):
            # Converted to UTC, only the earlier row still falls on that date.
            self.assertCountEqual(
                DateTimeModel.objects.filter(dt__date=lookup_date), [first]
            )
class ValidationTest(SimpleTestCase):
    """Field-level cleaning of date input."""

    def test_datefield_cleans_date(self):
        """DateField.clean() parses an ISO date string into a date."""
        field = models.DateField()
        cleaned = field.clean('2008-10-10', None)
        self.assertEqual(cleaned, datetime.date(2008, 10, 10))
| 43.210526 | 115 | 0.66687 | import datetime
from django.db import models
from django.test import (
SimpleTestCase, TestCase, override_settings, skipUnlessDBFeature,
)
from django.test.utils import requires_tz_support
from django.utils import timezone
from .models import DateTimeModel
class DateTimeFieldTests(TestCase):
def test_datetimefield_to_python_microseconds(self):
f = models.DateTimeField()
self.assertEqual(f.to_python('2001-01-02 03:04:05.000006'), datetime.datetime(2001, 1, 2, 3, 4, 5, 6))
self.assertEqual(f.to_python('2001-01-02 03:04:05.999999'), datetime.datetime(2001, 1, 2, 3, 4, 5, 999999))
def test_timefield_to_python_microseconds(self):
f = models.TimeField()
self.assertEqual(f.to_python('01:02:03.000004'), datetime.time(1, 2, 3, 4))
self.assertEqual(f.to_python('01:02:03.999999'), datetime.time(1, 2, 3, 999999))
def test_datetimes_save_completely(self):
dat = datetime.date(2014, 3, 12)
datetim = datetime.datetime(2014, 3, 12, 21, 22, 23, 240000)
tim = datetime.time(21, 22, 23, 240000)
DateTimeModel.objects.create(d=dat, dt=datetim, t=tim)
obj = DateTimeModel.objects.first()
self.assertTrue(obj)
self.assertEqual(obj.d, dat)
self.assertEqual(obj.dt, datetim)
self.assertEqual(obj.t, tim)
@override_settings(USE_TZ=False)
def test_lookup_date_without_use_tz(self):
d = datetime.date(2014, 3, 12)
dt1 = datetime.datetime(2014, 3, 12, 21, 22, 23, 240000)
dt2 = datetime.datetime(2014, 3, 11, 21, 22, 23, 240000)
t = datetime.time(21, 22, 23, 240000)
m = DateTimeModel.objects.create(d=d, dt=dt1, t=t)
DateTimeModel.objects.create(d=d, dt=dt2, t=t)
self.assertEqual(m, DateTimeModel.objects.get(dt__date=d))
@requires_tz_support
@skipUnlessDBFeature('has_zoneinfo_database')
@override_settings(USE_TZ=True, TIME_ZONE='America/Vancouver')
def test_lookup_date_with_use_tz(self):
d = datetime.date(2014, 3, 12)
dt1 = datetime.datetime(2014, 3, 12, 10, 22, 23, 240000, tzinfo=timezone.get_current_timezone())
dt2 = datetime.datetime(2014, 3, 12, 21, 22, 23, 240000, tzinfo=timezone.get_current_timezone())
t = datetime.time(21, 22, 23, 240000)
m1 = DateTimeModel.objects.create(d=d, dt=dt1, t=t)
m2 = DateTimeModel.objects.create(d=d, dt=dt2, t=t)
self.assertCountEqual(
DateTimeModel.objects.filter(dt__date=d),
[m1, m2],
)
with self.settings(TIME_ZONE='UTC'):
self.assertCountEqual(DateTimeModel.objects.filter(dt__date=d), [m1])
class ValidationTest(SimpleTestCase):
def test_datefield_cleans_date(self):
f = models.DateField()
self.assertEqual(datetime.date(2008, 10, 10), f.clean('2008-10-10', None))
| true | true |
1c37766f47d4d26e31b4e5368e90e8cb9c60838f | 3,887 | py | Python | tests/executors/test_debug_executor.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | [
"Apache-2.0"
] | 8 | 2017-04-20T16:15:44.000Z | 2020-10-11T13:44:10.000Z | tests/executors/test_debug_executor.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | [
"Apache-2.0"
] | 219 | 2017-03-15T18:40:16.000Z | 2022-02-28T22:52:43.000Z | tests/executors/test_debug_executor.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | [
"Apache-2.0"
] | 12 | 2020-01-09T14:02:39.000Z | 2022-01-24T07:18:51.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from unittest import mock
from unittest.mock import MagicMock
from airflow.executors.debug_executor import DebugExecutor
from airflow.utils.state import State
class TestDebugExecutor:
    """Unit tests for DebugExecutor, exercising it with mocked task instances."""
    @mock.patch("airflow.executors.debug_executor.DebugExecutor._run_task")
    def test_sync(self, run_task_mock):
        """sync() drains tasks_to_run and runs each task once, in order."""
        run_task_mock.return_value = True
        executor = DebugExecutor()
        ti1 = MagicMock(key="t1")
        ti2 = MagicMock(key="t2")
        executor.tasks_to_run = [ti1, ti2]
        executor.sync()
        assert not executor.tasks_to_run
        run_task_mock.assert_has_calls([mock.call(ti1), mock.call(ti2)])
    @mock.patch("airflow.executors.debug_executor.TaskInstance")
    def test_run_task(self, task_instance_mock):
        """_run_task() calls the instance's _run_raw_task with its job id."""
        ti_key = "key"
        job_id = " job_id"  # NOTE(review): leading space looks accidental but only
        # needs to be consistent within this test, so it is preserved as-is.
        task_instance_mock.key = ti_key
        task_instance_mock.job_id = job_id
        executor = DebugExecutor()
        # The key must already be tracked as running for _run_task to process it.
        executor.running = set([ti_key])
        succeeded = executor._run_task(task_instance_mock)
        assert succeeded
        task_instance_mock._run_raw_task.assert_called_once_with(job_id=job_id)
    def test_queue_task_instance(self):
        """queue_task_instance() records the task and stashes its extra kwargs."""
        key = "ti_key"
        ti = MagicMock(key=key)
        executor = DebugExecutor()
        executor.queue_task_instance(task_instance=ti, mark_success=True, pool="pool")
        assert key in executor.queued_tasks
        assert key in executor.tasks_params
        assert executor.tasks_params[key] == {
            "mark_success": True,
            "pool": "pool",
        }
    def test_trigger_tasks(self):
        """trigger_tasks() moves queued tasks to running without execute_async."""
        execute_async_mock = MagicMock()
        executor = DebugExecutor()
        executor.execute_async = execute_async_mock
        # Queued entries are (command, priority, queue, task_instance) tuples.
        executor.queued_tasks = {
            "t1": (None, 1, None, MagicMock(key="t1")),
            "t2": (None, 2, None, MagicMock(key="t2")),
        }
        executor.trigger_tasks(open_slots=4)
        assert not executor.queued_tasks
        assert len(executor.running) == 2
        assert len(executor.tasks_to_run) == 2
        # DebugExecutor runs tasks synchronously, so execute_async is never used.
        assert not execute_async_mock.called
    def test_end(self):
        """end() marks still-pending tasks UPSTREAM_FAILED and clears running."""
        ti = MagicMock(key="ti_key")
        executor = DebugExecutor()
        executor.tasks_to_run = [ti]
        executor.running = set([ti.key])
        executor.end()
        ti.set_state.assert_called_once_with(State.UPSTREAM_FAILED)
        assert not executor.running
    @mock.patch("airflow.executors.debug_executor.DebugExecutor.change_state")
    def test_fail_fast(self, change_state_mock):
        """With fail-fast enabled, one failure cascades UPSTREAM_FAILED to the rest."""
        with mock.patch.dict("os.environ", {"AIRFLOW__DEBUG__FAIL_FAST": "True"}):
            executor = DebugExecutor()
        ti1 = MagicMock(key="t1")
        ti2 = MagicMock(key="t2")
        # The first task raises; the second should never actually run.
        ti1._run_raw_task.side_effect = Exception
        executor.tasks_to_run = [ti1, ti2]
        executor.sync()
        assert executor.fail_fast
        assert not executor.tasks_to_run
        change_state_mock.assert_has_calls(
            [
                mock.call(ti1.key, State.FAILED),
                mock.call(ti2.key, State.UPSTREAM_FAILED),
            ]
        )
| 33.222222 | 86 | 0.67044 |
from unittest import mock
from unittest.mock import MagicMock
from airflow.executors.debug_executor import DebugExecutor
from airflow.utils.state import State
class TestDebugExecutor:
@mock.patch("airflow.executors.debug_executor.DebugExecutor._run_task")
def test_sync(self, run_task_mock):
run_task_mock.return_value = True
executor = DebugExecutor()
ti1 = MagicMock(key="t1")
ti2 = MagicMock(key="t2")
executor.tasks_to_run = [ti1, ti2]
executor.sync()
assert not executor.tasks_to_run
run_task_mock.assert_has_calls([mock.call(ti1), mock.call(ti2)])
@mock.patch("airflow.executors.debug_executor.TaskInstance")
def test_run_task(self, task_instance_mock):
ti_key = "key"
job_id = " job_id"
task_instance_mock.key = ti_key
task_instance_mock.job_id = job_id
executor = DebugExecutor()
executor.running = set([ti_key])
succeeded = executor._run_task(task_instance_mock)
assert succeeded
task_instance_mock._run_raw_task.assert_called_once_with(job_id=job_id)
def test_queue_task_instance(self):
key = "ti_key"
ti = MagicMock(key=key)
executor = DebugExecutor()
executor.queue_task_instance(task_instance=ti, mark_success=True, pool="pool")
assert key in executor.queued_tasks
assert key in executor.tasks_params
assert executor.tasks_params[key] == {
"mark_success": True,
"pool": "pool",
}
def test_trigger_tasks(self):
execute_async_mock = MagicMock()
executor = DebugExecutor()
executor.execute_async = execute_async_mock
executor.queued_tasks = {
"t1": (None, 1, None, MagicMock(key="t1")),
"t2": (None, 2, None, MagicMock(key="t2")),
}
executor.trigger_tasks(open_slots=4)
assert not executor.queued_tasks
assert len(executor.running) == 2
assert len(executor.tasks_to_run) == 2
assert not execute_async_mock.called
def test_end(self):
ti = MagicMock(key="ti_key")
executor = DebugExecutor()
executor.tasks_to_run = [ti]
executor.running = set([ti.key])
executor.end()
ti.set_state.assert_called_once_with(State.UPSTREAM_FAILED)
assert not executor.running
@mock.patch("airflow.executors.debug_executor.DebugExecutor.change_state")
def test_fail_fast(self, change_state_mock):
with mock.patch.dict("os.environ", {"AIRFLOW__DEBUG__FAIL_FAST": "True"}):
executor = DebugExecutor()
ti1 = MagicMock(key="t1")
ti2 = MagicMock(key="t2")
ti1._run_raw_task.side_effect = Exception
executor.tasks_to_run = [ti1, ti2]
executor.sync()
assert executor.fail_fast
assert not executor.tasks_to_run
change_state_mock.assert_has_calls(
[
mock.call(ti1.key, State.FAILED),
mock.call(ti2.key, State.UPSTREAM_FAILED),
]
)
| true | true |
1c377682872dd7bdff2c0e9c9a984c373303d8dc | 563 | py | Python | env/lib/python3.8/site-packages/plotly/validators/choroplethmapbox/stream/_token.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | env/lib/python3.8/site-packages/plotly/validators/choroplethmapbox/stream/_token.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | env/lib/python3.8/site-packages/plotly/validators/choroplethmapbox/stream/_token.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z | import _plotly_utils.basevalidators
class TokenValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``choroplethmapbox.stream.token`` property."""

    def __init__(
        self, plotly_name="token", parent_name="choroplethmapbox.stream", **kwargs
    ):
        # Pull overridable options out of kwargs, falling back to the
        # property's defaults, then hand everything to the base validator.
        edit_type = kwargs.pop("edit_type", "calc")
        no_blank = kwargs.pop("no_blank", True)
        role = kwargs.pop("role", "info")
        strict = kwargs.pop("strict", True)
        super(TokenValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            no_blank=no_blank,
            role=role,
            strict=strict,
            **kwargs
        )
| 33.117647 | 82 | 0.623446 | import _plotly_utils.basevalidators
class TokenValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="token", parent_name="choroplethmapbox.stream", **kwargs
):
super(TokenValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
no_blank=kwargs.pop("no_blank", True),
role=kwargs.pop("role", "info"),
strict=kwargs.pop("strict", True),
**kwargs
)
| true | true |
1c37775300054e63a7495c24c8732a2a07c8d6e5 | 5,649 | py | Python | Python Tutorial Reinforcement Learning/10_mario_a3c/src/process.py | PaulPan00/donkey_wrapper | a03cf0f42f65625fbce792b06c98acd153c5d6c8 | [
"MIT"
] | 6 | 2021-03-26T01:42:31.000Z | 2021-04-11T16:17:42.000Z | Python Tutorial Reinforcement Learning/10_mario_a3c/src/process.py | packetsss/Python | a03cf0f42f65625fbce792b06c98acd153c5d6c8 | [
"MIT"
] | null | null | null | Python Tutorial Reinforcement Learning/10_mario_a3c/src/process.py | packetsss/Python | a03cf0f42f65625fbce792b06c98acd153c5d6c8 | [
"MIT"
] | 7 | 2021-04-06T06:55:22.000Z | 2021-05-03T11:26:38.000Z | # Create by Packetsss
# Personal use is allowed
# Commercial use is prohibited
"""
@author: Viet Nguyen <nhviet1009@gmail.com>
"""
import torch
from src.env import create_train_env
from src.model import ActorCritic
import torch.nn.functional as F
from torch.distributions import Categorical
from collections import deque
from tensorboardX import SummaryWriter
import timeit
def local_train(index, opt, global_model, optimizer, save=False):
    """A3C worker loop: roll out episodes locally, push gradients to the
    shared global model.

    :param index: worker index; also perturbs the random seed.
    :param opt: parsed command-line options (hyperparameters, paths, flags).
    :param global_model: shared ActorCritic whose weights all workers sync.
    :param optimizer: shared optimizer stepping the global parameters.
    :param save: when True this worker also checkpoints and logs progress.
    """
    # Distinct seed per worker so rollouts diverge.
    torch.manual_seed(123 + index)
    if save:
        start_time = timeit.default_timer()
        # NOTE(review): writer appears to be created only when save is True,
        # yet writer.add_scalar below runs unconditionally -- non-saving
        # workers would hit a NameError. Confirm the intended indentation.
        writer = SummaryWriter(opt.log_path)
    env, num_states, num_actions = create_train_env(opt.world, opt.stage, opt.action_type)
    local_model = ActorCritic(num_states, num_actions)
    if opt.use_gpu:
        local_model.cuda()
    local_model.train()
    state = torch.from_numpy(env.reset())
    if opt.use_gpu:
        state = state.cuda()
    done = True
    curr_step = 0
    curr_episode = 0
    while True:
        if save:
            if curr_episode % opt.save_interval == 0 and curr_episode > 0:
                torch.save(global_model.state_dict(),
                           "{}/a3c_super_mario_bros_{}_{}".format(opt.saved_path, opt.world, opt.stage))
            print("Process {}. Episode {}".format(index, curr_episode))
        curr_episode += 1
        # Start each episode segment from the latest global weights.
        local_model.load_state_dict(global_model.state_dict())
        if done:
            # Fresh LSTM hidden/cell state at episode boundaries.
            h_0 = torch.zeros((1, 512), dtype=torch.float)
            c_0 = torch.zeros((1, 512), dtype=torch.float)
        else:
            # Carry state across segments but cut the backprop graph.
            h_0 = h_0.detach()
            c_0 = c_0.detach()
        if opt.use_gpu:
            h_0 = h_0.cuda()
            c_0 = c_0.cuda()
        log_policies = []
        values = []
        rewards = []
        entropies = []
        # Roll out up to num_local_steps transitions before an update.
        for _ in range(opt.num_local_steps):
            curr_step += 1
            logits, value, h_0, c_0 = local_model(state, h_0, c_0)
            policy = F.softmax(logits, dim=1)
            log_policy = F.log_softmax(logits, dim=1)
            entropy = -(policy * log_policy).sum(1, keepdim=True)
            # Sample an action from the current policy distribution.
            m = Categorical(policy)
            action = m.sample().item()
            state, reward, done, _ = env.step(action)
            state = torch.from_numpy(state)
            if opt.use_gpu:
                state = state.cuda()
            if curr_step > opt.num_global_steps:
                done = True
            if done:
                curr_step = 0
                state = torch.from_numpy(env.reset())
                if opt.use_gpu:
                    state = state.cuda()
            values.append(value)
            log_policies.append(log_policy[0, action])
            rewards.append(reward)
            entropies.append(entropy)
            if done:
                break
        # Bootstrap the return R from the critic unless the episode ended.
        R = torch.zeros((1, 1), dtype=torch.float)
        if opt.use_gpu:
            R = R.cuda()
        if not done:
            _, R, _, _ = local_model(state, h_0, c_0)
        gae = torch.zeros((1, 1), dtype=torch.float)
        if opt.use_gpu:
            gae = gae.cuda()
        actor_loss = 0
        critic_loss = 0
        entropy_loss = 0
        next_value = R
        # Walk the rollout backwards, accumulating GAE and the losses.
        for value, log_policy, reward, entropy in list(zip(values, log_policies, rewards, entropies))[::-1]:
            gae = gae * opt.gamma * opt.tau
            gae = gae + reward + opt.gamma * next_value.detach() - value.detach()
            next_value = value
            actor_loss = actor_loss + log_policy * gae
            R = R * opt.gamma + reward
            critic_loss = critic_loss + (R - value) ** 2 / 2
            entropy_loss = entropy_loss + entropy
        # Entropy bonus (weighted by beta) encourages exploration.
        total_loss = -actor_loss + critic_loss - opt.beta * entropy_loss
        writer.add_scalar("Train_{}/Loss".format(index), total_loss, curr_episode)
        optimizer.zero_grad()
        total_loss.backward()
        # Copy local gradients onto the shared model; the early break skips
        # the copy if another worker's gradients are already in place
        # (standard A3C "ensure shared grads" idiom).
        for local_param, global_param in zip(local_model.parameters(), global_model.parameters()):
            if global_param.grad is not None:
                break
            global_param._grad = local_param.grad
        optimizer.step()
        if curr_episode == int(opt.num_global_steps / opt.num_local_steps):
            print("Training process {} terminated".format(index))
            if save:
                end_time = timeit.default_timer()
                print('The code runs for %.2f s ' % (end_time - start_time))
            return
def local_test(index, opt, global_model):
    """Evaluation worker: greedily play the environment with the current
    global weights and render it, restarting on episode end or stagnation.

    :param index: worker index; perturbs the random seed.
    :param opt: parsed command-line options.
    :param global_model: shared ActorCritic to copy weights from.
    """
    torch.manual_seed(123 + index)
    env, num_states, num_actions = create_train_env(opt.world, opt.stage, opt.action_type)
    local_model = ActorCritic(num_states, num_actions)
    local_model.eval()
    state = torch.from_numpy(env.reset())
    done = True
    curr_step = 0
    # Ring buffer of recent actions, used to detect the agent being stuck.
    actions = deque(maxlen=opt.max_actions)
    while True:
        curr_step += 1
        if done:
            # Re-sync with the latest global weights at each episode start.
            local_model.load_state_dict(global_model.state_dict())
        with torch.no_grad():
            if done:
                h_0 = torch.zeros((1, 512), dtype=torch.float)
                c_0 = torch.zeros((1, 512), dtype=torch.float)
            else:
                h_0 = h_0.detach()
                c_0 = c_0.detach()
            logits, value, h_0, c_0 = local_model(state, h_0, c_0)
            policy = F.softmax(logits, dim=1)
        # Greedy action (argmax) rather than sampling, since this is evaluation.
        action = torch.argmax(policy).item()
        state, reward, done, _ = env.step(action)
        env.render()
        actions.append(action)
        # Restart when out of steps or when the last max_actions actions were
        # all identical to the oldest one (agent presumably stuck).
        if curr_step > opt.num_global_steps or actions.count(actions[0]) == actions.maxlen:
            done = True
        if done:
            curr_step = 0
            actions.clear()
            state = env.reset()
        state = torch.from_numpy(state)
| 34.03012 | 108 | 0.576562 |
import torch
from src.env import create_train_env
from src.model import ActorCritic
import torch.nn.functional as F
from torch.distributions import Categorical
from collections import deque
from tensorboardX import SummaryWriter
import timeit
def local_train(index, opt, global_model, optimizer, save=False):
torch.manual_seed(123 + index)
if save:
start_time = timeit.default_timer()
writer = SummaryWriter(opt.log_path)
env, num_states, num_actions = create_train_env(opt.world, opt.stage, opt.action_type)
local_model = ActorCritic(num_states, num_actions)
if opt.use_gpu:
local_model.cuda()
local_model.train()
state = torch.from_numpy(env.reset())
if opt.use_gpu:
state = state.cuda()
done = True
curr_step = 0
curr_episode = 0
while True:
if save:
if curr_episode % opt.save_interval == 0 and curr_episode > 0:
torch.save(global_model.state_dict(),
"{}/a3c_super_mario_bros_{}_{}".format(opt.saved_path, opt.world, opt.stage))
print("Process {}. Episode {}".format(index, curr_episode))
curr_episode += 1
local_model.load_state_dict(global_model.state_dict())
if done:
h_0 = torch.zeros((1, 512), dtype=torch.float)
c_0 = torch.zeros((1, 512), dtype=torch.float)
else:
h_0 = h_0.detach()
c_0 = c_0.detach()
if opt.use_gpu:
h_0 = h_0.cuda()
c_0 = c_0.cuda()
log_policies = []
values = []
rewards = []
entropies = []
for _ in range(opt.num_local_steps):
curr_step += 1
logits, value, h_0, c_0 = local_model(state, h_0, c_0)
policy = F.softmax(logits, dim=1)
log_policy = F.log_softmax(logits, dim=1)
entropy = -(policy * log_policy).sum(1, keepdim=True)
m = Categorical(policy)
action = m.sample().item()
state, reward, done, _ = env.step(action)
state = torch.from_numpy(state)
if opt.use_gpu:
state = state.cuda()
if curr_step > opt.num_global_steps:
done = True
if done:
curr_step = 0
state = torch.from_numpy(env.reset())
if opt.use_gpu:
state = state.cuda()
values.append(value)
log_policies.append(log_policy[0, action])
rewards.append(reward)
entropies.append(entropy)
if done:
break
R = torch.zeros((1, 1), dtype=torch.float)
if opt.use_gpu:
R = R.cuda()
if not done:
_, R, _, _ = local_model(state, h_0, c_0)
gae = torch.zeros((1, 1), dtype=torch.float)
if opt.use_gpu:
gae = gae.cuda()
actor_loss = 0
critic_loss = 0
entropy_loss = 0
next_value = R
for value, log_policy, reward, entropy in list(zip(values, log_policies, rewards, entropies))[::-1]:
gae = gae * opt.gamma * opt.tau
gae = gae + reward + opt.gamma * next_value.detach() - value.detach()
next_value = value
actor_loss = actor_loss + log_policy * gae
R = R * opt.gamma + reward
critic_loss = critic_loss + (R - value) ** 2 / 2
entropy_loss = entropy_loss + entropy
total_loss = -actor_loss + critic_loss - opt.beta * entropy_loss
writer.add_scalar("Train_{}/Loss".format(index), total_loss, curr_episode)
optimizer.zero_grad()
total_loss.backward()
for local_param, global_param in zip(local_model.parameters(), global_model.parameters()):
if global_param.grad is not None:
break
global_param._grad = local_param.grad
optimizer.step()
if curr_episode == int(opt.num_global_steps / opt.num_local_steps):
print("Training process {} terminated".format(index))
if save:
end_time = timeit.default_timer()
print('The code runs for %.2f s ' % (end_time - start_time))
return
def local_test(index, opt, global_model):
torch.manual_seed(123 + index)
env, num_states, num_actions = create_train_env(opt.world, opt.stage, opt.action_type)
local_model = ActorCritic(num_states, num_actions)
local_model.eval()
state = torch.from_numpy(env.reset())
done = True
curr_step = 0
actions = deque(maxlen=opt.max_actions)
while True:
curr_step += 1
if done:
local_model.load_state_dict(global_model.state_dict())
with torch.no_grad():
if done:
h_0 = torch.zeros((1, 512), dtype=torch.float)
c_0 = torch.zeros((1, 512), dtype=torch.float)
else:
h_0 = h_0.detach()
c_0 = c_0.detach()
logits, value, h_0, c_0 = local_model(state, h_0, c_0)
policy = F.softmax(logits, dim=1)
action = torch.argmax(policy).item()
state, reward, done, _ = env.step(action)
env.render()
actions.append(action)
if curr_step > opt.num_global_steps or actions.count(actions[0]) == actions.maxlen:
done = True
if done:
curr_step = 0
actions.clear()
state = env.reset()
state = torch.from_numpy(state)
| true | true |
1c3778b44b02b78550788610afcbd3ba008c32b8 | 10,024 | py | Python | src/m2.py | hinetg/05a-Debugging-201930 | 59977b30596abb722b5d12337ea771cecbca34d7 | [
"MIT"
] | null | null | null | src/m2.py | hinetg/05a-Debugging-201930 | 59977b30596abb722b5d12337ea771cecbca34d7 | [
"MIT"
] | null | null | null | src/m2.py | hinetg/05a-Debugging-201930 | 59977b30596abb722b5d12337ea771cecbca34d7 | [
"MIT"
] | null | null | null | """
This module lets you practice DEBUGGING when RUN-TIME EXCEPTIONS occur,
focusing here on AttributeError exceptions:
'BLAHType' object has no attribute 'FOO'
and on TypeError exceptions, in particular those of the form:
'BLAHType' object is not callable.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Valerie Galluzzi, Mark Hays, Amanda Stouder, Aaron Wilkin,
their colleagues, and PUT_YOUR_NAME_HERE.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
###############################################################################
#
# DONE: 2. READ these instructions, ASKING QUESTIONS as needed.
#
# This module contains "broken" functions, as in m1.py.
# FOLLOW THE SAME STEPS as in the instructions of m1.py
# to find and correct the mistakes in these functions.
#
# The broken functions herein have the SAME SPECIFICATIONS
# as those in the m1 module. Therefore, you can use the
# SAME PICTURES (in the file m1_pictures.pdf) as you used
# for determining whether your corrected code passes the tests.
#
# *** IMPORTANT: ***
# Do NOT look back to m1.py to solve THESE problems.
# That would greatly diminish what you learn from THESE problems.
#
# *** IMPORTANT: ***
# Resist the urge to "fiddle" with the code until you stumble
# upon something that works. This exercise will be helpful
# to you ONLY if you use it as an opportunity to learn
# what the error messages mean and how to react to them.
#
# *** ASK QUESTIONS AS NEEDED! ***
#
# When you believe you understand these instructions,
# change the above TO DO to DONE.
#
###############################################################################
def main():
    """ Calls the TEST functions in this module. """
    # run_test_all opens RoseWindows for the graphical tests and prints
    # the numeric results for broken_6 to the console.
    run_test_all()
###############################################################################
# Students: Do NOT change the following tests.
# There are NO errors in the TESTS.
###############################################################################
def run_test_all():
    """
    Tests ALL the functions in this module.

    Each graphical test opens a RoseWindow that must be closed with a
    mouse click before the next test runs; the broken_6 test prints its
    expected and actual values to the console for visual comparison.
    """
    # Test broken_1:
    window = rg.RoseWindow(title='Testing BROKEN_1')
    circle1 = rg.Circle(rg.Point(50, 50), 15)
    circle1.fill_color = 'blue'
    broken_1(circle1, window)  # Test 1 of broken_1
    circle2 = rg.Circle(rg.Point(70, 150), 30)
    circle2.fill_color = 'red'
    broken_1(circle2, window)  # Test 2 of broken_1
    window.close_on_mouse_click()
    # Test broken_2:
    window = rg.RoseWindow(title='Testing BROKEN_2')
    broken_2(50, 75, window)  # Test 1 of broken_2
    broken_2(100, 150, window)  # Test 2 of broken_2
    window.close_on_mouse_click()
    # Test broken_3:
    window = rg.RoseWindow(title='Testing BROKEN_3')
    broken_3(5, rg.Point(100, 50), 80, 20, window)  # Test 1 of broken_3
    broken_3(3, rg.Point(50, 150), 40, 50, window)  # Test 2 of broken_3
    window.close_on_mouse_click()
    # Test broken_4:
    window = rg.RoseWindow(title='Testing BROKEN_4')
    broken_4(50, 75, 40, window)  # Test 1 of broken_4
    broken_4(100, 150, 75, window)  # Test 2 of broken_4
    window.close_on_mouse_click()
    # Test broken_5:
    window = rg.RoseWindow(title='Testing BROKEN_5')
    circle = rg.Circle(rg.Point(100, 50), 30)
    circle.fill_color = 'pink'
    broken_5(circle, window)  # Test 1 of broken_5
    circle = rg.Circle(rg.Point(250, 100), 80)
    circle.fill_color = 'red'
    broken_5(circle, window)  # Test 2 of broken_5
    window.close_on_mouse_click()
    # Test broken_6 (expected values are the harmonic sums H(3) and H(100)):
    expected = 1.8333333
    actual = broken_6(3)  # Test 1 of broken_6
    print("Testing BROKEN_6:\n")
    print('Expected for BROKEN_6, Test 1:', expected, '(approximately)')
    print('  Actual for BROKEN_6, Test 1:', actual)
    expected = 5.1873775
    actual = broken_6(100)  # Test 2 of broken_6
    print()
    print('Expected for BROKEN_6, Test 2:', expected, '(approximately)')
    print('  Actual for BROKEN_6, Test 2:', actual)
    print()
# -----------------------------------------------------------------------------
# DONE: 3. Follow the INSTRUCTIONS AT THE TOP OF THIS MODULE
# to correct the mistake(s) in the following function.
# -----------------------------------------------------------------------------
def broken_1(circle, window):
    """
    What comes in: an rg.Circle and an rg.RoseWindow.
    What goes out: Nothing (i.e., None).
    Side effects:
      Draws the given rg.Circle on the given rg.RoseWindow, then draws
      a second, concentric rg.Circle whose radius is TWICE as large.
      Must ** render ** but ** NOT close ** the window.
    Type hints:
      :type circle: rg.Circle
      :type window: rg.RoseWindow
    """
    doubled = rg.Circle(circle.center, circle.radius * 2)
    circle.attach_to(window)
    doubled.attach_to(window)
    window.render()
# -----------------------------------------------------------------------------
# DONE: 4. Follow the INSTRUCTIONS AT THE TOP OF THIS MODULE
# to correct the mistake(s) in the following function.
# -----------------------------------------------------------------------------
def broken_2(x, y, window):
    """
    What comes in: Positive integers x and y, and an rg.RoseWindow.
    What goes out: Nothing (i.e., None).
    Side effects:
      Draws an rg.Circle with radius 33, centered at (x, y),
      on the given rg.RoseWindow.
      Must ** render ** but ** NOT close ** the window.
    Type hints:
      :type x: int
      :type y: int
      :type window: rg.RoseWindow
    """
    center = rg.Point(x, y)
    rg.Circle(center, 33).attach_to(window)
    window.render()
# -----------------------------------------------------------------------------
# DONE: 5. Follow the INSTRUCTIONS AT THE TOP OF THIS MODULE
# to correct the mistake(s) in the following function.
# -----------------------------------------------------------------------------
def broken_3(n, point, length, distance_between_lines, window):
    """
    What comes in: The four arguments are:
      -- A positive integer n.
      -- An rg.Point.
      -- A positive integer length.
      -- An rg.RoseWindow.
    What goes out: Nothing (i.e., None).
    Side effects:
      Draws n vertical rg.Lines on the given rg.RoseWindow,
      where the leftmost rg.Line has the given point as its topmost
      point and all the rg.Lines have the given length
      and they are the given distance apart.
      Each line is drawn with a 0.5 second pause after drawing it.
      Must ** render ** but ** NOT close ** the window.
    Type hints:
      :type n: int
      :type point: rg.Point
      :type length: int
      :type distance_between_lines: int
      :type window: rg.RoseWindow
    """
    top = rg.Point(point.x, point.y)
    bottom = rg.Point(point.x, point.y + length)
    for _ in range(n):
        # FIX: the line object used to be stored in a local named  length,
        # shadowing the  length  parameter. It only worked by accident
        # (length is read just once, before the loop); use a clear name.
        line = rg.Line(top, bottom)
        line.attach_to(window)
        window.render(0.5)
        # Shift both endpoints rightwards for the next line.
        top = rg.Point(top.x + distance_between_lines, top.y)
        bottom = rg.Point(bottom.x + distance_between_lines, bottom.y)
# -----------------------------------------------------------------------------
# DONE: 6. Follow the INSTRUCTIONS AT THE TOP OF THIS MODULE
# to correct the mistake(s) in the following function.
# -----------------------------------------------------------------------------
def broken_4(x, y, radius, window):
    """
    What comes in: Positive integers x and y, a positive integer radius,
      and an rg.RoseWindow.
    What goes out: Nothing (i.e., None).
    Side effects:
      Draws a green-filled rg.Circle with the given radius,
      centered at (x, y), on the given rg.RoseWindow.
      Must ** render ** but ** NOT close ** the window.
    Type hints:
      :type x: int
      :type y: int
      :type radius: int
      :type window: rg.RoseWindow
    """
    disk = rg.Circle(rg.Point(x, y), radius)
    disk.fill_color = 'green'
    disk.attach_to(window)
    window.render()
# -----------------------------------------------------------------------------
# DONE: 7. Follow the INSTRUCTIONS AT THE TOP OF THIS MODULE
# to correct the mistake(s) in the following function.
# -----------------------------------------------------------------------------
def broken_5(circle, window):
    """
    What comes in: an rg.Circle and an rg.RoseWindow.
    What goes out: Nothing (i.e., None).
    Side effects:
      Draws the given rg.Circle and an rg.Square that circumscribes it,
      both on the given rg.RoseWindow,
      with the rg.Square having the same OUTLINE color
      as the FILL color of the given rg.Circle.
      Must ** render ** but ** NOT close ** the window.
    Type hints:
      :type circle: rg.Circle
      :type window: rg.RoseWindow
    """
    circle.attach_to(window)
    square = rg.Square(circle.center, 2 * circle.radius)
    # BUG FIX: the rosegraphics attribute is  outline_color  (with an
    # underscore). Assigning to  outlinecolor  silently created an unused
    # attribute, so the square kept the default outline color.
    square.outline_color = circle.fill_color
    square.attach_to(window)
    window.render()
# -----------------------------------------------------------------------------
# DONE: 8. Follow the INSTRUCTIONS AT THE TOP OF THIS MODULE
# to correct the mistake(s) in the following function.
# -----------------------------------------------------------------------------
def broken_6(n):
    """
    What comes in: A positive integer n.
    What goes out: Returns the harmonic sum:
      1 + 1/2 + 1/3 + ... + 1/n.
    Side effects: None.
    """
    # Sum the reciprocals of 1..n (inclusive); 0 for an empty range.
    return sum(1 / k for k in range(1, n + 1))
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()  # Note: runs on import too, since there is no __name__ guard.
| 36.450909 | 79 | 0.543895 |
import rosegraphics as rg
| true | true |
1c3778e807e3d679801befdc2b80d6a0bb75d415 | 7,279 | py | Python | tests/test-scenario/test_scenario_deployment.py | KellyGriffin/kalc | 9b78c4177ed9ffccbf1ecfbf9a7946286cd7c583 | [
"Apache-2.0"
] | null | null | null | tests/test-scenario/test_scenario_deployment.py | KellyGriffin/kalc | 9b78c4177ed9ffccbf1ecfbf9a7946286cd7c583 | [
"Apache-2.0"
] | null | null | null | tests/test-scenario/test_scenario_deployment.py | KellyGriffin/kalc | 9b78c4177ed9ffccbf1ecfbf9a7946286cd7c583 | [
"Apache-2.0"
] | null | null | null | import pytest
import yaml
from kalc.model.kubernetes import KubernetesCluster
from kalc.model.kinds.Pod import Pod
from kalc.model.kinds.Node import Node
from kalc.model.kinds.Service import Service
from kalc.model.kinds.PriorityClass import PriorityClass
from kalc.model.system.Scheduler import Scheduler
from kalc.misc.const import *
from kalc.model.search import K8ServiceInterruptSearch, Check_services, OptimisticRun
from kalc.misc.object_factory import labelFactory
from poodle import debug_plan
from poodle.schedule import EmptyPlanError
from kalc.model.scenario import Scenario
import kalc.model.kinds.Service as mservice
from tests.test_util import print_objects
import kalc.model.kinds.Pod as mpod
# Fixture: new deployment to create (replicas 3, cpu: 100m, memory: 500Mi).
DEPLOYMENT_NEW = "./tests/test-scenario/deployment/deployment-new.yaml"
DEPLOYMENT_NEW_WO_PRIO = "./tests/test-scenario/deployment/deployment-new-wo-priority.yaml"
DUMP = "./tests/test-scenario/deployment/dump"
# Node fixtures: cpu = 940m * 2, memory = 2701496Ki + 2701504Ki.
NODE1 = "./tests/test-scenario/deployment/dump/node1.yaml"
NODE2 = "./tests/test-scenario/deployment/dump/node2.yaml"
# Pod fixtures: cpu = 100m * 7, memory = 500m * 5.
PODS = "./tests/test-scenario/deployment/dump/pods.yaml"
# Same pods, but one is Pending.  TODO: we may need to load this state from a
# live cluster instead of a static dump.
PODS_PENDING = "./tests/test-scenario/deployment/dump/pods_pending.yaml"
SERVICES = "./tests/test-scenario/deployment/dump/services.yaml"
REPLICASETS = "./tests/test-scenario/deployment/dump/replicasets.yaml"
PRIORITYCLASSES = "./tests/test-scenario/deployment/dump/priorityclass.yaml"
DEPLOYMENT = "./tests/test-scenario/deployment/dump/deployments.yaml"
@pytest.mark.nofast(reason="took time 124.63s")
def test_start_pod():
    """Creates a new deployment on a two-node dump and checks all pods start.

    Loads the cluster state from the static YAML dumps, adds DEPLOYMENT_NEW,
    then asks the solver for a plan whose goal is every Pod being Running.
    """
    k = KubernetesCluster()
    k.load(open(NODE1).read())
    k.load(open(NODE2).read())
    k.load(open(PODS).read())
    # k.load(open(PODS_PENDING).read())
    k.load(open(SERVICES).read())
    k.load(open(REPLICASETS).read())
    k.load(open(PRIORITYCLASSES).read())
    k.load(open(DEPLOYMENT).read())
    k.create_resource(open(DEPLOYMENT_NEW).read())
    k._build_state()
    class PodStart(K8ServiceInterruptSearch):
        # Goal: every Pod object in the cluster state is Running.
        goal = lambda self: self.goalFoo()
        def goalFoo(self):
            for pod in filter(lambda x: isinstance(x, mpod.Pod), k.state_objects):
                if pod.status != STATUS_POD["Running"]:
                    return False
            return True
    p = PodStart(k.state_objects) # self.scheduler.status == STATUS_SCHED["Clean"]
    # print_objects(k.state_objects)
    p.run(timeout=6600, sessionName="test_start_pods")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print(Scenario(p.plan).asyaml())
    assert "StartPod" in p.plan.__str__() # test for transition from Pending to Running
    pods = filter(lambda x: isinstance(x, mpod.Pod), k.state_objects)
    nodes = filter(lambda x: isinstance(x, Node), k.state_objects)
    for pod in pods:
        # NOTE(review): `nodes` is a builtin filter object -- it has no
        # _get_value() and is exhausted after one pass; confirm this
        # assertion behaves as intended (see also test_killpod).
        assert pod.atNode in nodes._get_value()
class QueueLoadCheck(K8ServiceInterruptSearch):
    """Search goal: the scheduler has acted on its queue (status Changed)."""
    goal = lambda self: self.scheduler.status == STATUS_SCHED["Changed"]
#we have pod with Pendining status in dump we should get it in Running status
@pytest.mark.skip(reason="specific scenario is not selected")
def test_start_pod_from_dump():
    """Dump contains a Pending pod; the solver should bring it to Running."""
    k = KubernetesCluster()
    k.load(open(NODE1).read())
    k.load(open(NODE2).read())
    k.load(open(PODS_PENDING).read())
    k.load(open(SERVICES).read())
    k.load(open(REPLICASETS).read())
    k.load(open(PRIORITYCLASSES).read())
    k.load(open(DEPLOYMENT).read())
    k._build_state()
    p = QueueLoadCheck(k.state_objects) # self.scheduler.status == STATUS_SCHED["Clean"]
    # print_objects(k.state_objects)
    p.run(timeout=6600, sessionName="test_start_pod_from_dump")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print(Scenario(p.plan).asyaml())
    assert "StartPod" in p.plan.__str__() # test for transition from Pending to Running
    pods = filter(lambda x: isinstance(x, mpod.Pod), k.state_objects)
    nodes = filter(lambda x: isinstance(x, Node), k.state_objects)
    for pod in pods:
        # NOTE(review): `nodes` is a lazy filter object with no _get_value();
        # confirm this membership check runs as intended.
        assert pod.atNode in nodes._get_value() # check each pod than each have atNode
#we have pod with Running status in dump kubernites shoul kill pods with lower piority then created
@pytest.mark.skip(reason="specific scenario is not selected")
def test_killpod():
    """Higher-priority deployment on a single node should evict older pods.

    Dump pods are Running; creating DEPLOYMENT_NEW (which carries a priority
    class) on a single, resource-trimmed node should force the solver to kill
    lower-priority pods to make room.
    """
    k = KubernetesCluster()
    k.load(open(NODE1).read())  # trim resources: run only one Node
    k.load(open(PODS).read())
    k.load(open(SERVICES).read())
    k.load(open(REPLICASETS).read())
    k.load(open(PRIORITYCLASSES).read())
    k.load(open(DEPLOYMENT).read())
    k.create_resource(open(DEPLOYMENT_NEW).read())
    k._build_state()
    p = OptimisticRun(k.state_objects)  # TODO check me, i'd like to run exiction test with killpod execution
    # print_objects(k.state_objects)
    p.run(timeout=6600, sessionName="test_start_pods")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print(Scenario(p.plan).asyaml())
    assert "StartPod" in p.plan.__str__()  # transition from Pending to Running
    # BUG FIX: the original filtered with `!=` (keeping every pod *except* the
    # wanted status, contradicting the comments and variable names) and then
    # called len() on a lazy `filter` object, which raises TypeError.
    # Materialize plain lists and match the intended status with `==`.
    pods = [x for x in k.state_objects if isinstance(x, mpod.Pod)]
    nodes = [x for x in k.state_objects if isinstance(x, Node)]
    running_pods = [pod for pod in pods if pod.status == STATUS_POD["Running"]]
    for pod in running_pods:
        # NOTE(review): original compared against `nodes._get_value()` on a
        # filter object (no such attribute); plain membership against the
        # node list is assumed equivalent -- confirm against poodle's API.
        assert pod.atNode in nodes
    killing_pods = [pod for pod in pods if pod.status == STATUS_POD["Killing"]]
    assert len(killing_pods) > 0  # some pod was evicted
#we have pod with Running status in dump we should get "pod cant start" because our new pods have the same priority as are ran pods
@pytest.mark.skip(reason="specific scenario is not selected")
def test_pod_cant_start():
    """New deployment without a priority class should not be schedulable.

    Only one node is loaded and the new pods carry the same priority as the
    running ones, so there is nothing to preempt; the expected outcome is
    that the new pods cannot start (assertion still TODO below).
    """
    k = KubernetesCluster()
    k.load(open(NODE1).read()) # trim resource, run only one Node
    k.load(open(PODS).read())
    k.load(open(SERVICES).read())
    k.load(open(REPLICASETS).read())
    k.load(open(PRIORITYCLASSES).read())
    k.load(open(DEPLOYMENT).read())
    k.create_resource(open(DEPLOYMENT_NEW_WO_PRIO).read())
    k._build_state()
    p = OptimisticRun(k.state_objects) # TODO check me, i'd like to run exiction test with killpod execution
    # print_objects(k.state_objects)
    p.run(timeout=6600, sessionName="test_pod_cant_start")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print(Scenario(p.plan).asyaml())
    #get pods only in Running state to check atNode value
    #TODO check pod cant start
    # runningPods = filter(lambda z: z.status != STATUS_POD["Running"], (filter(lambda x: isinstance(x, mpod.Pod), k.state_objects)))
    # nodes = filter(lambda x: isinstance(x, Node), k.state_objects)
    # for pod in runningPods:
    #     assert pod.atNode in nodes._get_value() # check each pod than each have atNode
    # killingPods = filter(lambda z: z.status != STATUS_POD["Killing"], (filter(lambda x: isinstance(x, mpod.Pod), k.state_objects)))
    # assert len(killingPods) > 0 # test that some pod Killed
import yaml
from kalc.model.kubernetes import KubernetesCluster
from kalc.model.kinds.Pod import Pod
from kalc.model.kinds.Node import Node
from kalc.model.kinds.Service import Service
from kalc.model.kinds.PriorityClass import PriorityClass
from kalc.model.system.Scheduler import Scheduler
from kalc.misc.const import *
from kalc.model.search import K8ServiceInterruptSearch, Check_services, OptimisticRun
from kalc.misc.object_factory import labelFactory
from poodle import debug_plan
from poodle.schedule import EmptyPlanError
from kalc.model.scenario import Scenario
import kalc.model.kinds.Service as mservice
from tests.test_util import print_objects
import kalc.model.kinds.Pod as mpod
DEPLOYMENT_NEW = "./tests/test-scenario/deployment/deployment-new.yaml"
DEPLOYMENT_NEW_WO_PRIO = "./tests/test-scenario/deployment/deployment-new-wo-priority.yaml"
DUMP = "./tests/test-scenario/deployment/dump"
NODE1 = "./tests/test-scenario/deployment/dump/node1.yaml"
NODE2 = "./tests/test-scenario/deployment/dump/node2.yaml"
PODS = "./tests/test-scenario/deployment/dump/pods.yaml"
PODS_PENDING = "./tests/test-scenario/deployment/dump/pods_pending.yaml"
SERVICES = "./tests/test-scenario/deployment/dump/services.yaml"
REPLICASETS = "./tests/test-scenario/deployment/dump/replicasets.yaml"
PRIORITYCLASSES = "./tests/test-scenario/deployment/dump/priorityclass.yaml"
DEPLOYMENT = "./tests/test-scenario/deployment/dump/deployments.yaml"
@pytest.mark.nofast(reason="took time 124.63s")
def test_start_pod():
k = KubernetesCluster()
k.load(open(NODE1).read())
k.load(open(NODE2).read())
k.load(open(PODS).read())
k.load(open(SERVICES).read())
k.load(open(REPLICASETS).read())
k.load(open(PRIORITYCLASSES).read())
k.load(open(DEPLOYMENT).read())
k.create_resource(open(DEPLOYMENT_NEW).read())
k._build_state()
class PodStart(K8ServiceInterruptSearch):
goal = lambda self: self.goalFoo()
def goalFoo(self):
for pod in filter(lambda x: isinstance(x, mpod.Pod), k.state_objects):
if pod.status != STATUS_POD["Running"]:
return False
return True
p = PodStart(k.state_objects)
p.run(timeout=6600, sessionName="test_start_pods")
if not p.plan:
raise Exception("Could not solve %s" % p.__class__.__name__)
print(Scenario(p.plan).asyaml())
assert "StartPod" in p.plan.__str__()
pods = filter(lambda x: isinstance(x, mpod.Pod), k.state_objects)
nodes = filter(lambda x: isinstance(x, Node), k.state_objects)
for pod in pods:
assert pod.atNode in nodes._get_value()
class QueueLoadCheck(K8ServiceInterruptSearch):
goal = lambda self: self.scheduler.status == STATUS_SCHED["Changed"]
@pytest.mark.skip(reason="specific scenario is not selected")
def test_start_pod_from_dump():
k = KubernetesCluster()
k.load(open(NODE1).read())
k.load(open(NODE2).read())
k.load(open(PODS_PENDING).read())
k.load(open(SERVICES).read())
k.load(open(REPLICASETS).read())
k.load(open(PRIORITYCLASSES).read())
k.load(open(DEPLOYMENT).read())
k._build_state()
p = QueueLoadCheck(k.state_objects)
p.run(timeout=6600, sessionName="test_start_pod_from_dump")
if not p.plan:
raise Exception("Could not solve %s" % p.__class__.__name__)
print(Scenario(p.plan).asyaml())
assert "StartPod" in p.plan.__str__()
pods = filter(lambda x: isinstance(x, mpod.Pod), k.state_objects)
nodes = filter(lambda x: isinstance(x, Node), k.state_objects)
for pod in pods:
assert pod.atNode in nodes._get_value()
@pytest.mark.skip(reason="specific scenario is not selected")
def test_killpod():
k = KubernetesCluster()
k.load(open(NODE1).read())
k.load(open(PODS).read())
k.load(open(SERVICES).read())
k.load(open(REPLICASETS).read())
k.load(open(PRIORITYCLASSES).read())
k.load(open(DEPLOYMENT).read())
k.create_resource(open(DEPLOYMENT_NEW).read())
k._build_state()
p = OptimisticRun(k.state_objects)
# print_objects(k.state_objects)
p.run(timeout=6600, sessionName="test_start_pods")
if not p.plan:
raise Exception("Could not solve %s" % p.__class__.__name__)
print(Scenario(p.plan).asyaml())
assert "StartPod" in p.plan.__str__() # test for transition from Pending to Running
#get pods only in Running state to check atNode value
runningPods = filter(lambda z: z.status != STATUS_POD["Running"], (filter(lambda x: isinstance(x, mpod.Pod), k.state_objects)))
nodes = filter(lambda x: isinstance(x, Node), k.state_objects)
for pod in runningPods:
assert pod.atNode in nodes._get_value() # check each pod than each have atNode
killingPods = filter(lambda z: z.status != STATUS_POD["Killing"], (filter(lambda x: isinstance(x, mpod.Pod), k.state_objects)))
assert len(killingPods) > 0 # test that some pod Killed
#we have pod with Running status in dump we should get "pod cant start" because our new pods have the same priority as are ran pods
@pytest.mark.skip(reason="specific scenario is not selected")
def test_pod_cant_start():
k = KubernetesCluster()
k.load(open(NODE1).read()) # trim resource, run only one Node
k.load(open(PODS).read())
k.load(open(SERVICES).read())
k.load(open(REPLICASETS).read())
k.load(open(PRIORITYCLASSES).read())
k.load(open(DEPLOYMENT).read())
k.create_resource(open(DEPLOYMENT_NEW_WO_PRIO).read())
k._build_state()
p = OptimisticRun(k.state_objects) # TODO check me, i'd like to run exiction test with killpod execution
p.run(timeout=6600, sessionName="test_pod_cant_start")
if not p.plan:
raise Exception("Could not solve %s" % p.__class__.__name__)
print(Scenario(p.plan).asyaml())
| true | true |
1c37796bd6373f0d448d043663153ff5129d85dd | 190 | py | Python | apps/upload/urls.py | plsof/tabops_api | 39f5d2fd5158ae0c22e43ab6ff7e2b07a68a62d8 | [
"MIT"
] | 1 | 2019-07-31T07:34:38.000Z | 2019-07-31T07:34:38.000Z | apps/upload/urls.py | plsof/tabops_api | 39f5d2fd5158ae0c22e43ab6ff7e2b07a68a62d8 | [
"MIT"
] | 9 | 2019-12-05T00:39:29.000Z | 2022-02-10T14:13:29.000Z | apps/upload/urls.py | plsof/tabops_api | 39f5d2fd5158ae0c22e43ab6ff7e2b07a68a62d8 | [
"MIT"
] | null | null | null | from django.urls import path
from .views import FileUploadView, FileDetailView
# Route table for this app:
#   ''           -> FileUploadView
#   '<int:pk>/'  -> FileDetailView (addressed by primary key)
urlpatterns = [
    path(r'', FileUploadView.as_view()),
    path(r'<int:pk>/', FileDetailView.as_view()),
]
| 21.111111 | 49 | 0.710526 | from django.urls import path
from .views import FileUploadView, FileDetailView
urlpatterns = [
path(r'', FileUploadView.as_view()),
path(r'<int:pk>/', FileDetailView.as_view()),
]
| true | true |
1c3779f5382b829d1b98a26eb814a39ec688a54b | 2,242 | py | Python | parliament-member-details/__init__.py | it-pebune/ani-research-web-scraping | 16a8ac9eaec93144a515f803e9579b96a041b817 | [
"MIT"
] | null | null | null | parliament-member-details/__init__.py | it-pebune/ani-research-web-scraping | 16a8ac9eaec93144a515f803e9579b96a041b817 | [
"MIT"
] | 18 | 2022-01-20T11:22:35.000Z | 2022-03-06T21:22:48.000Z | parliament-member-details/__init__.py | it-pebune/ani-research-web-scraping | 16a8ac9eaec93144a515f803e9579b96a041b817 | [
"MIT"
] | null | null | null | import logging
import azure.functions as func
def main(req: func.HttpRequest) -> func.HttpResponse:
    """Scrapes name, photo URL and birth date of a Romanian MP from cdep.ro.

    Expects query parameters 'leg' (legislature year), 'cham' (chamber,
    '1' or '2') and 'id' (parliament member id). Returns a JSON body with
    'name', 'photo' and 'dateOfBirth', or a 406 response describing the
    invalid/missing parameter.
    """
    logging.info("Python HTTP trigger function processed a request.")

    import requests
    import json
    from bs4 import BeautifulSoup
    from bs4.dammit import EncodingDetector

    legislature = req.params.get("leg")
    chamber = req.params.get("cham")
    member_id = req.params.get("id")
    # handle any missing parameter
    if not all([legislature, chamber, member_id]):
        return func.HttpResponse(
            "Please enter values for 'leg' (legislature), 'cham' (chamber) "
            + "and 'id' (member id) parameters, e.g. leg=2020&cham=2&id=17",
            status_code=406,
        )
    # handle wrong chamber value
    if chamber not in ["1", "2"]:
        return func.HttpResponse(
            "'cham' (chamber) value should be '1' or '2'",
            status_code=406,
        )
    link = "http://www.cdep.ro/pls/parlam/structura2015.mp?idm={}&cam={}&leg={}".format(  # noqa: E501
        member_id, chamber, legislature
    )
    # BUG FIX: the scrape result used to be assigned back to `req`, shadowing
    # the incoming func.HttpRequest parameter; use a distinct name.
    response = requests.get(link)
    # handle wrong legislature value
    if response.status_code == 404:
        return func.HttpResponse(
            "Please enter a correct year value for leg (legislature).", status_code=406
        )
    # get the right encoding: prefer the HTML-declared charset, fall back to
    # the HTTP header's charset (if one was present)
    http_encoding = (
        response.encoding
        if "charset" in response.headers.get("content-type", "").lower()
        else None
    )
    html_encoding = EncodingDetector.find_declared_encoding(
        response.content, is_html=True
    )
    encoding = html_encoding or http_encoding
    soup = BeautifulSoup(response.content, "lxml", from_encoding=encoding)
    to_return = {}
    name = soup.find("title").text
    # handle wrong idm: the page for an unknown member has an empty title
    if not name.strip():
        return func.HttpResponse("Wrong id (parliament member id).", status_code=406)
    profile_div = soup.find("div", attrs={"class": "profile-pic-dep"})
    photo_link = "http://www.cdep.ro" + profile_div.find("img")["src"]
    # The div's text starts with a 2-character prefix before the birth date.
    birth_date = profile_div.text.strip()[2:].strip()
    to_return["name"] = name
    to_return["photo"] = photo_link
    to_return["dateOfBirth"] = birth_date
    return func.HttpResponse(json.dumps(to_return), mimetype="application/json")
| 32.028571 | 102 | 0.642284 | import logging
import azure.functions as func
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info("Python HTTP trigger function processed a request.")
import requests
import json
from bs4 import BeautifulSoup
from bs4.dammit import EncodingDetector
legislature = req.params.get("leg")
chamber = req.params.get("cham")
member_id = req.params.get("id")
if not all([legislature, chamber, member_id]):
return func.HttpResponse(
"Please enter values for 'leg' (legislature), 'cham' (chamber) "
+ "and 'id' (member id) parameters, e.g. leg=2020&cham=2&id=17",
status_code=406,
)
if chamber not in ["1", "2"]:
return func.HttpResponse(
"'cham' (chamber) value should be '1' or '2'",
status_code=406,
)
link = "http://www.cdep.ro/pls/parlam/structura2015.mp?idm={}&cam={}&leg={}".format(
member_id, chamber, legislature
)
req = requests.get(link)
if req.status_code == 404:
return func.HttpResponse(
"Please enter a correct year value for leg (legislature).", status_code=406
)
http_encoding = (
req.encoding
if "charset" in req.headers.get("content-type", "").lower()
else None
)
html_encoding = EncodingDetector.find_declared_encoding(req.content, is_html=True)
encoding = html_encoding or http_encoding
soup = BeautifulSoup(req.content, "lxml", from_encoding=encoding)
to_return = {}
name = soup.find("title").text
if not name.strip():
return func.HttpResponse("Wrong id (parliament member id).", status_code=406)
profile_div = soup.find("div", attrs={"class": "profile-pic-dep"})
photo_link = "http://www.cdep.ro" + profile_div.find("img")["src"]
birth_date = profile_div.text.strip()[2:].strip()
to_return["name"] = name
to_return["photo"] = photo_link
to_return["dateOfBirth"] = birth_date
return func.HttpResponse(json.dumps(to_return), mimetype="application/json")
| true | true |
1c377a566a79a1c13d9805eef9db66ccd897bedf | 69,535 | py | Python | lib/googlecloudsdk/command_lib/container/flags.py | bshaffer/google-cloud-sdk | f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9 | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/command_lib/container/flags.py | bshaffer/google-cloud-sdk | f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9 | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/command_lib/container/flags.py | bshaffer/google-cloud-sdk | f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*- #
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flags and helpers for the container related commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import constants as compute_constants
from googlecloudsdk.api_lib.container import api_adapter
from googlecloudsdk.api_lib.container import util
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.container import constants
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
def AddBasicAuthFlags(parser):
  """Registers the basic auth flag group on the given parser.

  The group consists of --username, --enable-basic-auth, and --password.

  Args:
    parser: A given parser.
  """
  auth_group = parser.add_group(help='Basic auth')
  # --username and --enable-basic-auth are two spellings of the same
  # setting, so they live in a mutually exclusive subgroup.
  user_group = auth_group.add_group(
      mutex=True, help='Options to specify the username.')
  user_group.add_argument(
      '--username',
      '-u',
      help="""\
The user name to use for basic auth for the cluster. Use `--password` to specify
a password; if not, the server will randomly generate one.""")
  user_group.add_argument(
      '--enable-basic-auth',
      action='store_true',
      default=None,
      help="""\
Enable basic (username/password) auth for the cluster. `--enable-basic-auth` is
an alias for `--username=admin`; `--no-enable-basic-auth` is an alias for
`--username=""`. Use `--password` to specify a password; if not, the server will
randomly generate one. For cluster versions before 1.12, if neither
`--enable-basic-auth` nor `--username` is specified, `--enable-basic-auth` will
default to `true`. After 1.12, `--enable-basic-auth` will default to `false`.""")
  auth_group.add_argument(
      '--password',
      help=('The password to use for cluster auth. Defaults to a '
            'server-specified randomly-generated string.'))
def MungeBasicAuthFlags(args):
  """Normalizes the basic auth flags on a parsed args namespace.

  An explicit --enable-basic-auth / --no-enable-basic-auth is rewritten into
  the equivalent --username value ('admin' or ''), then --password is
  validated: a password makes no sense when basic auth is disabled.

  Args:
    args: an argparse namespace. All the arguments that were provided to this
      command invocation.

  Raises:
    util.Error, if flags conflict.
  """
  if args.IsSpecified('enable_basic_auth'):
    args.username = 'admin' if args.enable_basic_auth else ''
  if args.IsSpecified('password') and not args.username:
    raise util.Error(constants.USERNAME_PASSWORD_ERROR_MSG)
# TODO(b/28318474): move flags common across commands here.
def AddImageTypeFlag(parser, target):
  """Registers a --image-type flag for the given target on the parser."""
  parser.add_argument(
      '--image-type',
      help="""\
The image type to use for the {target}. Defaults to server-specified.
Image Type specifies the base OS that the nodes in the {target} will run on.
If an image type is specified, that will be assigned to the {target} and all
future upgrades will use the specified image type. If it is not specified the
server will pick the default image type.
The default image type and the list of valid image types are available
using the following command.
  $ gcloud container get-server-config
""".format(target=target))
def AddImageFlag(parser, hidden=False):
  """Adds an --image flag to the given parser.

  Args:
    parser: A given parser.
    hidden: if true, suppress help text for this option
  """
  parser.add_argument(
      '--image',
      hidden=hidden,
      help="""\
A specific image to use on the new instances.
""")
def AddImageProjectFlag(parser, hidden=False):
  """Adds an --image-project flag to the given parser.

  Args:
    parser: A given parser.
    hidden: if true, suppresses help text for this option.
  """
  # BUG FIX: the help string previously opened with `"""/` instead of `"""\`,
  # which injected a stray "/" line at the top of the rendered help text.
  # The backslash escapes the opening newline, matching the other help
  # strings in this file.
  help_text = """\
A specific project from which contains the os image or image family. This is
required when using --image-type=CUSTOM.
"""
  parser.add_argument('--image-project', help=help_text, hidden=hidden)
def AddImageFamilyFlag(parser, hidden=False):
  """Adds an --image-family flag to the given parser.

  Args:
    parser: A given parser.
    hidden: if true, suppresses help text for this option.
  """
  # BUG FIX: `"""/` -> `"""\` so the rendered help text no longer starts
  # with a stray "/" line (the backslash escapes the opening newline,
  # matching the other help strings in this file).
  help_text = """\
A specific image-family from which the most recent image is used on new
instances. If both image and image family are specified, the image must be in
the image family, and the image is used.
"""
  parser.add_argument('--image-family', help=help_text, hidden=hidden)
def AddNodeVersionFlag(parser, hidden=False):
  """Registers a --node-version flag on the given parser."""
  node_version_help = """\
The Kubernetes version to use for nodes. Defaults to server-specified.
The default Kubernetes version is available using the following command.
  $ gcloud container get-server-config
"""
  return parser.add_argument(
      '--node-version', hidden=hidden, help=node_version_help)
def AddClusterVersionFlag(parser, suppressed=False, help=None):  # pylint: disable=redefined-builtin
  """Adds a --cluster-version flag to the given parser."""
  default_help = """\
The Kubernetes version to use for the master and nodes. Defaults to
server-specified.
The default Kubernetes version is available using the following command.
  $ gcloud container get-server-config
"""
  if help is None:
    help = default_help
  return parser.add_argument('--cluster-version', hidden=suppressed, help=help)
def AddClusterAutoscalingFlags(parser, update_group=None, hidden=False):
  """Adds autoscaling related flags to parser.

  Autoscaling related flags are: --enable-autoscaling
  --min-nodes --max-nodes flags.

  Args:
    parser: A given parser.
    update_group: An optional group of mutually exclusive flag options
      to which an --enable-autoscaling flag is added.
    hidden: If true, suppress help text for added options.

  Returns:
    Argument group for autoscaling flags.
  """
  group = parser.add_argument_group('Cluster autoscaling')
  # --enable-autoscaling may need to participate in the caller's mutex
  # group (update commands); --min/--max-nodes always go in our own group.
  autoscaling_group = group if update_group is None else update_group
  autoscaling_group.add_argument(
      '--enable-autoscaling',
      default=None,
      help="""\
Enables autoscaling for a node pool.
Enables autoscaling in the node pool specified by --node-pool or
the default node pool if --node-pool is not provided.""",
      hidden=hidden,
      action='store_true')
  group.add_argument(
      '--max-nodes',
      help="""\
Maximum number of nodes in the node pool.
Maximum number of nodes to which the node pool specified by --node-pool
(or default node pool if unspecified) can scale. Ignored unless
--enable-autoscaling is also specified.""",
      hidden=hidden,
      type=int)
  group.add_argument(
      '--min-nodes',
      help="""\
Minimum number of nodes in the node pool.
Minimum number of nodes to which the node pool specified by --node-pool
(or default node pool if unspecified) can scale. Ignored unless
--enable-autoscaling is also specified.""",
      hidden=hidden,
      type=int)
  return group
def AddNodePoolAutoprovisioningFlag(parser, hidden=True):
  """Registers --enable-autoprovisioning for a node pool on the parser.

  Args:
    parser: A given parser.
    hidden: If true, suppress help text for added options.
  """
  autoprovisioning_help = """\
Enables Cluster Autoscaler to treat the node pool as if it was autoprovisioned.
Cluster Autoscaler will be able to delete the node pool if it's unneeded."""
  parser.add_argument(
      '--enable-autoprovisioning',
      action='store_true',
      default=None,
      hidden=hidden,
      help=autoprovisioning_help)
def AddLocalSSDFlag(parser, suppressed=False, help_text=''):
  """Adds a --local-ssd-count flag to the given parser."""
  # Any caller-supplied prefix is prepended to the standard description.
  full_help = help_text + """\
The number of local SSD disks to provision on each node.
Local SSDs have a fixed 375 GB capacity per device. The number of disks that
can be attached to an instance is limited by the maximum number of disks
available on a machine, which differs by compute zone. See
https://cloud.google.com/compute/docs/disks/local-ssd for more information."""
  parser.add_argument(
      '--local-ssd-count',
      type=int,
      default=0,
      hidden=suppressed,
      help=full_help)
def AddAcceleratorArgs(parser):
  """Adds Accelerator-related args.

  Registers --accelerator, parsed as a dict with a required 'type' key and
  an optional 'count' key (per the help text, count defaults to 1).
  """
  parser.add_argument(
      '--accelerator',
      type=arg_parsers.ArgDict(
          spec={
              'type': str,
              'count': int,
          },
          required_keys=['type'],
          max_length=2),
      metavar='type=TYPE,[count=COUNT]',
      help="""\
Attaches accelerators (e.g. GPUs) to all nodes.
*type*::: (Required) The specific type (e.g. nvidia-tesla-k80 for nVidia Tesla K80)
of accelerator to attach to the instances. Use ```gcloud compute
accelerator-types list``` to learn about all available accelerator types.
*count*::: (Optional) The number of accelerators to attach to the
instances. The default value is 1.
""")
def AddAutoprovisioningFlags(parser, hidden=False):
  """Adds node autoprovisioning related flags to parser.

  Autoprovisioning related flags are: --enable-autoprovisioning
  --min-cpu --max-cpu --min-memory --max-memory flags.
  Resource limits can be given either via individual flags or via a
  JSON/YAML config file; the two forms are mutually exclusive.

  Args:
    parser: A given parser.
    hidden: If true, suppress help text for added options.
  """
  group = parser.add_argument_group('Node autoprovisioning', hidden=hidden)
  group.add_argument(
      '--enable-autoprovisioning',
      required=True,
      default=None,
      help="""\
Enables node autoprovisioning for a cluster.
Cluster Autoscaler will be able to create new node pools. Requires maximum CPU
and memory limits to be specified.""",
      hidden=hidden,
      action='store_true')
  # Limits come either from a config file or from individual flags, never
  # both.
  limits_group = group.add_mutually_exclusive_group()
  limits_group.add_argument(
      '--autoprovisioning-config-file',
      type=arg_parsers.BufferedFileInput(),
      hidden=hidden,
      help="""\
Path of the JSON/YAML file which contains information about the
cluster's autoscaling configuration. Currently it only contains
a list of resource limits of the cluster.
Each resource limits definition contains three fields:
resourceType, maximum and minimum.
Resource type can be "cpu", "memory" or an accelerator (e.g.
"nvidia-tesla-k80" for nVidia Tesla K80). Use gcloud compute accelerator-types
list to learn about available accelerator types.
Maximum is the maximum allowed amount with the unit of the resource.
Minimum is the minimum allowed amount with the unit of the resource.
""")
  from_flags_group = limits_group.add_argument_group('Flags to configure '
                                                     'resource limits:')
  from_flags_group.add_argument(
      '--max-cpu',
      required=True,
      help="""\
Maximum number of cores in the cluster.
Maximum number of cores to which the cluster can scale.""",
      hidden=hidden,
      type=int)
  from_flags_group.add_argument(
      '--min-cpu',
      help="""\
Minimum number of cores in the cluster.
Minimum number of cores to which the cluster can scale.""",
      hidden=hidden,
      type=int)
  from_flags_group.add_argument(
      '--max-memory',
      required=True,
      help="""\
Maximum memory in the cluster.
Maximum number of gigabytes of memory to which the cluster can scale.""",
      hidden=hidden,
      type=int)
  from_flags_group.add_argument(
      '--min-memory',
      help="""\
Minimum memory in the cluster.
Minimum number of gigabytes of memory to which the cluster can scale.""",
      hidden=hidden,
      type=int)
  # Per-accelerator-type limits; both type and count are required keys.
  accelerator_group = from_flags_group.add_argument_group(
      'Arguments to set limits on accelerators:')
  accelerator_group.add_argument(
      '--max-accelerator',
      type=arg_parsers.ArgDict(spec={
          'type': str,
          'count': int,
      }, required_keys=['type', 'count'], max_length=2),
      required=True,
      metavar='type=TYPE,count=COUNT',
      hidden=hidden,
      help="""\
Sets maximum limit for a single type of accelerators (e.g. GPUs) in cluster.
*type*::: (Required) The specific type (e.g. nvidia-tesla-k80 for nVidia Tesla K80)
of accelerator for which the limit is set. Use ```gcloud compute
accelerator-types list``` to learn about all available accelerator types.
*count*::: (Required) The maximum number of accelerators
to which the cluster can be scaled.
""")
  accelerator_group.add_argument(
      '--min-accelerator',
      type=arg_parsers.ArgDict(spec={
          'type': str,
          'count': int,
      }, required_keys=['type', 'count'], max_length=2),
      metavar='type=TYPE,count=COUNT',
      hidden=hidden,
      help="""\
Sets minimum limit for a single type of accelerators (e.g. GPUs) in cluster. Defaults
to 0 for all accelerator types if it isn't set.
*type*::: (Required) The specific type (e.g. nvidia-tesla-k80 for nVidia Tesla K80)
of accelerator for which the limit is set. Use ```gcloud compute
accelerator-types list``` to learn about all available accelerator types.
*count*::: (Required) The minimum number of accelerators
to which the cluster can be scaled.
""")
def AddEnableBinAuthzFlag(parser, hidden=False):
  """Registers the --enable-binauthz flag on the given parser."""
  parser.add_argument(
      '--enable-binauthz',
      hidden=hidden,
      default=None,
      action='store_true',
      help='Enable Binary Authorization for this cluster.')
def AddZoneAndRegionFlags(parser):
  """Adds the --zone and --region flags to the parser.

  The two flags are mutually exclusive; --zone additionally writes its
  value into the gcloud compute/zone property.
  """
  # TODO(b/33343238): Remove the short form of the zone flag.
  # TODO(b/18105938): Add zone prompting
  group = parser.add_mutually_exclusive_group()
  group.add_argument(
      '--zone',
      '-z',
      help='Compute zone (e.g. us-central1-a) for the cluster',
      action=actions.StoreProperty(properties.VALUES.compute.zone))
  group.add_argument(
      '--region',
      help='Compute region (e.g. us-central1) for the cluster.')
def AddAsyncFlag(parser):
  """Registers the --async flag on the given parser."""
  # default=None (rather than False) distinguishes "flag absent" from
  # "flag explicitly off".
  async_kwargs = {
      'action': 'store_true',
      'default': None,
      'help': "Don't wait for the operation to complete.",
  }
  parser.add_argument('--async', **async_kwargs)
def AddEnableKubernetesAlphaFlag(parser):
  """Registers the --enable-kubernetes-alpha flag on the given parser."""
  parser.add_argument(
      '--enable-kubernetes-alpha',
      action='store_true',
      help="""\
Enable Kubernetes alpha features on this cluster. Selecting this
option will result in the cluster having all Kubernetes alpha API groups and
features turned on. Cluster upgrades (both manual and automatic) will be
disabled and the cluster will be automatically deleted after 30 days.
Alpha clusters are not covered by the Kubernetes Engine SLA and should not be
used for production workloads.""")
def AddEnableStackdriverKubernetesFlag(parser):
  """Registers the --enable-stackdriver-kubernetes flag on the given parser."""
  parser.add_argument(
      '--enable-stackdriver-kubernetes',
      action='store_true',
      help="""Enable Stackdriver Kubernetes monitoring and logging.""")
def AddNodeLabelsFlag(parser, for_node_pool=False):
  """Adds a --node-labels flag to the given parser.

  Args:
    parser: A given parser.
    for_node_pool: whether the example command targets a node pool; only the
      example text differs between the two variants.
  """
  if for_node_pool:
    help_text = """\
Applies the given kubernetes labels on all nodes in the new node-pool. Example:
  $ {command} node-pool-1 --cluster=example-cluster --node-labels=label1=value1,label2=value2
"""
  else:
    # Fixed: previously this branch also said "in the new node-pool" even
    # though it documents the cluster-creation path.
    help_text = """\
Applies the given kubernetes labels on all nodes in the new cluster. Example:
  $ {command} example-cluster --node-labels=label-a=value1,label-2=value2
"""
  help_text += """
New nodes, including ones created by resize or recreate, will have these labels
on the kubernetes API node object and can be used in nodeSelectors.
See [](http://kubernetes.io/docs/user-guide/node-selection/) for examples.
Note that kubernetes labels, intended to associate cluster components
and resources with one another and manage resource lifecycles, are different
from Kubernetes Engine labels that are used for the purpose of tracking billing
and usage information."""
  parser.add_argument(
      '--node-labels',
      metavar='NODE_LABEL',
      type=arg_parsers.ArgDict(),
      help=help_text)
def AddLocalSSDAndLocalSSDVolumeConfigsFlag(parser, for_node_pool=False,
                                            suppressed=False):
  """Registers the mutually exclusive --local-ssd-count / --local-ssd-volumes.

  Args:
    parser: A given parser.
    for_node_pool: forwarded to the volume-configs flag for example text.
    suppressed: forwarded to the count flag to hide it from help output.
  """
  shared_help = """\
--local-ssd-volumes enables the ability to request local SSD with variable count, interfaces, and format\n
--local-ssd-count is the equivalent of using --local-ssd-volumes with type=scsi,format=fs
"""
  mutex_group = parser.add_mutually_exclusive_group()
  AddLocalSSDVolumeConfigsFlag(
      mutex_group, for_node_pool=for_node_pool, help_text=shared_help)
  AddLocalSSDFlag(mutex_group, suppressed=suppressed, help_text=shared_help)
def AddLocalSSDVolumeConfigsFlag(parser, for_node_pool=False, help_text=''):
  """Adds a --local-ssd-volumes flag to the given parser.

  Args:
    parser: A given parser.
    for_node_pool: True if the flag targets a node-pool command; only the
      example command line in the help differs.
    help_text: text prepended to the generated help.
  """
  help_text += """\
Adds the requested local SSDs on all nodes in default node-pool(s) in new cluster. Example:
  $ {{command}} {0} --local-ssd-volumes count=2,type=nvme,format=fs
'count' must be between 1-8\n
'type' must be either scsi or nvme\n
'format' must be either fs or block
New nodes, including ones created by resize or recreate, will have these local SSDs.
Local SSDs have a fixed 375 GB capacity per device. The number of disks that
can be attached to an instance is limited by the maximum number of disks
available on a machine, which differs by compute zone. See
https://cloud.google.com/compute/docs/disks/local-ssd for more information.
""".format('node-pool-1 --cluster=example-cluster' if for_node_pool else
           'example-cluster')  # fixed: was 'example_cluster', inconsistent with all other examples
  # Each ArgDict value is validated client-side so bad input fails at parse
  # time rather than server-side.
  count_validator = arg_parsers.RegexpValidator(
      r'^[1-8]$', 'Count must be a number between 1 and 8')
  type_validator = arg_parsers.RegexpValidator(
      r'^(scsi|nvme)$', 'Type must be either "scsi" or "nvme"')
  format_validator = arg_parsers.RegexpValidator(
      r'^(fs|block)$', 'Format must be either "fs" or "block"')
  parser.add_argument(
      '--local-ssd-volumes',
      metavar='[count=COUNT],[type=TYPE],[format=FORMAT]',
      type=arg_parsers.ArgDict(
          spec={
              'count': count_validator,
              'type': type_validator,
              'format': format_validator,
          },
          required_keys=['count', 'type', 'format'],
          max_length=3),
      action='append',
      help=help_text)
def AddNodeTaintsFlag(parser, for_node_pool=False, hidden=False):
  """Registers the --node-taints flag on the given parser.

  Args:
    parser: A given parser.
    for_node_pool: whether the example command targets a node pool.
    hidden: whether to hide the flag from help output.
  """
  if for_node_pool:
    taints_help = """\
Applies the given kubernetes taints on all nodes in the new node-pool, which can be used with tolerations for pod scheduling. Example:
  $ {command} node-pool-1 --cluster=example-cluster --node-taints=key1=val1:NoSchedule,key2=val2:PreferNoSchedule
"""
  else:
    taints_help = """\
Applies the given kubernetes taints on all nodes in default node-pool(s) in new cluster, which can be used with tolerations for pod scheduling. Example:
  $ {command} example-cluster --node-taints=key1=val1:NoSchedule,key2=val2:PreferNoSchedule
"""
  taints_help += """
Note, this feature uses `gcloud beta` commands. To use gcloud beta commands,
you must configure `gcloud` to use the v1beta1 API as described here: https://cloud.google.com/kubernetes-engine/docs/reference/api-organization#beta.
To read more about node-taints, see https://cloud.google.com/kubernetes-engine/docs/node-taints.
"""
  parser.add_argument(
      '--node-taints',
      type=arg_parsers.ArgDict(),
      metavar='NODE_TAINT',
      hidden=hidden,
      help=taints_help)
def AddPreemptibleFlag(parser, for_node_pool=False, suppressed=False):
  """Registers the --preemptible flag on the given parser.

  Args:
    parser: A given parser.
    for_node_pool: whether the example command targets a node pool.
    suppressed: whether to hide the flag from help output.
  """
  if for_node_pool:
    preemptible_help = """\
Create nodes using preemptible VM instances in the new nodepool.
  $ {command} node-pool-1 --cluster=example-cluster --preemptible
"""
  else:
    preemptible_help = """\
Create nodes using preemptible VM instances in the new cluster.
  $ {command} example-cluster --preemptible
"""
  preemptible_help += """
New nodes, including ones created by resize or recreate, will use preemptible
VM instances. See https://cloud.google.com/kubernetes-engine/docs/preemptible-vm
for more information on how to use Preemptible VMs with Kubernetes Engine."""
  parser.add_argument(
      '--preemptible',
      action='store_true',
      hidden=suppressed,
      help=preemptible_help)
def AddNodePoolNameArg(parser, help_text):
  """Registers the positional NAME argument on the given parser.

  Args:
    parser: A given parser.
    help_text: The help text describing the operation being performed.
  """
  parser.add_argument('name', help=help_text, metavar='NAME')
def AddNodePoolClusterFlag(parser, help_text):
  """Registers the --cluster flag on the given parser.

  Args:
    parser: A given parser.
    help_text: The help text describing usage of the --cluster flag being set.
  """
  parser.add_argument(
      '--cluster',
      action=actions.StoreProperty(properties.VALUES.container.cluster),
      help=help_text)
def AddEnableAutoRepairFlag(parser, for_node_pool=False, for_create=False):
  """Registers the --enable-autorepair flag on the given parser.

  Args:
    parser: A given parser.
    for_node_pool: whether the example command targets a node pool.
    for_create: whether to document the create-time default behavior.
  """
  if for_node_pool:
    repair_help = """\
Enable node autorepair feature for a node-pool.
  $ {command} node-pool-1 --cluster=example-cluster --enable-autorepair
"""
    if for_create:
      repair_help += """
Node autorepair is enabled by default for node pools using COS as a base image,
use --no-enable-autorepair to disable.
"""
  else:
    repair_help = """\
Enable node autorepair feature for a cluster's default node-pool(s).
  $ {command} example-cluster --enable-autorepair
"""
    if for_create:
      repair_help += """
Node autorepair is enabled by default for clusters using COS as a base image,
use --no-enable-autorepair to disable.
"""
  repair_help += """
See https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-repair for \
more info."""
  parser.add_argument(
      '--enable-autorepair',
      default=None,
      action='store_true',
      help=repair_help)
def AddEnableAutoUpgradeFlag(parser, for_node_pool=False, suppressed=False):
  """Registers the --enable-autoupgrade flag on the given parser.

  Args:
    parser: A given parser.
    for_node_pool: whether the example command targets a node pool.
    suppressed: whether to hide the flag from help output.
  """
  if for_node_pool:
    upgrade_help = """\
Sets autoupgrade feature for a node-pool.
  $ {command} node-pool-1 --cluster=example-cluster --enable-autoupgrade
"""
  else:
    upgrade_help = """\
Sets autoupgrade feature for a cluster's default node-pool(s).
  $ {command} example-cluster --enable-autoupgrade
"""
  upgrade_help += """
See https://cloud.google.com/kubernetes-engine/docs/node-management for more \
info."""
  parser.add_argument(
      '--enable-autoupgrade',
      default=None,
      action='store_true',
      hidden=suppressed,
      help=upgrade_help)
def AddTagsFlag(parser, help_text):
  """Registers the --tags flag on the given parser.

  Args:
    parser: A given parser.
    help_text: The help text for the flag.
  """
  parser.add_argument(
      '--tags',
      type=arg_parsers.ArgList(min_length=1),
      metavar='TAG',
      help=help_text)
def AddMasterAuthorizedNetworksFlags(parser, enable_group_for_update=None):
  """Adds Master Authorized Networks related flags to parser.

  Master Authorized Networks related flags are:
  --enable-master-authorized-networks --master-authorized-networks.

  Args:
    parser: A given parser.
    enable_group_for_update: An optional group of mutually exclusive flag
      options to which an --enable-master-authorized-networks flag is added
      in an update command.
  """
  if enable_group_for_update is None:
    # Flags are being added to the same group.
    master_flag_group = parser.add_argument_group('Master Authorized Networks')
    enable_flag_group = master_flag_group
  else:
    # Flags are being added to different groups, so the new one should have no
    # help text (has only one arg).
    master_flag_group = parser.add_argument_group('')
    enable_flag_group = enable_group_for_update
  enable_flag_group.add_argument(
      '--enable-master-authorized-networks',
      default=None,
      help="""\
Allow only specified set of CIDR blocks (specified by the
`--master-authorized-networks` flag) to connect to Kubernetes master through
HTTPS. Besides these blocks, the following have access as well:\n
  1) The private network the cluster connects to if
`--enable-private-nodes` is specified.
  2) Google Compute Engine Public IPs if `--enable-private-nodes` is not
specified.\n
Use `--no-enable-master-authorized-networks` to disable. When disabled, public
internet (0.0.0.0/0) is allowed to connect to Kubernetes master through HTTPS.
""",
      action='store_true')
  # NOTE(review): no client-side check that --enable-master-authorized-networks
  # accompanies --master-authorized-networks; the help text documents the
  # dependency and enforcement presumably happens server-side — confirm.
  master_flag_group.add_argument(
      '--master-authorized-networks',
      type=arg_parsers.ArgList(min_length=1),
      metavar='NETWORK',
      help='The list of CIDR blocks (up to {max}) that are allowed to connect '
      'to Kubernetes master through HTTPS. Specified in CIDR notation (e.g. '
      '1.2.3.4/30). Can not be specified unless '
      '`--enable-master-authorized-networks` is also specified.'.format(
          max=api_adapter.MAX_AUTHORIZED_NETWORKS_CIDRS))
def AddNetworkPolicyFlags(parser, hidden=False):
  """Registers the --enable-network-policy flag on the given parser.

  Args:
    parser: A given parser.
    hidden: whether to hide the flag from help output.
  """
  policy_help = ('Enable network policy enforcement for this cluster. If you are '
                 'enabling network policy on an existing cluster the network policy '
                 'addon must first be enabled on the master by using '
                 '--update-addons=NetworkPolicy=ENABLED flag.')
  parser.add_argument(
      '--enable-network-policy',
      default=None,
      action='store_true',
      hidden=hidden,
      help=policy_help)
def AddPrivateClusterFlags(parser, with_deprecated=False):
  """Adds flags related to private clusters to parser.

  Args:
    parser: A given parser.
    with_deprecated: whether to also register the deprecated --private-cluster
      flag alongside its replacement --enable-private-nodes.
  """
  group = parser.add_argument_group('Private Clusters')
  if with_deprecated:
    # Deprecated spelling of --enable-private-nodes; parsing it emits a
    # warning via DeprecationAction but still stores True.
    group.add_argument(
        '--private-cluster',
        help=('Cluster is created with no public IP addresses on the cluster '
              'nodes.'),
        default=None,
        action=actions.DeprecationAction(
            'private-cluster',
            warn='The --private-cluster flag is deprecated and will be removed '
            'in a future release. Use --enable-private-nodes instead.',
            action='store_true'))
  group.add_argument(
      '--enable-private-nodes',
      help=('Cluster is created with no public IP addresses on the cluster '
            'nodes.'),
      default=None,
      action='store_true')
  group.add_argument(
      '--enable-private-endpoint',
      help=('Cluster is managed using the private IP address of the master '
            'API endpoint.'),
      default=None,
      action='store_true')
  group.add_argument(
      '--master-ipv4-cidr',
      help=('IPv4 CIDR range to use for the master network. This should have '
            'a netmask of size /28 and should be used in conjunction with the '
            '--enable-private-nodes flag.'),
      default=None)
def AddEnableLegacyAuthorizationFlag(parser, hidden=False):
  """Registers the --enable-legacy-authorization flag on the given parser.

  Args:
    parser: A given parser.
    hidden: whether to hide the flag from help output.
  """
  parser.add_argument(
      '--enable-legacy-authorization',
      default=None,
      action='store_true',
      hidden=hidden,
      help="""\
Enables the legacy ABAC authentication for the cluster.
User rights are granted through the use of policies which combine attributes
together. For a detailed look at these properties and related formats, see
https://kubernetes.io/docs/admin/authorization/abac/. To use RBAC permissions
instead, create or update your cluster with the option
`--no-enable-legacy-authorization`.
""")
def AddAuthenticatorSecurityGroupFlags(parser, hidden=False):
  """Registers the --security-group flag on the given parser.

  Args:
    parser: A given parser.
    hidden: whether to hide the flag from help output.
  """
  parser.add_argument(
      '--security-group',
      default=None,
      hidden=hidden,
      help="""\
The name of the RBAC security group for use with Google security groups
in Kubernetes RBAC
(https://kubernetes.io/docs/reference/access-authn-authz/rbac/).
To include group membership as part of the claims issued by Google
during authentication, a group must be designated as a security group by
including it as a direct member of this group.
If unspecified, no groups will be returned for use with RBAC.""")
def AddStartIpRotationFlag(parser, hidden=False):
  """Registers the --start-ip-rotation flag on the given parser.

  Args:
    parser: A given parser.
    hidden: whether to hide the flag from help output.
  """
  parser.add_argument(
      '--start-ip-rotation',
      default=False,
      action='store_true',
      hidden=hidden,
      help="""\
Start the rotation of this cluster to a new IP. For example:
  $ {command} example-cluster --start-ip-rotation
This causes the cluster to serve on two IPs, and will initiate a node upgrade \
to point to the new IP.""")
def AddStartCredentialRotationFlag(parser, hidden=False):
  """Registers the --start-credential-rotation flag on the given parser.

  Args:
    parser: A given parser.
    hidden: whether to hide the flag from help output.
  """
  parser.add_argument(
      '--start-credential-rotation',
      default=False,
      action='store_true',
      hidden=hidden,
      help="""\
Start the rotation of IP and credentials for this cluster. For example:
  $ {command} example-cluster --start-credential-rotation
This causes the cluster to serve on two IPs, and will initiate a node upgrade \
to point to the new IP.""")
def AddCompleteIpRotationFlag(parser, hidden=False):
  """Registers the --complete-ip-rotation flag on the given parser.

  Args:
    parser: A given parser.
    hidden: whether to hide the flag from help output.
  """
  parser.add_argument(
      '--complete-ip-rotation',
      default=False,
      action='store_true',
      hidden=hidden,
      help="""\
Complete the IP rotation for this cluster. For example:
  $ {command} example-cluster --complete-ip-rotation
This causes the cluster to stop serving its old IP, and return to a single IP \
state.""")
def AddCompleteCredentialRotationFlag(parser, hidden=False):
  """Registers the --complete-credential-rotation flag on the given parser.

  Args:
    parser: A given parser.
    hidden: whether to hide the flag from help output.
  """
  parser.add_argument(
      '--complete-credential-rotation',
      default=False,
      action='store_true',
      hidden=hidden,
      help="""\
Complete the IP and credential rotation for this cluster. For example:
  $ {command} example-cluster --complete-credential-rotation
This causes the cluster to stop serving its old IP, return to a single IP, and \
invalidate old credentials.""")
def AddMaintenanceWindowFlag(parser, hidden=False, add_unset_text=False):
  """Registers the --maintenance-window flag on the given parser.

  Args:
    parser: A given parser.
    hidden: whether to hide the flag from help output.
    add_unset_text: whether to document passing 'None' to clear the window.
  """
  window_help = """\
Set a time of day when you prefer maintenance to start on this cluster. \
For example:
  $ {command} example-cluster --maintenance-window=12:43
The time corresponds to the UTC time zone, and must be in HH:MM format.
"""
  validator_description = 'Maintenance windows must be passed in using HH:MM format.'
  if add_unset_text:
    window_help += """\
To remove an existing maintenance window from the cluster, use \
'--maintenance-window=None'
"""
    validator_description += ' They can also be removed by using the word "None".'
  # Accepts H:MM or HH:MM (00-23 hours) or the literal "None".
  hh_mm_or_none = arg_parsers.RegexpValidator(
      r'^([0-9]|0[0-9]|1[0-9]|2[0-3]):[0-5][0-9]$|^None$', validator_description)
  parser.add_argument(
      '--maintenance-window',
      type=hh_mm_or_none,
      default=None,
      hidden=hidden,
      help=window_help)
def AddLabelsFlag(parser, suppressed=False):
  """Registers the --labels flag on the given parser.

  Args:
    parser: A given parser.
    suppressed: Whether or not to suppress help text.
  """
  parser.add_argument(
      '--labels',
      type=arg_parsers.ArgDict(),
      metavar='KEY=VALUE',
      hidden=suppressed,
      help="""\
Labels to apply to the Google Cloud resources in use by the Kubernetes Engine
cluster. These are unrelated to Kubernetes labels.
Example:
  $ {command} example-cluster --labels=label_a=value1,label_b=,label_c=value3
""")
def AddUpdateLabelsFlag(parser):
  """Registers the --update-labels flag on the given parser.

  Args:
    parser: A given parser.
  """
  parser.add_argument(
      '--update-labels',
      type=arg_parsers.ArgDict(),
      metavar='KEY=VALUE',
      help="""\
Labels to apply to the Google Cloud resources in use by the Kubernetes Engine
cluster. These are unrelated to Kubernetes labels.
Example:
  $ {command} example-cluster --update-labels=label_a=value1,label_b=value2
""")
def AddRemoveLabelsFlag(parser):
  """Registers the --remove-labels flag on the given parser.

  Args:
    parser: A given parser.
  """
  parser.add_argument(
      '--remove-labels',
      type=arg_parsers.ArgList(),
      metavar='KEY',
      help="""\
Labels to remove from the Google Cloud resources in use by the Kubernetes Engine
cluster. These are unrelated to Kubernetes labels.
Example:
  $ {command} example-cluster --remove-labels=label_a,label_b
""")
def AddDiskTypeFlag(parser):
  """Registers the --disk-type flag on the given parser.

  Args:
    parser: A given parser.
  """
  parser.add_argument(
      '--disk-type',
      choices=['pd-standard', 'pd-ssd'],
      help="""\
Type of the node VM boot disk. Defaults to pd-standard.
""")
def AddIPAliasFlags(parser):
  """Adds flags related to IP aliases to the parser.

  Registers --enable-ip-alias plus the dependent flags --services-ipv4-cidr,
  --create-subnetwork, --cluster-secondary-range-name and
  --services-secondary-range-name. The dependencies between them are only
  documented in help text here; no client-side cross-validation is done in
  this function.

  Args:
    parser: A given parser.
  """
  parser.add_argument(
      '--enable-ip-alias',
      action='store_true',
      default=None,
      help="""\
Enable use of alias IPs (https://cloud.google.com/compute/docs/alias-ip/)
for pod IPs. This will create two secondary ranges, one for the pod IPs
and another to reserve space for the services range.
""")
  parser.add_argument(
      '--services-ipv4-cidr',
      metavar='CIDR',
      help="""\
Set the IP range for the services IPs.
Can be specified as a netmask size (e.g. '/20') or as in CIDR notion
(e.g. '10.100.0.0/20'). If given as a netmask size, the IP range will
be chosen automatically from the available space in the network.
If unspecified, the services CIDR range will be chosen with a default
mask size.
Can not be specified unless '--enable-ip-alias' is also specified.
""")
  # ArgDict with optional 'name'/'range' keys; an empty string value ("")
  # requests a subnetwork with all defaults, per the examples below.
  parser.add_argument(
      '--create-subnetwork',
      metavar='KEY=VALUE',
      type=arg_parsers.ArgDict(),
      help="""\
Create a new subnetwork for the cluster. The name and range of the
subnetwork can be customized via optional 'name' and 'range' key-value
pairs.
'name' specifies the name of the subnetwork to be created.
'range' specifies the IP range for the new subnetwork. This can either
be a netmask size (e.g. '/20') or a CIDR range (e.g. '10.0.0.0/20').
If a netmask size is specified, the IP is automatically taken from the
free space in the cluster's network.
Examples:
Create a new subnetwork with a default name and size.
  $ {command} --create-subnetwork ""
Create a new subnetwork named "my-subnet" with netmask of size 21.
  $ {command} --create-subnetwork name=my-subnet,range=/21
Create a new subnetwork with a default name with the primary range of
10.100.0.0/16.
  $ {command} --create-subnetwork range=10.100.0.0/16
Create a new subnetwork with the name "my-subnet" with a default range.
  $ {command} --create-subnetwork name=my-subnet
Can not be specified unless '--enable-ip-alias' is also specified. Can
not be used in conjunction with the '--subnetwork' option.
""")
  parser.add_argument(
      '--cluster-secondary-range-name',
      metavar='NAME',
      help="""\
Set the secondary range to be used as the source for pod IPs. Alias
ranges will be allocated from this secondary range. NAME must be the
name of an existing secondary range in the cluster subnetwork.
Must be used in conjunction with '--enable-ip-alias'. Cannot be used
with --create-subnetwork.
""")
  parser.add_argument(
      '--services-secondary-range-name',
      metavar='NAME',
      help="""\
Set the secondary range to be used for services (e.g. ClusterIPs).
NAME must be the name of an existing secondary range in the cluster
subnetwork.
Must be used in conjunction with '--enable-ip-alias'. Cannot be used
with --create-subnetwork.
""")
def AddMaxPodsPerNodeFlag(parser, for_node_pool=False, hidden=False):
  """Adds max pod number constraints flags to the parser.

  Args:
    parser: A given parser.
    for_node_pool: True if it's applied to a node pool.
      False if it's applied to a cluster.
    hidden: Whether or not to hide the help text.
  """
  parser.add_argument(
      '--max-pods-per-node',
      default=None,
      help="""\
The max number of pods per node for this node pool.
This flag sets the maximum number of pods that can be run at the same time on a
node. This will override the value given with --default-max-pods-per-node flag
set at the cluster level.
Must be used in conjunction with '--enable-ip-alias'.
""",
      hidden=hidden,
      type=int)
  # The cluster-wide default only applies at cluster level, so it is not
  # registered for node-pool commands.
  if not for_node_pool:
    parser.add_argument(
        '--default-max-pods-per-node',
        default=None,
        help="""\
The default max number of pods per node for node pools in the cluster.
This flag sets the default max-pods-per-node for node pools in the cluster. If
--max-pods-per-node is not specified explicitly for a node pool, this flag
value will be used.
Must be used in conjunction with '--enable-ip-alias'.
""",
        hidden=hidden,
        type=int)
def AddMinCpuPlatformFlag(parser, for_node_pool=False, hidden=False):
  """Registers the --min-cpu-platform flag on the given parser.

  Args:
    parser: A given parser.
    for_node_pool: whether the example command targets a node pool.
    hidden: Whether or not to hide the help text.
  """
  if for_node_pool:
    platform_help = """\
When specified, the nodes for the new node pool will be scheduled on host with
specified CPU architecture or a newer one.
Examples:
  $ {command} node-pool-1 --cluster=example-cluster --min-cpu-platform=PLATFORM
"""
  else:
    platform_help = """\
When specified, the nodes for the new cluster's default node pool will be
scheduled on host with specified CPU architecture or a newer one.
Examples:
  $ {command} example-cluster --min-cpu-platform=PLATFORM
"""
  platform_help += """\
To list available CPU platforms in given zone, run:
  $ gcloud beta compute zones describe ZONE --format="value(availableCpuPlatforms)"
CPU platform selection is available only in selected zones.
"""
  parser.add_argument(
      '--min-cpu-platform', metavar='PLATFORM', hidden=hidden,
      help=platform_help)
def AddWorkloadMetadataFromNodeFlag(parser, hidden=False):
  """Registers the --workload-metadata-from-node flag on the given parser.

  Args:
    parser: A given parser.
    hidden: Whether or not to hide the help text.
  """
  parser.add_argument(
      '--workload-metadata-from-node',
      default=None,
      # Normalize user input to upper case so any casing is accepted.
      type=lambda x: x.upper(),
      choices={
          'SECURE': 'Prevents workloads not in hostNetwork from accessing '
                    'certain VM metadata, specifically kube-env, which '
                    'contains Kubelet credentials, and the instance identity '
                    'token. This is a temporary security solution available '
                    'while the bootstrapping process for cluster nodes is '
                    'being redesigned with significant security improvements.',
          'EXPOSED': 'Exposes all VM metadata to workloads.',
          'UNSPECIFIED': 'Chooses the default.',
      },
      hidden=hidden,
      help="""\
Sets the node metadata option for workload metadata configuration. This feature
is scheduled to be deprecated in the future and later removed.
""")
def AddTagOrDigestPositional(parser,
                             verb,
                             repeated=True,
                             tags_only=False,
                             arg_name=None,
                             metavar=None):
  """Registers a positional image-name (tag or digest) argument.

  Args:
    parser: A given parser.
    verb: the operation described in the help text, e.g. 'delete'.
    repeated: whether the positional accepts one-or-more values.
    tags_only: omit the digest form from the help text.
    arg_name: override for the destination name; defaults to image_name(s).
    metavar: override for the displayed metavar.
  """
  digest_str = '' if tags_only else (
      '*.gcr.io/PROJECT_ID/IMAGE_PATH@sha256:DIGEST or')
  if not arg_name:
    arg_name = 'image_names' if repeated else 'image_name'
    metavar = metavar or 'IMAGE_NAME'
  parser.add_argument(
      arg_name,
      metavar=metavar or arg_name.upper(),
      nargs='+' if repeated else None,
      help=('The fully qualified name(s) of image(s) to {verb}. '
            'The name(s) should be formatted as {digest_str} '
            '*.gcr.io/PROJECT_ID/IMAGE_PATH:TAG.'.format(
                verb=verb, digest_str=digest_str)))
def AddImagePositional(parser, verb):
  """Registers the positional image_name argument on the given parser."""
  image_help = ('The name of the image to {verb}. The name format should be '
                '*.gcr.io/PROJECT_ID/IMAGE_PATH[:TAG|@sha256:DIGEST]. '.format(
                    verb=verb))
  parser.add_argument('image_name', help=image_help)
def AddNodeLocationsFlag(parser):
  """Registers the --node-locations flag on the given parser."""
  parser.add_argument(
      '--node-locations',
      metavar='ZONE',
      type=arg_parsers.ArgList(min_length=1),
      help="""\
The set of zones in which the specified node footprint should be replicated.
All zones must be in the same region as the cluster's master(s), specified by
the `--zone` or `--region` flag. Additionally, for zonal clusters,
`--node-locations` must contain the cluster's primary zone. If not specified,
all nodes will be in the cluster's primary zone (for zonal clusters) or spread
across three randomly chosen zones within the cluster's region (for regional
clusters).
Note that `NUM_NODES` nodes will be created in each zone, such that if you
specify `--num-nodes=4` and choose two locations, 8 nodes will be created.
Multiple locations can be specified, separated by commas. For example:
  $ {command} example-cluster --zone us-central1-a --node-locations us-central1-a,us-central1-b
""")
def AddLoggingServiceFlag(parser, enable_kubernetes):
  """Registers the --logging-service flag on the given parser.

  Args:
    parser: A given parser.
    enable_kubernetes: Mention Kubernetes-native resource model in help string
  """
  if enable_kubernetes:
    help_str = """\
Logging service to use for the cluster. Options are:
"logging.googleapis.com/kubernetes" (the Google Cloud Logging
service with Kubernetes-native resource model enabled),
"logging.googleapis.com" (the Google Cloud Logging service),
"none" (logs will not be exported from the cluster)
"""
  else:
    help_str = """\
Logging service to use for the cluster. Options are:
"logging.googleapis.com" (the Google Cloud Logging service),
"none" (logs will not be exported from the cluster)
"""
  parser.add_argument('--logging-service', help=help_str)
def AddMonitoringServiceFlag(parser, enable_kubernetes):
  """Registers the --monitoring-service flag on the given parser.

  Args:
    parser: A given parser.
    enable_kubernetes: Mention Kubernetes-native resource model in help string
  """
  if enable_kubernetes:
    help_str = """\
Monitoring service to use for the cluster. Options are:
"monitoring.googleapis.com/kubernetes" (the Google Cloud
Monitoring service with Kubernetes-native resource model enabled),
"monitoring.googleapis.com" (the Google Cloud Monitoring service),
"none" (no metrics will be exported from the cluster)
"""
  else:
    help_str = """\
Monitoring service to use for the cluster. Options are:
"monitoring.googleapis.com" (the Google Cloud Monitoring service),
"none" (no metrics will be exported from the cluster)
"""
  parser.add_argument('--monitoring-service', help=help_str)
def AddNodeIdentityFlags(parser, example_target, new_behavior=True):
  """Adds node identity flags to the given parser.

  Node identity flags are --scopes, --[no-]enable-cloud-endpoints (deprecated),
  and --service-account. --service-account is mutually exclusive with the
  others. --[no-]enable-cloud-endpoints is not allowed if property
  container/new_scopes_behavior is set to true, and is removed completely if
  new_behavior is set to true.

  Args:
    parser: A given parser.
    example_target: the target for the command, e.g. mycluster.
    new_behavior: Use new (alpha & beta) behavior: remove
      --[no-]enable-cloud-endpoints.
  """
  # --service-account cannot be combined with any scopes-related flag, hence
  # the mutex group with a nested group for the scopes options.
  node_identity_group = parser.add_group(
      mutex=True, help='Options to specify the node identity.')
  scopes_group = node_identity_group.add_group(help='Scopes options.')
  if new_behavior:
    track_help = """
Unless container/new_scopes_behavior property is true, compute-rw and storage-ro
are always added, even if not explicitly specified, and --enable-cloud-endpoints
(by default) adds service-control and service-management scopes.
If container/new_scopes_behavior property is true, none of the above scopes are
added (though storage-ro, service-control, and service-management are all
included in the default scopes. In a future release, this will be the default
behavior.
"""
  else:
    track_help = ''
  scopes_group.add_argument(
      '--scopes',
      type=arg_parsers.ArgList(),
      metavar='SCOPE',
      default='gke-default',
      help="""\
Specifies scopes for the node instances. Examples:
  $ {{command}} {example_target} --scopes=https://www.googleapis.com/auth/devstorage.read_only
  $ {{command}} {example_target} --scopes=bigquery,storage-rw,compute-ro
Multiple SCOPEs can be specified, separated by commas. `logging-write`
and/or `monitoring` are added unless Cloud Logging and/or Cloud Monitoring
are disabled (see `--enable-cloud-logging` and `--enable-cloud-monitoring`
for more information).
{track_help}
{scopes_help}
""".format(
    example_target=example_target,
    track_help=track_help,
    scopes_help=compute_constants.ScopesHelp()))
  cloud_endpoints_help_text = """\
Automatically enable Google Cloud Endpoints to take advantage of API management
features by adding service-control and service-management scopes.
If `--no-enable-cloud-endpoints` is set, remove service-control and
service-management scopes, even if they are implicitly (via default) or
explicitly set via `--scopes`.
`--[no-]enable-cloud-endpoints` is not allowed if
`container/new_scopes_behavior` property is set to true.
"""
  # With removed=new_behavior the DeprecationAction raises on use rather
  # than merely warning, effectively deleting the flag in new tracks.
  scopes_group.add_argument(
      '--enable-cloud-endpoints',
      action=actions.DeprecationAction(
          '--[no-]enable-cloud-endpoints',
          warn='Flag --[no-]enable-cloud-endpoints is deprecated and will be '
          'removed in a future release. Scopes necessary for Google Cloud '
          'Endpoints are now included in the default set and may be '
          'excluded using --scopes.',
          removed=new_behavior,
          action='store_true'),
      default=True,
      help=cloud_endpoints_help_text)
  sa_help_text = (
      'The Google Cloud Platform Service Account to be used by the node VMs. '
      'If a service account is specified, the cloud-platform and '
      'userinfo.email scopes are used. If no Service Account is specified, the '
      'project default service account is used.')
  node_identity_group.add_argument('--service-account', help=sa_help_text)
def AddClusterNodeIdentityFlags(parser):
  """Registers node identity flags for [alpha|beta] cluster commands.

  Thin wrapper around AddNodeIdentityFlags that uses 'example-cluster' in the
  example text and the default (non-deprecated) scopes behavior.

  Args:
    parser: A given parser.
  """
  AddNodeIdentityFlags(parser, example_target='example-cluster')
def AddDeprecatedClusterNodeIdentityFlags(parser):
  """Adds node identity flags to the given parser.

  This is a wrapper around AddNodeIdentityFlags for cluster commands, as it
  provides example-cluster as the example and uses the deprecated scopes
  behavior: new_behavior=False keeps the deprecated
  --[no-]enable-cloud-endpoints flag available (it only warns, rather than
  being removed).

  Args:
    parser: A given parser.
  """
  AddNodeIdentityFlags(
      parser, example_target='example-cluster', new_behavior=False)
def AddNodePoolNodeIdentityFlags(parser):
  """Registers node identity flags for (GA) node-pool commands.

  Thin wrapper around AddNodeIdentityFlags that uses
  'node-pool-1 --cluster=example-cluster' in the example text and the default
  (non-deprecated) scopes behavior.

  Args:
    parser: A given parser.
  """
  AddNodeIdentityFlags(
      parser, example_target='node-pool-1 --cluster=example-cluster')
def AddDeprecatedNodePoolNodeIdentityFlags(parser):
  """Adds node identity flags to the given parser.

  This is a wrapper around AddNodeIdentityFlags for (GA) node-pools, as it
  provides node-pool-1 as the example and uses the deprecated scopes behavior:
  new_behavior=False keeps the deprecated --[no-]enable-cloud-endpoints flag
  available (it only warns, rather than being removed).

  Args:
    parser: A given parser.
  """
  AddNodeIdentityFlags(
      parser,
      example_target='node-pool-1 --cluster=example-cluster',
      new_behavior=False)
def AddAddonsFlagsWithOptions(parser, addon_options):
  """Registers the --addons flag restricted to the given addon options.

  Args:
    parser: A given parser.
    addon_options: the set of addon names accepted as choices.
  """
  # TODO(b/65264376): Replace the doc link when a better doc is ready.
  addons_help = """\
Default set of addons includes {0}. Addons
(https://cloud.google.com/kubernetes-engine/reference/rest/v1/projects.zones.clusters#AddonsConfig)
are additional Kubernetes cluster components. Addons specified by this flag will
be enabled. The others will be disabled.
""".format(', '.join(api_adapter.DEFAULT_ADDONS))
  parser.add_argument(
      '--addons',
      metavar='ADDON',
      type=arg_parsers.ArgList(choices=addon_options),
      help=addons_help)
def AddAddonsFlags(parser):
  """Registers --addons with the option set shared by the beta and GA tracks."""
  AddAddonsFlagsWithOptions(parser, api_adapter.ADDONS_OPTIONS)
def AddAlphaAddonsFlags(parser):
  """Registers --addons with the alpha-track option set."""
  AddAddonsFlagsWithOptions(parser, api_adapter.ALPHA_ADDONS_OPTIONS)
def AddBetaAddonsFlags(parser):
  """Registers --addons with the beta-track option set."""
  AddAddonsFlagsWithOptions(parser, api_adapter.BETA_ADDONS_OPTIONS)
def AddPodSecurityPolicyFlag(parser, hidden=False):
  """Adds a --enable-pod-security-policy flag to parser."""
  # default=None lets callers distinguish "unset" from an explicit value.
  parser.add_argument(
      '--enable-pod-security-policy',
      action='store_true',
      default=None,
      hidden=hidden,
      help="""\
Enables the pod security policy admission controller for the cluster. The pod
security policy admission controller adds fine-grained pod create and update
authorization controls through the PodSecurityPolicy API objects. For more
information, see
https://cloud.google.com/kubernetes-engine/docs/how-to/pod-security-policies.
""")
def AddAllowRouteOverlapFlag(parser):
  """Adds a --allow-route-overlap flag to parser."""
  # default=None lets callers distinguish "unset" from an explicit value.
  parser.add_argument(
      '--allow-route-overlap',
      action='store_true',
      default=None,
      help="""\
Allows the provided cluster CIDRs to overlap with existing routes
that are less specific and do not terminate at a VM.
When enabled, `--cluster-ipv4-cidr` must be fully specified (e.g. `10.96.0.0/14`
, but not `/14`). If `--enable-ip-alias` is also specified, both
`--cluster-ipv4-cidr` and `--services-ipv4-cidr` must be fully specified.
""")
def AddTpuFlags(parser, hidden=False, enable_tpu_service_networking=False):
  """Adds flags related to TPUs to the parser.

  Args:
    parser: A given parser.
    hidden: Whether or not to hide the help text.
    enable_tpu_service_networking: Whether to add the
      enable_tpu_service_networking flag.
  """
  tpu_group = parser.add_group(help='Flags relating to Cloud TPUs:')
  tpu_group.add_argument(
      '--enable-tpu',
      action='store_true',
      hidden=hidden,
      help="""\
Enable Cloud TPUs for this cluster.
Can not be specified unless `--enable-kubernetes-alpha` and `--enable-ip-alias`
are also specified.
""")
  group = tpu_group
  if enable_tpu_service_networking:
    # --enable-tpu-service-networking and --tpu-ipv4-cidr are incompatible,
    # so both go into a mutually exclusive subgroup.
    group = tpu_group.add_mutually_exclusive_group()
    group.add_argument(
        '--enable-tpu-service-networking',
        action='store_true',
        hidden=hidden,
        help="""\
Enable Cloud TPU's Service Networking mode. In this mode, the CIDR blocks used
by the Cloud TPUs will be allocated and managed by Service Networking, instead
of Kubernetes Engine.
This cannot be specified if `tpu-ipv4-cidr` is specified.
""")
  # --tpu-ipv4-cidr lands either directly in the TPU group or in the mutex
  # subgroup, depending on enable_tpu_service_networking.
  group.add_argument(
      '--tpu-ipv4-cidr',
      metavar='CIDR',
      hidden=hidden,
      help="""\
Set the IP range for the Cloud TPUs.
Can be specified as a netmask size (e.g. '/20') or as in CIDR notion
(e.g. '10.100.0.0/20'). If given as a netmask size, the IP range will be chosen
automatically from the available space in the network.
If unspecified, the TPU CIDR range will use automatic default '/20'.
Can not be specified unless '--enable-tpu' and '--enable-ip-alias' are also
specified.
""")
def AddIssueClientCertificateFlag(parser):
  """Adds --issue-client-certificate flag to the parser."""
  # default=None lets callers distinguish "unset" from an explicit value.
  parser.add_argument(
      '--issue-client-certificate',
      action='store_true',
      default=None,
      help="""\
Issue a TLS client certificate with admin permissions.
When enabled, the certificate and private key pair will be present in
MasterAuth field of the Cluster object. For cluster versions before 1.12, a
client certificate will be issued by default. As of 1.12, client certificates
are disabled by default.
""")
def AddIstioConfigFlag(parser, suppressed=False):
  """Adds --istio-config flag to the parser.

  Args:
    parser: A given parser.
    suppressed: Whether or not to suppress help text.
  """
  help_text = """\
Configurations for Istio addon, requires --addons contains Istio for create,
or --update-addons Istio=ENABLED for update.
*auth*:::Optional Type of auth MTLS_PERMISSIVE or MTLS_STRICT
Example:
$ {command} example-cluster --istio-config=auth=MTLS_PERMISSIVE
"""
  parser.add_argument(
      '--istio-config',
      metavar='auth=MTLS_PERMISSIVE',
      type=arg_parsers.ArgDict(
          spec={
              # Upper-cases the value so e.g. 'mtls_strict' is accepted;
              # validation happens later in ValidateIstioConfig*Args.
              'auth': (lambda x: x.upper()),
          }),
      help=help_text,
      hidden=suppressed)
def ValidateIstioConfigCreateArgs(istio_config_args, addons_args):
  """Validates flags specifying Istio config for create.

  Args:
    istio_config_args: parsed comandline arguments for --istio_config.
    addons_args: parsed comandline arguments for --addons.

  Raises:
    InvalidArgumentException: when auth is not MTLS_PERMISSIVE nor MTLS_STRICT,
    or --addon=Istio is not specified
  """
  if istio_config_args:
    auth = istio_config_args.get('auth', '')
    if auth not in ['MTLS_PERMISSIVE', 'MTLS_STRICT']:
      # Wording matches ValidateIstioConfigUpdateArgs; the previous message
      # was missing the space before 'e.g.' across the implicit string concat.
      raise exceptions.InvalidArgumentException(
          '--istio-config', 'auth must be one of MTLS_PERMISSIVE or '
          'MTLS_STRICT e.g. --istio-config auth=MTLS_PERMISSIVE')
    if 'Istio' not in addons_args:
      raise exceptions.InvalidArgumentException(
          '--istio-config', '--addon=Istio must be specified when '
          '--istio-config is given')
def ValidateIstioConfigUpdateArgs(istio_config_args, disable_addons_args):
  """Validates flags specifying Istio config for update.

  Args:
    istio_config_args: parsed comandline arguments for --istio_config.
    disable_addons_args: parsed comandline arguments for --update-addons.

  Raises:
    InvalidArgumentException: when auth is not MTLS_PERMISSIVE nor MTLS_STRICT,
    or --update-addons=Istio=ENABLED is not specified
  """
  if not istio_config_args:
    return
  auth_mode = istio_config_args.get('auth', '')
  if auth_mode not in ('MTLS_PERMISSIVE', 'MTLS_STRICT'):
    raise exceptions.InvalidArgumentException(
        '--istio-config', 'auth must be one of MTLS_PERMISSIVE or '
        'MTLS_STRICT e.g. --istio-config auth=MTLS_PERMISSIVE')
  # --istio-config only makes sense when Istio is being enabled (disable
  # value False) in the same invocation.
  disable_istio = disable_addons_args.get('Istio')
  if disable_istio is None or disable_istio:
    raise exceptions.InvalidArgumentException(
        '--istio-config', '--update-addons=Istio=ENABLED must be specified '
        'when --istio-config is given')
def AddConcurrentNodeCountFlag(parser):
  """Adds a --concurrent-node-count flag to the given parser."""
  # The previous help text ended with a stray apostrophe ("cluster size.'").
  help_text = """\
The number of nodes to upgrade concurrently. Valid values are [1, {max}].
It is a recommended best practice to set this value to no higher than 3% of
your cluster size.
""".format(max=api_adapter.MAX_CONCURRENT_NODE_COUNT)
  parser.add_argument(
      '--concurrent-node-count',
      type=arg_parsers.BoundedInt(1, api_adapter.MAX_CONCURRENT_NODE_COUNT),
      help=help_text)
# TODO(b/110368338): Drop this warning when changing the default value of the
# flag.
def WarnForUnspecifiedIpAllocationPolicy(args):
  """Warns when --[no-]enable-ip-alias was left unset by the user."""
  if args.IsSpecified('enable_ip_alias'):
    return
  log.warning(
      'Currently VPC-native is not the default mode during cluster creation. '
      'In the future, this will become the default mode and can be disabled '
      'using `--no-enable-ip-alias` flag. Use `--[no-]enable-ip-alias` flag '
      'to suppress this warning.')
def WarnForNodeModification(args, enable_autorepair):
  """Warns that boot-disk changes on Ubuntu nodes are lost on recreation."""
  # Only Ubuntu node images get this warning.
  if (args.image_type or '').lower() != 'ubuntu':
    return
  # Recreation only happens automatically when repair/upgrade is on.
  if not (enable_autorepair or args.enable_autoupgrade):
    return
  log.warning('Modifications on the boot disks of node VMs do not persist '
              'across node recreations. Nodes are recreated during '
              'manual-upgrade, auto-upgrade, auto-repair, and auto-scaling. '
              'To preserve modifications across node recreation, use a '
              'DaemonSet.')
def AddMachineTypeFlag(parser):
  """Adds a --machine-type flag to the parser.

  Args:
    parser: A given parser.
  """
  machine_type_help = """\
The type of machine to use for nodes. Defaults to n1-standard-1.
The list of predefined machine types is available using the following command:
$ gcloud compute machine-types list
You can also specify custom machine types with the string "custom-CPUS-RAM"
where ```CPUS``` is the number of virtual CPUs and ```RAM``` is the amount of
RAM in MiB.
For example, to create a node pool using custom machines with 2 vCPUs and 12 GB
of RAM:
$ {command} high-mem-pool --machine-type=custom-2-12288
"""
  parser.add_argument('--machine-type', '-m', help=machine_type_help)
def AddManagedPodIdentityFlags(parser):
  """Adds Managed Pod Identity flags to the parser."""
  parser.add_argument(
      '--enable-managed-pod-identity',
      action='store_true',
      default=False,
      # TODO(b/109942548): unhide this flag for Beta
      hidden=True,
      help="""\
Enable Managed Pod Identity on the cluster.
When enabled, pods with cloud.google.com/service-account annotations will be
able to authenticate to Google Cloud Platform APIs on behalf of service account
specified in the annotation.
""")
  parser.add_argument(
      '--federating-service-account',
      default=None,
      # TODO(b/109942548): unhide this flag for Beta
      hidden=True,
      help="""\
Federating Service Account to use with Managed Pod Identity.
Sets the name (email) of the GCP Service Account used to connect
Kubernetes Service Accounts to GCP Service Accounts.
Must be set with `--enable-managed-pod-identity`.
""")
def AddResourceUsageExportFlags(parser, add_clear_flag=False, hidden=False):
  """Adds flags about exporting cluster resource usage to BigQuery.

  Args:
    parser: A given parser.
    add_clear_flag: Whether to also add a flag that disables the export,
      mutually exclusive with the configuration flags.
    hidden: Whether or not to hide the help text.
  """
  group = parser.add_group(
      "Exports cluster's usage of cloud resources",
      hidden=hidden)
  if add_clear_flag:
    # Clearing the export config and setting it are mutually exclusive.
    group.is_mutex = True
    group.add_argument(
        '--clear-resource-usage-bigquery-dataset',
        action='store_true',
        hidden=hidden,
        default=None,
        help='Disables exporting cluster resource usage to BigQuery.')
    group = group.add_group()
  dataset_help_text = """\
The name of the BigQuery dataset to which the cluster's usage of cloud
resources is exported. A table will be created in the specified dataset to
store cluster resource usage. The resulting table can be joined with BigQuery
Billing Export to produce a fine-grained cost breakdown.
Example:
$ {command} example-cluster --resource-usage-bigquery-dataset=example_bigquery_dataset_name
"""
  group.add_argument(
      '--resource-usage-bigquery-dataset',
      default=None,
      hidden=hidden,
      help=dataset_help_text)
  # The original string opened with `"""`` (a stray backtick instead of the
  # `"""\` continuation), which leaked a literal backtick into the help text.
  network_egress_help_text = """\
Enable network egress metering on this cluster.
When enabled, a DaemonSet is deployed into the cluster. Each DaemonSet pod
meters network egress traffic by collecting data from the conntrack table, and
exports the metered metrics to the specified destination.
Network egress metering is disabled if this flag is omitted, or when
`--no-enable-network-egress-metering` is set.
"""
  group.add_argument(
      '--enable-network-egress-metering',
      action='store_true',
      default=None,
      help=network_egress_help_text)
def AddEnablePrivateIpv6AccessFlag(parser, hidden=False):
  """Adds --enable-private-ipv6-access flag to the parser.

  When enabled, gRPC clients on this cluster's pods get a fast path to
  Google hosted services (e.g. Cloud Spanner, Cloud Dataflow, Cloud
  Bigtable). Currently Alpha-cluster only; requires
  '--enable-kubernetes-alpha' as well.

  Args:
    parser: A given parser.
    hidden: If true, suppress help text for added options.
  """
  parser.add_argument(
      '--enable-private-ipv6-access',
      action='store_true',
      default=None,
      hidden=hidden,
      help="""\
Enables private access to Google services over IPv6.
When enabled, this allows gRPC clients on this cluster's pods a fast path to
access Google hosted services (eg. Cloud Spanner, Cloud Dataflow, Cloud
Bigtable).
This is currently only available on Alpha clusters, specified by using
--enable-kubernetes-alpha.
""")
def AddVerticalPodAutoscalingFlag(parser, hidden=False):
  """Adds the --enable-vertical-pod-autoscaling flag to the parser.

  Args:
    parser: A given parser.
    hidden: If true, suppress help text for added options.
  """
  parser.add_argument(
      '--enable-vertical-pod-autoscaling',
      action='store_true',
      default=None,
      hidden=hidden,
      help='Enables vertical pod autoscaling for a cluster.')
# TODO(b/112194849): Explain limitation to the sandbox pods and the nodes.
def AddSandboxFlag(parser, hidden=False):
  """Adds a --sandbox flag to the given parser.

  Args:
    parser: A given parser.
    hidden: Whether or not to hide the help text.
  """
  # 'gvisor' is the only accepted sandbox type today.
  type_validator = arg_parsers.RegexpValidator(
      r'^gvisor$', 'Type must be "gvisor"')
  parser.add_argument(
      '--sandbox',
      type=arg_parsers.ArgDict(
          spec={'type': type_validator},
          required_keys=['type'],
          max_length=1),
      metavar='type=TYPE',
      hidden=hidden,
      help="""\
Enables the requested sandbox on all nodes in the node-pool. Example:
$ {command} node-pool-1 --cluster=example-cluster --sandbox type=gvisor
The only supported type is 'gvisor'.
""")
def AddSecurityProfileForCreateFlags(parser, hidden=False):
  """Adds flags related to Security Profile to the parser for cluster creation.

  Args:
    parser: A given parser.
    hidden: Whether or not to hide the help text.
  """
  group = parser.add_group(help='Flags for Security Profile:')
  group.add_argument(
      '--security-profile',
      hidden=hidden,
      help="""\
Name and version of the security profile to be applied to the cluster.
Example:
$ {command} example-cluster --security-profile=default-1.0-gke.0
""")
  # Runtime rules default to on; users opt out with
  # --no-security-profile-runtime-rules.
  group.add_argument(
      '--security-profile-runtime-rules',
      default=True,
      action='store_true',
      hidden=hidden,
      help="""\
Apply runtime rules in the specified security profile to the cluster.
When enabled (by default), a security profile controller and webhook
are deployed on the cluster to enforce the runtime rules. If
--no-security-profile-runtime-rules is specified to disable this
feature, only bootstrapping rules are applied, and no security profile
controller or webhook are installed.
""")
def AddSecurityProfileForUpdateFlag(parser, hidden=False):
  """Adds --security-profile to specify security profile for cluster update.

  Args:
    parser: A given parser.
    hidden: Whether or not to hide the help text.
  """
  profile_help = """\
Name and version of the security profile to be applied to the cluster.
If not specified, the current setting of security profile will be
preserved.
Example:
$ {command} example-cluster --security-profile=default-1.0-gke.1
"""
  parser.add_argument('--security-profile', hidden=hidden, help=profile_help)
def AddSecurityProfileForUpgradeFlags(parser, hidden=False):
  """Adds flags related to Security Profile to the parser for cluster upgrade.

  Args:
    parser: A given parser.
    hidden: Whether or not to hide the help text.
  """
  group = parser.add_group(help='Flags for Security Profile:')
  group.add_argument(
      '--security-profile',
      hidden=hidden,
      help="""\
Name and version of the security profile to be applied to the cluster.
If not specified, the current security profile settings are preserved.
If the current security profile is not supported in the new cluster
version, this option must be explicitly specified with a supported
security profile, otherwise the operation will fail.
Example:
$ {command} example-cluster --security-profile=default-1.0-gke.1
""")
  # NOTE(review): default=None here (vs. True in the create variant)
  # presumably means "preserve the cluster's current setting" — confirm.
  group.add_argument(
      '--security-profile-runtime-rules',
      default=None,
      action='store_true',
      hidden=hidden,
      help="""\
Apply runtime rules in the specified security profile to the cluster.
When enabled, a security profile controller and webhook
are deployed on the cluster to enforce the runtime rules. If
--no-security-profile-runtime-rules is specified to disable this
feature, only bootstrapping rules are applied, and no security profile
controller or webhook are installed.
""")
def AddNodeGroupFlag(parser):
  """Adds --node-group flag to the parser."""
  parser.add_argument(
      '--node-group',
      hidden=True,
      help="""\
Assign instances of this pool to run on the specified GCE node group.
This is useful for running workloads on sole tenant nodes.
To see available sole tenant node-groups, run:
$ gcloud compute sole-tenancy node-groups list
To create a sole tenant node group, run:
$ gcloud compute sole-tenancy node-groups create [GROUP_NAME] \
--zone [ZONE] --node-template [TEMPLATE_NAME] --target-size [TARGET_SIZE]
See https://cloud.google.com/compute/docs/nodes for more
information on sole tenancy and node groups.
""")
def AddInitialNodePoolNameArg(parser, hidden=True):
  """Adds --node-pool-name argument to the parser."""
  node_pool_name_help = """\
Name of the initial node pool that will be created for the cluster.
Specifies the name to use for the initial node pool that will be created
with the cluster. If the settings specified require multiple node pools
to be created, the name for each pool will be prefixed by this name. For
example running the following will result in three node pools being
created, example-node-pool-0, example-node-pool-1 and
example-node-pool-2:
$ {command} example-cluster --num-nodes 9 --max-nodes-per-pool 3 \
--node-pool-name example-node-pool
"""
  parser.add_argument(
      '--node-pool-name', hidden=hidden, help=node_pool_name_help)
def AddMetadataFlags(parser):
  """Adds --metadata and --metadata-from-file flags to the given parser."""
  metadata_help = """\
Compute Engine metadata to be made available to the guest operating system
running on nodes within the node pool.
Each metadata entry is a key/value pair separated by an equals sign.
Metadata keys must be unique and less than 128 bytes in length. Values
must be less than or equal to 32,768 bytes in length. The total size of
all keys and values must be less than 512 KB. Multiple arguments can be
passed to this flag. For example:
``--metadata key-1=value-1,key-2=value-2,key-3=value-3''
Additionally, the following keys are reserved for use by Kubernetes
Engine:
* ``cluster-location''
* ``cluster-name''
* ``cluster-uid''
* ``configure-sh''
* ``enable-os-login''
* ``gci-update-strategy''
* ``gci-ensure-gke-docker''
* ``instance-template''
* ``kube-env''
* ``startup-script''
* ``user-data''
See also Compute Engine's
link:https://cloud.google.com/compute/docs/storing-retrieving-metadata[documentation]
on storing and retrieving instance metadata.
"""
  parser.add_argument(
      '--metadata',
      type=arg_parsers.ArgDict(min_length=1),
      default={},
      help=metadata_help,
      metavar='KEY=VALUE',
      # NOTE(review): StoreOnceAction presumably rejects a repeated
      # --metadata flag rather than merging values — confirm.
      action=arg_parsers.StoreOnceAction)
  metadata_from_file_help = """\
Same as ``--metadata'' except that the value for the entry will
be read from a local file.
"""
  parser.add_argument(
      '--metadata-from-file',
      type=arg_parsers.ArgDict(min_length=1),
      default={},
      help=metadata_from_file_help,
      metavar='KEY=LOCAL_FILE_PATH')
| 33.222647 | 152 | 0.708708 |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import constants as compute_constants
from googlecloudsdk.api_lib.container import api_adapter
from googlecloudsdk.api_lib.container import util
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.container import constants
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
def AddBasicAuthFlags(parser):
  """Adds basic-auth flags (--username/--enable-basic-auth/--password)."""
  basic_auth_group = parser.add_group(help='Basic auth')
  # --username and --enable-basic-auth are aliases of each other, so they
  # live in a mutually exclusive subgroup; see MungeBasicAuthFlags.
  username_group = basic_auth_group.add_group(
      mutex=True, help='Options to specify the username.')
  username_help_text = """\
The user name to use for basic auth for the cluster. Use `--password` to specify
a password; if not, the server will randomly generate one."""
  username_group.add_argument('--username', '-u', help=username_help_text)
  enable_basic_auth_help_text = """\
Enable basic (username/password) auth for the cluster. `--enable-basic-auth` is
an alias for `--username=admin`; `--no-enable-basic-auth` is an alias for
`--username=""`. Use `--password` to specify a password; if not, the server will
randomly generate one. For cluster versions before 1.12, if neither
`--enable-basic-auth` nor `--username` is specified, `--enable-basic-auth` will
default to `true`. After 1.12, `--enable-basic-auth` will default to `false`."""
  username_group.add_argument(
      '--enable-basic-auth',
      help=enable_basic_auth_help_text,
      action='store_true',
      default=None)
  basic_auth_group.add_argument(
      '--password',
      help='The password to use for cluster auth. Defaults to a '
      'server-specified randomly-generated string.')
def MungeBasicAuthFlags(args):
  """Folds --enable-basic-auth into args.username and validates the combo.

  `--enable-basic-auth` becomes `--username=admin`; `--no-enable-basic-auth`
  becomes `--username=""`. A password without a username is rejected.

  Args:
    args: parsed command-line arguments, mutated in place.

  Raises:
    util.Error: if a password was specified without a username.
  """
  if args.IsSpecified('enable_basic_auth'):
    args.username = 'admin' if args.enable_basic_auth else ''
  if not args.username and args.IsSpecified('password'):
    raise util.Error(constants.USERNAME_PASSWORD_ERROR_MSG)
def AddImageTypeFlag(parser, target):
  """Adds an --image-type flag describing the node OS image for `target`."""
  parser.add_argument(
      '--image-type',
      help="""\
The image type to use for the {target}. Defaults to server-specified.
Image Type specifies the base OS that the nodes in the {target} will run on.
If an image type is specified, that will be assigned to the {target} and all
future upgrades will use the specified image type. If it is not specified the
server will pick the default image type.
The default image type and the list of valid image types are available
using the following command.
$ gcloud container get-server-config
""".format(target=target))
def AddImageFlag(parser, hidden=False):
  """Adds an --image flag for choosing a specific node image."""
  parser.add_argument(
      '--image',
      hidden=hidden,
      help="""\
A specific image to use on the new instances.
""")
def AddImageProjectFlag(parser, hidden=False):
  """Adds an --image-project flag to the parser.

  Args:
    parser: A given parser.
    hidden: Whether or not to hide the help text.
  """
  # The original help string opened with `"""/` (a stray slash instead of
  # the `"""\` line continuation), leaking a literal '/' into the help text;
  # the grammar ("from which contains") is also fixed.
  help_text = """\
A specific project which contains the os image or image family. This is
required when using --image-type=CUSTOM.
"""
  parser.add_argument('--image-project', help=help_text, hidden=hidden)
def AddImageFamilyFlag(parser, hidden=False):
  """Adds an --image-family flag to the parser.

  Args:
    parser: A given parser.
    hidden: Whether or not to hide the help text.
  """
  # The original help string opened with `"""/` (a stray slash instead of
  # the `"""\` line continuation), leaking a literal '/' into the help text.
  help_text = """\
A specific image-family from which the most recent image is used on new
instances. If both image and image family are specified, the image must be in
the image family, and the image is used.
"""
  parser.add_argument('--image-family', help=help_text, hidden=hidden)
def AddNodeVersionFlag(parser, hidden=False):
  """Adds a --node-version flag; returns the created argument action."""
  node_version_help = """\
The Kubernetes version to use for nodes. Defaults to server-specified.
The default Kubernetes version is available using the following command.
$ gcloud container get-server-config
"""
  return parser.add_argument(
      '--node-version', help=node_version_help, hidden=hidden)
def AddClusterVersionFlag(parser, suppressed=False, help=None):  # pylint: disable=redefined-builtin
  """Adds a --cluster-version flag; returns the created argument action.

  Args:
    parser: A given parser.
    suppressed: Whether or not to hide the help text.
    help: Custom help text; a default message is used when None.
  """
  default_help = """\
The Kubernetes version to use for the master and nodes. Defaults to
server-specified.
The default Kubernetes version is available using the following command.
$ gcloud container get-server-config
"""
  return parser.add_argument(
      '--cluster-version',
      help=default_help if help is None else help,
      hidden=suppressed)
def AddClusterAutoscalingFlags(parser, update_group=None, hidden=False):
  """Adds cluster autoscaling flags; returns the new argument group.

  Args:
    parser: A given parser.
    update_group: optional group to hold --enable-autoscaling instead of the
      new group (used by update commands).
    hidden: If true, suppress help text for added options.

  Returns:
    The newly created 'Cluster autoscaling' argument group.
  """
  group = parser.add_argument_group('Cluster autoscaling')
  enable_target = group if update_group is None else update_group
  enable_target.add_argument(
      '--enable-autoscaling',
      default=None,
      hidden=hidden,
      action='store_true',
      help="""\
Enables autoscaling for a node pool.
Enables autoscaling in the node pool specified by --node-pool or
the default node pool if --node-pool is not provided.""")
  group.add_argument(
      '--max-nodes',
      hidden=hidden,
      type=int,
      help="""\
Maximum number of nodes in the node pool.
Maximum number of nodes to which the node pool specified by --node-pool
(or default node pool if unspecified) can scale. Ignored unless
--enable-autoscaling is also specified.""")
  group.add_argument(
      '--min-nodes',
      hidden=hidden,
      type=int,
      help="""\
Minimum number of nodes in the node pool.
Minimum number of nodes to which the node pool specified by --node-pool
(or default node pool if unspecified) can scale. Ignored unless
--enable-autoscaling is also specified.""")
  return group
def AddNodePoolAutoprovisioningFlag(parser, hidden=True):
  """Adds an --enable-autoprovisioning flag for node-pool commands."""
  parser.add_argument(
      '--enable-autoprovisioning',
      action='store_true',
      default=None,
      hidden=hidden,
      help="""\
Enables Cluster Autoscaler to treat the node pool as if it was autoprovisioned.
Cluster Autoscaler will be able to delete the node pool if it's unneeded.""")
def AddLocalSSDFlag(parser, suppressed=False, help_text=''):
  """Adds a --local-ssd-count flag, appending standard local-SSD help.

  Args:
    parser: A given parser.
    suppressed: Whether or not to hide the help text.
    help_text: Optional text prepended to the generated help.
  """
  full_help = help_text + """\
The number of local SSD disks to provision on each node.
Local SSDs have a fixed 375 GB capacity per device. The number of disks that
can be attached to an instance is limited by the maximum number of disks
available on a machine, which differs by compute zone. See
https://cloud.google.com/compute/docs/disks/local-ssd for more information."""
  parser.add_argument(
      '--local-ssd-count',
      type=int,
      default=0,
      hidden=suppressed,
      help=full_help)
def AddAcceleratorArgs(parser):
  """Adds an --accelerator flag (type=TYPE[,count=COUNT]) to the parser."""
  parser.add_argument(
      '--accelerator',
      type=arg_parsers.ArgDict(
          spec={
              'type': str,
              'count': int,
          },
          # 'count' is optional; only 'type' must be present.
          required_keys=['type'],
          max_length=2),
      metavar='type=TYPE,[count=COUNT]',
      help="""\
Attaches accelerators (e.g. GPUs) to all nodes.
*type*::: (Required) The specific type (e.g. nvidia-tesla-k80 for nVidia Tesla K80)
of accelerator to attach to the instances. Use ```gcloud compute
accelerator-types list``` to learn about all available accelerator types.
*count*::: (Optional) The number of accelerators to attach to the
instances. The default value is 1.
""")
def AddAutoprovisioningFlags(parser, hidden=False):
  """Adds node autoprovisioning flags to the parser.

  Resource limits are provided either via a config file or via individual
  --max/min-* flags, never both.

  Args:
    parser: A given parser.
    hidden: If true, suppress help text for added options.
  """
  group = parser.add_argument_group('Node autoprovisioning', hidden=hidden)
  group.add_argument(
      '--enable-autoprovisioning',
      required=True,
      default=None,
      help="""\
Enables node autoprovisioning for a cluster.
Cluster Autoscaler will be able to create new node pools. Requires maximum CPU
and memory limits to be specified.""",
      hidden=hidden,
      action='store_true')
  # Config file and individual limit flags are mutually exclusive.
  limits_group = group.add_mutually_exclusive_group()
  limits_group.add_argument(
      '--autoprovisioning-config-file',
      type=arg_parsers.BufferedFileInput(),
      hidden=hidden,
      help="""\
Path of the JSON/YAML file which contains information about the
cluster's autoscaling configuration. Currently it only contains
a list of resource limits of the cluster.
Each resource limits definition contains three fields:
resourceType, maximum and minimum.
Resource type can be "cpu", "memory" or an accelerator (e.g.
"nvidia-tesla-k80" for nVidia Tesla K80). Use gcloud compute accelerator-types
list to learn about available accelerator types.
Maximum is the maximum allowed amount with the unit of the resource.
Minimum is the minimum allowed amount with the unit of the resource.
""")
  from_flags_group = limits_group.add_argument_group('Flags to configure '
                                                     'resource limits:')
  from_flags_group.add_argument(
      '--max-cpu',
      required=True,
      help="""\
Maximum number of cores in the cluster.
Maximum number of cores to which the cluster can scale.""",
      hidden=hidden,
      type=int)
  from_flags_group.add_argument(
      '--min-cpu',
      help="""\
Minimum number of cores in the cluster.
Minimum number of cores to which the cluster can scale.""",
      hidden=hidden,
      type=int)
  from_flags_group.add_argument(
      '--max-memory',
      required=True,
      help="""\
Maximum memory in the cluster.
Maximum number of gigabytes of memory to which the cluster can scale.""",
      hidden=hidden,
      type=int)
  from_flags_group.add_argument(
      '--min-memory',
      help="""\
Minimum memory in the cluster.
Minimum number of gigabytes of memory to which the cluster can scale.""",
      hidden=hidden,
      type=int)
  accelerator_group = from_flags_group.add_argument_group(
      'Arguments to set limits on accelerators:')
  accelerator_group.add_argument(
      '--max-accelerator',
      type=arg_parsers.ArgDict(spec={
          'type': str,
          'count': int,
      }, required_keys=['type', 'count'], max_length=2),
      required=True,
      metavar='type=TYPE,count=COUNT',
      hidden=hidden,
      help="""\
Sets maximum limit for a single type of accelerators (e.g. GPUs) in cluster.
*type*::: (Required) The specific type (e.g. nvidia-tesla-k80 for nVidia Tesla K80)
of accelerator for which the limit is set. Use ```gcloud compute
accelerator-types list``` to learn about all available accelerator types.
*count*::: (Required) The maximum number of accelerators
to which the cluster can be scaled.
""")
  accelerator_group.add_argument(
      '--min-accelerator',
      type=arg_parsers.ArgDict(spec={
          'type': str,
          'count': int,
      }, required_keys=['type', 'count'], max_length=2),
      metavar='type=TYPE,count=COUNT',
      hidden=hidden,
      help="""\
Sets minimum limit for a single type of accelerators (e.g. GPUs) in cluster. Defaults
to 0 for all accelerator types if it isn't set.
*type*::: (Required) The specific type (e.g. nvidia-tesla-k80 for nVidia Tesla K80)
of accelerator for which the limit is set. Use ```gcloud compute
accelerator-types list``` to learn about all available accelerator types.
*count*::: (Required) The minimum number of accelerators
to which the cluster can be scaled.
""")
def AddEnableBinAuthzFlag(parser, hidden=False):
  """Adds an --enable-binauthz flag to parser."""
  parser.add_argument(
      '--enable-binauthz',
      action='store_true',
      default=None,
      hidden=hidden,
      help='Enable Binary Authorization for this cluster.')
def AddZoneAndRegionFlags(parser):
  """Adds mutually exclusive --zone/-z and --region flags to the parser."""
  # TODO(b/33343238): Remove the short form of the zone flag.
  # TODO(b/18105938): Add zone prompting
  group = parser.add_mutually_exclusive_group()
  group.add_argument(
      '--zone',
      '-z',
      help='Compute zone (e.g. us-central1-a) for the cluster',
      # Also stores the value into the compute/zone property.
      action=actions.StoreProperty(properties.VALUES.compute.zone))
  group.add_argument(
      '--region',
      help='Compute region (e.g. us-central1) for the cluster.')
def AddAsyncFlag(parser):
  """Adds the --async flag to the given parser."""
  # default=None lets callers distinguish "unset" from an explicit value.
  parser.add_argument(
      '--async',
      action='store_true',
      default=None,
      help="Don't wait for the operation to complete.")
def AddEnableKubernetesAlphaFlag(parser):
  """Adds an --enable-kubernetes-alpha flag to parser."""
  parser.add_argument(
      '--enable-kubernetes-alpha',
      action='store_true',
      help="""\
Enable Kubernetes alpha features on this cluster. Selecting this
option will result in the cluster having all Kubernetes alpha API groups and
features turned on. Cluster upgrades (both manual and automatic) will be
disabled and the cluster will be automatically deleted after 30 days.
Alpha clusters are not covered by the Kubernetes Engine SLA and should not be
used for production workloads.""")
def AddEnableStackdriverKubernetesFlag(parser):
  """Adds an --enable-stackdriver-kubernetes flag to parser."""
  parser.add_argument(
      '--enable-stackdriver-kubernetes',
      action='store_true',
      help='Enable Stackdriver Kubernetes monitoring and logging.')
def AddNodeLabelsFlag(parser, for_node_pool=False):
  """Adds a --node-labels flag to the parser.

  Args:
    parser: A given parser.
    for_node_pool: True on node-pool commands; only changes the example text.
  """
  if for_node_pool:
    help_text = """\
Applies the given kubernetes labels on all nodes in the new node-pool. Example:
$ {command} node-pool-1 --cluster=example-cluster --node-labels=label1=value1,label2=value2
"""
  else:
    help_text = """\
Applies the given kubernetes labels on all nodes in the new node-pool. Example:
$ {command} example-cluster --node-labels=label-a=value1,label-2=value2
"""
  help_text += """
New nodes, including ones created by resize or recreate, will have these labels
on the kubernetes API node object and can be used in nodeSelectors.
See [](http://kubernetes.io/docs/user-guide/node-selection/) for examples.
Note that kubernetes labels, intended to associate cluster components
and resources with one another and manage resource lifecycles, are different
from Kubernetes Engine labels that are used for the purpose of tracking billing
and usage information."""
  parser.add_argument(
      '--node-labels',
      metavar='NODE_LABEL',
      type=arg_parsers.ArgDict(),
      help=help_text)
def AddLocalSSDAndLocalSSDVolumeConfigsFlag(parser, for_node_pool=False,
                                            suppressed=False):
  """Adds mutually exclusive --local-ssd-count / --local-ssd-volumes flags."""
  shared_help = """\
--local-ssd-volumes enables the ability to request local SSD with variable count, interfaces, and format\n
--local-ssd-count is the equivalent of using --local-ssd-volumes with type=scsi,format=fs
"""
  mutex_group = parser.add_mutually_exclusive_group()
  AddLocalSSDVolumeConfigsFlag(
      mutex_group, for_node_pool=for_node_pool, help_text=shared_help)
  AddLocalSSDFlag(mutex_group, suppressed=suppressed, help_text=shared_help)
def AddLocalSSDVolumeConfigsFlag(parser, for_node_pool=False, help_text=''):
  """Adds a repeatable --local-ssd-volumes flag to the parser.

  Args:
    parser: A given parser.
    for_node_pool: True on node-pool commands; only changes the example text.
    help_text: Optional text prepended to the generated help.
  """
  help_text += """\
Adds the requested local SSDs on all nodes in default node-pool(s) in new cluster. Example:
$ {{command}} {0} --local-ssd-volumes count=2,type=nvme,format=fs
'count' must be between 1-8\n
'type' must be either scsi or nvme\n
'format' must be either fs or block
New nodes, including ones created by resize or recreate, will have these local SSDs.
Local SSDs have a fixed 375 GB capacity per device. The number of disks that
can be attached to an instance is limited by the maximum number of disks
available on a machine, which differs by compute zone. See
https://cloud.google.com/compute/docs/disks/local-ssd for more information.
""".format('node-pool-1 --cluster=example-cluster' if for_node_pool else
           'example_cluster')
  # All three keys are required and validated by regex.
  count_validator = arg_parsers.RegexpValidator(
      r'^[1-8]$', 'Count must be a number between 1 and 8')
  type_validator = arg_parsers.RegexpValidator(
      r'^(scsi|nvme)$', 'Type must be either "scsi" or "nvme"')
  format_validator = arg_parsers.RegexpValidator(
      r'^(fs|block)$', 'Format must be either "fs" or "block"')
  parser.add_argument(
      '--local-ssd-volumes',
      metavar='[count=COUNT],[type=TYPE],[format=FORMAT]',
      type=arg_parsers.ArgDict(
          spec={
              'count': count_validator,
              'type': type_validator,
              'format': format_validator,
          },
          required_keys=['count', 'type', 'format'],
          max_length=3),
      # 'append' allows the flag to be repeated for multiple SSD configs.
      action='append',
      help=help_text)
def AddNodeTaintsFlag(parser, for_node_pool=False, hidden=False):
  """Adds a --node-taints flag to the given parser.

  Args:
    parser: A given parser.
    for_node_pool: True if the flag targets a node-pool command (switches the
      example command line in the help text).
    hidden: Whether to hide the flag from help output.
  """
  if for_node_pool:
    help_text = """\
Applies the given kubernetes taints on all nodes in the new node-pool, which can be used with tolerations for pod scheduling. Example:
$ {command} node-pool-1 --cluster=example-cluster --node-taints=key1=val1:NoSchedule,key2=val2:PreferNoSchedule
"""
  else:
    help_text = """\
Applies the given kubernetes taints on all nodes in default node-pool(s) in new cluster, which can be used with tolerations for pod scheduling. Example:
$ {command} example-cluster --node-taints=key1=val1:NoSchedule,key2=val2:PreferNoSchedule
"""
  help_text += """
Note, this feature uses `gcloud beta` commands. To use gcloud beta commands,
you must configure `gcloud` to use the v1beta1 API as described here: https://cloud.google.com/kubernetes-engine/docs/reference/api-organization#beta.
To read more about node-taints, see https://cloud.google.com/kubernetes-engine/docs/node-taints.
"""
  parser.add_argument(
      '--node-taints',
      metavar='NODE_TAINT',
      type=arg_parsers.ArgDict(),
      help=help_text,
      hidden=hidden)
def AddPreemptibleFlag(parser, for_node_pool=False, suppressed=False):
  """Adds a --preemptible flag to parser.

  Args:
    parser: A given parser.
    for_node_pool: True if the flag targets a node-pool command (switches the
      example command line in the help text).
    suppressed: Whether to hide the flag from help output.
  """
  if for_node_pool:
    help_text = """\
Create nodes using preemptible VM instances in the new nodepool.
$ {command} node-pool-1 --cluster=example-cluster --preemptible
"""
  else:
    help_text = """\
Create nodes using preemptible VM instances in the new cluster.
$ {command} example-cluster --preemptible
"""
  help_text += """
New nodes, including ones created by resize or recreate, will use preemptible
VM instances. See https://cloud.google.com/kubernetes-engine/docs/preemptible-vm
for more information on how to use Preemptible VMs with Kubernetes Engine."""
  parser.add_argument(
      '--preemptible',
      action='store_true',
      help=help_text,
      hidden=suppressed)
def AddNodePoolNameArg(parser, help_text):
  """Registers the positional NAME argument for a node pool.

  Args:
    parser: A given parser.
    help_text: The help text describing the positional argument.
  """
  parser.add_argument('name', help=help_text, metavar='NAME')
def AddNodePoolClusterFlag(parser, help_text):
  """Adds a --cluster flag to the parser.

  The flag value is also stored into the container/cluster property so later
  invocations can default to it.

  Args:
    parser: A given parser.
    help_text: The help text for the flag.
  """
  parser.add_argument(
      '--cluster',
      help=help_text,
      action=actions.StoreProperty(properties.VALUES.container.cluster))
def AddEnableAutoRepairFlag(parser, for_node_pool=False, for_create=False):
  """Adds a --enable-autorepair flag to parser.

  Args:
    parser: A given parser.
    for_node_pool: True if the flag targets a node-pool command.
    for_create: True for create commands, which add a note that autorepair
      defaults to enabled for COS images.
  """
  if for_node_pool:
    help_text = """\
Enable node autorepair feature for a node-pool.
$ {command} node-pool-1 --cluster=example-cluster --enable-autorepair
"""
    if for_create:
      help_text += """
Node autorepair is enabled by default for node pools using COS as a base image,
use --no-enable-autorepair to disable.
"""
  else:
    help_text = """\
Enable node autorepair feature for a cluster's default node-pool(s).
$ {command} example-cluster --enable-autorepair
"""
    if for_create:
      help_text += """
Node autorepair is enabled by default for clusters using COS as a base image,
use --no-enable-autorepair to disable.
"""
  help_text += """
See https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-repair for \
more info."""
  # default=None distinguishes "not specified" from an explicit False.
  parser.add_argument(
      '--enable-autorepair', action='store_true', default=None,
      help=help_text)
def AddEnableAutoUpgradeFlag(parser, for_node_pool=False, suppressed=False):
  """Adds a --enable-autoupgrade flag to parser.

  Args:
    parser: A given parser.
    for_node_pool: True if the flag targets a node-pool command.
    suppressed: Whether to hide the flag from help output.
  """
  if for_node_pool:
    help_text = """\
Sets autoupgrade feature for a node-pool.
$ {command} node-pool-1 --cluster=example-cluster --enable-autoupgrade
"""
  else:
    help_text = """\
Sets autoupgrade feature for a cluster's default node-pool(s).
$ {command} example-cluster --enable-autoupgrade
"""
  help_text += """
See https://cloud.google.com/kubernetes-engine/docs/node-management for more \
info."""
  # default=None distinguishes "not specified" from an explicit False.
  parser.add_argument(
      '--enable-autoupgrade',
      action='store_true',
      default=None,
      help=help_text,
      hidden=suppressed)
def AddTagsFlag(parser, help_text):
  """Adds a --tags flag to the given parser.

  Args:
    parser: A given parser.
    help_text: The help text for the flag.
  """
  parser.add_argument(
      '--tags',
      metavar='TAG',
      type=arg_parsers.ArgList(min_length=1),
      help=help_text)
def AddMasterAuthorizedNetworksFlags(parser, enable_group_for_update=None):
  """Adds Master Authorized Networks related flags to parser.

  Master Authorized Networks related flags are:
  --enable-master-authorized-networks and --master-authorized-networks.

  Args:
    parser: A given parser.
    enable_group_for_update: An optional group (used by update commands) to
      which the --enable-master-authorized-networks flag is added instead of
      the default group.
  """
  if enable_group_for_update is None:
    # Create: both flags live in the same named group.
    master_flag_group = parser.add_argument_group('Master Authorized Networks')
    enable_flag_group = master_flag_group
  else:
    # Update: the enable flag goes into the caller-supplied group, so the
    # remaining group carries no title of its own.
    master_flag_group = parser.add_argument_group('')
    enable_flag_group = enable_group_for_update
  enable_flag_group.add_argument(
      '--enable-master-authorized-networks',
      default=None,
      help="""\
Allow only specified set of CIDR blocks (specified by the
`--master-authorized-networks` flag) to connect to Kubernetes master through
HTTPS. Besides these blocks, the following have access as well:\n
  1) The private network the cluster connects to if
  `--enable-private-nodes` is specified.
  2) Google Compute Engine Public IPs if `--enable-private-nodes` is not
  specified.\n
Use `--no-enable-master-authorized-networks` to disable. When disabled, public
internet (0.0.0.0/0) is allowed to connect to Kubernetes master through HTTPS.
""",
      action='store_true')
  master_flag_group.add_argument(
      '--master-authorized-networks',
      type=arg_parsers.ArgList(min_length=1),
      metavar='NETWORK',
      help='The list of CIDR blocks (up to {max}) that are allowed to connect '
      'to Kubernetes master through HTTPS. Specified in CIDR notation (e.g. '
      '1.2.3.4/30). Can not be specified unless '
      '`--enable-master-authorized-networks` is also specified.'.format(
          max=api_adapter.MAX_AUTHORIZED_NETWORKS_CIDRS))
def AddNetworkPolicyFlags(parser, hidden=False):
  """Adds a --enable-network-policy flag to parser.

  Args:
    parser: A given parser.
    hidden: Whether to hide the flag from help output.
  """
  parser.add_argument(
      '--enable-network-policy',
      action='store_true',
      default=None,
      hidden=hidden,
      help='Enable network policy enforcement for this cluster. If you are '
      'enabling network policy on an existing cluster the network policy '
      'addon must first be enabled on the master by using '
      '--update-addons=NetworkPolicy=ENABLED flag.')
def AddPrivateClusterFlags(parser, with_deprecated=False):
  """Adds the private-cluster flag group to the parser.

  Args:
    parser: A given parser.
    with_deprecated: Whether to also add the deprecated --private-cluster
      alias of --enable-private-nodes.
  """
  group = parser.add_argument_group('Private Clusters')
  if with_deprecated:
    group.add_argument(
        '--private-cluster',
        help=('Cluster is created with no public IP addresses on the cluster '
              'nodes.'),
        default=None,
        action=actions.DeprecationAction(
            'private-cluster',
            warn='The --private-cluster flag is deprecated and will be removed '
            'in a future release. Use --enable-private-nodes instead.',
            action='store_true'))
  group.add_argument(
      '--enable-private-nodes',
      help=('Cluster is created with no public IP addresses on the cluster '
            'nodes.'),
      default=None,
      action='store_true')
  group.add_argument(
      '--enable-private-endpoint',
      help=('Cluster is managed using the private IP address of the master '
            'API endpoint.'),
      default=None,
      action='store_true')
  group.add_argument(
      '--master-ipv4-cidr',
      help=('IPv4 CIDR range to use for the master network. This should have '
            'a netmask of size /28 and should be used in conjunction with the '
            '--enable-private-nodes flag.'),
      default=None)
def AddEnableLegacyAuthorizationFlag(parser, hidden=False):
  """Adds a --enable-legacy-authorization flag to parser.

  Args:
    parser: A given parser.
    hidden: Whether to hide the flag from help output.
  """
  help_text = """\
Enables the legacy ABAC authentication for the cluster.
User rights are granted through the use of policies which combine attributes
together. For a detailed look at these properties and related formats, see
https://kubernetes.io/docs/admin/authorization/abac/. To use RBAC permissions
instead, create or update your cluster with the option
`--no-enable-legacy-authorization`.
"""
  parser.add_argument(
      '--enable-legacy-authorization',
      action='store_true',
      default=None,
      hidden=hidden,
      help=help_text)
def AddAuthenticatorSecurityGroupFlags(parser, hidden=False):
  """Adds a --security-group flag to parser.

  Args:
    parser: A given parser.
    hidden: Whether to hide the flag from help output.
  """
  help_text = """\
The name of the RBAC security group for use with Google security groups
in Kubernetes RBAC
(https://kubernetes.io/docs/reference/access-authn-authz/rbac/).
To include group membership as part of the claims issued by Google
during authentication, a group must be designated as a security group by
including it as a direct member of this group.
If unspecified, no groups will be returned for use with RBAC."""
  parser.add_argument(
      '--security-group',
      help=help_text,
      default=None,
      hidden=hidden)
def AddStartIpRotationFlag(parser, hidden=False):
  """Adds a --start-ip-rotation flag to parser.

  Args:
    parser: A given parser.
    hidden: Whether to hide the flag from help output.
  """
  help_text = """\
Start the rotation of this cluster to a new IP. For example:
$ {command} example-cluster --start-ip-rotation
This causes the cluster to serve on two IPs, and will initiate a node upgrade \
to point to the new IP."""
  parser.add_argument(
      '--start-ip-rotation',
      action='store_true',
      default=False,
      hidden=hidden,
      help=help_text)
def AddStartCredentialRotationFlag(parser, hidden=False):
  """Adds a --start-credential-rotation flag to parser.

  Args:
    parser: A given parser.
    hidden: Whether to hide the flag from help output.
  """
  help_text = """\
Start the rotation of IP and credentials for this cluster. For example:
$ {command} example-cluster --start-credential-rotation
This causes the cluster to serve on two IPs, and will initiate a node upgrade \
to point to the new IP."""
  parser.add_argument(
      '--start-credential-rotation',
      action='store_true',
      default=False,
      hidden=hidden,
      help=help_text)
def AddCompleteIpRotationFlag(parser, hidden=False):
  """Adds a --complete-ip-rotation flag to parser.

  Args:
    parser: A given parser.
    hidden: Whether to hide the flag from help output.
  """
  help_text = """\
Complete the IP rotation for this cluster. For example:
$ {command} example-cluster --complete-ip-rotation
This causes the cluster to stop serving its old IP, and return to a single IP \
state."""
  parser.add_argument(
      '--complete-ip-rotation',
      action='store_true',
      default=False,
      hidden=hidden,
      help=help_text)
def AddCompleteCredentialRotationFlag(parser, hidden=False):
  """Adds a --complete-credential-rotation flag to parser.

  Args:
    parser: A given parser.
    hidden: Whether to hide the flag from help output.
  """
  help_text = """\
Complete the IP and credential rotation for this cluster. For example:
$ {command} example-cluster --complete-credential-rotation
This causes the cluster to stop serving its old IP, return to a single IP, and \
invalidate old credentials."""
  parser.add_argument(
      '--complete-credential-rotation',
      action='store_true',
      default=False,
      hidden=hidden,
      help=help_text)
def AddMaintenanceWindowFlag(parser, hidden=False, add_unset_text=False):
  """Adds a --maintenance-window flag to parser.

  Args:
    parser: A given parser.
    hidden: Whether to hide the flag from help output.
    add_unset_text: True for update commands, where the flag also accepts the
      literal value "None" to clear an existing window.
  """
  help_text = """\
Set a time of day when you prefer maintenance to start on this cluster. \
For example:
$ {command} example-cluster --maintenance-window=12:43
The time corresponds to the UTC time zone, and must be in HH:MM format.
"""
  unset_text = """\
To remove an existing maintenance window from the cluster, use \
\'--maintenance-window=None\'
"""
  description = 'Maintenance windows must be passed in using HH:MM format.'
  unset_description = ' They can also be removed by using the word \"None\".'
  if add_unset_text:
    help_text += unset_text
    description += unset_description
  # The regex accepts 24h HH:MM (optionally "None" when clearing is allowed
  # by the description text above).
  type_ = arg_parsers.RegexpValidator(
      r'^([0-9]|0[0-9]|1[0-9]|2[0-3]):[0-5][0-9]$|^None$', description)
  parser.add_argument(
      '--maintenance-window',
      default=None,
      hidden=hidden,
      type=type_,
      help=help_text)
def AddLabelsFlag(parser, suppressed=False):
  """Adds a --labels flag to parser.

  Args:
    parser: A given parser.
    suppressed: Whether to hide the flag from help output.
  """
  help_text = """\
Labels to apply to the Google Cloud resources in use by the Kubernetes Engine
cluster. These are unrelated to Kubernetes labels.
Example:
$ {command} example-cluster --labels=label_a=value1,label_b=,label_c=value3
"""
  parser.add_argument(
      '--labels',
      metavar='KEY=VALUE',
      type=arg_parsers.ArgDict(),
      help=help_text,
      hidden=suppressed)
def AddUpdateLabelsFlag(parser):
  """Adds a --update-labels flag to parser.

  Args:
    parser: A given parser.
  """
  help_text = """\
Labels to apply to the Google Cloud resources in use by the Kubernetes Engine
cluster. These are unrelated to Kubernetes labels.
Example:
$ {command} example-cluster --update-labels=label_a=value1,label_b=value2
"""
  parser.add_argument(
      '--update-labels',
      metavar='KEY=VALUE',
      type=arg_parsers.ArgDict(),
      help=help_text)
def AddRemoveLabelsFlag(parser):
  """Adds a --remove-labels flag to parser.

  Args:
    parser: A given parser.
  """
  help_text = """\
Labels to remove from the Google Cloud resources in use by the Kubernetes Engine
cluster. These are unrelated to Kubernetes labels.
Example:
$ {command} example-cluster --remove-labels=label_a,label_b
"""
  parser.add_argument(
      '--remove-labels',
      metavar='KEY',
      type=arg_parsers.ArgList(),
      help=help_text)
def AddDiskTypeFlag(parser):
  """Registers the --disk-type flag on the given parser."""
  parser.add_argument(
      '--disk-type',
      choices=['pd-standard', 'pd-ssd'],
      help="""\
Type of the node VM boot disk. Defaults to pd-standard.
""")
def AddIPAliasFlags(parser):
  """Adds flags related to IP aliases (VPC-native clusters) to the parser.

  Adds --enable-ip-alias, --services-ipv4-cidr, --create-subnetwork,
  --cluster-secondary-range-name and --services-secondary-range-name.

  Args:
    parser: A given parser.
  """
  parser.add_argument(
      '--enable-ip-alias',
      action='store_true',
      default=None,
      help="""\
Enable use of alias IPs (https://cloud.google.com/compute/docs/alias-ip/)
for pod IPs. This will create two secondary ranges, one for the pod IPs
and another to reserve space for the services range.
""")
  parser.add_argument(
      '--services-ipv4-cidr',
      metavar='CIDR',
      help="""\
Set the IP range for the services IPs.
Can be specified as a netmask size (e.g. '/20') or as in CIDR notion
(e.g. '10.100.0.0/20'). If given as a netmask size, the IP range will
be chosen automatically from the available space in the network.
If unspecified, the services CIDR range will be chosen with a default
mask size.
Can not be specified unless '--enable-ip-alias' is also specified.
""")
  parser.add_argument(
      '--create-subnetwork',
      metavar='KEY=VALUE',
      type=arg_parsers.ArgDict(),
      help="""\
Create a new subnetwork for the cluster. The name and range of the
subnetwork can be customized via optional 'name' and 'range' key-value
pairs.
'name' specifies the name of the subnetwork to be created.
'range' specifies the IP range for the new subnetwork. This can either
be a netmask size (e.g. '/20') or a CIDR range (e.g. '10.0.0.0/20').
If a netmask size is specified, the IP is automatically taken from the
free space in the cluster's network.
Examples:
Create a new subnetwork with a default name and size.
$ {command} --create-subnetwork ""
Create a new subnetwork named "my-subnet" with netmask of size 21.
$ {command} --create-subnetwork name=my-subnet,range=/21
Create a new subnetwork with a default name with the primary range of
10.100.0.0/16.
$ {command} --create-subnetwork range=10.100.0.0/16
Create a new subnetwork with the name "my-subnet" with a default range.
$ {command} --create-subnetwork name=my-subnet
Can not be specified unless '--enable-ip-alias' is also specified. Can
not be used in conjunction with the '--subnetwork' option.
""")
  parser.add_argument(
      '--cluster-secondary-range-name',
      metavar='NAME',
      help="""\
Set the secondary range to be used as the source for pod IPs. Alias
ranges will be allocated from this secondary range. NAME must be the
name of an existing secondary range in the cluster subnetwork.
Must be used in conjunction with '--enable-ip-alias'. Cannot be used
with --create-subnetwork.
""")
  parser.add_argument(
      '--services-secondary-range-name',
      metavar='NAME',
      help="""\
Set the secondary range to be used for services (e.g. ClusterIPs).
NAME must be the name of an existing secondary range in the cluster
subnetwork.
Must be used in conjunction with '--enable-ip-alias'. Cannot be used
with --create-subnetwork.
""")
def AddMaxPodsPerNodeFlag(parser, for_node_pool=False, hidden=False):
  """Adds max pod number constraints flags to the parser.

  Args:
    parser: A given parser.
    for_node_pool: True if the flags target a node-pool command; only cluster
      commands also get --default-max-pods-per-node.
    hidden: Whether to hide the flags from help output.
  """
  parser.add_argument(
      '--max-pods-per-node',
      default=None,
      help="""\
The max number of pods per node for this node pool.
This flag sets the maximum number of pods that can be run at the same time on a
node. This will override the value given with --default-max-pods-per-node flag
set at the cluster level.
Must be used in conjunction with '--enable-ip-alias'.
""",
      hidden=hidden,
      type=int)
  if not for_node_pool:
    parser.add_argument(
        '--default-max-pods-per-node',
        default=None,
        help="""\
The default max number of pods per node for node pools in the cluster.
This flag sets the default max-pods-per-node for node pools in the cluster. If
--max-pods-per-node is not specified explicitly for a node pool, this flag
value will be used.
Must be used in conjunction with '--enable-ip-alias'.
""",
        hidden=hidden,
        type=int)
def AddMinCpuPlatformFlag(parser, for_node_pool=False, hidden=False):
  """Adds a --min-cpu-platform flag to parser.

  Args:
    parser: A given parser.
    for_node_pool: True if the flag targets a node-pool command (switches the
      example command line in the help text).
    hidden: Whether to hide the flag from help output.
  """
  if for_node_pool:
    help_text = """\
When specified, the nodes for the new node pool will be scheduled on host with
specified CPU architecture or a newer one.
Examples:
$ {command} node-pool-1 --cluster=example-cluster --min-cpu-platform=PLATFORM
"""
  else:
    help_text = """\
When specified, the nodes for the new cluster's default node pool will be
scheduled on host with specified CPU architecture or a newer one.
Examples:
$ {command} example-cluster --min-cpu-platform=PLATFORM
"""
  help_text += """\
To list available CPU platforms in given zone, run:
$ gcloud beta compute zones describe ZONE --format="value(availableCpuPlatforms)"
CPU platform selection is available only in selected zones.
"""
  parser.add_argument(
      '--min-cpu-platform', metavar='PLATFORM', hidden=hidden, help=help_text)
def AddWorkloadMetadataFromNodeFlag(parser, hidden=False):
  """Adds a --workload-metadata-from-node flag to the parser.

  Args:
    parser: A given parser.
    hidden: Whether to hide the flag from help output.
  """
  help_text = """\
Sets the node metadata option for workload metadata configuration. This feature
is scheduled to be deprecated in the future and later removed.
"""
  parser.add_argument(
      '--workload-metadata-from-node',
      default=None,
      choices={
          'SECURE': 'Prevents workloads not in hostNetwork from accessing '
                    'certain VM metadata, specifically kube-env, which '
                    'contains Kubelet credentials, and the instance identity '
                    'token. This is a temporary security solution available '
                    'while the bootstrapping process for cluster nodes is '
                    'being redesigned with significant security improvements.',
          'EXPOSED': 'Exposes all VM metadata to workloads.',
          'UNSPECIFIED': 'Chooses the default.',
      },
      # Normalize user input so lower-case values match the choice keys.
      type=lambda x: x.upper(),
      hidden=hidden,
      help=help_text)
def AddTagOrDigestPositional(parser,
                             verb,
                             repeated=True,
                             tags_only=False,
                             arg_name=None,
                             metavar=None):
  """Registers a positional for image names given by tag or digest.

  Args:
    parser: A given parser.
    verb: The action described in the help text (e.g. 'delete').
    repeated: Whether more than one image name may be supplied.
    tags_only: If True, the digest form is omitted from the help text.
    metavar: Optional display name for the positional.
    arg_name: Optional destination name; defaults to image_name(s).
  """
  # Tag-only commands drop the digest form from the rendered help.
  digest_str = '' if tags_only else (
      '*.gcr.io/PROJECT_ID/IMAGE_PATH@sha256:DIGEST or')
  if not arg_name:
    arg_name = 'image_names' if repeated else 'image_name'
    metavar = metavar or 'IMAGE_NAME'
  help_str = ('The fully qualified name(s) of image(s) to {verb}. '
              'The name(s) should be formatted as {digest_str} '
              '*.gcr.io/PROJECT_ID/IMAGE_PATH:TAG.'.format(
                  verb=verb, digest_str=digest_str))
  parser.add_argument(
      arg_name,
      metavar=metavar or arg_name.upper(),
      nargs='+' if repeated else None,
      help=help_str)
def AddImagePositional(parser, verb):
  """Registers the positional image_name argument.

  Args:
    parser: A given parser.
    verb: The action described in the help text (e.g. 'describe').
  """
  help_str = ('The name of the image to {verb}. The name format should be '
              '*.gcr.io/PROJECT_ID/IMAGE_PATH[:TAG|@sha256:DIGEST]. '.format(
                  verb=verb))
  parser.add_argument('image_name', help=help_str)
def AddNodeLocationsFlag(parser):
  """Adds a --node-locations flag to the parser.

  Args:
    parser: A given parser.
  """
  parser.add_argument(
      '--node-locations',
      type=arg_parsers.ArgList(min_length=1),
      metavar='ZONE',
      help="""\
The set of zones in which the specified node footprint should be replicated.
All zones must be in the same region as the cluster's master(s), specified by
the `--zone` or `--region` flag. Additionally, for zonal clusters,
`--node-locations` must contain the cluster's primary zone. If not specified,
all nodes will be in the cluster's primary zone (for zonal clusters) or spread
across three randomly chosen zones within the cluster's region (for regional
clusters).
Note that `NUM_NODES` nodes will be created in each zone, such that if you
specify `--num-nodes=4` and choose two locations, 8 nodes will be created.
Multiple locations can be specified, separated by commas. For example:
$ {command} example-cluster --zone us-central1-a --node-locations us-central1-a,us-central1-b
""")
def AddLoggingServiceFlag(parser, enable_kubernetes):
  """Registers the --logging-service flag.

  Args:
    parser: A given parser.
    enable_kubernetes: If True, the help text also documents the
      Kubernetes-native resource-model variant of the logging service.
  """
  if enable_kubernetes:
    help_str = """\
Logging service to use for the cluster. Options are:
"logging.googleapis.com/kubernetes" (the Google Cloud Logging
service with Kubernetes-native resource model enabled),
"logging.googleapis.com" (the Google Cloud Logging service),
"none" (logs will not be exported from the cluster)
"""
  else:
    help_str = """\
Logging service to use for the cluster. Options are:
"logging.googleapis.com" (the Google Cloud Logging service),
"none" (logs will not be exported from the cluster)
"""
  parser.add_argument('--logging-service', help=help_str)
def AddMonitoringServiceFlag(parser, enable_kubernetes):
  """Registers the --monitoring-service flag.

  Args:
    parser: A given parser.
    enable_kubernetes: If True, the help text also documents the
      Kubernetes-native resource-model variant of the monitoring service.
  """
  if enable_kubernetes:
    help_str = """\
Monitoring service to use for the cluster. Options are:
"monitoring.googleapis.com/kubernetes" (the Google Cloud
Monitoring service with Kubernetes-native resource model enabled),
"monitoring.googleapis.com" (the Google Cloud Monitoring service),
"none" (no metrics will be exported from the cluster)
"""
  else:
    help_str = """\
Monitoring service to use for the cluster. Options are:
"monitoring.googleapis.com" (the Google Cloud Monitoring service),
"none" (no metrics will be exported from the cluster)
"""
  parser.add_argument('--monitoring-service', help=help_str)
def AddNodeIdentityFlags(parser, example_target, new_behavior=True):
  """Adds node identity flags to the given parser.

  Flags are added in a mutually exclusive group: --scopes plus the deprecated
  --[no-]enable-cloud-endpoints on one side, --service-account on the other.

  Args:
    parser: A given parser.
    example_target: The example command target rendered in help text, e.g.
      'example-cluster' or 'node-pool-1 --cluster=example-cluster'.
    new_behavior: If True, the extra scopes-behavior help paragraph is shown
      and --[no-]enable-cloud-endpoints is treated as removed by its
      DeprecationAction.
  """
  node_identity_group = parser.add_group(
      mutex=True, help='Options to specify the node identity.')
  scopes_group = node_identity_group.add_group(help='Scopes options.')
  if new_behavior:
    track_help = """
Unless container/new_scopes_behavior property is true, compute-rw and storage-ro
are always added, even if not explicitly specified, and --enable-cloud-endpoints
(by default) adds service-control and service-management scopes.
If container/new_scopes_behavior property is true, none of the above scopes are
added (though storage-ro, service-control, and service-management are all
included in the default scopes. In a future release, this will be the default
behavior.
"""
  else:
    track_help = ''
  scopes_group.add_argument(
      '--scopes',
      type=arg_parsers.ArgList(),
      metavar='SCOPE',
      default='gke-default',
      help="""\
Specifies scopes for the node instances. Examples:
$ {{command}} {example_target} --scopes=https://www.googleapis.com/auth/devstorage.read_only
$ {{command}} {example_target} --scopes=bigquery,storage-rw,compute-ro
Multiple SCOPEs can be specified, separated by commas. `logging-write`
and/or `monitoring` are added unless Cloud Logging and/or Cloud Monitoring
are disabled (see `--enable-cloud-logging` and `--enable-cloud-monitoring`
for more information).
{track_help}
{scopes_help}
""".format(
    example_target=example_target,
    track_help=track_help,
    scopes_help=compute_constants.ScopesHelp()))
  cloud_endpoints_help_text = """\
Automatically enable Google Cloud Endpoints to take advantage of API management
features by adding service-control and service-management scopes.
If `--no-enable-cloud-endpoints` is set, remove service-control and
service-management scopes, even if they are implicitly (via default) or
explicitly set via `--scopes`.
`--[no-]enable-cloud-endpoints` is not allowed if
`container/new_scopes_behavior` property is set to true.
"""
  scopes_group.add_argument(
      '--enable-cloud-endpoints',
      # removed=new_behavior: under the new behavior using this flag is a hard
      # error, otherwise just a deprecation warning.
      action=actions.DeprecationAction(
          '--[no-]enable-cloud-endpoints',
          warn='Flag --[no-]enable-cloud-endpoints is deprecated and will be '
          'removed in a future release. Scopes necessary for Google Cloud '
          'Endpoints are now included in the default set and may be '
          'excluded using --scopes.',
          removed=new_behavior,
          action='store_true'),
      default=True,
      help=cloud_endpoints_help_text)
  sa_help_text = (
      'The Google Cloud Platform Service Account to be used by the node VMs. '
      'If a service account is specified, the cloud-platform and '
      'userinfo.email scopes are used. If no Service Account is specified, the '
      'project default service account is used.')
  node_identity_group.add_argument('--service-account', help=sa_help_text)
def AddClusterNodeIdentityFlags(parser):
  """Adds node identity flags for the cluster create command."""
  AddNodeIdentityFlags(parser, example_target='example-cluster')
def AddDeprecatedClusterNodeIdentityFlags(parser):
  """Adds node identity flags (old scopes behavior) for cluster create."""
  AddNodeIdentityFlags(
      parser, example_target='example-cluster', new_behavior=False)
def AddNodePoolNodeIdentityFlags(parser):
  """Adds node identity flags for the node-pool create command."""
  AddNodeIdentityFlags(
      parser, example_target='node-pool-1 --cluster=example-cluster')
def AddDeprecatedNodePoolNodeIdentityFlags(parser):
  """Adds node identity flags (old scopes behavior) for node-pool create."""
  AddNodeIdentityFlags(
      parser,
      example_target='node-pool-1 --cluster=example-cluster',
      new_behavior=False)
def AddAddonsFlagsWithOptions(parser, addon_options):
  """Adds the --addons flag to the parser with the given addon options.

  Args:
    parser: A given parser.
    addon_options: The set of addon names accepted as choices.
  """
  parser.add_argument(
      '--addons',
      type=arg_parsers.ArgList(choices=addon_options),
      metavar='ADDON',
      help="""\
Default set of addons includes {0}. Addons
(https://cloud.google.com/kubernetes-engine/reference/rest/v1/projects.zones.clusters#AddonsConfig)
are additional Kubernetes cluster components. Addons specified by this flag will
be enabled. The others will be disabled.
""".format(', '.join(api_adapter.DEFAULT_ADDONS)))
def AddAddonsFlags(parser):
  """Adds the --addons flag with the GA addon options."""
  AddAddonsFlagsWithOptions(parser, api_adapter.ADDONS_OPTIONS)
def AddAlphaAddonsFlags(parser):
  """Adds the --addons flag with the alpha addon options."""
  AddAddonsFlagsWithOptions(parser, api_adapter.ALPHA_ADDONS_OPTIONS)
def AddBetaAddonsFlags(parser):
  """Adds the --addons flag with the beta addon options."""
  AddAddonsFlagsWithOptions(parser, api_adapter.BETA_ADDONS_OPTIONS)
def AddPodSecurityPolicyFlag(parser, hidden=False):
  """Adds a --enable-pod-security-policy flag to parser.

  Args:
    parser: A given parser.
    hidden: Whether to hide the flag from help output.
  """
  help_text = """\
Enables the pod security policy admission controller for the cluster.  The pod
security policy admission controller adds fine-grained pod create and update
authorization controls through the PodSecurityPolicy API objects. For more
information, see
https://cloud.google.com/kubernetes-engine/docs/how-to/pod-security-policies.
"""
  parser.add_argument(
      '--enable-pod-security-policy',
      action='store_true',
      default=None,
      hidden=hidden,
      help=help_text)
def AddAllowRouteOverlapFlag(parser):
  """Registers the --allow-route-overlap flag on the given parser."""
  parser.add_argument(
      '--allow-route-overlap',
      action='store_true',
      default=None,
      help="""\
Allows the provided cluster CIDRs to overlap with existing routes
that are less specific and do not terminate at a VM.
When enabled, `--cluster-ipv4-cidr` must be fully specified (e.g. `10.96.0.0/14`
, but not `/14`). If `--enable-ip-alias` is also specified, both
`--cluster-ipv4-cidr` and `--services-ipv4-cidr` must be fully specified.
""")
def AddTpuFlags(parser, hidden=False, enable_tpu_service_networking=False):
  """Adds flags related to Cloud TPUs to the parser.

  Args:
    parser: A given parser.
    hidden: Whether to hide the flags from help output.
    enable_tpu_service_networking: If True, also adds
      --enable-tpu-service-networking, mutually exclusive with
      --tpu-ipv4-cidr.
  """
  tpu_group = parser.add_group(help='Flags relating to Cloud TPUs:')
  tpu_group.add_argument(
      '--enable-tpu',
      action='store_true',
      hidden=hidden,
      help="""\
Enable Cloud TPUs for this cluster.
Can not be specified unless `--enable-kubernetes-alpha` and `--enable-ip-alias`
are also specified.
""")
  group = tpu_group
  if enable_tpu_service_networking:
    # The service-networking mode and an explicit CIDR are alternatives.
    group = tpu_group.add_mutually_exclusive_group()
    group.add_argument(
        '--enable-tpu-service-networking',
        action='store_true',
        hidden=hidden,
        help="""\
Enable Cloud TPU's Service Networking mode. In this mode, the CIDR blocks used
by the Cloud TPUs will be allocated and managed by Service Networking, instead
of Kubernetes Engine.
This cannot be specified if `tpu-ipv4-cidr` is specified.
""")
  group.add_argument(
      '--tpu-ipv4-cidr',
      metavar='CIDR',
      hidden=hidden,
      help="""\
Set the IP range for the Cloud TPUs.
Can be specified as a netmask size (e.g. '/20') or as in CIDR notion
(e.g. '10.100.0.0/20'). If given as a netmask size, the IP range will be chosen
automatically from the available space in the network.
If unspecified, the TPU CIDR range will use automatic default '/20'.
Can not be specified unless '--enable-tpu' and '--enable-ip-alias' are also
specified.
""")
def AddIssueClientCertificateFlag(parser):
  """Registers the --issue-client-certificate flag on the given parser."""
  parser.add_argument(
      '--issue-client-certificate',
      action='store_true',
      default=None,
      help="""\
Issue a TLS client certificate with admin permissions.
When enabled, the certificate and private key pair will be present in
MasterAuth field of the Cluster object. For cluster versions before 1.12, a
client certificate will be issued by default. As of 1.12, client certificates
are disabled by default.
""")
def AddIstioConfigFlag(parser, suppressed=False):
  """Adds a --istio-config flag to the parser.

  Args:
    parser: A given parser.
    suppressed: Whether to hide the flag from help output.
  """
  help_text = """\
Configurations for Istio addon, requires --addons contains Istio for create,
or --update-addons Istio=ENABLED for update.
*auth*:::Optional Type of auth MTLS_PERMISSIVE or MTLS_STRICT
Example:
$ {command} example-cluster --istio-config=auth=MTLS_PERMISSIVE
"""
  parser.add_argument(
      '--istio-config',
      metavar='auth=MTLS_PERMISSIVE',
      type=arg_parsers.ArgDict(
          spec={
              # Normalize the auth value so lower-case input is accepted.
              'auth': (lambda x: x.upper()),
          }),
      help=help_text,
      hidden=suppressed)
def ValidateIstioConfigCreateArgs(istio_config_args, addons_args):
  """Validates flags specifying Istio config for create.

  Args:
    istio_config_args: Parsed --istio-config value (dict or None).
    addons_args: Parsed --addons value (list of addon names).

  Raises:
    exceptions.InvalidArgumentException: If auth is not MTLS_PERMISSIVE or
      MTLS_STRICT, or --addons=Istio was not also given.
  """
  if istio_config_args:
    auth = istio_config_args.get('auth', '')
    if auth not in ['MTLS_PERMISSIVE', 'MTLS_STRICT']:
      # BUG FIX: the two message fragments were previously concatenated
      # without a separating space ("MTLS_STRICTe.g."); wording is now also
      # consistent with ValidateIstioConfigUpdateArgs.
      raise exceptions.InvalidArgumentException(
          '--istio-config', 'auth must be one of MTLS_PERMISSIVE or '
          'MTLS_STRICT e.g. --istio-config auth=MTLS_PERMISSIVE')
    if 'Istio' not in addons_args:
      raise exceptions.InvalidArgumentException(
          '--istio-config', '--addon=Istio must be specified when '
          '--istio-config is given')
def ValidateIstioConfigUpdateArgs(istio_config_args, disable_addons_args):
  """Validates flags specifying Istio config for update.

  Args:
    istio_config_args: Parsed --istio-config value (dict or None).
    disable_addons_args: Mapping of addon name to its disabled state.

  Raises:
    exceptions.InvalidArgumentException: If auth is invalid, or Istio is not
      being enabled by --update-addons.
  """
  if not istio_config_args:
    return
  auth = istio_config_args.get('auth', '')
  if auth not in ('MTLS_PERMISSIVE', 'MTLS_STRICT'):
    raise exceptions.InvalidArgumentException(
        '--istio-config', 'auth must be one of MTLS_PERMISSIVE or '
        'MTLS_STRICT e.g. --istio-config auth=MTLS_PERMISSIVE')
  disable_istio = disable_addons_args.get('Istio')
  # Istio must be explicitly enabled (present and not disabled) by
  # --update-addons for --istio-config to make sense.
  if disable_istio is None or disable_istio:
    raise exceptions.InvalidArgumentException(
        '--istio-config', '--update-addons=Istio=ENABLED must be specified '
        'when --istio-config is given')
def AddConcurrentNodeCountFlag(parser):
  """Adds a --concurrent-node-count flag to the parser.

  Args:
    parser: A given parser.
  """
  help_text = """\
The number of nodes to upgrade concurrently. Valid values are [1, {max}].
It is a recommended best practice to set this value to no higher than 3% of
your cluster size.'
""".format(max=api_adapter.MAX_CONCURRENT_NODE_COUNT)
  parser.add_argument(
      '--concurrent-node-count',
      type=arg_parsers.BoundedInt(1, api_adapter.MAX_CONCURRENT_NODE_COUNT),
      help=help_text)
def WarnForUnspecifiedIpAllocationPolicy(args):
  """Warns if the user did not pass --[no-]enable-ip-alias explicitly."""
  if args.IsSpecified('enable_ip_alias'):
    return
  log.warning(
      'Currently VPC-native is not the default mode during cluster creation. '
      'In the future, this will become the default mode and can be disabled '
      'using `--no-enable-ip-alias` flag. Use `--[no-]enable-ip-alias` flag '
      'to suppress this warning.')
def WarnForNodeModification(args, enable_autorepair):
  """Warns Ubuntu users that boot-disk changes are lost on node recreation."""
  # Only Ubuntu images are affected by this warning.
  if (args.image_type or '').lower() != 'ubuntu':
    return
  # Without autorepair or autoupgrade, nodes are not recreated automatically,
  # so there is nothing to warn about.
  if not enable_autorepair and not args.enable_autoupgrade:
    return
  log.warning('Modifications on the boot disks of node VMs do not persist '
              'across node recreations. Nodes are recreated during '
              'manual-upgrade, auto-upgrade, auto-repair, and auto-scaling. '
              'To preserve modifications across node recreation, use a '
              'DaemonSet.')
def AddMachineTypeFlag(parser):
  """Registers the --machine-type / -m flag on the given parser."""
  parser.add_argument(
      '--machine-type', '-m',
      help="""\
The type of machine to use for nodes. Defaults to n1-standard-1.
The list of predefined machine types is available using the following command:
$ gcloud compute machine-types list
You can also specify custom machine types with the string "custom-CPUS-RAM"
where ```CPUS``` is the number of virtual CPUs and ```RAM``` is the amount of
RAM in MiB.
For example, to create a node pool using custom machines with 2 vCPUs and 12 GB
of RAM:
$ {command} high-mem-pool --machine-type=custom-2-12288
""")
def AddManagedPodIdentityFlags(parser):
  """Adds hidden Managed Pod Identity flags to the parser.

  Adds --enable-managed-pod-identity and --federating-service-account; both
  are hidden.

  Args:
    parser: A given parser.
  """
  enable_help_text = """\
Enable Managed Pod Identity on the cluster.
When enabled, pods with cloud.google.com/service-account annotations will be
able to authenticate to Google Cloud Platform APIs on behalf of service account
specified in the annotation.
"""
  parser.add_argument(
      '--enable-managed-pod-identity',
      action='store_true',
      default=False,
      hidden=True,
      help=enable_help_text)
  sa_help_text = """\
Federating Service Account to use with Managed Pod Identity.
Sets the name (email) of the GCP Service Account used to connect
Kubernetes Service Accounts to GCP Service Accounts.
Must be set with `--enable-managed-pod-identity`.
"""
  parser.add_argument(
      '--federating-service-account',
      default=None,
      hidden=True,
      help=sa_help_text)
def AddResourceUsageExportFlags(parser, add_clear_flag=False, hidden=False):
  """Adds flags about exporting cluster resource usage to BigQuery.

  Args:
    parser: A given parser.
    add_clear_flag: If True, also adds --clear-resource-usage-bigquery-dataset
      and makes the group mutually exclusive with it.
    hidden: Whether to hide the flags from help output.
  """
  group = parser.add_group(
      "Exports cluster's usage of cloud resources",
      hidden=hidden)
  if add_clear_flag:
    group.is_mutex = True
    group.add_argument(
        '--clear-resource-usage-bigquery-dataset',
        action='store_true',
        hidden=hidden,
        default=None,
        help='Disables exporting cluster resource usage to BigQuery.')
    # The remaining export flags live in a sub-group so that either the clear
    # flag or the export configuration may be given, but not both.
    group = group.add_group()
  dataset_help_text = """\
The name of the BigQuery dataset to which the cluster's usage of cloud
resources is exported. A table will be created in the specified dataset to
store cluster resource usage. The resulting table can be joined with BigQuery
Billing Export to produce a fine-grained cost breakdown.
Example:
$ {command} example-cluster --resource-usage-bigquery-dataset=example_bigquery_dataset_name
"""
  group.add_argument(
      '--resource-usage-bigquery-dataset',
      default=None,
      hidden=hidden,
      help=dataset_help_text)
  # BUG FIX: this help string previously opened with a stray backtick after
  # the triple quote (and no line continuation), leaking a literal '`' and a
  # leading blank line into the rendered help; it now matches the """\ style
  # used by every other help literal in this module.
  network_egress_help_text = """\
Enable network egress metering on this cluster.
When enabled, a DaemonSet is deployed into the cluster. Each DaemonSet pod
meters network egress traffic by collecting data from the conntrack table, and
exports the metered metrics to the specified destination.
Network egress metering is disabled if this flag is omitted, or when
`--no-enable-network-egress-metering` is set.
"""
  group.add_argument(
      '--enable-network-egress-metering',
      action='store_true',
      default=None,
      help=network_egress_help_text)
def AddEnablePrivateIpv6AccessFlag(parser, hidden=False):
  """Adds the --enable-private-ipv6-access boolean flag to the parser.

  Args:
    parser: A given parser.
    hidden: Whether or not to hide the help text.
  """
  help_text = """\
Enables private access to Google services over IPv6.
When enabled, this allows gRPC clients on this cluster's pods a fast path to
access Google hosted services (eg. Cloud Spanner, Cloud Dataflow, Cloud
Bigtable).
This is currently only available on Alpha clusters, specified by using
--enable-kubernetes-alpha.
"""
  parser.add_argument(
      '--enable-private-ipv6-access',
      action='store_true',
      default=None,
      hidden=hidden,
      help=help_text)
def AddVerticalPodAutoscalingFlag(parser, hidden=False):
  """Adds the --enable-vertical-pod-autoscaling boolean flag to the parser.

  Args:
    parser: A given parser.
    hidden: Whether or not to hide the help text.
  """
  parser.add_argument(
      '--enable-vertical-pod-autoscaling',
      action='store_true',
      default=None,
      hidden=hidden,
      help='Enables vertical pod autoscaling for a cluster.')
# TODO(b/112194849): Explain limitation to the sandbox pods and the nodes.
def AddSandboxFlag(parser, hidden=False):
  """Adds a --sandbox flag to the given parser.

  Args:
    parser: A given parser.
    hidden: Whether or not to hide the help text.
  """
  # Only the gVisor sandbox type is accepted; anything else is rejected
  # by the regexp validator with the message below.
  type_validator = arg_parsers.RegexpValidator(
      r'^gvisor$', 'Type must be "gvisor"')
  parser.add_argument(
      '--sandbox',
      type=arg_parsers.ArgDict(
          spec={'type': type_validator},
          required_keys=['type'],
          max_length=1),
      metavar='type=TYPE',
      hidden=hidden,
      help="""\
Enables the requested sandbox on all nodes in the node-pool. Example:
  $ {command} node-pool-1 --cluster=example-cluster --sandbox type=gvisor
The only supported type is 'gvisor'.
""")
def AddSecurityProfileForCreateFlags(parser, hidden=False):
  """Adds Security Profile flags to the parser for cluster creation.

  Args:
    parser: A given parser.
    hidden: Whether or not to hide the help text.
  """
  group = parser.add_group(help='Flags for Security Profile:')

  group.add_argument(
      '--security-profile',
      hidden=hidden,
      help="""\
Name and version of the security profile to be applied to the cluster.
Example:
  $ {command} example-cluster --security-profile=default-1.0-gke.0
""")

  # Runtime rules default to enabled; users opt out with
  # --no-security-profile-runtime-rules.
  group.add_argument(
      '--security-profile-runtime-rules',
      default=True,
      action='store_true',
      hidden=hidden,
      help="""\
Apply runtime rules in the specified security profile to the cluster.
When enabled (by default), a security profile controller and webhook
are deployed on the cluster to enforce the runtime rules. If
--no-security-profile-runtime-rules is specified to disable this
feature, only bootstrapping rules are applied, and no security profile
controller or webhook are installed.
""")
def AddSecurityProfileForUpdateFlag(parser, hidden=False):
  """Adds the --security-profile flag to the parser for cluster update.

  Args:
    parser: A given parser.
    hidden: Whether or not to hide the help text.
  """
  help_text = """\
Name and version of the security profile to be applied to the cluster.
If not specified, the current setting of security profile will be
preserved.
Example:
  $ {command} example-cluster --security-profile=default-1.0-gke.1
"""
  parser.add_argument(
      '--security-profile',
      hidden=hidden,
      help=help_text)
def AddSecurityProfileForUpgradeFlags(parser, hidden=False):
  """Adds Security Profile flags to the parser for cluster upgrade.

  Args:
    parser: A given parser.
    hidden: Whether or not to hide the help text.
  """
  group = parser.add_group(help='Flags for Security Profile:')

  group.add_argument(
      '--security-profile',
      hidden=hidden,
      help="""\
Name and version of the security profile to be applied to the cluster.
If not specified, the current security profile settings are preserved.
If the current security profile is not supported in the new cluster
version, this option must be explicitly specified with a supported
security profile, otherwise the operation will fail.
Example:
  $ {command} example-cluster --security-profile=default-1.0-gke.1
""")

  # Unlike the create path, the default here is None so an upgrade
  # preserves the cluster's current runtime-rules setting.
  group.add_argument(
      '--security-profile-runtime-rules',
      default=None,
      action='store_true',
      hidden=hidden,
      help="""\
Apply runtime rules in the specified security profile to the cluster.
When enabled, a security profile controller and webhook
are deployed on the cluster to enforce the runtime rules. If
--no-security-profile-runtime-rules is specified to disable this
feature, only bootstrapping rules are applied, and no security profile
controller or webhook are installed.
""")
def AddNodeGroupFlag(parser):
  """Adds a --node-group flag (GCE sole-tenant node group) to the parser.

  Args:
    parser: A given parser.
  """
  help_text = """\
Assign instances of this pool to run on the specified GCE node group.
This is useful for running workloads on sole tenant nodes.
To see available sole tenant node-groups, run:
  $ gcloud compute sole-tenancy node-groups list
To create a sole tenant node group, run:
  $ gcloud compute sole-tenancy node-groups create [GROUP_NAME] \
--zone [ZONE] --node-template [TEMPLATE_NAME] --target-size [TARGET_SIZE]
See https://cloud.google.com/compute/docs/nodes for more
information on sole tenancy and node groups.
"""
  parser.add_argument(
      '--node-group',
      hidden=True,
      help=help_text)
def AddInitialNodePoolNameArg(parser, hidden=True):
  """Adds the --node-pool-name argument to the parser.

  Args:
    parser: A given parser.
    hidden: Whether or not to hide the help text.
  """
  help_text = """\
Name of the initial node pool that will be created for the cluster.
Specifies the name to use for the initial node pool that will be created
with the cluster. If the settings specified require multiple node pools
to be created, the name for each pool will be prefixed by this name. For
example running the following will result in three node pools being
created, example-node-pool-0, example-node-pool-1 and
example-node-pool-2:
  $ {command} example-cluster --num-nodes 9 --max-nodes-per-pool 3 \
--node-pool-name example-node-pool
"""
  parser.add_argument('--node-pool-name', hidden=hidden, help=help_text)
def AddMetadataFlags(parser):
  """Adds the --metadata and --metadata-from-file flags to the parser.

  Args:
    parser: A given parser.
  """
  metadata_help = """\
Compute Engine metadata to be made available to the guest operating system
running on nodes within the node pool.
Each metadata entry is a key/value pair separated by an equals sign.
Metadata keys must be unique and less than 128 bytes in length. Values
must be less than or equal to 32,768 bytes in length. The total size of
all keys and values must be less than 512 KB. Multiple arguments can be
passed to this flag. For example:
``--metadata key-1=value-1,key-2=value-2,key-3=value-3''
Additionally, the following keys are reserved for use by Kubernetes
Engine:
* ``cluster-location''
* ``cluster-name''
* ``cluster-uid''
* ``configure-sh''
* ``enable-os-login''
* ``gci-update-strategy''
* ``gci-ensure-gke-docker''
* ``instance-template''
* ``kube-env''
* ``startup-script''
* ``user-data''
See also Compute Engine's
link:https://cloud.google.com/compute/docs/storing-retrieving-metadata[documentation]
on storing and retrieving instance metadata.
"""

  # StoreOnceAction rejects the flag being specified more than once;
  # multiple entries must be comma-separated in a single occurrence.
  parser.add_argument(
      '--metadata',
      type=arg_parsers.ArgDict(min_length=1),
      default={},
      help=metadata_help,
      metavar='KEY=VALUE',
      action=arg_parsers.StoreOnceAction)

  metadata_from_file_help = """\
Same as ``--metadata'' except that the value for the entry will
be read from a local file.
"""

  parser.add_argument(
      '--metadata-from-file',
      type=arg_parsers.ArgDict(min_length=1),
      default={},
      help=metadata_from_file_help,
      metavar='KEY=LOCAL_FILE_PATH')
| true | true |
1c377b44182e00a970cf22713af3d0bbf658d4da | 10,340 | py | Python | mosdef_slitpore/analysis.py | rsdefever/mosdef_slitpore | 2150d1d0e062bf6aac660be8ae73d94e2a3c4438 | [
"MIT"
] | 3 | 2021-01-20T15:05:19.000Z | 2022-02-05T16:43:00.000Z | mosdef_slitpore/analysis.py | rsdefever/mosdef_slitpore | 2150d1d0e062bf6aac660be8ae73d94e2a3c4438 | [
"MIT"
] | 3 | 2020-12-01T01:04:27.000Z | 2020-12-09T01:00:15.000Z | mosdef_slitpore/analysis.py | rsdefever/mosdef_slitpore | 2150d1d0e062bf6aac660be8ae73d94e2a3c4438 | [
"MIT"
] | 3 | 2021-01-20T02:27:33.000Z | 2021-11-19T21:15:07.000Z | import numpy as np
def compute_density(
    traj,
    area,
    surface_normal_dim=2,
    pore_center=0.0,
    max_distance=1.0,
    bin_width=0.01,
    symmetrize=False,
):
    """Compute a 1-D number-density profile of traj in atoms/nm^3.

    Atoms are binned by their coordinate along ``surface_normal_dim``
    relative to ``pore_center``; each bin's count is normalized by the
    slab volume (area * bin_width) and the number of frames.

    Parameters
    ----------
    traj : mdtraj.Trajectory
        trajectory to analyze
    area : float
        area of the surface in nm^2
    surface_normal_dim : enum (0,1,2), optional, default = 2
        direction normal to the surface (x:0, y:1, z:2)
    pore_center : float, optional, default = 0.0
        coordinate of the pore center along surface_normal_dim
    max_distance : float, optional, default = 1.0
        max distance to consider from the center of the pore
    bin_width : float, optional, default = 0.01
        width of each histogram bin
    symmetrize : bool, optional, default = False
        if binning should be done in abs(z) instead of z

    Returns
    -------
    bin_centers : list
        the bin centers, shifted so that pore_center is at 0.0
    density : list
        the density (atoms / nm^3) in each bin
    """
    offsets = traj.xyz[:, :, surface_normal_dim] - pore_center
    if symmetrize:
        offsets = abs(offsets)

    half_width = 0.5 * bin_width
    centers = np.arange(-max_distance, max_distance, bin_width)

    bin_centers = []
    density = []
    for center in centers:
        in_bin = (offsets > center - half_width) & (offsets < center + half_width)
        count = in_bin.sum()
        if symmetrize and not np.isclose(center, 0):
            # abs-binning folds both pore halves into one bin, so the
            # effective slab volume is doubled (except the central bin).
            norm = area * 2 * bin_width * traj.n_frames
        else:
            norm = area * bin_width * traj.n_frames
        bin_centers.append(center)
        density.append(count / norm)

    return bin_centers, density
def compute_s(
    traj,
    surface_normal_dim=2,
    pore_center=0.0,
    max_distance=1.0,
    bin_width=0.01,
    bond_array=None,
    symmetrize=False,
):
    """Compute the "s" orientational order parameter profile across the pore.

    s = (3 <cos^2(theta)> - 1) / 2, where theta is the angle between the
    surface normal and each water's H-O-H bisector, averaged over the
    waters in each distance bin.

    Parameters
    ----------
    traj : mdtraj.Trajectory,
        trajectory to analyze
    surface_normal_dim : enum (0,1,2), optional, default = 2
        direction normal to the surface (x:0, y:1, z:2)
    pore_center : float, optional, default = 0.0
        coordinate of the pore center along surface_normal_dim
    max_distance : float, optional, default = 1.0
        max distance to consider from the center of the pore
    bin_width : float, optional, default = 0.01
        width of the bin used when binning waters by distance
    bond_array : np.array(dtype=np.int32), optional, default = None
        Array of bonds to pass into `make_molecules_whole`
        Warning: This argument is necessary if loading in a mol2 file due to a
        current bug in the MDTraj MOL2 reader: https://github.com/mdtraj/mdtraj/issues/1581
    symmetrize : bool, optional, default = False
        if binning should be done in abs(z) instead of z

    Returns
    -------
    bin_centers : list
        the bin centers, shifted so that pore_center is at 0.0
    s_values : list
        the value of s for each bin (NaN for bins with no waters)
    """
    # Make molecules whole first so H-O-H geometry is not split across
    # periodic boundaries before computing bisectors.
    traj.make_molecules_whole(inplace=True, sorted_bonds=bond_array)
    # Select ow and hw
    # NOTE(review): the (n_frames, -1, 2, 3) reshape assumes exactly two H
    # per O, ordered consistently with the O selection -- standard for
    # water topologies; confirm for anything unusual.
    water_o = traj.top.select("water and name O")
    water_h = traj.top.select("water and name H")
    traj_ow = traj.atom_slice(water_o)
    traj_hw = traj.atom_slice(water_h)

    # Compute angles between surface normal ([0,0,1]) and h-o-h bisector
    hw_midpoints = traj_hw.xyz.reshape(traj_hw.n_frames, -1, 2, 3).mean(axis=2)

    vectors = traj_ow.xyz - hw_midpoints
    vectors /= np.linalg.norm(vectors, axis=-1, keepdims=True)
    # cos(theta) is just the normal-axis component of the unit bisector.
    cos_angles = vectors[:, :, surface_normal_dim]

    # Compute distances -- center of pore already @ 0,0; use OW position
    if symmetrize:
        distances = abs(traj_ow.xyz[:, :, surface_normal_dim] - pore_center)
    else:
        distances = traj_ow.xyz[:, :, surface_normal_dim] - pore_center

    bin_centers = []
    s_values = []
    for bin_center in np.arange(-max_distance, max_distance, bin_width):
        mask = np.logical_and(
            distances > bin_center - 0.5 * bin_width,
            distances < bin_center + 0.5 * bin_width,
        )
        # Nematic-style order parameter over the waters in this bin.
        s = (3.0 * np.mean(cos_angles[mask] ** 2) - 1.0) / 2.0
        bin_centers.append(bin_center)
        s_values.append(s)

    return bin_centers, s_values
def compute_mol_per_area(
    traj, area, dim, box_range, n_bins, shift=True, frame_range=None
):
    """
    Calculate molecules per area

    Parameters
    ----------
    traj : mdtraj.trajectory
        Trajectory
    area : int or float
        Area of box in dimensions where number density isn't calculated
    dim : int
        Dimension to calculate number density profile (x: 0, y: 1, z: 2)
    box_range : array
        Range of coordinates in 'dim' to evaluate
    n_bins : int
        Number of bins in histogram
    shift : boolean, default=True
        Shift center to zero if True
    frame_range : Python range() (optional)
        Range of frames to calculate number density function over

    Returns
    -------
    areas : list
        A list containing number density for each bin
    new_bins : list
        A list of bins
    """
    water_o = traj.atom_slice(traj.topology.select("name O"))
    if frame_range:
        water_o = water_o[frame_range]
    for i, frame in enumerate(water_o):
        indices = [
            [atom.index for atom in compound.atoms]
            for compound in list(frame.topology.residues)
        ]
        # Histogram this frame's coordinates along `dim`.  (The original
        # code had two byte-identical branches on `frame_range` here; they
        # have been collapsed -- behavior is unchanged.)
        counts, bins = np.histogram(
            frame.xyz[0, indices, dim].flatten(),
            bins=n_bins,
            range=(box_range[0], box_range[1]),
        )
        if i == 0:
            areas = counts
        else:
            areas += counts
    # Average per frame.
    areas = np.divide(areas, water_o.n_frames)

    # Convert histogram edges to bin midpoints.
    new_bins = list()
    for idx in range(len(bins) - 1):
        new_bins.append((bins[idx] + bins[idx + 1]) / 2)

    if shift:
        # NOTE(review): this reproduces the original centering rule
        # verbatim; `float(n_bins / 2) % 2` is an odd parity test and may
        # not pick the true middle bin for all n_bins -- confirm intent.
        middle = float(n_bins / 2)
        if middle % 2 != 0:
            shift_value = new_bins[int(middle - 0.5)]
        else:
            shift_value = new_bins[int(middle)]
        new_bins = [(bi - shift_value) for bi in new_bins]

    return (areas, new_bins)
def compute_angle(
    traj,
    surface_normal_dim=2,
    pore_center=0.0,
    max_distance=1.0,
    bin_width=0.01,
    symmetrize=False,
    bond_array=None,
):
    """Compute the cos(angle) between HOH bisector and graphene surface normal

    Parameters
    ----------
    traj : mdtraj.Trajectory,
        trajectory to analyze
    surface_normal_dim : enum (0,1,2), optional, default = 2
        direction normal to the surface (x:0, y:1, z:2)
    pore_center : float, optional, default = 0.0
        coordinate of the pore center along surface_normal_dim
    max_distance : float, optional, default = 1.0
        max distance to consider from the center of the pore
    bin_width : float, optional, default = 0.01
        width of the bin used when binning waters by distance
    symmetrize : bool, optional, default = False
        if binning should be done in abs(z) instead of z
    bond_array : np.array(dtype=np.int32), optional, default = None
        Array of bonds to pass into `make_molecules_whole`
        Warning: This argument is necessary if loading in a mol2 file due to a
        current bug in the MDTraj MOL2 reader: https://github.com/mdtraj/mdtraj/issues/1581

    Returns
    -------
    bin_centers : list
        the bin centers, shifted so that pore_center is at 0.0
    cos_angle_values : list
        the value of average cos(angle) for each bin
    cos_angles: np.ndarray
        array that contains all the samples for cos(angle)
    """
    # Make molecules whole first so H-O-H geometry is not split across
    # periodic boundaries before computing bisectors.
    traj.make_molecules_whole(inplace=True, sorted_bonds=bond_array)
    # Select ow and hw
    water_o = traj.top.select("water and name O")
    water_h = traj.top.select("water and name H")
    traj_ow = traj.atom_slice(water_o)
    traj_hw = traj.atom_slice(water_h)

    # Compute angles between surface normal ([0,0,1]/[0,0,-1]) and h-o-h bisector
    hw_midpoints = traj_hw.xyz.reshape(traj_hw.n_frames, -1, 2, 3).mean(axis=2)

    vectors = traj_ow.xyz - hw_midpoints
    vectors /= np.linalg.norm(vectors, axis=-1, keepdims=True)
    cos_angles = vectors[:, :, surface_normal_dim]
    # The surface normal is decided by looking at the position of O in H2O:
    # flip the sign for waters on the far side of the pore center so both
    # walls use a normal pointing into the pore.
    side_of_pore = np.sign(-traj_ow.xyz[:, :, surface_normal_dim] + pore_center)
    cos_angles = np.multiply(cos_angles, side_of_pore)

    # Compute distances -- center of pore already @ 0,0; use OW position
    if symmetrize:
        distances = abs(traj_ow.xyz[:, :, surface_normal_dim] - pore_center)
    else:
        distances = traj_ow.xyz[:, :, surface_normal_dim] - pore_center

    bin_centers = []
    cos_angle_values = []
    for bin_center in np.arange(-max_distance, max_distance, bin_width):
        mask = np.logical_and(
            distances > bin_center - 0.5 * bin_width,
            distances < bin_center + 0.5 * bin_width,
        )
        # Mean over the waters in this bin (NaN if the bin is empty).
        cos_angle = np.mean(cos_angles[mask])
        bin_centers.append(bin_center)
        cos_angle_values.append(cos_angle)

    return bin_centers, cos_angle_values, cos_angles
| 35.050847 | 91 | 0.611122 | import numpy as np
def compute_density(
traj,
area,
surface_normal_dim=2,
pore_center=0.0,
max_distance=1.0,
bin_width=0.01,
symmetrize=False,
):
if symmetrize:
distances = abs(traj.xyz[:, :, surface_normal_dim] - pore_center)
else:
distances = traj.xyz[:, :, surface_normal_dim] - pore_center
bin_centers = []
density = []
for bin_center in np.arange(-max_distance, max_distance, bin_width):
mask = np.logical_and(
distances > bin_center - 0.5 * bin_width,
distances < bin_center + 0.5 * bin_width,
)
bin_centers.append(bin_center)
if symmetrize:
if np.isclose(bin_center, 0):
density.append(mask.sum() / (area * 1 * bin_width * traj.n_frames))
else:
density.append(mask.sum() / (area * 2 * bin_width * traj.n_frames))
else:
density.append(mask.sum() / (area * bin_width * traj.n_frames))
return bin_centers, density
def compute_s(
traj,
surface_normal_dim=2,
pore_center=0.0,
max_distance=1.0,
bin_width=0.01,
bond_array=None,
symmetrize=False,
):
traj.make_molecules_whole(inplace=True, sorted_bonds=bond_array)
water_o = traj.top.select("water and name O")
water_h = traj.top.select("water and name H")
traj_ow = traj.atom_slice(water_o)
traj_hw = traj.atom_slice(water_h)
hw_midpoints = traj_hw.xyz.reshape(traj_hw.n_frames, -1, 2, 3).mean(axis=2)
vectors = traj_ow.xyz - hw_midpoints
vectors /= np.linalg.norm(vectors, axis=-1, keepdims=True)
cos_angles = vectors[:, :, surface_normal_dim]
if symmetrize:
distances = abs(traj_ow.xyz[:, :, surface_normal_dim] - pore_center)
else:
distances = traj_ow.xyz[:, :, surface_normal_dim] - pore_center
bin_centers = []
s_values = []
for bin_center in np.arange(-max_distance, max_distance, bin_width):
mask = np.logical_and(
distances > bin_center - 0.5 * bin_width,
distances < bin_center + 0.5 * bin_width,
)
s = (3.0 * np.mean(cos_angles[mask] ** 2) - 1.0) / 2.0
bin_centers.append(bin_center)
s_values.append(s)
return bin_centers, s_values
def compute_mol_per_area(
traj, area, dim, box_range, n_bins, shift=True, frame_range=None
):
water_o = traj.atom_slice(traj.topology.select("name O"))
resnames = np.unique([x.name for x in water_o.topology.residues])
if frame_range:
water_o = water_o[frame_range]
for i, frame in enumerate(water_o):
indices = [
[atom.index for atom in compound.atoms]
for compound in list(frame.topology.residues)
]
if frame_range:
if i == 0:
x = np.histogram(
frame.xyz[0, indices, dim].flatten(),
bins=n_bins,
range=(box_range[0], box_range[1]),
)
areas = x[0]
bins = x[1]
else:
areas += np.histogram(
frame.xyz[0, indices, dim].flatten(),
bins=n_bins,
range=(box_range[0], box_range[1]),
)[0]
else:
if i == 0:
x = np.histogram(
frame.xyz[0, indices, dim].flatten(),
bins=n_bins,
range=(box_range[0], box_range[1]),
)
areas = x[0]
bins = x[1]
else:
areas += np.histogram(
frame.xyz[0, indices, dim].flatten(),
bins=n_bins,
range=(box_range[0], box_range[1]),
)[0]
areas = np.divide(areas, water_o.n_frames)
new_bins = list()
for idx, bi in enumerate(bins):
if (idx + 1) >= len(bins):
continue
mid = (bins[idx] + bins[idx + 1]) / 2
new_bins.append(mid)
if shift:
middle = float(n_bins / 2)
if middle % 2 != 0:
shift_value = new_bins[int(middle - 0.5)]
else:
shift_value = new_bins[int(middle)]
new_bins = [(bi - shift_value) for bi in new_bins]
return (areas, new_bins)
def compute_angle(
traj,
surface_normal_dim=2,
pore_center=0.0,
max_distance=1.0,
bin_width=0.01,
symmetrize=False,
bond_array=None,
):
traj.make_molecules_whole(inplace=True, sorted_bonds=bond_array)
water_o = traj.top.select("water and name O")
water_h = traj.top.select("water and name H")
traj_ow = traj.atom_slice(water_o)
traj_hw = traj.atom_slice(water_h)
hw_midpoints = traj_hw.xyz.reshape(traj_hw.n_frames, -1, 2, 3).mean(axis=2)
vectors = traj_ow.xyz - hw_midpoints
vectors /= np.linalg.norm(vectors, axis=-1, keepdims=True)
cos_angles = vectors[:, :, surface_normal_dim]
side_of_pore = np.sign(-traj_ow.xyz[:, :, surface_normal_dim] + pore_center)
cos_angles = np.multiply(cos_angles, side_of_pore)
if symmetrize:
distances = abs(traj_ow.xyz[:, :, surface_normal_dim] - pore_center)
else:
distances = traj_ow.xyz[:, :, surface_normal_dim] - pore_center
bin_centers = []
cos_angle_values = []
for bin_center in np.arange(-max_distance, max_distance, bin_width):
mask = np.logical_and(
distances > bin_center - 0.5 * bin_width,
distances < bin_center + 0.5 * bin_width,
)
cos_angle = np.mean(cos_angles[mask])
bin_centers.append(bin_center)
cos_angle_values.append(cos_angle)
return bin_centers, cos_angle_values, cos_angles
| true | true |
1c377c34b7a688d8e50a75b5faa66c9c8af0bd98 | 3,645 | py | Python | server/app/routes.py | fatematzuhora/2mb-random-objects | c9bd7e1477bf69d12ed300be912137aebdc1e5c3 | [
"MIT"
] | null | null | null | server/app/routes.py | fatematzuhora/2mb-random-objects | c9bd7e1477bf69d12ed300be912137aebdc1e5c3 | [
"MIT"
] | null | null | null | server/app/routes.py | fatematzuhora/2mb-random-objects | c9bd7e1477bf69d12ed300be912137aebdc1e5c3 | [
"MIT"
] | null | null | null | '''app routes'''
import os
import traceback
from random import choice, randint, uniform
from string import ascii_lowercase, digits
from flask import jsonify, make_response
from flask_cors import cross_origin
from app import app
def alphabetical_string(limit):
    '''Return a random string of `limit` lowercase ASCII letters.'''
    letters = [choice(ascii_lowercase) for _ in range(limit)]
    return ''.join(letters)
def real_number(limit_one, limit_two):
    '''generate a random real number between two random bounds

    The bounds are built from `limit_one` and `limit_two` random decimal
    digits respectively; a uniform sample is drawn between them.
    returns string (NOTE: the original docstring claimed float, but the
    value has always been returned through str())
    '''
    bound_a = int(''.join(choice(digits) for _ in range(limit_one)))
    bound_b = int(''.join(choice(digits) for _ in range(limit_two)))
    # uniform() accepts bounds in either order.
    obj = uniform(bound_a, bound_b)
    return str(obj)
def integer(limit):
    '''Return a string of `limit` random decimal digits.'''
    return ''.join(choice(digits) for _ in range(limit))
def alphanumeric(limit):
    '''Return a random string of `limit` lowercase letters and digits.'''
    pool = ascii_lowercase + digits
    return ''.join(choice(pool) for _ in range(limit))
def generate_object(option):
    '''generate one random object of the requested kind

    Dispatches through callables so only the requested generator runs;
    the original dict-literal version eagerly built all four random
    values on every call and discarded three of them.
    returns object (str for every kind)
    '''
    dispatch = {
        'alphabetical_string': lambda: alphabetical_string(randint(1, 100)),
        'real_number': lambda: real_number(randint(1, 10), randint(1, 10)),
        'integer': lambda: integer(randint(1, 10)),
        'alphanumeric': lambda: alphanumeric(randint(1, 100)),
    }
    return dispatch[option]()
# ================================
# endpoints for random_objects app
# ================================
@app.route("/random", methods=["POST"])
@cross_origin()
def random_objects():
    '''endpoint to generate random objects of 2MB in size

    Objects of four kinds are generated until the accumulated size --
    counting the ', ' separator written after each object -- reaches
    2 MiB (2097152 characters). The objects are written to file/file.txt
    and returned along with a per-kind count report.
    returns jsonify object (201) or an error payload (500)
    '''
    try:
        choice_list = ['alphabetical_string', 'real_number', 'integer', 'alphanumeric']
        random_object_list = []

        target_size = 2097152  # 2 MiB, in characters

        total_char = 0
        # Per-kind counters for the report.
        alphabetical_str = 0
        real_num = 0
        integer_num = 0
        alphanumeric_str = 0

        while total_char < target_size:
            option = choice(choice_list)
            object_value = generate_object(option)

            if (len(object_value) + total_char) > target_size:
                # The last object would overshoot: replace it with an
                # alphabetical string padding exactly to the target
                # (minus 2 for the trailing ', ' separator).
                diff = target_size - total_char
                object_value = alphabetical_string(diff - 2)

            random_object_list.append(object_value)
            total_char += len(object_value) + 2  # +2 for ', ' separator

            if total_char < target_size:
                if option == 'alphabetical_string':
                    alphabetical_str += 1
                elif option == 'real_number':
                    real_num += 1
                elif option == 'integer':
                    integer_num += 1
                elif option == 'alphanumeric':
                    alphanumeric_str += 1
            else:
                # The final (padding) object is always alphabetical.
                alphabetical_str += 1

        with open(os.path.join('file', 'file.txt'), "w", encoding='utf8') as file:
            for item in random_object_list:
                file.write(str(item) + ', ')

        data = {
            'message': 'random objects',
            'status': 201,
            'data': {
                'random_object_list': random_object_list,
                'report': {
                    'alphabetical_str' : alphabetical_str,
                    'real_number' : real_num,
                    'integer' : integer_num,
                    'alphanumeric' : alphanumeric_str
                }
            }
        }
        return make_response(jsonify(data))
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # SystemExit / KeyboardInterrupt.
        traceback.print_exc()
        return {'message': 'Internal server error. Failed to create random objects.'}, 500
| 31.422414 | 90 | 0.579424 | import os
import traceback
from random import choice, randint, uniform
from string import ascii_lowercase, digits
from flask import jsonify, make_response
from flask_cors import cross_origin
from app import app
def alphabetical_string(limit):
obj = ''.join(choice(ascii_lowercase) for _ in range(limit))
return obj
def real_number(limit_one, limit_two):
first_num = int(''.join(choice(digits) for _ in range(limit_one)))
second_num = int(''.join(choice(digits) for _ in range(limit_two)))
obj = uniform(first_num, second_num)
return str(obj)
def integer(limit):
obj = ''.join(choice(digits) for _ in range(limit))
return obj
def alphanumeric(limit):
obj = ''.join(choice(ascii_lowercase + digits) for _ in range(limit))
return obj
def generate_object(option):
return {
'alphabetical_string': alphabetical_string(randint(1, 100)),
'real_number': real_number(randint(1, 10), randint(1, 10)),
'integer': integer(randint(1, 10)),
'alphanumeric': alphanumeric(randint(1, 100)),
}[option]
@app.route("/random", methods=["POST"])
@cross_origin()
def random_objects():
try:
choice_list = ['alphabetical_string', 'real_number', 'integer', 'alphanumeric']
random_object_list = []
total_char = 0
alphabetical_str = 0
real_num = 0
integer_num = 0
alphanumeric_str = 0
while total_char < 2097152:
option = choice(choice_list)
object_value = generate_object(option)
if (len(object_value) + total_char) > 2097152:
diff = 2097152 - total_char
object_value = alphabetical_string(diff - 2)
random_object_list.append(object_value)
total_char += len(object_value) + 2
if total_char < 2097152:
if option == 'alphabetical_string':
alphabetical_str += 1
elif option == 'real_number':
real_num += 1
elif option == 'integer':
integer_num += 1
elif option == 'alphanumeric':
alphanumeric_str += 1
else:
alphabetical_str += 1
with open(os.path.join('file', 'file.txt'), "w", encoding='utf8') as file:
for item in random_object_list:
file.write(str(item) + ', ')
data = {
'message': 'random objects',
'status': 201,
'data': {
'random_object_list': random_object_list,
'report': {
'alphabetical_str' : alphabetical_str,
'real_number' : real_num,
'integer' : integer_num,
'alphanumeric' : alphanumeric_str
}
}
}
return make_response(jsonify(data))
except:
traceback.print_exc()
return {'message': 'Internal server error. Failed to create random objects.'}, 500
| true | true |
1c377cad6be391bc997b30a47dba86eff9e00e25 | 390 | py | Python | server/server.py | vertica/hackathon | 2f42beda38052f2da02b80b4c7d3e0a499d6c1c7 | [
"Apache-2.0"
] | 6 | 2016-10-15T16:59:04.000Z | 2018-02-22T17:22:28.000Z | server/server.py | vertica/hackathon | 2f42beda38052f2da02b80b4c7d3e0a499d6c1c7 | [
"Apache-2.0"
] | 1 | 2016-10-12T17:23:33.000Z | 2016-10-12T17:23:33.000Z | server/server.py | vertica/hackathon | 2f42beda38052f2da02b80b4c7d3e0a499d6c1c7 | [
"Apache-2.0"
] | null | null | null | import os
import sys
sys.path.append(os.getcwd() + "/deps")
from flask import Flask
from flask import render_template
app = Flask(__name__)
import db_model as db
import json
from datetime import date
from datetime import timedelta
@app.route("/")
def index():
    """Render the landing page.

    NOTE(review): `results` is fetched from the database but never passed
    to the template -- looks like dead or work-in-progress code; confirm
    before removing.
    """
    results = db.select_one()
    return render_template("index.html")
if __name__ == "__main__":
app.run('0.0.0.0')
| 15 | 40 | 0.712821 | import os
import sys
sys.path.append(os.getcwd() + "/deps")
from flask import Flask
from flask import render_template
app = Flask(__name__)
import db_model as db
import json
from datetime import date
from datetime import timedelta
@app.route("/")
def index():
results = db.select_one()
return render_template("index.html")
if __name__ == "__main__":
app.run('0.0.0.0')
| true | true |
1c377d3e4195e40bbba9ebf6060736f88c5127cf | 728 | py | Python | meiduo_mall/meiduo_mall/apps/orders/adminx.py | huzing2524/Django_MallWeb | a48d5be95a20867efb57235b09e2d6c65c5d8e3c | [
"MIT"
] | 2 | 2020-05-21T03:51:27.000Z | 2020-10-21T06:58:58.000Z | meiduo_mall/meiduo_mall/apps/orders/adminx.py | huzing2524/Django_MallWeb | a48d5be95a20867efb57235b09e2d6c65c5d8e3c | [
"MIT"
] | 4 | 2020-02-23T08:48:53.000Z | 2021-06-10T20:43:47.000Z | meiduo_mall/meiduo_mall/apps/orders/adminx.py | huzing2524/Django_MallWeb | a48d5be95a20867efb57235b09e2d6c65c5d8e3c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import xadmin
from orders.models import OrderInfo
class OrderInfoAdmin(object):
    """Xadmin configuration rendering order info as line charts."""
    model_icon = "fa fa-shopping-cart"  # icon shown in the admin menu
    refresh_times = [3, 5]  # optional auto-refresh intervals (seconds)

    # data_charts options:
    #   title   - chart title
    #   x-field - field plotted on the x axis
    #   y-field - field(s) plotted on the y axis (may list several)
    #   order   - default sort order
    data_charts = {
        "order_amount": {"title": "订单金额", "x-field": "create_time", "y-field": ("total_amount",),
                         "order": ("create_time",)},
        "order_count": {"title": "订单量", "x-field": "create_time", "y-field": ("total_count",),
                        "order": ("create_time",)}
    }
xadmin.site.register(OrderInfo, OrderInfoAdmin)
| 30.333333 | 97 | 0.589286 |
import xadmin
from orders.models import OrderInfo
class OrderInfoAdmin(object):
model_icon = "fa fa-shopping-cart"
refresh_times = [3, 5]
data_charts = {
"order_amount": {"title": "订单金额", "x-field": "create_time", "y-field": ("total_amount",),
"order": ("create_time",)},
"order_count": {"title": "订单量", "x-field": "create_time", "y-field": ("total_count",),
"order": ("create_time",)}
}
xadmin.site.register(OrderInfo, OrderInfoAdmin)
| true | true |
1c377ea32e1ab4c69a8929900cf11416ea8b8566 | 7,885 | py | Python | cinder/volume/drivers/fujitsu/eternus_dx_fc.py | ISCAS-VDI/cinder-base | 9529102548beef074264aaef31fa8267db99df61 | [
"Apache-2.0"
] | null | null | null | cinder/volume/drivers/fujitsu/eternus_dx_fc.py | ISCAS-VDI/cinder-base | 9529102548beef074264aaef31fa8267db99df61 | [
"Apache-2.0"
] | 1 | 2021-03-21T11:38:29.000Z | 2021-03-21T11:38:29.000Z | cinder/volume/drivers/fujitsu/eternus_dx_fc.py | ISCAS-VDI/cinder-base | 9529102548beef074264aaef31fa8267db99df61 | [
"Apache-2.0"
] | 1 | 2021-03-21T11:37:47.000Z | 2021-03-21T11:37:47.000Z | # Copyright (c) 2015 FUJITSU LIMITED
# Copyright (c) 2012 EMC Corporation.
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
FibreChannel Cinder Volume driver for Fujitsu ETERNUS DX S3 series.
"""
from oslo_log import log as logging
import six
from cinder.volume import driver
from cinder.volume.drivers.fujitsu import eternus_dx_common
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
class FJDXFCDriver(driver.FibreChannelDriver):
"""FC Cinder Volume Driver for Fujitsu ETERNUS DX S3 series."""
    def __init__(self, *args, **kwargs):
        """Initialize the FC driver and its shared ETERNUS helper."""
        super(FJDXFCDriver, self).__init__(*args, **kwargs)
        # All array-facing logic lives in the shared common class; this
        # driver only adapts it to the FC protocol.
        self.common = eternus_dx_common.FJDXCommon(
            'fc',
            configuration=self.configuration)
        self.VERSION = self.common.VERSION
    def check_for_setup_error(self):
        """No additional setup validation is performed for this driver."""
        pass
    def create_volume(self, volume):
        """Create volume.

        Delegates LUN creation to the common helper, then merges the
        driver-level volume metadata into the metadata it returned.

        :param volume: volume object ('id' used for logging)
        :returns: model update with 'provider_location' and 'metadata'
        """
        LOG.debug('create_volume, '
                  'volume id: %s, enter method.', volume['id'])

        location, metadata = self.common.create_volume(volume)

        v_metadata = self._get_metadata(volume)
        # Driver-level metadata entries override keys from the common layer.
        metadata.update(v_metadata)

        LOG.debug('create_volume, info: %s, exit method.', metadata)
        return {'provider_location': six.text_type(location),
                'metadata': metadata}
    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot.

        :param volume: destination volume object
        :param snapshot: source snapshot object
        :returns: model update with 'provider_location' and 'metadata'
        """
        LOG.debug('create_volume_from_snapshot, '
                  'volume id: %(vid)s, snap id: %(sid)s, enter method.',
                  {'vid': volume['id'], 'sid': snapshot['id']})

        location, metadata = (
            self.common.create_volume_from_snapshot(volume, snapshot))

        v_metadata = self._get_metadata(volume)
        # Driver-level metadata entries override keys from the common layer.
        metadata.update(v_metadata)

        LOG.debug('create_volume_from_snapshot, '
                  'info: %s, exit method.', metadata)
        return {'provider_location': six.text_type(location),
                'metadata': metadata}
    def create_cloned_volume(self, volume, src_vref):
        """Create cloned volume.

        :param volume: destination (clone) volume object
        :param src_vref: source volume reference
        :returns: model update with 'provider_location' and 'metadata'
        """
        LOG.debug('create_cloned_volume, '
                  'target volume id: %(tid)s, '
                  'source volume id: %(sid)s, enter method.',
                  {'tid': volume['id'], 'sid': src_vref['id']})

        location, metadata = (
            self.common.create_cloned_volume(volume, src_vref))

        v_metadata = self._get_metadata(volume)
        # Driver-level metadata entries override keys from the common layer.
        metadata.update(v_metadata)

        LOG.debug('create_cloned_volume, '
                  'info: %s, exit method.', metadata)
        return {'provider_location': six.text_type(location),
                'metadata': metadata}
def delete_volume(self, volume):
"""Delete volume on ETERNUS."""
LOG.debug('delete_volume, '
'volume id: %s, enter method.', volume['id'])
vol_exist = self.common.delete_volume(volume)
LOG.debug('delete_volume, '
'delete: %s, exit method.', vol_exist)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
LOG.debug('create_snapshot, '
'snap id: %(sid)s, volume id: %(vid)s, enter method.',
{'sid': snapshot['id'], 'vid': snapshot['volume_id']})
location, metadata = self.common.create_snapshot(snapshot)
LOG.debug('create_snapshot, info: %s, exit method.', metadata)
return {'provider_location': six.text_type(location)}
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
LOG.debug('delete_snapshot, '
'snap id: %(sid)s, volume id: %(vid)s, enter method.',
{'sid': snapshot['id'], 'vid': snapshot['volume_id']})
vol_exist = self.common.delete_snapshot(snapshot)
LOG.debug('delete_snapshot, '
'delete: %s, exit method.', vol_exist)
def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume."""
return
def create_export(self, context, volume, connector):
"""Driver entry point to get the export info for a new volume."""
return
def remove_export(self, context, volume):
"""Driver entry point to remove an export for a volume."""
return
@fczm_utils.AddFCZone
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info."""
LOG.debug('initialize_connection, volume id: %(vid)s, '
'wwpns: %(wwpns)s, enter method.',
{'vid': volume['id'], 'wwpns': connector['wwpns']})
info = self.common.initialize_connection(volume, connector)
data = info['data']
init_tgt_map = (
self.common.build_fc_init_tgt_map(connector, data['target_wwn']))
data['initiator_target_map'] = init_tgt_map
info['data'] = data
LOG.debug('initialize_connection, '
'info: %s, exit method.', info)
return info
@fczm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
LOG.debug('terminate_connection, volume id: %(vid)s, '
'wwpns: %(wwpns)s, enter method.',
{'vid': volume['id'], 'wwpns': connector['wwpns']})
map_exist = self.common.terminate_connection(volume, connector)
attached = self.common.check_attached_volume_in_zone(connector)
info = {'driver_volume_type': 'fibre_channel',
'data': {}}
if not attached:
# No more volumes attached to the host
init_tgt_map = self.common.build_fc_init_tgt_map(connector)
info['data'] = {'initiator_target_map': init_tgt_map}
LOG.debug('terminate_connection, unmap: %(unmap)s, '
'connection info: %(info)s, exit method',
{'unmap': map_exist, 'info': info})
return info
def get_volume_stats(self, refresh=False):
"""Get volume stats."""
LOG.debug('get_volume_stats, refresh: %s, enter method.', refresh)
pool_name = None
if refresh is True:
data, pool_name = self.common.update_volume_stats()
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or 'FJDXFCDriver'
data['storage_protocol'] = 'FC'
self._stats = data
LOG.debug('get_volume_stats, '
'pool name: %s, exit method.', pool_name)
return self._stats
def extend_volume(self, volume, new_size):
"""Extend volume."""
LOG.debug('extend_volume, '
'volume id: %s, enter method.', volume['id'])
used_pool_name = self.common.extend_volume(volume, new_size)
LOG.debug('extend_volume, '
'used pool name: %s, exit method.', used_pool_name)
def _get_metadata(self, volume):
v_metadata = volume.get('volume_metadata')
if v_metadata:
ret = {data['key']: data['value'] for data in v_metadata}
else:
ret = volume.get('metadata', {})
return ret
| 36.674419 | 79 | 0.611795 |
from oslo_log import log as logging
import six
from cinder.volume import driver
from cinder.volume.drivers.fujitsu import eternus_dx_common
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
class FJDXFCDriver(driver.FibreChannelDriver):
    """FC Cinder Volume Driver for Fujitsu ETERNUS DX S3 series.

    Thin Fibre Channel front-end: every operation delegates to the shared
    FJDXCommon helper (configured for the 'fc' protocol) and logs method
    entry/exit for tracing.
    """
    def __init__(self, *args, **kwargs):
        super(FJDXFCDriver, self).__init__(*args, **kwargs)
        # All array interaction goes through this protocol-specific helper.
        self.common = eternus_dx_common.FJDXCommon(
            'fc',
            configuration=self.configuration)
        self.VERSION = self.common.VERSION
    def check_for_setup_error(self):
        """Setup validation is handled inside FJDXCommon; nothing to do."""
        pass
    def create_volume(self, volume):
        """Create a volume and return its provider location and metadata."""
        LOG.debug('create_volume, '
                  'volume id: %s, enter method.', volume['id'])
        location, metadata = self.common.create_volume(volume)
        # Merge user-supplied volume metadata into the backend metadata.
        v_metadata = self._get_metadata(volume)
        metadata.update(v_metadata)
        LOG.debug('create_volume, info: %s, exit method.', metadata)
        return {'provider_location': six.text_type(location),
                'metadata': metadata}
    def create_volume_from_snapshot(self, volume, snapshot):
        """Create a volume from a snapshot."""
        LOG.debug('create_volume_from_snapshot, '
                  'volume id: %(vid)s, snap id: %(sid)s, enter method.',
                  {'vid': volume['id'], 'sid': snapshot['id']})
        location, metadata = (
            self.common.create_volume_from_snapshot(volume, snapshot))
        v_metadata = self._get_metadata(volume)
        metadata.update(v_metadata)
        LOG.debug('create_volume_from_snapshot, '
                  'info: %s, exit method.', metadata)
        return {'provider_location': six.text_type(location),
                'metadata': metadata}
    def create_cloned_volume(self, volume, src_vref):
        """Create a clone of an existing volume."""
        LOG.debug('create_cloned_volume, '
                  'target volume id: %(tid)s, '
                  'source volume id: %(sid)s, enter method.',
                  {'tid': volume['id'], 'sid': src_vref['id']})
        location, metadata = (
            self.common.create_cloned_volume(volume, src_vref))
        v_metadata = self._get_metadata(volume)
        metadata.update(v_metadata)
        LOG.debug('create_cloned_volume, '
                  'info: %s, exit method.', metadata)
        return {'provider_location': six.text_type(location),
                'metadata': metadata}
    def delete_volume(self, volume):
        """Delete a volume on the ETERNUS array."""
        LOG.debug('delete_volume, '
                  'volume id: %s, enter method.', volume['id'])
        vol_exist = self.common.delete_volume(volume)
        LOG.debug('delete_volume, '
                  'delete: %s, exit method.', vol_exist)
    def create_snapshot(self, snapshot):
        """Create a snapshot; only the provider location is returned."""
        LOG.debug('create_snapshot, '
                  'snap id: %(sid)s, volume id: %(vid)s, enter method.',
                  {'sid': snapshot['id'], 'vid': snapshot['volume_id']})
        location, metadata = self.common.create_snapshot(snapshot)
        LOG.debug('create_snapshot, info: %s, exit method.', metadata)
        return {'provider_location': six.text_type(location)}
    def delete_snapshot(self, snapshot):
        """Delete a snapshot."""
        LOG.debug('delete_snapshot, '
                  'snap id: %(sid)s, volume id: %(vid)s, enter method.',
                  {'sid': snapshot['id'], 'vid': snapshot['volume_id']})
        vol_exist = self.common.delete_snapshot(snapshot)
        LOG.debug('delete_snapshot, '
                  'delete: %s, exit method.', vol_exist)
    def ensure_export(self, context, volume):
        """Exports are managed on the array; nothing to do."""
        return
    def create_export(self, context, volume, connector):
        """Exports are managed on the array; nothing to do."""
        return
    def remove_export(self, context, volume):
        """Exports are managed on the array; nothing to do."""
        return
    @fczm_utils.AddFCZone
    def initialize_connection(self, volume, connector):
        """Allow connection to connector and return connection info."""
        LOG.debug('initialize_connection, volume id: %(vid)s, '
                  'wwpns: %(wwpns)s, enter method.',
                  {'vid': volume['id'], 'wwpns': connector['wwpns']})
        info = self.common.initialize_connection(volume, connector)
        data = info['data']
        # Build the initiator/target map so the zone manager (AddFCZone
        # decorator) can create the fabric zones for this attach.
        init_tgt_map = (
            self.common.build_fc_init_tgt_map(connector, data['target_wwn']))
        data['initiator_target_map'] = init_tgt_map
        info['data'] = data
        LOG.debug('initialize_connection, '
                  'info: %s, exit method.', info)
        return info
    @fczm_utils.RemoveFCZone
    def terminate_connection(self, volume, connector, **kwargs):
        """Disallow connection from connector."""
        LOG.debug('terminate_connection, volume id: %(vid)s, '
                  'wwpns: %(wwpns)s, enter method.',
                  {'vid': volume['id'], 'wwpns': connector['wwpns']})
        map_exist = self.common.terminate_connection(volume, connector)
        attached = self.common.check_attached_volume_in_zone(connector)
        info = {'driver_volume_type': 'fibre_channel',
                'data': {}}
        if not attached:
            # No volumes left attached to this host: return the map so
            # RemoveFCZone can tear the fabric zones down.
            init_tgt_map = self.common.build_fc_init_tgt_map(connector)
            info['data'] = {'initiator_target_map': init_tgt_map}
        LOG.debug('terminate_connection, unmap: %(unmap)s, '
                  'connection info: %(info)s, exit method',
                  {'unmap': map_exist, 'info': info})
        return info
    def get_volume_stats(self, refresh=False):
        """Return volume stats; poll the array only when refresh is True."""
        LOG.debug('get_volume_stats, refresh: %s, enter method.', refresh)
        pool_name = None
        if refresh is True:
            data, pool_name = self.common.update_volume_stats()
            backend_name = self.configuration.safe_get('volume_backend_name')
            data['volume_backend_name'] = backend_name or 'FJDXFCDriver'
            data['storage_protocol'] = 'FC'
            self._stats = data
        LOG.debug('get_volume_stats, '
                  'pool name: %s, exit method.', pool_name)
        return self._stats
    def extend_volume(self, volume, new_size):
        """Extend a volume to new_size."""
        LOG.debug('extend_volume, '
                  'volume id: %s, enter method.', volume['id'])
        used_pool_name = self.common.extend_volume(volume, new_size)
        LOG.debug('extend_volume, '
                  'used pool name: %s, exit method.', used_pool_name)
    def _get_metadata(self, volume):
        """Return volume metadata as a plain dict.

        Prefers the volume_metadata object list; falls back to the
        'metadata' mapping.
        """
        v_metadata = volume.get('volume_metadata')
        if v_metadata:
            ret = {data['key']: data['value'] for data in v_metadata}
        else:
            ret = volume.get('metadata', {})
        return ret
| true | true |
1c377fbdffc7e939aac9a2f7bc4d0933a1addc8e | 4,062 | py | Python | datasets/mvtec.py | endrol/Anomaly_Clustering | 670546751543f1d919c4a788e96bcf4405e3423c | [
"MIT"
] | null | null | null | datasets/mvtec.py | endrol/Anomaly_Clustering | 670546751543f1d919c4a788e96bcf4405e3423c | [
"MIT"
] | null | null | null | datasets/mvtec.py | endrol/Anomaly_Clustering | 670546751543f1d919c4a788e96bcf4405e3423c | [
"MIT"
] | null | null | null | import os
import sys
from pathlib import Path
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from torch import Tensor
from torch.utils.data import Dataset
from torchvision import transforms as T
import config as c
# The trailing comma makes this a 1-tuple; a bare ("MVTecDataset") is just a
# string, so `from datasets.mvtec import *` would try to import each character.
__all__ = ("MVTecDataset",)
# URL = 'ftp://guest:GU.205dldo@ftp.softronics.ch/mvtec_anomaly_detection/mvtec_anomaly_detection.tar.xz'
MVTEC_CLASS_NAMES = [
"bottle",
"cable",
"capsule",
"carpet",
"grid",
"hazelnut",
"leather",
"metal_nut",
"pill",
"screw",
"tile",
"toothbrush",
"transistor",
"wood",
"zipper",
]
class MVTecDataset(Dataset):
    """MVTec AD dataset for a single object class.

    Collects image paths (and ground-truth mask paths for defective
    samples) for the class selected in the ``config`` module, then applies
    the resize/crop/normalize transforms lazily on access.

    :param is_train: select the "train" split when True, "test" otherwise.
    """

    def __init__(self, is_train=True):
        assert c.class_name in MVTEC_CLASS_NAMES, "class_name: {}, should be in {}".format(
            c.class_name, MVTEC_CLASS_NAMES
        )
        self.dataset_path = c.mvtec_data_path
        self.class_name = c.class_name
        self.is_train = is_train
        self.cropsize = c.crp_size
        # load dataset file lists (paths only; images are opened lazily)
        self.x, self.y, self.mask = self.load_dataset_folder()
        # set transforms.  Image.LANCZOS is the filter Image.ANTIALIAS
        # aliased; ANTIALIAS itself was removed in Pillow 10.
        if is_train:
            self.transform_x = T.Compose(
                [
                    T.Resize(c.img_size, Image.LANCZOS),
                    T.CenterCrop(c.crp_size),
                    T.ToTensor(),
                ]
            )
        # test: currently identical to the train transform
        else:
            self.transform_x = T.Compose(
                [T.Resize(c.img_size, Image.LANCZOS), T.CenterCrop(c.crp_size), T.ToTensor()]
            )
        # masks use nearest-neighbour resampling to keep labels binary
        self.transform_mask = T.Compose(
            [T.Resize(c.img_size, Image.NEAREST), T.CenterCrop(c.crp_size), T.ToTensor()]
        )
        self.normalize = T.Compose([T.Normalize(c.norm_mean, c.norm_std)])

    def __getitem__(self, idx):
        """Return ``(image, label, mask)``; label is 0 for good, 1 for defect."""
        x, y, mask = self.x[idx], self.y[idx], self.mask[idx]
        x = Image.open(x)
        if self.class_name in ["zipper", "screw", "grid"]:  # handle greyscale classes
            # Stack the single channel three times to fake an RGB image.
            x = np.expand_dims(np.array(x), axis=2)
            x = np.concatenate([x, x, x], axis=2)
            x = Image.fromarray(x.astype("uint8")).convert("RGB")
        x = self.normalize(self.transform_x(x))
        if y == 0:
            # good sample: all-zero mask of the cropped size
            mask = torch.zeros([1, self.cropsize[0], self.cropsize[1]])
        else:
            mask = Image.open(mask)
            mask = self.transform_mask(mask)
        return x, y, mask

    def __len__(self):
        return len(self.x)

    def load_dataset_folder(self):
        """Collect image paths, labels and mask paths for the current phase."""
        phase = "train" if self.is_train else "test"
        x, y, mask = [], [], []
        img_dir = os.path.join(self.dataset_path, self.class_name, phase)
        gt_dir = os.path.join(self.dataset_path, self.class_name, "ground_truth")
        img_types = sorted(os.listdir(img_dir))
        for img_type in img_types:
            # load images
            img_type_dir = os.path.join(img_dir, img_type)
            if not os.path.isdir(img_type_dir):
                continue
            img_fpath_list = sorted(
                [
                    os.path.join(img_type_dir, f)
                    for f in os.listdir(img_type_dir)
                    if f.endswith(".png")
                ]
            )
            x.extend(img_fpath_list)
            # load gt labels: "good" images have no defect mask
            if img_type == "good":
                y.extend([0] * len(img_fpath_list))
                mask.extend([None] * len(img_fpath_list))
            else:
                y.extend([1] * len(img_fpath_list))
                gt_type_dir = os.path.join(gt_dir, img_type)
                img_fname_list = [os.path.splitext(os.path.basename(f))[0] for f in img_fpath_list]
                gt_fpath_list = [
                    os.path.join(gt_type_dir, img_fname + "_mask.png")
                    for img_fname in img_fname_list
                ]
                mask.extend(gt_fpath_list)
        assert len(x) == len(y), "number of x and y should be same"
        return list(x), list(y), list(mask)
| 30.772727 | 105 | 0.549483 | import os
import sys
from pathlib import Path
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from torch import Tensor
from torch.utils.data import Dataset
from torchvision import transforms as T
import config as c
# The trailing comma makes this a 1-tuple; a bare ("MVTecDataset") is just a
# string, so `from datasets.mvtec import *` would try to import each character.
__all__ = ("MVTecDataset",)
MVTEC_CLASS_NAMES = [
"bottle",
"cable",
"capsule",
"carpet",
"grid",
"hazelnut",
"leather",
"metal_nut",
"pill",
"screw",
"tile",
"toothbrush",
"transistor",
"wood",
"zipper",
]
class MVTecDataset(Dataset):
    """MVTec AD dataset for a single object class.

    Collects image paths (and ground-truth mask paths for defective
    samples) for the class selected in the ``config`` module, then applies
    the resize/crop/normalize transforms lazily on access.
    """
    def __init__(self, is_train=True):
        assert c.class_name in MVTEC_CLASS_NAMES, "class_name: {}, should be in {}".format(
            c.class_name, MVTEC_CLASS_NAMES
        )
        self.dataset_path = c.mvtec_data_path
        self.class_name = c.class_name
        self.is_train = is_train
        self.cropsize = c.crp_size
        # Load the file lists (paths only; images are opened lazily).
        self.x, self.y, self.mask = self.load_dataset_folder()
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; on newer
        # Pillow this needs Image.LANCZOS (the same filter) — confirm the
        # pinned Pillow version.
        if is_train:
            self.transform_x = T.Compose(
                [
                    T.Resize(c.img_size, Image.ANTIALIAS),
                    T.CenterCrop(c.crp_size),
                    T.ToTensor(),
                ]
            )
        else:
            # Test transform is currently identical to the train transform.
            self.transform_x = T.Compose(
                [T.Resize(c.img_size, Image.ANTIALIAS), T.CenterCrop(c.crp_size), T.ToTensor()]
            )
        # Masks use nearest-neighbour resampling to keep labels binary.
        self.transform_mask = T.Compose(
            [T.Resize(c.img_size, Image.NEAREST), T.CenterCrop(c.crp_size), T.ToTensor()]
        )
        self.normalize = T.Compose([T.Normalize(c.norm_mean, c.norm_std)])
    def __getitem__(self, idx):
        """Return (image, label, mask); label is 0 for good, 1 for defect."""
        x, y, mask = self.x[idx], self.y[idx], self.mask[idx]
        x = Image.open(x)
        if self.class_name in ["zipper", "screw", "grid"]:
            # Greyscale classes: stack the channel three times to fake RGB.
            x = np.expand_dims(np.array(x), axis=2)
            x = np.concatenate([x, x, x], axis=2)
            x = Image.fromarray(x.astype("uint8")).convert("RGB")
        x = self.normalize(self.transform_x(x))
        if y == 0:
            # Good sample: all-zero mask of the cropped size.
            mask = torch.zeros([1, self.cropsize[0], self.cropsize[1]])
        else:
            mask = Image.open(mask)
            mask = self.transform_mask(mask)
        return x, y, mask
    def __len__(self):
        return len(self.x)
    def load_dataset_folder(self):
        """Collect image paths, labels and mask paths for the current phase."""
        phase = "train" if self.is_train else "test"
        x, y, mask = [], [], []
        img_dir = os.path.join(self.dataset_path, self.class_name, phase)
        gt_dir = os.path.join(self.dataset_path, self.class_name, "ground_truth")
        img_types = sorted(os.listdir(img_dir))
        for img_type in img_types:
            img_type_dir = os.path.join(img_dir, img_type)
            if not os.path.isdir(img_type_dir):
                continue
            img_fpath_list = sorted(
                [
                    os.path.join(img_type_dir, f)
                    for f in os.listdir(img_type_dir)
                    if f.endswith(".png")
                ]
            )
            x.extend(img_fpath_list)
            # "good" images have no defect mask.
            if img_type == "good":
                y.extend([0] * len(img_fpath_list))
                mask.extend([None] * len(img_fpath_list))
            else:
                y.extend([1] * len(img_fpath_list))
                gt_type_dir = os.path.join(gt_dir, img_type)
                img_fname_list = [os.path.splitext(os.path.basename(f))[0] for f in img_fpath_list]
                gt_fpath_list = [
                    os.path.join(gt_type_dir, img_fname + "_mask.png")
                    for img_fname in img_fname_list
                ]
                mask.extend(gt_fpath_list)
        assert len(x) == len(y), "number of x and y should be same"
        return list(x), list(y), list(mask)
| true | true |
1c3780fe5e97c0cbeb6ccf8821cb0b6803774c70 | 3,750 | py | Python | lib/lane.py | bajcmartinez/Finding-Car-Lanes-Without-Deep-Learning | 2d660ce1f6f3ed5c57ddd919a13b65853dee0758 | [
"MIT"
] | 3 | 2021-09-06T18:02:33.000Z | 2021-12-04T20:10:36.000Z | lib/lane.py | bajcmartinez/Finding-Car-Lanes-Without-Deep-Learning | 2d660ce1f6f3ed5c57ddd919a13b65853dee0758 | [
"MIT"
] | 9 | 2021-04-26T15:08:20.000Z | 2021-09-08T07:10:33.000Z | lib/lane.py | bajcmartinez/Finding-Car-Lanes-Without-Deep-Learning | 2d660ce1f6f3ed5c57ddd919a13b65853dee0758 | [
"MIT"
] | null | null | null | import numpy as np
import cv2
class Lane():
    """
    Receives and tracks the characteristics of one lane-line detection.

    Pixel coordinates are converted to meters with the xm_per_pix /
    ym_per_pix scale factors supplied at construction.  Candidate fits are
    accepted only when their radius of curvature is consistent with the
    previously accepted radius, and accepted fits feed a weighted rolling
    average (`best_fit`).
    """
    def __init__(self, xm_per_pix, ym_per_pix):
        # was the line detected in the last iteration?
        self.detected = False
        # x values of the last n fits of the line
        self.recent_x_fitted = []
        # average x values of the fitted line over the last n iterations
        self.best_x = None
        # polynomial coefficients averaged over the last n iterations
        self.best_fit = None
        # polynomial coefficients for the most recent fit
        self.current_fit = [np.array([False])]
        # polynomial coefficients for the recent fits
        self.history_fit = []
        # max count for elements in the history, 1 second approx
        self.max_history = 30
        # weights used to calculate the history average (newer fits heavier)
        self.history_weights = [x // 2 + 1 for x in range(self.max_history)]
        # radius of curvature of the line in some units
        self.radius_of_curvature = None
        # relative change of the last sanity-checked radius
        self._insanity = 0.0
        # distance in meters of vehicle center from the line
        self.line_base_pos = None
        # difference in fit coefficients between last and new fits
        self.diffs = np.array([0, 0, 0], dtype='float')
        # x values for detected line pixels
        self.all_x = None
        # y values for detected line pixels
        self.all_y = None
        # meters per pixel in each dimension
        self._xm_per_pix = xm_per_pix
        self._ym_per_pix = ym_per_pix

    def sanity_check_lane(self, R):
        """
        Check the radius of curvature `R` against the last accepted radius.

        Returns True when there is no prior radius, or when the relative
        change is at most 50%.
        """
        if self.radius_of_curvature is None:
            # No prior data to compare against.
            return True
        R0 = self.radius_of_curvature
        self._insanity = abs(R - R0) / R0
        return self._insanity <= 0.5

    def calculate_curvature(self, image_height=720):
        """
        Return the radius of curvature of the current pixel points,
        evaluated at the bottom of the image (closest row to the vehicle).

        :param image_height: image height in pixels; curvature is evaluated
            at y = image_height - 1.  Defaults to 720, preserving the
            previously hard-coded behaviour.
        """
        # Fit x = a*y^2 + b*y + c in world (meter) coordinates.
        fit_cr = np.polyfit(self.all_y * self._ym_per_pix, self.all_x * self._xm_per_pix, 2)
        # Bottom row of the image; the original built a 720-element
        # linspace only to take its max, which is just image_height - 1.
        y_eval = image_height - 1
        curve = ((1 + (2 * fit_cr[0] * y_eval * self._ym_per_pix + fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * fit_cr[0])
        return curve

    def add_fit(self, fit, points_x, points_y):
        """
        Add a candidate polynomial fit to the lane.

        The fit is accepted only if its radius of curvature passes
        `sanity_check_lane`; accepted fits update the weighted rolling
        average `best_fit`.

        :param fit: second order polynomial coefficients for the lane, or
            None when detection failed upstream.
        :param points_x: x values of the detected line pixels.
        :param points_y: y values of the detected line pixels.
        """
        if fit is None:
            self._reject_fit()
            return
        if self.best_fit is not None:
            # Track how far the new fit drifts from the rolling average.
            self.diffs = abs(fit - self.best_fit)
        # Update points before computing the curvature from them.
        self.all_x = points_x
        self.all_y = points_y
        _radius_of_curvature = self.calculate_curvature()
        self.detected = self.sanity_check_lane(_radius_of_curvature)
        if self.detected:
            self.radius_of_curvature = _radius_of_curvature
            # Good fit: store it and refresh the weighted history average.
            self.current_fit = fit
            self.history_fit.append(fit)
            # keep only the last N items
            self.history_fit = self.history_fit[-self.max_history:]
            self.best_fit = np.average(self.history_fit, axis=0,
                                       weights=self.history_weights[:len(self.history_fit)])
        else:
            # We failed the sanity check.
            self._reject_fit()

    def _reject_fit(self):
        """Mark the lane as not detected and reset the current fit."""
        self.detected = False
        self.current_fit = [np.array([False])]
| 36.764706 | 122 | 0.5984 | import numpy as np
import cv2
class Lane():
    """
    Receives and tracks the characteristics of one lane-line detection.

    Pixel coordinates are converted to meters with the xm_per_pix /
    ym_per_pix scale factors supplied at construction.
    """
    def __init__(self, xm_per_pix, ym_per_pix):
        # was the line detected in the last iteration?
        self.detected = False
        # x values of the last n fits of the line
        self.recent_x_fitted = []
        # average x values of the fitted line over recent iterations
        self.best_x = None
        # polynomial coefficients averaged over the history
        self.best_fit = None
        # polynomial coefficients for the most recent accepted fit
        self.current_fit = [np.array([False])]
        # polynomial coefficients for the recent fits
        self.history_fit = []
        # max count of elements kept in the history
        self.max_history = 30
        # weights for the history average (newer fits weigh more)
        self.history_weights = [x//2+1 for x in range(self.max_history)]
        # radius of curvature of the line
        self.radius_of_curvature = None
        # relative change measured by the last sanity check
        self._insanity = 0.0
        # distance in meters of vehicle center from the line
        self.line_base_pos = None
        # difference in fit coefficients between last and new fit
        self.diffs = np.array([0, 0, 0], dtype='float')
        # x / y values for detected line pixels
        self.all_x = None
        self.all_y = None
        # meters per pixel in each dimension
        self._xm_per_pix = xm_per_pix
        self._ym_per_pix = ym_per_pix
    def sanity_check_lane(self, R):
        """Return True when R is within 50% of the last accepted radius."""
        if self.radius_of_curvature is None:
            # No prior data to compare against.
            return True
        R0 = self.radius_of_curvature
        self._insanity = abs(R - R0) / R0
        return self._insanity <= 0.5
    def calculate_curvature(self):
        """Radius of curvature of the current points at the image bottom."""
        # Fit x = a*y^2 + b*y + c in world (meter) coordinates.
        fit_cr = np.polyfit(self.all_y * self._ym_per_pix, self.all_x * self._xm_per_pix, 2)
        # y_eval ends up as 719, the bottom row of a 720-pixel-high image.
        plot_y = np.linspace(0, 720 - 1, 720)
        y_eval = np.max(plot_y)
        curve = ((1 + (2 * fit_cr[0] * y_eval * self._ym_per_pix + fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * fit_cr[0])
        return curve
    def add_fit(self, fit, points_x, points_y):
        """Add a candidate fit; accept it only if it passes the sanity check."""
        if fit is not None:
            if self.best_fit is not None:
                # Track how far the new fit drifts from the rolling average.
                self.diffs = abs(fit - self.best_fit)
            self.detected = True
            self.all_x = points_x
            self.all_y = points_y
            _radius_of_curvature = self.calculate_curvature()
            self.detected = self.sanity_check_lane(_radius_of_curvature)
            if self.detected:
                self.radius_of_curvature = _radius_of_curvature
                # Good fit: store it and refresh the weighted average.
                self.current_fit = fit
                self.history_fit.append(fit)
                # Keep only the last N fits.
                self.history_fit = self.history_fit[-self.max_history:]
                self.best_fit = np.average(self.history_fit, axis=0, weights=self.history_weights[:len(self.history_fit)])
            else:
                # Failed the sanity check: reset the current fit.
                self.detected = False
                self.current_fit = [np.array([False])]
        else:
            # No fit supplied by the detector.
            self.detected = False
            self.current_fit = [np.array([False])]
| true | true |
1c37823592f2b40118fd94b5e61dc5fb79f4e468 | 1,390 | py | Python | Data_Structures/Script02.py | Robert-Ma/Python_Exercises | 73498f7e44aea452b549776dad57545ccc27f355 | [
"MIT"
] | null | null | null | Data_Structures/Script02.py | Robert-Ma/Python_Exercises | 73498f7e44aea452b549776dad57545ccc27f355 | [
"MIT"
] | null | null | null | Data_Structures/Script02.py | Robert-Ma/Python_Exercises | 73498f7e44aea452b549776dad57545ccc27f355 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Write a Python program to iterate over an enum class and display individual member and their value;
Write a Python program to display all the member name of an enum class ordered by their values.
Write a Python program to get all values from an enum class.
"""
from enum import Enum
class Country(Enum):
    """Countries mapped to their international calling codes.

    The ordering dunders compare members by numeric value, so
    ``sorted(Country)`` lists members by calling code; comparisons against
    non-Country operands fall back to NotImplemented.
    """

    Afghaistan = 93
    Albania = 355
    Algeria = 213
    Andorra = 376
    Angola = 244
    Antarctica = 672

    def __ge__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.value >= other.value

    def __gt__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.value > other.value

    def __le__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.value <= other.value

    def __lt__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.value < other.value

    def __repr__(self):
        # e.g. repr(Country.Angola) -> '(Angola, 244)'
        return f'({self.name}, {self.value})'
if __name__ == '__main__':
    # Print each member ordered by calling code (sorted() uses Country.__lt__).
    for x in list(sorted(Country)):
        print(x.name, ':', x.value)
    # To get all values (in declaration order, not sorted)
    y = [item.value for item in Country]
    print(y)
| 25.740741 | 99 | 0.615827 |
from enum import Enum
class Country(Enum):
    """Countries mapped to their international calling codes."""
    Afghaistan = 93
    Albania = 355
    Algeria = 213
    Andorra = 376
    Angola = 244
    Antarctica = 672
    # The ordering dunders below compare members by numeric value so that
    # sorted(Country) lists members by calling code; comparisons against
    # non-Country operands return NotImplemented.
    def __ge__(self, other):
        if self.__class__ is other.__class__:
            return self.value >= other.value
        else:
            return NotImplemented
    def __gt__(self, other):
        if self.__class__ is other.__class__:
            return self.value > other.value
        else:
            return NotImplemented
    def __le__(self, other):
        if self.__class__ is other.__class__:
            return self.value <= other.value
        else:
            return NotImplemented
    def __lt__(self, other):
        if self.__class__ is other.__class__:
            return self.value < other.value
        else:
            return NotImplemented
    def __repr__(self):
        # e.g. repr(Country.Angola) -> '(Angola, 244)'
        return '(%s, %d)' % (self.name, self.value)
if __name__ == '__main__':
    # Print each member ordered by calling code (sorted() uses Country.__lt__).
    for x in list(sorted(Country)):
        print(x.name, ':', x.value)
    # Collect all calling-code values (in declaration order).
    y = [item.value for item in Country]
    print(y)
| true | true |
1c37832438c7ced5e2d754a8126138d1e1be8b9e | 14,324 | py | Python | examples/03-magnetics/plot_inv_mag_MVI_Sparse_TreeMesh.py | jcapriot/simpeg | e88e653673c6b818592b6c075f76ee9215fe82b7 | [
"MIT"
] | 1 | 2020-06-04T21:57:47.000Z | 2020-06-04T21:57:47.000Z | examples/03-magnetics/plot_inv_mag_MVI_Sparse_TreeMesh.py | jcapriot/simpeg | e88e653673c6b818592b6c075f76ee9215fe82b7 | [
"MIT"
] | null | null | null | examples/03-magnetics/plot_inv_mag_MVI_Sparse_TreeMesh.py | jcapriot/simpeg | e88e653673c6b818592b6c075f76ee9215fe82b7 | [
"MIT"
] | 1 | 2021-01-05T18:16:54.000Z | 2021-01-05T18:16:54.000Z | """
Magnetic inversion on a TreeMesh
================================
In this example, we demonstrate the use of a Magnetic Vector Inversion
on a 3D TreeMesh for the inversion of magnetic data affected by remanence.
The mesh is auto-generated based
on the position of the observation locations and topography.
We invert the data twice, first for a smooth starting model using the
Cartesian coordinate system, and second for a compact model using
the Spherical formulation.
The inverse problem uses the :class:`SimPEG.regularization.Sparse`
regularization to recover a compact magnetization model.
"""
from discretize import TreeMesh
from SimPEG import (
data,
data_misfit,
directives,
maps,
inverse_problem,
optimization,
inversion,
regularization,
)
from SimPEG import utils
from SimPEG.utils import mkvc
from discretize.utils import mesh_builder_xyz, refine_tree_xyz
from SimPEG.potential_fields import magnetics
import scipy as sp
import numpy as np
import matplotlib.pyplot as plt
# sphinx_gallery_thumbnail_number = 3
###############################################################################
# Setup
# -----
#
# Define the survey and model parameters
#
# First we need to define the direction of the inducing field
# As a simple case, we pick a vertical inducing field of magnitude 50,000 nT.
#
#
# Seed NumPy's generator for reproducibility.  The original called
# sp.random.seed: scipy.random was merely an alias of numpy.random and the
# alias namespace is deprecated/removed in modern SciPy.
np.random.seed(1)

# We will assume a vertical inducing field
H0 = (50000.0, 90.0, 0.0)

# The magnetization is set along a different direction (induced + remanence)
M = np.array([45.0, 90.0])

# Create grid of points for topography
# Lets create a simple Gaussian topo and set the active cells
[xx, yy] = np.meshgrid(np.linspace(-200, 200, 50), np.linspace(-200, 200, 50))
b = 100
A = 50
zz = A * np.exp(-0.5 * ((xx / b) ** 2.0 + (yy / b) ** 2.0))
topo = np.c_[utils.mkvc(xx), utils.mkvc(yy), utils.mkvc(zz)]

# Create an array of observation points
xr = np.linspace(-100.0, 100.0, 20)
yr = np.linspace(-100.0, 100.0, 20)
X, Y = np.meshgrid(xr, yr)
Z = A * np.exp(-0.5 * ((X / b) ** 2.0 + (Y / b) ** 2.0)) + 5

# Create a MAGsurvey
xyzLoc = np.c_[mkvc(X.T), mkvc(Y.T), mkvc(Z.T)]
rxLoc = magnetics.receivers.Point(xyzLoc)
srcField = magnetics.sources.SourceField(receiver_list=[rxLoc], parameters=H0)
survey = magnetics.survey.Survey(srcField)

# Here is how the topography looks with a quick interpolation, just a Gaussian...
# `import scipy as sp` alone does not guarantee the spatial submodule is
# loaded, so import it explicitly before using sp.spatial.Delaunay.
import scipy.spatial

tri = sp.spatial.Delaunay(topo)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection="3d")
ax.plot_trisurf(
    topo[:, 0], topo[:, 1], topo[:, 2], triangles=tri.simplices, cmap=plt.cm.Spectral
)
ax.scatter3D(xyzLoc[:, 0], xyzLoc[:, 1], xyzLoc[:, 2], c="k")
plt.show()
###############################################################################
# Inversion Mesh
# --------------
#
# Here, we create a TreeMesh with base cell size of 5 m. We created a small
# utility function to center the mesh around points and to figure out the
# outer most dimension for adequate padding distance.
# The second stage allows to refine the mesh around points or surfaces
# (point assumed to follow some horizontal trend)
# The refinement process is repeated twice to allow for a finer level around
# the survey locations.
#
# Create a mesh
h = [5, 5, 5]  # base cell widths (x, y, z) in meters
padDist = np.ones((3, 2)) * 100  # padding distance on both sides of each axis
mesh = mesh_builder_xyz(
    xyzLoc, h, padding_distance=padDist, depth_core=100, mesh_type="tree"
)
# Refine the tree around the topographic surface (two octree levels).
mesh = refine_tree_xyz(
    mesh, topo, method="surface", octree_levels=[4, 4], finalize=True
)
# Define the active cells (below topography)
actv = utils.surface2ind_topo(mesh, topo)
nC = int(actv.sum())  # number of active cells in the inversion
###########################################################################
# A simple function to plot vectors in TreeMesh
#
# Should eventually end up on discretize
#
def plotVectorSectionsOctree(
mesh,
m,
normal="X",
ind=0,
vmin=None,
vmax=None,
scale=1.0,
vec="k",
axs=None,
actvMap=None,
fill=True,
):
"""
Plot section through a 3D tensor model
"""
# plot recovered model
normalInd = {"X": 0, "Y": 1, "Z": 2}[normal]
antiNormalInd = {"X": [1, 2], "Y": [0, 2], "Z": [0, 1]}[normal]
h2d = (mesh.h[antiNormalInd[0]], mesh.h[antiNormalInd[1]])
x2d = (mesh.x0[antiNormalInd[0]], mesh.x0[antiNormalInd[1]])
#: Size of the sliced dimension
szSliceDim = len(mesh.h[normalInd])
if ind is None:
ind = int(szSliceDim // 2)
cc_tensor = [None, None, None]
for i in range(3):
cc_tensor[i] = np.cumsum(np.r_[mesh.x0[i], mesh.h[i]])
cc_tensor[i] = (cc_tensor[i][1:] + cc_tensor[i][:-1]) * 0.5
slice_loc = cc_tensor[normalInd][ind]
# Create a temporary TreeMesh with the slice through
temp_mesh = TreeMesh(h2d, x2d)
level_diff = mesh.max_level - temp_mesh.max_level
XS = [None, None, None]
XS[antiNormalInd[0]], XS[antiNormalInd[1]] = np.meshgrid(
cc_tensor[antiNormalInd[0]], cc_tensor[antiNormalInd[1]]
)
XS[normalInd] = np.ones_like(XS[antiNormalInd[0]]) * slice_loc
loc_grid = np.c_[XS[0].reshape(-1), XS[1].reshape(-1), XS[2].reshape(-1)]
inds = np.unique(mesh._get_containing_cell_indexes(loc_grid))
grid2d = mesh.gridCC[inds][:, antiNormalInd]
levels = mesh._cell_levels_by_indexes(inds) - level_diff
temp_mesh.insert_cells(grid2d, levels)
tm_gridboost = np.empty((temp_mesh.nC, 3))
tm_gridboost[:, antiNormalInd] = temp_mesh.gridCC
tm_gridboost[:, normalInd] = slice_loc
# Interpolate values to mesh.gridCC if not 'CC'
mx = actvMap * m[:, 0]
my = actvMap * m[:, 1]
mz = actvMap * m[:, 2]
m = np.c_[mx, my, mz]
# Interpolate values from mesh.gridCC to grid2d
ind_3d_to_2d = mesh._get_containing_cell_indexes(tm_gridboost)
v2d = m[ind_3d_to_2d, :]
amp = np.sum(v2d ** 2.0, axis=1) ** 0.5
if axs is None:
axs = plt.subplot(111)
if fill:
temp_mesh.plotImage(amp, ax=axs, clim=[vmin, vmax], grid=True)
axs.quiver(
temp_mesh.gridCC[:, 0],
temp_mesh.gridCC[:, 1],
v2d[:, antiNormalInd[0]],
v2d[:, antiNormalInd[1]],
pivot="mid",
scale_units="inches",
scale=scale,
linewidths=(1,),
edgecolors=(vec),
headaxislength=0.1,
headwidth=10,
headlength=30,
)
###########################################################################
# Forward modeling data
# ---------------------
#
# We can now create a magnetization model and generate data
# Lets start with a block below topography
#
model = np.zeros((mesh.nC, 3))
# Convert the inclination declination to vector in Cartesian
M_xyz = utils.mat_utils.dip_azimuth2cartesian(M[0], M[1])
# Get the indicies of the magnetized block
ind = utils.model_builder.getIndicesBlock(
np.r_[-20, -20, -10], np.r_[20, 20, 25], mesh.gridCC,
)[0]
# Assign magnetization values
model[ind, :] = np.kron(np.ones((ind.shape[0], 1)), M_xyz * 0.05)
# Remove air cells
model = model[actv, :]
# Create active map to go from reduce set to full
actvMap = maps.InjectActiveCells(mesh, actv, np.nan)
# Creat reduced identity map
idenMap = maps.IdentityMap(nP=nC * 3)
# Create the simulation
simulation = magnetics.simulation.Simulation3DIntegral(
survey=survey, mesh=mesh, chiMap=idenMap, actInd=actv, modelType="vector"
)
# Compute some data and add some random noise
d = simulation.dpred(mkvc(model))
std = 5 # nT
synthetic_data = d + np.random.randn(len(d)) * std
wd = np.ones(len(d)) * std
# Assign data and uncertainties to the survey
data_object = data.Data(survey, dobs=synthetic_data, standard_deviation=wd)
# Create an projection matrix for plotting later
actv_plot = maps.InjectActiveCells(mesh, actv, np.nan)
# Plot the model and data
plt.figure()
ax = plt.subplot(2, 1, 1)
im = utils.plot_utils.plot2Ddata(xyzLoc, synthetic_data, ax=ax)
plt.colorbar(im[0])
ax.set_title("Predicted data.")
plt.gca().set_aspect("equal", adjustable="box")
# Plot the vector model
ax = plt.subplot(2, 1, 2)
plotVectorSectionsOctree(
mesh,
model,
axs=ax,
normal="Y",
ind=66,
actvMap=actv_plot,
scale=0.5,
vmin=0.0,
vmax=0.025,
)
ax.set_xlim([-200, 200])
ax.set_ylim([-100, 75])
ax.set_xlabel("x")
ax.set_ylabel("y")
plt.gca().set_aspect("equal", adjustable="box")
plt.show()
######################################################################
# Inversion
# ---------
#
# We can now attempt the inverse calculations. We put some great care
# in design an inversion methology that would yield geologically
# reasonable solution for the non-induced problem.
# The inversion is done in two stages. First we compute a smooth
# solution using a Cartesian coordinate system, then a sparse
# inversion in the Spherical domain.
#
# Create sensitivity weights from our linear forward operator
rxLoc = survey.source_field.receiver_list[0].locations
# This Mapping connects the regularizations for the three-component
# vector model
wires = maps.Wires(("p", nC), ("s", nC), ("t", nC))
m0 = np.ones(3 * nC) * 1e-4 # Starting model
# Create three regularization for the different components
# of magnetization
reg_p = regularization.Sparse(mesh, indActive=actv, mapping=wires.p)
reg_p.mref = np.zeros(3 * nC)
reg_s = regularization.Sparse(mesh, indActive=actv, mapping=wires.s)
reg_s.mref = np.zeros(3 * nC)
reg_t = regularization.Sparse(mesh, indActive=actv, mapping=wires.t)
reg_t.mref = np.zeros(3 * nC)
reg = reg_p + reg_s + reg_t
reg.mref = np.zeros(3 * nC)
# Data misfit function
dmis = data_misfit.L2DataMisfit(simulation=simulation, data=data_object)
dmis.W = 1.0 / data_object.standard_deviation
# Add directives to the inversion
opt = optimization.ProjectedGNCG(
maxIter=10, lower=-10, upper=10.0, maxIterLS=20, maxIterCG=20, tolCG=1e-4
)
invProb = inverse_problem.BaseInvProblem(dmis, reg, opt)
# A list of directive to control the inverson
betaest = directives.BetaEstimate_ByEig(beta0_ratio=1e1)
# Add sensitivity weights
sensitivity_weights = directives.UpdateSensitivityWeights()
# Here is where the norms are applied
# Use pick a threshold parameter empirically based on the distribution of
# model parameters
IRLS = directives.Update_IRLS(f_min_change=1e-3, max_irls_iterations=2, beta_tol=5e-1)
# Pre-conditioner
update_Jacobi = directives.UpdatePreconditioner()
inv = inversion.BaseInversion(
invProb, directiveList=[sensitivity_weights, IRLS, update_Jacobi, betaest]
)
# Run the inversion
mrec_MVIC = inv.run(m0)
###############################################################
# Sparse Vector Inversion
# -----------------------
#
# Re-run the MVI in spherical domain so we can impose
# sparsity in the vectors.
#
#
spherical_map = maps.SphericalSystem()
m_start = utils.mat_utils.cartesian2spherical(mrec_MVIC.reshape((nC, 3), order="F"))
beta = invProb.beta
dmis.simulation.chiMap = spherical_map
dmis.simulation.model = m_start
# Create a block diagonal regularization
wires = maps.Wires(("amp", nC), ("theta", nC), ("phi", nC))
# Create a Combo Regularization
# Regularize the amplitude of the vectors
reg_a = regularization.Sparse(mesh, indActive=actv, mapping=wires.amp)
reg_a.norms = np.c_[0.0, 0.0, 0.0, 0.0] # Sparse on the model and its gradients
reg_a.mref = np.zeros(3 * nC)
# Regularize the vertical angle of the vectors
reg_t = regularization.Sparse(mesh, indActive=actv, mapping=wires.theta)
reg_t.alpha_s = 0.0 # No reference angle
reg_t.space = "spherical"
reg_t.norms = np.c_[0.0, 0.0, 0.0, 0.0] # Only norm on gradients used
# Regularize the horizontal angle of the vectors
reg_p = regularization.Sparse(mesh, indActive=actv, mapping=wires.phi)
reg_p.alpha_s = 0.0 # No reference angle
reg_p.space = "spherical"
reg_p.norms = np.c_[0.0, 0.0, 0.0, 0.0] # Only norm on gradients used
reg = reg_a + reg_t + reg_p
reg.mref = np.zeros(3 * nC)
lower_bound = np.kron(np.asarray([0, -np.inf, -np.inf]), np.ones(nC))
upper_bound = np.kron(np.asarray([10, np.inf, np.inf]), np.ones(nC))
# Add directives to the inversion
opt = optimization.ProjectedGNCG(
maxIter=20,
lower=lower_bound,
upper=upper_bound,
maxIterLS=20,
maxIterCG=30,
tolCG=1e-3,
stepOffBoundsFact=1e-3,
)
opt.approxHinv = None
invProb = inverse_problem.BaseInvProblem(dmis, reg, opt, beta=beta)
# Here is where the norms are applied
irls = directives.Update_IRLS(
f_min_change=1e-4,
max_irls_iterations=20,
minGNiter=1,
beta_tol=0.5,
coolingRate=1,
coolEps_q=True,
sphericalDomain=True,
)
# Special directive specific to the mag amplitude problem. The sensitivity
# weights are update between each iteration.
spherical_projection = directives.ProjectSphericalBounds()
sensitivity_weights = directives.UpdateSensitivityWeights()
update_Jacobi = directives.UpdatePreconditioner()
inv = inversion.BaseInversion(
invProb,
directiveList=[spherical_projection, irls, sensitivity_weights, update_Jacobi],
)
mrec_MVI_S = inv.run(m_start)
#############################################################
# Final Plot
# ----------
#
# Let's compare the smooth and compact model
#
#
#
plt.figure(figsize=(8, 8))
ax = plt.subplot(2, 1, 1)
plotVectorSectionsOctree(
mesh,
mrec_MVIC.reshape((nC, 3), order="F"),
axs=ax,
normal="Y",
ind=65,
actvMap=actv_plot,
scale=0.05,
vmin=0.0,
vmax=0.005,
)
ax.set_xlim([-200, 200])
ax.set_ylim([-100, 75])
ax.set_title("Smooth model (Cartesian)")
ax.set_xlabel("x")
ax.set_ylabel("y")
plt.gca().set_aspect("equal", adjustable="box")
ax = plt.subplot(2, 1, 2)
vec_xyz = utils.mat_utils.spherical2cartesian(
invProb.model.reshape((nC, 3), order="F")
).reshape((nC, 3), order="F")
plotVectorSectionsOctree(
mesh,
vec_xyz,
axs=ax,
normal="Y",
ind=65,
actvMap=actv_plot,
scale=0.4,
vmin=0.0,
vmax=0.025,
)
ax.set_xlim([-200, 200])
ax.set_ylim([-100, 75])
ax.set_title("Sparse model (Spherical)")
ax.set_xlabel("x")
ax.set_ylabel("y")
plt.gca().set_aspect("equal", adjustable="box")
plt.show()
# Plot the final predicted data and the residual
plt.figure()
ax = plt.subplot(1, 2, 1)
utils.plot_utils.plot2Ddata(xyzLoc, invProb.dpred, ax=ax)
ax.set_title("Predicted data.")
plt.gca().set_aspect("equal", adjustable="box")
ax = plt.subplot(1, 2, 2)
utils.plot_utils.plot2Ddata(xyzLoc, synthetic_data - invProb.dpred, ax=ax)
ax.set_title("Data residual.")
plt.gca().set_aspect("equal", adjustable="box")
| 28.252465 | 86 | 0.669785 |
from discretize import TreeMesh
from SimPEG import (
data,
data_misfit,
directives,
maps,
inverse_problem,
optimization,
inversion,
regularization,
)
from SimPEG import utils
from SimPEG.utils import mkvc
from discretize.utils import mesh_builder_xyz, refine_tree_xyz
from SimPEG.potential_fields import magnetics
import scipy as sp
import numpy as np
import matplotlib.pyplot as plt
| true | true |
1c378325f2d1b1ac52d86fca34abbb71261ec135 | 2,811 | py | Python | tests/tests/correctness/EPLAnalytics/Detectors/Drift/drift_cor_006/run.py | rpeach-sag/apama-industry-analytics-kit | a3f6039915501d41251b6f7ec41b0cb8111baf7b | [
"Apache-2.0"
] | 3 | 2019-09-02T18:21:22.000Z | 2020-04-17T16:34:57.000Z | tests/tests/correctness/EPLAnalytics/Detectors/Drift/drift_cor_006/run.py | rpeach-sag/apama-industry-analytics-kit | a3f6039915501d41251b6f7ec41b0cb8111baf7b | [
"Apache-2.0"
] | null | null | null | tests/tests/correctness/EPLAnalytics/Detectors/Drift/drift_cor_006/run.py | rpeach-sag/apama-industry-analytics-kit | a3f6039915501d41251b6f7ec41b0cb8111baf7b | [
"Apache-2.0"
] | null | null | null | # $Copyright (c) 2015 Software AG, Darmstadt, Germany and/or Software AG USA Inc., Reston, VA, USA, and/or Terracotta Inc., San Francisco, CA, USA, and/or Software AG (Canada) Inc., Cambridge, Ontario, Canada, and/or, Software AG (UK) Ltd., Derby, United Kingdom, and/or Software A.G. (Israel) Ltd., Or-Yehuda, Israel and/or their licensors.$
# Use, reproduction, transfer, publication or disclosure is prohibited except as specifically provided for in your License Agreement with Software AG
from industry.framework.AnalyticsBaseTest import AnalyticsBaseTest
from pysys.constants import *
class PySysTest(AnalyticsBaseTest):
def execute(self):
# Start the correlator
correlator = self.startTest(logfile='correlator.log', logLevel="DEBUG")
self.injectAnalytic(correlator)
self.injectDrift(correlator)
self.ready(correlator)
correlator.receive(filename='OutputValue.evt', channels=['OutputValue'])
correlator.receive(filename='OutputPercentage.evt', channels=['OutputPercentage'])
correlator.receive(filename='OutputStandardDeviation.evt', channels=['OutputStandardDeviation'])
correlator.send('Config.evt')
self.waitForSignal('correlator.log',
expr='Analytic Drift started for inputDataNames',
condition='==3',
timeout=5)
correlator.send('BaselineMeasures.evt')
self.waitForSignal('correlator.log',
expr='Boundaries for sourceId',
condition='==3',
timeout=5)
correlator.send('ThresholdMeasures.evt')
self.waitForSignal('OutputStandardDeviation.evt',
expr='com\.industry\.analytics\.Data',
condition='==2',
timeout=5)
def validate(self):
# Ensure the test output was correct
exprList=[]
exprList.append('Validating com.industry.analytics.Analytic\("Drift",\["Input"\],\["OutputValue"\],{"offset":"2","offsetType":"absolute"}\)')
exprList.append('Validating com.industry.analytics.Analytic\("Drift",\["Input"\],\["OutputPercentage"\],{"offset":"10","offsetType":"percentage"}\)')
exprList.append('Validating com.industry.analytics.Analytic\("Drift",\["Input"\],\["OutputStandardDeviation"\],{"offset":"2","offsetType":"stddev"}\)')
self.assertOrderedGrep("correlator.log", exprList=exprList)
# Make sure that the we got the right log lines
self.assertLineCount('correlator.log', expr='Validating com.industry.analytics.Analytic\("Drift",', condition='==3')
self.assertLineCount('correlator.log', expr='Analytic Drift started for inputDataNames \["Input"\]', condition='==3')
self.assertDiff('OutputValue.evt',
'OutputValue.evt')
self.assertDiff('OutputPercentage.evt',
'OutputPercentage.evt')
self.assertDiff('OutputStandardDeviation.evt',
'OutputStandardDeviation.evt')
self.checkSanity()
| 50.196429 | 343 | 0.711491 |
from industry.framework.AnalyticsBaseTest import AnalyticsBaseTest
from pysys.constants import *
class PySysTest(AnalyticsBaseTest):
def execute(self):
correlator = self.startTest(logfile='correlator.log', logLevel="DEBUG")
self.injectAnalytic(correlator)
self.injectDrift(correlator)
self.ready(correlator)
correlator.receive(filename='OutputValue.evt', channels=['OutputValue'])
correlator.receive(filename='OutputPercentage.evt', channels=['OutputPercentage'])
correlator.receive(filename='OutputStandardDeviation.evt', channels=['OutputStandardDeviation'])
correlator.send('Config.evt')
self.waitForSignal('correlator.log',
expr='Analytic Drift started for inputDataNames',
condition='==3',
timeout=5)
correlator.send('BaselineMeasures.evt')
self.waitForSignal('correlator.log',
expr='Boundaries for sourceId',
condition='==3',
timeout=5)
correlator.send('ThresholdMeasures.evt')
self.waitForSignal('OutputStandardDeviation.evt',
expr='com\.industry\.analytics\.Data',
condition='==2',
timeout=5)
def validate(self):
exprList=[]
exprList.append('Validating com.industry.analytics.Analytic\("Drift",\["Input"\],\["OutputValue"\],{"offset":"2","offsetType":"absolute"}\)')
exprList.append('Validating com.industry.analytics.Analytic\("Drift",\["Input"\],\["OutputPercentage"\],{"offset":"10","offsetType":"percentage"}\)')
exprList.append('Validating com.industry.analytics.Analytic\("Drift",\["Input"\],\["OutputStandardDeviation"\],{"offset":"2","offsetType":"stddev"}\)')
self.assertOrderedGrep("correlator.log", exprList=exprList)
self.assertLineCount('correlator.log', expr='Validating com.industry.analytics.Analytic\("Drift",', condition='==3')
self.assertLineCount('correlator.log', expr='Analytic Drift started for inputDataNames \["Input"\]', condition='==3')
self.assertDiff('OutputValue.evt',
'OutputValue.evt')
self.assertDiff('OutputPercentage.evt',
'OutputPercentage.evt')
self.assertDiff('OutputStandardDeviation.evt',
'OutputStandardDeviation.evt')
self.checkSanity()
| true | true |
1c37840cb570ab3ca2252b7cfd8d758f9e8e0853 | 14,975 | gyp | Python | chrome/chrome_nibs.gyp | pozdnyakov/chromium-crosswalk | 0fb25c7278bf1d93e53a3b0bcb75aa8b99d4b26e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2020-05-03T06:33:56.000Z | 2021-11-14T18:39:42.000Z | chrome/chrome_nibs.gyp | pozdnyakov/chromium-crosswalk | 0fb25c7278bf1d93e53a3b0bcb75aa8b99d4b26e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | chrome/chrome_nibs.gyp | pozdnyakov/chromium-crosswalk | 0fb25c7278bf1d93e53a3b0bcb75aa8b99d4b26e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This gyp file creates a fake target that is used to generate a minimal Xcode
# project, useful for editing XIB files.
#
# The sole target is called "chrome_nibs" and its sources are the minimum
# dependency set for all of the classes referred to by XIB files. If you are
# editing or adding a new XIB file, ensure that any classes to which you refer
# in the XIB are listed (both header and implementation) here so that Xcode can
# connect them.
#
# This target DOES NOT BUILD. Attempting to do so will generate lots of errors.
# Only use this target for editing XIBs.
#
# For more information, see
# <http://dev.chromium.org/developers/design-documents/mac-xib-files>.
{
'variables': {
'chromium_code': 1,
},
'includes': [
'../build/common.gypi',
'chrome_nibs.gypi',
],
'target_defaults': {
'include_dirs': [
'..',
],
},
'targets': [
{
'target_name': 'chrome_nibs',
'type': 'executable',
'mac_bundle': 1,
'sources': [
'../third_party/GTM/AppKit/GTMUILocalizer.h',
'../third_party/GTM/AppKit/GTMUILocalizer.mm',
'../third_party/GTM/AppKit/GTMUILocalizerAndLayoutTweaker.h',
'../third_party/GTM/AppKit/GTMUILocalizerAndLayoutTweaker.mm',
'../ui/base/cocoa/base_view.h',
'../ui/base/cocoa/base_view.mm',
'../ui/base/cocoa/hover_button.h',
'../ui/base/cocoa/hover_button.mm',
'../ui/base/cocoa/hover_image_button.h',
'../ui/base/cocoa/hover_image_button.mm',
'../ui/base/cocoa/menu_controller.h',
'../ui/base/cocoa/menu_controller.mm',
'browser/ui/cocoa/about_ipc_controller.h',
'browser/ui/cocoa/about_ipc_controller.mm',
'browser/ui/cocoa/animatable_view.h',
'browser/ui/cocoa/animatable_view.mm',
'browser/ui/cocoa/background_gradient_view.h',
'browser/ui/cocoa/background_gradient_view.mm',
'browser/ui/cocoa/base_bubble_controller.h',
'browser/ui/cocoa/base_bubble_controller.mm',
'browser/ui/cocoa/bookmarks/bookmark_all_tabs_controller.h',
'browser/ui/cocoa/bookmarks/bookmark_all_tabs_controller.mm',
'browser/ui/cocoa/bookmarks/bookmark_bar_controller.h',
'browser/ui/cocoa/bookmarks/bookmark_bar_controller.mm',
'browser/ui/cocoa/bookmarks/bookmark_bar_folder_controller.h',
'browser/ui/cocoa/bookmarks/bookmark_bar_folder_controller.mm',
'browser/ui/cocoa/bookmarks/bookmark_bar_folder_view.h',
'browser/ui/cocoa/bookmarks/bookmark_bar_folder_view.mm',
'browser/ui/cocoa/bookmarks/bookmark_bar_folder_window.h',
'browser/ui/cocoa/bookmarks/bookmark_bar_folder_window.mm',
'browser/ui/cocoa/bookmarks/bookmark_bar_toolbar_view.h',
'browser/ui/cocoa/bookmarks/bookmark_bar_toolbar_view.mm',
'browser/ui/cocoa/bookmarks/bookmark_bar_unittest_helper.h',
'browser/ui/cocoa/bookmarks/bookmark_bar_unittest_helper.mm',
'browser/ui/cocoa/bookmarks/bookmark_bar_view.h',
'browser/ui/cocoa/bookmarks/bookmark_bar_view.mm',
'browser/ui/cocoa/bookmarks/bookmark_bubble_controller.h',
'browser/ui/cocoa/bookmarks/bookmark_bubble_controller.mm',
'browser/ui/cocoa/bookmarks/bookmark_button.h',
'browser/ui/cocoa/bookmarks/bookmark_button.mm',
'browser/ui/cocoa/bookmarks/bookmark_button_cell.h',
'browser/ui/cocoa/bookmarks/bookmark_button_cell.mm',
'browser/ui/cocoa/bookmarks/bookmark_editor_base_controller.h',
'browser/ui/cocoa/bookmarks/bookmark_editor_base_controller.mm',
'browser/ui/cocoa/bookmarks/bookmark_name_folder_controller.h',
'browser/ui/cocoa/bookmarks/bookmark_name_folder_controller.mm',
'browser/ui/cocoa/browser/avatar_menu_bubble_controller.h',
'browser/ui/cocoa/browser/avatar_menu_bubble_controller.mm',
'browser/ui/cocoa/browser_window_controller.h',
'browser/ui/cocoa/browser_window_controller.mm',
'browser/ui/cocoa/browser_window_controller_private.h',
'browser/ui/cocoa/browser_window_controller_private.mm',
'browser/ui/cocoa/chrome_browser_window.h',
'browser/ui/cocoa/chrome_browser_window.mm',
'browser/ui/cocoa/chrome_event_processing_window.h',
'browser/ui/cocoa/chrome_event_processing_window.mm',
'browser/ui/cocoa/chrome_to_mobile_bubble_controller.h',
'browser/ui/cocoa/chrome_to_mobile_bubble_controller.mm',
'browser/ui/cocoa/clickhold_button_cell.h',
'browser/ui/cocoa/clickhold_button_cell.mm',
'browser/ui/cocoa/content_settings/collected_cookies_mac.h',
'browser/ui/cocoa/content_settings/collected_cookies_mac.mm',
'browser/ui/cocoa/content_settings/content_setting_bubble_cocoa.h',
'browser/ui/cocoa/content_settings/content_setting_bubble_cocoa.mm',
'browser/ui/cocoa/content_settings/cookie_details_view_controller.h',
'browser/ui/cocoa/content_settings/cookie_details_view_controller.mm',
'browser/ui/cocoa/custom_frame_view.h',
'browser/ui/cocoa/custom_frame_view.mm',
'browser/ui/cocoa/download/download_item_button.h',
'browser/ui/cocoa/download/download_item_button.mm',
'browser/ui/cocoa/download/download_item_cell.h',
'browser/ui/cocoa/download/download_item_cell.mm',
'browser/ui/cocoa/download/download_item_controller.h',
'browser/ui/cocoa/download/download_item_controller.mm',
'browser/ui/cocoa/download/download_shelf_controller.h',
'browser/ui/cocoa/download/download_shelf_controller.mm',
'browser/ui/cocoa/download/download_shelf_view.h',
'browser/ui/cocoa/download/download_shelf_view.mm',
'browser/ui/cocoa/download/download_show_all_button.h',
'browser/ui/cocoa/download/download_show_all_button.mm',
'browser/ui/cocoa/download/download_show_all_cell.h',
'browser/ui/cocoa/download/download_show_all_cell.mm',
'browser/ui/cocoa/draggable_button.h',
'browser/ui/cocoa/draggable_button.mm',
'browser/ui/cocoa/browser/edit_search_engine_cocoa_controller.h',
'browser/ui/cocoa/browser/edit_search_engine_cocoa_controller.mm',
'browser/ui/cocoa/constrained_window/constrained_window_button.h',
'browser/ui/cocoa/constrained_window/constrained_window_button.mm',
'browser/ui/cocoa/constrained_window/constrained_window_custom_window.h',
'browser/ui/cocoa/constrained_window/constrained_window_custom_window.mm',
'browser/ui/cocoa/extensions/browser_actions_container_view.h',
'browser/ui/cocoa/extensions/browser_actions_container_view.mm',
'browser/ui/cocoa/extensions/extension_install_dialog_controller.h',
'browser/ui/cocoa/extensions/extension_install_dialog_controller.mm',
'browser/ui/cocoa/extensions/extension_install_view_controller.h',
'browser/ui/cocoa/extensions/extension_install_view_controller.mm',
'browser/ui/cocoa/extensions/extension_installed_bubble_controller.h',
'browser/ui/cocoa/extensions/extension_installed_bubble_controller.mm',
'browser/ui/cocoa/fast_resize_view.h',
'browser/ui/cocoa/fast_resize_view.mm',
'browser/ui/cocoa/find_bar/find_bar_cocoa_controller.h',
'browser/ui/cocoa/find_bar/find_bar_cocoa_controller.mm',
'browser/ui/cocoa/find_bar/find_bar_text_field.h',
'browser/ui/cocoa/find_bar/find_bar_text_field.mm',
'browser/ui/cocoa/find_bar/find_bar_text_field_cell.h',
'browser/ui/cocoa/find_bar/find_bar_text_field_cell.mm',
'browser/ui/cocoa/find_bar/find_bar_view.h',
'browser/ui/cocoa/find_bar/find_bar_view.mm',
'browser/ui/cocoa/first_run_bubble_controller.h',
'browser/ui/cocoa/first_run_bubble_controller.mm',
'browser/ui/cocoa/first_run_dialog.h',
'browser/ui/cocoa/first_run_dialog.mm',
'browser/ui/cocoa/framed_browser_window.h',
'browser/ui/cocoa/framed_browser_window.mm',
'browser/ui/cocoa/fullscreen_exit_bubble_controller.h',
'browser/ui/cocoa/fullscreen_exit_bubble_controller.mm',
'browser/ui/cocoa/fullscreen_exit_bubble_view.h',
'browser/ui/cocoa/fullscreen_exit_bubble_view.mm',
'browser/ui/cocoa/global_error_bubble_controller.h',
'browser/ui/cocoa/global_error_bubble_controller.mm',
'browser/ui/cocoa/gradient_button_cell.h',
'browser/ui/cocoa/gradient_button_cell.mm',
'browser/ui/cocoa/hover_close_button.h',
'browser/ui/cocoa/hover_close_button.mm',
'browser/ui/cocoa/hung_renderer_controller.h',
'browser/ui/cocoa/hung_renderer_controller.mm',
'browser/ui/cocoa/hyperlink_button_cell.h',
'browser/ui/cocoa/hyperlink_button_cell.mm',
'browser/ui/cocoa/image_button_cell.h',
'browser/ui/cocoa/image_button_cell.mm',
'browser/ui/cocoa/info_bubble_view.h',
'browser/ui/cocoa/info_bubble_view.mm',
'browser/ui/cocoa/info_bubble_window.h',
'browser/ui/cocoa/info_bubble_window.mm',
'browser/ui/cocoa/infobars/after_translate_infobar_controller.h',
'browser/ui/cocoa/infobars/after_translate_infobar_controller.mm',
'browser/ui/cocoa/infobars/alternate_nav_infobar_controller.h',
'browser/ui/cocoa/infobars/alternate_nav_infobar_controller.mm',
'browser/ui/cocoa/infobars/before_translate_infobar_controller.h',
'browser/ui/cocoa/infobars/before_translate_infobar_controller.mm',
'browser/ui/cocoa/infobars/confirm_infobar_controller.h',
'browser/ui/cocoa/infobars/confirm_infobar_controller.mm',
'browser/ui/cocoa/infobars/extension_infobar_controller.h',
'browser/ui/cocoa/infobars/extension_infobar_controller.mm',
'browser/ui/cocoa/infobars/infobar_container_controller.h',
'browser/ui/cocoa/infobars/infobar_container_controller.mm',
'browser/ui/cocoa/infobars/infobar_controller.h',
'browser/ui/cocoa/infobars/infobar_controller.mm',
'browser/ui/cocoa/infobars/infobar_gradient_view.h',
'browser/ui/cocoa/infobars/infobar_gradient_view.mm',
'browser/ui/cocoa/location_bar/action_box_menu_bubble_controller.h',
'browser/ui/cocoa/location_bar/action_box_menu_bubble_controller.mm',
'browser/ui/cocoa/location_bar/autocomplete_text_field.h',
'browser/ui/cocoa/location_bar/autocomplete_text_field.mm',
'browser/ui/cocoa/location_bar/autocomplete_text_field_cell.h',
'browser/ui/cocoa/location_bar/autocomplete_text_field_cell.mm',
'browser/ui/cocoa/login_prompt_cocoa.h',
'browser/ui/cocoa/login_prompt_cocoa.mm',
'browser/ui/cocoa/menu_button.h',
'browser/ui/cocoa/menu_button.mm',
'browser/ui/cocoa/multi_key_equivalent_button.h',
'browser/ui/cocoa/multi_key_equivalent_button.mm',
'browser/ui/cocoa/new_tab_button.h',
'browser/ui/cocoa/new_tab_button.mm',
'browser/ui/cocoa/notifications/balloon_controller.h',
'browser/ui/cocoa/notifications/balloon_controller.mm',
'browser/ui/cocoa/notifications/balloon_view.h',
'browser/ui/cocoa/notifications/balloon_view.mm',
'browser/ui/cocoa/nsmenuitem_additions.h',
'browser/ui/cocoa/nsmenuitem_additions.mm',
'browser/ui/cocoa/nsview_additions.h',
'browser/ui/cocoa/nsview_additions.mm',
'browser/ui/cocoa/one_click_signin_view_controller.h',
'browser/ui/cocoa/one_click_signin_view_controller.mm',
'browser/ui/cocoa/screen_capture_notification_ui_cocoa.h',
'browser/ui/cocoa/screen_capture_notification_ui_cocoa.mm',
'browser/ui/cocoa/speech_recognition_window_controller.h',
'browser/ui/cocoa/speech_recognition_window_controller.mm',
'browser/ui/cocoa/status_bubble_mac.h',
'browser/ui/cocoa/status_bubble_mac.mm',
'browser/ui/cocoa/styled_text_field.h',
'browser/ui/cocoa/styled_text_field.mm',
'browser/ui/cocoa/styled_text_field_cell.h',
'browser/ui/cocoa/styled_text_field_cell.mm',
'browser/ui/cocoa/tab_contents/overlayable_contents_controller.h',
'browser/ui/cocoa/tab_contents/overlayable_contents_controller.mm',
'browser/ui/cocoa/tab_contents/sad_tab_controller.h',
'browser/ui/cocoa/tab_contents/sad_tab_controller.mm',
'browser/ui/cocoa/tab_contents/sad_tab_view.h',
'browser/ui/cocoa/tab_contents/sad_tab_view.mm',
'browser/ui/cocoa/tabs/tab_controller.h',
'browser/ui/cocoa/tabs/tab_controller.mm',
'browser/ui/cocoa/tabs/tab_strip_model_observer_bridge.h',
'browser/ui/cocoa/tabs/tab_strip_model_observer_bridge.mm',
'browser/ui/cocoa/tabs/tab_strip_view.h',
'browser/ui/cocoa/tabs/tab_strip_view.mm',
'browser/ui/cocoa/tabs/tab_view.h',
'browser/ui/cocoa/tabs/tab_view.mm',
'browser/ui/cocoa/tabs/tab_window_controller.h',
'browser/ui/cocoa/tabs/tab_window_controller.mm',
'browser/ui/cocoa/task_manager_mac.h',
'browser/ui/cocoa/task_manager_mac.mm',
'browser/ui/cocoa/themed_window.h',
'browser/ui/cocoa/themed_window.mm',
'browser/ui/cocoa/toolbar/reload_button.h',
'browser/ui/cocoa/toolbar/reload_button.mm',
'browser/ui/cocoa/toolbar/toolbar_button.h',
'browser/ui/cocoa/toolbar/toolbar_button.mm',
'browser/ui/cocoa/toolbar/toolbar_controller.h',
'browser/ui/cocoa/toolbar/toolbar_controller.mm',
'browser/ui/cocoa/toolbar/toolbar_view.h',
'browser/ui/cocoa/toolbar/toolbar_view.mm',
'browser/ui/cocoa/toolbar/wrench_toolbar_button_cell.h',
'browser/ui/cocoa/toolbar/wrench_toolbar_button_cell.mm',
'browser/ui/cocoa/ui_localizer.h',
'browser/ui/cocoa/ui_localizer.mm',
'browser/ui/cocoa/vertical_gradient_view.h',
'browser/ui/cocoa/vertical_gradient_view.mm',
'browser/ui/cocoa/view_id_util.h',
'browser/ui/cocoa/view_id_util.mm',
'browser/ui/cocoa/wrench_menu/menu_tracked_root_view.h',
'browser/ui/cocoa/wrench_menu/menu_tracked_root_view.mm',
'browser/ui/cocoa/wrench_menu/wrench_menu_controller.h',
'browser/ui/cocoa/wrench_menu/wrench_menu_controller.mm',
'browser/ui/cocoa/panels/panel_titlebar_view_cocoa.h',
'browser/ui/cocoa/panels/panel_titlebar_view_cocoa.mm',
'browser/ui/cocoa/panels/panel_window_controller_cocoa.h',
'browser/ui/cocoa/panels/panel_window_controller_cocoa.mm',
],
'mac_bundle_resources': [
'<@(mac_all_xibs)',
],
}, # target chrome_xibs
], # targets
}
| 54.85348 | 82 | 0.71606 |
{
'variables': {
'chromium_code': 1,
},
'includes': [
'../build/common.gypi',
'chrome_nibs.gypi',
],
'target_defaults': {
'include_dirs': [
'..',
],
},
'targets': [
{
'target_name': 'chrome_nibs',
'type': 'executable',
'mac_bundle': 1,
'sources': [
'../third_party/GTM/AppKit/GTMUILocalizer.h',
'../third_party/GTM/AppKit/GTMUILocalizer.mm',
'../third_party/GTM/AppKit/GTMUILocalizerAndLayoutTweaker.h',
'../third_party/GTM/AppKit/GTMUILocalizerAndLayoutTweaker.mm',
'../ui/base/cocoa/base_view.h',
'../ui/base/cocoa/base_view.mm',
'../ui/base/cocoa/hover_button.h',
'../ui/base/cocoa/hover_button.mm',
'../ui/base/cocoa/hover_image_button.h',
'../ui/base/cocoa/hover_image_button.mm',
'../ui/base/cocoa/menu_controller.h',
'../ui/base/cocoa/menu_controller.mm',
'browser/ui/cocoa/about_ipc_controller.h',
'browser/ui/cocoa/about_ipc_controller.mm',
'browser/ui/cocoa/animatable_view.h',
'browser/ui/cocoa/animatable_view.mm',
'browser/ui/cocoa/background_gradient_view.h',
'browser/ui/cocoa/background_gradient_view.mm',
'browser/ui/cocoa/base_bubble_controller.h',
'browser/ui/cocoa/base_bubble_controller.mm',
'browser/ui/cocoa/bookmarks/bookmark_all_tabs_controller.h',
'browser/ui/cocoa/bookmarks/bookmark_all_tabs_controller.mm',
'browser/ui/cocoa/bookmarks/bookmark_bar_controller.h',
'browser/ui/cocoa/bookmarks/bookmark_bar_controller.mm',
'browser/ui/cocoa/bookmarks/bookmark_bar_folder_controller.h',
'browser/ui/cocoa/bookmarks/bookmark_bar_folder_controller.mm',
'browser/ui/cocoa/bookmarks/bookmark_bar_folder_view.h',
'browser/ui/cocoa/bookmarks/bookmark_bar_folder_view.mm',
'browser/ui/cocoa/bookmarks/bookmark_bar_folder_window.h',
'browser/ui/cocoa/bookmarks/bookmark_bar_folder_window.mm',
'browser/ui/cocoa/bookmarks/bookmark_bar_toolbar_view.h',
'browser/ui/cocoa/bookmarks/bookmark_bar_toolbar_view.mm',
'browser/ui/cocoa/bookmarks/bookmark_bar_unittest_helper.h',
'browser/ui/cocoa/bookmarks/bookmark_bar_unittest_helper.mm',
'browser/ui/cocoa/bookmarks/bookmark_bar_view.h',
'browser/ui/cocoa/bookmarks/bookmark_bar_view.mm',
'browser/ui/cocoa/bookmarks/bookmark_bubble_controller.h',
'browser/ui/cocoa/bookmarks/bookmark_bubble_controller.mm',
'browser/ui/cocoa/bookmarks/bookmark_button.h',
'browser/ui/cocoa/bookmarks/bookmark_button.mm',
'browser/ui/cocoa/bookmarks/bookmark_button_cell.h',
'browser/ui/cocoa/bookmarks/bookmark_button_cell.mm',
'browser/ui/cocoa/bookmarks/bookmark_editor_base_controller.h',
'browser/ui/cocoa/bookmarks/bookmark_editor_base_controller.mm',
'browser/ui/cocoa/bookmarks/bookmark_name_folder_controller.h',
'browser/ui/cocoa/bookmarks/bookmark_name_folder_controller.mm',
'browser/ui/cocoa/browser/avatar_menu_bubble_controller.h',
'browser/ui/cocoa/browser/avatar_menu_bubble_controller.mm',
'browser/ui/cocoa/browser_window_controller.h',
'browser/ui/cocoa/browser_window_controller.mm',
'browser/ui/cocoa/browser_window_controller_private.h',
'browser/ui/cocoa/browser_window_controller_private.mm',
'browser/ui/cocoa/chrome_browser_window.h',
'browser/ui/cocoa/chrome_browser_window.mm',
'browser/ui/cocoa/chrome_event_processing_window.h',
'browser/ui/cocoa/chrome_event_processing_window.mm',
'browser/ui/cocoa/chrome_to_mobile_bubble_controller.h',
'browser/ui/cocoa/chrome_to_mobile_bubble_controller.mm',
'browser/ui/cocoa/clickhold_button_cell.h',
'browser/ui/cocoa/clickhold_button_cell.mm',
'browser/ui/cocoa/content_settings/collected_cookies_mac.h',
'browser/ui/cocoa/content_settings/collected_cookies_mac.mm',
'browser/ui/cocoa/content_settings/content_setting_bubble_cocoa.h',
'browser/ui/cocoa/content_settings/content_setting_bubble_cocoa.mm',
'browser/ui/cocoa/content_settings/cookie_details_view_controller.h',
'browser/ui/cocoa/content_settings/cookie_details_view_controller.mm',
'browser/ui/cocoa/custom_frame_view.h',
'browser/ui/cocoa/custom_frame_view.mm',
'browser/ui/cocoa/download/download_item_button.h',
'browser/ui/cocoa/download/download_item_button.mm',
'browser/ui/cocoa/download/download_item_cell.h',
'browser/ui/cocoa/download/download_item_cell.mm',
'browser/ui/cocoa/download/download_item_controller.h',
'browser/ui/cocoa/download/download_item_controller.mm',
'browser/ui/cocoa/download/download_shelf_controller.h',
'browser/ui/cocoa/download/download_shelf_controller.mm',
'browser/ui/cocoa/download/download_shelf_view.h',
'browser/ui/cocoa/download/download_shelf_view.mm',
'browser/ui/cocoa/download/download_show_all_button.h',
'browser/ui/cocoa/download/download_show_all_button.mm',
'browser/ui/cocoa/download/download_show_all_cell.h',
'browser/ui/cocoa/download/download_show_all_cell.mm',
'browser/ui/cocoa/draggable_button.h',
'browser/ui/cocoa/draggable_button.mm',
'browser/ui/cocoa/browser/edit_search_engine_cocoa_controller.h',
'browser/ui/cocoa/browser/edit_search_engine_cocoa_controller.mm',
'browser/ui/cocoa/constrained_window/constrained_window_button.h',
'browser/ui/cocoa/constrained_window/constrained_window_button.mm',
'browser/ui/cocoa/constrained_window/constrained_window_custom_window.h',
'browser/ui/cocoa/constrained_window/constrained_window_custom_window.mm',
'browser/ui/cocoa/extensions/browser_actions_container_view.h',
'browser/ui/cocoa/extensions/browser_actions_container_view.mm',
'browser/ui/cocoa/extensions/extension_install_dialog_controller.h',
'browser/ui/cocoa/extensions/extension_install_dialog_controller.mm',
'browser/ui/cocoa/extensions/extension_install_view_controller.h',
'browser/ui/cocoa/extensions/extension_install_view_controller.mm',
'browser/ui/cocoa/extensions/extension_installed_bubble_controller.h',
'browser/ui/cocoa/extensions/extension_installed_bubble_controller.mm',
'browser/ui/cocoa/fast_resize_view.h',
'browser/ui/cocoa/fast_resize_view.mm',
'browser/ui/cocoa/find_bar/find_bar_cocoa_controller.h',
'browser/ui/cocoa/find_bar/find_bar_cocoa_controller.mm',
'browser/ui/cocoa/find_bar/find_bar_text_field.h',
'browser/ui/cocoa/find_bar/find_bar_text_field.mm',
'browser/ui/cocoa/find_bar/find_bar_text_field_cell.h',
'browser/ui/cocoa/find_bar/find_bar_text_field_cell.mm',
'browser/ui/cocoa/find_bar/find_bar_view.h',
'browser/ui/cocoa/find_bar/find_bar_view.mm',
'browser/ui/cocoa/first_run_bubble_controller.h',
'browser/ui/cocoa/first_run_bubble_controller.mm',
'browser/ui/cocoa/first_run_dialog.h',
'browser/ui/cocoa/first_run_dialog.mm',
'browser/ui/cocoa/framed_browser_window.h',
'browser/ui/cocoa/framed_browser_window.mm',
'browser/ui/cocoa/fullscreen_exit_bubble_controller.h',
'browser/ui/cocoa/fullscreen_exit_bubble_controller.mm',
'browser/ui/cocoa/fullscreen_exit_bubble_view.h',
'browser/ui/cocoa/fullscreen_exit_bubble_view.mm',
'browser/ui/cocoa/global_error_bubble_controller.h',
'browser/ui/cocoa/global_error_bubble_controller.mm',
'browser/ui/cocoa/gradient_button_cell.h',
'browser/ui/cocoa/gradient_button_cell.mm',
'browser/ui/cocoa/hover_close_button.h',
'browser/ui/cocoa/hover_close_button.mm',
'browser/ui/cocoa/hung_renderer_controller.h',
'browser/ui/cocoa/hung_renderer_controller.mm',
'browser/ui/cocoa/hyperlink_button_cell.h',
'browser/ui/cocoa/hyperlink_button_cell.mm',
'browser/ui/cocoa/image_button_cell.h',
'browser/ui/cocoa/image_button_cell.mm',
'browser/ui/cocoa/info_bubble_view.h',
'browser/ui/cocoa/info_bubble_view.mm',
'browser/ui/cocoa/info_bubble_window.h',
'browser/ui/cocoa/info_bubble_window.mm',
'browser/ui/cocoa/infobars/after_translate_infobar_controller.h',
'browser/ui/cocoa/infobars/after_translate_infobar_controller.mm',
'browser/ui/cocoa/infobars/alternate_nav_infobar_controller.h',
'browser/ui/cocoa/infobars/alternate_nav_infobar_controller.mm',
'browser/ui/cocoa/infobars/before_translate_infobar_controller.h',
'browser/ui/cocoa/infobars/before_translate_infobar_controller.mm',
'browser/ui/cocoa/infobars/confirm_infobar_controller.h',
'browser/ui/cocoa/infobars/confirm_infobar_controller.mm',
'browser/ui/cocoa/infobars/extension_infobar_controller.h',
'browser/ui/cocoa/infobars/extension_infobar_controller.mm',
'browser/ui/cocoa/infobars/infobar_container_controller.h',
'browser/ui/cocoa/infobars/infobar_container_controller.mm',
'browser/ui/cocoa/infobars/infobar_controller.h',
'browser/ui/cocoa/infobars/infobar_controller.mm',
'browser/ui/cocoa/infobars/infobar_gradient_view.h',
'browser/ui/cocoa/infobars/infobar_gradient_view.mm',
'browser/ui/cocoa/location_bar/action_box_menu_bubble_controller.h',
'browser/ui/cocoa/location_bar/action_box_menu_bubble_controller.mm',
'browser/ui/cocoa/location_bar/autocomplete_text_field.h',
'browser/ui/cocoa/location_bar/autocomplete_text_field.mm',
'browser/ui/cocoa/location_bar/autocomplete_text_field_cell.h',
'browser/ui/cocoa/location_bar/autocomplete_text_field_cell.mm',
'browser/ui/cocoa/login_prompt_cocoa.h',
'browser/ui/cocoa/login_prompt_cocoa.mm',
'browser/ui/cocoa/menu_button.h',
'browser/ui/cocoa/menu_button.mm',
'browser/ui/cocoa/multi_key_equivalent_button.h',
'browser/ui/cocoa/multi_key_equivalent_button.mm',
'browser/ui/cocoa/new_tab_button.h',
'browser/ui/cocoa/new_tab_button.mm',
'browser/ui/cocoa/notifications/balloon_controller.h',
'browser/ui/cocoa/notifications/balloon_controller.mm',
'browser/ui/cocoa/notifications/balloon_view.h',
'browser/ui/cocoa/notifications/balloon_view.mm',
'browser/ui/cocoa/nsmenuitem_additions.h',
'browser/ui/cocoa/nsmenuitem_additions.mm',
'browser/ui/cocoa/nsview_additions.h',
'browser/ui/cocoa/nsview_additions.mm',
'browser/ui/cocoa/one_click_signin_view_controller.h',
'browser/ui/cocoa/one_click_signin_view_controller.mm',
'browser/ui/cocoa/screen_capture_notification_ui_cocoa.h',
'browser/ui/cocoa/screen_capture_notification_ui_cocoa.mm',
'browser/ui/cocoa/speech_recognition_window_controller.h',
'browser/ui/cocoa/speech_recognition_window_controller.mm',
'browser/ui/cocoa/status_bubble_mac.h',
'browser/ui/cocoa/status_bubble_mac.mm',
'browser/ui/cocoa/styled_text_field.h',
'browser/ui/cocoa/styled_text_field.mm',
'browser/ui/cocoa/styled_text_field_cell.h',
'browser/ui/cocoa/styled_text_field_cell.mm',
'browser/ui/cocoa/tab_contents/overlayable_contents_controller.h',
'browser/ui/cocoa/tab_contents/overlayable_contents_controller.mm',
'browser/ui/cocoa/tab_contents/sad_tab_controller.h',
'browser/ui/cocoa/tab_contents/sad_tab_controller.mm',
'browser/ui/cocoa/tab_contents/sad_tab_view.h',
'browser/ui/cocoa/tab_contents/sad_tab_view.mm',
'browser/ui/cocoa/tabs/tab_controller.h',
'browser/ui/cocoa/tabs/tab_controller.mm',
'browser/ui/cocoa/tabs/tab_strip_model_observer_bridge.h',
'browser/ui/cocoa/tabs/tab_strip_model_observer_bridge.mm',
'browser/ui/cocoa/tabs/tab_strip_view.h',
'browser/ui/cocoa/tabs/tab_strip_view.mm',
'browser/ui/cocoa/tabs/tab_view.h',
'browser/ui/cocoa/tabs/tab_view.mm',
'browser/ui/cocoa/tabs/tab_window_controller.h',
'browser/ui/cocoa/tabs/tab_window_controller.mm',
'browser/ui/cocoa/task_manager_mac.h',
'browser/ui/cocoa/task_manager_mac.mm',
'browser/ui/cocoa/themed_window.h',
'browser/ui/cocoa/themed_window.mm',
'browser/ui/cocoa/toolbar/reload_button.h',
'browser/ui/cocoa/toolbar/reload_button.mm',
'browser/ui/cocoa/toolbar/toolbar_button.h',
'browser/ui/cocoa/toolbar/toolbar_button.mm',
'browser/ui/cocoa/toolbar/toolbar_controller.h',
'browser/ui/cocoa/toolbar/toolbar_controller.mm',
'browser/ui/cocoa/toolbar/toolbar_view.h',
'browser/ui/cocoa/toolbar/toolbar_view.mm',
'browser/ui/cocoa/toolbar/wrench_toolbar_button_cell.h',
'browser/ui/cocoa/toolbar/wrench_toolbar_button_cell.mm',
'browser/ui/cocoa/ui_localizer.h',
'browser/ui/cocoa/ui_localizer.mm',
'browser/ui/cocoa/vertical_gradient_view.h',
'browser/ui/cocoa/vertical_gradient_view.mm',
'browser/ui/cocoa/view_id_util.h',
'browser/ui/cocoa/view_id_util.mm',
'browser/ui/cocoa/wrench_menu/menu_tracked_root_view.h',
'browser/ui/cocoa/wrench_menu/menu_tracked_root_view.mm',
'browser/ui/cocoa/wrench_menu/wrench_menu_controller.h',
'browser/ui/cocoa/wrench_menu/wrench_menu_controller.mm',
'browser/ui/cocoa/panels/panel_titlebar_view_cocoa.h',
'browser/ui/cocoa/panels/panel_titlebar_view_cocoa.mm',
'browser/ui/cocoa/panels/panel_window_controller_cocoa.h',
'browser/ui/cocoa/panels/panel_window_controller_cocoa.mm',
],
'mac_bundle_resources': [
'<@(mac_all_xibs)',
],
},
],
}
| true | true |
1c378411dc0d9a317865cb079a9677841b706018 | 1,737 | py | Python | rules_default/castervoice/rules/apps/file_manager/fman.py | MLH-Fellowship/LarynxCode | 840fee18c689a357052825607c27fc8e3e56571c | [
"MIT"
] | 1 | 2021-09-17T06:11:02.000Z | 2021-09-17T06:11:02.000Z | rules_default/castervoice/rules/apps/file_manager/fman.py | soma2000-lang/LarynxCode | 840fee18c689a357052825607c27fc8e3e56571c | [
"MIT"
] | 5 | 2021-02-03T05:29:41.000Z | 2021-02-08T01:14:11.000Z | rules_default/castervoice/rules/apps/file_manager/fman.py | soma2000-lang/LarynxCode | 840fee18c689a357052825607c27fc8e3e56571c | [
"MIT"
] | 4 | 2021-02-03T05:05:00.000Z | 2021-07-14T06:21:10.000Z | from dragonfly import Pause, Choice, MappingRule
from castervoice.lib.actions import Key, Text
from castervoice.lib.ctrl.mgr.rule_details import RuleDetails
from castervoice.lib.merge.additions import IntegerRefST
from castervoice.lib.merge.state.short import R
class fmanRule(MappingRule):
    """Caster voice-command grammar for the fman dual-pane file manager.

    Each spoken phrase maps to the keystroke(s) fman binds by default;
    ``R`` wraps every action for Caster's rule-merging/state machinery.
    """

    mapping = {
        "copy": R(Key("f5")),
        "deselect": R(Key("c-d")),
        "edit": R(Key("f4")),
        "explorer": R(Key("f10")),
        # Set these yourself and add them to the Choice at the bottom
        # Requires the favourites plug-in
        "go <fav>": R(Key("c-0") + Pause("15") + Text("%(fav)s") + Key("enter")),
        # "go see": jump to the C drive via fman's GoTo (ctrl-p) dialog.
        "go see": R(Key("c-p") + Pause("15") + Text("c") + Key("enter")),
        "go to": R(Key("c-p")),
        "move": R(Key("f6")),
        "new file": R(Key("s-f4")),
        "new folder": R(Key("f7")),
        "open left": R(Key("c-left")),
        "open right": R(Key("c-right")),
        "properties": R(Key("a-enter")),
        "refresh": R(Key("c-r")),
        "rename": R(Key("s-f6")),
        "search": R(Key("cs-f")),
        "set favourite": R(Key("s-f")),
        "show favourites": R(Key("c-0")),
        "(show | hide) hidden": R(Key("c-dot")),
        "sort [by] name": R(Key("c-f1")),
        "sort [by] size": R(Key("c-f2")),
        "sort [by] (modified | date)": R(Key("c-f3")),
        "(stoosh | copy) path": R(Key("f11")),
        "terminal": R(Key("f9")),
        # NOTE(review): "pallette" is a misspelling of "palette", but the key
        # *is* the spoken phrase -- correcting it would change the command
        # users must say, so it is left as-is.
        "command pallette": R(Key("cs-p")),
    }
    extras = [
        # "num" is not referenced by any mapping above; it is declared for
        # grammar repetition / merged-rule compatibility.
        IntegerRefST("num", 1, 4),
        # Spoken favourite names -> the shortcut text typed into fman's
        # favourites dialog (see the "go <fav>" command).
        Choice("fav", {
            "example favourite": "ef",
        }),
    ]
    defaults = {
        "num": 1,
    }
def get_rule():
    """Return the rule class and the context details Caster uses to load it.

    The details bind this grammar to the ``fman`` executable/window title.
    """
    details = RuleDetails(name="F man", executable="fman", title="fman")
    return fmanRule, details
| 32.773585 | 81 | 0.513529 | from dragonfly import Pause, Choice, MappingRule
from castervoice.lib.actions import Key, Text
from castervoice.lib.ctrl.mgr.rule_details import RuleDetails
from castervoice.lib.merge.additions import IntegerRefST
from castervoice.lib.merge.state.short import R
class fmanRule(MappingRule):
mapping = {
"copy": R(Key("f5")),
"deselect": R(Key("c-d")),
"edit": R(Key("f4")),
"explorer": R(Key("f10")),
"go <fav>": R(Key("c-0") + Pause("15") + Text("%(fav)s") + Key("enter")),
"go see": R(Key("c-p") + Pause("15") + Text("c") + Key("enter")),
"go to": R(Key("c-p")),
"move": R(Key("f6")),
"new file": R(Key("s-f4")),
"new folder": R(Key("f7")),
"open left": R(Key("c-left")),
"open right": R(Key("c-right")),
"properties": R(Key("a-enter")),
"refresh": R(Key("c-r")),
"rename": R(Key("s-f6")),
"search": R(Key("cs-f")),
"set favourite": R(Key("s-f")),
"show favourites": R(Key("c-0")),
"(show | hide) hidden": R(Key("c-dot")),
"sort [by] name": R(Key("c-f1")),
"sort [by] size": R(Key("c-f2")),
"sort [by] (modified | date)": R(Key("c-f3")),
"(stoosh | copy) path": R(Key("f11")),
"terminal": R(Key("f9")),
"command pallette": R(Key("cs-p")),
}
extras = [
IntegerRefST("num", 1, 4),
Choice("fav", {
"example favourite": "ef",
}),
]
defaults = {
"num": 1,
}
def get_rule():
return fmanRule, RuleDetails(name="F man", executable="fman", title="fman")
| true | true |
1c378428ca95d2a4b7ad7546d2de1252b906f46e | 935 | py | Python | spackmon/apps/users/urls.py | iarspider/spack-monitor | 89acf94dc664b598d9f73292ae4d61fdccf3ac5b | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2021-02-24T23:16:27.000Z | 2021-04-01T17:33:28.000Z | spackmon/apps/users/urls.py | iarspider/spack-monitor | 89acf94dc664b598d9f73292ae4d61fdccf3ac5b | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 18 | 2021-02-11T00:57:53.000Z | 2021-12-09T16:30:17.000Z | spackmon/apps/users/urls.py | iarspider/spack-monitor | 89acf94dc664b598d9f73292ae4d61fdccf3ac5b | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1 | 2021-12-08T12:16:15.000Z | 2021-12-08T12:16:15.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
# django.conf.urls.url() was deprecated in Django 3.1 and removed in 4.0;
# re_path() is its drop-in replacement with identical regex semantics.
from django.urls import include, re_path

from spackmon.apps.users import views

urlpatterns = [
    # Session login/logout.  /accounts/login/ is an alias matching Django's
    # default LOGIN_URL so auth redirects resolve without extra settings.
    re_path(r"^login/$", views.login, name="login"),
    re_path(r"^accounts/login/$", views.login),
    re_path(r"^logout/$", views.logout, name="logout"),
    # API-token display ("token"/"tokens" are two routes to the same view)
    # and regeneration.
    re_path(r"^token$", views.view_token, name="token"),
    re_path(r"^auth/tokens$", views.view_token, name="tokens"),
    re_path(r"^token/update$", views.update_token, name="update_token"),
    # Profile pages: the bare form shows the logged-in user, the suffixed
    # form shows an arbitrary user; both reverse under the name "profile".
    re_path(r"^u/profile$", views.view_profile, name="profile"),
    re_path(r"^u/delete$", views.delete_account, name="delete_account"),  # delete account
    re_path(r"^u/profile/(?P<username>.+)$", views.view_profile, name="profile"),
    # OAuth login flows provided by python-social-auth.
    re_path(r"", include("social_django.urls", namespace="social")),
]

app_name = "users"
from django.conf.urls import url, include
from spackmon.apps.users import views
urlpatterns = [
url(r"^login/$", views.login, name="login"),
url(r"^accounts/login/$", views.login),
url(r"^logout/$", views.logout, name="logout"),
url(r"^token$", views.view_token, name="token"),
url(r"^auth/tokens$", views.view_token, name="tokens"),
url(r"^token/update$", views.update_token, name="update_token"),
url(r"^u/profile$", views.view_profile, name="profile"),
url(r"^u/delete$", views.delete_account, name="delete_account"),
url(r"^u/profile/(?P<username>.+)$", views.view_profile, name="profile"),
url(r"", include("social_django.urls", namespace="social")),
]
app_name = "users"
| true | true |
1c378433f864aabda3b3f783b8fd8b24dfada973 | 1,633 | py | Python | src/tars/utils/runner.py | fredmontet/tars | 922786e8c6456fc0cc1a9db07714f11dd78219d9 | [
"MIT"
] | 3 | 2022-02-06T14:41:07.000Z | 2022-03-25T16:27:45.000Z | src/tars/utils/runner.py | fredmontet/tars | 922786e8c6456fc0cc1a9db07714f11dd78219d9 | [
"MIT"
] | 6 | 2021-09-20T03:33:31.000Z | 2022-03-24T09:00:48.000Z | src/tars/utils/runner.py | fredmontet/tars | 922786e8c6456fc0cc1a9db07714f11dd78219d9 | [
"MIT"
] | null | null | null | import logging
from typing import Callable, NoReturn
from time import sleep
from pandas import Timestamp, Timedelta
class Runner:
    """Executes a function repeatedly through time.

    The function runs at a chosen frequency (e.g. every 10 seconds), either
    until :meth:`stop` is called or, optionally, for a fixed duration.

    :ivar is_running: True while the Runner's loop is active.
    """

    def __init__(self):
        self.is_running = False

    def start(self, func: Callable, frequency: str, duration: Optional[str] = None) -> None:
        """Run ``func`` every ``frequency`` until stopped or ``duration`` elapses.

        :param func: The function to be executed.
        :param frequency: String in the form accepted by Pandas' Timedelta
            (https://pandas.pydata.org/docs/reference/api/pandas.Timedelta.html),
            e.g. "10s".
        :param duration: Optional Timedelta string bounding the total run
            time; when None, the loop runs until :meth:`stop` is called.
        """
        self.is_running = True
        # Parsing the frequency is loop-invariant; hoist it out of the loop.
        sleep_seconds = Timedelta(frequency).total_seconds()
        end_time = None
        if duration is not None:
            end_time = Timestamp.now() + Timedelta(duration)
        # Log *before* entering the (potentially long or infinite) loop; the
        # original logged only after the run had already finished.
        logging.debug(f'Runner started with frequency of {frequency} and '
                      f'duration of {duration}')
        while self.is_running:
            if end_time is not None and Timestamp.now() >= end_time:
                break
            func()
            sleep(sleep_seconds)
        # Bug fix: reflect that the run is over even when the loop ended via
        # the duration deadline rather than an explicit stop().
        self.is_running = False

    def stop(self) -> None:
        """Stop the Runner (takes effect at the next loop iteration)."""
        self.is_running = False
        logging.debug('Runner stopped')
| 34.020833 | 168 | 0.636252 | import logging
from typing import Callable, NoReturn
from time import sleep
from pandas import Timestamp, Timedelta
class Runner:
def __init__(self):
self.is_running = False
def start(self, func: Callable, frequency: str, duration: str = None) \
-> NoReturn:
self.is_running = True
if duration is not None:
end_time = Timestamp.now() + Timedelta(duration)
while self.is_running:
if duration is not None:
if Timestamp.now() >= end_time:
break
func()
sleep(Timedelta(frequency).total_seconds())
logging.debug(f'Runner started with frequency of {frequency} and '
f'duration of {duration}')
def stop(self) -> NoReturn:
self.is_running = False
logging.debug(f'Runner stopped')
| true | true |
1c3784e5f59565c371126fac3abd2bbea28cfca3 | 7,188 | py | Python | toontown/toon/DistributedNPCPetclerkAI.py | journeyfan/toontown-journey | 7a4db507e5c1c38a014fc65588086d9655aaa5b4 | [
"MIT"
] | 1 | 2020-09-27T22:12:47.000Z | 2020-09-27T22:12:47.000Z | toontown/toon/DistributedNPCPetclerkAI.py | journeyfan/toontown-journey | 7a4db507e5c1c38a014fc65588086d9655aaa5b4 | [
"MIT"
] | null | null | null | toontown/toon/DistributedNPCPetclerkAI.py | journeyfan/toontown-journey | 7a4db507e5c1c38a014fc65588086d9655aaa5b4 | [
"MIT"
] | 2 | 2020-09-26T20:37:18.000Z | 2020-11-15T20:55:33.000Z | from otp.ai.AIBaseGlobal import *
from pandac.PandaModules import *
from .DistributedNPCToonBaseAI import *
from toontown.toonbase import TTLocalizer
from direct.task import Task
from toontown.fishing import FishGlobals
from toontown.pets import PetUtil, PetDNA, PetConstants
from toontown.hood import ZoneUtil
class DistributedNPCPetclerkAI(DistributedNPCToonBaseAI):
    """AI-side pet-shop clerk NPC.

    Serves one avatar at a time (tracked in ``self.busy``): offers a
    zone-specific selection of pet seeds, then handles one transaction --
    adopting or returning a pet, or selling the avatar's fish tank -- before
    playing the closing movie and returning to the idle state.
    """

    def __init__(self, air, npcId):
        DistributedNPCToonBaseAI.__init__(self, air, npcId)
        # Pet clerks never hand out quests.
        self.givesQuests = 0
        # doId of the avatar currently being served; 0 means idle.
        self.busy = 0

    def delete(self):
        # Cancel the pending interaction-timeout task and drop all event
        # hooks before the base class tears the distributed object down.
        taskMgr.remove(self.uniqueName('clearMovie'))
        self.ignoreAll()
        DistributedNPCToonBaseAI.delete(self)

    def avatarEnter(self):
        """Handle an avatar entering the clerk's interaction sphere."""
        avId = self.air.getAvatarIdFromSender()
        if avId not in self.air.doId2do:
            self.notify.warning('Avatar: %s not found' % avId)
            return
        if self.isBusy():
            # Already serving someone else: send this avatar away.
            self.freeAvatar(avId)
            return
        # Offer 3 pets appropriate to this hood, replicated once per gender
        # (the seed list is sorted so client and AI agree on indices; the
        # gender is later recovered from the index in petAdopted).
        self.petSeeds = self.air.petMgr.getAvailablePets(3, ZoneUtil.getCanonicalHoodId(self.zoneId))
        numGenders = len(PetDNA.PetGenders)
        self.petSeeds *= numGenders
        self.petSeeds.sort()
        self.sendUpdateToAvatarId(avId, 'setPetSeeds', [self.petSeeds])
        self.transactionType = ''
        av = self.air.doId2do[avId]
        self.busy = avId
        # Clean up gracefully if the client disconnects mid-transaction.
        self.acceptOnce(self.air.getAvatarExitEvent(avId), self.__handleUnexpectedExit, extraArgs=[avId])
        flag = NPCToons.SELL_MOVIE_START
        self.d_setMovie(avId, flag)
        # Time the interaction out if the avatar idles too long.
        taskMgr.doMethodLater(PetConstants.PETCLERK_TIMER, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
        DistributedNPCToonBaseAI.avatarEnter(self)

    def rejectAvatar(self, avId):
        # NOTE(review): the message mentions "fisherman" -- apparently copied
        # from the fishing clerk; this path is not expected to run here either.
        self.notify.warning('rejectAvatar: should not be called by a fisherman!')

    def d_setMovie(self, avId, flag, extraArgs = []):
        # Broadcast a movie-state change to clients.  The mutable default
        # argument is harmless here because extraArgs is only read.
        self.sendUpdate('setMovie', [flag,
         self.npcId,
         avId,
         extraArgs,
         ClockDelta.globalClockDelta.getRealNetworkTime()])

    def sendTimeoutMovie(self, task):
        # The avatar idled too long: play the timeout movie, then reset.
        self.d_setMovie(self.busy, NPCToons.SELL_MOVIE_TIMEOUT)
        self.sendClearMovie(None)
        return Task.done

    def sendClearMovie(self, task):
        # Return to the idle state and broadcast the "clear" movie.
        self.ignore(self.air.getAvatarExitEvent(self.busy))
        taskMgr.remove(self.uniqueName('clearMovie'))
        self.busy = 0
        self.d_setMovie(0, NPCToons.SELL_MOVIE_CLEAR)
        return Task.done

    def fishSold(self):
        """Client request: sell the avatar's entire fish tank to the clerk."""
        avId = self.air.getAvatarIdFromSender()
        if self.busy != avId:
            # Request from an avatar we are not serving -- log as suspicious.
            self.air.writeServerEvent('suspicious', avId, 'DistributedNPCPetshopAI.fishSold busy with %s' % self.busy)
            self.notify.warning('somebody called fishSold that I was not busy with! avId: %s' % avId)
            return
        av = simbase.air.doId2do.get(avId)
        if av:
            trophyResult = self.air.fishManager.creditFishTank(av)
            if trophyResult:
                # The sale crossed a collection milestone: show the trophy
                # movie with the avatar's collection progress.
                movieType = NPCToons.SELL_MOVIE_TROPHY
                extraArgs = [len(av.fishCollection), FishGlobals.getTotalNumFish()]
            else:
                movieType = NPCToons.SELL_MOVIE_COMPLETE
                extraArgs = []
            self.d_setMovie(avId, movieType, extraArgs)
        self.transactionType = 'fish'
        self.sendClearMovie(None)
        return

    def petAdopted(self, petNum, nameIndex):
        """Client request: adopt offered pet ``petNum`` named via ``nameIndex``.

        Both indices come from the client and are validated against the
        seed list sent in avatarEnter and the localized name table.
        """
        avId = self.air.getAvatarIdFromSender()
        if self.busy != avId:
            self.air.writeServerEvent('suspicious', avId, 'DistributedNPCPetshopAI.petAdopted busy with %s' % self.busy)
            self.notify.warning('somebody called petAdopted that I was not busy with! avId: %s' % avId)
            return
        av = simbase.air.doId2do.get(avId)
        if av:
            from toontown.hood import ZoneUtil
            zoneId = ZoneUtil.getCanonicalSafeZoneId(self.zoneId)
            # Reject out-of-range pet indices from a tampering client.
            if petNum not in range(0, len(self.petSeeds)):
                self.air.writeServerEvent('suspicious', avId, 'DistributedNPCPetshopAI.petAdopted and no such pet!')
                self.notify.warning('somebody called petAdopted on a non-existent pet! avId: %s' % avId)
                return
            cost = PetUtil.getPetCostFromSeed(self.petSeeds[petNum], zoneId)
            if cost > av.getTotalMoney():
                self.air.writeServerEvent('suspicious', avId, "DistributedNPCPetshopAI.petAdopted and toon doesn't have enough money!")
                self.notify.warning("somebody called petAdopted and didn't have enough money to adopt! avId: %s" % avId)
                return
            if av.petId != 0:
                # Adopting replaces any pet the toon already owns.
                simbase.air.petMgr.deleteToonsPet(avId)
            # The seed list was replicated once per gender in avatarEnter,
            # so the gender is encoded in the chosen index.
            gender = petNum % len(PetDNA.PetGenders)
            if nameIndex not in range(0, TTLocalizer.PetNameIndexMAX):
                self.air.writeServerEvent('avoid_crash', avId, "DistributedNPCPetclerkAI.petAdopted and didn't have valid nameIndex!")
                self.notify.warning("somebody called petAdopted and didn't have valid nameIndex to adopt! avId: %s" % avId)
                return
            simbase.air.petMgr.createNewPetFromSeed(avId, self.petSeeds[petNum], nameIndex=nameIndex, gender=gender, safeZoneId=zoneId)
            # NOTE(review): warning level looks intentional-for-visibility
            # (this is an informational message, not an error).
            self.notify.warning("Created new pet from seed")
            self.transactionType = 'adopt'
            # Charge the bank first, then the wallet for the remainder
            # (cost <= total money was verified above).
            bankPrice = min(av.getBankMoney(), cost)
            walletPrice = cost - bankPrice
            av.b_setBankMoney(av.getBankMoney() - bankPrice)
            av.b_setMoney(av.getMoney() - walletPrice)

    def petReturned(self):
        """Client request: give the avatar's current pet back to the shop."""
        avId = self.air.getAvatarIdFromSender()
        if self.busy != avId:
            self.air.writeServerEvent('suspicious', avId, 'DistributedNPCPetshopAI.petReturned busy with %s' % self.busy)
            self.notify.warning('somebody called petReturned that I was not busy with! avId: %s' % avId)
            return
        av = simbase.air.doId2do.get(avId)
        if av:
            simbase.air.petMgr.deleteToonsPet(avId)
            self.transactionType = 'return'
            self.transactionDone()

    def transactionDone(self):
        """Play the movie matching the transaction type, then go idle.

        An empty transactionType means the avatar cancelled.
        """
        avId = self.air.getAvatarIdFromSender()
        if self.busy != avId:
            self.air.writeServerEvent('suspicious', avId, 'DistributedNPCPetshopAI.transactionDone busy with %s' % self.busy)
            self.notify.warning('somebody called transactionDone that I was not busy with! avId: %s' % avId)
            return
        av = simbase.air.doId2do.get(avId)
        if av:
            if self.transactionType == 'adopt':
                self.d_setMovie(avId, NPCToons.SELL_MOVIE_PETADOPTED)
            elif self.transactionType == 'return':
                self.d_setMovie(avId, NPCToons.SELL_MOVIE_PETRETURNED)
            elif self.transactionType == '':
                self.d_setMovie(avId, NPCToons.SELL_MOVIE_PETCANCELED)
        self.sendClearMovie(None)
        return

    def __handleUnexpectedExit(self, avId):
        # The client disconnected mid-transaction: abandon it and go idle.
        self.notify.warning('avatar:' + str(avId) + ' has exited unexpectedly')
        self.notify.warning('not busy with avId: %s, busy: %s ' % (avId, self.busy))
        taskMgr.remove(self.uniqueName('clearMovie'))
        self.sendClearMovie(None)
        return
| 46.076923 | 135 | 0.644546 | from otp.ai.AIBaseGlobal import *
from pandac.PandaModules import *
from .DistributedNPCToonBaseAI import *
from toontown.toonbase import TTLocalizer
from direct.task import Task
from toontown.fishing import FishGlobals
from toontown.pets import PetUtil, PetDNA, PetConstants
from toontown.hood import ZoneUtil
class DistributedNPCPetclerkAI(DistributedNPCToonBaseAI):
def __init__(self, air, npcId):
DistributedNPCToonBaseAI.__init__(self, air, npcId)
self.givesQuests = 0
self.busy = 0
def delete(self):
taskMgr.remove(self.uniqueName('clearMovie'))
self.ignoreAll()
DistributedNPCToonBaseAI.delete(self)
def avatarEnter(self):
avId = self.air.getAvatarIdFromSender()
if avId not in self.air.doId2do:
self.notify.warning('Avatar: %s not found' % avId)
return
if self.isBusy():
self.freeAvatar(avId)
return
self.petSeeds = self.air.petMgr.getAvailablePets(3, ZoneUtil.getCanonicalHoodId(self.zoneId))
numGenders = len(PetDNA.PetGenders)
self.petSeeds *= numGenders
self.petSeeds.sort()
self.sendUpdateToAvatarId(avId, 'setPetSeeds', [self.petSeeds])
self.transactionType = ''
av = self.air.doId2do[avId]
self.busy = avId
self.acceptOnce(self.air.getAvatarExitEvent(avId), self.__handleUnexpectedExit, extraArgs=[avId])
flag = NPCToons.SELL_MOVIE_START
self.d_setMovie(avId, flag)
taskMgr.doMethodLater(PetConstants.PETCLERK_TIMER, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
DistributedNPCToonBaseAI.avatarEnter(self)
def rejectAvatar(self, avId):
self.notify.warning('rejectAvatar: should not be called by a fisherman!')
def d_setMovie(self, avId, flag, extraArgs = []):
self.sendUpdate('setMovie', [flag,
self.npcId,
avId,
extraArgs,
ClockDelta.globalClockDelta.getRealNetworkTime()])
def sendTimeoutMovie(self, task):
self.d_setMovie(self.busy, NPCToons.SELL_MOVIE_TIMEOUT)
self.sendClearMovie(None)
return Task.done
def sendClearMovie(self, task):
self.ignore(self.air.getAvatarExitEvent(self.busy))
taskMgr.remove(self.uniqueName('clearMovie'))
self.busy = 0
self.d_setMovie(0, NPCToons.SELL_MOVIE_CLEAR)
return Task.done
def fishSold(self):
avId = self.air.getAvatarIdFromSender()
if self.busy != avId:
self.air.writeServerEvent('suspicious', avId, 'DistributedNPCPetshopAI.fishSold busy with %s' % self.busy)
self.notify.warning('somebody called fishSold that I was not busy with! avId: %s' % avId)
return
av = simbase.air.doId2do.get(avId)
if av:
trophyResult = self.air.fishManager.creditFishTank(av)
if trophyResult:
movieType = NPCToons.SELL_MOVIE_TROPHY
extraArgs = [len(av.fishCollection), FishGlobals.getTotalNumFish()]
else:
movieType = NPCToons.SELL_MOVIE_COMPLETE
extraArgs = []
self.d_setMovie(avId, movieType, extraArgs)
self.transactionType = 'fish'
self.sendClearMovie(None)
return
def petAdopted(self, petNum, nameIndex):
avId = self.air.getAvatarIdFromSender()
if self.busy != avId:
self.air.writeServerEvent('suspicious', avId, 'DistributedNPCPetshopAI.petAdopted busy with %s' % self.busy)
self.notify.warning('somebody called petAdopted that I was not busy with! avId: %s' % avId)
return
av = simbase.air.doId2do.get(avId)
if av:
from toontown.hood import ZoneUtil
zoneId = ZoneUtil.getCanonicalSafeZoneId(self.zoneId)
if petNum not in range(0, len(self.petSeeds)):
self.air.writeServerEvent('suspicious', avId, 'DistributedNPCPetshopAI.petAdopted and no such pet!')
self.notify.warning('somebody called petAdopted on a non-existent pet! avId: %s' % avId)
return
cost = PetUtil.getPetCostFromSeed(self.petSeeds[petNum], zoneId)
if cost > av.getTotalMoney():
self.air.writeServerEvent('suspicious', avId, "DistributedNPCPetshopAI.petAdopted and toon doesn't have enough money!")
self.notify.warning("somebody called petAdopted and didn't have enough money to adopt! avId: %s" % avId)
return
if av.petId != 0:
simbase.air.petMgr.deleteToonsPet(avId)
gender = petNum % len(PetDNA.PetGenders)
if nameIndex not in range(0, TTLocalizer.PetNameIndexMAX):
self.air.writeServerEvent('avoid_crash', avId, "DistributedNPCPetclerkAI.petAdopted and didn't have valid nameIndex!")
self.notify.warning("somebody called petAdopted and didn't have valid nameIndex to adopt! avId: %s" % avId)
return
simbase.air.petMgr.createNewPetFromSeed(avId, self.petSeeds[petNum], nameIndex=nameIndex, gender=gender, safeZoneId=zoneId)
self.notify.warning("Created new pet from seed")
self.transactionType = 'adopt'
bankPrice = min(av.getBankMoney(), cost)
walletPrice = cost - bankPrice
av.b_setBankMoney(av.getBankMoney() - bankPrice)
av.b_setMoney(av.getMoney() - walletPrice)
def petReturned(self):
avId = self.air.getAvatarIdFromSender()
if self.busy != avId:
self.air.writeServerEvent('suspicious', avId, 'DistributedNPCPetshopAI.petReturned busy with %s' % self.busy)
self.notify.warning('somebody called petReturned that I was not busy with! avId: %s' % avId)
return
av = simbase.air.doId2do.get(avId)
if av:
simbase.air.petMgr.deleteToonsPet(avId)
self.transactionType = 'return'
self.transactionDone()
def transactionDone(self):
avId = self.air.getAvatarIdFromSender()
if self.busy != avId:
self.air.writeServerEvent('suspicious', avId, 'DistributedNPCPetshopAI.transactionDone busy with %s' % self.busy)
self.notify.warning('somebody called transactionDone that I was not busy with! avId: %s' % avId)
return
av = simbase.air.doId2do.get(avId)
if av:
if self.transactionType == 'adopt':
self.d_setMovie(avId, NPCToons.SELL_MOVIE_PETADOPTED)
elif self.transactionType == 'return':
self.d_setMovie(avId, NPCToons.SELL_MOVIE_PETRETURNED)
elif self.transactionType == '':
self.d_setMovie(avId, NPCToons.SELL_MOVIE_PETCANCELED)
self.sendClearMovie(None)
return
def __handleUnexpectedExit(self, avId):
self.notify.warning('avatar:' + str(avId) + ' has exited unexpectedly')
self.notify.warning('not busy with avId: %s, busy: %s ' % (avId, self.busy))
taskMgr.remove(self.uniqueName('clearMovie'))
self.sendClearMovie(None)
return
| true | true |
1c3785219c8448bb97b21b1931f5cfeb475fe9a1 | 3,353 | py | Python | alf/utils/encoding_network.py | ruizhaogit/alf | be1e65afa5f8401236d98db8f85a5e27fa1e18dc | [
"Apache-2.0"
] | 2 | 2021-03-22T14:57:03.000Z | 2021-09-28T07:02:10.000Z | alf/utils/encoding_network.py | ruizhaogit/alf | be1e65afa5f8401236d98db8f85a5e27fa1e18dc | [
"Apache-2.0"
] | null | null | null | alf/utils/encoding_network.py | ruizhaogit/alf | be1e65afa5f8401236d98db8f85a5e27fa1e18dc | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tf_agents.networks.encoding_network import EncodingNetwork as TFAEncodingNetwork
from alf.layers import NestConcatenate
class EncodingNetwork(TFAEncodingNetwork):
    """Feed forward network with CNN and FNN layers.

    Extends tf_agents' EncodingNetwork so the last layer can be configured
    independently of the other layers (size, activation, initializers).
    """

    # Sentinel distinguishing "argument omitted" (use a fresh default
    # combiner) from an explicit None (no combiner at all).
    _DEFAULT_COMBINER = object()

    def __init__(self,
                 input_tensor_spec,
                 last_layer_size,
                 last_activation_fn=None,
                 dtype=tf.float32,
                 last_kernel_initializer=None,
                 last_bias_initializer=tf.initializers.Zeros(),
                 preprocessing_combiner=_DEFAULT_COMBINER,
                 **xargs):
        """Create an EncodingNetwork

        This EncodingNetwork allows the last layer to have different setting
        from the other layers.

        Args:
            input_tensor_spec: spec of the input; see
                tf_agents.networks.encoding_network.EncodingNetwork.
            last_layer_size (int): size of the last layer
            last_activation_fn: Activation function of the last layer.
            dtype: dtype of the layers.
            last_kernel_initializer: Initializer for the kernel of the last
                layer. If none is provided a default
                tf.initializers.VarianceScaling is used.
            last_bias_initializer: initializer for the bias of the last layer.
            preprocessing_combiner: (Optional.) A keras layer that takes a flat
                list of tensors and combines them. Good options include
                `tf.keras.layers.Add` and `tf.keras.layers.Concatenate(axis=-1)`.
                This layer must not be already built. If omitted, a fresh
                `NestConcatenate(axis=-1)` is created per network. Pass None
                explicitly to use no combiner. Ignored when there is only one
                input.
            xargs (dict): See tf_agents.networks.encoding_network.EncodingNetwork
                for detail
        """
        # Bug fix: the previous default was a single NestConcatenate layer
        # created once at import time and shared by every EncodingNetwork
        # instance.  tf_agents requires the combiner layer to not be already
        # built, so reusing one layer object across networks is invalid;
        # build a fresh layer per network instead.
        if preprocessing_combiner is EncodingNetwork._DEFAULT_COMBINER:
            preprocessing_combiner = NestConcatenate(axis=-1)
        if len(tf.nest.flatten(input_tensor_spec)) == 1:
            # A single input needs no combiner.
            preprocessing_combiner = None
        super(EncodingNetwork, self).__init__(
            input_tensor_spec=input_tensor_spec,
            preprocessing_combiner=preprocessing_combiner,
            dtype=dtype,
            **xargs)
        if not last_kernel_initializer:
            # He-style initialization suited to ReLU-family activations.
            last_kernel_initializer = tf.initializers.VarianceScaling(
                scale=2.0, mode='fan_in', distribution='truncated_normal')
        self._last_layer = tf.keras.layers.Dense(
            last_layer_size,
            activation=last_activation_fn,
            kernel_initializer=last_kernel_initializer,
            bias_initializer=last_bias_initializer,
            dtype=dtype)

    def call(self, observation, step_type=None, network_state=()):
        """Encode `observation` through the base network plus the last layer.

        Returns:
            A tuple (output, network_state).
        """
        state, network_state = super(EncodingNetwork, self).call(
            observation, step_type=step_type, network_state=network_state)
        return self._last_layer(state), network_state
| 44.706667 | 85 | 0.66776 |
import tensorflow as tf
from tf_agents.networks.encoding_network import EncodingNetwork as TFAEncodingNetwork
from alf.layers import NestConcatenate
class EncodingNetwork(TFAEncodingNetwork):
def __init__(self,
input_tensor_spec,
last_layer_size,
last_activation_fn=None,
dtype=tf.float32,
last_kernel_initializer=None,
last_bias_initializer=tf.initializers.Zeros(),
preprocessing_combiner=NestConcatenate(axis=-1),
**xargs):
if len(tf.nest.flatten(input_tensor_spec)) == 1:
preprocessing_combiner = None
super(EncodingNetwork, self).__init__(
input_tensor_spec=input_tensor_spec,
preprocessing_combiner=preprocessing_combiner,
dtype=dtype,
**xargs)
if not last_kernel_initializer:
last_kernel_initializer = tf.initializers.VarianceScaling(
scale=2.0, mode='fan_in', distribution='truncated_normal')
self._last_layer = tf.keras.layers.Dense(
last_layer_size,
activation=last_activation_fn,
kernel_initializer=last_kernel_initializer,
bias_initializer=last_bias_initializer,
dtype=dtype)
def call(self, observation, step_type=None, network_state=()):
state, network_state = super(EncodingNetwork, self).call(
observation, step_type=step_type, network_state=network_state)
return self._last_layer(state), network_state
| true | true |
1c378541630648325ed14f8cb9e318107b6a9270 | 828 | py | Python | src/datadog_api_client/v2/__init__.py | MichaelTROEHLER/datadog-api-client-python | 12c46626622fb1277bb1e172753b342c671348bd | [
"Apache-2.0"
] | null | null | null | src/datadog_api_client/v2/__init__.py | MichaelTROEHLER/datadog-api-client-python | 12c46626622fb1277bb1e172753b342c671348bd | [
"Apache-2.0"
] | null | null | null | src/datadog_api_client/v2/__init__.py | MichaelTROEHLER/datadog-api-client-python | 12c46626622fb1277bb1e172753b342c671348bd | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# flake8: noqa
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
__version__ = "0.1.0"
# import ApiClient
from datadog_api_client.v2.api_client import ApiClient
# import Configuration
from datadog_api_client.v2.configuration import Configuration
# import exceptions
from datadog_api_client.v2.exceptions import OpenApiException
from datadog_api_client.v2.exceptions import ApiAttributeError
from datadog_api_client.v2.exceptions import ApiTypeError
from datadog_api_client.v2.exceptions import ApiValueError
from datadog_api_client.v2.exceptions import ApiKeyError
from datadog_api_client.v2.exceptions import ApiException
| 33.12 | 108 | 0.838164 |
__version__ = "0.1.0"
from datadog_api_client.v2.api_client import ApiClient
from datadog_api_client.v2.configuration import Configuration
from datadog_api_client.v2.exceptions import OpenApiException
from datadog_api_client.v2.exceptions import ApiAttributeError
from datadog_api_client.v2.exceptions import ApiTypeError
from datadog_api_client.v2.exceptions import ApiValueError
from datadog_api_client.v2.exceptions import ApiKeyError
from datadog_api_client.v2.exceptions import ApiException
| true | true |
1c378588a920518877a2c0491d68e007cd89eb9f | 559 | py | Python | portafolio/core/urls.py | jhonfmg7/portafolioDjangoV2 | f8fe158b97a79c148b062ae0410ef2c2d5938b8f | [
"Apache-2.0"
] | null | null | null | portafolio/core/urls.py | jhonfmg7/portafolioDjangoV2 | f8fe158b97a79c148b062ae0410ef2c2d5938b8f | [
"Apache-2.0"
] | null | null | null | portafolio/core/urls.py | jhonfmg7/portafolioDjangoV2 | f8fe158b97a79c148b062ae0410ef2c2d5938b8f | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from .views import *
# Public site routes; the view callables come from core.views (star-imported
# at the top of this module).
urlpatterns = [
    path('', index, name='home'),
    path('about/', about, name='about'),
    path('projects/', projects, name='projects'),
    path('career/', career, name='career'),
    # NOTE(review): URL segment is 'discovery' but the route name is 'coming'
    # — presumably a "coming soon" page; confirm templates reverse this name.
    path('discovery/', discovery, name='coming'),
    path('contact/', contact, name='contact'),
    path('success/', success, name='success')
]
# Serve user-uploaded media directly from Django in development only;
# in production the web server is expected to serve MEDIA_ROOT.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 31.055556 | 80 | 0.683363 | from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from .views import *
urlpatterns = [
path('', index, name='home'),
path('about/', about, name='about'),
path('projects/', projects, name='projects'),
path('career/', career, name='career'),
path('discovery/', discovery, name='coming'),
path('contact/', contact, name='contact'),
path('success/', success, name='success')
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| true | true |
1c378635eb3ba1d00f160fd94b3d23eeeb5edfc2 | 811 | py | Python | dts_web/urls.py | StitchIQ/DTS_Django | 56bc33cc2640d9f69b61e26960c7a5dd2fcca8ae | [
"MIT"
] | null | null | null | dts_web/urls.py | StitchIQ/DTS_Django | 56bc33cc2640d9f69b61e26960c7a5dd2fcca8ae | [
"MIT"
] | 11 | 2018-05-16T14:09:11.000Z | 2018-05-24T14:21:44.000Z | dts_web/urls.py | StitchIQ/DTS_Django | 56bc33cc2640d9f69b61e26960c7a5dd2fcca8ae | [
"MIT"
] | null | null | null | """dts_web URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
import apps.views as html_view
urlpatterns = [
    # Django admin site.
    path('admin/', admin.site.urls),
    # Landing page rendered by apps.views.index.
    path('', html_view.index)
]
| 32.44 | 77 | 0.709001 | from django.contrib import admin
from django.urls import path
import apps.views as html_view
urlpatterns = [
path('admin/', admin.site.urls),
path('', html_view.index)
]
| true | true |
1c3788f81631f4e1393be698931fd2d2782edca1 | 2,494 | py | Python | python/cloudNode.py | leekcake/CloudConvert | 1d5d9d56f85118d2cdb2922975e571084001fb85 | [
"MIT"
] | null | null | null | python/cloudNode.py | leekcake/CloudConvert | 1d5d9d56f85118d2cdb2922975e571084001fb85 | [
"MIT"
] | null | null | null | python/cloudNode.py | leekcake/CloudConvert | 1d5d9d56f85118d2cdb2922975e571084001fb85 | [
"MIT"
] | null | null | null | import logging
import math
import socket
import subprocess
import sys
import threading
import time
from io import BytesIO
def recvall(sock, n):
    """Receive exactly ``n`` bytes from ``sock``.

    Args:
        sock: a connected, blocking socket.
        n (int): number of bytes to read.

    Returns:
        bytearray: exactly ``n`` bytes.

    Raises:
        ConnectionError: if the peer closes the connection before ``n``
            bytes have been received.
        socket.timeout: if no data arrives within the 10 second timeout.
    """
    # Invariant setting hoisted out of the loop.
    sock.settimeout(10.0)
    data = bytearray()
    while len(data) < n:
        packet = sock.recv(n - len(data))
        if not packet:
            # BUG FIX: recv() returning b'' means the peer closed the socket.
            # The previous code `continue`d here, spinning forever on a dead
            # connection; fail loudly instead (callers catch Exception and
            # reconnect).
            raise ConnectionError(
                'socket closed after %d of %d bytes received' % (len(data), n))
        data.extend(packet)
    return data
def socketCopyToAndClose(src: socket.socket, dest, count):
    """Copy exactly ``count`` bytes from socket ``src`` into ``dest``,
    then close ``dest``.

    Args:
        src: connected blocking socket to read from.
        dest: writable file-like object (e.g. a subprocess's stdin pipe).
        count (int): number of bytes to transfer.

    Raises:
        ConnectionError: if ``src`` is closed before ``count`` bytes arrive.
    """
    try:
        left = count
        while left > 0:
            chunk = src.recv(left)
            if not chunk:
                # BUG FIX: b'' from recv() means the peer closed the
                # connection; the previous code slept 10ms and retried
                # forever here.
                raise ConnectionError(
                    'socket closed with %d of %d bytes still expected'
                    % (left, count))
            left -= len(chunk)
            dest.write(chunk)
    finally:
        # Always close dest, even on error, so the consumer on the other end
        # of the pipe (ffmpeg reading its stdin) can finish instead of
        # blocking forever on a half-open pipe.
        dest.close()
class CloudNode:
    """Worker node: connects to the node-host, receives MPEG-TS chunks,
    transcodes them with ffmpeg and streams the result back."""
    def __init__(self, addr):
        # addr: hostname/IP of the node-host; the port (39000) is fixed.
        self.addr = addr
    def start(self):
        # Run the client loop on a background thread so start() returns.
        t = threading.Thread(target=self._thread_client)
        t.start()
    def _thread_client(self):
        # Outer loop reconnects forever; any exception tears the connection
        # down and retries after a one-second pause.
        while True:
            try:
                clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                clientSocket.connect((self.addr, 39000))
                # Handshake: host asks "Node?", we confirm with "Yes!".
                handshake = recvall(clientSocket, 5).decode()
                if handshake != "Node?":
                    logging.error(f"Handshake failure: {handshake}")
                    clientSocket.close()
                    return
                clientSocket.sendall('Yes!'.encode())
                logging.info(f"Connected to Host")
                # Work loop: length-prefixed request in, length-prefixed
                # transcoded payload out.
                while True:
                    logging.info(f"Receive Work Data")
                    # 4-byte big-endian payload length.
                    dataLen = int.from_bytes(recvall(clientSocket, 4), byteorder='big')
                    logging.info(f"Processing...")
                    # ffmpeg reads MPEG-TS from stdin and writes the
                    # re-encoded (H.264/AAC) MPEG-TS to stdout.
                    p = subprocess.Popen(['ffmpeg', '-f', 'mpegts', '-i', '-',
                                          '-c:v', 'libx264', '-c:a', 'aac', '-preset', 'veryfast', '-f', 'mpegts', '-'],
                                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
                    # Feed stdin from a separate thread while this thread
                    # drains stdout — avoids a pipe-buffer deadlock.
                    copy = threading.Thread(target=socketCopyToAndClose, args=(clientSocket, p.stdin, dataLen,))
                    copy.start()
                    converted = p.stdout.read()
                    logging.info(f"Sending Result Data")
                    clientSocket.sendall('Done!'.encode())
                    clientSocket.sendall(len(converted).to_bytes(4, byteorder='big'))
                    clientSocket.sendall(converted)
            except Exception as ex:
                print(ex)
                print("Retry after 1 seconds")
                time.sleep(1)
| 31.56962 | 120 | 0.520449 | import logging
import math
import socket
import subprocess
import sys
import threading
import time
from io import BytesIO
def recvall(sock, n):
data = bytearray()
while len(data) < n:
sock.settimeout(10.0)
packet = sock.recv(n - len(data))
if not packet:
continue
data.extend(packet)
return data
def socketCopyToAndClose(src: socket.socket, dest, count):
left = count
while left != 0:
readed = src.recv(left)
if not readed:
time.sleep(0.01)
continue
left -= len(readed)
dest.write(readed)
dest.close()
class CloudNode:
def __init__(self, addr):
self.addr = addr
def start(self):
t = threading.Thread(target=self._thread_client)
t.start()
def _thread_client(self):
while True:
try:
clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientSocket.connect((self.addr, 39000))
handshake = recvall(clientSocket, 5).decode()
if handshake != "Node?":
logging.error(f"Handshake failure: {handshake}")
clientSocket.close()
return
clientSocket.sendall('Yes!'.encode())
logging.info(f"Connected to Host")
while True:
logging.info(f"Receive Work Data")
dataLen = int.from_bytes(recvall(clientSocket, 4), byteorder='big')
logging.info(f"Processing...")
p = subprocess.Popen(['ffmpeg', '-f', 'mpegts', '-i', '-',
'-c:v', 'libx264', '-c:a', 'aac', '-preset', 'veryfast', '-f', 'mpegts', '-'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
copy = threading.Thread(target=socketCopyToAndClose, args=(clientSocket, p.stdin, dataLen,))
copy.start()
converted = p.stdout.read()
logging.info(f"Sending Result Data")
clientSocket.sendall('Done!'.encode())
clientSocket.sendall(len(converted).to_bytes(4, byteorder='big'))
clientSocket.sendall(converted)
except Exception as ex:
print(ex)
print("Retry after 1 seconds")
time.sleep(1)
| true | true |
1c37897542d6f4130fd2a54c1f027e1c6ca61361 | 3,797 | py | Python | files/targets/linux_app/verbs/build.py | archive-repository/ezored-udemy | 87d8120cf3a81e204daa1fad6036deec82f85789 | [
"MIT"
] | null | null | null | files/targets/linux_app/verbs/build.py | archive-repository/ezored-udemy | 87d8120cf3a81e204daa1fad6036deec82f85789 | [
"MIT"
] | null | null | null | files/targets/linux_app/verbs/build.py | archive-repository/ezored-udemy | 87d8120cf3a81e204daa1fad6036deec82f85789 | [
"MIT"
] | null | null | null | """Build executable"""
import os
import ezored.app.const as const
from ezored.modules import file
from ezored.modules import log
from ezored.modules import runner
from ezored.modules import util
from files.config import target_linux_app as config
# -----------------------------------------------------------------------------
def run(params):
    """Build the Linux application target with Conan for every configured
    architecture / build-type combination, then copy the optional assets
    folder next to the produced binary.

    Args:
        params (dict): verb parameters; uses ``proj_path``, ``target_name``
            and ``args`` (scanned for ``--dry-run``, which reuses an
            existing build directory instead of wiping it).
    """
    proj_path = params["proj_path"]
    target_name = params["target_name"]

    target_config = config.run(proj_path, target_name, params)

    archs = target_config["archs"]
    build_types = target_config["build_types"]

    param_dry_run = util.list_has_key(params["args"], "--dry-run")
    if param_dry_run:
        log.info("Running in dry mode...")

    # Guard clause: nothing to do without a valid arch list.
    if not archs:
        log.error('Arch list for "{0}" is invalid or empty'.format(target_name))
        return

    # Paths that are identical for every arch/build-type combination.
    target_files_dir = os.path.join(
        proj_path,
        const.DIR_NAME_FILES,
        const.DIR_NAME_FILES_TARGETS,
        target_name,
    )
    conan_recipe = os.path.join(
        target_files_dir,
        const.DIR_NAME_FILES_TARGET_CONAN,
        const.DIR_NAME_FILES_TARGET_CONAN_RECIPE,
        const.FILE_NAME_FILES_TARGET_CONAN_RECIPE_CONANFILE_PY,
    )
    source_dir = os.path.join(target_files_dir, const.DIR_NAME_FILES_TARGET_CMAKE)

    for arch in archs:
        for build_type in build_types:
            log.info(
                "Building for: {0}/{1}...".format(arch["conan_arch"], build_type)
            )

            # Per-combination build locations.
            arch_build_root = os.path.join(
                proj_path,
                const.DIR_NAME_BUILD,
                target_name,
                build_type,
                arch["conan_arch"],
            )
            build_dir = os.path.join(arch_build_root, const.DIR_NAME_BUILD_TARGET)
            conan_install_dir = os.path.join(arch_build_root, const.DIR_NAME_BUILD_CONAN)

            # Dry-run reuses an existing build dir; otherwise start clean.
            clean_build_dir = not (param_dry_run and os.path.isdir(build_dir))
            if clean_build_dir:
                file.remove_dir(build_dir)
                file.create_dir(build_dir)

            # conan build
            run_args = [
                "conan",
                "build",
                conan_recipe,
                "--source-folder",
                source_dir,
                "--build-folder",
                build_dir,
                "--install-folder",
                conan_install_dir,
            ]
            runner.run(run_args, build_dir)

            # Copy the optional assets folder next to the built binary.
            if "assets_dir" in target_config:
                assets_dir = os.path.join(proj_path, target_config["assets_dir"])
                if os.path.isdir(assets_dir):
                    build_assets_dir = os.path.join(
                        build_dir, "bin", os.path.basename(assets_dir)
                    )
                    file.remove_dir(build_assets_dir)
                    file.copy_dir(assets_dir, build_assets_dir, symlinks=True)
| 34.834862 | 85 | 0.458257 |
import os
import ezored.app.const as const
from ezored.modules import file
from ezored.modules import log
from ezored.modules import runner
from ezored.modules import util
from files.config import target_linux_app as config
def run(params):
proj_path = params["proj_path"]
target_name = params["target_name"]
target_config = config.run(proj_path, target_name, params)
archs = target_config["archs"]
build_types = target_config["build_types"]
param_dry_run = util.list_has_key(params["args"], "--dry-run")
if param_dry_run:
log.info("Running in dry mode...")
if archs and len(archs) > 0:
for arch in archs:
for build_type in build_types:
log.info(
"Building for: {0}/{1}...".format(arch["conan_arch"], build_type)
)
build_dir = os.path.join(
proj_path,
const.DIR_NAME_BUILD,
target_name,
build_type,
arch["conan_arch"],
const.DIR_NAME_BUILD_TARGET,
)
clean_build_dir = True
if param_dry_run and os.path.isdir(build_dir):
clean_build_dir = False
if clean_build_dir:
file.remove_dir(build_dir)
file.create_dir(build_dir)
run_args = [
"conan",
"build",
os.path.join(
proj_path,
const.DIR_NAME_FILES,
const.DIR_NAME_FILES_TARGETS,
target_name,
const.DIR_NAME_FILES_TARGET_CONAN,
const.DIR_NAME_FILES_TARGET_CONAN_RECIPE,
const.FILE_NAME_FILES_TARGET_CONAN_RECIPE_CONANFILE_PY,
),
"--source-folder",
os.path.join(
proj_path,
const.DIR_NAME_FILES,
const.DIR_NAME_FILES_TARGETS,
target_name,
const.DIR_NAME_FILES_TARGET_CMAKE,
),
"--build-folder",
os.path.join(
proj_path,
const.DIR_NAME_BUILD,
target_name,
build_type,
arch["conan_arch"],
const.DIR_NAME_BUILD_TARGET,
),
"--install-folder",
os.path.join(
proj_path,
const.DIR_NAME_BUILD,
target_name,
build_type,
arch["conan_arch"],
const.DIR_NAME_BUILD_CONAN,
),
]
runner.run(run_args, build_dir)
if "assets_dir" in target_config:
assets_dir = target_config["assets_dir"]
assets_dir = os.path.join(proj_path, assets_dir)
if os.path.isdir(assets_dir):
build_assets_dir = os.path.join(
build_dir, "bin", os.path.basename(assets_dir)
)
file.remove_dir(build_assets_dir)
file.copy_dir(assets_dir, build_assets_dir, symlinks=True)
else:
log.error('Arch list for "{0}" is invalid or empty'.format(target_name))
| true | true |
1c3789df973937f74f2cd8884e7b642413a40bc3 | 3,395 | py | Python | tests/test_clients_AliClient.py | amitmalav/service-fabrik-backup-restore | 5c1c46de8c9aba618906dda7f91e3c210cbf49c5 | [
"Apache-2.0"
] | null | null | null | tests/test_clients_AliClient.py | amitmalav/service-fabrik-backup-restore | 5c1c46de8c9aba618906dda7f91e3c210cbf49c5 | [
"Apache-2.0"
] | null | null | null | tests/test_clients_AliClient.py | amitmalav/service-fabrik-backup-restore | 5c1c46de8c9aba618906dda7f91e3c210cbf49c5 | [
"Apache-2.0"
] | null | null | null | from tests.utils.utilities import create_start_patcher, stop_all_patchers
from lib.clients.AliClient import AliClient
from lib.clients.BaseClient import BaseClient
import oss2
import os
import pytest
#Test data
# Canned container names used to drive the success / failure paths below.
valid_container = 'backup-container'
invalid_container = 'invalid-container'
# Constructor arguments for the AliClient under test (all values are fakes;
# network touch points are patched out in setup_class).
configuration = {
    'credhub_url' : None,
    'type' : 'online',
    'backup_guid' : 'backup-guid',
    'instance_id' : 'vm-id',
    'secret' : 'xyz',
    'job_name' : 'service-job-name',
    'container' : valid_container,
    'access_key_id' : 'key-id',
    'secret_access_key' : 'secret-key',
    'endpoint': 'endpoint-name',
    'region_name' : 'xyz'
}
# Filesystem + polling parameters passed straight to the client.
directory_persistent = '/var/vcap/store'
directory_work_list = '/tmp'
log_dir = 'tests'
poll_delay_time = 10
poll_maximum_time = 60
operation_name = 'backup'
def mock_shell(command):
    """Stub for BaseClient.shell (patched in via create_start_patcher).

    Echoes the command; for the /proc/mounts lookup it returns the canned
    mount-table line.
    """
    print(command)
    if command == ('cat /proc/mounts | grep '+ directory_persistent):
        # NOTE(review): `valid_volume_device` is not defined anywhere in this
        # module, so taking this branch raises NameError — presumably copied
        # from the AWS test suite; define the fixture value or confirm the
        # branch is never hit.
        return valid_volume_device
class OssClientDummy:
    """Inert stand-in for a low-level OSS client object."""
    def __init__(self):
        pass
class OssDummy:
    """Fake OSS resource namespace handed to AliClient by the patchers."""
    class Bucket:
        # Minimal bucket double: put_object succeeds only for the well-known
        # valid container name and raises for anything else.
        def __init__(self, name):
            self.name = name
        def put_object(self,Key):
            if self.name == valid_container:
                return
            else:
                # NOTE(review): this error path looks broken — `json` is not
                # imported in this module and oss2's Bucket exposes no
                # `.exceptions` attribute (code appears copied from a boto3
                # suite). pytest.raises(Exception) in the test below still
                # passes because *some* exception is raised; confirm the
                # intended NoSuchBucket simulation.
                auth = oss2.Auth(configuration['access_key_id'], configuration['secret_access_key'])
                client = oss2.Bucket(auth, configuration['endpoint'], self.name)
                response = json.load(open('tests/data/aws/bucket.put.nosuchbucket.json'))
                exception = client.exceptions.NoSuchBucket(error_response=response,operation_name='PutObject')
                raise exception
class AliSessionDummy:
    """Fake cloud session that hands out the dummy resource/client doubles."""
    def __init__(self):
        pass
    def resource(self, type, config=None):
        # Implicitly returns None for any type other than 'oss'.
        if type == 'oss':
            return OssDummy()
    def client(self, type, config=None):
        # NOTE(review): checks 's3' while resource() checks 'oss' —
        # presumably a leftover from the AWS variant of this suite; confirm.
        if type == 's3':
            return OssClientDummy()
def get_dummy_ali_session():
    # Side-effect hook for the (currently commented-out) __init__ patcher.
    return AliSessionDummy()
def get_dummy_container(auth, endpoint):
    # Side effect for the patched AliClient.get_container: always returns a
    # fake Bucket named after the configured container.
    return OssDummy.Bucket(configuration['container'])
class TestAwsClient:
    """Exercises AliClient construction with all external touch points
    patched out.

    NOTE(review): class name says "Aws" but this suite tests AliClient —
    presumably copied from the AWS tests; consider renaming.
    """
    # Patchers started in setup_class, stopped in teardown_class.
    patchers = []
    @classmethod
    def setup_class(self):
        # Patch network/OS touch points, then build the client under test.
        #self.patchers.append(create_start_patcher(patch_function='__init__',patch_object=AliClient,side_effect=get_dummy_ali_session)['patcher'])
        self.patchers.append(create_start_patcher(patch_function='get_container',patch_object=AliClient,side_effect=get_dummy_container)['patcher'])
        self.patchers.append(create_start_patcher(patch_function='last_operation', patch_object=BaseClient)['patcher'])
        self.patchers.append(create_start_patcher(patch_function='shell', patch_object=BaseClient, side_effect=mock_shell)['patcher'])
        os.environ['SF_BACKUP_RESTORE_LOG_DIRECTORY'] = log_dir
        os.environ['SF_BACKUP_RESTORE_LAST_OPERATION_DIRECTORY'] = log_dir
        self.testAliClient = AliClient(operation_name, configuration, directory_persistent, directory_work_list,poll_delay_time, poll_maximum_time)
    @classmethod
    def teardown_class(self):
        stop_all_patchers(self.patchers)
    def test_create_aws_client(self):
        # get_container is patched, so the client must hold the fake bucket.
        assert isinstance(self.testAliClient.container, OssDummy.Bucket)
    def test_get_container_exception(self):
        # NOTE(review): `self.testAliClient.OssDummy` does not exist, so an
        # AttributeError satisfies pytest.raises(Exception) — the test passes,
        # but probably not for the intended reason; confirm the intent.
        with pytest.raises(Exception):
            container = self.testAliClient.OssDummy.Bucket(invalid_container)
            assert container is None
| 33.284314 | 148 | 0.699558 | from tests.utils.utilities import create_start_patcher, stop_all_patchers
from lib.clients.AliClient import AliClient
from lib.clients.BaseClient import BaseClient
import oss2
import os
import pytest
valid_container = 'backup-container'
invalid_container = 'invalid-container'
configuration = {
'credhub_url' : None,
'type' : 'online',
'backup_guid' : 'backup-guid',
'instance_id' : 'vm-id',
'secret' : 'xyz',
'job_name' : 'service-job-name',
'container' : valid_container,
'access_key_id' : 'key-id',
'secret_access_key' : 'secret-key',
'endpoint': 'endpoint-name',
'region_name' : 'xyz'
}
directory_persistent = '/var/vcap/store'
directory_work_list = '/tmp'
log_dir = 'tests'
poll_delay_time = 10
poll_maximum_time = 60
operation_name = 'backup'
def mock_shell(command):
print(command)
if command == ('cat /proc/mounts | grep '+ directory_persistent):
return valid_volume_device
class OssClientDummy:
def __init__(self):
pass
class OssDummy:
class Bucket:
def __init__(self, name):
self.name = name
def put_object(self,Key):
if self.name == valid_container:
return
else:
auth = oss2.Auth(configuration['access_key_id'], configuration['secret_access_key'])
client = oss2.Bucket(auth, configuration['endpoint'], self.name)
response = json.load(open('tests/data/aws/bucket.put.nosuchbucket.json'))
exception = client.exceptions.NoSuchBucket(error_response=response,operation_name='PutObject')
raise exception
class AliSessionDummy:
def __init__(self):
pass
def resource(self, type, config=None):
if type == 'oss':
return OssDummy()
def client(self, type, config=None):
if type == 's3':
return OssClientDummy()
def get_dummy_ali_session():
return AliSessionDummy()
def get_dummy_container(auth, endpoint):
return OssDummy.Bucket(configuration['container'])
class TestAwsClient:
patchers = []
@classmethod
def setup_class(self):
self.patchers.append(create_start_patcher(patch_function='get_container',patch_object=AliClient,side_effect=get_dummy_container)['patcher'])
self.patchers.append(create_start_patcher(patch_function='last_operation', patch_object=BaseClient)['patcher'])
self.patchers.append(create_start_patcher(patch_function='shell', patch_object=BaseClient, side_effect=mock_shell)['patcher'])
os.environ['SF_BACKUP_RESTORE_LOG_DIRECTORY'] = log_dir
os.environ['SF_BACKUP_RESTORE_LAST_OPERATION_DIRECTORY'] = log_dir
self.testAliClient = AliClient(operation_name, configuration, directory_persistent, directory_work_list,poll_delay_time, poll_maximum_time)
@classmethod
def teardown_class(self):
stop_all_patchers(self.patchers)
def test_create_aws_client(self):
assert isinstance(self.testAliClient.container, OssDummy.Bucket)
def test_get_container_exception(self):
with pytest.raises(Exception):
container = self.testAliClient.OssDummy.Bucket(invalid_container)
assert container is None
| true | true |
1c378a4dc71a9174c39b3e4f11e98ba2f71d02d9 | 2,002 | py | Python | cdApi/coupon.py | renqiukai/cd_api | 4f1f641adaf031252b097db03249a2581268cc11 | [
"MIT"
] | null | null | null | cdApi/coupon.py | renqiukai/cd_api | 4f1f641adaf031252b097db03249a2581268cc11 | [
"MIT"
] | null | null | null | cdApi/coupon.py | renqiukai/cd_api | 4f1f641adaf031252b097db03249a2581268cc11 | [
"MIT"
] | null | null | null | '''
@说明 :优惠券接口。
@时间 :2020/3/19 下午4:51:48
@作者 :任秋锴
@版本 :1.0
'''
from .base import base
class coupon(base):
    """Coupon API wrapper (list / create / read / update / delete)."""
    def __init__(self, token):
        super().__init__(token)
    def list(self,
             couponType=None, distType=None,
             storeId=None, companyId=None,
             name=None, type=0,
             pageNum=1, pageSize=10):
        """List coupons.

        - couponType: coupon type 1/3/4/5
        - distType: distribution method 1/2/3/4
        - name: coupon name
        - type: coupon status; all 0 / not started 1 / ongoing 2 / ended 3
        """
        api_name = "manager/coupon/list"
        data = {
            "pageNum": pageNum,
            "pageSize": pageSize,
            # 0 is documented as "all"; TODO confirm the server treats an
            # explicit type=0 the same as omitting the field.
            "type": type,
        }
        # BUG FIX: the filter arguments were accepted but silently dropped
        # (only pageNum/pageSize were sent); forward them when supplied.
        optional_filters = {
            "couponType": couponType,
            "distType": distType,
            "storeId": storeId,
            "companyId": companyId,
            "name": name,
        }
        data.update({k: v for k, v in optional_filters.items() if v is not None})
        return self.request(api_name, data)
    def create(self, data):
        """Create a coupon; `data` is the raw payload for manager/coupon/add."""
        api_name = "manager/coupon/add"
        return self.request(api_name, data, method="POST")
    def read(self, _id):
        """Fetch the full definition of coupon `_id`."""
        api_name = "manager/coupon/info"
        data = {
            "id": _id,
        }
        response = self.request(api_name, data, method="GET")
        return self.response(response)
    def update(self, data):
        """Update a coupon; `data` must contain at least its `id`."""
        api_name = "manager/coupon/edit"
        response = self.request(api_name, data, method="POST")
        return self.response(response)
    def updateDemo(self):
        """Example update() payload, kept for reference / manual testing."""
        data = {
            "id": 230,
            "storeList": [
                {"id": 1290, "type": 0, "parentId": 1289}
            ],
        }
        return self.update(data)
    def update_product_list(self, _id, product_id_list: list = []):
        """Replace the product list of a coupon.

        Args:
            _id (int): coupon id.
            product_id_list (list, optional): product codes. Defaults to [].

        Returns:
            response: success flag returned by the API.
        """
        # The [] default is never mutated here, so the shared mutable default
        # is harmless; kept for signature compatibility.
        data = self.read(_id)
        data["productId"] = product_id_list
        return self.update(data)
    def delete(self, _id):
        """Discard (soft-delete) coupon `_id`."""
        api_name = "manager/coupon/discard"
        data = {"id": _id}
        response = self.request(api_name, data, method="POST")
        return self.response(response)
| 25.341772 | 76 | 0.528472 |
from .base import base
class coupon(base):
def __init__(self, token):
super().__init__(token)
def list(self,
couponType=None, distType=None,
storeId=None, companyId=None,
name=None, type=0,
pageNum=1, pageSize=10):
api_name = "manager/coupon/list"
data = {
"pageNum": pageNum,
"pageSize": pageSize,
}
return self.request(api_name, data)
def create(self, data):
api_name = "manager/coupon/add"
return self.request(api_name, data, method="POST")
def read(self, _id):
api_name = "manager/coupon/info"
data = {
"id": _id,
}
response = self.request(api_name, data, method="GET")
return self.response(response)
def update(self, data):
api_name = "manager/coupon/edit"
response = self.request(api_name, data, method="POST")
return self.response(response)
def updateDemo(self):
data = {
"id": 230,
"storeList": [
{"id": 1290, "type": 0, "parentId": 1289}
],
}
return self.update(data)
def update_product_list(self, _id, product_id_list: list = []):
data = self.read(_id)
data["productId"] = product_id_list
return self.update(data)
def delete(self, _id):
api_name = "manager/coupon/discard"
data = {"id": _id}
response = self.request(api_name, data, method="POST")
return self.response(response)
| true | true |
1c378ada774d53aace7e40b7dd9258579b413e9b | 1,385 | py | Python | source/02_scattering_sphere/radialwave_scattering_porous_coupling/exact/__init__.py | ROMSOC/benchmarks-acoustic-propagation | 14dbe64c0279d25053e17c63b3797d6395cd50cc | [
"MIT"
] | 2 | 2021-09-21T15:46:17.000Z | 2022-03-10T02:18:56.000Z | source/02_scattering_sphere/radialwave_scattering_porous_coupling/exact/__init__.py | ROMSOC/benchmarks-acoustic-propagation | 14dbe64c0279d25053e17c63b3797d6395cd50cc | [
"MIT"
] | null | null | null | source/02_scattering_sphere/radialwave_scattering_porous_coupling/exact/__init__.py | ROMSOC/benchmarks-acoustic-propagation | 14dbe64c0279d25053e17c63b3797d6395cd50cc | [
"MIT"
] | 1 | 2021-09-02T00:48:51.000Z | 2021-09-02T00:48:51.000Z | # ------------------------------------------------------------------ #
# ╦═╗╔═╗╔╦╗╔═╗╔═╗╔═╗
# ╠╦╝║ ║║║║╚═╗║ ║║
# ╩╚═╚═╝╩ ╩╚═╝╚═╝╚═╝
# Reduced Order Modelling, Simulation, Optimization of Coupled Systems
# 2017-2021
#
# Authors :
# Ashwin Nayak, Andres Prieto, Daniel Fernandez Comesana
#
# Disclaimer :
# In downloading this SOFTWARE you are deemed to have read and agreed
# to the following terms: This SOFTWARE has been designed with an
# exclusive focus on civil applications. It is not to be used for any
# illegal, deceptive, misleading or unethical purpose or in any
# military applications. This includes ANY APPLICATION WHERE THE USE
# OF THE SOFTWARE MAY RESULT IN DEATH, PERSONAL INJURY OR SEVERE
# PHYSICAL OR ENVIRONMENTAL DAMAGE. Any redistribution of the software
# must retain this disclaimer. BY INSTALLING, COPYING, OR OTHERWISE
# USING THE SOFTWARE, YOU AGREE TO THE TERMS ABOVE. IF YOU DO NOT
# AGREE TO THESE TERMS, DO NOT INSTALL OR USE THE SOFTWARE.
#
# Acknowledgements:
# The ROMSOC project has received funding from the European Union’s
# Horizon 2020 research and innovation programme under the Marie
# Skłodowska-Curie Grant Agreement No. 765374.
# ------------------------------------------------------------------- #
| 49.464286 | 72 | 0.592058 | true | true | |
1c378b561a90a4ed274c10b232748a03260b27d9 | 893 | py | Python | src/clusto/test/base/countertests.py | thekad/clusto | c141ea3ef4931c6a21fdf42845c6e9de5ee08caa | [
"BSD-3-Clause"
] | 216 | 2015-01-10T17:03:25.000Z | 2022-03-24T07:23:41.000Z | src/clusto/test/base/countertests.py | thekad/clusto | c141ea3ef4931c6a21fdf42845c6e9de5ee08caa | [
"BSD-3-Clause"
] | 23 | 2015-01-08T16:51:22.000Z | 2021-03-13T12:56:04.000Z | src/clusto/test/base/countertests.py | thekad/clusto | c141ea3ef4931c6a21fdf42845c6e9de5ee08caa | [
"BSD-3-Clause"
] | 49 | 2015-01-08T00:13:17.000Z | 2021-09-22T02:01:20.000Z | from clusto.test import testbase
from clusto.schema import *
from clusto.drivers.base import *
class TestClustoCounter(testbase.ClustoTestBase):
    """Tests for clusto's per-entity named Counter objects."""
    def testCounterDefault(self):
        # A new counter starts at 0 unless an explicit start value is given.
        e = Entity('e1')
        c = Counter(e, 'key1')
        self.assertEqual(c.value, 0)
        d = Counter(e, 'key2', start=10)
        self.assertEqual(d.value, 10)
    def testCounterIncrement(self):
        # next() bumps the value by one each call.
        e = Entity('e1')
        c = Counter(e, 'key1')
        c.next()
        c.next()
        self.assertEqual(c.value,2)
    def testGetCounter(self):
        e = Entity('e1')
        c = Counter.get(e, 'key1')
        c.next()
        self.assertEqual(c.value, 1)
        # get() on an existing key returns the same counter; the default
        # start value is ignored in that case.
        d = Counter.get(e, 'key1', default=100)
        d.next()
        self.assertEqual(d.value, 2)
        # get() on a missing key creates the counter at the default value.
        f = Counter.get(e, 'key2', default=20)
        self.assertEqual(f.value, 20)
| 19 | 49 | 0.546473 | from clusto.test import testbase
from clusto.schema import *
from clusto.drivers.base import *
class TestClustoCounter(testbase.ClustoTestBase):
def testCounterDefault(self):
e = Entity('e1')
c = Counter(e, 'key1')
self.assertEqual(c.value, 0)
d = Counter(e, 'key2', start=10)
self.assertEqual(d.value, 10)
def testCounterIncrement(self):
e = Entity('e1')
c = Counter(e, 'key1')
c.next()
c.next()
self.assertEqual(c.value,2)
def testGetCounter(self):
e = Entity('e1')
c = Counter.get(e, 'key1')
c.next()
self.assertEqual(c.value, 1)
d = Counter.get(e, 'key1', default=100)
d.next()
self.assertEqual(d.value, 2)
f = Counter.get(e, 'key2', default=20)
self.assertEqual(f.value, 20)
| true | true |
1c378c5f2091666db2d57ad3a5c5280de537cbe5 | 30 | py | Python | auto_schema/fields/__init__.py | tingiskhan/auto-schema | 9b6f332347c4283c8d5033c0d7d0085f5ae8e817 | [
"MIT"
] | null | null | null | auto_schema/fields/__init__.py | tingiskhan/auto-schema | 9b6f332347c4283c8d5033c0d7d0085f5ae8e817 | [
"MIT"
] | null | null | null | auto_schema/fields/__init__.py | tingiskhan/auto-schema | 9b6f332347c4283c8d5033c0d7d0085f5ae8e817 | [
"MIT"
] | null | null | null | from .bytes import BytesField
| 15 | 29 | 0.833333 | from .bytes import BytesField
| true | true |
1c378c93d96d1105b11fbdbe2322d0ec1f5c6a2f | 1,368 | py | Python | training/distributed_training/pytorch/data_parallel/rnnt/entry_point.py | pollyrolly/amazon-sagemaker-examples | b1a56b4dc96201b769f7bbc1e207649423874586 | [
"Apache-2.0"
] | 2,610 | 2020-10-01T14:14:53.000Z | 2022-03-31T18:02:31.000Z | training/distributed_training/pytorch/data_parallel/rnnt/entry_point.py | pollyrolly/amazon-sagemaker-examples | b1a56b4dc96201b769f7bbc1e207649423874586 | [
"Apache-2.0"
] | 1,959 | 2020-09-30T20:22:42.000Z | 2022-03-31T23:58:37.000Z | training/distributed_training/pytorch/data_parallel/rnnt/entry_point.py | pollyrolly/amazon-sagemaker-examples | b1a56b4dc96201b769f7bbc1e207649423874586 | [
"Apache-2.0"
] | 2,052 | 2020-09-30T22:11:46.000Z | 2022-03-31T23:02:51.000Z | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. See the License for
# the specific language governing permissions and limitations under the License.
# SageMaker entry point: builds the command line for the RNN-T trainer
# baked into the container image and execs it via a shell.
import subprocess
import sys
import os
# Forward any CLI arguments passed to this wrapper straight to the trainer.
exe = 'python'
trainer = '/workspace/rnnt/train.py'
cmd_list = [exe] + [trainer] + sys.argv[1:]
cmd = ' '.join(cmd_list)
cmd += ' '
# SageMaker injects the data channel and output locations via environment
# variables (SM_CHANNEL_TRAIN, SM_OUTPUT_DIR).
cmd += '--dataset_dir ' + os.environ['SM_CHANNEL_TRAIN'] + '/datasets/LibriSpeech/ '
cmd += '--output_dir ' + os.environ['SM_OUTPUT_DIR'] + ' '
# Pre-tokenized LibriSpeech manifests: dev-clean for validation and the
# three training splits (clean-100, clean-360, other-500).
cmd += '--val_manifests ' + os.environ['SM_CHANNEL_TRAIN'] + '/tokenized/librispeech-dev-clean-wav-tokenized.pkl '
cmd += '--train_manifests ' + os.environ['SM_CHANNEL_TRAIN'] + '/tokenized/librispeech-train-clean-100-wav-tokenized.pkl ' + os.environ['SM_CHANNEL_TRAIN'] + '/tokenized/librispeech-train-clean-360-wav-tokenized.pkl ' + os.environ['SM_CHANNEL_TRAIN'] + '/tokenized/librispeech-train-other-500-wav-tokenized.pkl '
print('Final command is: ', cmd)
subprocess.run(cmd, shell=True) | 44.129032 | 312 | 0.72807 |
import subprocess
import sys
import os
exe = 'python'
trainer = '/workspace/rnnt/train.py'
cmd_list = [exe] + [trainer] + sys.argv[1:]
cmd = ' '.join(cmd_list)
cmd += ' '
cmd += '--dataset_dir ' + os.environ['SM_CHANNEL_TRAIN'] + '/datasets/LibriSpeech/ '
cmd += '--output_dir ' + os.environ['SM_OUTPUT_DIR'] + ' '
cmd += '--val_manifests ' + os.environ['SM_CHANNEL_TRAIN'] + '/tokenized/librispeech-dev-clean-wav-tokenized.pkl '
cmd += '--train_manifests ' + os.environ['SM_CHANNEL_TRAIN'] + '/tokenized/librispeech-train-clean-100-wav-tokenized.pkl ' + os.environ['SM_CHANNEL_TRAIN'] + '/tokenized/librispeech-train-clean-360-wav-tokenized.pkl ' + os.environ['SM_CHANNEL_TRAIN'] + '/tokenized/librispeech-train-other-500-wav-tokenized.pkl '
print('Final command is: ', cmd)
subprocess.run(cmd, shell=True) | true | true |
1c378ca3536f50fbfdd74c7034b89973888a448c | 31 | py | Python | confu/arm/__init__.py | tiny-dnn/confu | 8f74d9fc0c04efe8cd1b92ae5f43a5d9b686500e | [
"MIT"
] | null | null | null | confu/arm/__init__.py | tiny-dnn/confu | 8f74d9fc0c04efe8cd1b92ae5f43a5d9b686500e | [
"MIT"
] | null | null | null | confu/arm/__init__.py | tiny-dnn/confu | 8f74d9fc0c04efe8cd1b92ae5f43a5d9b686500e | [
"MIT"
] | 1 | 2020-11-16T18:06:25.000Z | 2020-11-16T18:06:25.000Z | from confu.arm.isa import neon
| 15.5 | 30 | 0.806452 | from confu.arm.isa import neon
| true | true |
1c378d3c6a81d3459ec458dc9e030a7377f7e716 | 1,235 | py | Python | glob/setup.py | stlehmann/micropython-lib | fcbf03b152a56f091361cefc7857b4c39891d1a8 | [
"PSF-2.0"
] | null | null | null | glob/setup.py | stlehmann/micropython-lib | fcbf03b152a56f091361cefc7857b4c39891d1a8 | [
"PSF-2.0"
] | null | null | null | glob/setup.py | stlehmann/micropython-lib | fcbf03b152a56f091361cefc7857b4c39891d1a8 | [
"PSF-2.0"
] | null | null | null | import sys
# Remove current dir from sys.path, otherwise setuptools will peek up our
# module instead of system's.
sys.path.pop(0)
from setuptools import setup
sys.path.append("..")
import sdist_upip
setup(name='micropython-glob',
version='0.5.2',
description='CPython glob module ported to MicroPython',
long_description='This is a module ported from CPython standard library to be compatible with\nMicroPython interpreter. Usually, this means applying small patches for\nfeatures not supported (yet, or at all) in MicroPython. Sometimes, heavier\nchanges are required. Note that CPython modules are written with availability\nof vast resources in mind, and may not work for MicroPython ports with\nlimited heap. If you are affected by such a case, please help reimplement\nthe module from scratch.',
url='https://github.com/pfalcon/micropython-lib',
author='CPython Developers',
author_email='python-dev@python.org',
maintainer='Paul Sokolovsky',
maintainer_email='micropython-lib@googlegroups.com',
license='Python',
cmdclass={'sdist': sdist_upip.sdist},
py_modules=['glob'],
install_requires=['micropython-os', 'micropython-re-pcre', 'micropython-fnmatch'])
| 56.136364 | 502 | 0.746559 | import sys
sys.path.pop(0)
from setuptools import setup
sys.path.append("..")
import sdist_upip
setup(name='micropython-glob',
version='0.5.2',
description='CPython glob module ported to MicroPython',
long_description='This is a module ported from CPython standard library to be compatible with\nMicroPython interpreter. Usually, this means applying small patches for\nfeatures not supported (yet, or at all) in MicroPython. Sometimes, heavier\nchanges are required. Note that CPython modules are written with availability\nof vast resources in mind, and may not work for MicroPython ports with\nlimited heap. If you are affected by such a case, please help reimplement\nthe module from scratch.',
url='https://github.com/pfalcon/micropython-lib',
author='CPython Developers',
author_email='python-dev@python.org',
maintainer='Paul Sokolovsky',
maintainer_email='micropython-lib@googlegroups.com',
license='Python',
cmdclass={'sdist': sdist_upip.sdist},
py_modules=['glob'],
install_requires=['micropython-os', 'micropython-re-pcre', 'micropython-fnmatch'])
| true | true |
1c378d69144dec8341b85ba869b0dc66a0e68f41 | 36,535 | py | Python | stockstats.py | Arsh0023/stockstats | 3b13bc74b2106d1a5ebbb6f456344abc3a06ed0e | [
"BSD-3-Clause"
] | null | null | null | stockstats.py | Arsh0023/stockstats | 3b13bc74b2106d1a5ebbb6f456344abc3a06ed0e | [
"BSD-3-Clause"
] | null | null | null | stockstats.py | Arsh0023/stockstats | 3b13bc74b2106d1a5ebbb6f456344abc3a06ed0e | [
"BSD-3-Clause"
] | 1 | 2021-07-24T05:37:47.000Z | 2021-07-24T05:37:47.000Z | # coding=utf-8
# Copyright (c) 2016, Cedric Zhuang
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of disclaimer nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import unicode_literals
import itertools
import logging
import operator
import random
import re
import numpy as np
import pandas as pd
from int_date import get_date_from_diff
__author__ = 'Cedric Zhuang'
log = logging.getLogger(__name__)
class StockDataFrame(pd.DataFrame):
OPERATORS = ['le', 'ge', 'lt', 'gt', 'eq', 'ne']
# Start of options.
KDJ_PARAM = (2.0 / 3.0, 1.0 / 3.0)
KDJ_WINDOW = 9
BOLL_PERIOD = 20
BOLL_STD_TIMES = 2
MACD_EMA_SHORT = 12
MACD_EMA_LONG = 26
MACD_EMA_SIGNAL = 9
PDI_SMMA = 14
MDI_SMMA = 14
DX_SMMA = 14
ADX_EMA = 6
ADXR_EMA = 6
CR_MA1 = 5
CR_MA2 = 10
CR_MA3 = 20
TRIX_EMA_WINDOW = 12
TEMA_EMA_WINDOW = 5
ATR_SMMA = 14
# End of options
@staticmethod
def _get_change(df):
""" Get the percentage change column
:param df: DataFrame object
:return: result series
"""
df['change'] = df['close'].pct_change() * 100
return df['change']
    @staticmethod
    def _get_p(df, column, shifts):
        """ Compute a bit-pattern column over a range of shifts.

        For each row, every shifted value of ``column`` that is positive
        contributes a power-of-two weight; the weights are summed into one
        integer that encodes which shifted rows were positive.

        Example for shifts "-2,-1" (more distant rows get higher bits):

            index   x   x_-2,-1_p
            0       1   NaN
            1      -1   NaN
            2       3   2    (row 0 is positive -> weight 2)
            3       5   1    (row 2 is positive -> weight 1)
            4       1   3    (both previous rows positive)

        :param df: data frame
        :param column: the column to calculate p from
        :param shifts: the range of shifts to consider, e.g. "-2~-1"
        :return: None
        """
        column_name = '{}_{}_p'.format(column, shifts)
        # touch the source column so it is lazily initialized if missing
        df.get(column)
        # walk shifts from largest to smallest: the largest shift is
        # processed first and therefore ends up in the lowest bit
        shifts = StockDataFrame.to_ints(shifts)[::-1]
        indices = None
        count = 0
        for shift in shifts:
            shifted = df.shift(-shift)
            # "is positive" flag for this shift, weighted by the next bit
            index = (shifted[column] > 0) * (2 ** count)
            if indices is None:
                indices = index
            else:
                indices += index
            count += 1
        if indices is not None:
            cp = indices.copy()
            # rows pushed past either end by any shift are undefined
            StockDataFrame.set_nan(cp, shifts)
            df[column_name] = cp
@classmethod
def to_ints(cls, shifts):
items = map(cls._process_shifts_segment,
shifts.split(','))
return sorted(list(set(itertools.chain(*items))))
@classmethod
def to_int(cls, shifts):
numbers = cls.to_ints(shifts)
if len(numbers) != 1:
raise IndexError("only accept 1 number.")
return numbers[0]
@staticmethod
def to_floats(shifts):
floats = map(float, shifts.split(','))
return sorted(list(set(floats)))
@classmethod
def to_float(cls, shifts):
floats = cls.to_floats(shifts)
if len(floats) != 1:
raise IndexError('only accept 1 float.')
return floats[0]
@staticmethod
def _process_shifts_segment(shift_segment):
if '~' in shift_segment:
start, end = shift_segment.split('~')
shifts = range(int(start), int(end) + 1)
else:
shifts = [int(shift_segment)]
return shifts
    @staticmethod
    def set_nan(pd_obj, shift):
        """ Blank out rows that a shift (or set of shifts) cannot fill.

        ``shift`` may be a single int or an iterable of ints; for an
        iterable, both the extreme positive and extreme negative shifts
        are applied, so every row lacking data for any shift in the range
        becomes NaN.  ``pd_obj`` is modified in place.
        """
        try:
            iter(shift)
            max_shift = max(shift)
            min_shift = min(shift)
            StockDataFrame._set_nan_of_single_shift(pd_obj, max_shift)
            StockDataFrame._set_nan_of_single_shift(pd_obj, min_shift)
        except TypeError:
            # shift is not iterable: a single scalar shift
            StockDataFrame._set_nan_of_single_shift(pd_obj, shift)
@staticmethod
def _set_nan_of_single_shift(pd_obj, shift):
val = np.nan
if shift > 0:
pd_obj.iloc[-shift:] = val
elif shift < 0:
pd_obj.iloc[:-shift] = val
@classmethod
def _get_r(cls, df, column, shifts):
""" Get rate of change of column
:param df: DataFrame object
:param column: column name of the rate to calculate
:param shifts: days to shift, accept one shift only
:return: None
"""
shift = cls.to_int(shifts)
rate_key = '{}_{}_r'.format(column, shift)
df[rate_key] = df[column].pct_change(periods=-shift) * 100
@classmethod
def _get_s(cls, df, column, shifts):
""" Get the column shifted by days
:param df: DataFrame object
:param column: name of the column to shift
:param shifts: days to shift, accept one shift only
:return: None
"""
shift = cls.to_int(shifts)
shifted_key = "{}_{}_s".format(column, shift)
df[shifted_key] = df[column].shift(-shift)
cp = df[shifted_key].copy()
StockDataFrame.set_nan(cp, shift)
df[shifted_key] = cp
@classmethod
def _get_log_ret(cls, df):
df['log-ret'] = np.log(df['close'] / df['close_-1_s'])
@classmethod
def _get_c(cls, df, column, shifts):
""" get the count of column in range (shifts)
example: kdjj_0_le_20_c
:param df: stock data
:param column: column name
:param shifts: range to count, only to previous
:return: result series
"""
column_name = '{}_{}_c'.format(column, shifts)
shifts = cls.get_only_one_positive_int(shifts)
df[column_name] = df[column].rolling(
center=False,
window=shifts,
min_periods=0).apply(np.count_nonzero)
return df[column_name]
@classmethod
def _get_fc(cls, df, column, shifts):
""" get the count of column in range of future (shifts)
example: kdjj_0_le_20_fc
:param df: stock data
:param column: column name
:param shifts: range to count, only to future
:return: result series
"""
column_name = '{}_{}_fc'.format(column, shifts)
shift = cls.get_only_one_positive_int(shifts)
reversed_series = df[column][::-1]
reversed_counts = reversed_series.rolling(
center=False,
window=shift,
min_periods=0).apply(np.count_nonzero)
counts = reversed_counts[::-1]
df[column_name] = counts
return counts
@classmethod
def _get_op(cls, df, column, threshold, op):
column_name = '{}_{}_{}'.format(column, threshold, op)
threshold = cls.to_float(threshold)
f = getattr(operator, op)
df[column_name] = f(df[column], threshold)
@staticmethod
def get_diff_convolve_array(shift):
if shift == 0:
ret = [1]
else:
ret = np.zeros(abs(shift) + 1)
if shift < 0:
ret[[0, -1]] = 1, -1
else:
ret[[0, -1]] = -1, 1
return ret
@classmethod
def _init_shifted_columns(cls, column, df, shifts):
# initialize the column if not
df.get(column)
shifts = cls.to_ints(shifts)
shift_column_names = ['{}_{}_s'.format(column, shift) for shift in
shifts]
[df.get(name) for name in shift_column_names]
return shift_column_names
@classmethod
def _get_max(cls, df, column, shifts):
column_name = '{}_{}_max'.format(column, shifts)
shift_column_names = cls._init_shifted_columns(column, df, shifts)
df[column_name] = np.max(df[shift_column_names], axis=1)
@classmethod
def _get_min(cls, df, column, shifts):
column_name = '{}_{}_min'.format(column, shifts)
shift_column_names = cls._init_shifted_columns(column, df, shifts)
df[column_name] = np.min(df[shift_column_names], axis=1)
@staticmethod
def _get_rsv(df, n_days):
""" Calculate the RSV (Raw Stochastic Value) within N days
This value is essential for calculating KDJs
Current day is included in N
:param df: data
:param n_days: N days
:return: None
"""
n_days = int(n_days)
column_name = 'rsv_{}'.format(n_days)
low_min = df['low'].rolling(
min_periods=1, window=n_days, center=False).min()
high_max = df['high'].rolling(
min_periods=1, window=n_days, center=False).max()
cv = (df['close'] - low_min) / (high_max - low_min)
df[column_name] = cv.fillna(0).astype('float64') * 100
@staticmethod
def _positive_sum(data):
data = [i if i > 0 else 0 for i in data]
ret = data[0]
for i in data[1:]:
ret = (ret * (len(data) - 1) + i) / len(data)
return ret
@staticmethod
def _negative_sum(data):
data = [-i if i < 0 else 0 for i in data]
ret = data[0]
for i in data[1:]:
ret = (ret * (len(data) - 1) + i) / len(data)
return ret
# noinspection PyUnresolvedReferences
@classmethod
def _get_rsi(cls, df, n_days):
""" Calculate the RSI (Relative Strength Index) within N days
calculated based on the formula at:
https://en.wikipedia.org/wiki/Relative_strength_index
:param df: data
:param n_days: N days
:return: None
"""
n_days = int(n_days)
d = df['close_-1_d']
df['closepm'] = (d + d.abs()) / 2
df['closenm'] = (-d + d.abs()) / 2
closepm_smma_column = 'closepm_{}_smma'.format(n_days)
closenm_smma_column = 'closenm_{}_smma'.format(n_days)
p_ema = df[closepm_smma_column]
n_ema = df[closenm_smma_column]
rs_column_name = 'rs_{}'.format(n_days)
rsi_column_name = 'rsi_{}'.format(n_days)
df[rs_column_name] = rs = p_ema / n_ema
df[rsi_column_name] = 100 - 100 / (1.0 + rs)
columns_to_remove = ['closepm',
'closenm',
closepm_smma_column,
closenm_smma_column]
cls._drop_columns(df, columns_to_remove)
@staticmethod
def _drop_columns(df, columns):
df.drop(columns, inplace=True, axis=1)
def _ensure_type(self, obj):
""" override the method in pandas, omit the check
This patch is not the perfect way but could make the lib work.
"""
return obj
@classmethod
def _get_smma(cls, df, column, windows):
""" get smoothed moving average.
:param df: data
:param windows: range
:return: result series
"""
window = cls.get_only_one_positive_int(windows)
column_name = '{}_{}_smma'.format(column, window)
smma = df[column].ewm(
ignore_na=False, alpha=1.0 / window,
min_periods=0, adjust=True).mean()
df[column_name] = smma
return smma
@classmethod
def _get_trix(cls, df, column=None, windows=None):
if column is None and windows is None:
column_name = 'trix'
else:
column_name = '{}_{}_trix'.format(column, windows)
if column is None:
column = 'close'
if windows is None:
windows = cls.TRIX_EMA_WINDOW
window = cls.get_only_one_positive_int(windows)
single = '{c}_{w}_ema'.format(c=column, w=window)
double = '{c}_{w}_ema_{w}_ema'.format(c=column, w=window)
triple = '{c}_{w}_ema_{w}_ema_{w}_ema'.format(c=column, w=window)
prev_triple = '{}_-1_s'.format(triple)
df[column_name] = ((df[triple] - df[prev_triple]) * 100
/ df[prev_triple])
columns_to_drop = [single, double, triple, prev_triple]
cls._drop_columns(df, columns_to_drop)
@classmethod
def _get_tema(cls, df, column=None, windows=None):
""" Another implementation for triple ema
Check the algorithm described below:
https://www.forextraders.com/forex-education/forex-technical-analysis/triple-exponential-moving-average-the-tema-indicator/
:param df: data frame
:param column: column to calculate ema
:param windows: window of the calculation
:return: result series
"""
if column is None and windows is None:
column_name = 'tema'
else:
column_name = '{}_{}_tema'.format(column, windows)
if column is None:
column = 'close'
if windows is None:
windows = cls.TEMA_EMA_WINDOW
window = cls.get_only_one_positive_int(windows)
single = '{c}_{w}_ema'.format(c=column, w=window)
double = '{c}_{w}_ema_{w}_ema'.format(c=column, w=window)
triple = '{c}_{w}_ema_{w}_ema_{w}_ema'.format(c=column, w=window)
df[column_name] = 3 * df[single] - 3 * df[double] + df[triple]
cls._drop_columns(df, [single, double, triple])
return df[column_name]
@classmethod
def _get_wr(cls, df, n_days):
""" Williams Overbought/Oversold Index
WMS=[(Hn—Ct)/(Hn—Ln)] ×100
Ct - the close price
Hn - N days high
Ln - N days low
:param df: data
:param n_days: N days
:return: None
"""
n_days = int(n_days)
ln = df['low'].rolling(min_periods=1, window=n_days,
center=False).min()
hn = df['high'].rolling(min_periods=1, window=n_days,
center=False).max()
column_name = 'wr_{}'.format(n_days)
df[column_name] = (hn - df['close']) / (hn - ln) * 100
@classmethod
def _get_cci(cls, df, n_days=None):
""" Commodity Channel Index
CCI = (Typical Price - 20-period SMA of TP) / (.015 x Mean Deviation)
Typical Price (TP) = (High + Low + Close)/3
TP is also implemented as 'middle'.
:param df: data
:param n_days: N days window
:return: None
"""
if n_days is None:
n_days = 14
column_name = 'cci'
else:
n_days = int(n_days)
column_name = 'cci_{}'.format(n_days)
tp = df['middle']
tp_sma = df['middle_{}_sma'.format(n_days)]
md = df['middle'].rolling(
min_periods=1, center=False, window=n_days).apply(
lambda x: np.fabs(x - x.mean()).mean())
df[column_name] = (tp - tp_sma) / (.015 * md)
@classmethod
def _get_tr(cls, df):
""" True Range of the trading
tr = max[(high - low), abs(high - close_prev), abs(low - close_prev)]
:param df: data
:return: None
"""
prev_close = df['close_-1_s']
high = df['high']
low = df['low']
c1 = high - low
c2 = np.abs(high - prev_close)
c3 = np.abs(low - prev_close)
df['tr'] = np.max((c1, c2, c3), axis=0)
@classmethod
def _get_atr(cls, df, window=None):
""" Average True Range
The average true range is an N-day smoothed moving average (SMMA) of
the true range values. Default to 14 days.
https://en.wikipedia.org/wiki/Average_true_range
:param df: data
:return: None
"""
if window is None:
window = cls.ATR_SMMA
column_name = 'atr'
else:
window = int(window)
column_name = 'atr_{}'.format(window)
tr_smma_column = 'tr_{}_smma'.format(window)
df[column_name] = df[tr_smma_column]
cls._drop_columns(df, [tr_smma_column])
@classmethod
def _get_dma(cls, df):
""" Different of Moving Average
default to 10 and 50.
:param df: data
:return: None
"""
df['dma'] = df['close_10_sma'] - df['close_50_sma']
@classmethod
def _get_dmi(cls, df):
""" get the default setting for DMI
including:
+DI: 14 days SMMA of +DM,
-DI: 14 days SMMA of -DM,
DX: based on +DI and -DI
ADX: 6 days SMMA of DX
:param df: data
:return:
"""
df['pdi'] = cls._get_pdi(df, cls.PDI_SMMA)
df['mdi'] = cls._get_mdi(df, cls.MDI_SMMA)
df['dx'] = cls._get_dx(df, cls.DX_SMMA)
df['adx'] = df['dx_{}_ema'.format(cls.ADX_EMA)]
df['adxr'] = df['adx_{}_ema'.format(cls.ADXR_EMA)]
@classmethod
def _get_um_dm(cls, df):
""" Up move and down move
initialize up move and down move
:param df: data
"""
hd = df['high_delta']
df['um'] = (hd + hd.abs()) / 2
ld = -df['low_delta']
df['dm'] = (ld + ld.abs()) / 2
@classmethod
def _get_pdm(cls, df, windows):
""" +DM, positive directional moving
If window is not 1, calculate the SMMA of +DM
:param df: data
:param windows: range
:return:
"""
window = cls.get_only_one_positive_int(windows)
column_name = 'pdm_{}'.format(window)
um, dm = df['um'], df['dm']
df['pdm'] = np.where(um > dm, um, 0)
if window > 1:
pdm = df['pdm_{}_ema'.format(window)]
else:
pdm = df['pdm']
df[column_name] = pdm
@classmethod
def _get_vr(cls, df, windows=None):
if windows is None:
window = 26
column_name = 'vr'
else:
window = cls.get_only_one_positive_int(windows)
column_name = 'vr_{}'.format(window)
df['av'] = np.where(df['change'] > 0, df['volume'], 0)
avs = df['av'].rolling(
min_periods=1, window=window, center=False).sum()
df['bv'] = np.where(df['change'] < 0, df['volume'], 0)
bvs = df['bv'].rolling(
min_periods=1, window=window, center=False).sum()
df['cv'] = np.where(df['change'] == 0, df['volume'], 0)
cvs = df['cv'].rolling(
min_periods=1, window=window, center=False).sum()
df[column_name] = (avs + cvs / 2) / (bvs + cvs / 2) * 100
cls._drop_columns(df, ['av', 'bv', 'cv'])
@classmethod
def _get_mdm(cls, df, windows):
""" -DM, negative directional moving accumulation
If window is not 1, return the SMA of -DM.
:param df: data
:param windows: range
:return:
"""
window = cls.get_only_one_positive_int(windows)
column_name = 'mdm_{}'.format(window)
um, dm = df['um'], df['dm']
df['mdm'] = np.where(dm > um, dm, 0)
if window > 1:
mdm = df['mdm_{}_ema'.format(window)]
else:
mdm = df['mdm']
df[column_name] = mdm
@classmethod
def _get_pdi(cls, df, windows):
""" +DI, positive directional moving index
:param df: data
:param windows: range
:return:
"""
window = cls.get_only_one_positive_int(windows)
pdm_column = 'pdm_{}'.format(window)
tr_column = 'atr_{}'.format(window)
pdi_column = 'pdi_{}'.format(window)
df[pdi_column] = df[pdm_column] / df[tr_column] * 100
return df[pdi_column]
@classmethod
def _get_mdi(cls, df, windows):
window = cls.get_only_one_positive_int(windows)
mdm_column = 'mdm_{}'.format(window)
tr_column = 'atr_{}'.format(window)
mdi_column = 'mdi_{}'.format(window)
df[mdi_column] = df[mdm_column] / df[tr_column] * 100
return df[mdi_column]
@classmethod
def _get_dx(cls, df, windows):
window = cls.get_only_one_positive_int(windows)
dx_column = 'dx_{}'.format(window)
mdi_column = 'mdi_{}'.format(window)
pdi_column = 'pdi_{}'.format(window)
mdi, pdi = df[mdi_column], df[pdi_column]
df[dx_column] = abs(pdi - mdi) / (pdi + mdi) * 100
return df[dx_column]
@classmethod
def _get_kdj_default(cls, df):
""" default KDJ, 9 days
:param df: k line data frame
:return: None
"""
df['kdjk'] = df['kdjk_{}'.format(cls.KDJ_WINDOW)]
df['kdjd'] = df['kdjd_{}'.format(cls.KDJ_WINDOW)]
df['kdjj'] = df['kdjj_{}'.format(cls.KDJ_WINDOW)]
@classmethod
def _get_cr(cls, df, window=26):
ym = df['middle_-1_s']
h = df['high']
p1_m = df.loc[:, ['middle_-1_s', 'high']].min(axis=1)
p2_m = df.loc[:, ['middle_-1_s', 'low']].min(axis=1)
p1 = (h - p1_m).rolling(
min_periods=1, window=window, center=False).sum()
p2 = (ym - p2_m).rolling(
min_periods=1, window=window, center=False).sum()
df['cr'] = p1 / p2 * 100
del df['middle_-1_s']
df['cr-ma1'] = cls._shifted_cr_sma(df, cls.CR_MA1)
df['cr-ma2'] = cls._shifted_cr_sma(df, cls.CR_MA2)
df['cr-ma3'] = cls._shifted_cr_sma(df, cls.CR_MA3)
@classmethod
def _shifted_cr_sma(cls, df, window):
name = cls._temp_name()
df[name] = df['cr'].rolling(min_periods=1, window=window,
center=False).mean()
to_shift = '{}_-{}_s'.format(name, int(window / 2.5 + 1))
ret = df[to_shift]
del df[name], df[to_shift]
return ret
@classmethod
def _temp_name(cls):
return 'sdf{}'.format(random.randint(0, 10e8))
@classmethod
def _get_middle(cls, df):
df['middle'] = (df['close'] + df['high'] + df['low']) / 3.0
@classmethod
def _calc_kd(cls, column):
param0, param1 = cls.KDJ_PARAM
k = 50.0
# noinspection PyTypeChecker
for i in param1 * column:
k = param0 * k + i
yield k
@classmethod
def _get_kdjk(cls, df, n_days):
""" Get the K of KDJ
K = 2/3 × (prev. K) +1/3 × (curr. RSV)
2/3 and 1/3 are the smooth parameters.
:param df: data
:param n_days: calculation range
:return: None
"""
rsv_column = 'rsv_{}'.format(n_days)
k_column = 'kdjk_{}'.format(n_days)
df[k_column] = list(cls._calc_kd(df.get(rsv_column)))
@classmethod
def _get_kdjd(cls, df, n_days):
""" Get the D of KDJ
D = 2/3 × (prev. D) +1/3 × (curr. K)
2/3 and 1/3 are the smooth parameters.
:param df: data
:param n_days: calculation range
:return: None
"""
k_column = 'kdjk_{}'.format(n_days)
d_column = 'kdjd_{}'.format(n_days)
df[d_column] = list(cls._calc_kd(df.get(k_column)))
@staticmethod
def _get_kdjj(df, n_days):
""" Get the J of KDJ
J = 3K-2D
:param df: data
:param n_days: calculation range
:return: None
"""
k_column = 'kdjk_{}'.format(n_days)
d_column = 'kdjd_{}'.format(n_days)
j_column = 'kdjj_{}'.format(n_days)
df[j_column] = 3 * df[k_column] - 2 * df[d_column]
@staticmethod
def remove_random_nan(pd_obj):
return pd_obj.where((pd.notnull(pd_obj)), None)
@staticmethod
def _get_d(df, column, shifts):
shift = StockDataFrame.to_int(shifts)
shift_column = '{}_{}_s'.format(column, shift)
column_name = '{}_{}_d'.format(column, shift)
df[column_name] = df[column] - df[shift_column]
cp = df[column_name].copy()
StockDataFrame.set_nan(cp, shift)
df[column_name] = cp
@classmethod
def _get_sma(cls, df, column, windows):
""" get simple moving average
:param df: data
:param column: column to calculate
:param windows: collection of window of simple moving average
:return: None
"""
window = cls.get_only_one_positive_int(windows)
column_name = '{}_{}_sma'.format(column, window)
df[column_name] = df[column].rolling(min_periods=1, window=window,
center=False).mean()
@classmethod
def _get_ema(cls, df, column, windows):
""" get exponential moving average
:param df: data
:param column: column to calculate
:param windows: collection of window of exponential moving average
:return: None
"""
window = cls.get_only_one_positive_int(windows)
column_name = '{}_{}_ema'.format(column, window)
if len(df[column]) > 0:
df[column_name] = df[column].ewm(
ignore_na=False, span=window,
min_periods=0, adjust=True).mean()
else:
df[column_name] = []
@classmethod
def _get_boll(cls, df):
""" Get Bollinger bands.
boll_ub means the upper band of the Bollinger bands
boll_lb means the lower band of the Bollinger bands
boll_ub = MA + Kσ
boll_lb = MA − Kσ
M = BOLL_PERIOD
K = BOLL_STD_TIMES
:param df: data
:return: None
"""
moving_avg = df['close_{}_sma'.format(cls.BOLL_PERIOD)]
moving_std = df['close_{}_mstd'.format(cls.BOLL_PERIOD)]
df['boll'] = moving_avg
moving_avg = list(map(np.float64, moving_avg))
moving_std = list(map(np.float64, moving_std))
# noinspection PyTypeChecker
df['boll_ub'] = np.add(moving_avg,
np.multiply(cls.BOLL_STD_TIMES, moving_std))
# noinspection PyTypeChecker
df['boll_lb'] = np.subtract(moving_avg,
np.multiply(cls.BOLL_STD_TIMES,
moving_std))
@classmethod
def _get_macd(cls, df):
""" Moving Average Convergence Divergence
This function will initialize all following columns.
MACD Line (macd): (12-day EMA - 26-day EMA)
Signal Line (macds): 9-day EMA of MACD Line
MACD Histogram (macdh): MACD Line - Signal Line
:param df: data
:return: None
"""
ema_short = 'close_{}_ema'.format(cls.MACD_EMA_SHORT)
ema_long = 'close_{}_ema'.format(cls.MACD_EMA_LONG)
ema_signal = 'macd_{}_ema'.format(cls.MACD_EMA_SIGNAL)
fast = df[ema_short]
slow = df[ema_long]
df['macd'] = fast - slow
df['macds'] = df[ema_signal]
df['macdh'] = (df['macd'] - df['macds'])
cls._drop_columns(df, [ema_short, ema_long, ema_signal])
@classmethod
def _get_vwap(cls,df):
df['avg_price'] = (df['high']+df['close']+df['low'])/3
df['cumilative_volume'] = df['volume'].cumsum()
df['pv'] = df['avg_price']*df['volume']
df['cumilative_pv'] = df['pv'].cumsum()
df['vwap'] = df['cumilative_pv']/df['cumilative_volume']
cls._drop_columns(df, ['avg_price', 'cumilative_volume', 'pv', 'cumilative_pv'])
@classmethod
def get_only_one_positive_int(cls, windows):
if isinstance(windows, int):
window = windows
else:
window = cls.to_int(windows)
if window <= 0:
raise IndexError("window must be greater than 0")
return window
@classmethod
def _get_mstd(cls, df, column, windows):
""" get moving standard deviation
:param df: data
:param column: column to calculate
:param windows: collection of window of moving standard deviation
:return: None
"""
window = cls.get_only_one_positive_int(windows)
column_name = '{}_{}_mstd'.format(column, window)
df[column_name] = df[column].rolling(min_periods=1, window=window,
center=False).std()
@classmethod
def _get_mvar(cls, df, column, windows):
""" get moving variance
:param df: data
:param column: column to calculate
:param windows: collection of window of moving variance
:return: None
"""
window = cls.get_only_one_positive_int(windows)
column_name = '{}_{}_mvar'.format(column, window)
df[column_name] = df[column].rolling(
min_periods=1, window=window, center=False).var()
@staticmethod
def parse_column_name(name):
m = re.match(r'(.*)_([\d\-+~,.]+)_(\w+)', name)
ret = [None, None, None]
if m is None:
m = re.match(r'(.*)_([\d\-+~,]+)', name)
if m is not None:
ret = m.group(1, 2)
ret = ret + (None,)
else:
ret = m.group(1, 2, 3)
return ret
CROSS_COLUMN_MATCH_STR = '(.+)_(x|xu|xd)_(.+)'
@classmethod
def is_cross_columns(cls, name):
return re.match(cls.CROSS_COLUMN_MATCH_STR, name) is not None
@classmethod
def parse_cross_column(cls, name):
m = re.match(cls.CROSS_COLUMN_MATCH_STR, name)
ret = [None, None, None]
if m is not None:
ret = m.group(1, 2, 3)
return ret
@staticmethod
def _get_rate(df):
""" same as percent
:param df: data frame
:return: None
"""
df['rate'] = df['close'].pct_change() * 100
@staticmethod
def _get_delta(df, key):
key_to_delta = key.replace('_delta', '')
df[key] = df[key_to_delta].diff()
return df[key]
@staticmethod
def _get_cross(df, key):
left, op, right = StockDataFrame.parse_cross_column(key)
lt_series = df[left] > df[right]
# noinspection PyTypeChecker
different = np.zeros_like(lt_series)
if len(different) > 1:
# noinspection PyTypeChecker
different[1:] = np.diff(lt_series)
different[0] = False
if op == 'x':
df[key] = different
elif op == 'xu':
df[key] = different & lt_series
elif op == 'xd':
df[key] = different & ~lt_series
return df[key]
@staticmethod
def init_columns(obj, columns):
if isinstance(columns, list):
for column in columns:
StockDataFrame.__init_column(obj, column)
else:
StockDataFrame.__init_column(obj, columns)
@classmethod
def __init_not_exist_column(cls, df, key):
if key == 'change':
cls._get_change(df)
elif key == 'rate':
cls._get_rate(df)
elif key == 'middle':
cls._get_middle(df)
elif key in ['boll', 'boll_ub', 'boll_lb']:
cls._get_boll(df)
elif key in ['macd', 'macds', 'macdh']:
cls._get_macd(df)
elif key in ['kdjk', 'kdjd', 'kdjj']:
cls._get_kdj_default(df)
elif key in ['cr', 'cr-ma1', 'cr-ma2', 'cr-ma3']:
cls._get_cr(df)
elif key in ['cci']:
cls._get_cci(df)
elif key in ['tr']:
cls._get_tr(df)
elif key in ['atr']:
cls._get_atr(df)
elif key in ['um', 'dm']:
cls._get_um_dm(df)
elif key in ['pdi', 'mdi', 'dx', 'adx', 'adxr']:
cls._get_dmi(df)
elif key in ['trix']:
cls._get_trix(df)
elif key in ['tema']:
cls._get_tema(df)
elif key in ['vr']:
cls._get_vr(df)
elif key in ['dma']:
cls._get_dma(df)
elif key == 'log-ret':
cls._get_log_ret(df)
elif key in ['vwap']:
cls._get_vwap(df)
elif key.endswith('_delta'):
cls._get_delta(df, key)
elif cls.is_cross_columns(key):
cls._get_cross(df, key)
else:
c, r, t = cls.parse_column_name(key)
if t is not None:
if t in cls.OPERATORS:
# support all kinds of compare operators
cls._get_op(df, c, r, t)
else:
func_name = '_get_{}'.format(t)
getattr(cls, func_name)(df, c, r)
else:
func_name = '_get_{}'.format(c)
getattr(cls, func_name)(df, r)
@staticmethod
def __init_column(df, key):
if key not in df:
if len(df) == 0:
df[key] = []
else:
StockDataFrame.__init_not_exist_column(df, key)
    def __getitem__(self, item):
        """ Column access with lazy initialization.

        A ``KeyError`` from the normal lookup triggers
        :meth:`init_columns`, which derives the requested indicator
        column(s); the lookup is then retried and the result re-wrapped
        as :class:`StockDataFrame`.
        """
        try:
            result = self.retype(
                super(StockDataFrame, self).__getitem__(item))
        except KeyError:
            try:
                self.init_columns(self, item)
            except AttributeError:
                # no generator exists for this column name; fall through so
                # the retried lookup raises the original KeyError
                log.exception('{} not found.'.format(item))
            result = self.retype(
                super(StockDataFrame, self).__getitem__(item))
        return result
def in_date_delta(self, delta_day, anchor=None):
if anchor is None:
anchor = self.get_today()
other_day = get_date_from_diff(anchor, delta_day)
if delta_day > 0:
start, end = anchor, other_day
else:
start, end = other_day, anchor
return self.retype(self.loc[start:end])
def till(self, end_date):
return self[self.index <= end_date]
def start_from(self, start_date):
return self[self.index >= start_date]
def within(self, start_date, end_date):
return self.start_from(start_date).till(end_date)
    def copy(self, deep=True):
        """Copy the frame; ``retype`` keeps the StockDataFrame subclass."""
        return self.retype(super(StockDataFrame, self).copy(deep))
@staticmethod
def retype(value, index_column=None):
""" if the input is a `DataFrame`, convert it to this class.
:param index_column: the column that will be used as index,
default to `date`
:param value: value to convert
:return: this extended class
"""
if index_column is None:
index_column = 'date'
if isinstance(value, pd.DataFrame):
# use all lower case for column name
value.columns = map(lambda c: c.lower(), value.columns)
if index_column in value.columns:
value.set_index(index_column, inplace=True)
value = StockDataFrame(value)
return value
| 33.703875 | 132 | 0.545669 |
from __future__ import unicode_literals
import itertools
import logging
import operator
import random
import re
import numpy as np
import pandas as pd
from int_date import get_date_from_diff
__author__ = 'Cedric Zhuang'
log = logging.getLogger(__name__)
class StockDataFrame(pd.DataFrame):
OPERATORS = ['le', 'ge', 'lt', 'gt', 'eq', 'ne']
KDJ_PARAM = (2.0 / 3.0, 1.0 / 3.0)
KDJ_WINDOW = 9
BOLL_PERIOD = 20
BOLL_STD_TIMES = 2
MACD_EMA_SHORT = 12
MACD_EMA_LONG = 26
MACD_EMA_SIGNAL = 9
PDI_SMMA = 14
MDI_SMMA = 14
DX_SMMA = 14
ADX_EMA = 6
ADXR_EMA = 6
CR_MA1 = 5
CR_MA2 = 10
CR_MA3 = 20
TRIX_EMA_WINDOW = 12
TEMA_EMA_WINDOW = 5
ATR_SMMA = 14
@staticmethod
def _get_change(df):
df['change'] = df['close'].pct_change() * 100
return df['change']
@staticmethod
def _get_p(df, column, shifts):
column_name = '{}_{}_p'.format(column, shifts)
df.get(column)
shifts = StockDataFrame.to_ints(shifts)[::-1]
indices = None
count = 0
for shift in shifts:
shifted = df.shift(-shift)
index = (shifted[column] > 0) * (2 ** count)
if indices is None:
indices = index
else:
indices += index
count += 1
if indices is not None:
cp = indices.copy()
StockDataFrame.set_nan(cp, shifts)
df[column_name] = cp
@classmethod
def to_ints(cls, shifts):
items = map(cls._process_shifts_segment,
shifts.split(','))
return sorted(list(set(itertools.chain(*items))))
@classmethod
def to_int(cls, shifts):
numbers = cls.to_ints(shifts)
if len(numbers) != 1:
raise IndexError("only accept 1 number.")
return numbers[0]
@staticmethod
def to_floats(shifts):
floats = map(float, shifts.split(','))
return sorted(list(set(floats)))
@classmethod
def to_float(cls, shifts):
floats = cls.to_floats(shifts)
if len(floats) != 1:
raise IndexError('only accept 1 float.')
return floats[0]
@staticmethod
def _process_shifts_segment(shift_segment):
if '~' in shift_segment:
start, end = shift_segment.split('~')
shifts = range(int(start), int(end) + 1)
else:
shifts = [int(shift_segment)]
return shifts
@staticmethod
def set_nan(pd_obj, shift):
try:
iter(shift)
max_shift = max(shift)
min_shift = min(shift)
StockDataFrame._set_nan_of_single_shift(pd_obj, max_shift)
StockDataFrame._set_nan_of_single_shift(pd_obj, min_shift)
except TypeError:
StockDataFrame._set_nan_of_single_shift(pd_obj, shift)
@staticmethod
def _set_nan_of_single_shift(pd_obj, shift):
val = np.nan
if shift > 0:
pd_obj.iloc[-shift:] = val
elif shift < 0:
pd_obj.iloc[:-shift] = val
@classmethod
def _get_r(cls, df, column, shifts):
shift = cls.to_int(shifts)
rate_key = '{}_{}_r'.format(column, shift)
df[rate_key] = df[column].pct_change(periods=-shift) * 100
@classmethod
def _get_s(cls, df, column, shifts):
shift = cls.to_int(shifts)
shifted_key = "{}_{}_s".format(column, shift)
df[shifted_key] = df[column].shift(-shift)
cp = df[shifted_key].copy()
StockDataFrame.set_nan(cp, shift)
df[shifted_key] = cp
@classmethod
def _get_log_ret(cls, df):
df['log-ret'] = np.log(df['close'] / df['close_-1_s'])
@classmethod
def _get_c(cls, df, column, shifts):
column_name = '{}_{}_c'.format(column, shifts)
shifts = cls.get_only_one_positive_int(shifts)
df[column_name] = df[column].rolling(
center=False,
window=shifts,
min_periods=0).apply(np.count_nonzero)
return df[column_name]
@classmethod
def _get_fc(cls, df, column, shifts):
column_name = '{}_{}_fc'.format(column, shifts)
shift = cls.get_only_one_positive_int(shifts)
reversed_series = df[column][::-1]
reversed_counts = reversed_series.rolling(
center=False,
window=shift,
min_periods=0).apply(np.count_nonzero)
counts = reversed_counts[::-1]
df[column_name] = counts
return counts
@classmethod
def _get_op(cls, df, column, threshold, op):
column_name = '{}_{}_{}'.format(column, threshold, op)
threshold = cls.to_float(threshold)
f = getattr(operator, op)
df[column_name] = f(df[column], threshold)
@staticmethod
def get_diff_convolve_array(shift):
if shift == 0:
ret = [1]
else:
ret = np.zeros(abs(shift) + 1)
if shift < 0:
ret[[0, -1]] = 1, -1
else:
ret[[0, -1]] = -1, 1
return ret
@classmethod
def _init_shifted_columns(cls, column, df, shifts):
df.get(column)
shifts = cls.to_ints(shifts)
shift_column_names = ['{}_{}_s'.format(column, shift) for shift in
shifts]
[df.get(name) for name in shift_column_names]
return shift_column_names
@classmethod
def _get_max(cls, df, column, shifts):
column_name = '{}_{}_max'.format(column, shifts)
shift_column_names = cls._init_shifted_columns(column, df, shifts)
df[column_name] = np.max(df[shift_column_names], axis=1)
@classmethod
def _get_min(cls, df, column, shifts):
column_name = '{}_{}_min'.format(column, shifts)
shift_column_names = cls._init_shifted_columns(column, df, shifts)
df[column_name] = np.min(df[shift_column_names], axis=1)
@staticmethod
def _get_rsv(df, n_days):
n_days = int(n_days)
column_name = 'rsv_{}'.format(n_days)
low_min = df['low'].rolling(
min_periods=1, window=n_days, center=False).min()
high_max = df['high'].rolling(
min_periods=1, window=n_days, center=False).max()
cv = (df['close'] - low_min) / (high_max - low_min)
df[column_name] = cv.fillna(0).astype('float64') * 100
@staticmethod
def _positive_sum(data):
data = [i if i > 0 else 0 for i in data]
ret = data[0]
for i in data[1:]:
ret = (ret * (len(data) - 1) + i) / len(data)
return ret
@staticmethod
def _negative_sum(data):
data = [-i if i < 0 else 0 for i in data]
ret = data[0]
for i in data[1:]:
ret = (ret * (len(data) - 1) + i) / len(data)
return ret
@classmethod
def _get_rsi(cls, df, n_days):
n_days = int(n_days)
d = df['close_-1_d']
df['closepm'] = (d + d.abs()) / 2
df['closenm'] = (-d + d.abs()) / 2
closepm_smma_column = 'closepm_{}_smma'.format(n_days)
closenm_smma_column = 'closenm_{}_smma'.format(n_days)
p_ema = df[closepm_smma_column]
n_ema = df[closenm_smma_column]
rs_column_name = 'rs_{}'.format(n_days)
rsi_column_name = 'rsi_{}'.format(n_days)
df[rs_column_name] = rs = p_ema / n_ema
df[rsi_column_name] = 100 - 100 / (1.0 + rs)
columns_to_remove = ['closepm',
'closenm',
closepm_smma_column,
closenm_smma_column]
cls._drop_columns(df, columns_to_remove)
@staticmethod
def _drop_columns(df, columns):
df.drop(columns, inplace=True, axis=1)
def _ensure_type(self, obj):
return obj
@classmethod
def _get_smma(cls, df, column, windows):
window = cls.get_only_one_positive_int(windows)
column_name = '{}_{}_smma'.format(column, window)
smma = df[column].ewm(
ignore_na=False, alpha=1.0 / window,
min_periods=0, adjust=True).mean()
df[column_name] = smma
return smma
@classmethod
def _get_trix(cls, df, column=None, windows=None):
if column is None and windows is None:
column_name = 'trix'
else:
column_name = '{}_{}_trix'.format(column, windows)
if column is None:
column = 'close'
if windows is None:
windows = cls.TRIX_EMA_WINDOW
window = cls.get_only_one_positive_int(windows)
single = '{c}_{w}_ema'.format(c=column, w=window)
double = '{c}_{w}_ema_{w}_ema'.format(c=column, w=window)
triple = '{c}_{w}_ema_{w}_ema_{w}_ema'.format(c=column, w=window)
prev_triple = '{}_-1_s'.format(triple)
df[column_name] = ((df[triple] - df[prev_triple]) * 100
/ df[prev_triple])
columns_to_drop = [single, double, triple, prev_triple]
cls._drop_columns(df, columns_to_drop)
@classmethod
def _get_tema(cls, df, column=None, windows=None):
if column is None and windows is None:
column_name = 'tema'
else:
column_name = '{}_{}_tema'.format(column, windows)
if column is None:
column = 'close'
if windows is None:
windows = cls.TEMA_EMA_WINDOW
window = cls.get_only_one_positive_int(windows)
single = '{c}_{w}_ema'.format(c=column, w=window)
double = '{c}_{w}_ema_{w}_ema'.format(c=column, w=window)
triple = '{c}_{w}_ema_{w}_ema_{w}_ema'.format(c=column, w=window)
df[column_name] = 3 * df[single] - 3 * df[double] + df[triple]
cls._drop_columns(df, [single, double, triple])
return df[column_name]
@classmethod
def _get_wr(cls, df, n_days):
n_days = int(n_days)
ln = df['low'].rolling(min_periods=1, window=n_days,
center=False).min()
hn = df['high'].rolling(min_periods=1, window=n_days,
center=False).max()
column_name = 'wr_{}'.format(n_days)
df[column_name] = (hn - df['close']) / (hn - ln) * 100
@classmethod
def _get_cci(cls, df, n_days=None):
if n_days is None:
n_days = 14
column_name = 'cci'
else:
n_days = int(n_days)
column_name = 'cci_{}'.format(n_days)
tp = df['middle']
tp_sma = df['middle_{}_sma'.format(n_days)]
md = df['middle'].rolling(
min_periods=1, center=False, window=n_days).apply(
lambda x: np.fabs(x - x.mean()).mean())
df[column_name] = (tp - tp_sma) / (.015 * md)
@classmethod
def _get_tr(cls, df):
prev_close = df['close_-1_s']
high = df['high']
low = df['low']
c1 = high - low
c2 = np.abs(high - prev_close)
c3 = np.abs(low - prev_close)
df['tr'] = np.max((c1, c2, c3), axis=0)
@classmethod
def _get_atr(cls, df, window=None):
if window is None:
window = cls.ATR_SMMA
column_name = 'atr'
else:
window = int(window)
column_name = 'atr_{}'.format(window)
tr_smma_column = 'tr_{}_smma'.format(window)
df[column_name] = df[tr_smma_column]
cls._drop_columns(df, [tr_smma_column])
@classmethod
def _get_dma(cls, df):
df['dma'] = df['close_10_sma'] - df['close_50_sma']
@classmethod
def _get_dmi(cls, df):
df['pdi'] = cls._get_pdi(df, cls.PDI_SMMA)
df['mdi'] = cls._get_mdi(df, cls.MDI_SMMA)
df['dx'] = cls._get_dx(df, cls.DX_SMMA)
df['adx'] = df['dx_{}_ema'.format(cls.ADX_EMA)]
df['adxr'] = df['adx_{}_ema'.format(cls.ADXR_EMA)]
@classmethod
def _get_um_dm(cls, df):
hd = df['high_delta']
df['um'] = (hd + hd.abs()) / 2
ld = -df['low_delta']
df['dm'] = (ld + ld.abs()) / 2
@classmethod
def _get_pdm(cls, df, windows):
window = cls.get_only_one_positive_int(windows)
column_name = 'pdm_{}'.format(window)
um, dm = df['um'], df['dm']
df['pdm'] = np.where(um > dm, um, 0)
if window > 1:
pdm = df['pdm_{}_ema'.format(window)]
else:
pdm = df['pdm']
df[column_name] = pdm
@classmethod
def _get_vr(cls, df, windows=None):
if windows is None:
window = 26
column_name = 'vr'
else:
window = cls.get_only_one_positive_int(windows)
column_name = 'vr_{}'.format(window)
df['av'] = np.where(df['change'] > 0, df['volume'], 0)
avs = df['av'].rolling(
min_periods=1, window=window, center=False).sum()
df['bv'] = np.where(df['change'] < 0, df['volume'], 0)
bvs = df['bv'].rolling(
min_periods=1, window=window, center=False).sum()
df['cv'] = np.where(df['change'] == 0, df['volume'], 0)
cvs = df['cv'].rolling(
min_periods=1, window=window, center=False).sum()
df[column_name] = (avs + cvs / 2) / (bvs + cvs / 2) * 100
cls._drop_columns(df, ['av', 'bv', 'cv'])
@classmethod
def _get_mdm(cls, df, windows):
window = cls.get_only_one_positive_int(windows)
column_name = 'mdm_{}'.format(window)
um, dm = df['um'], df['dm']
df['mdm'] = np.where(dm > um, dm, 0)
if window > 1:
mdm = df['mdm_{}_ema'.format(window)]
else:
mdm = df['mdm']
df[column_name] = mdm
@classmethod
def _get_pdi(cls, df, windows):
window = cls.get_only_one_positive_int(windows)
pdm_column = 'pdm_{}'.format(window)
tr_column = 'atr_{}'.format(window)
pdi_column = 'pdi_{}'.format(window)
df[pdi_column] = df[pdm_column] / df[tr_column] * 100
return df[pdi_column]
@classmethod
def _get_mdi(cls, df, windows):
window = cls.get_only_one_positive_int(windows)
mdm_column = 'mdm_{}'.format(window)
tr_column = 'atr_{}'.format(window)
mdi_column = 'mdi_{}'.format(window)
df[mdi_column] = df[mdm_column] / df[tr_column] * 100
return df[mdi_column]
@classmethod
def _get_dx(cls, df, windows):
window = cls.get_only_one_positive_int(windows)
dx_column = 'dx_{}'.format(window)
mdi_column = 'mdi_{}'.format(window)
pdi_column = 'pdi_{}'.format(window)
mdi, pdi = df[mdi_column], df[pdi_column]
df[dx_column] = abs(pdi - mdi) / (pdi + mdi) * 100
return df[dx_column]
@classmethod
def _get_kdj_default(cls, df):
df['kdjk'] = df['kdjk_{}'.format(cls.KDJ_WINDOW)]
df['kdjd'] = df['kdjd_{}'.format(cls.KDJ_WINDOW)]
df['kdjj'] = df['kdjj_{}'.format(cls.KDJ_WINDOW)]
@classmethod
def _get_cr(cls, df, window=26):
ym = df['middle_-1_s']
h = df['high']
p1_m = df.loc[:, ['middle_-1_s', 'high']].min(axis=1)
p2_m = df.loc[:, ['middle_-1_s', 'low']].min(axis=1)
p1 = (h - p1_m).rolling(
min_periods=1, window=window, center=False).sum()
p2 = (ym - p2_m).rolling(
min_periods=1, window=window, center=False).sum()
df['cr'] = p1 / p2 * 100
del df['middle_-1_s']
df['cr-ma1'] = cls._shifted_cr_sma(df, cls.CR_MA1)
df['cr-ma2'] = cls._shifted_cr_sma(df, cls.CR_MA2)
df['cr-ma3'] = cls._shifted_cr_sma(df, cls.CR_MA3)
@classmethod
def _shifted_cr_sma(cls, df, window):
name = cls._temp_name()
df[name] = df['cr'].rolling(min_periods=1, window=window,
center=False).mean()
to_shift = '{}_-{}_s'.format(name, int(window / 2.5 + 1))
ret = df[to_shift]
del df[name], df[to_shift]
return ret
@classmethod
def _temp_name(cls):
return 'sdf{}'.format(random.randint(0, 10e8))
@classmethod
def _get_middle(cls, df):
df['middle'] = (df['close'] + df['high'] + df['low']) / 3.0
@classmethod
def _calc_kd(cls, column):
param0, param1 = cls.KDJ_PARAM
k = 50.0
for i in param1 * column:
k = param0 * k + i
yield k
@classmethod
def _get_kdjk(cls, df, n_days):
rsv_column = 'rsv_{}'.format(n_days)
k_column = 'kdjk_{}'.format(n_days)
df[k_column] = list(cls._calc_kd(df.get(rsv_column)))
@classmethod
def _get_kdjd(cls, df, n_days):
k_column = 'kdjk_{}'.format(n_days)
d_column = 'kdjd_{}'.format(n_days)
df[d_column] = list(cls._calc_kd(df.get(k_column)))
@staticmethod
def _get_kdjj(df, n_days):
k_column = 'kdjk_{}'.format(n_days)
d_column = 'kdjd_{}'.format(n_days)
j_column = 'kdjj_{}'.format(n_days)
df[j_column] = 3 * df[k_column] - 2 * df[d_column]
@staticmethod
def remove_random_nan(pd_obj):
return pd_obj.where((pd.notnull(pd_obj)), None)
@staticmethod
def _get_d(df, column, shifts):
shift = StockDataFrame.to_int(shifts)
shift_column = '{}_{}_s'.format(column, shift)
column_name = '{}_{}_d'.format(column, shift)
df[column_name] = df[column] - df[shift_column]
cp = df[column_name].copy()
StockDataFrame.set_nan(cp, shift)
df[column_name] = cp
@classmethod
def _get_sma(cls, df, column, windows):
window = cls.get_only_one_positive_int(windows)
column_name = '{}_{}_sma'.format(column, window)
df[column_name] = df[column].rolling(min_periods=1, window=window,
center=False).mean()
@classmethod
def _get_ema(cls, df, column, windows):
window = cls.get_only_one_positive_int(windows)
column_name = '{}_{}_ema'.format(column, window)
if len(df[column]) > 0:
df[column_name] = df[column].ewm(
ignore_na=False, span=window,
min_periods=0, adjust=True).mean()
else:
df[column_name] = []
@classmethod
def _get_boll(cls, df):
moving_avg = df['close_{}_sma'.format(cls.BOLL_PERIOD)]
moving_std = df['close_{}_mstd'.format(cls.BOLL_PERIOD)]
df['boll'] = moving_avg
moving_avg = list(map(np.float64, moving_avg))
moving_std = list(map(np.float64, moving_std))
df['boll_ub'] = np.add(moving_avg,
np.multiply(cls.BOLL_STD_TIMES, moving_std))
df['boll_lb'] = np.subtract(moving_avg,
np.multiply(cls.BOLL_STD_TIMES,
moving_std))
@classmethod
def _get_macd(cls, df):
ema_short = 'close_{}_ema'.format(cls.MACD_EMA_SHORT)
ema_long = 'close_{}_ema'.format(cls.MACD_EMA_LONG)
ema_signal = 'macd_{}_ema'.format(cls.MACD_EMA_SIGNAL)
fast = df[ema_short]
slow = df[ema_long]
df['macd'] = fast - slow
df['macds'] = df[ema_signal]
df['macdh'] = (df['macd'] - df['macds'])
cls._drop_columns(df, [ema_short, ema_long, ema_signal])
@classmethod
def _get_vwap(cls,df):
df['avg_price'] = (df['high']+df['close']+df['low'])/3
df['cumilative_volume'] = df['volume'].cumsum()
df['pv'] = df['avg_price']*df['volume']
df['cumilative_pv'] = df['pv'].cumsum()
df['vwap'] = df['cumilative_pv']/df['cumilative_volume']
cls._drop_columns(df, ['avg_price', 'cumilative_volume', 'pv', 'cumilative_pv'])
@classmethod
def get_only_one_positive_int(cls, windows):
if isinstance(windows, int):
window = windows
else:
window = cls.to_int(windows)
if window <= 0:
raise IndexError("window must be greater than 0")
return window
@classmethod
def _get_mstd(cls, df, column, windows):
window = cls.get_only_one_positive_int(windows)
column_name = '{}_{}_mstd'.format(column, window)
df[column_name] = df[column].rolling(min_periods=1, window=window,
center=False).std()
@classmethod
def _get_mvar(cls, df, column, windows):
window = cls.get_only_one_positive_int(windows)
column_name = '{}_{}_mvar'.format(column, window)
df[column_name] = df[column].rolling(
min_periods=1, window=window, center=False).var()
@staticmethod
def parse_column_name(name):
m = re.match(r'(.*)_([\d\-+~,.]+)_(\w+)', name)
ret = [None, None, None]
if m is None:
m = re.match(r'(.*)_([\d\-+~,]+)', name)
if m is not None:
ret = m.group(1, 2)
ret = ret + (None,)
else:
ret = m.group(1, 2, 3)
return ret
CROSS_COLUMN_MATCH_STR = '(.+)_(x|xu|xd)_(.+)'
@classmethod
def is_cross_columns(cls, name):
return re.match(cls.CROSS_COLUMN_MATCH_STR, name) is not None
@classmethod
def parse_cross_column(cls, name):
m = re.match(cls.CROSS_COLUMN_MATCH_STR, name)
ret = [None, None, None]
if m is not None:
ret = m.group(1, 2, 3)
return ret
@staticmethod
def _get_rate(df):
df['rate'] = df['close'].pct_change() * 100
@staticmethod
def _get_delta(df, key):
key_to_delta = key.replace('_delta', '')
df[key] = df[key_to_delta].diff()
return df[key]
@staticmethod
def _get_cross(df, key):
left, op, right = StockDataFrame.parse_cross_column(key)
lt_series = df[left] > df[right]
different = np.zeros_like(lt_series)
if len(different) > 1:
different[1:] = np.diff(lt_series)
different[0] = False
if op == 'x':
df[key] = different
elif op == 'xu':
df[key] = different & lt_series
elif op == 'xd':
df[key] = different & ~lt_series
return df[key]
@staticmethod
def init_columns(obj, columns):
if isinstance(columns, list):
for column in columns:
StockDataFrame.__init_column(obj, column)
else:
StockDataFrame.__init_column(obj, columns)
@classmethod
def __init_not_exist_column(cls, df, key):
if key == 'change':
cls._get_change(df)
elif key == 'rate':
cls._get_rate(df)
elif key == 'middle':
cls._get_middle(df)
elif key in ['boll', 'boll_ub', 'boll_lb']:
cls._get_boll(df)
elif key in ['macd', 'macds', 'macdh']:
cls._get_macd(df)
elif key in ['kdjk', 'kdjd', 'kdjj']:
cls._get_kdj_default(df)
elif key in ['cr', 'cr-ma1', 'cr-ma2', 'cr-ma3']:
cls._get_cr(df)
elif key in ['cci']:
cls._get_cci(df)
elif key in ['tr']:
cls._get_tr(df)
elif key in ['atr']:
cls._get_atr(df)
elif key in ['um', 'dm']:
cls._get_um_dm(df)
elif key in ['pdi', 'mdi', 'dx', 'adx', 'adxr']:
cls._get_dmi(df)
elif key in ['trix']:
cls._get_trix(df)
elif key in ['tema']:
cls._get_tema(df)
elif key in ['vr']:
cls._get_vr(df)
elif key in ['dma']:
cls._get_dma(df)
elif key == 'log-ret':
cls._get_log_ret(df)
elif key in ['vwap']:
cls._get_vwap(df)
elif key.endswith('_delta'):
cls._get_delta(df, key)
elif cls.is_cross_columns(key):
cls._get_cross(df, key)
else:
c, r, t = cls.parse_column_name(key)
if t is not None:
if t in cls.OPERATORS:
cls._get_op(df, c, r, t)
else:
func_name = '_get_{}'.format(t)
getattr(cls, func_name)(df, c, r)
else:
func_name = '_get_{}'.format(c)
getattr(cls, func_name)(df, r)
@staticmethod
def __init_column(df, key):
if key not in df:
if len(df) == 0:
df[key] = []
else:
StockDataFrame.__init_not_exist_column(df, key)
def __getitem__(self, item):
try:
result = self.retype(
super(StockDataFrame, self).__getitem__(item))
except KeyError:
try:
self.init_columns(self, item)
except AttributeError:
log.exception('{} not found.'.format(item))
result = self.retype(
super(StockDataFrame, self).__getitem__(item))
return result
def in_date_delta(self, delta_day, anchor=None):
if anchor is None:
anchor = self.get_today()
other_day = get_date_from_diff(anchor, delta_day)
if delta_day > 0:
start, end = anchor, other_day
else:
start, end = other_day, anchor
return self.retype(self.loc[start:end])
def till(self, end_date):
return self[self.index <= end_date]
def start_from(self, start_date):
return self[self.index >= start_date]
def within(self, start_date, end_date):
return self.start_from(start_date).till(end_date)
def copy(self, deep=True):
return self.retype(super(StockDataFrame, self).copy(deep))
@staticmethod
def retype(value, index_column=None):
if index_column is None:
index_column = 'date'
if isinstance(value, pd.DataFrame):
value.columns = map(lambda c: c.lower(), value.columns)
if index_column in value.columns:
value.set_index(index_column, inplace=True)
value = StockDataFrame(value)
return value
| true | true |
1c378dcd7d771f6a5c43a3d42e3f42f3b2bf271c | 202 | py | Python | pra subir/pythonexercicios/ex57.py | daianebandeira88/curso-python | 763f5f36b6d7329549ad861c63acc3c84aade887 | [
"MIT"
] | null | null | null | pra subir/pythonexercicios/ex57.py | daianebandeira88/curso-python | 763f5f36b6d7329549ad861c63acc3c84aade887 | [
"MIT"
] | null | null | null | pra subir/pythonexercicios/ex57.py | daianebandeira88/curso-python | 763f5f36b6d7329549ad861c63acc3c84aade887 | [
"MIT"
] | null | null | null | s=''
# Keep prompting until the user answers 'm' or 'f' (case-insensitive).
while s != 'm' and s !='f':
    s=str(input('qual seu sexo? [ m / f ]:')).lower()
# Report the validated answer (prompt/messages are Portuguese by design).
if s == 'm':
    print('vc é do sexo masculino')
if s == 'f':
    print('vc é do sexo feminino')
| 18.363636 | 53 | 0.460396 | s=''
while s != 'm' and s !='f':
s=str(input('qual seu sexo? [ m / f ]:')).lower()
if s == 'm':
print('vc é do sexo masculino')
if s == 'f':
print('vc é do sexo feminino')
| true | true |
1c378e6069e3f1a5bf7f2371219eeaf92876d2c0 | 36,627 | py | Python | infra/bots/recipes/test.py | InvictrixRom/external_skia | 5d1778b530aa0b845b8d6996815665f7cc44bf38 | [
"BSD-3-Clause"
] | null | null | null | infra/bots/recipes/test.py | InvictrixRom/external_skia | 5d1778b530aa0b845b8d6996815665f7cc44bf38 | [
"BSD-3-Clause"
] | null | null | null | infra/bots/recipes/test.py | InvictrixRom/external_skia | 5d1778b530aa0b845b8d6996815665f7cc44bf38 | [
"BSD-3-Clause"
] | 7 | 2017-09-30T23:06:11.000Z | 2019-05-30T08:54:33.000Z | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Recipe module for Skia Swarming test.
# Recipe-engine module dependencies resolved before this recipe runs.
DEPS = [
  'core',
  'env',
  'flavor',
  'recipe_engine/context',
  'recipe_engine/file',
  'recipe_engine/json',
  'recipe_engine/path',
  'recipe_engine/platform',
  'recipe_engine/properties',
  'recipe_engine/python',
  'recipe_engine/raw_io',
  'recipe_engine/step',
  'run',
  'vars',
]
def dm_flags(api, bot):
args = []
# This enables non-deterministic random seeding of the GPU FP optimization
# test.
args.append('--randomProcessorTest')
# 32-bit desktop bots tend to run out of memory, because they have relatively
# far more cores than RAM (e.g. 32 cores, 3G RAM). Hold them back a bit.
if '-x86-' in bot and not 'NexusPlayer' in bot:
args.extend(['--threads', '4'])
# Avoid issues with dynamically exceeding resource cache limits.
if 'Test' in bot and 'DISCARDABLE' in bot:
args.extend(['--threads', '0'])
# See if staying on the main thread helps skia:6748.
if 'Test-iOS' in bot:
args.extend(['--threads', '0'])
# These are the canonical configs that we would ideally run on all bots. We
# may opt out or substitute some below for specific bots
configs = ['8888', 'srgb', 'pdf']
# Add in either gles or gl configs to the canonical set based on OS
sample_count = '8'
gl_prefix = 'gl'
if 'Android' in bot or 'iOS' in bot:
sample_count = '4'
# We want to test the OpenGL config not the GLES config on the Shield
if 'NVIDIA_Shield' not in bot:
gl_prefix = 'gles'
elif 'Intel' in bot:
sample_count = ''
elif 'ChromeOS' in bot:
gl_prefix = 'gles'
configs.extend([gl_prefix, gl_prefix + 'dft', gl_prefix + 'srgb'])
if sample_count is not '':
configs.append(gl_prefix + 'msaa' + sample_count)
# The NP produces a long error stream when we run with MSAA. The Tegra3 just
# doesn't support it.
if ('NexusPlayer' in bot or
'Tegra3' in bot or
# We aren't interested in fixing msaa bugs on current iOS devices.
'iPad4' in bot or
'iPadPro' in bot or
'iPhone6' in bot or
'iPhone7' in bot or
# skia:5792
'IntelHD530' in bot or
'IntelIris540' in bot):
configs = [x for x in configs if 'msaa' not in x]
# The NP produces different images for dft on every run.
if 'NexusPlayer' in bot:
configs = [x for x in configs if 'dft' not in x]
# Runs out of memory on Android bots. Everyone else seems fine.
if 'Android' in bot:
configs.remove('pdf')
if '-GCE-' in bot:
configs.extend(['565'])
configs.extend(['f16'])
configs.extend(['sp-8888', '2ndpic-8888']) # Test niche uses of SkPicture.
configs.extend(['lite-8888']) # Experimental display list.
configs.extend(['gbr-8888'])
if '-TSAN' not in bot and sample_count is not '':
if ('TegraK1' in bot or
'TegraX1' in bot or
'GTX550Ti' in bot or
'GTX660' in bot or
'GT610' in bot):
configs.append(gl_prefix + 'nvprdit' + sample_count)
# We want to test both the OpenGL config and the GLES config on Linux Intel:
# GL is used by Chrome, GLES is used by ChromeOS.
if 'Intel' in bot and api.vars.is_linux:
configs.extend(['gles', 'glesdft', 'glessrgb'])
# NP is running out of RAM when we run all these modes. skia:3255
if 'NexusPlayer' not in bot:
configs.extend(mode + '-8888' for mode in
['serialize', 'tiles_rt', 'pic'])
# Test instanced rendering on a limited number of platforms
if 'Nexus6' in bot:
configs.append(gl_prefix + 'inst') # inst msaa isn't working yet on Adreno.
elif 'NVIDIA_Shield' in bot or 'PixelC' in bot:
# Multisampled instanced configs use nvpr so we substitute inst msaa
# configs for nvpr msaa configs.
old = gl_prefix + 'nvpr'
new = gl_prefix + 'inst'
configs = [x.replace(old, new) for x in configs]
# We also test non-msaa instanced.
configs.append(new)
elif 'MacMini6.2' in bot and sample_count is not '':
configs.extend([gl_prefix + 'inst', gl_prefix + 'inst' + sample_count])
# CommandBuffer bot *only* runs the command_buffer config.
if 'CommandBuffer' in bot:
configs = ['commandbuffer']
# ANGLE bot *only* runs the angle configs
if 'ANGLE' in bot:
configs = ['angle_d3d11_es2',
'angle_d3d9_es2',
'angle_gl_es2',
'angle_d3d11_es3']
if sample_count is not '':
configs.append('angle_d3d11_es2_msaa' + sample_count)
configs.append('angle_d3d11_es3_msaa' + sample_count)
# Vulkan bot *only* runs the vk config.
if 'Vulkan' in bot:
configs = ['vk']
if 'ChromeOS' in bot:
# Just run GLES for now - maybe add gles_msaa4 in the future
configs = ['gles']
if 'Ci20' in bot:
# This bot is really slow, cut it down to just 8888.
configs = ['8888']
# This bot only differs from vanilla CPU bots in 8888 config.
if 'SK_FORCE_RASTER_PIPELINE_BLITTER' in bot:
configs = ['8888', 'srgb']
args.append('--config')
args.extend(configs)
# Test coverage counting path renderer.
if 'CCPR' in bot:
args.extend(['--pr', 'ccpr'])
# Run tests, gms, and image decoding tests everywhere.
args.extend('--src tests gm image colorImage svg'.split(' '))
if 'Vulkan' in bot and 'NexusPlayer' in bot:
args.remove('svg')
args.remove('image')
# Eventually I'd like these to pass, but for now just skip 'em.
if 'SK_FORCE_RASTER_PIPELINE_BLITTER' in bot:
args.remove('tests')
# Some people don't like verbose output.
verbose = False
blacklisted = []
def blacklist(quad):
config, src, options, name = quad.split(' ') if type(quad) is str else quad
if config == '_' or config in configs:
blacklisted.extend([config, src, options, name])
# TODO: ???
blacklist('f16 _ _ dstreadshuffle')
blacklist('glsrgb image _ _')
blacklist('glessrgb image _ _')
# Not any point to running these.
blacklist('gbr-8888 image _ _')
blacklist('gbr-8888 colorImage _ _')
if 'Valgrind' in bot:
# These take 18+ hours to run.
blacklist('pdf gm _ fontmgr_iter')
blacklist('pdf _ _ PANO_20121023_214540.jpg')
blacklist('pdf skp _ worldjournal')
blacklist('pdf skp _ desk_baidu.skp')
blacklist('pdf skp _ desk_wikipedia.skp')
blacklist('_ svg _ _')
if 'iOS' in bot:
blacklist(gl_prefix + ' skp _ _')
if 'Mac' in bot or 'iOS' in bot:
# CG fails on questionable bmps
blacklist('_ image gen_platf rgba32abf.bmp')
blacklist('_ image gen_platf rgb24prof.bmp')
blacklist('_ image gen_platf rgb24lprof.bmp')
blacklist('_ image gen_platf 8bpp-pixeldata-cropped.bmp')
blacklist('_ image gen_platf 4bpp-pixeldata-cropped.bmp')
blacklist('_ image gen_platf 32bpp-pixeldata-cropped.bmp')
blacklist('_ image gen_platf 24bpp-pixeldata-cropped.bmp')
# CG has unpredictable behavior on this questionable gif
# It's probably using uninitialized memory
blacklist('_ image gen_platf frame_larger_than_image.gif')
# CG has unpredictable behavior on incomplete pngs
# skbug.com/5774
blacklist('_ image gen_platf inc0.png')
blacklist('_ image gen_platf inc1.png')
blacklist('_ image gen_platf inc2.png')
blacklist('_ image gen_platf inc3.png')
blacklist('_ image gen_platf inc4.png')
blacklist('_ image gen_platf inc5.png')
blacklist('_ image gen_platf inc6.png')
blacklist('_ image gen_platf inc7.png')
blacklist('_ image gen_platf inc8.png')
blacklist('_ image gen_platf inc9.png')
blacklist('_ image gen_platf inc10.png')
blacklist('_ image gen_platf inc11.png')
blacklist('_ image gen_platf inc12.png')
blacklist('_ image gen_platf inc13.png')
blacklist('_ image gen_platf inc14.png')
# WIC fails on questionable bmps
if 'Win' in bot:
blacklist('_ image gen_platf rle8-height-negative.bmp')
blacklist('_ image gen_platf rle4-height-negative.bmp')
blacklist('_ image gen_platf pal8os2v2.bmp')
blacklist('_ image gen_platf pal8os2v2-16.bmp')
blacklist('_ image gen_platf rgba32abf.bmp')
blacklist('_ image gen_platf rgb24prof.bmp')
blacklist('_ image gen_platf rgb24lprof.bmp')
blacklist('_ image gen_platf 8bpp-pixeldata-cropped.bmp')
blacklist('_ image gen_platf 4bpp-pixeldata-cropped.bmp')
blacklist('_ image gen_platf 32bpp-pixeldata-cropped.bmp')
blacklist('_ image gen_platf 24bpp-pixeldata-cropped.bmp')
if 'x86_64' in bot and 'CPU' in bot:
# This GM triggers a SkSmallAllocator assert.
blacklist('_ gm _ composeshader_bitmap')
# WIC and CG fail on arithmetic jpegs
if 'Win' in bot or 'Mac' in bot:
blacklist('_ image gen_platf testimgari.jpg')
if 'Android' in bot or 'iOS' in bot:
# This test crashes the N9 (perhaps because of large malloc/frees). It also
# is fairly slow and not platform-specific. So we just disable it on all of
# Android and iOS. skia:5438
blacklist('_ test _ GrShape')
# skia:4095
bad_serialize_gms = ['bleed_image',
'c_gms',
'colortype',
'colortype_xfermodes',
'drawfilter',
'fontmgr_bounds_0.75_0',
'fontmgr_bounds_1_-0.25',
'fontmgr_bounds',
'fontmgr_match',
'fontmgr_iter',
'imagemasksubset']
# skia:5589
bad_serialize_gms.extend(['bitmapfilters',
'bitmapshaders',
'bleed',
'bleed_alpha_bmp',
'bleed_alpha_bmp_shader',
'convex_poly_clip',
'extractalpha',
'filterbitmap_checkerboard_32_32_g8',
'filterbitmap_image_mandrill_64',
'shadows',
'simpleaaclip_aaclip'])
# skia:5595
bad_serialize_gms.extend(['composeshader_bitmap',
'scaled_tilemodes_npot',
'scaled_tilemodes'])
# skia:5778
bad_serialize_gms.append('typefacerendering_pfaMac')
# skia:5942
bad_serialize_gms.append('parsedpaths')
# these use a custom image generator which doesn't serialize
bad_serialize_gms.append('ImageGeneratorExternal_rect')
bad_serialize_gms.append('ImageGeneratorExternal_shader')
# skia:6189
bad_serialize_gms.append('shadow_utils')
# Not expected to round trip encoding/decoding.
bad_serialize_gms.append('makecolorspace')
for test in bad_serialize_gms:
blacklist(['serialize-8888', 'gm', '_', test])
if 'Mac' not in bot:
for test in ['bleed_alpha_image', 'bleed_alpha_image_shader']:
blacklist(['serialize-8888', 'gm', '_', test])
# It looks like we skip these only for out-of-memory concerns.
if 'Win' in bot or 'Android' in bot:
for test in ['verylargebitmap', 'verylarge_picture_image']:
blacklist(['serialize-8888', 'gm', '_', test])
# skia:4769
for test in ['drawfilter']:
blacklist([ 'sp-8888', 'gm', '_', test])
blacklist([ 'pic-8888', 'gm', '_', test])
blacklist(['2ndpic-8888', 'gm', '_', test])
blacklist([ 'lite-8888', 'gm', '_', test])
# skia:4703
for test in ['image-cacherator-from-picture',
'image-cacherator-from-raster',
'image-cacherator-from-ctable']:
blacklist([ 'sp-8888', 'gm', '_', test])
blacklist([ 'pic-8888', 'gm', '_', test])
blacklist([ '2ndpic-8888', 'gm', '_', test])
blacklist(['serialize-8888', 'gm', '_', test])
# GM that requires raster-backed canvas
for test in ['gamut', 'complexclip4_bw', 'complexclip4_aa']:
blacklist([ 'sp-8888', 'gm', '_', test])
blacklist([ 'pic-8888', 'gm', '_', test])
blacklist([ 'lite-8888', 'gm', '_', test])
blacklist([ '2ndpic-8888', 'gm', '_', test])
blacklist(['serialize-8888', 'gm', '_', test])
# GM that not support tiles_rt
for test in ['complexclip4_bw', 'complexclip4_aa']:
blacklist([ 'tiles_rt-8888', 'gm', '_', test])
# Extensions for RAW images
r = ["arw", "cr2", "dng", "nef", "nrw", "orf", "raf", "rw2", "pef", "srw",
"ARW", "CR2", "DNG", "NEF", "NRW", "ORF", "RAF", "RW2", "PEF", "SRW"]
# skbug.com/4888
# Blacklist RAW images (and a few large PNGs) on GPU bots
# until we can resolve failures.
if 'GPU' in bot:
blacklist('_ image _ interlaced1.png')
blacklist('_ image _ interlaced2.png')
blacklist('_ image _ interlaced3.png')
for raw_ext in r:
blacklist('_ image _ .%s' % raw_ext)
# Blacklist memory intensive tests on 32-bit bots.
if ('Win2k8' in bot or 'Win8' in bot) and 'x86-' in bot:
blacklist('_ image f16 _')
blacklist('_ image _ abnormal.wbmp')
blacklist('_ image _ interlaced1.png')
blacklist('_ image _ interlaced2.png')
blacklist('_ image _ interlaced3.png')
for raw_ext in r:
blacklist('_ image _ .%s' % raw_ext)
if 'IntelHD405' in bot and 'Ubuntu16' in bot:
# skia:6331
blacklist(['glmsaa8', 'image', 'gen_codec_gpu', 'abnormal.wbmp'])
blacklist(['glesmsaa4', 'image', 'gen_codec_gpu', 'abnormal.wbmp'])
if 'Nexus5' in bot:
# skia:5876
blacklist(['_', 'gm', '_', 'encode-platform'])
if 'AndroidOne-GPU' in bot: # skia:4697, skia:4704, skia:4694, skia:4705
blacklist(['_', 'gm', '_', 'bigblurs'])
blacklist(['_', 'gm', '_', 'bleed'])
blacklist(['_', 'gm', '_', 'bleed_alpha_bmp'])
blacklist(['_', 'gm', '_', 'bleed_alpha_bmp_shader'])
blacklist(['_', 'gm', '_', 'bleed_alpha_image'])
blacklist(['_', 'gm', '_', 'bleed_alpha_image_shader'])
blacklist(['_', 'gm', '_', 'bleed_image'])
blacklist(['_', 'gm', '_', 'dropshadowimagefilter'])
blacklist(['_', 'gm', '_', 'filterfastbounds'])
blacklist([gl_prefix, 'gm', '_', 'imageblurtiled'])
blacklist(['_', 'gm', '_', 'imagefiltersclipped'])
blacklist(['_', 'gm', '_', 'imagefiltersscaled'])
blacklist(['_', 'gm', '_', 'imageresizetiled'])
blacklist(['_', 'gm', '_', 'matrixconvolution'])
blacklist(['_', 'gm', '_', 'strokedlines'])
if sample_count is not '':
gl_msaa_config = gl_prefix + 'msaa' + sample_count
blacklist([gl_msaa_config, 'gm', '_', 'imageblurtiled'])
blacklist([gl_msaa_config, 'gm', '_', 'imagefiltersbase'])
match = []
if 'Valgrind' in bot: # skia:3021
match.append('~Threaded')
if 'Valgrind' in bot and 'PreAbandonGpuContext' in bot:
# skia:6575
match.append('~multipicturedraw_')
if 'CommandBuffer' in bot:
# https://crbug.com/697030
match.append('~HalfFloatAlphaTextureTest')
if 'AndroidOne' in bot: # skia:4711
match.append('~WritePixels')
if 'NexusPlayer' in bot:
match.append('~ResourceCache')
if 'Nexus10' in bot:
match.append('~CopySurface') # skia:5509
match.append('~SRGBReadWritePixels') # skia:6097
if 'GalaxyS6' in bot:
match.append('~SpecialImage') # skia:6338
match.append('~skbug6653') # skia:6653
if 'GalaxyS7_G930A' in bot:
match.append('~WritePixels') # skia:6427
if 'MSAN' in bot:
match.extend(['~Once', '~Shared']) # Not sure what's up with these tests.
if 'TSAN' in bot:
match.extend(['~ReadWriteAlpha']) # Flaky on TSAN-covered on nvidia bots.
match.extend(['~RGBA4444TextureTest', # Flakier than they are important.
'~RGB565TextureTest'])
if 'Vulkan' in bot and 'Adreno530' in bot:
# skia:5777
match.extend(['~CopySurface'])
if 'Vulkan' in bot and 'NexusPlayer' in bot:
match.extend(['~gradients_no_texture$', # skia:6132
'~tilemodes', # skia:6132
'~shadertext$', # skia:6132
'~bitmapfilters', # skia:6132
'~GrContextFactory_abandon']) #skia:6209
if 'Vulkan' in bot and 'IntelIris540' in bot and api.vars.is_linux:
match.extend(['~VkHeapTests']) # skia:6245
if 'Intel' in bot and api.vars.is_linux and not 'Vulkan' in bot:
# TODO(dogben): Track down what's causing bots to die.
verbose = True
if 'Vulkan' in bot and 'IntelIris540' in bot and 'Win' in bot:
# skia:6398
blacklist(['vk', 'gm', '_', 'aarectmodes'])
blacklist(['vk', 'gm', '_', 'aaxfermodes'])
blacklist(['vk', 'gm', '_', 'arithmode'])
blacklist(['vk', 'gm', '_', 'composeshader_bitmap'])
blacklist(['vk', 'gm', '_', 'composeshader_bitmap2'])
blacklist(['vk', 'gm', '_', 'dftextCOLR'])
blacklist(['vk', 'gm', '_', 'drawregionmodes'])
blacklist(['vk', 'gm', '_', 'filterfastbounds'])
blacklist(['vk', 'gm', '_', 'fontcache'])
blacklist(['vk', 'gm', '_', 'fontmgr_iterWin10'])
blacklist(['vk', 'gm', '_', 'fontmgr_iter_factoryWin10'])
blacklist(['vk', 'gm', '_', 'fontmgr_matchWin10'])
blacklist(['vk', 'gm', '_', 'fontscalerWin'])
blacklist(['vk', 'gm', '_', 'fontscalerdistortable'])
blacklist(['vk', 'gm', '_', 'gammagradienttext'])
blacklist(['vk', 'gm', '_', 'gammatextWin'])
blacklist(['vk', 'gm', '_', 'gradtext'])
blacklist(['vk', 'gm', '_', 'hairmodes'])
blacklist(['vk', 'gm', '_', 'imagefilters_xfermodes'])
blacklist(['vk', 'gm', '_', 'imagefiltersclipped'])
blacklist(['vk', 'gm', '_', 'imagefiltersgraph'])
blacklist(['vk', 'gm', '_', 'imagefiltersscaled'])
blacklist(['vk', 'gm', '_', 'imagefiltersstroked'])
blacklist(['vk', 'gm', '_', 'imagefilterstransformed'])
blacklist(['vk', 'gm', '_', 'imageresizetiled'])
blacklist(['vk', 'gm', '_', 'lcdblendmodes'])
blacklist(['vk', 'gm', '_', 'lcdoverlap'])
blacklist(['vk', 'gm', '_', 'lcdtextWin'])
blacklist(['vk', 'gm', '_', 'lcdtextsize'])
blacklist(['vk', 'gm', '_', 'matriximagefilter'])
blacklist(['vk', 'gm', '_', 'mixedtextblobsCOLR'])
blacklist(['vk', 'gm', '_', 'mixershader'])
blacklist(['vk', 'gm', '_', 'pictureimagefilter'])
blacklist(['vk', 'gm', '_', 'resizeimagefilter'])
blacklist(['vk', 'gm', '_', 'rotate_imagefilter'])
blacklist(['vk', 'gm', '_', 'savelayer_lcdtext'])
blacklist(['vk', 'gm', '_', 'srcmode'])
blacklist(['vk', 'gm', '_', 'surfaceprops'])
blacklist(['vk', 'gm', '_', 'textblobgeometrychange'])
blacklist(['vk', 'gm', '_', 'textbloblooper'])
blacklist(['vk', 'gm', '_', 'textblobmixedsizes'])
blacklist(['vk', 'gm', '_', 'textblobmixedsizes_df'])
blacklist(['vk', 'gm', '_', 'textblobrandomfont'])
blacklist(['vk', 'gm', '_', 'textfilter_color'])
blacklist(['vk', 'gm', '_', 'textfilter_image'])
blacklist(['vk', 'gm', '_', 'typefacerenderingWin'])
blacklist(['vk', 'gm', '_', 'varied_text_clipped_lcd'])
blacklist(['vk', 'gm', '_', 'varied_text_ignorable_clip_lcd'])
blacklist(['vk', 'gm', '_', 'xfermodeimagefilter'])
match.append('~ApplyGamma')
match.append('~ComposedImageFilterBounds_Gpu')
match.append('~DeferredTextureImage')
match.append('~GrMeshTest')
match.append('~ImageFilterFailAffectsTransparentBlack_Gpu')
match.append('~ImageFilterZeroBlurSigma_Gpu')
match.append('~ImageNewShader_GPU')
match.append('~NewTextureFromPixmap')
match.append('~ReadPixels_Gpu')
match.append('~ReadPixels_Texture')
match.append('~ReadWriteAlpha')
match.append('~skbug6653')
match.append('~SRGBReadWritePixels')
match.append('~SpecialImage_DeferredGpu')
match.append('~SpecialImage_Gpu')
match.append('~WritePixels_Gpu')
match.append('~WritePixelsNonTexture_Gpu')
match.append('~XfermodeImageFilterCroppedInput_Gpu')
if 'IntelIris540' in bot and 'ANGLE' in bot:
for config in ['angle_d3d9_es2', 'angle_d3d11_es2', 'angle_gl_es2']:
# skia:6103
blacklist([config, 'gm', '_', 'multipicturedraw_invpathclip_simple'])
blacklist([config, 'gm', '_', 'multipicturedraw_noclip_simple'])
blacklist([config, 'gm', '_', 'multipicturedraw_pathclip_simple'])
blacklist([config, 'gm', '_', 'multipicturedraw_rectclip_simple'])
blacklist([config, 'gm', '_', 'multipicturedraw_rrectclip_simple'])
# skia:6141
blacklist([config, 'gm', '_', 'discard'])
if 'IntelBayTrail' in bot and api.vars.is_linux:
match.append('~ImageStorageLoad') # skia:6358
if 'Ci20' in bot:
match.append('~Codec_Dimensions') # skia:6477
match.append('~FontMgrAndroidParser') # skia:6478
match.append('~PathOpsSimplify') # skia:6479
blacklist(['_', 'gm', '_', 'fast_slow_blurimagefilter']) # skia:6480
if ('Win10' in bot and 'Vulkan' in bot
and ('GTX1070' in bot or 'GTX660' in bot)):
blacklist('_ test _ SkImage_makeTextureImage') # skia:6554
if blacklisted:
args.append('--blacklist')
args.extend(blacklisted)
if match:
args.append('--match')
args.extend(match)
# These bots run out of memory running RAW codec tests. Do not run them in
# parallel
if ('NexusPlayer' in bot or 'Nexus5' in bot or 'Nexus9' in bot
or 'Win8-MSVC-ShuttleB' in bot):
args.append('--noRAW_threading')
if 'Valgrind' in bot and 'PreAbandonGpuContext' in bot:
verbose = True
if 'NexusPlayer' in bot and 'CPU' in bot:
# The Nexus Player's image decoding tests are slow enough that swarming
# times it out for not printing anything frequently enough. --verbose
# makes dm print something every time we start or complete a task.
verbose = True
if verbose:
args.append('--verbose')
return args
def key_params(api):
  """Build a unique key from the builder name (as a list).
  E.g. arch x86 gpu GeForce320M mode MacMini4.1 os Mac10.6
  """
  # 'role' is always Test, and trybot results are uploaded elsewhere so
  # they can share the same key -- leave both out of the key.
  skip = ('role', 'is_trybot')
  cfg = api.vars.builder_cfg
  params = []
  for key in sorted(cfg):
    if key in skip:
      continue
    params += [key, cfg[key]]
  return params
def test_steps(api):
  """Run the DM test.

  Builds the full dm command line (resource paths, key/property pairs, plus
  the configs and blacklists from dm_flags), optionally fetches the list of
  already-triaged Gold hashes first, runs dm via the flavor module, and
  copies results back to the host when uploading is enabled.
  """
  use_hash_file = False
  if api.vars.upload_dm_results:
    # This must run before we write anything into
    # api.flavor.device_dirs.dm_dir or we may end up deleting our
    # output on machines where they're the same.
    api.flavor.create_clean_host_dir(api.vars.dm_dir)
    host_dm_dir = str(api.vars.dm_dir)
    device_dm_dir = str(api.flavor.device_dirs.dm_dir)
    if host_dm_dir != device_dm_dir:
      api.flavor.create_clean_device_dir(device_dm_dir)
    # Obtain the list of already-generated hashes.
    hash_filename = 'uninteresting_hashes.txt'
    # Ensure that the tmp_dir exists.
    api.run.run_once(api.file.ensure_directory,
                     'makedirs tmp_dir',
                     api.vars.tmp_dir)
    host_hashes_file = api.vars.tmp_dir.join(hash_filename)
    hashes_file = api.flavor.device_path_join(
        api.flavor.device_dirs.tmp_dir, hash_filename)
    # Download the hash list with retries and exponential backoff. Failures
    # are non-fatal (abort_on_failure=False / fail_build_on_failure=False
    # below): dm simply runs without the uninteresting-hash filter.
    api.run(
        api.python.inline,
        'get uninteresting hashes',
        program="""
        import contextlib
        import math
        import socket
        import sys
        import time
        import urllib2
        HASHES_URL = 'https://storage.googleapis.com/skia-infra-gm/hash_files/gold-prod-hashes.txt'
        RETRIES = 5
        TIMEOUT = 60
        WAIT_BASE = 15
        socket.setdefaulttimeout(TIMEOUT)
        for retry in range(RETRIES):
          try:
            with contextlib.closing(
                urllib2.urlopen(HASHES_URL, timeout=TIMEOUT)) as w:
              hashes = w.read()
              with open(sys.argv[1], 'w') as f:
                f.write(hashes)
                break
          except Exception as e:
            print 'Failed to get uninteresting hashes from %s:' % HASHES_URL
            print e
            if retry == RETRIES:
              raise
            waittime = WAIT_BASE * math.pow(2, retry)
            print 'Retry in %d seconds.' % waittime
            time.sleep(waittime)
        """,
        args=[host_hashes_file],
        abort_on_failure=False,
        fail_build_on_failure=False,
        infra_step=True)
    # Only pass the hash file to dm if the download actually produced it.
    if api.path.exists(host_hashes_file):
      api.flavor.copy_file_to_device(host_hashes_file, hashes_file)
      use_hash_file = True
  # Run DM.
  # Key/value pairs reported alongside results via --properties.
  properties = [
    'gitHash', api.vars.got_revision,
    'builder', api.vars.builder_name,
  ]
  if api.vars.is_trybot:
    properties.extend([
      'issue', api.vars.issue,
      'patchset', api.vars.patchset,
      'patch_storage', api.vars.patch_storage,
    ])
  properties.extend(['swarming_bot_id', api.vars.swarming_bot_id])
  properties.extend(['swarming_task_id', api.vars.swarming_task_id])
  args = [
    'dm',
    '--undefok',   # This helps branches that may not know new flags.
    '--resourcePath', api.flavor.device_dirs.resource_dir,
    '--skps', api.flavor.device_dirs.skp_dir,
    '--images', api.flavor.device_path_join(
        api.flavor.device_dirs.images_dir, 'dm'),
    '--colorImages', api.flavor.device_path_join(
        api.flavor.device_dirs.images_dir, 'colorspace'),
    '--nameByHash',
    '--properties'
  ] + properties
  args.extend(['--svgs', api.flavor.device_dirs.svg_dir])
  args.append('--key')
  args.extend(key_params(api))
  if use_hash_file:
    args.extend(['--uninterestingHashesFile', hashes_file])
  if api.vars.upload_dm_results:
    args.extend(['--writePath', api.flavor.device_dirs.dm_dir])
  # Skip whichever half of dm (CPU or GPU) this builder doesn't cover.
  skip_flag = None
  if api.vars.builder_cfg.get('cpu_or_gpu') == 'CPU':
    skip_flag = '--nogpu'
  elif api.vars.builder_cfg.get('cpu_or_gpu') == 'GPU':
    skip_flag = '--nocpu'
  if skip_flag:
    args.append(skip_flag)
  args.extend(dm_flags(api, api.vars.builder_name))
  env = {}
  if 'Ubuntu16' in api.vars.builder_name:
    # The vulkan in this asset name simply means that the graphics driver
    # supports Vulkan. It is also the driver used for GL code.
    dri_path = api.vars.slave_dir.join('linux_vulkan_intel_driver_release')
    if 'Debug' in api.vars.builder_name:
      dri_path = api.vars.slave_dir.join('linux_vulkan_intel_driver_debug')
    if 'Vulkan' in api.vars.builder_name:
      sdk_path = api.vars.slave_dir.join('linux_vulkan_sdk', 'bin')
      lib_path = api.vars.slave_dir.join('linux_vulkan_sdk', 'lib')
      env.update({
        'PATH':'%%(PATH)s:%s' % sdk_path,
        'LD_LIBRARY_PATH': '%s:%s' % (lib_path, dri_path),
        'LIBGL_DRIVERS_PATH': dri_path,
        'VK_ICD_FILENAMES':'%s' % dri_path.join('intel_icd.x86_64.json'),
      })
    else:
      # Even the non-vulkan NUC jobs could benefit from the newer drivers.
      env.update({
        'LD_LIBRARY_PATH': dri_path,
        'LIBGL_DRIVERS_PATH': dri_path,
      })
  # See skia:2789.
  extra_config_parts = api.vars.builder_cfg.get('extra_config', '').split('_')
  if 'AbandonGpuContext' in extra_config_parts:
    args.append('--abandonGpuContext')
  if 'PreAbandonGpuContext' in extra_config_parts:
    args.append('--preAbandonGpuContext')
  if 'ReleaseAndAbandonGpuContext' in extra_config_parts:
    args.append('--releaseAndAbandonGpuContext')
  with api.env(env):
    api.run(api.flavor.step, 'dm', cmd=args, abort_on_failure=False)
  if api.vars.upload_dm_results:
    # Copy images and JSON to host machine if needed.
    api.flavor.copy_directory_contents_to_host(
        api.flavor.device_dirs.dm_dir, api.vars.dm_dir)
def RunSteps(api):
  """Recipe entry point: set up, install everything, run DM, clean up."""
  api.core.setup()
  extra_env = {}
  if 'iOS' in api.vars.builder_name:
    # iOS runs need to know which app to drive and where to mount the device.
    extra_env = {
        'IOS_BUNDLE_ID': 'com.google.dm',
        'IOS_MOUNT_POINT': api.vars.slave_dir.join('mnt_iosdevice'),
    }
  with api.context(env=extra_env):
    try:
      api.flavor.install_everything()
      test_steps(api)
    finally:
      # Cleanup runs even when installation or the test itself raised.
      api.flavor.cleanup_steps()
    api.run.check_failure()
# Builders exercised by GenTests below; each entry gets one simulation run.
TEST_BUILDERS = [
  # Android devices.
  'Test-Android-Clang-AndroidOne-GPU-Mali400MP2-arm-Release-Android',
  'Test-Android-Clang-Ci20-CPU-IngenicJZ4780-mipsel-Release-Android',
  'Test-Android-Clang-GalaxyS6-GPU-MaliT760-arm64-Debug-Android',
  'Test-Android-Clang-GalaxyS7_G930A-GPU-Adreno530-arm64-Debug-Android',
  'Test-Android-Clang-NVIDIA_Shield-GPU-TegraX1-arm64-Debug-Android',
  'Test-Android-Clang-Nexus10-GPU-MaliT604-arm-Release-Android',
  'Test-Android-Clang-Nexus5-GPU-Adreno330-arm-Release-Android',
  'Test-Android-Clang-PixelXL-GPU-Adreno530-arm64-Debug-Android_CCPR',
  'Test-Android-Clang-Nexus6p-GPU-Adreno430-arm64-Debug-Android_Vulkan',
  'Test-Android-Clang-PixelXL-GPU-Adreno530-arm64-Debug-Android_Vulkan',
  'Test-Android-Clang-Nexus7-GPU-Tegra3-arm-Debug-Android',
  'Test-Android-Clang-NexusPlayer-CPU-SSE4-x86-Release-Android',
  'Test-Android-Clang-NexusPlayer-GPU-PowerVR-x86-Release-Android_Vulkan',
  'Test-Android-Clang-PixelC-CPU-TegraX1-arm64-Debug-Android',
  # ChromeOS / Mac.
  'Test-ChromeOS-Clang-Chromebook_C100p-GPU-MaliT764-arm-Debug',
  'Test-Mac-Clang-MacMini6.2-CPU-AVX-x86_64-Debug',
  'Test-Mac-Clang-MacMini6.2-GPU-IntelHD4000-x86_64-Debug-CommandBuffer',
  # Linux.
  'Test-Ubuntu-Clang-GCE-CPU-AVX2-x86_64-Debug-ASAN',
  'Test-Ubuntu-Clang-GCE-CPU-AVX2-x86_64-Debug-MSAN',
  'Test-Ubuntu-Clang-GCE-CPU-AVX2-x86_64-Release-TSAN',
  'Test-Ubuntu-GCC-GCE-CPU-AVX2-x86-Debug',
  'Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Debug',
  'Test-Ubuntu-GCC-ShuttleA-GPU-GTX550Ti-x86_64-Release-Valgrind',
  'Test-Ubuntu-GCC-ShuttleA-GPU-GTX550Ti-x86_64-Release-Valgrind'
  '_AbandonGpuContext',
  'Test-Ubuntu-GCC-ShuttleA-GPU-GTX550Ti-x86_64-Release-Valgrind'
  '_PreAbandonGpuContext',
  'Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Debug-SK_USE_DISCARDABLE_'
  'SCALEDIMAGECACHE',
  'Test-Ubuntu16-Clang-NUC5PPYH-GPU-IntelHD405-x86_64-Debug',
  'Test-Ubuntu16-Clang-NUC6i5SYK-GPU-IntelIris540-x86_64-Debug-Vulkan',
  'Test-Ubuntu16-Clang-NUC6i5SYK-GPU-IntelIris540-x86_64-Release',
  'Test-Ubuntu16-Clang-NUCDE3815TYKHE-GPU-IntelBayTrail-x86_64-Debug',
  # Windows.
  'Test-Win8-MSVC-Golo-CPU-AVX-x86-Debug',
  'Test-Win10-MSVC-AlphaR2-GPU-RadeonR9M470X-x86_64-Debug-Vulkan',
  'Test-Win10-MSVC-NUC5i7RYH-GPU-IntelIris6100-x86_64-Release-'
  'ReleaseAndAbandonGpuContext',
  'Test-Win10-MSVC-NUC6i5SYK-GPU-IntelIris540-x86_64-Debug-ANGLE',
  'Test-Win10-MSVC-NUC6i5SYK-GPU-IntelIris540-x86_64-Debug-Vulkan',
  'Test-Win10-MSVC-ShuttleA-GPU-GTX660-x86_64-Debug-Vulkan',
  'Test-Win10-MSVC-ShuttleC-GPU-GTX960-x86_64-Debug-ANGLE',
  'Test-Win10-MSVC-ZBOX-GPU-GTX1070-x86_64-Debug-Vulkan',
  # iOS.
  'Test-iOS-Clang-iPadMini4-GPU-GX6450-arm-Release',
  'Test-Ubuntu-Clang-GCE-CPU-AVX2-x86_64-Release-'
  'SK_FORCE_RASTER_PIPELINE_BLITTER',
]
def GenTests(api):
  """Yield simulation tests: one per TEST_BUILDERS entry, then a trybot run
  and several injected-failure scenarios."""
  def common_props(builder):
    # Properties every simulated run starts from.
    return api.properties(buildername=builder,
                          revision='abc123',
                          path_config='kitchen',
                          swarm_out_dir='[SWARM_OUT_DIR]')

  def existing_paths():
    # Paths the recipe expects to already exist on disk.
    start = api.path['start_dir']
    return api.path.exists(
        start.join('skia'),
        start.join('skia', 'infra', 'bots', 'assets', 'skimage', 'VERSION'),
        start.join('skia', 'infra', 'bots', 'assets', 'skp', 'VERSION'),
        start.join('skia', 'infra', 'bots', 'assets', 'svg', 'VERSION'),
        start.join('tmp', 'uninteresting_hashes.txt'))

  for builder in TEST_BUILDERS:
    test = (api.test(builder) +
            common_props(builder) +
            existing_paths() +
            api.step_data('get swarming bot id',
                          stdout=api.raw_io.output('skia-bot-123')) +
            api.step_data('get swarming task id',
                          stdout=api.raw_io.output('123456')))
    if 'Win' in builder:
      test += api.platform('win', 64)
    if 'ChromeOS' in builder:
      test += api.step_data(
          'read chromeos ip',
          stdout=api.raw_io.output('{"user_ip":"foo@127.0.0.1"}'))
    yield test

  builder = 'Test-Win2k8-MSVC-GCE-CPU-AVX2-x86_64-Release'
  yield (api.test('trybot') +
         common_props(builder) +
         api.properties(patch_storage='gerrit') +
         api.properties.tryserver(
             buildername=builder,
             gerrit_project='skia',
             gerrit_url='https://skia-review.googlesource.com/') +
         existing_paths())

  builder = 'Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Debug'
  yield (api.test('failed_dm') +
         common_props(builder) +
         existing_paths() +
         api.step_data('symbolized dm', retcode=1))

  builder = 'Test-Android-Clang-Nexus7-GPU-Tegra3-arm-Debug-Android'
  yield (api.test('failed_get_hashes') +
         common_props(builder) +
         existing_paths() +
         api.step_data('get uninteresting hashes', retcode=1))

  builder = 'Test-Android-Clang-NexusPlayer-CPU-SSE4-x86-Debug-Android'
  yield (api.test('failed_push') +
         common_props(builder) +
         existing_paths() +
         api.step_data('push [START_DIR]/skia/resources/* '
                       '/sdcard/revenge_of_the_skiabot/resources', retcode=1))

  builder = 'Test-Android-Clang-Nexus10-GPU-MaliT604-arm-Debug-Android'
  yield (api.test('failed_pull') +
         common_props(builder) +
         existing_paths() +
         api.step_data('dm', retcode=1) +
         api.step_data('pull /sdcard/revenge_of_the_skiabot/dm_out '
                       '[CUSTOM_[SWARM_OUT_DIR]]/dm', retcode=1))
| 38.35288 | 99 | 0.62413 |
# Recipe module dependencies. 'recipe_engine/...' entries come from the
# shared recipe engine; the unprefixed names ('core', 'env', 'flavor',
# 'run', 'vars') are presumably this repo's own recipe modules.
DEPS = [
  'core',
  'env',
  'flavor',
  'recipe_engine/context',
  'recipe_engine/file',
  'recipe_engine/json',
  'recipe_engine/path',
  'recipe_engine/platform',
  'recipe_engine/properties',
  'recipe_engine/python',
  'recipe_engine/raw_io',
  'recipe_engine/step',
  'run',
  'vars',
]
def dm_flags(api, bot):
args = []
args.append('--randomProcessorTest')
if '-x86-' in bot and not 'NexusPlayer' in bot:
args.extend(['--threads', '4'])
if 'Test' in bot and 'DISCARDABLE' in bot:
args.extend(['--threads', '0'])
if 'Test-iOS' in bot:
args.extend(['--threads', '0'])
configs = ['8888', 'srgb', 'pdf']
sample_count = '8'
gl_prefix = 'gl'
if 'Android' in bot or 'iOS' in bot:
sample_count = '4'
if 'NVIDIA_Shield' not in bot:
gl_prefix = 'gles'
elif 'Intel' in bot:
sample_count = ''
elif 'ChromeOS' in bot:
gl_prefix = 'gles'
configs.extend([gl_prefix, gl_prefix + 'dft', gl_prefix + 'srgb'])
if sample_count is not '':
configs.append(gl_prefix + 'msaa' + sample_count)
if ('NexusPlayer' in bot or
'Tegra3' in bot or
# We aren't interested in fixing msaa bugs on current iOS devices.
'iPad4' in bot or
'iPadPro' in bot or
'iPhone6' in bot or
'iPhone7' in bot or
'IntelHD530' in bot or
'IntelIris540' in bot):
configs = [x for x in configs if 'msaa' not in x]
if 'NexusPlayer' in bot:
configs = [x for x in configs if 'dft' not in x]
if 'Android' in bot:
configs.remove('pdf')
if '-GCE-' in bot:
configs.extend(['565'])
configs.extend(['f16'])
configs.extend(['sp-8888', '2ndpic-8888'])
configs.extend(['lite-8888'])
configs.extend(['gbr-8888'])
if '-TSAN' not in bot and sample_count is not '':
if ('TegraK1' in bot or
'TegraX1' in bot or
'GTX550Ti' in bot or
'GTX660' in bot or
'GT610' in bot):
configs.append(gl_prefix + 'nvprdit' + sample_count)
if 'Intel' in bot and api.vars.is_linux:
configs.extend(['gles', 'glesdft', 'glessrgb'])
if 'NexusPlayer' not in bot:
configs.extend(mode + '-8888' for mode in
['serialize', 'tiles_rt', 'pic'])
if 'Nexus6' in bot:
configs.append(gl_prefix + 'inst')
elif 'NVIDIA_Shield' in bot or 'PixelC' in bot:
# Multisampled instanced configs use nvpr so we substitute inst msaa
# configs for nvpr msaa configs.
old = gl_prefix + 'nvpr'
new = gl_prefix + 'inst'
configs = [x.replace(old, new) for x in configs]
# We also test non-msaa instanced.
configs.append(new)
elif 'MacMini6.2' in bot and sample_count is not '':
configs.extend([gl_prefix + 'inst', gl_prefix + 'inst' + sample_count])
# CommandBuffer bot *only* runs the command_buffer config.
if 'CommandBuffer' in bot:
configs = ['commandbuffer']
# ANGLE bot *only* runs the angle configs
if 'ANGLE' in bot:
configs = ['angle_d3d11_es2',
'angle_d3d9_es2',
'angle_gl_es2',
'angle_d3d11_es3']
if sample_count is not '':
configs.append('angle_d3d11_es2_msaa' + sample_count)
configs.append('angle_d3d11_es3_msaa' + sample_count)
# Vulkan bot *only* runs the vk config.
if 'Vulkan' in bot:
configs = ['vk']
if 'ChromeOS' in bot:
# Just run GLES for now - maybe add gles_msaa4 in the future
configs = ['gles']
if 'Ci20' in bot:
# This bot is really slow, cut it down to just 8888.
configs = ['8888']
# This bot only differs from vanilla CPU bots in 8888 config.
if 'SK_FORCE_RASTER_PIPELINE_BLITTER' in bot:
configs = ['8888', 'srgb']
args.append('--config')
args.extend(configs)
# Test coverage counting path renderer.
if 'CCPR' in bot:
args.extend(['--pr', 'ccpr'])
# Run tests, gms, and image decoding tests everywhere.
args.extend('--src tests gm image colorImage svg'.split(' '))
if 'Vulkan' in bot and 'NexusPlayer' in bot:
args.remove('svg')
args.remove('image')
# Eventually I'd like these to pass, but for now just skip 'em.
if 'SK_FORCE_RASTER_PIPELINE_BLITTER' in bot:
args.remove('tests')
# Some people don't like verbose output.
verbose = False
blacklisted = []
def blacklist(quad):
config, src, options, name = quad.split(' ') if type(quad) is str else quad
if config == '_' or config in configs:
blacklisted.extend([config, src, options, name])
blacklist('f16 _ _ dstreadshuffle')
blacklist('glsrgb image _ _')
blacklist('glessrgb image _ _')
blacklist('gbr-8888 image _ _')
blacklist('gbr-8888 colorImage _ _')
if 'Valgrind' in bot:
blacklist('pdf gm _ fontmgr_iter')
blacklist('pdf _ _ PANO_20121023_214540.jpg')
blacklist('pdf skp _ worldjournal')
blacklist('pdf skp _ desk_baidu.skp')
blacklist('pdf skp _ desk_wikipedia.skp')
blacklist('_ svg _ _')
if 'iOS' in bot:
blacklist(gl_prefix + ' skp _ _')
if 'Mac' in bot or 'iOS' in bot:
blacklist('_ image gen_platf rgba32abf.bmp')
blacklist('_ image gen_platf rgb24prof.bmp')
blacklist('_ image gen_platf rgb24lprof.bmp')
blacklist('_ image gen_platf 8bpp-pixeldata-cropped.bmp')
blacklist('_ image gen_platf 4bpp-pixeldata-cropped.bmp')
blacklist('_ image gen_platf 32bpp-pixeldata-cropped.bmp')
blacklist('_ image gen_platf 24bpp-pixeldata-cropped.bmp')
blacklist('_ image gen_platf frame_larger_than_image.gif')
# CG has unpredictable behavior on incomplete pngs
# skbug.com/5774
blacklist('_ image gen_platf inc0.png')
blacklist('_ image gen_platf inc1.png')
blacklist('_ image gen_platf inc2.png')
blacklist('_ image gen_platf inc3.png')
blacklist('_ image gen_platf inc4.png')
blacklist('_ image gen_platf inc5.png')
blacklist('_ image gen_platf inc6.png')
blacklist('_ image gen_platf inc7.png')
blacklist('_ image gen_platf inc8.png')
blacklist('_ image gen_platf inc9.png')
blacklist('_ image gen_platf inc10.png')
blacklist('_ image gen_platf inc11.png')
blacklist('_ image gen_platf inc12.png')
blacklist('_ image gen_platf inc13.png')
blacklist('_ image gen_platf inc14.png')
# WIC fails on questionable bmps
if 'Win' in bot:
blacklist('_ image gen_platf rle8-height-negative.bmp')
blacklist('_ image gen_platf rle4-height-negative.bmp')
blacklist('_ image gen_platf pal8os2v2.bmp')
blacklist('_ image gen_platf pal8os2v2-16.bmp')
blacklist('_ image gen_platf rgba32abf.bmp')
blacklist('_ image gen_platf rgb24prof.bmp')
blacklist('_ image gen_platf rgb24lprof.bmp')
blacklist('_ image gen_platf 8bpp-pixeldata-cropped.bmp')
blacklist('_ image gen_platf 4bpp-pixeldata-cropped.bmp')
blacklist('_ image gen_platf 32bpp-pixeldata-cropped.bmp')
blacklist('_ image gen_platf 24bpp-pixeldata-cropped.bmp')
if 'x86_64' in bot and 'CPU' in bot:
# This GM triggers a SkSmallAllocator assert.
blacklist('_ gm _ composeshader_bitmap')
# WIC and CG fail on arithmetic jpegs
if 'Win' in bot or 'Mac' in bot:
blacklist('_ image gen_platf testimgari.jpg')
if 'Android' in bot or 'iOS' in bot:
# This test crashes the N9 (perhaps because of large malloc/frees). It also
# is fairly slow and not platform-specific. So we just disable it on all of
# Android and iOS. skia:5438
blacklist('_ test _ GrShape')
# skia:4095
bad_serialize_gms = ['bleed_image',
'c_gms',
'colortype',
'colortype_xfermodes',
'drawfilter',
'fontmgr_bounds_0.75_0',
'fontmgr_bounds_1_-0.25',
'fontmgr_bounds',
'fontmgr_match',
'fontmgr_iter',
'imagemasksubset']
# skia:5589
bad_serialize_gms.extend(['bitmapfilters',
'bitmapshaders',
'bleed',
'bleed_alpha_bmp',
'bleed_alpha_bmp_shader',
'convex_poly_clip',
'extractalpha',
'filterbitmap_checkerboard_32_32_g8',
'filterbitmap_image_mandrill_64',
'shadows',
'simpleaaclip_aaclip'])
# skia:5595
bad_serialize_gms.extend(['composeshader_bitmap',
'scaled_tilemodes_npot',
'scaled_tilemodes'])
# skia:5778
bad_serialize_gms.append('typefacerendering_pfaMac')
# skia:5942
bad_serialize_gms.append('parsedpaths')
# these use a custom image generator which doesn't serialize
bad_serialize_gms.append('ImageGeneratorExternal_rect')
bad_serialize_gms.append('ImageGeneratorExternal_shader')
bad_serialize_gms.append('shadow_utils')
bad_serialize_gms.append('makecolorspace')
for test in bad_serialize_gms:
blacklist(['serialize-8888', 'gm', '_', test])
if 'Mac' not in bot:
for test in ['bleed_alpha_image', 'bleed_alpha_image_shader']:
blacklist(['serialize-8888', 'gm', '_', test])
if 'Win' in bot or 'Android' in bot:
for test in ['verylargebitmap', 'verylarge_picture_image']:
blacklist(['serialize-8888', 'gm', '_', test])
for test in ['drawfilter']:
blacklist([ 'sp-8888', 'gm', '_', test])
blacklist([ 'pic-8888', 'gm', '_', test])
blacklist(['2ndpic-8888', 'gm', '_', test])
blacklist([ 'lite-8888', 'gm', '_', test])
for test in ['image-cacherator-from-picture',
'image-cacherator-from-raster',
'image-cacherator-from-ctable']:
blacklist([ 'sp-8888', 'gm', '_', test])
blacklist([ 'pic-8888', 'gm', '_', test])
blacklist([ '2ndpic-8888', 'gm', '_', test])
blacklist(['serialize-8888', 'gm', '_', test])
for test in ['gamut', 'complexclip4_bw', 'complexclip4_aa']:
blacklist([ 'sp-8888', 'gm', '_', test])
blacklist([ 'pic-8888', 'gm', '_', test])
blacklist([ 'lite-8888', 'gm', '_', test])
blacklist([ '2ndpic-8888', 'gm', '_', test])
blacklist(['serialize-8888', 'gm', '_', test])
for test in ['complexclip4_bw', 'complexclip4_aa']:
blacklist([ 'tiles_rt-8888', 'gm', '_', test])
r = ["arw", "cr2", "dng", "nef", "nrw", "orf", "raf", "rw2", "pef", "srw",
"ARW", "CR2", "DNG", "NEF", "NRW", "ORF", "RAF", "RW2", "PEF", "SRW"]
if 'GPU' in bot:
blacklist('_ image _ interlaced1.png')
blacklist('_ image _ interlaced2.png')
blacklist('_ image _ interlaced3.png')
for raw_ext in r:
blacklist('_ image _ .%s' % raw_ext)
if ('Win2k8' in bot or 'Win8' in bot) and 'x86-' in bot:
blacklist('_ image f16 _')
blacklist('_ image _ abnormal.wbmp')
blacklist('_ image _ interlaced1.png')
blacklist('_ image _ interlaced2.png')
blacklist('_ image _ interlaced3.png')
for raw_ext in r:
blacklist('_ image _ .%s' % raw_ext)
if 'IntelHD405' in bot and 'Ubuntu16' in bot:
blacklist(['glmsaa8', 'image', 'gen_codec_gpu', 'abnormal.wbmp'])
blacklist(['glesmsaa4', 'image', 'gen_codec_gpu', 'abnormal.wbmp'])
if 'Nexus5' in bot:
blacklist(['_', 'gm', '_', 'encode-platform'])
if 'AndroidOne-GPU' in bot:
blacklist(['_', 'gm', '_', 'bigblurs'])
blacklist(['_', 'gm', '_', 'bleed'])
blacklist(['_', 'gm', '_', 'bleed_alpha_bmp'])
blacklist(['_', 'gm', '_', 'bleed_alpha_bmp_shader'])
blacklist(['_', 'gm', '_', 'bleed_alpha_image'])
blacklist(['_', 'gm', '_', 'bleed_alpha_image_shader'])
blacklist(['_', 'gm', '_', 'bleed_image'])
blacklist(['_', 'gm', '_', 'dropshadowimagefilter'])
blacklist(['_', 'gm', '_', 'filterfastbounds'])
blacklist([gl_prefix, 'gm', '_', 'imageblurtiled'])
blacklist(['_', 'gm', '_', 'imagefiltersclipped'])
blacklist(['_', 'gm', '_', 'imagefiltersscaled'])
blacklist(['_', 'gm', '_', 'imageresizetiled'])
blacklist(['_', 'gm', '_', 'matrixconvolution'])
blacklist(['_', 'gm', '_', 'strokedlines'])
if sample_count is not '':
gl_msaa_config = gl_prefix + 'msaa' + sample_count
blacklist([gl_msaa_config, 'gm', '_', 'imageblurtiled'])
blacklist([gl_msaa_config, 'gm', '_', 'imagefiltersbase'])
match = []
if 'Valgrind' in bot:
match.append('~Threaded')
if 'Valgrind' in bot and 'PreAbandonGpuContext' in bot:
match.append('~multipicturedraw_')
if 'CommandBuffer' in bot:
match.append('~HalfFloatAlphaTextureTest')
if 'AndroidOne' in bot:
match.append('~WritePixels')
if 'NexusPlayer' in bot:
match.append('~ResourceCache')
if 'Nexus10' in bot:
match.append('~CopySurface')
match.append('~SRGBReadWritePixels')
if 'GalaxyS6' in bot:
match.append('~SpecialImage')
match.append('~skbug6653')
if 'GalaxyS7_G930A' in bot:
match.append('~WritePixels')
if 'MSAN' in bot:
match.extend(['~Once', '~Shared'])
if 'TSAN' in bot:
match.extend(['~ReadWriteAlpha']) # Flaky on TSAN-covered on nvidia bots.
match.extend(['~RGBA4444TextureTest', # Flakier than they are important.
'~RGB565TextureTest'])
if 'Vulkan' in bot and 'Adreno530' in bot:
# skia:5777
match.extend(['~CopySurface'])
if 'Vulkan' in bot and 'NexusPlayer' in bot:
match.extend(['~gradients_no_texture$', # skia:6132
'~tilemodes', # skia:6132
'~shadertext$', # skia:6132
'~bitmapfilters', # skia:6132
'~GrContextFactory_abandon']) #skia:6209
if 'Vulkan' in bot and 'IntelIris540' in bot and api.vars.is_linux:
match.extend(['~VkHeapTests']) # skia:6245
if 'Intel' in bot and api.vars.is_linux and not 'Vulkan' in bot:
# TODO(dogben): Track down what's causing bots to die.
verbose = True
if 'Vulkan' in bot and 'IntelIris540' in bot and 'Win' in bot:
blacklist(['vk', 'gm', '_', 'aarectmodes'])
blacklist(['vk', 'gm', '_', 'aaxfermodes'])
blacklist(['vk', 'gm', '_', 'arithmode'])
blacklist(['vk', 'gm', '_', 'composeshader_bitmap'])
blacklist(['vk', 'gm', '_', 'composeshader_bitmap2'])
blacklist(['vk', 'gm', '_', 'dftextCOLR'])
blacklist(['vk', 'gm', '_', 'drawregionmodes'])
blacklist(['vk', 'gm', '_', 'filterfastbounds'])
blacklist(['vk', 'gm', '_', 'fontcache'])
blacklist(['vk', 'gm', '_', 'fontmgr_iterWin10'])
blacklist(['vk', 'gm', '_', 'fontmgr_iter_factoryWin10'])
blacklist(['vk', 'gm', '_', 'fontmgr_matchWin10'])
blacklist(['vk', 'gm', '_', 'fontscalerWin'])
blacklist(['vk', 'gm', '_', 'fontscalerdistortable'])
blacklist(['vk', 'gm', '_', 'gammagradienttext'])
blacklist(['vk', 'gm', '_', 'gammatextWin'])
blacklist(['vk', 'gm', '_', 'gradtext'])
blacklist(['vk', 'gm', '_', 'hairmodes'])
blacklist(['vk', 'gm', '_', 'imagefilters_xfermodes'])
blacklist(['vk', 'gm', '_', 'imagefiltersclipped'])
blacklist(['vk', 'gm', '_', 'imagefiltersgraph'])
blacklist(['vk', 'gm', '_', 'imagefiltersscaled'])
blacklist(['vk', 'gm', '_', 'imagefiltersstroked'])
blacklist(['vk', 'gm', '_', 'imagefilterstransformed'])
blacklist(['vk', 'gm', '_', 'imageresizetiled'])
blacklist(['vk', 'gm', '_', 'lcdblendmodes'])
blacklist(['vk', 'gm', '_', 'lcdoverlap'])
blacklist(['vk', 'gm', '_', 'lcdtextWin'])
blacklist(['vk', 'gm', '_', 'lcdtextsize'])
blacklist(['vk', 'gm', '_', 'matriximagefilter'])
blacklist(['vk', 'gm', '_', 'mixedtextblobsCOLR'])
blacklist(['vk', 'gm', '_', 'mixershader'])
blacklist(['vk', 'gm', '_', 'pictureimagefilter'])
blacklist(['vk', 'gm', '_', 'resizeimagefilter'])
blacklist(['vk', 'gm', '_', 'rotate_imagefilter'])
blacklist(['vk', 'gm', '_', 'savelayer_lcdtext'])
blacklist(['vk', 'gm', '_', 'srcmode'])
blacklist(['vk', 'gm', '_', 'surfaceprops'])
blacklist(['vk', 'gm', '_', 'textblobgeometrychange'])
blacklist(['vk', 'gm', '_', 'textbloblooper'])
blacklist(['vk', 'gm', '_', 'textblobmixedsizes'])
blacklist(['vk', 'gm', '_', 'textblobmixedsizes_df'])
blacklist(['vk', 'gm', '_', 'textblobrandomfont'])
blacklist(['vk', 'gm', '_', 'textfilter_color'])
blacklist(['vk', 'gm', '_', 'textfilter_image'])
blacklist(['vk', 'gm', '_', 'typefacerenderingWin'])
blacklist(['vk', 'gm', '_', 'varied_text_clipped_lcd'])
blacklist(['vk', 'gm', '_', 'varied_text_ignorable_clip_lcd'])
blacklist(['vk', 'gm', '_', 'xfermodeimagefilter'])
match.append('~ApplyGamma')
match.append('~ComposedImageFilterBounds_Gpu')
match.append('~DeferredTextureImage')
match.append('~GrMeshTest')
match.append('~ImageFilterFailAffectsTransparentBlack_Gpu')
match.append('~ImageFilterZeroBlurSigma_Gpu')
match.append('~ImageNewShader_GPU')
match.append('~NewTextureFromPixmap')
match.append('~ReadPixels_Gpu')
match.append('~ReadPixels_Texture')
match.append('~ReadWriteAlpha')
match.append('~skbug6653')
match.append('~SRGBReadWritePixels')
match.append('~SpecialImage_DeferredGpu')
match.append('~SpecialImage_Gpu')
match.append('~WritePixels_Gpu')
match.append('~WritePixelsNonTexture_Gpu')
match.append('~XfermodeImageFilterCroppedInput_Gpu')
if 'IntelIris540' in bot and 'ANGLE' in bot:
for config in ['angle_d3d9_es2', 'angle_d3d11_es2', 'angle_gl_es2']:
blacklist([config, 'gm', '_', 'multipicturedraw_invpathclip_simple'])
blacklist([config, 'gm', '_', 'multipicturedraw_noclip_simple'])
blacklist([config, 'gm', '_', 'multipicturedraw_pathclip_simple'])
blacklist([config, 'gm', '_', 'multipicturedraw_rectclip_simple'])
blacklist([config, 'gm', '_', 'multipicturedraw_rrectclip_simple'])
blacklist([config, 'gm', '_', 'discard'])
if 'IntelBayTrail' in bot and api.vars.is_linux:
match.append('~ImageStorageLoad')
if 'Ci20' in bot:
match.append('~Codec_Dimensions')
match.append('~FontMgrAndroidParser')
match.append('~PathOpsSimplify')
blacklist(['_', 'gm', '_', 'fast_slow_blurimagefilter'])
if ('Win10' in bot and 'Vulkan' in bot
and ('GTX1070' in bot or 'GTX660' in bot)):
blacklist('_ test _ SkImage_makeTextureImage')
if blacklisted:
args.append('--blacklist')
args.extend(blacklisted)
if match:
args.append('--match')
args.extend(match)
if ('NexusPlayer' in bot or 'Nexus5' in bot or 'Nexus9' in bot
or 'Win8-MSVC-ShuttleB' in bot):
args.append('--noRAW_threading')
if 'Valgrind' in bot and 'PreAbandonGpuContext' in bot:
verbose = True
if 'NexusPlayer' in bot and 'CPU' in bot:
# times it out for not printing anything frequently enough. --verbose
# makes dm print something every time we start or complete a task.
verbose = True
if verbose:
args.append('--verbose')
return args
def key_params(api):
  """Build a flat [key, value, key, value, ...] list from the builder config.

  Keys are emitted in sorted order, each immediately followed by its value.
  'role' (always Test) and 'is_trybot' are excluded.
  """
  excluded = ('role', 'is_trybot')
  cfg = api.vars.builder_cfg
  flat = []
  for key in sorted(cfg):
    if key in excluded:
      continue
    flat.extend([key, cfg[key]])
  return flat
def test_steps(api):
  """Run the DM test binary.

  Optionally downloads the list of already-known-good image hashes (so DM can
  skip writing them), assembles the full DM command line (properties, keys,
  config flags from dm_flags), runs DM under a possibly-augmented environment,
  and copies results back to the host when uploads are enabled.
  """
  use_hash_file = False
  if api.vars.upload_dm_results:
    # Start from clean output dirs on host and (if different) on the device.
    api.flavor.create_clean_host_dir(api.vars.dm_dir)
    host_dm_dir = str(api.vars.dm_dir)
    device_dm_dir = str(api.flavor.device_dirs.dm_dir)
    if host_dm_dir != device_dm_dir:
      api.flavor.create_clean_device_dir(device_dm_dir)

    # Obtain the list of already-generated hashes.
    hash_filename = 'uninteresting_hashes.txt'

    # Ensure that the tmp_dir exists.
    api.run.run_once(api.file.ensure_directory,
                     'makedirs tmp_dir',
                     api.vars.tmp_dir)

    host_hashes_file = api.vars.tmp_dir.join(hash_filename)
    hashes_file = api.flavor.device_path_join(
        api.flavor.device_dirs.tmp_dir, hash_filename)
    # Best-effort download of the hash list with exponential backoff; failure
    # here must not fail the build (abort_on_failure/fail_build_on_failure).
    api.run(
        api.python.inline,
        'get uninteresting hashes',
        program="""
import contextlib
import math
import socket
import sys
import time
import urllib2

HASHES_URL = 'https://storage.googleapis.com/skia-infra-gm/hash_files/gold-prod-hashes.txt'
RETRIES = 5
TIMEOUT = 60
WAIT_BASE = 15

socket.setdefaulttimeout(TIMEOUT)
for retry in range(RETRIES):
  try:
    with contextlib.closing(
        urllib2.urlopen(HASHES_URL, timeout=TIMEOUT)) as w:
      hashes = w.read()
      with open(sys.argv[1], 'w') as f:
        f.write(hashes)
        break
  except Exception as e:
    print 'Failed to get uninteresting hashes from %s:' % HASHES_URL
    print e
    if retry == RETRIES:
      raise

    waittime = WAIT_BASE * math.pow(2, retry)
    print 'Retry in %d seconds.' % waittime
    time.sleep(waittime)
""",
        args=[host_hashes_file],
        abort_on_failure=False,
        fail_build_on_failure=False,
        infra_step=True)

    if api.path.exists(host_hashes_file):
      api.flavor.copy_file_to_device(host_hashes_file, hashes_file)
      use_hash_file = True

  # Run DM.
  # Key/value properties identifying this run; passed to DM via --properties.
  properties = [
    'gitHash', api.vars.got_revision,
    'builder', api.vars.builder_name,
  ]
  if api.vars.is_trybot:
    properties.extend([
      'issue', api.vars.issue,
      'patchset', api.vars.patchset,
      'patch_storage', api.vars.patch_storage,
    ])
  properties.extend(['swarming_bot_id', api.vars.swarming_bot_id])
  properties.extend(['swarming_task_id', api.vars.swarming_task_id])

  args = [
    'dm',
    '--undefok',  # This helps branches that may not know new flags.
    '--resourcePath', api.flavor.device_dirs.resource_dir,
    '--skps', api.flavor.device_dirs.skp_dir,
    '--images', api.flavor.device_path_join(
        api.flavor.device_dirs.images_dir, 'dm'),
    '--colorImages', api.flavor.device_path_join(
        api.flavor.device_dirs.images_dir, 'colorspace'),
    '--nameByHash',
    '--properties'
  ] + properties

  args.extend(['--svgs', api.flavor.device_dirs.svg_dir])

  args.append('--key')
  args.extend(key_params(api))
  if use_hash_file:
    args.extend(['--uninterestingHashesFile', hashes_file])
  if api.vars.upload_dm_results:
    args.extend(['--writePath', api.flavor.device_dirs.dm_dir])

  # Skip the half (CPU or GPU) of DM that this bot doesn't exercise.
  skip_flag = None
  if api.vars.builder_cfg.get('cpu_or_gpu') == 'CPU':
    skip_flag = '--nogpu'
  elif api.vars.builder_cfg.get('cpu_or_gpu') == 'GPU':
    skip_flag = '--nocpu'
  if skip_flag:
    args.append(skip_flag)
  args.extend(dm_flags(api, api.vars.builder_name))

  env = {}
  if 'Ubuntu16' in api.vars.builder_name:
    # The vulkan in this asset name simply means that the graphics driver
    # supports Vulkan. It is also the driver used for GL code.
    dri_path = api.vars.slave_dir.join('linux_vulkan_intel_driver_release')
    if 'Debug' in api.vars.builder_name:
      dri_path = api.vars.slave_dir.join('linux_vulkan_intel_driver_debug')

    if 'Vulkan' in api.vars.builder_name:
      sdk_path = api.vars.slave_dir.join('linux_vulkan_sdk', 'bin')
      lib_path = api.vars.slave_dir.join('linux_vulkan_sdk', 'lib')
      env.update({
        'PATH': '%%(PATH)s:%s' % sdk_path,
        'LD_LIBRARY_PATH': '%s:%s' % (lib_path, dri_path),
        'LIBGL_DRIVERS_PATH': dri_path,
        'VK_ICD_FILENAMES': '%s' % dri_path.join('intel_icd.x86_64.json'),
      })
    else:
      # Even the non-vulkan NUC jobs could benefit from the newer drivers.
      env.update({
        'LD_LIBRARY_PATH': dri_path,
        'LIBGL_DRIVERS_PATH': dri_path,
      })

  # See skia:2789.
  extra_config_parts = api.vars.builder_cfg.get('extra_config', '').split('_')
  if 'AbandonGpuContext' in extra_config_parts:
    args.append('--abandonGpuContext')
  if 'PreAbandonGpuContext' in extra_config_parts:
    args.append('--preAbandonGpuContext')
  if 'ReleaseAndAbandonGpuContext' in extra_config_parts:
    args.append('--releaseAndAbandonGpuContext')

  with api.env(env):
    api.run(api.flavor.step, 'dm', cmd=args, abort_on_failure=False)

  if api.vars.upload_dm_results:
    # Copy images and JSON to host machine if needed.
    api.flavor.copy_directory_contents_to_host(
        api.flavor.device_dirs.dm_dir, api.vars.dm_dir)
def RunSteps(api):
  """Recipe entry point: install everything, run the DM tests, clean up."""
  api.core.setup()

  extra_env = {}
  if 'iOS' in api.vars.builder_name:
    # iOS flavors need the app bundle id and a mount point for the device.
    extra_env.update({
        'IOS_BUNDLE_ID': 'com.google.dm',
        'IOS_MOUNT_POINT': api.vars.slave_dir.join('mnt_iosdevice'),
    })

  with api.context(env=extra_env):
    try:
      api.flavor.install_everything()
      test_steps(api)
    finally:
      # Always clean up device state, even if the test steps failed.
      api.flavor.cleanup_steps()
    api.run.check_failure()
# Builder names exercised by GenTests below: one simulation test is generated
# per entry. The list spans Android/ChromeOS/Mac/Ubuntu/Win/iOS bots plus
# special configurations (Vulkan, ANGLE, Valgrind, sanitizers, CommandBuffer,
# CCPR, SK_FORCE_RASTER_PIPELINE_BLITTER).
TEST_BUILDERS = [
  'Test-Android-Clang-AndroidOne-GPU-Mali400MP2-arm-Release-Android',
  'Test-Android-Clang-Ci20-CPU-IngenicJZ4780-mipsel-Release-Android',
  'Test-Android-Clang-GalaxyS6-GPU-MaliT760-arm64-Debug-Android',
  'Test-Android-Clang-GalaxyS7_G930A-GPU-Adreno530-arm64-Debug-Android',
  'Test-Android-Clang-NVIDIA_Shield-GPU-TegraX1-arm64-Debug-Android',
  'Test-Android-Clang-Nexus10-GPU-MaliT604-arm-Release-Android',
  'Test-Android-Clang-Nexus5-GPU-Adreno330-arm-Release-Android',
  'Test-Android-Clang-PixelXL-GPU-Adreno530-arm64-Debug-Android_CCPR',
  'Test-Android-Clang-Nexus6p-GPU-Adreno430-arm64-Debug-Android_Vulkan',
  'Test-Android-Clang-PixelXL-GPU-Adreno530-arm64-Debug-Android_Vulkan',
  'Test-Android-Clang-Nexus7-GPU-Tegra3-arm-Debug-Android',
  'Test-Android-Clang-NexusPlayer-CPU-SSE4-x86-Release-Android',
  'Test-Android-Clang-NexusPlayer-GPU-PowerVR-x86-Release-Android_Vulkan',
  'Test-Android-Clang-PixelC-CPU-TegraX1-arm64-Debug-Android',
  'Test-ChromeOS-Clang-Chromebook_C100p-GPU-MaliT764-arm-Debug',
  'Test-Mac-Clang-MacMini6.2-CPU-AVX-x86_64-Debug',
  'Test-Mac-Clang-MacMini6.2-GPU-IntelHD4000-x86_64-Debug-CommandBuffer',
  'Test-Ubuntu-Clang-GCE-CPU-AVX2-x86_64-Debug-ASAN',
  'Test-Ubuntu-Clang-GCE-CPU-AVX2-x86_64-Debug-MSAN',
  'Test-Ubuntu-Clang-GCE-CPU-AVX2-x86_64-Release-TSAN',
  'Test-Ubuntu-GCC-GCE-CPU-AVX2-x86-Debug',
  'Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Debug',
  'Test-Ubuntu-GCC-ShuttleA-GPU-GTX550Ti-x86_64-Release-Valgrind',
  ('Test-Ubuntu-GCC-ShuttleA-GPU-GTX550Ti-x86_64-Release-Valgrind' +
   '_AbandonGpuContext'),
  ('Test-Ubuntu-GCC-ShuttleA-GPU-GTX550Ti-x86_64-Release-Valgrind' +
   '_PreAbandonGpuContext'),
  ('Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Debug-SK_USE_DISCARDABLE_' +
   'SCALEDIMAGECACHE'),
  'Test-Ubuntu16-Clang-NUC5PPYH-GPU-IntelHD405-x86_64-Debug',
  'Test-Ubuntu16-Clang-NUC6i5SYK-GPU-IntelIris540-x86_64-Debug-Vulkan',
  'Test-Ubuntu16-Clang-NUC6i5SYK-GPU-IntelIris540-x86_64-Release',
  'Test-Ubuntu16-Clang-NUCDE3815TYKHE-GPU-IntelBayTrail-x86_64-Debug',
  'Test-Win8-MSVC-Golo-CPU-AVX-x86-Debug',
  'Test-Win10-MSVC-AlphaR2-GPU-RadeonR9M470X-x86_64-Debug-Vulkan',
  ('Test-Win10-MSVC-NUC5i7RYH-GPU-IntelIris6100-x86_64-Release-'
   'ReleaseAndAbandonGpuContext'),
  'Test-Win10-MSVC-NUC6i5SYK-GPU-IntelIris540-x86_64-Debug-ANGLE',
  'Test-Win10-MSVC-NUC6i5SYK-GPU-IntelIris540-x86_64-Debug-Vulkan',
  'Test-Win10-MSVC-ShuttleA-GPU-GTX660-x86_64-Debug-Vulkan',
  'Test-Win10-MSVC-ShuttleC-GPU-GTX960-x86_64-Debug-ANGLE',
  'Test-Win10-MSVC-ZBOX-GPU-GTX1070-x86_64-Debug-Vulkan',
  'Test-iOS-Clang-iPadMini4-GPU-GX6450-arm-Release',
  ('Test-Ubuntu-Clang-GCE-CPU-AVX2-x86_64-Release-'
   'SK_FORCE_RASTER_PIPELINE_BLITTER'),
]
def GenTests(api):
  """Generate recipe simulation tests.

  Yields one test per builder in TEST_BUILDERS, plus targeted scenarios:
  trybot properties, a failing dm run, a failed hash download, and failed
  push/pull steps on Android devices.
  """
  for builder in TEST_BUILDERS:
    test = (
      api.test(builder) +
      api.properties(buildername=builder,
                     revision='abc123',
                     path_config='kitchen',
                     swarm_out_dir='[SWARM_OUT_DIR]') +
      api.path.exists(
          api.path['start_dir'].join('skia'),
          api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                     'skimage', 'VERSION'),
          api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                     'skp', 'VERSION'),
          api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                     'svg', 'VERSION'),
          api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
      ) +
      api.step_data('get swarming bot id',
                    stdout=api.raw_io.output('skia-bot-123')) +
      api.step_data('get swarming task id',
                    stdout=api.raw_io.output('123456'))
    )
    if 'Win' in builder:
      test += api.platform('win', 64)

    if 'ChromeOS' in builder:
      # ChromeOS flavors read the device IP from a JSON file.
      test += api.step_data(
          'read chromeos ip',
          stdout=api.raw_io.output('{"user_ip":"foo@127.0.0.1"}'))

    yield test

  # Trybot run: gerrit patch properties are set.
  builder = 'Test-Win2k8-MSVC-GCE-CPU-AVX2-x86_64-Release'
  yield (
    api.test('trybot') +
    api.properties(buildername=builder,
                   revision='abc123',
                   path_config='kitchen',
                   swarm_out_dir='[SWARM_OUT_DIR]') +
    api.properties(patch_storage='gerrit') +
    api.properties.tryserver(
        buildername=builder,
        gerrit_project='skia',
        gerrit_url='https://skia-review.googlesource.com/',
    ) +
    api.path.exists(
        api.path['start_dir'].join('skia'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'skimage', 'VERSION'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'skp', 'VERSION'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'svg', 'VERSION'),
        api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
    )
  )

  # The dm step itself fails (non-zero return code from symbolized dm).
  builder = 'Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Debug'
  yield (
    api.test('failed_dm') +
    api.properties(buildername=builder,
                   revision='abc123',
                   path_config='kitchen',
                   swarm_out_dir='[SWARM_OUT_DIR]') +
    api.path.exists(
        api.path['start_dir'].join('skia'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'skimage', 'VERSION'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'skp', 'VERSION'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'svg', 'VERSION'),
        api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
    ) +
    api.step_data('symbolized dm', retcode=1)
  )

  # Downloading the uninteresting-hashes file fails; the build should survive.
  builder = 'Test-Android-Clang-Nexus7-GPU-Tegra3-arm-Debug-Android'
  yield (
    api.test('failed_get_hashes') +
    api.properties(buildername=builder,
                   revision='abc123',
                   path_config='kitchen',
                   swarm_out_dir='[SWARM_OUT_DIR]') +
    api.path.exists(
        api.path['start_dir'].join('skia'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'skimage', 'VERSION'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'skp', 'VERSION'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'svg', 'VERSION'),
        api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
    ) +
    api.step_data('get uninteresting hashes', retcode=1)
  )

  # Pushing resources to the Android device fails.
  builder = 'Test-Android-Clang-NexusPlayer-CPU-SSE4-x86-Debug-Android'
  yield (
    api.test('failed_push') +
    api.properties(buildername=builder,
                   revision='abc123',
                   path_config='kitchen',
                   swarm_out_dir='[SWARM_OUT_DIR]') +
    api.path.exists(
        api.path['start_dir'].join('skia'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'skimage', 'VERSION'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'skp', 'VERSION'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'svg', 'VERSION'),
        api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
    ) +
    api.step_data('push [START_DIR]/skia/resources/* ' +
                  '/sdcard/revenge_of_the_skiabot/resources', retcode=1)
  )

  # Pulling results back from the Android device fails (after dm also failed).
  builder = 'Test-Android-Clang-Nexus10-GPU-MaliT604-arm-Debug-Android'
  yield (
    api.test('failed_pull') +
    api.properties(buildername=builder,
                   revision='abc123',
                   path_config='kitchen',
                   swarm_out_dir='[SWARM_OUT_DIR]') +
    api.path.exists(
        api.path['start_dir'].join('skia'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'skimage', 'VERSION'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'skp', 'VERSION'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'svg', 'VERSION'),
        api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
    ) +
    api.step_data('dm', retcode=1) +
    api.step_data('pull /sdcard/revenge_of_the_skiabot/dm_out ' +
                  '[CUSTOM_[SWARM_OUT_DIR]]/dm', retcode=1)
  )
| true | true |
1c378ff86090ecf7c67520a02e1303f2b422df6c | 8,193 | py | Python | tfx/examples/chicago_taxi/preprocess.py | MattMorgis/tfx | f11cc054f079c998a52002e14b6ba74063fed986 | [
"Apache-2.0"
] | 1 | 2019-04-05T19:39:53.000Z | 2019-04-05T19:39:53.000Z | tfx/examples/chicago_taxi/preprocess.py | MattMorgis/tfx | f11cc054f079c998a52002e14b6ba74063fed986 | [
"Apache-2.0"
] | null | null | null | tfx/examples/chicago_taxi/preprocess.py | MattMorgis/tfx | f11cc054f079c998a52002e14b6ba74063fed986 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocessor applying tf.transform to the chicago_taxi data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import apache_beam as beam
import tensorflow as tf
import tensorflow_transform as transform
import tensorflow_transform.beam as tft_beam
from tensorflow_transform.coders import example_proto_coder
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import dataset_schema
from tfx.examples.chicago_taxi.trainer import taxi
def _fill_in_missing(x):
  """Replace missing values in a SparseTensor.

  Fills in missing values of `x` with '' or 0, and converts to a dense tensor.

  Args:
    x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1
      in the second dimension.

  Returns:
    A rank 1 tensor where missing values of `x` have been filled in.
  """
  if x.dtype == tf.string:
    default_value = ''
  else:
    default_value = 0
  dense = tf.sparse_to_dense(
      x.indices, [x.dense_shape[0], 1], x.values, default_value)
  # Drop the trailing size-1 dimension to yield a rank-1 tensor.
  return tf.squeeze(dense, axis=1)
# TODO(b/114126687): Make schema as a required argument and remove the
# hard-coded feature spec in trainer/taxi.py.
def transform_data(input_handle,
                   outfile_prefix,
                   working_dir,
                   schema_file,
                   transform_dir=None,
                   max_rows=None,
                   pipeline_args=None):
  """The main tf.transform method which analyzes and transforms data.

  Args:
    input_handle: BigQuery table name to process specified as DATASET.TABLE or
      path to csv file with input data.
    outfile_prefix: Filename prefix for emitted transformed examples
    working_dir: Directory in which transformed examples and transform function
      will be emitted.
    schema_file: An file path that contains a text-serialized TensorFlow
      metadata schema of the input data.
    transform_dir: Directory in which the transform output is located. If
      provided, this will load the transform_fn from disk instead of computing
      it over the data. Hint: this is useful for transforming eval data.
    max_rows: Number of rows to query from BigQuery
    pipeline_args: additional DataflowRunner or DirectRunner args passed to the
      beam pipeline.
  """

  def preprocessing_fn(inputs):
    """tf.transform's callback function for preprocessing inputs.

    Args:
      inputs: map from feature keys to raw not-yet-transformed features.

    Returns:
      Map from string feature key to transformed feature operations.
    """
    outputs = {}
    for key in taxi.DENSE_FLOAT_FEATURE_KEYS:
      # Preserve this feature as a dense float, setting nan's to the mean.
      outputs[taxi.transformed_name(key)] = transform.scale_to_z_score(
          _fill_in_missing(inputs[key]))

    for key in taxi.VOCAB_FEATURE_KEYS:
      # Build a vocabulary for this feature.
      outputs[
          taxi.transformed_name(key)] = transform.compute_and_apply_vocabulary(
              _fill_in_missing(inputs[key]),
              top_k=taxi.VOCAB_SIZE,
              num_oov_buckets=taxi.OOV_SIZE)

    for key in taxi.BUCKET_FEATURE_KEYS:
      outputs[taxi.transformed_name(key)] = transform.bucketize(
          _fill_in_missing(inputs[key]), taxi.FEATURE_BUCKET_COUNT)

    for key in taxi.CATEGORICAL_FEATURE_KEYS:
      outputs[taxi.transformed_name(key)] = _fill_in_missing(inputs[key])

    # Was this passenger a big tipper?
    taxi_fare = _fill_in_missing(inputs[taxi.FARE_KEY])
    tips = _fill_in_missing(inputs[taxi.LABEL_KEY])
    outputs[taxi.transformed_name(taxi.LABEL_KEY)] = tf.where(
        tf.is_nan(taxi_fare),
        tf.cast(tf.zeros_like(taxi_fare), tf.int64),
        # Test if the tip was > 20% of the fare.
        tf.cast(
            tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))),
            tf.int64))

    return outputs

  schema = taxi.read_schema(schema_file)
  raw_feature_spec = taxi.get_raw_feature_spec(schema)
  raw_schema = dataset_schema.from_feature_spec(raw_feature_spec)
  raw_data_metadata = dataset_metadata.DatasetMetadata(raw_schema)

  with beam.Pipeline(argv=pipeline_args) as pipeline:
    with tft_beam.Context(temp_dir=working_dir):
      # Read the raw rows either from a CSV file or from BigQuery; in both
      # cases also prepare a decode transform (applied lazily further below).
      if input_handle.lower().endswith('csv'):
        csv_coder = taxi.make_csv_coder(schema)
        raw_data = (
            pipeline
            | 'ReadFromText' >> beam.io.ReadFromText(
                input_handle, skip_header_lines=1))
        decode_transform = beam.Map(csv_coder.decode)
      else:
        query = taxi.make_sql(input_handle, max_rows, for_eval=False)
        raw_data = (
            pipeline
            | 'ReadBigQuery' >> beam.io.Read(
                beam.io.BigQuerySource(query=query, use_standard_sql=True)))
        decode_transform = beam.Map(
            taxi.clean_raw_data_dict, raw_feature_spec=raw_feature_spec)

      # Either analyze this data to produce a transform_fn (training), or
      # reuse a previously-written transform_fn from disk (eval).
      if transform_dir is None:
        decoded_data = raw_data | 'DecodeForAnalyze' >> decode_transform
        transform_fn = (
            (decoded_data, raw_data_metadata) |
            ('Analyze' >> tft_beam.AnalyzeDataset(preprocessing_fn)))

        _ = (
            transform_fn
            | ('WriteTransformFn' >>
               tft_beam.WriteTransformFn(working_dir)))
      else:
        transform_fn = pipeline | tft_beam.ReadTransformFn(transform_dir)

      # Shuffling the data before materialization will improve Training
      # effectiveness downstream. Here we shuffle the raw_data (as opposed to
      # decoded data) since it has a compact representation.
      shuffled_data = raw_data | 'RandomizeData' >> beam.transforms.Reshuffle()

      decoded_data = shuffled_data | 'DecodeForTransform' >> decode_transform
      (transformed_data, transformed_metadata) = (
          ((decoded_data, raw_data_metadata), transform_fn)
          | 'Transform' >> tft_beam.TransformDataset())

      # Serialize the transformed examples and write them as gzipped
      # TFRecords under working_dir with the given filename prefix.
      coder = example_proto_coder.ExampleProtoCoder(transformed_metadata.schema)
      _ = (
          transformed_data
          | 'SerializeExamples' >> beam.Map(coder.encode)
          | 'WriteExamples' >> beam.io.WriteToTFRecord(
              os.path.join(working_dir, outfile_prefix), file_name_suffix='.gz')
      )
def main():
  """Command-line entry point for the taxi preprocessing pipeline.

  Flags this script understands configure the transform; any unrecognized
  flags are forwarded verbatim to the Beam pipeline (e.g. DirectRunner or
  DataflowRunner options).
  """
  tf.logging.set_verbosity(tf.logging.INFO)

  arg_parser = argparse.ArgumentParser()
  arg_parser.add_argument(
      '--input',
      help=('Input BigQuery table to process specified as: '
            'DATASET.TABLE or path to csv file with input data.'))
  arg_parser.add_argument(
      '--schema_file',
      help='File holding the schema for the input data')
  arg_parser.add_argument(
      '--output_dir',
      help=('Directory in which transformed examples and function '
            'will be emitted.'))
  arg_parser.add_argument(
      '--outfile_prefix',
      help='Filename prefix for emitted transformed examples')
  arg_parser.add_argument(
      '--transform_dir',
      default=None,
      required=False,
      help='Directory in which the transform output is located')
  arg_parser.add_argument(
      '--max_rows',
      type=int,
      default=None,
      help='Number of rows to query from BigQuery')

  # Split recognized flags from pipeline-level flags (passed through to Beam).
  flags, beam_args = arg_parser.parse_known_args()

  transform_data(
      input_handle=flags.input,
      outfile_prefix=flags.outfile_prefix,
      working_dir=flags.output_dir,
      schema_file=flags.schema_file,
      transform_dir=flags.transform_dir,
      max_rows=flags.max_rows,
      pipeline_args=beam_args)


if __name__ == '__main__':
  main()
| 36.575893 | 80 | 0.695106 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import apache_beam as beam
import tensorflow as tf
import tensorflow_transform as transform
import tensorflow_transform.beam as tft_beam
from tensorflow_transform.coders import example_proto_coder
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import dataset_schema
from tfx.examples.chicago_taxi.trainer import taxi
def _fill_in_missing(x):
default_value = '' if x.dtype == tf.string else 0
return tf.squeeze(
tf.sparse_to_dense(x.indices, [x.dense_shape[0], 1], x.values,
default_value),
axis=1)
def transform_data(input_handle,
outfile_prefix,
working_dir,
schema_file,
transform_dir=None,
max_rows=None,
pipeline_args=None):
def preprocessing_fn(inputs):
outputs = {}
for key in taxi.DENSE_FLOAT_FEATURE_KEYS:
outputs[taxi.transformed_name(key)] = transform.scale_to_z_score(
_fill_in_missing(inputs[key]))
for key in taxi.VOCAB_FEATURE_KEYS:
# Build a vocabulary for this feature.
outputs[
taxi.transformed_name(key)] = transform.compute_and_apply_vocabulary(
_fill_in_missing(inputs[key]),
top_k=taxi.VOCAB_SIZE,
num_oov_buckets=taxi.OOV_SIZE)
for key in taxi.BUCKET_FEATURE_KEYS:
outputs[taxi.transformed_name(key)] = transform.bucketize(
_fill_in_missing(inputs[key]), taxi.FEATURE_BUCKET_COUNT)
for key in taxi.CATEGORICAL_FEATURE_KEYS:
outputs[taxi.transformed_name(key)] = _fill_in_missing(inputs[key])
# Was this passenger a big tipper?
taxi_fare = _fill_in_missing(inputs[taxi.FARE_KEY])
tips = _fill_in_missing(inputs[taxi.LABEL_KEY])
outputs[taxi.transformed_name(taxi.LABEL_KEY)] = tf.where(
tf.is_nan(taxi_fare),
tf.cast(tf.zeros_like(taxi_fare), tf.int64),
# Test if the tip was > 20% of the fare.
tf.cast(
tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))),
tf.int64))
return outputs
schema = taxi.read_schema(schema_file)
raw_feature_spec = taxi.get_raw_feature_spec(schema)
raw_schema = dataset_schema.from_feature_spec(raw_feature_spec)
raw_data_metadata = dataset_metadata.DatasetMetadata(raw_schema)
with beam.Pipeline(argv=pipeline_args) as pipeline:
with tft_beam.Context(temp_dir=working_dir):
if input_handle.lower().endswith('csv'):
csv_coder = taxi.make_csv_coder(schema)
raw_data = (
pipeline
| 'ReadFromText' >> beam.io.ReadFromText(
input_handle, skip_header_lines=1))
decode_transform = beam.Map(csv_coder.decode)
else:
query = taxi.make_sql(input_handle, max_rows, for_eval=False)
raw_data = (
pipeline
| 'ReadBigQuery' >> beam.io.Read(
beam.io.BigQuerySource(query=query, use_standard_sql=True)))
decode_transform = beam.Map(
taxi.clean_raw_data_dict, raw_feature_spec=raw_feature_spec)
if transform_dir is None:
decoded_data = raw_data | 'DecodeForAnalyze' >> decode_transform
transform_fn = (
(decoded_data, raw_data_metadata) |
('Analyze' >> tft_beam.AnalyzeDataset(preprocessing_fn)))
_ = (
transform_fn
| ('WriteTransformFn' >>
tft_beam.WriteTransformFn(working_dir)))
else:
transform_fn = pipeline | tft_beam.ReadTransformFn(transform_dir)
# Shuffling the data before materialization will improve Training
# effectiveness downstream. Here we shuffle the raw_data (as opposed to
# decoded data) since it has a compact representation.
shuffled_data = raw_data | 'RandomizeData' >> beam.transforms.Reshuffle()
decoded_data = shuffled_data | 'DecodeForTransform' >> decode_transform
(transformed_data, transformed_metadata) = (
((decoded_data, raw_data_metadata), transform_fn)
| 'Transform' >> tft_beam.TransformDataset())
coder = example_proto_coder.ExampleProtoCoder(transformed_metadata.schema)
_ = (
transformed_data
| 'SerializeExamples' >> beam.Map(coder.encode)
| 'WriteExamples' >> beam.io.WriteToTFRecord(
os.path.join(working_dir, outfile_prefix), file_name_suffix='.gz')
)
def main():
tf.logging.set_verbosity(tf.logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument(
'--input',
help=('Input BigQuery table to process specified as: '
'DATASET.TABLE or path to csv file with input data.'))
parser.add_argument(
'--schema_file', help='File holding the schema for the input data')
parser.add_argument(
'--output_dir',
help=('Directory in which transformed examples and function '
'will be emitted.'))
parser.add_argument(
'--outfile_prefix',
help='Filename prefix for emitted transformed examples')
parser.add_argument(
'--transform_dir',
required=False,
default=None,
help='Directory in which the transform output is located')
parser.add_argument(
'--max_rows',
help='Number of rows to query from BigQuery',
default=None,
type=int)
known_args, pipeline_args = parser.parse_known_args()
transform_data(
input_handle=known_args.input,
outfile_prefix=known_args.outfile_prefix,
working_dir=known_args.output_dir,
schema_file=known_args.schema_file,
transform_dir=known_args.transform_dir,
max_rows=known_args.max_rows,
pipeline_args=pipeline_args)
if __name__ == '__main__':
main()
| true | true |
1c3790343ce0bc0f3bce26c0c186f3578f1e1cc6 | 11,054 | py | Python | tests/workers/test_module_event_handler.py | glucoseinc/CircleCore | 577f814ce2944efb6e5997f3d7838c71ce9aea6a | [
"MIT"
] | 3 | 2019-01-11T04:30:18.000Z | 2019-01-11T04:31:18.000Z | tests/workers/test_module_event_handler.py | glucoseinc/CircleCore | 577f814ce2944efb6e5997f3d7838c71ce9aea6a | [
"MIT"
] | 16 | 2018-11-21T11:47:18.000Z | 2021-09-01T03:52:35.000Z | tests/workers/test_module_event_handler.py | glucoseinc/CircleCore | 577f814ce2944efb6e5997f3d7838c71ce9aea6a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import base64
import json
import mimetypes
import os
from email.message import EmailMessage
from unittest.mock import MagicMock
from tornado import httpclient
from tornado.gen import sleep
from tornado.testing import AsyncHTTPTestCase, gen_test
from tornado.web import Application
from tornado.websocket import websocket_connect
from circle_core.models import MessageBox, MetaDataSession, Module, Schema, User
from circle_core.testing import mock_circlecore_context
from circle_core.workers.http import ModuleEventHandler
async def _receive_new_message_side_effect(*args, **kwargs):
return True
class TestModuleEventHandlerBase(AsyncHTTPTestCase):
def get_app(self):
return Application(
[(r'/modules/(?P<module_uuid>[0-9A-Fa-f-]+)/(?P<mbox_uuid>[0-9A-Fa-f-]+)', ModuleEventHandler)],
_core=self.app_mock
)
def setUp(self):
self.app_mock = MagicMock()
self.datareceiver = MagicMock()
self.datareceiver.receive_new_message.return_value = True
self.datareceiver.receive_new_message.side_effect = _receive_new_message_side_effect
self.app_mock.get_datareceiver.return_value = self.datareceiver
super().setUp()
self.ctxt = mock_circlecore_context()
self.ctxt.__enter__()
def tearDown(self):
self.ctxt.__exit__(None, None, None)
super().tearDown()
def reset_mock(self):
self.datareceiver.reset_mock()
self.datareceiver.receive_new_message.side_effect = _receive_new_message_side_effect
class TestModuleEventHandlerViaREST(TestModuleEventHandlerBase):
def test_rest_not_found(self):
"""登録されていないModuleからのPOSTは404"""
with MetaDataSession.begin():
user = User.create(account='tester', password='tester')
user.renew_token()
MetaDataSession.add(user)
response = self.fetch(
self.get_url('/modules/4ffab839-cf56-478a-8614-6003a5980855/00000000-0000-0000-0000-000000000000'),
method='POST',
body=json.dumps({
'x': 1,
'y': 2
}),
headers={
'Content-Type': 'application/json',
'Authorization': 'Bearer {token}'.format(token=user.encoded_token),
}
)
self.assertEqual(response.code, 404)
def test_rest(self):
"""登録されているModuleからのPOSTは404"""
# make dummy environ
with MetaDataSession.begin():
user = User.create(account='tester', password='tester')
user.renew_token()
schema = Schema.create(display_name='Schema', properties='x:int,y:float')
module = Module.create(display_name='Module')
mbox = MessageBox(
uuid='4ffab839-cf56-478a-8614-6003a5980856', schema_uuid=schema.uuid, module_uuid=module.uuid
)
MetaDataSession.add(user)
MetaDataSession.add(schema)
MetaDataSession.add(module)
MetaDataSession.add(mbox)
response = self.fetch(
self.get_url('/modules/{}/{}'.format(module.uuid, mbox.uuid)),
method='POST',
body=json.dumps({
'x': 1,
'y': 2.5
}),
headers={
'Content-Type': 'application/json',
'Authorization': 'Bearer {token}'.format(token=user.encoded_token),
}
)
self.assertEqual(response.code, 200)
self.datareceiver.receive_new_message.assert_called_once_with(str(mbox.uuid), {'x': 1, 'y': 2.5})
def test_rest_with_data(self):
"""登録されているModuleからのPOSTは404"""
# make dummy environ
with MetaDataSession.begin():
user = User.create(account='tester', password='tester')
user.renew_token()
schema = Schema.create(display_name='Schema', properties='x:int,y:float,data:blob')
module = Module.create(display_name='Module')
mbox = MessageBox(
uuid='4ffab839-cf56-478a-8614-6003a5980857', schema_uuid=schema.uuid, module_uuid=module.uuid
)
MetaDataSession.add(user)
MetaDataSession.add(schema)
MetaDataSession.add(module)
MetaDataSession.add(mbox)
async def _async_side_effect():
print('_async_side_effect')
return True
# data encodingはOK
response = self.fetch(
self.get_url('/modules/{}/{}'.format(module.uuid, mbox.uuid)),
method='POST',
body=json.dumps({
'x': 10.,
'y': 20.5,
'data': encode_to_data(*load_file('test.jpg'))
}),
headers={
'Content-Type': 'application/json',
'Authorization': 'Bearer {token}'.format(token=user.encoded_token),
}
)
self.assertEqual(response.code, 200)
self.datareceiver.receive_new_message.assert_called_once()
args, kwargs = self.datareceiver.receive_new_message.call_args
assert args[0] == str(mbox.uuid)
assert args[1]['x'] == 10.
assert args[1]['y'] == 20.5
assert args[1]['data'].startswith('data:image/jpeg;')
self.reset_mock()
# そうじゃないのはNG
response = self.fetch(
self.get_url('/modules/{}/{}'.format(module.uuid, mbox.uuid)),
method='POST',
body=json.dumps({
'x': 10.,
'y': 20.5,
'data': 'hogehoge'
}),
headers={
'Content-Type': 'application/json',
'Authorization': 'Bearer {token}'.format(token=user.encoded_token),
}
)
self.assertEqual(response.code, 400)
self.datareceiver.receive_new_message.assert_not_called()
self.reset_mock()
# multipartもOK
body, headers = make_multipart_request(
'application/json', json.dumps({
'x': 10.,
'y': 20.5,
'data': 'file:///test.jpg'
}), 'test.jpg'
)
headers['Authorization'] = 'Bearer {token}'.format(token=user.encoded_token)
response = self.fetch(
self.get_url('/modules/{}/{}'.format(module.uuid, mbox.uuid)),
method='POST',
headers=headers,
body=body,
)
self.assertEqual(response.code, 200)
args, kwargs = self.datareceiver.receive_new_message.call_args
assert args[0] == str(mbox.uuid)
assert args[1]['x'] == 10.
assert args[1]['y'] == 20.5
assert 'data' in args[1]
def load_file(filename):
path = os.path.join(os.path.split(__file__)[0], filename)
type, encoding = mimetypes.guess_type(path)
with open(path, 'rb') as fp:
data = fp.read()
return type, encoding, data
def encode_to_data(content_type, encoding, data):
return 'data:{content_type}{charset};bsae64,{encoded}'.format(
content_type=content_type,
charset=';charset={}'.format(encoding) if encoding else '',
encoded=base64.b64encode(data).decode('utf-8')
)
def make_multipart_request(content_type, mainbody, append_filename):
message = EmailMessage()
maintype, subtype = content_type.split('/')
message.set_content(mainbody.encode('utf-8'), maintype=maintype, subtype=subtype)
ct, enc, data = load_file(append_filename)
maintype, subtype = ct.split('/')
message.add_attachment(data, maintype=maintype, subtype=subtype, filename=append_filename)
headerlines, body = message.as_string().split('\n\n', 1)
headers = {}
for ln in headerlines.split('\n'):
k, v = ln.split(':', 1)
headers[k] = v.lstrip()
return body, headers
class TestModuleEventHandlerViaWebsocket(TestModuleEventHandlerBase):
def get_protocol(self):
return 'ws'
@gen_test(timeout=2)
def test_websocket_auth_failed(self):
"""Websocketにも認証がいる"""
# make dummy environ
with MetaDataSession.begin():
schema = Schema.create(display_name='Schema', properties='x:int,y:float')
module = Module.create(display_name='Module')
mbox = MessageBox(
uuid='4ffab839-cf56-478a-8614-6003a5980855', schema_uuid=schema.uuid, module_uuid=module.uuid
)
MetaDataSession.add(schema)
MetaDataSession.add(module)
MetaDataSession.add(mbox)
with self.assertRaises(httpclient.HTTPClientError):
dummy_module = yield websocket_connect(self.get_url('/modules/{}/{}'.format(module.uuid, mbox.uuid)))
dummy_module.write_message(json.dumps({'x': 1, 'y': 2}))
yield sleep(1)
self.datareceiver.receive_new_message.assert_not_called()
@gen_test(timeout=2)
def test_websocket_not_found(self):
"""登録されていないModuleから接続された際は切断."""
with MetaDataSession.begin():
user = User.create(account='tester', password='tester')
user.renew_token()
unknown_box = yield websocket_connect(
httpclient.HTTPRequest(
self.get_url('/modules/4ffab839-cf56-478a-8614-6003a5980855/00000000-0000-0000-0000-000000000000'),
headers={
'Authorization': 'Bearer {token}'.format(token=user.encoded_token),
}
)
)
res = yield unknown_box.read_message()
assert res is None
@gen_test(timeout=2)
def test_websocket_pass_to_nanomsg(self):
"""WebSocketで受け取ったModuleからのMessageに適切なtimestamp/countを付与してnanomsgに流せているかどうか."""
# make dummy environ
with MetaDataSession.begin():
user = User.create(account='tester', password='tester')
user.renew_token()
schema = Schema.create(display_name='Schema', properties='x:int,y:float')
module = Module.create(display_name='Module')
mbox = MessageBox(
uuid='4ffab839-cf56-478a-8614-6003a5980855', schema_uuid=schema.uuid, module_uuid=module.uuid
)
MetaDataSession.add(user)
MetaDataSession.add(schema)
MetaDataSession.add(module)
MetaDataSession.add(mbox)
dummy_module = yield websocket_connect(
httpclient.HTTPRequest(
self.get_url('/modules/{}/{}'.format(module.uuid, mbox.uuid)),
headers={
'Authorization': 'Bearer {token}'.format(token=user.encoded_token),
}
)
)
dummy_module.write_message(json.dumps({'x': 1, 'y': 2}))
# 素直にrecvするとIOLoopがブロックされてModuleHandlerが何も返せなくなるのでModuleHandlerをまず動かす
yield sleep(1)
self.datareceiver.receive_new_message.assert_called_once_with(
'4ffab839-cf56-478a-8614-6003a5980855', {
'x': 1,
'y': 2
}
)
| 35.203822 | 115 | 0.599964 |
import base64
import json
import mimetypes
import os
from email.message import EmailMessage
from unittest.mock import MagicMock
from tornado import httpclient
from tornado.gen import sleep
from tornado.testing import AsyncHTTPTestCase, gen_test
from tornado.web import Application
from tornado.websocket import websocket_connect
from circle_core.models import MessageBox, MetaDataSession, Module, Schema, User
from circle_core.testing import mock_circlecore_context
from circle_core.workers.http import ModuleEventHandler
async def _receive_new_message_side_effect(*args, **kwargs):
return True
class TestModuleEventHandlerBase(AsyncHTTPTestCase):
def get_app(self):
return Application(
[(r'/modules/(?P<module_uuid>[0-9A-Fa-f-]+)/(?P<mbox_uuid>[0-9A-Fa-f-]+)', ModuleEventHandler)],
_core=self.app_mock
)
def setUp(self):
self.app_mock = MagicMock()
self.datareceiver = MagicMock()
self.datareceiver.receive_new_message.return_value = True
self.datareceiver.receive_new_message.side_effect = _receive_new_message_side_effect
self.app_mock.get_datareceiver.return_value = self.datareceiver
super().setUp()
self.ctxt = mock_circlecore_context()
self.ctxt.__enter__()
def tearDown(self):
self.ctxt.__exit__(None, None, None)
super().tearDown()
def reset_mock(self):
self.datareceiver.reset_mock()
self.datareceiver.receive_new_message.side_effect = _receive_new_message_side_effect
class TestModuleEventHandlerViaREST(TestModuleEventHandlerBase):
def test_rest_not_found(self):
with MetaDataSession.begin():
user = User.create(account='tester', password='tester')
user.renew_token()
MetaDataSession.add(user)
response = self.fetch(
self.get_url('/modules/4ffab839-cf56-478a-8614-6003a5980855/00000000-0000-0000-0000-000000000000'),
method='POST',
body=json.dumps({
'x': 1,
'y': 2
}),
headers={
'Content-Type': 'application/json',
'Authorization': 'Bearer {token}'.format(token=user.encoded_token),
}
)
self.assertEqual(response.code, 404)
def test_rest(self):
with MetaDataSession.begin():
user = User.create(account='tester', password='tester')
user.renew_token()
schema = Schema.create(display_name='Schema', properties='x:int,y:float')
module = Module.create(display_name='Module')
mbox = MessageBox(
uuid='4ffab839-cf56-478a-8614-6003a5980856', schema_uuid=schema.uuid, module_uuid=module.uuid
)
MetaDataSession.add(user)
MetaDataSession.add(schema)
MetaDataSession.add(module)
MetaDataSession.add(mbox)
response = self.fetch(
self.get_url('/modules/{}/{}'.format(module.uuid, mbox.uuid)),
method='POST',
body=json.dumps({
'x': 1,
'y': 2.5
}),
headers={
'Content-Type': 'application/json',
'Authorization': 'Bearer {token}'.format(token=user.encoded_token),
}
)
self.assertEqual(response.code, 200)
self.datareceiver.receive_new_message.assert_called_once_with(str(mbox.uuid), {'x': 1, 'y': 2.5})
def test_rest_with_data(self):
with MetaDataSession.begin():
user = User.create(account='tester', password='tester')
user.renew_token()
schema = Schema.create(display_name='Schema', properties='x:int,y:float,data:blob')
module = Module.create(display_name='Module')
mbox = MessageBox(
uuid='4ffab839-cf56-478a-8614-6003a5980857', schema_uuid=schema.uuid, module_uuid=module.uuid
)
MetaDataSession.add(user)
MetaDataSession.add(schema)
MetaDataSession.add(module)
MetaDataSession.add(mbox)
async def _async_side_effect():
print('_async_side_effect')
return True
response = self.fetch(
self.get_url('/modules/{}/{}'.format(module.uuid, mbox.uuid)),
method='POST',
body=json.dumps({
'x': 10.,
'y': 20.5,
'data': encode_to_data(*load_file('test.jpg'))
}),
headers={
'Content-Type': 'application/json',
'Authorization': 'Bearer {token}'.format(token=user.encoded_token),
}
)
self.assertEqual(response.code, 200)
self.datareceiver.receive_new_message.assert_called_once()
args, kwargs = self.datareceiver.receive_new_message.call_args
assert args[0] == str(mbox.uuid)
assert args[1]['x'] == 10.
assert args[1]['y'] == 20.5
assert args[1]['data'].startswith('data:image/jpeg;')
self.reset_mock()
response = self.fetch(
self.get_url('/modules/{}/{}'.format(module.uuid, mbox.uuid)),
method='POST',
body=json.dumps({
'x': 10.,
'y': 20.5,
'data': 'hogehoge'
}),
headers={
'Content-Type': 'application/json',
'Authorization': 'Bearer {token}'.format(token=user.encoded_token),
}
)
self.assertEqual(response.code, 400)
self.datareceiver.receive_new_message.assert_not_called()
self.reset_mock()
body, headers = make_multipart_request(
'application/json', json.dumps({
'x': 10.,
'y': 20.5,
'data': 'file:///test.jpg'
}), 'test.jpg'
)
headers['Authorization'] = 'Bearer {token}'.format(token=user.encoded_token)
response = self.fetch(
self.get_url('/modules/{}/{}'.format(module.uuid, mbox.uuid)),
method='POST',
headers=headers,
body=body,
)
self.assertEqual(response.code, 200)
args, kwargs = self.datareceiver.receive_new_message.call_args
assert args[0] == str(mbox.uuid)
assert args[1]['x'] == 10.
assert args[1]['y'] == 20.5
assert 'data' in args[1]
def load_file(filename):
path = os.path.join(os.path.split(__file__)[0], filename)
type, encoding = mimetypes.guess_type(path)
with open(path, 'rb') as fp:
data = fp.read()
return type, encoding, data
def encode_to_data(content_type, encoding, data):
return 'data:{content_type}{charset};bsae64,{encoded}'.format(
content_type=content_type,
charset=';charset={}'.format(encoding) if encoding else '',
encoded=base64.b64encode(data).decode('utf-8')
)
def make_multipart_request(content_type, mainbody, append_filename):
message = EmailMessage()
maintype, subtype = content_type.split('/')
message.set_content(mainbody.encode('utf-8'), maintype=maintype, subtype=subtype)
ct, enc, data = load_file(append_filename)
maintype, subtype = ct.split('/')
message.add_attachment(data, maintype=maintype, subtype=subtype, filename=append_filename)
headerlines, body = message.as_string().split('\n\n', 1)
headers = {}
for ln in headerlines.split('\n'):
k, v = ln.split(':', 1)
headers[k] = v.lstrip()
return body, headers
class TestModuleEventHandlerViaWebsocket(TestModuleEventHandlerBase):
def get_protocol(self):
return 'ws'
@gen_test(timeout=2)
def test_websocket_auth_failed(self):
with MetaDataSession.begin():
schema = Schema.create(display_name='Schema', properties='x:int,y:float')
module = Module.create(display_name='Module')
mbox = MessageBox(
uuid='4ffab839-cf56-478a-8614-6003a5980855', schema_uuid=schema.uuid, module_uuid=module.uuid
)
MetaDataSession.add(schema)
MetaDataSession.add(module)
MetaDataSession.add(mbox)
with self.assertRaises(httpclient.HTTPClientError):
dummy_module = yield websocket_connect(self.get_url('/modules/{}/{}'.format(module.uuid, mbox.uuid)))
dummy_module.write_message(json.dumps({'x': 1, 'y': 2}))
yield sleep(1)
self.datareceiver.receive_new_message.assert_not_called()
@gen_test(timeout=2)
def test_websocket_not_found(self):
with MetaDataSession.begin():
user = User.create(account='tester', password='tester')
user.renew_token()
unknown_box = yield websocket_connect(
httpclient.HTTPRequest(
self.get_url('/modules/4ffab839-cf56-478a-8614-6003a5980855/00000000-0000-0000-0000-000000000000'),
headers={
'Authorization': 'Bearer {token}'.format(token=user.encoded_token),
}
)
)
res = yield unknown_box.read_message()
assert res is None
@gen_test(timeout=2)
def test_websocket_pass_to_nanomsg(self):
with MetaDataSession.begin():
user = User.create(account='tester', password='tester')
user.renew_token()
schema = Schema.create(display_name='Schema', properties='x:int,y:float')
module = Module.create(display_name='Module')
mbox = MessageBox(
uuid='4ffab839-cf56-478a-8614-6003a5980855', schema_uuid=schema.uuid, module_uuid=module.uuid
)
MetaDataSession.add(user)
MetaDataSession.add(schema)
MetaDataSession.add(module)
MetaDataSession.add(mbox)
dummy_module = yield websocket_connect(
httpclient.HTTPRequest(
self.get_url('/modules/{}/{}'.format(module.uuid, mbox.uuid)),
headers={
'Authorization': 'Bearer {token}'.format(token=user.encoded_token),
}
)
)
dummy_module.write_message(json.dumps({'x': 1, 'y': 2}))
yield sleep(1)
self.datareceiver.receive_new_message.assert_called_once_with(
'4ffab839-cf56-478a-8614-6003a5980855', {
'x': 1,
'y': 2
}
)
| true | true |
1c37912e81607ea595ce4d0f840633f776e62b45 | 4,323 | py | Python | gnsstools/satellites/galileo.py | arthurdjn/gnsstools | e496093bcecb4b543d5c73b6f5bdfc70b53dbfab | [
"MIT"
] | 3 | 2021-06-21T08:54:23.000Z | 2021-12-09T06:39:52.000Z | gnsstools/satellites/galileo.py | yxw027/gnsstools | e496093bcecb4b543d5c73b6f5bdfc70b53dbfab | [
"MIT"
] | null | null | null | gnsstools/satellites/galileo.py | yxw027/gnsstools | e496093bcecb4b543d5c73b6f5bdfc70b53dbfab | [
"MIT"
] | 3 | 2021-03-14T01:43:15.000Z | 2022-01-13T04:12:38.000Z | # File: galileo.py
# Creation: Sunday January 24th 2021
# Author: Arthur Dujardin
# ------
# Copyright (c) 2021 Arthur Dujardin
from .satellite import Satellite
class GALILEO(Satellite):
def __init__(self, prn=None, toc=None,
sv_clock_bias=None, sv_clock_drift=None, sv_clock_drift_rate=None,
iod_nav=None, crs=None, delta_n=None, m0=None,
cuc=None, e=None, cus=None, sqrt_a=None,
toe=None, cic=None, omega0=None, cis=None,
i0=None, crc=None, omega=None, omega_dot=None,
idot=None, gps_week=None, gal_week=None,
sisa=None, sv_health=None, bgd_e5a=None, bgd_e5b=None,
trans_time=None):
super().__init__(prn=prn, toc=toc)
# First row
self.sv_clock_bias = sv_clock_bias
self.sv_clock_drift = sv_clock_drift
self.sv_clock_drift_rate = sv_clock_drift_rate
# Second row
self.iod_nav = iod_nav
self.crs = crs
self.delta_n = delta_n
self.m0 = m0
# Third row
self.cuc = cuc
self.e = e
self.cus = cus
self.sqrt_a = sqrt_a
# Fourth row
self.toe = toe
self.cic = cic
self.omega0 = omega0
self.cis = cis
# Fifth row
self.i0 = i0
self.crc = crc
self.omega = omega
self.omega_dot = omega_dot
# Sixth row
self.idot = idot
self.gps_week = gps_week
self.gal_week = gal_week
# Seventh row
self.sisa = sisa
self.sv_heath = sv_health
self.bgd_e5a = bgd_e5a
self.bgd_e5b = bgd_e5b
# Eighth row
self.trans_time = trans_time
@property
def system(self):
return "E"
def __repr__(self):
rep = f"GALILEO("
# First line
rep += f"\n system: {self.system}"
rep += f"\n prn: {self.prn:d}"
rep += f"\n toc: {self.toc} [UTC] (Time Of Clock)"
rep += f"\n sv_clock_bias: {self.sv_clock_bias: .6e} [s]"
rep += f"\n sv_clock_drift: {self.sv_clock_drift: .6e} [s/s]"
rep += f"\n sv_clock_drift_rate: {self.sv_clock_drift_rate: .6e} [s/s2]"
# Second line
rep += f"\n iod_nav: {self.iod_nav: .6e} (Issue Of Data of the nav batch)"
rep += f"\n crs: {self.crs: .6e} [m]"
rep += f"\n delta_n: {self.delta_n: .6e} [rad/s]"
rep += f"\n m0: {self.m0: .6e} [rad]"
# Third line
rep += f"\n cuc: {self.cuc: .6e} [rad]"
rep += f"\n e: {self.e: .6e} (Eccentricity)"
rep += f"\n cus: {self.cus: .6e} [rad]"
rep += f"\n sqrt_a: {self.sqrt_a: .6e} [sqrt(m)]"
# Fourth line
rep += f"\n toe: {self.toe: .6e} [sec of GAL week] (Time Of Ephemeris)"
rep += f"\n cic: {self.cic: .6e} [rad]"
rep += f"\n omega0: {self.omega0: .6e} [rad]"
rep += f"\n cis: {self.cis: .6e} [rad]"
# Fifth line
rep += f"\n i0: {self.i0: .6e} [rad]"
rep += f"\n crc: {self.crc: .6e} [m]"
rep += f"\n omega: {self.omega: .6e} [rad]"
rep += f"\n omega_dot: {self.omega_dot: .6e} [rad/s]"
# Sixth line
rep += f"\n idot: {self.idot: .6e} [rad/s]"
rep += f"\n l2_codes: {self.l2_codes: .6e} (codes on L2 channel)"
rep += f"\n gal_week: {self.gal_week: .6e} (to go with TOE)"
# Seventh line
rep += f"\n sisa: {self.sisa: .6e} [m] (Signal in space accuracy)"
rep += f"\n sv_health: {self.sv_health: .6e} (See Galileo ICD Section 5.1.9.3)"
rep += f"\n bgd_e5a: {self.bgd_e5a: .6e} [s] (BGD E5a/E1)"
rep += f"\n bgd_e5b: {self.bgd_e5b: .6e} [s] (BGD E5b/E1)"
# Eighth line
rep += f"\n trans_time: {self.trans_time: .6e} [sec of GAL week] (e.g. derived from WN and TOW of page type 1)"
rep += f"\n)"
return rep
| 41.171429 | 129 | 0.480453 |
from .satellite import Satellite
class GALILEO(Satellite):
def __init__(self, prn=None, toc=None,
sv_clock_bias=None, sv_clock_drift=None, sv_clock_drift_rate=None,
iod_nav=None, crs=None, delta_n=None, m0=None,
cuc=None, e=None, cus=None, sqrt_a=None,
toe=None, cic=None, omega0=None, cis=None,
i0=None, crc=None, omega=None, omega_dot=None,
idot=None, gps_week=None, gal_week=None,
sisa=None, sv_health=None, bgd_e5a=None, bgd_e5b=None,
trans_time=None):
super().__init__(prn=prn, toc=toc)
self.sv_clock_bias = sv_clock_bias
self.sv_clock_drift = sv_clock_drift
self.sv_clock_drift_rate = sv_clock_drift_rate
self.iod_nav = iod_nav
self.crs = crs
self.delta_n = delta_n
self.m0 = m0
self.cuc = cuc
self.e = e
self.cus = cus
self.sqrt_a = sqrt_a
self.toe = toe
self.cic = cic
self.omega0 = omega0
self.cis = cis
self.i0 = i0
self.crc = crc
self.omega = omega
self.omega_dot = omega_dot
self.idot = idot
self.gps_week = gps_week
self.gal_week = gal_week
self.sisa = sisa
self.sv_heath = sv_health
self.bgd_e5a = bgd_e5a
self.bgd_e5b = bgd_e5b
self.trans_time = trans_time
@property
def system(self):
return "E"
def __repr__(self):
rep = f"GALILEO("
rep += f"\n system: {self.system}"
rep += f"\n prn: {self.prn:d}"
rep += f"\n toc: {self.toc} [UTC] (Time Of Clock)"
rep += f"\n sv_clock_bias: {self.sv_clock_bias: .6e} [s]"
rep += f"\n sv_clock_drift: {self.sv_clock_drift: .6e} [s/s]"
rep += f"\n sv_clock_drift_rate: {self.sv_clock_drift_rate: .6e} [s/s2]"
rep += f"\n iod_nav: {self.iod_nav: .6e} (Issue Of Data of the nav batch)"
rep += f"\n crs: {self.crs: .6e} [m]"
rep += f"\n delta_n: {self.delta_n: .6e} [rad/s]"
rep += f"\n m0: {self.m0: .6e} [rad]"
rep += f"\n cuc: {self.cuc: .6e} [rad]"
rep += f"\n e: {self.e: .6e} (Eccentricity)"
rep += f"\n cus: {self.cus: .6e} [rad]"
rep += f"\n sqrt_a: {self.sqrt_a: .6e} [sqrt(m)]"
rep += f"\n toe: {self.toe: .6e} [sec of GAL week] (Time Of Ephemeris)"
rep += f"\n cic: {self.cic: .6e} [rad]"
rep += f"\n omega0: {self.omega0: .6e} [rad]"
rep += f"\n cis: {self.cis: .6e} [rad]"
rep += f"\n i0: {self.i0: .6e} [rad]"
rep += f"\n crc: {self.crc: .6e} [m]"
rep += f"\n omega: {self.omega: .6e} [rad]"
rep += f"\n omega_dot: {self.omega_dot: .6e} [rad/s]"
rep += f"\n idot: {self.idot: .6e} [rad/s]"
rep += f"\n l2_codes: {self.l2_codes: .6e} (codes on L2 channel)"
rep += f"\n gal_week: {self.gal_week: .6e} (to go with TOE)"
rep += f"\n sisa: {self.sisa: .6e} [m] (Signal in space accuracy)"
rep += f"\n sv_health: {self.sv_health: .6e} (See Galileo ICD Section 5.1.9.3)"
rep += f"\n bgd_e5a: {self.bgd_e5a: .6e} [s] (BGD E5a/E1)"
rep += f"\n bgd_e5b: {self.bgd_e5b: .6e} [s] (BGD E5b/E1)"
rep += f"\n trans_time: {self.trans_time: .6e} [sec of GAL week] (e.g. derived from WN and TOW of page type 1)"
rep += f"\n)"
return rep
| true | true |
1c379203022852868267188e130ff323afe0d01d | 2,790 | py | Python | utils/misc.py | samuelbroscheit/open_knowledge_graph_embeddings | 1ce37a4261a37e357a0f4dac3ee130ff11cbea4e | [
"MIT"
] | 23 | 2020-11-09T13:17:44.000Z | 2021-12-31T09:53:49.000Z | utils/misc.py | samuelbroscheit/open_knowledge_graph_embeddings | 1ce37a4261a37e357a0f4dac3ee130ff11cbea4e | [
"MIT"
] | 3 | 2021-03-31T05:41:34.000Z | 2022-01-18T12:35:00.000Z | utils/misc.py | samuelbroscheit/open_knowledge_graph_embeddings | 1ce37a4261a37e357a0f4dac3ee130ff11cbea4e | [
"MIT"
] | 6 | 2020-11-09T13:18:49.000Z | 2022-03-07T21:11:40.000Z | import random
import numpy
import numpy as np
import torch
from torch.autograd import Variable
def onehot(indexes, N=None, ignore_index=None):
"""
Creates a one-representation of indexes with N possible entries
if N is not specified, it will suit the maximum index appearing.
indexes is a long-tensor of indexes
ignore_index will be zero in onehot representation
"""
return_variable = False
if isinstance(indexes, Variable):
return_variable = True
indexes = indexes.data
if N is None:
N = indexes.max() + 1
sz = list(indexes.size())
output = indexes.new().byte().resize_(*sz, N).zero_()
output.scatter_(-1, indexes.unsqueeze(-1), 1)
if ignore_index is not None and ignore_index >= 0:
output.masked_fill_(indexes.eq(ignore_index).unsqueeze(-1), 0)
if return_variable:
output = Variable(output, requires_grad=False)
return output
def set_global_seeds(i):
try:
import torch
except ImportError:
pass
else:
torch.manual_seed(i)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(i)
np.random.seed(i)
random.seed(i)
def prettyformat_dict_string(d, indent=''):
result = list()
for k, v in d.items():
if isinstance(v, dict):
result.append('{}{}:\t\n{}'.format(indent, k, prettyformat_dict_string(v, indent + ' ')))
else:
result.append('{}{}:\t{}\n'.format(indent, k, v))
return ''.join(result)
def pack_list_of_lists(lol):
offsets = list()
ent_list = list()
offsets.append(0)
for l in lol:
if isinstance(l, list) or isinstance(l, tuple):
ent_list.extend(l)
offsets.append(len(ent_list))
else:
ent_list.append(l)
offsets.append(len(ent_list))
offsets.append(-len(offsets)-1)
out = (numpy.array(offsets)+len(offsets)).tolist()
return out + ent_list
def unpack_list_of_lists(ents):
ent_list = list()
end = -1
all_begin = -1
all_end = -1
for off in ents:
if all_begin == -1:
all_begin = off
if off == 0:
break
if end == -1:
end = off
continue
else:
begin = end
end = off
all_end = off
ent_list.append(ents[begin:end].tolist())
return ent_list, ents[all_begin:all_end].tolist()
def argparse_bool_type(v):
"Type for argparse that correctly treats Boolean values"
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.") | 27.623762 | 102 | 0.598925 | import random
import numpy
import numpy as np
import torch
from torch.autograd import Variable
def onehot(indexes, N=None, ignore_index=None):
return_variable = False
if isinstance(indexes, Variable):
return_variable = True
indexes = indexes.data
if N is None:
N = indexes.max() + 1
sz = list(indexes.size())
output = indexes.new().byte().resize_(*sz, N).zero_()
output.scatter_(-1, indexes.unsqueeze(-1), 1)
if ignore_index is not None and ignore_index >= 0:
output.masked_fill_(indexes.eq(ignore_index).unsqueeze(-1), 0)
if return_variable:
output = Variable(output, requires_grad=False)
return output
def set_global_seeds(i):
    """Seed every RNG in use: torch (incl. CUDA when available, and only if
    torch is importable), then numpy, then the stdlib ``random`` module."""
    try:
        import torch
    except ImportError:
        torch = None
    if torch is not None:
        torch.manual_seed(i)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(i)
    np.random.seed(i)
    random.seed(i)
def prettyformat_dict_string(d, indent=''):
result = list()
for k, v in d.items():
if isinstance(v, dict):
result.append('{}{}:\t\n{}'.format(indent, k, prettyformat_dict_string(v, indent + ' ')))
else:
result.append('{}{}:\t{}\n'.format(indent, k, v))
return ''.join(result)
def pack_list_of_lists(lol):
offsets = list()
ent_list = list()
offsets.append(0)
for l in lol:
if isinstance(l, list) or isinstance(l, tuple):
ent_list.extend(l)
offsets.append(len(ent_list))
else:
ent_list.append(l)
offsets.append(len(ent_list))
offsets.append(-len(offsets)-1)
out = (numpy.array(offsets)+len(offsets)).tolist()
return out + ent_list
def unpack_list_of_lists(ents):
ent_list = list()
end = -1
all_begin = -1
all_end = -1
for off in ents:
if all_begin == -1:
all_begin = off
if off == 0:
break
if end == -1:
end = off
continue
else:
begin = end
end = off
all_end = off
ent_list.append(ents[begin:end].tolist())
return ent_list, ents[all_begin:all_end].tolist()
def argparse_bool_type(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.") | true | true |
1c37920ca1f9519b054419686cf59a0137afe61a | 9,474 | py | Python | data_process/ops.py | ys10/GCIClassify | a66b1a257ac26b10732a68228721023b99f67a8e | [
"MIT"
] | null | null | null | data_process/ops.py | ys10/GCIClassify | a66b1a257ac26b10732a68228721023b99f67a8e | [
"MIT"
] | null | null | null | data_process/ops.py | ys10/GCIClassify | a66b1a257ac26b10732a68228721023b99f67a8e | [
"MIT"
] | null | null | null | # coding=utf-8
import os
import numpy as np
from scipy.signal import argrelextrema
from scipy.io import wavfile
def find_local_minimum(data, threshold=None):
    """
    Find local minima in data.
    :param data: input data (1-D numpy array).
    :param threshold: (optional) local minima whose value is not less than
        threshold are discarded.
    :return: a 1-D numpy array of indices.
    """
    local_min_idx = argrelextrema(data, np.less)[0]
    if threshold is not None:
        # Fixes two defects of the original: (1) the filtered branch returned
        # a plain Python list while the other branch returned an ndarray;
        # (2) ``if threshold:`` silently skipped filtering for threshold=0.
        local_min_idx = local_min_idx[data[local_min_idx] < threshold]
    return local_min_idx
def file_names(file_dir):
    """
    List all file names (without extension) in target directory.

    Note: only the part before the *first* '.' is kept (so 'a.b.wav'
    yields 'a'), matching the original behaviour.

    :param file_dir: target directory (walked recursively).
    :return: a list containing file names.
    """
    collected = []
    for _, _, files in os.walk(file_dir):
        collected.extend(name.split(".")[0] for name in files)
    return collected
def read_wav_data(path):
    """
    Read wav file.
    :param path: wav file path.
    :return: (sampling rate, copy of the waveform samples).
    """
    sample_rate, samples = wavfile.read(path)
    # Return a copy so callers never mutate scipy's internal buffer.
    return sample_rate, samples[:]
def read_marks_data(path, rate, wave_length):
    """
    Read marks file.
    :param path: marks file path (one GCI time per line, in seconds).
    :param rate: sampling rate.
    :param wave_length: wave length in samples.
    :return: a list containing the index (round(time * rate)) of each gci.
    """
    marks = []
    with open(path) as mark_file:
        for line in mark_file:
            marks.append(round(float(line) * rate))
    # NOTE(review): when the last mark falls at/after wave_length the
    # original code drops the last TWO marks ([:-2]), not one; behaviour is
    # preserved as-is -- confirm whether this is intentional.
    if marks[-1] >= wave_length:
        return marks[:-2]
    return marks
def label_peaks(peaks, marks, threshold):
    """
    Label peaks with marks.
    Give a distance threshold, for all peaks within distance from mark no more than threshold.
    Pick up target peak follow these priorities
        1. nearest right peak;
        2. nearest left peak;
        3. missed.
    :param peaks: peak indices.
    :param marks: marks indices.
    :param threshold: distance threshold between a couple of (peak, mark).
    :return: a tuple(labels, errors, miss, pos_cnt) where:
        labels: peak labels.
        errors: distance between peaks and marks(zero for negative sample)
        miss: missed marks
        pos_cnt: positive sample count.
    """
    labels = [0] * len(peaks)
    errors = [0] * len(peaks)
    miss = list()  # missed marks
    pos_cnt = 0  # positive labeled marks count
    for mark in marks:
        left_peaks = list()
        right_peaks = list()
        # calculate a search range based on mark & threshold
        search_range = calculate_search_range(mark, threshold)
        # record candidate peak indices falling inside the search range
        for j in range(0, len(peaks)):
            peak = peaks[j]
            if peak < search_range["left"] or peak > search_range["right"]:
                continue
            elif search_range["left"] <= peak < mark:  # left half of search range
                left_peaks.append(j)
            elif mark <= peak <= search_range["right"]:  # right half of search range
                right_peaks.append(j)
            else:
                print("mark: {}, peak: {}, threshold: {}".format(mark, peak, threshold))
                print("left_border: {}, right_border: {}".format(search_range["left"], search_range["right"]))
                raise KeyError
        # Pick the optimum peak. The candidate lists are already built in
        # ascending index order; the original's duplicate sort calls inside
        # each branch were redundant and have been removed (one defensive
        # sort per list is kept).
        left_peaks.sort()
        right_peaks.sort()
        if right_peaks:  # nearest right peak exists.
            peak_idx = right_peaks[0]
        elif left_peaks:  # no right peak, but a nearest left peak exists.
            peak_idx = left_peaks[-1]
        else:  # neither exists: miss this mark & record it.
            miss.append(mark)
            continue
        # NOTE(review): if two marks select the same peak, pos_cnt is
        # incremented twice while the peak stays labelled once -- behaviour
        # preserved from the original.
        labels[peak_idx] = 1
        errors[peak_idx] = abs(peaks[peak_idx] - mark)
        pos_cnt += 1
    assert len(peaks) == len(labels) == len(errors)
    return labels, errors, miss, pos_cnt


def calculate_search_range(mark, threshold):
    """Asymmetric search window around ``mark``: half a threshold to the
    left, a full threshold to the right (right peaks are preferred)."""
    search_range = {"left": mark - threshold / 2, "right": mark + threshold}
    return search_range
# def label_peaks(peaks, marks, threshold):
# """
# Label peaks with marks.
# Give a distance threshold, for all peaks within distance from mark no more than threshold.
# Pick up target peak follow these priorities
# 1. nearest right peak;
# 2. nearest left peak;
# 3. missed.
# :param peaks: peak indices.
# :param marks: marks indices.
# :param threshold: distance threshold between a couple of (peak, mark).
# :return: a tuple(labels, errors, pos_cnt) where:
# labels: peak labels.
# errors: distance between peaks and marks(zero for negative sample)
# miss: missed marks
# pos_cnt: positive sample count.
# """
# marks.sort()
# peaks.sort()
# labels = [0] * len(peaks)
# errors = [0] * len(peaks)
# miss = list() # missed marks
# pos_cnt = 0 # positive labeled marks count
# current_peak = 0 # peak index
# for i in range(len(marks)):
# mark = marks[i]
# if current_peak >= len(peaks) - 1: # finally miss this mark & record it.
# miss.append(mark)
# continue
# left_peaks = []
# right_peaks = []
# for j in range(current_peak, len(peaks)):
# peak = peaks[j]
# error = abs(peak-mark)
# if peak < mark & error <= threshold:
# left_peaks.append(j)
# elif peak >= mark & error <= threshold:
# right_peaks.append(j)
# elif peak > mark: # Key step
# break
# left_peaks.sort()
# right_peaks.sort()
# if len(right_peaks) > 0: # nearest right peak exists.
# right_peaks.sort()
# peak_idx = right_peaks[0]
# elif len(left_peaks) > 0: # nearest right peak does not exist, but nearest left peak exists.
# left_peaks.sort()
# peak_idx = left_peaks[len(left_peaks) - 1]
# else: # neither nearest right or left peak exists, finally miss this mark & record it.
# miss.append(mark)
# # rate = 16000
# # print("\tmissed mark: " + str(mark / rate))
# # print("\tcurrent peak: " + str(peaks[current_peak] / rate))
# continue
# labels[peak_idx] = 1
# peak = peaks[peak_idx]
# error = abs(peak - mark)
# errors[peak_idx] = error
# pos_cnt += 1
# current_peak = peak_idx + 1
# assert len(peaks) == len(labels) == len(errors)
# return labels, errors, miss, pos_cnt
#
#
# def old_label_peaks(peaks, marks, threshold):
# """
# Label peaks with marks.
# Give a distance threshold, for all peaks within distance from mark no more than threshold.
# Pick up target peak follow these priorities
# 1. nearest right peak;
# 2. missed.
# :param peaks: peak indices.
# :param marks: marks indices.
# :param threshold: distance threshold between a couple of (peak, mark).
# :return: a tuple(labels, errors, pos_cnt) where:
# labels: peak labels.
# errors: distance between peaks and marks(zero for negative sample)
# miss: missed marks
# pos_cnt: positive sample count.
# """
# labels = [0] * len(peaks)
# errors = [0] * len(peaks)
# miss = list()
# pos_cnt = 0
# current_peak = 0
# for i in range(len(marks)):
# mark = marks[i]
# if current_peak == len(peaks): # finally miss this mark & record it.
# miss.append(mark)
# continue
# for j in range(current_peak, len(peaks)):
# peak = peaks[j]
# error = peak-mark
# if peak >= mark & error <= threshold: # label this peak & jump out of the loop.
# labels[j] = 1
# errors[j] = error
# pos_cnt += 1
# current_peak = j+1
# break
# if j == len(peaks)-1: # finally miss this mark & record it.
# miss.append(mark)
# assert len(peaks) == len(labels) == len(errors)
# return labels, errors, miss, pos_cnt
def crop_wav(wav, center, radius):
    """
    Crop wav on [center - radius, center + radius + 1], zero-padding any
    out-of-range positions.
    :param wav: waveform array.
    :param center: crop center index.
    :param radius: crop radius.
    :return: a slice whose length is radius * 2 + 1.
    """
    lo = center - radius
    hi = center + radius + 1
    if lo < 0:
        # Pad on the left (float64 zeros, so the result is upcast as before).
        window = np.concatenate([np.zeros(-lo), wav[0:hi]])
    elif hi > len(wav):
        # Pad on the right.
        window = np.concatenate([wav[lo:len(wav)], np.zeros(hi - len(wav))])
    else:
        window = wav[lo:hi]
    assert len(window) == radius * 2 + 1
    return window
| 35.750943 | 110 | 0.578742 |
import os
import numpy as np
from scipy.signal import argrelextrema
from scipy.io import wavfile
def find_local_minimum(data, threshold=None):
local_min_idx = argrelextrema(data, np.less)
local_min_idx = local_min_idx[0]
if threshold:
local_min_idx = [idx for idx in local_min_idx if data[idx] < threshold]
return local_min_idx
def file_names(file_dir):
file_names_list = list()
for _, _, files in os.walk(file_dir):
for file in files:
file_names_list.append(file.split(".")[0])
return file_names_list
def read_wav_data(path):
rate, data = wavfile.read(path)
return rate, data[:]
def read_marks_data(path, rate, wave_length):
marks = list()
with open(path) as mark_file:
while 1:
lines = mark_file.readlines(10000)
if not lines:
break
marks.extend(map(lambda l: round(float(l) * rate), lines))
if marks[-1] >= wave_length:
return marks[:-2]
return marks
def label_peaks(peaks, marks, threshold):
labels = [0] * len(peaks)
errors = [0] * len(peaks)
miss = list()
pos_cnt = 0
for mark in marks:
left_peaks = list()
right_peaks = list()
search_range = calculate_search_range(mark, threshold)
for j in range(0, len(peaks)):
peak = peaks[j]
if peak < search_range["left"]:
continue
elif peak > search_range["right"]:
continue
elif search_range["left"] <= peak < mark:
left_peaks.append(j)
elif mark <= peak <= search_range["right"]:
right_peaks.append(j)
else:
print("mark: {}, peak: {}, threshold: {}".format(mark, peak, threshold))
print("left_border: {}, right_border: {}".format(search_range["left"], search_range["right"]))
raise KeyError
left_peaks.sort()
right_peaks.sort()
if len(right_peaks) > 0:
right_peaks.sort()
peak_idx = right_peaks[0]
elif len(left_peaks) > 0:
left_peaks.sort()
peak_idx = left_peaks[len(left_peaks) - 1]
else:
miss.append(mark)
continue
labels[peak_idx] = 1
peak = peaks[peak_idx]
error = abs(peak - mark)
errors[peak_idx] = error
pos_cnt += 1
assert len(peaks) == len(labels) == len(errors)
return labels, errors, miss, pos_cnt
def calculate_search_range(mark, threshold):
search_range = {"left": mark-threshold/2, "right": mark+threshold}
return search_range
# Label peaks with marks.
# Give a distance threshold, for all peaks within distance from mark no more than threshold.
# Pick up target peak follow these priorities
# 1. nearest right peak;
# 2. nearest left peak;
# 3. missed.
# :param peaks: peak indices.
# :param marks: marks indices.
# :param threshold: distance threshold between a couple of (peak, mark).
# :return: a tuple(labels, errors, pos_cnt) where:
# labels: peak labels.
# errors: distance between peaks and marks(zero for negative sample)
# miss: missed marks
# pos_cnt: positive sample count.
# """
eshold between a couple of (peak, mark).
# :return: a tuple(labels, errors, pos_cnt) where:
# labels: peak labels.
# errors: distance between peaks and marks(zero for negative sample)
# miss: missed marks
# pos_cnt: positive sample count.
# """
radius + 1
if left_border < 0:
zeros = np.zeros(-left_border)
cropped_wav = np.concatenate([zeros, wav[0: right_border]])
elif right_border > len(wav):
zeros = np.zeros(right_border - len(wav))
cropped_wav = np.concatenate([wav[left_border: len(wav)], zeros])
else:
cropped_wav = wav[left_border: right_border]
assert len(cropped_wav) == radius * 2 + 1
return cropped_wav
| true | true |
1c3793a20513ed473bbca4e1b4bdc673ad328aed | 229 | py | Python | src/vassal_deployer/__init__.py | evansde77/vassal_deployer | 4aaadd35b81c454a6264540f5fb795bfc1daa991 | [
"Apache-2.0"
] | null | null | null | src/vassal_deployer/__init__.py | evansde77/vassal_deployer | 4aaadd35b81c454a6264540f5fb795bfc1daa991 | [
"Apache-2.0"
] | null | null | null | src/vassal_deployer/__init__.py | evansde77/vassal_deployer | 4aaadd35b81c454a6264540f5fb795bfc1daa991 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
vassal_deployer
"""
__version__="0.0.2"
import os
from .logger import get_logger
logger = get_logger(
os.environ.get('VASSAL_DEPLOYER_LOG'),
os.environ.get('VASSAL_DEPLOYER_STDOUT', False)
)
| 14.3125 | 51 | 0.716157 |
__version__="0.0.2"
import os
from .logger import get_logger
logger = get_logger(
os.environ.get('VASSAL_DEPLOYER_LOG'),
os.environ.get('VASSAL_DEPLOYER_STDOUT', False)
)
| true | true |
1c3794349c3d473d227ff8b97d54267ceb30c171 | 1,081 | py | Python | setup.py | oronibrian/django-mpesa | fb5de34829fedf0d898d4daa5ad8a36efefd3aee | [
"MIT"
] | 1 | 2020-04-06T08:28:46.000Z | 2020-04-06T08:28:46.000Z | setup.py | oronibrian/django-mpesa | fb5de34829fedf0d898d4daa5ad8a36efefd3aee | [
"MIT"
] | 4 | 2020-02-11T23:54:32.000Z | 2021-06-10T21:16:48.000Z | setup.py | oronibrian/django-mpesa | fb5de34829fedf0d898d4daa5ad8a36efefd3aee | [
"MIT"
] | 1 | 2022-02-19T21:00:56.000Z | 2022-02-19T21:00:56.000Z |
import os
from setuptools import setup

# Long description is taken from the README shipped next to this file.
# Fix: the file handle was previously leaked (open() without close);
# use a context manager instead.
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme_file:
    README = readme_file.read()

# Allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='DjangoMpesa',
    version='1.4',
    packages=['mpesaApp'],
    include_package_data=True,
    license='BSD License',
    description='A simple Django app for integrating mpesa stk push payment to your django site.',
    long_description=README,
    url='http://www.techtenant.co.ke/',
    author='Oronz',
    keywords=['MPESA', 'Django', 'Djangompesa'],  # Keywords that define your package best
    author_email='brianoroni6@gmail.com',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.8',
    ],
    python_requires='>=3.8',
)
| 30.885714 | 100 | 0.640148 |
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name = 'DjangoMpesa',
version = '1.4',
packages = ['mpesaApp'],
include_package_data = True,
license = 'BSD License',
description = 'A simple Django app for integrating mpesa stk push payment to your django site.',
long_description = README,
url = 'http://www.techtenant.co.ke/',
author = 'Oronz',
keywords = ['MPESA', 'Django', 'Djangompesa'],
author_email = 'brianoroni6@gmail.com',
classifiers =[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.8',
],
python_requires= '>=3.8',
)
| true | true |
1c3794e2265c22e85d615ad0ca29980fd3ca4ac2 | 1,856 | py | Python | tensorflow/utils.py | sutd-visual-computing-group/dag-gans | 68a76153650df6de2a6919a93a2d3b98ca6407e6 | [
"MIT"
] | 32 | 2021-01-02T14:57:02.000Z | 2022-03-15T12:00:16.000Z | tensorflow/utils.py | sutd-visual-computing-group/dag-gans | 68a76153650df6de2a6919a93a2d3b98ca6407e6 | [
"MIT"
] | 1 | 2021-01-11T06:28:01.000Z | 2021-01-11T08:45:04.000Z | tensorflow/utils.py | sutd-visual-computing-group/dag-gans | 68a76153650df6de2a6919a93a2d3b98ca6407e6 | [
"MIT"
] | 5 | 2021-04-17T08:50:52.000Z | 2022-02-06T06:44:24.000Z | import tensorflow as tf
import numpy as np
import math
def rotation(x, degs):
    """Return a list containing `x` rotated by each angle in `degs`.

    Only 0/90/180/270 are handled; any other entry in `degs` is silently
    skipped (original behaviour).
    """
    radians_per_degree = math.pi / 180
    rotated = []
    for deg in degs:
        if deg == 0:
            rotated.append(x)
        elif deg in (90, 180, 270):
            rotated.append(tf.contrib.image.rotate(x, deg * radians_per_degree))
    return rotated
def fliprot(x, aug):
    """Return the four flip variants of `x`: identity, horizontal flip,
    vertical flip, and horizontal+vertical flip (`aug` is unused)."""
    horizontal = tf.image.flip_left_right(x)
    return [
        x,
        horizontal,
        tf.image.flip_up_down(x),
        tf.image.flip_up_down(horizontal),
    ]
def image_crop(x, offset_h, offset_w, target_h, target_w, size=[32,32]):
    """Crop a bounding box out of `x`, then bilinearly resize it to `size`."""
    cropped = tf.image.crop_to_bounding_box(x, offset_h, offset_w, target_h, target_w)
    return tf.image.resize_bilinear(cropped, size=size, align_corners=True)
def cropping(x, aug):
    """Return five crops of `x` -- the full frame plus four 75% corner
    crops -- each resized back to the input resolution (`aug` is unused)."""
    _, height, width, _ = np.shape(x).as_list()
    full_size = [height, width]
    boxes = [[0, 0, height, width],
             [0, 0, height * 0.75, width * 0.75],
             [0, width * 0.25, height * 0.75, width * 0.75],
             [height * 0.25, 0, height * 0.75, width * 0.75],
             [height * 0.25, width * 0.25, height * 0.75, width * 0.75]]
    crops = []
    for box in boxes:
        crops.append(image_crop(x, int(box[0]), int(box[1]), int(box[2]), int(box[3]), size=full_size))
    return crops
def augmenting_data(x, aug, aug_list):
    """Dispatch to the requested augmentation family.

    :param x: input image batch tensor.
    :param aug: one of 'rotation', 'fliprot', 'cropping'.
    :param aug_list: per-augmentation parameters (e.g. rotation degrees).
    :return: list of augmented tensors.
    :raises ValueError: if `aug` is not a supported augmentation type.
    """
    if aug == 'rotation':
        return rotation(x, aug_list)
    elif aug == 'fliprot':
        return fliprot(x, aug_list)
    elif aug == 'cropping':
        return cropping(x, aug_list)
    # Fix: raise instead of print() + exit() so callers can handle the
    # error and library code never kills the whole process.
    raise ValueError(
        'utils.augmenting_data: the augmentation type {!r} is not supported.'.format(aug))
| 32.561404 | 118 | 0.578125 | import tensorflow as tf
import numpy as np
import math
def rotation(x, degs):
x_rot = []
angle = math.pi / 180
for deg in degs:
if deg == 0:
x_rot.append(x)
elif deg == 90:
x_rot.append(tf.contrib.image.rotate(x, 90 * angle))
elif deg == 180:
x_rot.append(tf.contrib.image.rotate(x, 180 * angle))
elif deg == 270:
x_rot.append(tf.contrib.image.rotate(x, 270 * angle))
return x_rot
def fliprot(x, aug):
x_flip = []
x_flip.append(x)
x_hflip = tf.image.flip_left_right(x)
x_flip.append(x_hflip)
x_flip.append(tf.image.flip_up_down(x))
x_flip.append(tf.image.flip_up_down(x_hflip))
return x_flip
def image_crop(x, offset_h, offset_w, target_h, target_w, size=[32,32]):
x_crop = tf.image.crop_to_bounding_box(x, offset_h, offset_w, target_h, target_w)
x_crop = tf.image.resize_bilinear(x_crop, size=size, align_corners=True)
return x_crop
def cropping(x, aug):
b, h, w, c = np.shape(x).as_list()
img_size = [h, w]
boxes = [[0, 0, h, w],
[0, 0, h*0.75, w*0.75],
[0, w*0.25, h*0.75, w*0.75],
[h*0.25, 0, h*0.75, w*0.75],
[h*0.25, w*0.25, h*0.75, w*0.75]]
x_crop = []
for i in range(np.shape(boxes)[0]):
cropped = image_crop(x, int(boxes[i][0]), int(boxes[i][1]), int(boxes[i][2]), int(boxes[i][3]), size=img_size)
x_crop.append(cropped)
return x_crop
def augmenting_data(x, aug, aug_list):
if aug == 'rotation':
return rotation(x, aug_list)
elif aug == 'fliprot':
return fliprot(x, aug_list)
elif aug == 'cropping':
return cropping(x, aug_list)
else:
print('utils.augmenting_data: the augmentation type is not supported. Exiting ...')
exit()
| true | true |
1c3795887d12e3edea16208d867075490f58e1ec | 2,558 | py | Python | model_zoo/official/nlp/prophetnet/src/utils/loss_monitor.py | GuoSuiming/mindspore | 48afc4cfa53d970c0b20eedfb46e039db2a133d5 | [
"Apache-2.0"
] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | model_zoo/official/nlp/prophetnet/src/utils/loss_monitor.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | model_zoo/official/nlp/prophetnet/src/utils/loss_monitor.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Loss monitor."""
import time
from mindspore.train.callback import Callback
from config import TransformerConfig
class LossCallBack(Callback):
    """
    Monitor the loss in training.
    If the loss is NAN or INF terminating training.
    Note:
        If per_print_times is 0 do not print loss.
    Args:
        per_print_times (int): Print loss every times. Default: 1.
    """
    # Class-level defaults. NOTE(review): __init__ assigns via ``self``,
    # which creates *instance* attributes shadowing these, so every new
    # instance records its own first timestamp -- confirm whether
    # class-wide sharing of the first timestamp was intended.
    time_stamp_init = False
    time_stamp_first = 0
    def __init__(self, config: TransformerConfig, per_print_times: int = 1):
        super(LossCallBack, self).__init__()
        if not isinstance(per_print_times, int) or per_print_times < 0:
            raise ValueError("print_step must be int and >= 0.")
        self.config = config
        self._per_print_times = per_print_times
        if not self.time_stamp_init:
            # Remember the wall-clock time (ms) at construction; log lines
            # report elapsed time relative to this stamp.
            self.time_stamp_first = self._get_ms_timestamp()
            self.time_stamp_init = True
    def step_end(self, run_context):
        # Append one line per real optimizer step to ./loss.log.
        # net_outputs layout (as read below): [0..2] loss values,
        # [3] is_accu_step flag, [4] gradient-accumulation length.
        cb_params = run_context.original_args()
        file_name = "./loss.log"
        with open(file_name, "a+") as f:
            time_stamp_current = self._get_ms_timestamp()
            is_accu_step = cb_params.net_outputs[3]
            accu_length = cb_params.net_outputs[4]
            # Only update at non-accumulation steps
            if not is_accu_step:
                f.write("time: {}, epoch: {}, step: {}, outputs are {},{},{}.\n".format(
                    time_stamp_current - self.time_stamp_first,
                    cb_params.cur_epoch_num,
                    cb_params.cur_step_num // accu_length,
                    str(cb_params.net_outputs[0].asnumpy()),
                    str(cb_params.net_outputs[1].asnumpy()),
                    str(cb_params.net_outputs[2].asnumpy())
                ))
    @staticmethod
    def _get_ms_timestamp():
        # Current wall-clock time in integer milliseconds.
        t = time.time()
        return int(round(t * 1000))
| 37.072464 | 88 | 0.62588 |
import time
from mindspore.train.callback import Callback
from config import TransformerConfig
class LossCallBack(Callback):
time_stamp_init = False
time_stamp_first = 0
def __init__(self, config: TransformerConfig, per_print_times: int = 1):
super(LossCallBack, self).__init__()
if not isinstance(per_print_times, int) or per_print_times < 0:
raise ValueError("print_step must be int and >= 0.")
self.config = config
self._per_print_times = per_print_times
if not self.time_stamp_init:
self.time_stamp_first = self._get_ms_timestamp()
self.time_stamp_init = True
def step_end(self, run_context):
cb_params = run_context.original_args()
file_name = "./loss.log"
with open(file_name, "a+") as f:
time_stamp_current = self._get_ms_timestamp()
is_accu_step = cb_params.net_outputs[3]
accu_length = cb_params.net_outputs[4]
if not is_accu_step:
f.write("time: {}, epoch: {}, step: {}, outputs are {},{},{}.\n".format(
time_stamp_current - self.time_stamp_first,
cb_params.cur_epoch_num,
cb_params.cur_step_num // accu_length,
str(cb_params.net_outputs[0].asnumpy()),
str(cb_params.net_outputs[1].asnumpy()),
str(cb_params.net_outputs[2].asnumpy())
))
@staticmethod
def _get_ms_timestamp():
t = time.time()
return int(round(t * 1000))
| true | true |
1c37959e88c15de590037cd7eb70979833e39fa3 | 1,614 | py | Python | Data_extraction/CS4.py | CarlOwOs/VH_and_PE_codes | 700726332489ed87270ec52d9efe46fcb835c598 | [
"MIT"
] | null | null | null | Data_extraction/CS4.py | CarlOwOs/VH_and_PE_codes | 700726332489ed87270ec52d9efe46fcb835c598 | [
"MIT"
] | null | null | null | Data_extraction/CS4.py | CarlOwOs/VH_and_PE_codes | 700726332489ed87270ec52d9efe46fcb835c598 | [
"MIT"
] | 3 | 2021-06-22T10:39:44.000Z | 2021-09-13T16:05:59.000Z | import pandas as pd
import numpy as np
import Auxiliary.auxiliary_functions as aux_fun
#--------------------------------------------------
def read_and_extract_target():
    '''
    Read the processed "events" dataframe and derive a numeric target:
    1 for heart-failure (IC) events, 2 for other cardiologic admissions,
    0 otherwise. After that computation, only relevant columns are kept.
    '''
    events_label = pd.read_csv("./Temp/events_CS2.csv")
    # Deleting the previous temporary files
    aux_fun.delete_csvs(["events_CS2"], "./Temp/")

    def _classify(row):
        # Priority order preserved from the original elif chain.
        if row.tipus_event in ["Urgències per Insuficiència Cardíaca", "Ingrés per Insuficiència Cardíaca"]:
            return 1
        if row.tipus_event == "Exitus" and row.causa_exitus == "Cardiovascular" and row.causa_exitus_cv == "Insuficiència cardíaca":
            return 1
        if row.tipus_event in ["Ingrés per altra causa cardiològica"]:
            return 2
        return 0

    events_label["target"] = events_label.apply(_classify, axis=1)
    events_label.drop(columns=['fecha_exitus_event', 'causa_exitus', 'causa_exitus_cv', 'origen_ingres_ic', 'tipus_event'], inplace=True)
    return events_label
#--------------------------------------------------
def execute_script():
    """Build the labelled events table and persist it under ./Temp/."""
    labelled_events = read_and_extract_target()
    # Change this value to modify the file name.
    output_names = ["events_label_CS4"]
    # Change this variable to modify the saving path.
    output_dir = './Temp/'
    aux_fun.write_csvs([labelled_events], output_dir, output_names)
#--------------------------------------------------
| 47.470588 | 138 | 0.639405 | import pandas as pd
import numpy as np
import Auxiliary.auxiliary_functions as aux_fun
def read_and_extract_target():
events_label = pd.read_csv("./Temp/events_CS2.csv")
aux_fun.delete_csvs(["events_CS2"],"./Temp/")
events_label["target"] = 0
for i,row in events_label.iterrows():
if row.tipus_event in ["Urgències per Insuficiència Cardíaca", "Ingrés per Insuficiència Cardíaca"]:
events_label.at[i,"target"] = 1
elif row.tipus_event == "Exitus" and row.causa_exitus == "Cardiovascular" and row.causa_exitus_cv=="Insuficiència cardíaca":
events_label.at[i,"target"] = 1
elif events_label.loc[i,"tipus_event"] in ["Ingrés per altra causa cardiològica"]:
events_label.at[i,"target"] = 2
events_label.drop(columns=['fecha_exitus_event', 'causa_exitus', 'causa_exitus_cv', 'origen_ingres_ic', 'tipus_event'], inplace= True)
return events_label
def execute_script():
events_label = read_and_extract_target()
names = ["events_label_CS4"]
saving_path = './Temp/'
aux_fun.write_csvs([events_label],saving_path,names)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.