hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf8fc77117fc34e42ca6dd6fb457a323bede50c | 1,697 | py | Python | risk_bounded_planning/test.py | TSummersLab/Risk_Bounded_Nonlinear_Robot_Motion_Planning | 717b9f07f4ed625ee33ab8ec22ce78dc2907d759 | [
"MIT"
] | 3 | 2022-01-07T19:37:03.000Z | 2022-03-15T08:50:28.000Z | risk_bounded_planning/test.py | TSummersLab/Risk_Bounded_Nonlinear_Robot_Motion_Planning | 717b9f07f4ed625ee33ab8ec22ce78dc2907d759 | [
"MIT"
] | null | null | null | risk_bounded_planning/test.py | TSummersLab/Risk_Bounded_Nonlinear_Robot_Motion_Planning | 717b9f07f4ed625ee33ab8ec22ce78dc2907d759 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 4 18:34:06 2021
@author: vxr131730
"""
import os
import pickle
# Repackaging script: load the raw waypoint-reference pickle from the working
# directory and re-save it as a numbered monte-carlo result file.
SAVEPATH = os.path.join(os.path.abspath(os.path.dirname(os.path.realpath(__file__))), 'monte_carlo_results') # path to save data
MC_FOLDER = os.path.join('..', 'monte_carlo_results')  # NOTE(review): defined but never used below — confirm whether it is still needed
# Unbox Pickle file to load the nodeList data
filename = 'waypts_ref_data'
infile = open(filename,'rb')
waypts_ref_data = pickle.load(infile)
infile.close()
# Save in monte-carlo folder
trial_num = 3
file_name = "mc_results_"+str(trial_num)+'.pkl'
pickle_on = open(os.path.join(SAVEPATH, file_name),"wb")
pickle.dump(waypts_ref_data, pickle_on)
pickle_on.close()
# import matplotlib.pyplot as plt
# import matplotlib.patches as patches
# from matplotlib.patches import Rectangle
# left, width = 76, 2.5
# bottom, height = 64, 1.02
# right = left + width
# top = bottom + height
# fig,ax = plt.subplots(1)
# # axes coordinates are 0,0 is bottom left and 1,1 is upper right
# p = Rectangle((left, bottom), width, height,
# linewidth=1,edgecolor='r',facecolor='none', fill=True)
# ax.add_artist(p)
# plt.xlim([left-1, right+1])
# plt.ylim([bottom-1, top+1])
# # Plot the data
# fig = plt.figure(figsize = [16,9])
# ax = fig.add_subplot(1, 1, 1) # create an axes object in the figure
# # Plot the rectangle obstacles
# obstacles = patches.Rectangle(xy = [76.245, 64.49],
# width = 2.49,
# height = 1.02,
# angle = 0,
# color = "k")
# ax.add_patch(obstacles)
# # ax.add_artist(obstacles)
# plt.show() | 26.515625 | 130 | 0.616382 |
acf8fcad82c91229ac5a86e29e440e010d0699df | 1,030 | py | Python | Test28_DeblurGAN/deblur-gan/test.py | hooloong/My_TensorFlow | ef115989035b9ae14938dca47c0814b0d16dd6ba | [
"MIT"
] | 3 | 2018-07-29T17:31:58.000Z | 2019-06-27T10:36:34.000Z | Test28_DeblurGAN/deblur-gan/test.py | hooloong/My_TensorFlow | ef115989035b9ae14938dca47c0814b0d16dd6ba | [
"MIT"
] | null | null | null | Test28_DeblurGAN/deblur-gan/test.py | hooloong/My_TensorFlow | ef115989035b9ae14938dca47c0814b0d16dd6ba | [
"MIT"
] | 1 | 2019-02-18T02:27:39.000Z | 2019-02-18T02:27:39.000Z | import numpy as np
from PIL import Image
import click
from model import generator_model
from utils import load_images, deprocess_image
def test(batch_size):
    """Run the trained deblurring generator on ./images/test and save comparison PNGs.

    For each test image, writes ``results<i>.png`` containing the 'B' image,
    the 'A' image, and the generated output concatenated horizontally.

    Args:
        batch_size: number of images loaded and fed to the generator per predict call.
    """
    data = load_images('./images/test', batch_size)
    # 'B' -> y_test (targets), 'A' -> x_test (inputs), per the pairing used below
    y_test, x_test = data['B'], data['A']
    g = generator_model()
    g.load_weights('generator.h5')
    generated_images = g.predict(x=x_test, batch_size=batch_size)
    # deprocess_image maps network output back to displayable pixel values
    generated = np.array([deprocess_image(img) for img in generated_images])
    x_test = deprocess_image(x_test)
    y_test = deprocess_image(y_test)
    for i in range(generated_images.shape[0]):
        y = y_test[i, :, :, :]
        x = x_test[i, :, :, :]
        img = generated[i, :, :, :]
        # side-by-side: target | blurred input | generated output
        output = np.concatenate((y, x, img), axis=1)
        im = Image.fromarray(output.astype(np.uint8))
        im.save('results{}.png'.format(i))
# CLI entry point: `python test.py --batch_size N` runs the generator on ./images/test.
@click.command()
@click.option('--batch_size', default=4, help='Number of images to process')
def test_command(batch_size):
    """Click wrapper around test(); parses --batch_size from the command line."""
    return test(batch_size)
if __name__ == "__main__":
    test_command()
| 28.611111 | 76 | 0.664078 |
acf8fd867566d543b65510022a37e51793442f83 | 11,262 | py | Python | tools/train_rcnn.py | simon3dv/DA-POINTRCNN | 326d937ffcf6d3b5e9de5bf9a6e0ca97fc8cdac3 | [
"MIT"
] | null | null | null | tools/train_rcnn.py | simon3dv/DA-POINTRCNN | 326d937ffcf6d3b5e9de5bf9a6e0ca97fc8cdac3 | [
"MIT"
] | null | null | null | tools/train_rcnn.py | simon3dv/DA-POINTRCNN | 326d937ffcf6d3b5e9de5bf9a6e0ca97fc8cdac3 | [
"MIT"
] | null | null | null | import _init_path
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lr_sched
import torch.nn as nn
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
import os
import argparse
import logging
from functools import partial
from lib.net.point_rcnn import PointRCNN
import lib.net.train_functions as train_functions
from lib.datasets.kitti_rcnn_dataset import KittiRCNNDataset
from lib.config import cfg, cfg_from_file, save_config_to_file
import tools.train_utils.train_utils as train_utils
from tools.train_utils.fastai_optim import OptimWrapper
from tools.train_utils import learning_schedules_fastai as lsf
# Command-line options: training mode and schedule, checkpoint locations, and
# the precomputed roi/feature directories used by the rcnn_offline mode.
parser = argparse.ArgumentParser(description="arg parser")
parser.add_argument('--cfg_file', type=str, default='cfgs/default.yaml', help='specify the config for training')
parser.add_argument("--train_mode", type=str, default='rpn', required=True, help="specify the training mode")
parser.add_argument("--batch_size", type=int, default=16, required=True, help="batch size for training")
parser.add_argument("--epochs", type=int, default=200, required=True, help="Number of epochs to train for")
parser.add_argument('--workers', type=int, default=8, help='number of workers for dataloader')
parser.add_argument("--ckpt_save_interval", type=int, default=5, help="number of training epochs")
parser.add_argument('--output_dir', type=str, default=None, help='specify an output directory if needed')
parser.add_argument('--mgpus', action='store_true', default=False, help='whether to use multiple gpu')
parser.add_argument("--ckpt", type=str, default=None, help="continue training from this checkpoint")
parser.add_argument("--rpn_ckpt", type=str, default=None, help="specify the well-trained rpn checkpoint")
parser.add_argument("--gt_database", type=str, default=None,#'gt_database/train_gt_database_3level_Car.pkl'
                    help='generated gt database for augmentation')
parser.add_argument("--rcnn_training_roi_dir", type=str, default=None,
                    help='specify the saved rois for rcnn training when using rcnn_offline mode')
parser.add_argument("--rcnn_training_feature_dir", type=str, default=None,
                    help='specify the saved features for rcnn training when using rcnn_offline mode')
parser.add_argument('--train_with_eval', action='store_true', default=False, help='whether to train with evaluation')
parser.add_argument("--rcnn_eval_roi_dir", type=str, default=None,
                    help='specify the saved rois for rcnn evaluation when using rcnn_offline mode')
parser.add_argument("--rcnn_eval_feature_dir", type=str, default=None,
                    help='specify the saved features for rcnn evaluation when using rcnn_offline mode')
args = parser.parse_args()
def create_logger(log_file):
    """Configure root file logging plus a console handler; return this module's logger."""
    fmt = '%(asctime)s %(levelname)5s %(message)s'
    logging.basicConfig(level=logging.DEBUG, format=fmt, filename=log_file)
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.DEBUG)
    console_handler.setFormatter(logging.Formatter(fmt))
    logger = logging.getLogger(__name__)
    logger.addHandler(console_handler)
    return logger
def create_dataloader(logger):
    """Build the train (and optionally eval) DataLoaders over the KITTI RCNN dataset.

    Args:
        logger: logger passed through to the dataset constructors.

    Returns:
        (train_loader, test_loader); test_loader is None unless --train_with_eval.
    """
    DATA_PATH = os.path.join('../', 'data')
    # create dataloader
    train_set = KittiRCNNDataset(root_dir=DATA_PATH, npoints=cfg.RPN.NUM_POINTS, split=cfg.TRAIN.SPLIT, mode='TRAIN',
                                 logger=logger,
                                 classes=cfg.CLASSES,
                                 rcnn_training_roi_dir=args.rcnn_training_roi_dir,
                                 rcnn_training_feature_dir=args.rcnn_training_feature_dir,
                                 gt_database_dir=args.gt_database)
    train_loader = DataLoader(train_set, batch_size=args.batch_size, pin_memory=True,
                              num_workers=args.workers, shuffle=True, collate_fn=train_set.collate_batch,
                              drop_last=True)
    if args.train_with_eval:
        # Evaluation runs one sample at a time (batch_size=1)
        test_set = KittiRCNNDataset(root_dir=DATA_PATH, npoints=cfg.RPN.NUM_POINTS, split=cfg.TRAIN.VAL_SPLIT, mode='EVAL',
                                    logger=logger,
                                    classes=cfg.CLASSES,
                                    rcnn_eval_roi_dir=args.rcnn_eval_roi_dir,
                                    rcnn_eval_feature_dir=args.rcnn_eval_feature_dir)
        test_loader = DataLoader(test_set, batch_size=1, shuffle=True, pin_memory=True,
                                 num_workers=args.workers, collate_fn=test_set.collate_batch)
    else:
        test_loader = None
    return train_loader, test_loader
def create_optimizer(model):
    """Build the optimizer selected by cfg.TRAIN.OPTIMIZER for the given model."""
    opt_name = cfg.TRAIN.OPTIMIZER
    if opt_name == 'adam':
        return optim.Adam(model.parameters(), lr=cfg.TRAIN.LR, weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    if opt_name == 'sgd':
        return optim.SGD(model.parameters(), lr=cfg.TRAIN.LR, weight_decay=cfg.TRAIN.WEIGHT_DECAY,
                         momentum=cfg.TRAIN.MOMENTUM)
    if opt_name == 'adam_onecycle':
        def flatten_model(m: nn.Module):
            # Recursively collapse a module tree into a flat list of leaf modules.
            kids = list(m.children())
            if not kids:
                return [m]
            return sum((flatten_model(k) for k in kids), [])

        layer_groups = [nn.Sequential(*flatten_model(model))]
        adam_factory = partial(optim.Adam, betas=(0.9, 0.99))
        optimizer = OptimWrapper.create(
            adam_factory, 3e-3, layer_groups, wd=cfg.TRAIN.WEIGHT_DECAY, true_wd=True, bn_wd=True
        )

        # fix rpn: do this since we use costomized optimizer.step
        if cfg.RPN.ENABLED and cfg.RPN.FIXED:
            for param in model.rpn.parameters():
                param.requires_grad = False
        return optimizer
    raise NotImplementedError
def create_scheduler(optimizer, total_steps, last_epoch):
    """Build the learning-rate and batch-norm-momentum schedulers.

    Args:
        optimizer: optimizer whose LR is scheduled.
        total_steps: total number of optimizer steps (used by OneCycle).
        last_epoch: resume epoch index (-1 for a fresh run).

    Returns:
        (lr_scheduler, bnm_scheduler)

    NOTE(review): the BNMomentumScheduler below reads the module-level global
    `model` (defined in the __main__ block), not a parameter of this function —
    confirm this coupling is intentional.
    """
    def lr_lbmd(cur_epoch):
        # Step-decay multiplier, clipped so LR never drops below LR_CLIP.
        cur_decay = 1
        for decay_step in cfg.TRAIN.DECAY_STEP_LIST:
            if cur_epoch >= decay_step:
                cur_decay = cur_decay * cfg.TRAIN.LR_DECAY
        return max(cur_decay, cfg.TRAIN.LR_CLIP / cfg.TRAIN.LR)
    def bnm_lmbd(cur_epoch):
        # Step-decay for batch-norm momentum, clipped at BNM_CLIP.
        cur_decay = 1
        for decay_step in cfg.TRAIN.BN_DECAY_STEP_LIST:
            if cur_epoch >= decay_step:
                cur_decay = cur_decay * cfg.TRAIN.BN_DECAY
        return max(cfg.TRAIN.BN_MOMENTUM * cur_decay, cfg.TRAIN.BNM_CLIP)
    if cfg.TRAIN.OPTIMIZER == 'adam_onecycle':
        lr_scheduler = lsf.OneCycle(
            optimizer, total_steps, cfg.TRAIN.LR, list(cfg.TRAIN.MOMS), cfg.TRAIN.DIV_FACTOR, cfg.TRAIN.PCT_START
        )
    else:
        lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd, last_epoch=last_epoch)
    bnm_scheduler = train_utils.BNMomentumScheduler(model, bnm_lmbd, last_epoch=last_epoch)
    return lr_scheduler, bnm_scheduler
if __name__ == "__main__":
    torch.backends.cudnn.enabled = False
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
        cfg.TAG = os.path.splitext(os.path.basename(args.cfg_file))[0]
    # Each training mode toggles which sub-networks are enabled and where
    # results are written.
    if args.train_mode == 'rpn':
        cfg.RPN.ENABLED = True
        cfg.RCNN.ENABLED = False
        root_result_dir = os.path.join('../', 'output', 'rpn', cfg.TAG)
    elif args.train_mode == 'rcnn':
        cfg.RCNN.ENABLED = True
        cfg.RPN.ENABLED = cfg.RPN.FIXED = True
        root_result_dir = os.path.join('../', 'output', 'rcnn', cfg.TAG)
    elif args.train_mode == 'rcnn_offline':
        cfg.RCNN.ENABLED = True
        cfg.RPN.ENABLED = False
        root_result_dir = os.path.join('../', 'output', 'rcnn', cfg.TAG)
    else:
        raise NotImplementedError
    if args.output_dir is not None:
        root_result_dir = args.output_dir
    os.makedirs(root_result_dir, exist_ok=True)
    log_file = os.path.join(root_result_dir, 'log_train.txt')
    logger = create_logger(log_file)
    logger.info('**********************Start logging**********************')
    # log to file
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    for key, val in vars(args).items():
        logger.info("{:16} {}".format(key, val))
    save_config_to_file(cfg, logger=logger)
    # copy important files to backup
    backup_dir = os.path.join(root_result_dir, 'backup_files')
    os.makedirs(backup_dir, exist_ok=True)
    os.system('cp *.py %s/' % backup_dir)
    os.system('cp ../lib/net/*.py %s/' % backup_dir)
    os.system('cp ../lib/datasets/kitti_rcnn_dataset.py %s/' % backup_dir)
    # tensorboard log
    tb_log = SummaryWriter(log_dir=os.path.join(root_result_dir, 'tensorboard'))
    # create dataloader & network & optimizer
    train_loader, test_loader = create_dataloader(logger)
    model = PointRCNN(num_classes=train_loader.dataset.num_class, use_xyz=True, mode='TRAIN')
    optimizer = create_optimizer(model)
    if args.mgpus:
        model = nn.DataParallel(model)
    model.cuda()
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.ckpt is not None:
        # DataParallel wraps the real model in .module
        pure_model = model.module if isinstance(model, torch.nn.DataParallel) else model
        it, start_epoch = train_utils.load_checkpoint(pure_model, optimizer, filename=args.ckpt, logger=logger)
        last_epoch = start_epoch + 1
    lr_scheduler, bnm_scheduler = create_scheduler(optimizer, total_steps=len(train_loader) * args.epochs,
                                                   last_epoch=last_epoch)
    if args.rpn_ckpt is not None:
        # Partially load a pre-trained RPN checkpoint (e.g. for rcnn mode)
        pure_model = model.module if isinstance(model, torch.nn.DataParallel) else model
        total_keys = pure_model.state_dict().keys().__len__()
        train_utils.load_part_ckpt(pure_model, filename=args.rpn_ckpt, logger=logger, total_keys=total_keys)
    if cfg.TRAIN.LR_WARMUP and cfg.TRAIN.OPTIMIZER != 'adam_onecycle':
        lr_warmup_scheduler = train_utils.CosineWarmupLR(optimizer, T_max=cfg.TRAIN.WARMUP_EPOCH * len(train_loader),
                                                         eta_min=cfg.TRAIN.WARMUP_MIN)
    else:
        lr_warmup_scheduler = None
# start training
logger.info('**********************Start training**********************')
ckpt_dir = os.path.join(root_result_dir, 'ckpt')
os.makedirs(ckpt_dir, exist_ok=True)
trainer = train_utils.Trainer(
model,
train_functions_da.model_joint_fn_decorator(),
optimizer,
ckpt_dir=ckpt_dir,
lr_scheduler=lr_scheduler,
bnm_scheduler=bnm_scheduler,
model_fn_eval=train_functions_da.model_joint_fn_decorator(),
tb_log=tb_log,
eval_frequency=1,
lr_warmup_scheduler=lr_warmup_scheduler,
warmup_epoch=cfg.TRAIN.WARMUP_EPOCH,
grad_norm_clip=cfg.TRAIN.GRAD_NORM_CLIP
)
trainer.train(
it,
start_epoch,
args.epochs,
train_loader,
test_loader,
ckpt_save_interval=args.ckpt_save_interval,
lr_scheduler_each_iter=(cfg.TRAIN.OPTIMIZER == 'adam_onecycle')
)
logger.info('**********************End training**********************')
| 44.164706 | 123 | 0.672527 |
acf8fd9af9fdf6e05044e3858650fd717fd507c2 | 2,546 | py | Python | services/code.py | r0uxt1/falcon | 852774e3628fcc08209f34a69bcae4e0d903d408 | [
"Unlicense"
] | null | null | null | services/code.py | r0uxt1/falcon | 852774e3628fcc08209f34a69bcae4e0d903d408 | [
"Unlicense"
] | null | null | null | services/code.py | r0uxt1/falcon | 852774e3628fcc08209f34a69bcae4e0d903d408 | [
"Unlicense"
] | null | null | null | import json
import argparse
import os
import sys
import webbrowser
from . import getSearchResults
from . import recommendationUtils
import pathlib
# Repo root is one level above this package; category.json maps algorithm
# categories to their filesystem locations.
ROOT_PATH = pathlib.Path(__file__).parents[1].as_posix()
category = json.load(open(ROOT_PATH+"/dumps/category.json", "r"))
def get_code(term, language, control):
    """Locate a code sample matching *term*, filter by *language*, and optionally open it.

    Args:
        term: search keyword for the algorithm/category to look up.
        language: file extension to filter results by (falsy -> list options).
        control: when 'open', open the first match in the default browser.
    """
    # converts filesystem to a nested dict
    # NOTE(review): the mutable default `y={}` is never actually used (every
    # call, including the recursive one, passes y explicitly) — confirm.
    dict_add = lambda x, y={}: dict_add(x[:-1], y).setdefault(x[-1], {}) if (x) else y
    baseDict = {}
    list(
        map(
            lambda x: dict_add(x, baseDict),
            [path["location"].split("/") for path in category],
        )
    )
    searchResults = getSearchResults.getSearchResults(term)
    print(searchResults)
    # use the leaf directory name of the best match as the canonical term
    term = searchResults[0]["location"].split("/")[-1]
    allPaths = []
    childPath = []
    recommendationUtils.getPath(term, baseDict, "", allPaths, childPath)
    print(allPaths)
    print(childPath)
    if not len(childPath[0]):
        # leaf category: resolve the path inside the local 'cosmos' checkout
        path = searchResults[0]["location"]
        path = "cosmos" + path
        if not os.path.exists(path):
            print("Clone the cosmos repo first into root of project")
            sys.exit()
        print(path)
        arr = os.listdir(path)
        print(arr)
        if not language:
            # no language given: list available extensions, then open the first file
            print("Available options")
            for code in arr:
                print(code.split(".")[-1])
            path = path + "/" + arr[0]
            print(path)
            if control == "open":
                webbrowser.open(path)
            else:
                sys.exit()
        else:
            for code in arr:
                if code.split(".")[-1] == language:
                    print(code)
    else:
        # non-leaf category: suggest similar categories instead
        print("similar categories")
        for results in searchResults:
            print(results["category"])
def main(*args):
    """Entry point: resolve (term, language, control) from positional args or CLI flags, then run get_code."""
    if args:
        term, language, control = args[0], args[1], args[2]
    else:
        parser = argparse.ArgumentParser()
        parser.add_argument("--term", help="The term to look for")
        parser.add_argument(
            "--language", default="cpp", help="enter the language extension"
        )
        parser.add_argument(
            "--control",
            help="enter what to do with end file can be save,edit or delete",
        )
        parsed = parser.parse_args()
        term, language, control = parsed.term, parsed.language, parsed.control
    if not term:
        print("Enter a valid term")
        sys.exit()
    get_code(term, language, control)
if __name__ == "__main__":
    main()
| 27.085106 | 86 | 0.558523 |
acf8fdba724b82c842285a517595beacf8530468 | 1,623 | py | Python | 2020/10/s.py | artcz/adventofcode | 8f520ff7f1a2afdbea8867635af4c6e7032aaf31 | [
"MIT"
] | 1 | 2020-12-02T12:08:05.000Z | 2020-12-02T12:08:05.000Z | 2020/10/s.py | artcz/adventofcode | 8f520ff7f1a2afdbea8867635af4c6e7032aaf31 | [
"MIT"
] | null | null | null | 2020/10/s.py | artcz/adventofcode | 8f520ff7f1a2afdbea8867635af4c6e7032aaf31 | [
"MIT"
] | null | null | null | from functools import lru_cache
# Puzzle input: one adapter joltage rating per line.
lines = open("input").read().strip().splitlines()
print("--- Day10 ---")
def p1(adapter_lines=None):
    """Part 1: count 1-jolt and 3-jolt gaps in the full adapter chain.

    Args:
        adapter_lines: optional iterable of numeric strings; defaults to the
            puzzle input loaded at module level (backward compatible).

    Returns:
        The product of 1-jolt and 3-jolt difference counts (also printed).
    """
    source = lines if adapter_lines is None else adapter_lines
    numbers = sorted(int(x) for x in source)
    # the built-in device adapter is always 3 jolts above the highest adapter
    numbers.append(numbers[-1] + 3)
    ones, threes = 0, 0
    last = 0  # the charging outlet starts the chain at 0 jolts
    for n in numbers:
        if n - last == 1:
            ones += 1
        elif n - last == 3:
            threes += 1
        last = n
    print(threes * ones)
    return threes * ones
def p2a(adapter_lines=None):
    """Part 2 (fast DP): count the number of valid adapter arrangements.

    Args:
        adapter_lines: optional iterable of numeric strings; defaults to the
            puzzle input loaded at module level (backward compatible).

    Returns:
        The arrangement count (also printed).
    """
    source = lines if adapter_lines is None else adapter_lines
    numbers = sorted(int(x) for x in source)
    numbers = [0] + numbers  # the outlet (0 jolts) starts every chain
    # Perf fix: build the membership set once instead of once per element.
    number_set = set(numbers)
    # partials[i] = how many of numbers[i]+1 .. numbers[i]+3 exist in the chain
    partials = [sum(n + x in number_set for x in range(1, 3 + 1)) for n in numbers]

    @lru_cache
    def find(i):
        # Number of distinct paths from index i to the end of the chain.
        p = partials[i]
        if p == 0:
            return 1
        return sum(find(i + j) for j in range(1, p + 1))

    result = find(0)
    print(result)
    return result
def p2b(adapter_lines=None):
    """Part 2 (brute force): count arrangements by exploring every path.

    Correct but exponential; kept as a reference implementation for p2a.

    Args:
        adapter_lines: optional iterable of numeric strings; defaults to the
            puzzle input loaded at module level (backward compatible).

    Returns:
        The arrangement count (also printed).
    """
    source = lines if adapter_lines is None else adapter_lines
    numbers = sorted(int(x) for x in source)
    numbers = [0] + numbers
    # Perf fix: build the membership set once instead of once per element.
    number_set = set(numbers)
    partials = [sum(n + x in number_set for x in range(1, 3 + 1)) for n in numbers]
    # NOTE: this could be a deque with popleft() for a proper FIFO queue; a
    # stack (pop from the top) visits the same paths, so a plain list suffices.
    stack = [0]
    found = 0
    while stack:
        i = stack.pop()
        p = partials[i]
        if p == 0:
            found += 1
            continue
        stack.extend(i + j for j in range(1, p + 1))
    print("ANS", found)
    return found
# Run both parts on the puzzle input when executed as a script.
print("Part1")
p1()
print("Part2")
p2a()
print("---- EOD ----")
| 19.094118 | 85 | 0.499076 |
acf8fddfba9a61d195b27810f0de0743b8f00ebc | 1,229 | py | Python | tests/test_utils_base.py | qbarthelemy/pyRiemann | b35873b0a6cf9d81a1db09bbedb72a2fefe7d0c3 | [
"BSD-3-Clause"
] | 301 | 2015-04-19T20:23:21.000Z | 2021-04-28T06:42:46.000Z | tests/test_utils_base.py | qbarthelemy/pyRiemann | b35873b0a6cf9d81a1db09bbedb72a2fefe7d0c3 | [
"BSD-3-Clause"
] | 98 | 2015-04-19T16:09:18.000Z | 2021-04-29T15:21:52.000Z | tests/test_utils_base.py | vishalbelsare/pyRiemann | a55b09e317975f7eaaeffd4e6f2977f4174d3d2d | [
"BSD-3-Clause"
] | 113 | 2015-05-13T07:40:48.000Z | 2021-04-26T01:29:49.000Z | import pytest
import numpy as np
from numpy.testing import assert_array_almost_equal
from pyriemann.utils.mean import mean_riemann
from pyriemann.utils.base import (sqrtm, invsqrtm, logm, expm, powm)
def test_sqrtm():
    """Matrix square root of 2*I must equal sqrt(2)*I."""
    mat = 2 * np.eye(3)
    expected = np.sqrt(2) * np.eye(3)
    assert_array_almost_equal(sqrtm(mat), expected)
def test_invsqrtm():
    """Matrix inverse square root of 2*I must equal (1/sqrt(2))*I."""
    mat = 2 * np.eye(3)
    expected = (1.0 / np.sqrt(2)) * np.eye(3)
    assert_array_almost_equal(invsqrtm(mat), expected)
def test_logm():
    """Matrix logarithm of 2*I must equal log(2)*I."""
    mat = 2 * np.eye(3)
    expected = np.log(2) * np.eye(3)
    assert_array_almost_equal(logm(mat), expected)
def test_expm():
    """Matrix exponential of 2*I must equal exp(2)*I."""
    mat = 2 * np.eye(3)
    expected = np.exp(2) * np.eye(3)
    assert_array_almost_equal(expm(mat), expected)
def test_powm():
    """Matrix power: (2*I)**0.5 must equal sqrt(2)*I."""
    mat = 2 * np.eye(3)
    expected = (2**0.5) * np.eye(3)
    assert_array_almost_equal(powm(mat, 0.5), expected)
def test_check_raise():
    """Test check for SPD matrices"""
    C = 2*np.ones((10, 3, 3))
    # This is an indirect check, the riemannian mean must crash when the
    # matrices are not SPD.
    with pytest.raises(ValueError):
        mean_riemann(C)
| 24.098039 | 72 | 0.653377 |
acf8ff2b2261e3a59a27ec9d63a0bf0580d6df1b | 1,593 | py | Python | auto_rec.py | FreedomSlow/Recommendation-Systems | 9799ce1ef8c5a5dc89fb2059c081065c87b4f294 | [
"Apache-2.0"
] | null | null | null | auto_rec.py | FreedomSlow/Recommendation-Systems | 9799ce1ef8c5a5dc89fb2059c081065c87b4f294 | [
"Apache-2.0"
] | null | null | null | auto_rec.py | FreedomSlow/Recommendation-Systems | 9799ce1ef8c5a5dc89fb2059c081065c87b4f294 | [
"Apache-2.0"
] | null | null | null | import torch
import numpy as np
import utils
class AutoRec(torch.nn.Module):
    """Item-based AutoRec autoencoder.

    Encodes a rating vector into a hidden representation and decodes it back
    to a dense reconstruction of the same size.

    Args:
        num_input: length of each input rating vector (number of users for
            item-based AutoRec).
        num_hidden: size of the hidden (bottleneck) layer.
        dropout: dropout probability applied after the encoder activation.
    """
    def __init__(self, num_input, num_hidden, dropout=0.05):
        super(AutoRec, self).__init__()
        self.encoder = torch.nn.Sequential(
            torch.nn.Linear(num_input, num_hidden),
            torch.nn.LogSigmoid(),
            torch.nn.Dropout(dropout)
        )
        self.decoder = torch.nn.Linear(num_hidden, num_input)

        # Bug fix: xavier_normal_ requires tensors with >= 2 dimensions, so
        # blindly initializing every parameter crashed on the 1-D Linear
        # biases. Xavier-init the weight matrices and zero the biases instead.
        for param in self.parameters():
            if param.dim() >= 2:
                torch.nn.init.xavier_normal_(param)
            else:
                torch.nn.init.zeros_(param)

    def forward(self, input_data):
        """Reconstruct the input rating vector: decoder(encoder(input))."""
        return self.decoder(self.encoder(input_data))
# TODO: Write evaluator function
if __name__ == '__main__':
    # Load the MovieLens-small ratings and split chronologically (no shuffle).
    dataset, users_cnt, items_cnt = utils.load_dataset("ml_small")
    train_df, test_df = utils.split_data(dataset, shuffle=False)
    BATCH_SIZE = 32
    EMBEDDING_DIM = 512
    EPOCHS = 10
    TARGET = "rating"
    train_iter = utils.create_data_loader(train_df, batch_size=BATCH_SIZE, target_col=TARGET,
                                          item_col="item_id", user_col="user_id")
    test_iter = utils.create_data_loader(test_df, batch_size=BATCH_SIZE, target_col=TARGET,
                                         item_col="item_id", user_col="user_id")
    # Since we are making item-based AutoRec input dimension will be number of users
    # That way our model will learn complete predictions for all missing values
    ar_net = AutoRec(users_cnt, EMBEDDING_DIM)
    utils.train_recommendation_model(ar_net, train_iter, test_iter, EPOCHS, learning_rate=1e-2)
acf8ffbfe72463b44b43250ae190089986393962 | 18,039 | py | Python | visualization/gridworld.py | KTH-RPL-Planiacs/least-limiting-advisers | e2040736123e12e227b95fffe17b68c4a6e80b87 | [
"MIT"
] | null | null | null | visualization/gridworld.py | KTH-RPL-Planiacs/least-limiting-advisers | e2040736123e12e227b95fffe17b68c4a6e80b87 | [
"MIT"
] | null | null | null | visualization/gridworld.py | KTH-RPL-Planiacs/least-limiting-advisers | e2040736123e12e227b95fffe17b68c4a6e80b87 | [
"MIT"
] | null | null | null | import pygame
import time
import pickle
import random
from advisers import AdviserType
from agent_synth_game import sog_fits_to_guard
class RobotView(pygame.sprite.Sprite):
    """Sprite for one robot; loads data/robot<id+1>.png scaled to the cell size."""

    def __init__(self, name, robot_id, width, height):
        super().__init__()
        self.robot_id = robot_id
        sprite_path = 'data/robot%i.png' % (robot_id+1)
        surface = pygame.image.load(sprite_path).convert_alpha()
        self.image = pygame.transform.scale(surface, (int(width), int(height)))
        self.rect = self.image.get_rect()
        # self.image.set_colorkey((255, 255, 255))
        self.name = name
class TrashView(pygame.sprite.Sprite):
    """Sprite for a trash can; shows the full image until empty_trash() is called."""

    def __init__(self, width, height):
        super().__init__()
        size = (int(width), int(height))
        full_img = pygame.image.load('data/trash_full.png').convert_alpha()
        self.trash_full = pygame.transform.scale(full_img, size)
        empty_img = pygame.image.load('data/trash_empty.png').convert_alpha()
        self.trash_empty = pygame.transform.scale(empty_img, size)
        self.image = self.trash_full
        self.rect = self.image.get_rect()
        # self.image.set_colorkey((255, 255, 255))

    def empty_trash(self):
        """Switch the displayed image to the emptied trash can."""
        self.image = self.trash_empty
class GridWorld:
    def __init__(self, grid, robots, screen_x=500, screen_y=500, cell_margin=5):
        """Set up the pygame window, cell geometry, robot sprites and trash cans.

        Args:
            grid: 2-D list of cell codes (0 wall, 1 clean, 2 dirty, other blue).
            robots: list of robot agents; the first two are treated as cleaners,
                the rest as trash collectors (see run_step).
            screen_x, screen_y: window size in pixels.
            cell_margin: gap between cells in pixels.
        """
        # define colors
        self.BLACK = (0, 0, 0)
        self.WHITE = (255, 255, 255)
        self.GREEN = (0, 255, 0)
        self.RED = (255, 150, 150)
        self.BLUE = (0, 0, 255)
        self.YELLOW = (255, 255, 0)
        # cell dimensions
        self.WIDTH = int((screen_x / len(grid[0])) - cell_margin)
        self.HEIGHT = int((screen_y / len(grid)) - cell_margin)
        self.MARGIN = cell_margin
        self.color = self.WHITE
        # grid info
        self.grid = grid
        self.cell_count = 0
        # simulation speed
        self.FPS = 60 # frames per second
        self.SPEED = 60 # frames per move
        self.frame_count = 0
        pygame.init()
        pygame.font.init()
        # set the width and height of the screen (width , height)
        self.size = (screen_x, screen_y)
        self.screen = pygame.display.set_mode(self.size)
        self.font = pygame.font.SysFont('arial', 20)
        pygame.display.set_caption("Grid world")
        self.sprites_list = pygame.sprite.Group()
        # agents
        self.robots = robots
        self.robot_views = []
        for robot_id, robot in enumerate(self.robots):
            robot_view = RobotView(robot.name, robot_id, self.WIDTH * 0.6, self.HEIGHT * 0.6)
            self.robot_views.append(robot_view)
            self.sprites_list.add(robot_view)
        # trash cans
        self.trash_views = []
        self.trash_coords = [[3, 3],
                             [6, 3],
                             [3, 6],
                             [6, 6]]
        for i in range(4):
            trash_view = TrashView(self.WIDTH, self.HEIGHT)
            # center the sprite in its cell, with a small fixed offset
            displace_x = ((self.WIDTH + self.MARGIN)/2) - (trash_view.rect.width / 2) + self.WIDTH * 0.03
            displace_y = ((self.HEIGHT + self.MARGIN) / 2) - (trash_view.rect.height / 2) + self.HEIGHT * 0.1
            current_posx = self.trash_coords[i][0] * (self.WIDTH + self.MARGIN) + displace_x
            current_posy = self.trash_coords[i][1] * (self.HEIGHT + self.MARGIN) + displace_y
            trash_view.rect.x = int(current_posx)
            trash_view.rect.y = int(current_posy)
            self.trash_views.append(trash_view)
            self.sprites_list.add(trash_view)
def text_objects(self, text, font):
text_surface = font.render(text, True, self.BLACK)
return text_surface, text_surface.get_rect()
    def draw_cell(self, nodes):
        """Highlight cells in blue and print a value label on each.

        Args:
            nodes: iterable of (value, (row, column)) pairs.
        """
        for node in nodes:
            row = node[1][0]
            column = node[1][1]
            value = node[0]
            pygame.draw.rect(self.screen,
                             self.BLUE,
                             [(self.MARGIN + self.WIDTH) * column + self.MARGIN,
                              (self.MARGIN + self.HEIGHT) * row + self.MARGIN,
                              self.WIDTH,
                              self.HEIGHT])
            text_surf, text_rect = self.text_objects(str(value), self.font)
            text_rect.center = ((self.MARGIN + self.WIDTH) * column + 4 * self.MARGIN,
                                (self.MARGIN + self.HEIGHT) * row + 4 * self.MARGIN)
            self.screen.blit(text_surf, text_rect)
    def render(self):
        """Redraw the whole scene: grid cells, wall segments, sprites; then flip."""
        # black the whole screen
        self.screen.fill(self.BLACK)
        # draw the grid
        for row in range(len(self.grid)):
            for col in range(len(self.grid[0])):
                if self.grid[row][col] == 0:
                    self.color = self.BLACK
                elif self.grid[row][col] == 1:
                    self.color = self.WHITE
                elif self.grid[row][col] == 2:
                    self.color = (130, 115, 100)
                else:
                    self.color = self.BLUE
                pygame.draw.rect(self.screen,
                                 self.color,
                                 [(self.MARGIN + self.WIDTH) * col + self.MARGIN,
                                  (self.MARGIN + self.HEIGHT) * row + self.MARGIN,
                                  self.WIDTH,
                                  self.HEIGHT])
        # walls
        # horizontal wall segments at rows 4 and 6 (with door gaps), then two
        # vertical segments in column 5
        pygame.draw.rect(self.screen,
                         self.BLACK,
                         [(self.MARGIN + self.WIDTH) * 0 + self.MARGIN,
                          (self.MARGIN + self.HEIGHT) * 4 + self.MARGIN,
                          (self.MARGIN + self.WIDTH) * 2, self.MARGIN * 2])
        pygame.draw.rect(self.screen,
                         self.BLACK,
                         [(self.MARGIN + self.WIDTH) * 3 + self.MARGIN,
                          (self.MARGIN + self.HEIGHT) * 4 + self.MARGIN,
                          (self.MARGIN + self.WIDTH) * 4, self.MARGIN * 2])
        pygame.draw.rect(self.screen,
                         self.BLACK,
                         [(self.MARGIN + self.WIDTH) * 8 + self.MARGIN,
                          (self.MARGIN + self.HEIGHT) * 4 + self.MARGIN,
                          (self.MARGIN + self.WIDTH) * 2, self.MARGIN * 2])
        pygame.draw.rect(self.screen,
                         self.BLACK,
                         [(self.MARGIN + self.WIDTH) * 0 + self.MARGIN,
                          (self.MARGIN + self.HEIGHT) * 6 + self.MARGIN,
                          (self.MARGIN + self.WIDTH) * 2, self.MARGIN * 2])
        pygame.draw.rect(self.screen,
                         self.BLACK,
                         [(self.MARGIN + self.WIDTH) * 3 + self.MARGIN,
                          (self.MARGIN + self.HEIGHT) * 6 + self.MARGIN,
                          (self.MARGIN + self.WIDTH) * 4, self.MARGIN * 2])
        pygame.draw.rect(self.screen,
                         self.BLACK,
                         [(self.MARGIN + self.WIDTH) * 8 + self.MARGIN,
                          (self.MARGIN + self.HEIGHT) * 6 + self.MARGIN,
                          (self.MARGIN + self.WIDTH) * 2, self.MARGIN * 2])
        pygame.draw.rect(self.screen,
                         self.BLACK,
                         [(self.MARGIN + self.WIDTH) * 5 + self.MARGIN,
                          (self.MARGIN + self.HEIGHT) * 0,
                          self.MARGIN * 2, (self.MARGIN + self.HEIGHT) * 4])
        pygame.draw.rect(self.screen,
                         self.BLACK,
                         [(self.MARGIN + self.WIDTH) * 5 + self.MARGIN,
                          (self.MARGIN + self.HEIGHT) * 6,
                          self.MARGIN * 2, (self.MARGIN + self.HEIGHT) * 4])
        # display the robots
        self.sprites_list.draw(self.screen)
        # flip the renderer buffer
        pygame.display.flip()
    def idle(self, idle_time):
        """Placeholder hook for waiting *idle_time*; currently a no-op."""
        pass
    def run_step(self, states_dict):
        """Advance the animation one frame; commit a move every SPEED frames.

        Args:
            states_dict: maps an MDP state id to its (row, column) grid coords.

        Robot sprites are interpolated (smoothstep) between their current and
        next cells. Robots with id >= 2 empty trash cans they stand on; the
        first two mark a room as clean when standing on its cleaning spot alone.
        """
        self.frame_count += 1
        if self.frame_count >= self.SPEED:
            # move completed: commit next_state and plan the following move
            for robot in self.robots:
                robot.current_state = robot.next_state
            self.frame_count = 0
            self.compute_next_step()
        # update robot views
        for robot_id, robot in enumerate(self.robots):
            robot_view = self.robot_views[robot_id]
            # promise nodes wrap the MDP state in an extra tuple
            if isinstance(robot.current_state[0], tuple):
                robot.current_coords = states_dict[robot.current_state[0][0]]
            else:
                robot.current_coords = states_dict[robot.current_state[0]]
            if isinstance(robot.next_state[0], tuple):
                next_coords = states_dict[robot.next_state[0][0]]
            else:
                next_coords = states_dict[robot.next_state[0]]
            displace_x = ((self.WIDTH + self.MARGIN)/2) - (robot_view.rect.width / 2)
            displace_y = ((self.HEIGHT + self.MARGIN) / 2) - (robot_view.rect.height / 2)
            current_posx = robot.current_coords[1] * (self.WIDTH + self.MARGIN) + displace_x
            current_posy = robot.current_coords[0] * (self.HEIGHT + self.MARGIN) + displace_y
            next_posx = next_coords[1] * (self.WIDTH + self.MARGIN) + displace_x
            next_posy = next_coords[0] * (self.HEIGHT + self.MARGIN) + displace_y
            # smoothstep interpolation between the two cells
            t = self.frame_count / self.SPEED
            t = t * t * (3 - 2 * t)
            robot_view.rect.x = int(current_posx * (1 - t) + next_posx * t)
            robot_view.rect.y = int(current_posy * (1 - t) + next_posy * t)
            if robot_id >= 2: # now check if we empty the trash cans
                if robot.current_coords[0] == 3 and robot.current_coords[1] == 2:
                    self.trash_views[0].empty_trash()
                if robot.current_coords[0] == 3 and robot.current_coords[1] == 7:
                    self.trash_views[1].empty_trash()
                if robot.current_coords[0] == 6 and robot.current_coords[1] == 2:
                    self.trash_views[2].empty_trash()
                if robot.current_coords[0] == 6 and robot.current_coords[1] == 7:
                    self.trash_views[3].empty_trash()
            else: # check if we change the floors to cleaned
                if self.check_clean_and_alone(robot, 0, 3, 0, 4, 1, 2):
                    for x in range(0, 5):
                        for y in range(0, 4):
                            self.grid[y][x] = 1
                if self.check_clean_and_alone(robot, 0, 3, 5, 9, 1, 7):
                    for x in range(5, 10):
                        for y in range(0, 4):
                            self.grid[y][x] = 1
                if self.check_clean_and_alone(robot, 6, 9, 0, 4, 8, 2):
                    for x in range(0, 5):
                        for y in range(6, 10):
                            self.grid[y][x] = 1
                if self.check_clean_and_alone(robot, 6, 9, 5, 9, 8, 7):
                    for x in range(5, 10):
                        for y in range(6, 10):
                            self.grid[y][x] = 1
        # render the results
        self.render()
def check_clean_and_alone(self, robot, xmin, xmax, ymin, ymax, cleanx, cleany):
if robot.current_coords[0] == cleanx and robot.current_coords[1] == cleany:
alone = True
for other_robot in self.robots:
if other_robot.name == robot.name:
continue
if xmin <= other_robot.current_coords[0] <= xmax and ymin <= other_robot.current_coords[1] <= ymax:
alone = False
return alone
return False
    def compute_next_step(self):
        """Advance every robot by one joint step of the MDP / synthesis game.

        Phase 1: for each robot, follow its strategy from the current synth
        state into the MDP, sample the probabilistic outcome, and accumulate
        the joint observation string `next_obs` and atomic propositions
        `next_ap`.
        Phase 2: replay that joint observation through each robot's synthesis
        game to pick `robot.next_state` consistent with the sampled MDP choice.
        Sets `robot.mdp_choice` and `robot.next_state` as side effects; the
        actual move happens elsewhere (presumably in run_step — confirm).
        """
        next_obs = ''
        next_ap = []
        for robot in self.robots:
            # start in a player-1 state
            if isinstance(robot.current_state[0], tuple):
                # its a promise node
                mpd_state = robot.current_state[0][0]
            else:
                mpd_state = robot.current_state[0]
            action = robot.strategy[robot.current_state]
            # Find the MDP "probability node" reached by taking `action`.
            prob_state = None
            for succ in robot.mdp.successors(mpd_state):
                if robot.mdp.edges[mpd_state, succ]['act'] == action:
                    prob_state = succ
                    break
            # compute probabilistic outcome of mdp action
            outcomes = []
            probs = []
            for succ in robot.mdp.successors(prob_state):
                outcomes.append(succ)
                probs.append(robot.mdp.edges[prob_state, succ]['prob'])
            # Weighted random draw over the MDP successor distribution.
            choice = random.choices(outcomes, probs)[0]
            robot.mdp_choice = choice
            # Concatenate this robot's observation label into the joint one.
            next_obs += robot.mdp.nodes[choice]['ap'][0]
            next_ap.extend(robot.mdp.graph['ap'])
        # now we have player-2 choices, now move the synth game
        for robot in self.robots:
            action = robot.strategy[robot.current_state]
            p2_state = None
            for succ in robot.synth.successors(robot.current_state):
                if robot.synth.edges[robot.current_state, succ]['act'] == action:
                    p2_state = succ
                    break
            # find the p2 action that matches the next_obs, next_ap
            # first, skip fairness nodes
            if len(p2_state) == 3 and p2_state[2] == 'fair':
                p2_state = (p2_state[0], p2_state[1])
            # Pick the player-2 edge whose guard matches the joint observation.
            # NOTE(review): if no guard matches, prob_state stays None and the
            # successors() call below will fail — presumably unreachable by
            # construction of the game; confirm.
            prob_state = None
            for succ in robot.synth.successors(p2_state):
                sog = robot.synth.edges[p2_state, succ]['guards']
                matched_guards = sog_fits_to_guard(next_obs, sog, next_ap, robot.synth.graph['env_ap'])
                if len(matched_guards) > 0:
                    prob_state = succ
                    break
            # make the choice decided probabilistically earlier in mdp
            for succ in robot.synth.successors(prob_state):
                if succ[1] == 'promise' and succ[0][0] == robot.mdp_choice:
                    # check probabilistically if we need to fulfill a promise
                    promise_state = succ
                    outcomes = []
                    probs = []
                    for succsucc in robot.synth.successors(promise_state):
                        edge_data = robot.synth.edges[promise_state, succsucc]
                        if 'pre' in edge_data.keys():
                            # Only keep successors whose precondition fits the
                            # joint observation.
                            fits = sog_fits_to_guard(next_obs, [edge_data['pre'][0]], next_ap, edge_data['pre'][1])
                            if len(fits) > 0:
                                outcomes.append(succsucc)
                        else:
                            outcomes.append(succsucc)
                    # Uniform choice among admissible promise continuations.
                    robot.next_state = random.choice(outcomes)
                elif succ[0] == robot.mdp_choice:
                    robot.next_state = succ
                    break
def simulate_agents(self, states_dict):
next_time = time.time()
running = True
simulate = False
self.run_step(states_dict)
while running:
# handle all events
for event in pygame.event.get():
if event.type == pygame.KEYDOWN: # hit a key
if event.key == pygame.K_ESCAPE: # ESC key
running = False
if event.key == pygame.K_SPACE:
simulate = True
elif event.type == pygame.QUIT: # press X in window
running = False
# handle game state
now_time = time.time()
self.idle(max(0., next_time - now_time))
if now_time >= next_time:
if simulate:
self.run_step(states_dict)
next_time = now_time + (1 / self.FPS)
if __name__ == '__main__':
    # Earlier experiment setup (symmetric corridor scenario), kept for reference:
    # pickled_agents = pickle.load(open('data/agents_converged_results_symmetric_corridor.p', 'rb'))
    #
    # # mdp player-1-states to coords
    # mdp_states_dict = {
    #     'crit': (2, 2),
    #     'end_top': (0, 2),
    #     'corridor_top': (1, 2),
    #     'corridor_top_no_turn': (1, 2),
    #     'corridor_bot': (3, 2),
    #     'corridor_bot_no_turn': (3, 2),
    #     'end_bot': (4, 2),
    #     'end_left': (2, 0),
    #     'corridor_left': (2, 1),
    #     'corridor_left_no_turn': (2, 1),
    #     'end_right': (2, 4),
    #     'corridor_right': (2, 3),
    #     'corridor_right_no_turn': (2, 3)
    # }
    # # build grid structure
    # ex_grid = [[0 for col in range(5)] for row in range(5)]
    # for i in range(5):
    #     ex_grid[2][i] = 1
    #     ex_grid[i][2] = 1
    # ex_grid[2][2] = 2
    # Load pre-computed agents (strategies + game graphs) for the office scenario.
    pickled_agents = pickle.load(open('data/agents_results_office_roomtest.p', 'rb'))
    # MDP state names are "x,y" strings; map each to its grid coordinate.
    mdp_states_dict = {}
    for x in range(10):
        for y in range(10):
            mdp_states_dict["%i,%i" % (x,y)] = (x,y)
    # build grid structure
    ex_grid = [[2 for col in range(10)] for row in range(10)]
    for x in range(10):
        # Rows 4 and 5 form a corridor (cell type 1); everything else is type 2.
        ex_grid[4][x] = 1
        ex_grid[5][x] = 1
    # set current state of agents
    for agent in pickled_agents:
        agent.current_state = agent.synth.graph['init']
    # safety print
    for agent in pickled_agents:
        print('Safety Advisers for Agent %s:' % agent.name)
        for adviser in agent.own_advisers:
            if not adviser.adv_type == AdviserType.SAFETY:
                continue
            adviser.print_advice()
        print('')
    # fairness print
    for agent in pickled_agents:
        print('Fairness Advisers for Agent %s:' % agent.name)
        for adviser in agent.own_advisers:
            if not adviser.adv_type == AdviserType.FAIRNESS:
                continue
            adviser.print_advice()
        print('')
    # Build the visual grid world and start the interactive simulation.
    gridworld = GridWorld(grid=ex_grid, robots=pickled_agents, screen_x=1000, screen_y=1000)
    gridworld.compute_next_step()
    gridworld.simulate_agents(mdp_states_dict)
| 41.756944 | 115 | 0.515439 |
acf9008f74a0916a52521575bdd8ac9ca7f8c8db | 553 | py | Python | chapter3/problem4.py | hahnicity/ace | 60e934304b94614c435c7f3da60e3ea13622173e | [
"Unlicense"
] | 5 | 2017-07-06T07:08:03.000Z | 2020-03-11T17:48:02.000Z | chapter3/problem4.py | lnsongxf/ace | 60e934304b94614c435c7f3da60e3ea13622173e | [
"Unlicense"
] | null | null | null | chapter3/problem4.py | lnsongxf/ace | 60e934304b94614c435c7f3da60e3ea13622173e | [
"Unlicense"
] | 5 | 2017-07-06T07:08:15.000Z | 2020-12-19T21:52:07.000Z | """
Prove Broyden's method is a solution to
min sigma(sigma((A* - A)^2))
I will show that the equation `A* <- A + (g - Ad)(d'/d'd)` is a solution to the
Frobenius norm outlined above.
A* <- A + (g - Ad)(d'/d'd) <=> ||A* - A|| = ||(g - Ad)(d'/d'd)|| <=
||g - Ad||*||(d'/d'd)|| = ||g - Ad|| * 1 / sigma(xi^2)
We notice that g is subject to s.t. g = A*d
||g - Ad|| * 1 / sigma(xi^2) = ||A*d - Ad|| * 1 / sigma(xi^2) <=
||A* - A|| * ||d|| * 1 / sigma(xi^2) = ||A* - A||
Thereby showing that ||A* - A|| <= ||(g - Ad)(d'/d'd)||
"""
pass
| 27.65 | 79 | 0.459313 |
acf900c358a7ef4606b4d29107b3ab14b5e0d11e | 2,472 | py | Python | src/datamgr/datamanager/api/modules/tof.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 84 | 2021-06-30T06:20:23.000Z | 2022-03-22T03:05:49.000Z | src/datamgr/datamanager/api/modules/tof.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 7 | 2021-06-30T06:21:16.000Z | 2022-03-29T07:36:13.000Z | src/datamgr/datamanager/api/modules/tof.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 40 | 2021-06-30T06:21:26.000Z | 2022-03-29T12:42:26.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import, unicode_literals
from api.base import DataAPI
from conf.settings import APP_ID, APP_TOKEN, TOF_API_URL
def base_before_request(params):
    """Inject the application credentials into the outgoing request params.

    Mutates *params* in place and returns it, as expected by DataAPI's
    before_request hook.
    """
    params.update(app_code=APP_ID, app_secret=APP_TOKEN)
    return params
class TOFApi(object):
    """Collection of TOF staff-information endpoints.

    Every endpoint is a POST DataAPI rooted at TOF_API_URL with the shared
    credential-injecting before_request hook.
    """

    MODULE = "TOF"

    def __init__(self):
        def _endpoint(path, description):
            # All TOF endpoints differ only in path and description.
            return DataAPI(
                method="POST",
                url=TOF_API_URL + path,
                module=self.MODULE,
                description=description,
                before_request=base_before_request,
            )

        self.get_staff_info = _endpoint("get_staff_info/", "获取员工信息")
        self.get_dept_staffs_with_level = _endpoint(
            "get_dept_staffs_with_level/", "获取部门员工信息"
        )
        self.get_staff_direct_leader = _endpoint(
            "get_staff_direct_leader/", "获取员工直属leader信息"
        )
| 43.368421 | 111 | 0.696197 |
acf90149ae1d5391921c145661fb813109951569 | 4,638 | py | Python | digital_storage_calculator/data_units/decimal_based_on_bytes_units.py | GonnaFlyMethod/digital-storage-calculator | f5e6d3614711651c8fbc04cca9862e50d4f71ff9 | [
"MIT"
] | null | null | null | digital_storage_calculator/data_units/decimal_based_on_bytes_units.py | GonnaFlyMethod/digital-storage-calculator | f5e6d3614711651c8fbc04cca9862e50d4f71ff9 | [
"MIT"
] | null | null | null | digital_storage_calculator/data_units/decimal_based_on_bytes_units.py | GonnaFlyMethod/digital-storage-calculator | f5e6d3614711651c8fbc04cca9862e50d4f71ff9 | [
"MIT"
] | null | null | null | class Kilobyte:
def __init__(self, value_kilobytes: int):
self._value_kilobytes = value_kilobytes
self.one_kilobyte_in_bits = 8000
self._value_bits = self._convert_into_bits(value_kilobytes)
self.id = "KB"
def _convert_into_bits(self, value_kilobytes: int) -> int:
return value_kilobytes * self.one_kilobyte_in_bits
def convert_from_bits_to_kilobytes(self, bits: int) -> float:
return (bits / self.one_kilobyte_in_bits)
def get_val_in_bits(self) -> int:
return self._value_bits
def get_val_in_kilobytes(self) -> int:
return self._value_kilobytes
class Megabyte:
    """A quantity of decimal megabytes (1 MB = 8e+6 bits)."""

    def __init__(self, value_megabytes: int):
        self.one_megabyte_in_bits = 8e+6
        self._value_megabytes = value_megabytes
        self._value_bits = self._convert_into_bits(value_megabytes)
        self.id = "MB"

    def _convert_into_bits(self, value_megabytes: int) -> int:
        """Translate a megabyte count into bits."""
        return self.one_megabyte_in_bits * value_megabytes

    def convert_from_bits_to_megabytes(self, bits: int) -> float:
        """Translate a bit count into megabytes."""
        return bits / self.one_megabyte_in_bits

    def get_val_in_bits(self) -> int:
        """Stored value, expressed in bits."""
        return self._value_bits

    def get_val_in_megabytes(self) -> int:
        """Stored value, expressed in megabytes."""
        return self._value_megabytes
class Gigabyte:
    """A quantity of decimal gigabytes (1 GB = 8e+9 bits)."""

    def __init__(self, value_gigabytes: int):
        self.one_gigabyte_in_bits = 8e+9
        self._value_gigabytes = value_gigabytes
        self._value_bits = self._convert_into_bits(value_gigabytes)
        self.id = "GB"

    def _convert_into_bits(self, value_gigabytes: int) -> int:
        """Translate a gigabyte count into bits."""
        return self.one_gigabyte_in_bits * value_gigabytes

    def convert_from_bits_to_gigabytes(self, bits: int) -> float:
        """Translate a bit count into gigabytes."""
        return bits / self.one_gigabyte_in_bits

    def get_val_in_bits(self) -> int:
        """Stored value, expressed in bits."""
        return self._value_bits

    def get_val_in_gigabytes(self) -> int:
        """Stored value, expressed in gigabytes."""
        return self._value_gigabytes
class Terabyte:
    """A quantity of decimal terabytes (1 TB = 8e+12 bits)."""

    def __init__(self, value_terabytes: int):
        self.one_terabyte_in_bits = 8e+12
        self._value_terabytes = value_terabytes
        self._value_bits = self._convert_into_bits(value_terabytes)
        self.id = "TB"

    def _convert_into_bits(self, value_terabytes: int) -> int:
        """Translate a terabyte count into bits."""
        return self.one_terabyte_in_bits * value_terabytes

    def convert_from_bits_to_terabytes(self, bits: int) -> float:
        """Translate a bit count into terabytes."""
        return bits / self.one_terabyte_in_bits

    def get_val_in_bits(self) -> int:
        """Stored value, expressed in bits."""
        return self._value_bits

    def get_val_in_terabytes(self) -> int:
        """Stored value, expressed in terabytes."""
        return self._value_terabytes
class Petabyte:
    """A quantity of decimal petabytes (1 PB = 8e+15 bits)."""

    def __init__(self, value_petabytes: int):
        self._value_petabytes = value_petabytes
        self.one_petabyte_in_bits = 8e+15
        self._value_bits = self._convert_into_bits(value_petabytes)
        self.id = "PB"

    def _convert_into_bits(self, value_petabytes: int) -> int:
        """Translate a petabyte count into bits."""
        return value_petabytes * self.one_petabyte_in_bits

    def convert_from_bits_to_petabytes(self, bits: int) -> float:
        """Translate a bit count into petabytes."""
        return (bits / self.one_petabyte_in_bits)

    def get_val_in_bits(self) -> int:
        """Stored value, expressed in bits.

        Bug fix: this previously returned the nonexistent attribute
        ``self._value_bytes`` and raised AttributeError; it now returns
        ``self._value_bits``, matching every sibling unit class.
        """
        return self._value_bits

    def get_val_in_petabytes(self) -> int:
        """Stored value, expressed in petabytes."""
        return self._value_petabytes
class Exabyte:
    """A quantity of decimal exabytes (1 EB = 8e+18 bits)."""

    def __init__(self, value_exabytes: int):
        self.one_exabyte_in_bits = 8e+18
        self._value_exabytes = value_exabytes
        self._value_bits = self._convert_into_bits(value_exabytes)
        self.id = "EB"

    def _convert_into_bits(self, value_exabytes: int) -> int:
        """Translate an exabyte count into bits."""
        return self.one_exabyte_in_bits * value_exabytes

    def convert_from_bits_to_exabytes(self, bits: int) -> float:
        """Translate a bit count into exabytes."""
        return bits / self.one_exabyte_in_bits

    def get_val_in_bits(self) -> int:
        """Stored value, expressed in bits."""
        return self._value_bits

    def get_val_in_exabytes(self) -> int:
        """Stored value, expressed in exabytes."""
        return self._value_exabytes
class Zettabyte:
    """A quantity of decimal zettabytes (1 ZB = 8e+21 bits)."""

    def __init__(self, value_zettabytes: int):
        self.one_zettabyte_in_bits = 8e+21
        self._value_zettabytes = value_zettabytes
        self._value_bits = self._convert_into_bits(value_zettabytes)
        self.id = "ZB"

    def _convert_into_bits(self, value_zettabytes: int) -> int:
        """Translate a zettabyte count into bits."""
        return self.one_zettabyte_in_bits * value_zettabytes

    def convert_from_bits_to_zettabytes(self, bits: int) -> float:
        """Translate a bit count into zettabytes."""
        return bits / self.one_zettabyte_in_bits

    def get_val_in_bits(self) -> int:
        """Stored value, expressed in bits."""
        return self._value_bits

    def get_val_in_zettabyte(self) -> int:
        """Stored value, expressed in zettabytes."""
        return self._value_zettabytes
class Yottabyte:
    """A quantity of decimal yottabytes (1 YB = 8e+24 bits)."""

    def __init__(self, value_yottabytes: int):
        self.one_yottabyte_in_bits = 8e+24
        self._value_yottabytes = value_yottabytes
        self._value_bits = self._convert_into_bits(value_yottabytes)
        self.id = "YB"

    def _convert_into_bits(self, value_yottabytes: int) -> int:
        """Translate a yottabyte count into bits."""
        return self.one_yottabyte_in_bits * value_yottabytes

    def convert_from_bits_to_yottabytes(self, bits: int) -> float:
        """Translate a bit count into yottabytes."""
        return bits / self.one_yottabyte_in_bits

    def get_val_in_bits(self) -> int:
        """Stored value, expressed in bits."""
        return self._value_bits

    def get_val_in_yottabyte(self) -> int:
        """Stored value, expressed in yottabytes."""
        return self._value_yottabytes
acf9016aefefd0064df15fec4f1954c14a64c388 | 1,455 | py | Python | LC/23.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | [
"MIT"
] | 2 | 2018-02-24T17:20:02.000Z | 2018-02-24T17:25:43.000Z | LC/23.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | [
"MIT"
] | null | null | null | LC/23.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | [
"MIT"
] | null | null | null | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def mergeKLists(self, lists):
        """
        Merge k sorted linked lists into one sorted list by divide and
        conquer: split the list of lists in half, merge each half
        recursively, then merge the two halves pairwise.

        :type lists: List[ListNode]
        :rtype: ListNode
        """
        if not lists:
            return None
        if len(lists) == 1:
            return lists[0]
        if len(lists) == 2:
            return self.mergeTwoLists(lists[0], lists[1])
        # Bug fix: use floor division so the split index stays an int.
        # Plain '/' yields a float under Python 3 and breaks the slicing;
        # '//' behaves identically under Python 2.
        mid = len(lists) // 2
        return self.mergeTwoLists(self.mergeKLists(lists[:mid]),
                                  self.mergeKLists(lists[mid:]))

    def mergeTwoLists(self, l1, l2):
        """
        Merge two sorted linked lists.

        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        res = ListNode(0)  # dummy head
        last = res
        while l1 or l2:
            # When one list is exhausted, append the remainder of the other.
            if l1 is None:
                last.next = l2
                return res.next
            if l2 is None:
                last.next = l1
                return res.next
            if l1.val < l2.val:
                last.next = l1
                last = last.next
                l1 = l1.next
            else:
                last.next = l2
                last = last.next
                l2 = l2.next
        return res.next
acf901caf93cbe3dbe015af1d408cda15f3bb0d6 | 4,382 | py | Python | mypyc/namegen.py | cibinmathew/mypy | 49825a9057d8c52603e91f6b99e4de94ca3d8a66 | [
"PSF-2.0"
] | 12,496 | 2016-02-19T13:38:26.000Z | 2022-03-31T23:56:19.000Z | mypyc/namegen.py | cibinmathew/mypy | 49825a9057d8c52603e91f6b99e4de94ca3d8a66 | [
"PSF-2.0"
] | 9,429 | 2016-02-19T13:41:32.000Z | 2022-03-31T23:29:38.000Z | mypyc/namegen.py | Zeckie/baselinedmypy | 142c896a7ec0a10697375833fd897b293a748699 | [
"PSF-2.0"
] | 2,770 | 2016-02-19T16:18:19.000Z | 2022-03-31T08:12:49.000Z | from typing import List, Dict, Tuple, Set, Optional, Iterable
class NameGenerator:
    """Utility for generating distinct C names from Python names.

    C identifiers cannot contain '.' (or unicode), so fully-qualified Python
    names must be mangled, and the results must stay unique without growing
    unreasonably long. To that end each module gets a short, unique prefix
    derived from the shortest distinguishing suffix of its dotted name (e.g.
    modules 'foo.bar' and 'foo.baz' get prefixes 'bar_' and 'baz_'), and '.'
    is encoded as '___' (with a pre-existing '___' escaped to '___3_' so the
    encoding stays injective — names cannot start with a digit, so the
    collision with '.3_' is harmless).

    The mapping is internal to a single build; for names exported from a
    shared library use exported_name() instead, which depends only on the
    full name.
    """

    def __init__(self, groups: Iterable[List[str]]) -> None:
        """Initialize with a list of modules in each compilation group.

        Module names are used to pick short distinguishing prefixes.
        Uncompiled modules (not listed here) still work but get long names.
        """
        self.module_map: Dict[str, str] = {}
        for names in groups:
            self.module_map.update(make_module_translation_map(names))
        self.translations: Dict[Tuple[str, str], str] = {}
        self.used_names: Set[str] = set()

    def private_name(self, module: str, partial_name: Optional[str] = None) -> str:
        """Return a C name usable for a static definition.

        Distinct (module, partial_name) pairs give distinct results; the
        caller adds a suitable prefix to avoid clashes with unrelated names.
        Pass an empty module string for names not tied to any module.
        """
        # TODO: Support unicode
        if partial_name is None:
            return exported_name(self.module_map[module].rstrip('.'))
        key = (module, partial_name)
        cached = self.translations.get(key)
        if cached is not None:
            return cached
        prefix = self.module_map.get(module)
        if prefix is None:
            # Unknown module: fall back to the full dotted name as prefix.
            prefix = module + '.' if module else ''
        mangled = exported_name('{}{}'.format(prefix, partial_name))
        self.translations[key] = mangled
        return mangled
def exported_name(fullname: str) -> str:
    """Return a C name usable for an exported definition.

    Unlike NameGenerator.private_name(), the result depends only on
    *fullname*, so names are stable across separate builds: any existing
    '___' is first escaped to '___3_', then every '.' becomes '___'.
    """
    # TODO: Support unicode
    escaped = fullname.replace('___', '___3_')
    return escaped.replace('.', '___')
def make_module_translation_map(names: List[str]) -> Dict[str, str]:
    """Map each module name to its shortest globally-unique dotted suffix.

    A suffix is unique when no other name in *names* shares it; ties are
    broken by preferring the shortest candidate.
    """
    counts: Dict[str, int] = {}
    for modname in names:
        for suffix in candidate_suffixes(modname):
            counts[suffix] = counts.get(suffix, 0) + 1
    mapping: Dict[str, str] = {}
    for modname in names:
        chosen = None
        for suffix in candidate_suffixes(modname):
            if counts[suffix] == 1:
                chosen = suffix
                break
        # Every distinct name has a unique longest suffix (itself), so this
        # only fires if *names* contains duplicates.
        assert chosen is not None, names
        mapping[modname] = chosen
    return mapping
def candidate_suffixes(fullname: str) -> List[str]:
    """Return the dotted-name suffixes of *fullname*, shortest first.

    The empty string comes first, then each suffix with a trailing '.', e.g.
    'foo.bar' -> ['', 'bar.', 'foo.bar.'].
    """
    parts = fullname.split('.')
    suffixes = ['']
    for count in range(1, len(parts) + 1):
        suffixes.append('.'.join(parts[-count:]) + '.')
    return suffixes
| 38.438596 | 83 | 0.654496 |
acf9028a859aff038683b04629efc05d1e8d3935 | 1,258 | py | Python | azure-mgmt-loganalytics/azure/mgmt/loganalytics/models/storage_insight_status.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 4 | 2016-06-17T23:25:29.000Z | 2022-03-30T22:37:45.000Z | azure-mgmt-loganalytics/azure/mgmt/loganalytics/models/storage_insight_status.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure-mgmt-loganalytics/azure/mgmt/loganalytics/models/storage_insight_status.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class StorageInsightStatus(Model):
    """The status of the storage insight.

    :param state: The state of the storage insight connection to the
     workspace. Possible values include: 'OK', 'ERROR'
    :type state: str or :class:`StorageInsightState
     <azure.mgmt.loganalytics.models.StorageInsightState>`
    :param description: Description of the state of the storage insight.
    :type description: str
    """

    # msrest validation rules: 'state' is required when serializing.
    _validation = {
        'state': {'required': True},
    }

    # Maps Python attribute names to wire-format keys and types; consumed by
    # msrest's (de)serializer — do not rename without updating the REST schema.
    _attribute_map = {
        'state': {'key': 'state', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }

    def __init__(self, state, description=None):
        # NOTE(review): autogenerated code does not call Model.__init__;
        # left unchanged to match the generator's output.
        self.state = state
        self.description = description
| 33.105263 | 76 | 0.599364 |
acf902a2f61e809ebb77700874abcc323da902e9 | 5,464 | py | Python | src/python/fit_with_stim.py | rhjohnstone/RossJ | 3bf0a81d085de33c5b461c10f2d66f52bf86450f | [
"FTL"
] | null | null | null | src/python/fit_with_stim.py | rhjohnstone/RossJ | 3bf0a81d085de33c5b461c10f2d66f52bf86450f | [
"FTL"
] | null | null | null | src/python/fit_with_stim.py | rhjohnstone/RossJ | 3bf0a81d085de33c5b461c10f2d66f52bf86450f | [
"FTL"
] | null | null | null | import ap_simulator
import numpy as np
import numpy.random as npr
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import time
import mcmc_setup as ms
import cma
import multiprocessing as mp
import itertools as it
import scipy.optimize as so
import sys
# Reproducibility knobs: the experimental trace to fit and the NumPy RNG seed
# used for the randomised initial guess below.
python_seed = 1
trace_number = 97
npr.seed(python_seed)
def prior_upper_bounds(original_gs):
    """Upper prior bounds for the conductances: 100x each original value."""
    return np.asarray(original_gs) * 100
def normalise_trace(trace):
    """Linearly rescale *trace* so its minimum maps to 0 and its maximum to 1."""
    lo = np.min(trace)
    span = np.max(trace) - lo
    return (trace - lo) / span
# Previous input (averaged canine AP trace), kept for reference:
#temp_dog_AP_file = "projects/RossJ/python/input/ken/036-2014091101 Control 1Hz analyzed.txt_averagedTrace.txt"
#dog_AP = np.loadtxt(temp_dog_AP_file)#,delimiter=',')
#expt_times = dog_AP[:,0]
#expt_trace = dog_AP[:,1]
# Load the experimental action-potential trace: column 0 = times, column 1 = voltage.
expt_dir = "/home/rossj/Documents/roche_data/2017-01_data/170123_2_2"
traces_dir = expt_dir + '/traces'
output_dir = expt_dir + '/output'
AP = np.loadtxt(traces_dir+'/{}.csv'.format(trace_number),delimiter=',')
expt_times = AP[:,0]
expt_trace = AP[:,1]
#scaled_expt_trace = normalise_trace(expt_trace)
# NOTE(review): matplotlib is using the non-interactive Agg backend (set at the
# top of the file), so plt.show() displays nothing here.
plt.plot(expt_times,expt_trace)
plt.show()
# Cell-model catalogue for ap_simulator.DefineModel:
# 1. Hodgkin Huxley
# 2. Beeler Reuter
# 3. Luo Rudy
# 4. ten Tusscher
# 5. O'Hara Rudy
# 6. Davies (canine)
# 7. Paci (SC-CM ventricular)
model_number = 7
protocol = 1
#solve_start,solve_end,solve_timestep,stimulus_magnitude,stimulus_duration,stimulus_period,stimulus_start_time = ms.get_protocol_details(protocol)
stimulus_start_time = 50 # manually from looking, should really set it exactly from original trace files
stimulus_magnitude = 0 # -25.5
stimulus_duration = 2
stimulus_period = 1000
# Solver window and step are taken directly from the experimental time grid.
solve_start = expt_times[0]
solve_end = expt_times[-1]
solve_timestep = expt_times[1]-expt_times[0]
print (solve_end-solve_start)/solve_timestep
print len(expt_times)
noise_sd = 0.25
c_seed = 1
# Data-clamp window (ms) during which the model voltage follows the recording.
data_clamp_on = 50
data_clamp_off = 52
extra_K_conc = 4
num_solves = 1
original_gs, g_parameters = ms.get_original_params(model_number)
#upper_bounds = prior_upper_bounds(original_gs)
# Unbounded above: only non-negativity is enforced in the objective.
upper_bounds = [np.inf]*len(original_gs)
times = np.arange(solve_start,solve_end+solve_timestep,solve_timestep)
# Configure the AP simulator with stimulus, model and data clamp.
ap = ap_simulator.APSimulator()
ap.SetNumberOfSolves(num_solves)
ap.DefineSolveTimes(solve_start,solve_end,solve_timestep)
ap.DefineStimulus(stimulus_magnitude,stimulus_duration,stimulus_period,stimulus_start_time)
ap.DefineModel(model_number)
ap.UseDataClamp(data_clamp_on, data_clamp_off)
ap.SetExperimentalTraceAndTimesForDataClamp(expt_times, expt_trace)
ap.SetExtracellularPotassiumConc(extra_K_conc)
opts = cma.CMAOptions()
opts['seed'] = 100*python_seed
#x0 = np.copy(original_gs)
# Randomised multiplicative perturbation of the published conductances,
# clipped to stay strictly positive.
x0 = original_gs * (1. + npr.randn(len(original_gs)))
x0[np.where(x0<0)] = 1e-5
print "x0 =", x0
#sys.exit()
def sum_of_square_diffs(params):
    """CMA-ES objective: squared-error between the simulated voltage trace
    for *params* and the experimental trace (uses the module-level globals
    `ap`, `expt_trace` and `upper_bounds`)."""
    if np.any(params < 0) or np.any(params > upper_bounds):
        # Outside the prior box: reject with infinite cost.
        return np.inf
    ap.LoadStateVariables()
    simulated = ap.SolveForVoltageTraceWithParams(params)
    return np.sum((simulated - expt_trace)**2)
def normalised_sum_of_square_diffs(params):#,expt_trace,upper_bounds,ap):
    """Squared error between the min-max-normalised simulated trace and the
    normalised experimental trace.

    NOTE(review): references the module global ``scaled_expt_trace`` whose
    assignment is commented out above — calling this as-is raises NameError.
    NOTE(review): unlike ``sum_of_square_diffs`` this does not call
    ``ap.LoadStateVariables()`` before solving — confirm whether intentional.
    """
    if np.any(params<0) or np.any(params>upper_bounds):
        #print test_gs
        return np.inf
    scaled_test_trace = normalise_trace(ap.SolveForVoltageTraceWithParams(params))
    return np.sum((scaled_test_trace-scaled_expt_trace)**2)
original_obj_fun = sum_of_square_diffs(x0)#,expt_trace,upper_bounds,ap)
print "original_obj_fun =", original_obj_fun
#sys.exit()
start = time.time()
num_cores = mp.cpu_count()-1
if (num_cores < 1):
num_cores = 1
pool = mp.Pool(num_cores)
sigma0 = 0.00001
es = cma.CMAEvolutionStrategy(x0, sigma0, opts)
while not es.stop():
X = es.ask()
#f_vals = pool.map_async(sum_of_square_diffs,X).get(9999999)
#f_vals = pool.map_async(normalised_sum_of_square_diffs,X).get(9999)
f_vals = [sum_of_square_diffs(x,expt_trace,upper_bounds,ap) for x in X]
es.tell(X, f_vals)
es.disp()
pool.close()
res = es.result()
time_taken = time.time()-start
print "res[0] =", res[0]
best_gs = res[0]
best_f = res[1]
print "\nTime taken: {} s\n".format(round(time_taken,1))
#best_fit_trace = normalise_trace(ap.SolveForVoltageTraceWithParams(best_gs))
# Re-solve with the best-fit and the original published parameter sets.
best_fit_trace = ap.SolveForVoltageTraceWithParams(best_gs)
print "original_gs:", original_gs
print "best_gs:", best_gs
#print original_gs/best_gs
true_trace = ap.SolveForVoltageTraceWithParams(original_gs)
# Overlay experiment, original-parameter model and fitted model.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.grid()
#ax.plot(expt_times,scaled_expt_trace,color='red', label='Expt')
ax.plot(expt_times,expt_trace,color='red', label='Expt')
ax.plot(times,true_trace,color='blue',label='Original')
ax.plot(times,best_fit_trace,color='green',label='Best fit')
ax.legend()
fig.tight_layout()
#fig.savefig("ken_normalised_trace_fit_to_model_{}_python_seed_{}.png".format(model_number,python_seed))
# NOTE(review): output_dir has no trailing '/', so this writes a file named
# "outputtrace_..." next to the output directory rather than inside it —
# likely a missing path separator; confirm intent before changing paths.
fig.savefig(output_dir + "trace_{}_fit_to_model_{}_python_seed_{}.png".format(trace_number,model_number,python_seed))
plt.close()
#params_file = "ken_normalised_best_fit_params_model_{}.txt".format(model_number)
params_file = output_dir + "trace_{}_best_fit_params_model_{}.txt".format(trace_number,model_number)
# Append (mode 'a') the run summary so repeated runs accumulate in one file.
with open(params_file,'a') as outfile:
    outfile.write("py_seed: " + str(python_seed) + "\n")
    outfile.write("initial: " + str(x0) + "\n")
    outfile.write("best_gs: " + str(best_gs) + "\n")
    outfile.write("best_f: " + str(best_f) + "\n\n")
| 29.06383 | 146 | 0.758785 |
acf90531d359e94182c17048c6254fc4df5d09ab | 2,082 | py | Python | python/ql/test/experimental/dataflow/variable-capture/dict.py | timoles/codeql | 2d24387e9e300bf03be35694816b1e76ae88a50c | [
"MIT"
] | 4,036 | 2020-04-29T00:09:57.000Z | 2022-03-31T14:16:38.000Z | python/ql/test/experimental/dataflow/variable-capture/dict.py | baby636/codeql | 097b6e5e3364ecc7103586d6feb308861e15538e | [
"MIT"
] | 2,970 | 2020-04-28T17:24:18.000Z | 2022-03-31T22:40:46.000Z | python/ql/test/experimental/dataflow/variable-capture/dict.py | ScriptBox99/github-codeql | 2ecf0d3264db8fb4904b2056964da469372a235c | [
"MIT"
] | 794 | 2020-04-29T00:28:25.000Z | 2022-03-30T08:21:46.000Z | # Here we test writing to a captured variable via a dictionary (see `out`).
# We also test reading one captured variable and writing the value to another (see `through`).
# All functions starting with "test_" should run and execute `print("OK")` exactly once.
# This can be checked by running validTest.py.
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname((__file__))))
from testlib import *
# These are defined so that we can evaluate the test code.
NONSOURCE = "not a source"
SOURCE = "source"
def is_source(x):
    """True for any of the canonical taint-source values used by these tests."""
    return x in ("source", b"source", 42, 42.0, 42j)
def SINK(x):
    """Taint sink that is *expected* to receive a source value: prints "OK"
    when it does, otherwise reports the unexpected value."""
    if not is_source(x):
        print("Unexpected flow", x)
    else:
        print("OK")
def SINK_F(x):
    """Taint sink that is expected to stay clean: prints "OK" when no source
    value reaches it, otherwise reports the unexpected flow."""
    if not is_source(x):
        print("OK")
    else:
        print("Unexpected flow", x)
def out():
    """Write SOURCE into captured dicts from nested closures.

    The first two dicts are written by closures that ARE called, so the flow
    into SINK should be detected; the last dict's writers are never executed
    (or their inner function is not), so SINK_F must stay clean. The ``#$``
    trailing comments are CodeQL inline expectations — do not edit them.
    """
    sinkO1 = { "x": "" }
    def captureOut1():
        sinkO1["x"] = SOURCE
    captureOut1()
    SINK(sinkO1["x"]) #$ MISSING:captured
    sinkO2 = { "x": "" }
    def captureOut2():
        def m():
            sinkO2["x"] = SOURCE
        m()
    captureOut2()
    SINK(sinkO2["x"]) #$ MISSING:captured
    nonSink0 = { "x": "" }
    def captureOut1NotCalled():
        nonSink0["x"] = SOURCE
    SINK_F(nonSink0["x"])
    def captureOut2NotCalled():
        def m():
            nonSink0["x"] = SOURCE
    captureOut2NotCalled()
    SINK_F(nonSink0["x"])
@expects(4)
def test_out():
    # out() contains four sink calls; each must print "OK" exactly once.
    out()
def through(tainted):
    """Read the captured parameter *tainted* and write it into captured dicts.

    Same structure as out(), but the source arrives via the function argument
    rather than the SOURCE global, exercising read-then-write capture flow.
    The ``#$`` trailing comments are CodeQL inline expectations.
    """
    sinkO1 = { "x": "" }
    def captureOut1():
        sinkO1["x"] = tainted
    captureOut1()
    SINK(sinkO1["x"]) #$ MISSING:captured
    sinkO2 = { "x": "" }
    def captureOut2():
        def m():
            sinkO2["x"] = tainted
        m()
    captureOut2()
    SINK(sinkO2["x"]) #$ MISSING:captured
    nonSink0 = { "x": "" }
    def captureOut1NotCalled():
        nonSink0["x"] = tainted
    SINK_F(nonSink0["x"])
    def captureOut2NotCalled():
        def m():
            nonSink0["x"] = tainted
    captureOut2NotCalled()
    SINK_F(nonSink0["x"])
@expects(4)
def test_through():
    # through() contains four sink calls; each must print "OK" exactly once.
    through(SOURCE)
| 22.148936 | 94 | 0.577329 |
acf906216bafb601b16aac20de817d0b0330393a | 4,502 | py | Python | chia/pools/pool_wallet_info.py | Albertjan90/chia-blockchain | 24b4533e7dd225c065c234eeaea25f06118a088b | [
"Apache-2.0"
] | 11 | 2021-08-14T15:46:18.000Z | 2022-02-09T04:29:53.000Z | chia/pools/pool_wallet_info.py | Albertjan90/chia-blockchain | 24b4533e7dd225c065c234eeaea25f06118a088b | [
"Apache-2.0"
] | 44 | 2021-05-04T06:23:12.000Z | 2022-03-29T19:04:48.000Z | chia/pools/pool_wallet_info.py | Albertjan90/chia-blockchain | 24b4533e7dd225c065c234eeaea25f06118a088b | [
"Apache-2.0"
] | 6 | 2021-08-19T13:27:46.000Z | 2022-03-15T08:37:56.000Z | from dataclasses import dataclass
from enum import IntEnum
from typing import Optional, Dict
from blspy import G1Element
from chia.protocols.pool_protocol import POOL_PROTOCOL_VERSION
from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.program import Program
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.byte_types import hexstr_to_bytes
from chia.util.ints import uint32, uint8
from chia.util.streamable import streamable, Streamable
class PoolSingletonState(IntEnum):
    """
    From the user's point of view, a pool group can be in these states:
    `SELF_POOLING`: The singleton exists on the blockchain, and we are farming
    block rewards to a wallet address controlled by the user

    `LEAVING_POOL`: The singleton exists, and we have entered the "escaping" state, which
    means we are waiting for a number of blocks = `relative_lock_height` to pass, so we can leave.

    `FARMING_TO_POOL`: The singleton exists, and it is assigned to a pool.
    """

    SELF_POOLING = 1
    LEAVING_POOL = 2
    FARMING_TO_POOL = 3


# Module-level shorthands so callers can write e.g. `SELF_POOLING` instead of
# `PoolSingletonState.SELF_POOLING`.
SELF_POOLING = PoolSingletonState.SELF_POOLING
LEAVING_POOL = PoolSingletonState.LEAVING_POOL
FARMING_TO_POOL = PoolSingletonState.FARMING_TO_POOL
@dataclass(frozen=True)
@streamable
class PoolState(Streamable):
    """
    `PoolState` is a type that is serialized to the blockchain to track the state of the user's pool singleton
    `target_puzzle_hash` is either the pool address, or the self-pooling address that pool rewards will be paid to.
    `target_puzzle_hash` is NOT the p2_singleton puzzle that block rewards are sent to.
    The `p2_singleton` address is the initial address, and the `target_puzzle_hash` is the final destination.
    `relative_lock_height` is zero when in SELF_POOLING state

    NOTE: field order below defines the @streamable wire format — do not
    reorder fields without a protocol version bump.
    """

    version: uint8
    state: uint8  # PoolSingletonState
    # `target_puzzle_hash`: A puzzle_hash we pay to
    # When self-farming, this is a main wallet address
    # When farming-to-pool, the pool sends this to the farmer during pool protocol setup
    target_puzzle_hash: bytes32  # TODO: rename target_puzzle_hash -> pay_to_address
    # owner_pubkey is set by the wallet, once
    owner_pubkey: G1Element
    pool_url: Optional[str]
    relative_lock_height: uint32
def initial_pool_state_from_dict(state_dict: Dict, owner_pubkey: G1Element, owner_puzzle_hash: bytes32) -> PoolState:
    """Build the initial PoolState for a new singleton from a configuration dict.

    Only SELF_POOLING and FARMING_TO_POOL are acceptable starting states; any
    other state name raises ValueError. For SELF_POOLING the payout target is
    the owner's own puzzle hash; for FARMING_TO_POOL the target, pool URL and
    lock height are read out of `state_dict`.
    """
    state: PoolSingletonState = PoolSingletonState[state_dict["state"]]
    if state == SELF_POOLING:
        # Self-pooling: rewards are paid back to the owner's own address.
        pay_to: bytes32 = owner_puzzle_hash
        url: str = ""
        lock_height = uint32(0)
    elif state == FARMING_TO_POOL:
        # Farming to a pool: destination/URL/lock height come from the dict.
        pay_to = bytes32(hexstr_to_bytes(state_dict["target_puzzle_hash"]))
        url = state_dict["pool_url"]
        lock_height = uint32(state_dict["relative_lock_height"])
    else:
        raise ValueError("Initial state must be SELF_POOLING or FARMING_TO_POOL")
    # TODO: change create_pool_state to return error messages, as well
    assert lock_height is not None
    return create_pool_state(state, pay_to, owner_pubkey, url, lock_height)
def create_pool_state(
    state: PoolSingletonState,
    target_puzzle_hash: bytes32,
    owner_pubkey: G1Element,
    pool_url: Optional[str],
    relative_lock_height: uint32,
) -> PoolState:
    """Construct a PoolState after validating `state`.

    :param state: must compare equal to one of the PoolSingletonState values
        (IntEnum members compare equal to their int values, so a bare int works)
    :raises AssertionError: if `state` is not a valid PoolSingletonState value
    """
    if state not in set(s.value for s in PoolSingletonState):
        # Bug fix: the message previously lacked the f-prefix, so the literal
        # text "{state}" (plus a stray trailing comma) was emitted instead of
        # the offending value.
        raise AssertionError(f"state {state} is not a valid PoolSingletonState")
    ps = PoolState(
        POOL_PROTOCOL_VERSION, uint8(state), target_puzzle_hash, owner_pubkey, pool_url, relative_lock_height
    )
    # TODO Move verify here
    return ps
@dataclass(frozen=True)
@streamable
class PoolWalletInfo(Streamable):
    """
    Internal Pool Wallet state, not destined for the blockchain. This can be completely derived with
    the Singleton's CoinSolutions list, or with the information from the WalletPoolStore.
    """
    # PoolState currently in effect.
    current: PoolState
    # NOTE(review): presumably the PoolState being transitioned to, if any —
    # confirm against the wallet state machine.
    target: Optional[PoolState]
    launcher_coin: Coin
    launcher_id: bytes32
    p2_singleton_puzzle_hash: bytes32
    current_inner: Program # Inner puzzle in current singleton, not revealed yet
    tip_singleton_coin_id: bytes32
    singleton_block_height: uint32 # Block height that current PoolState is from
| 38.810345 | 117 | 0.757885 |
acf90890fbbedfacd04b4e16f6e7af68b54715a0 | 196,646 | py | Python | venv/lib/python3.6/site-packages/xero_python/payrollnz/api/payroll_nz_api.py | 6enno/FarmXero | 881b1e6648e927631b276e66a4c5287e4de2cbc1 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/xero_python/payrollnz/api/payroll_nz_api.py | 6enno/FarmXero | 881b1e6648e927631b276e66a4c5287e4de2cbc1 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/xero_python/payrollnz/api/payroll_nz_api.py | 6enno/FarmXero | 881b1e6648e927631b276e66a4c5287e4de2cbc1 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Xero Payroll NZ
This is the Xero Payroll API for orgs in the NZ region. # noqa: E501
Contact: api@xero.com
Generated by: https://openapi-generator.tech
"""
"""
OpenAPI spec version: 2.8.4
"""
import importlib
import re # noqa: F401
from xero_python import exceptions
from xero_python.api_client import ApiClient, ModelFinder
# Prefer a package-local status-exception translator when one exists alongside
# this generated API; otherwise fall back to the generic translator provided
# by xero_python.exceptions.
try:
    from .exception_handler import translate_status_exception
except ImportError:
    translate_status_exception = exceptions.translate_status_exception
class empty:
    """Sentinel type used to mark an optional parameter that the caller did not set."""
class PayrollNzApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    # Default Payroll NZ endpoint; instances may override it via __init__(base_url=...).
    base_url = "https://api.xero.com/payroll.xro/2.0"
    # Module containing the response model classes; used by get_model_finder()
    # to resolve response_type strings into classes.
    models_module = importlib.import_module("xero_python.payrollnz.models")
def __init__(self, api_client=None, base_url=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.base_url = base_url or self.base_url
def get_resource_url(self, resource_path):
"""
Combine API base url with resource specific path
:param str resource_path: API endpoint specific path
:return: str full resource path
"""
return self.base_url + resource_path
    def get_model_finder(self):
        # Wrap the payrollnz models module so ApiClient can resolve
        # response_type strings (e.g. "TimesheetObject") into model classes.
        return ModelFinder(self.models_module)
def approve_timesheet(
self,
xero_tenant_id,
timesheet_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""approve a timesheet # noqa: E501
OAuth2 scope: payroll.timesheets
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str timesheet_id: Identifier for the timesheet (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: TimesheetObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `approve_timesheet`"
)
# verify the required parameter 'timesheet_id' is set
if timesheet_id is None:
raise ValueError(
"Missing the required parameter `timesheet_id` "
"when calling `approve_timesheet`"
)
collection_formats = {}
path_params = {
"TimesheetID": timesheet_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Timesheets/{TimesheetID}/Approve")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="TimesheetObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "approve_timesheet")
    def create_deduction(
        self,
        xero_tenant_id,
        deduction,
        _return_http_data_only=True,
        _preload_content=True,
        _request_timeout=None,
    ):
        """create a new deduction # noqa: E501
        OAuth2 scope: payroll.settings
        :param str xero_tenant_id: Xero identifier for Tenant (required)
        :param Deduction deduction: (required)
        :param bool _return_http_data_only: return received data only
        :param bool _preload_content: load received data in models
        :param bool _request_timeout: maximum wait time for response
        :return: DeductionObject
        """
        # verify the required parameter 'xero_tenant_id' is set
        if xero_tenant_id is None:
            raise ValueError(
                "Missing the required parameter `xero_tenant_id` "
                "when calling `create_deduction`"
            )
        # verify the required parameter 'deduction' is set
        if deduction is None:
            raise ValueError(
                "Missing the required parameter `deduction` "
                "when calling `create_deduction`"
            )
        # Assemble request components; the Deduction model is serialized as the JSON body.
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {
            "Xero-Tenant-Id": xero_tenant_id,
        }
        local_var_files = {}
        form_params = []
        body_params = deduction
        # HTTP header `Accept`
        header_params["Accept"] = self.api_client.select_header_accept(
            ["application/json"]
        )
        # HTTP header `Content-Type`
        header_params["Content-Type"] = self.api_client.select_header_content_type(
            ["application/json"]
        )
        # Authentication setting
        auth_settings = ["OAuth2"]
        url = self.get_resource_url("/Deductions")
        # POST the request; HTTP error statuses are translated into payroll-specific exceptions.
        try:
            return self.api_client.call_api(
                url,
                "POST",
                path_params,
                query_params,
                header_params,
                body=body_params,
                post_params=form_params,
                files=local_var_files,
                response_type="DeductionObject",
                response_model_finder=self.get_model_finder(),
                auth_settings=auth_settings,
                _return_http_data_only=_return_http_data_only,
                _preload_content=_preload_content,
                _request_timeout=_request_timeout,
                collection_formats=collection_formats,
            )
        except exceptions.HTTPStatusException as error:
            raise translate_status_exception(error, self, "create_deduction")
    def create_earnings_rate(
        self,
        xero_tenant_id,
        earnings_rate,
        _return_http_data_only=True,
        _preload_content=True,
        _request_timeout=None,
    ):
        """create a new earnings rate # noqa: E501
        OAuth2 scope: payroll.settings
        :param str xero_tenant_id: Xero identifier for Tenant (required)
        :param EarningsRate earnings_rate: (required)
        :param bool _return_http_data_only: return received data only
        :param bool _preload_content: load received data in models
        :param bool _request_timeout: maximum wait time for response
        :return: EarningsRateObject
        """
        # verify the required parameter 'xero_tenant_id' is set
        if xero_tenant_id is None:
            raise ValueError(
                "Missing the required parameter `xero_tenant_id` "
                "when calling `create_earnings_rate`"
            )
        # verify the required parameter 'earnings_rate' is set
        if earnings_rate is None:
            raise ValueError(
                "Missing the required parameter `earnings_rate` "
                "when calling `create_earnings_rate`"
            )
        # Assemble request components; the EarningsRate model is serialized as the JSON body.
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {
            "Xero-Tenant-Id": xero_tenant_id,
        }
        local_var_files = {}
        form_params = []
        body_params = earnings_rate
        # HTTP header `Accept`
        header_params["Accept"] = self.api_client.select_header_accept(
            ["application/json"]
        )
        # HTTP header `Content-Type`
        header_params["Content-Type"] = self.api_client.select_header_content_type(
            ["application/json"]
        )
        # Authentication setting
        auth_settings = ["OAuth2"]
        url = self.get_resource_url("/EarningsRates")
        # POST the request; HTTP error statuses are translated into payroll-specific exceptions.
        try:
            return self.api_client.call_api(
                url,
                "POST",
                path_params,
                query_params,
                header_params,
                body=body_params,
                post_params=form_params,
                files=local_var_files,
                response_type="EarningsRateObject",
                response_model_finder=self.get_model_finder(),
                auth_settings=auth_settings,
                _return_http_data_only=_return_http_data_only,
                _preload_content=_preload_content,
                _request_timeout=_request_timeout,
                collection_formats=collection_formats,
            )
        except exceptions.HTTPStatusException as error:
            raise translate_status_exception(error, self, "create_earnings_rate")
    def create_employee(
        self,
        xero_tenant_id,
        employee,
        _return_http_data_only=True,
        _preload_content=True,
        _request_timeout=None,
    ):
        """creates employees # noqa: E501
        OAuth2 scope: payroll.employees
        :param str xero_tenant_id: Xero identifier for Tenant (required)
        :param Employee employee: (required)
        :param bool _return_http_data_only: return received data only
        :param bool _preload_content: load received data in models
        :param bool _request_timeout: maximum wait time for response
        :return: EmployeeObject
        """
        # verify the required parameter 'xero_tenant_id' is set
        if xero_tenant_id is None:
            raise ValueError(
                "Missing the required parameter `xero_tenant_id` "
                "when calling `create_employee`"
            )
        # verify the required parameter 'employee' is set
        if employee is None:
            raise ValueError(
                "Missing the required parameter `employee` "
                "when calling `create_employee`"
            )
        # Assemble request components; the Employee model is serialized as the JSON body.
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {
            "Xero-Tenant-Id": xero_tenant_id,
        }
        local_var_files = {}
        form_params = []
        body_params = employee
        # HTTP header `Accept`
        header_params["Accept"] = self.api_client.select_header_accept(
            ["application/json"]
        )
        # HTTP header `Content-Type`
        header_params["Content-Type"] = self.api_client.select_header_content_type(
            ["application/json"]
        )
        # Authentication setting
        auth_settings = ["OAuth2"]
        url = self.get_resource_url("/Employees")
        # POST the request; HTTP error statuses are translated into payroll-specific exceptions.
        try:
            return self.api_client.call_api(
                url,
                "POST",
                path_params,
                query_params,
                header_params,
                body=body_params,
                post_params=form_params,
                files=local_var_files,
                response_type="EmployeeObject",
                response_model_finder=self.get_model_finder(),
                auth_settings=auth_settings,
                _return_http_data_only=_return_http_data_only,
                _preload_content=_preload_content,
                _request_timeout=_request_timeout,
                collection_formats=collection_formats,
            )
        except exceptions.HTTPStatusException as error:
            raise translate_status_exception(error, self, "create_employee")
    def create_employee_earnings_template(
        self,
        xero_tenant_id,
        employee_id,
        earnings_template,
        _return_http_data_only=True,
        _preload_content=True,
        _request_timeout=None,
    ):
        """creates employee earnings template records # noqa: E501
        OAuth2 scope: payroll.employees
        :param str xero_tenant_id: Xero identifier for Tenant (required)
        :param str employee_id: Employee id for single object (required)
        :param EarningsTemplate earnings_template: (required)
        :param bool _return_http_data_only: return received data only
        :param bool _preload_content: load received data in models
        :param bool _request_timeout: maximum wait time for response
        :return: EarningsTemplateObject
        """
        # verify the required parameter 'xero_tenant_id' is set
        if xero_tenant_id is None:
            raise ValueError(
                "Missing the required parameter `xero_tenant_id` "
                "when calling `create_employee_earnings_template`"
            )
        # verify the required parameter 'employee_id' is set
        if employee_id is None:
            raise ValueError(
                "Missing the required parameter `employee_id` "
                "when calling `create_employee_earnings_template`"
            )
        # verify the required parameter 'earnings_template' is set
        if earnings_template is None:
            raise ValueError(
                "Missing the required parameter `earnings_template` "
                "when calling `create_employee_earnings_template`"
            )
        # Assemble request components; the EarningsTemplate model is serialized as the JSON body.
        collection_formats = {}
        path_params = {
            "EmployeeId": employee_id,
        }
        query_params = []
        header_params = {
            "Xero-Tenant-Id": xero_tenant_id,
        }
        local_var_files = {}
        form_params = []
        body_params = earnings_template
        # HTTP header `Accept`
        header_params["Accept"] = self.api_client.select_header_accept(
            ["application/json"]
        )
        # HTTP header `Content-Type`
        header_params["Content-Type"] = self.api_client.select_header_content_type(
            ["application/json"]
        )
        # Authentication setting
        auth_settings = ["OAuth2"]
        url = self.get_resource_url("/Employees/{EmployeeId}/PayTemplates/earnings")
        # POST the request; HTTP error statuses are translated into payroll-specific exceptions.
        try:
            return self.api_client.call_api(
                url,
                "POST",
                path_params,
                query_params,
                header_params,
                body=body_params,
                post_params=form_params,
                files=local_var_files,
                response_type="EarningsTemplateObject",
                response_model_finder=self.get_model_finder(),
                auth_settings=auth_settings,
                _return_http_data_only=_return_http_data_only,
                _preload_content=_preload_content,
                _request_timeout=_request_timeout,
                collection_formats=collection_formats,
            )
        except exceptions.HTTPStatusException as error:
            raise translate_status_exception(
                error, self, "create_employee_earnings_template"
            )
    def create_employee_leave(
        self,
        xero_tenant_id,
        employee_id,
        employee_leave,
        _return_http_data_only=True,
        _preload_content=True,
        _request_timeout=None,
    ):
        """creates employee leave records # noqa: E501
        OAuth2 scope: payroll.employees
        :param str xero_tenant_id: Xero identifier for Tenant (required)
        :param str employee_id: Employee id for single object (required)
        :param EmployeeLeave employee_leave: (required)
        :param bool _return_http_data_only: return received data only
        :param bool _preload_content: load received data in models
        :param bool _request_timeout: maximum wait time for response
        :return: EmployeeLeaveObject
        """
        # verify the required parameter 'xero_tenant_id' is set
        if xero_tenant_id is None:
            raise ValueError(
                "Missing the required parameter `xero_tenant_id` "
                "when calling `create_employee_leave`"
            )
        # verify the required parameter 'employee_id' is set
        if employee_id is None:
            raise ValueError(
                "Missing the required parameter `employee_id` "
                "when calling `create_employee_leave`"
            )
        # verify the required parameter 'employee_leave' is set
        if employee_leave is None:
            raise ValueError(
                "Missing the required parameter `employee_leave` "
                "when calling `create_employee_leave`"
            )
        # Assemble request components; the EmployeeLeave model is serialized as the JSON body.
        collection_formats = {}
        path_params = {
            "EmployeeId": employee_id,
        }
        query_params = []
        header_params = {
            "Xero-Tenant-Id": xero_tenant_id,
        }
        local_var_files = {}
        form_params = []
        body_params = employee_leave
        # HTTP header `Accept`
        header_params["Accept"] = self.api_client.select_header_accept(
            ["application/json"]
        )
        # HTTP header `Content-Type`
        header_params["Content-Type"] = self.api_client.select_header_content_type(
            ["application/json"]
        )
        # Authentication setting
        auth_settings = ["OAuth2"]
        url = self.get_resource_url("/Employees/{EmployeeId}/Leave")
        # POST the request; HTTP error statuses are translated into payroll-specific exceptions.
        try:
            return self.api_client.call_api(
                url,
                "POST",
                path_params,
                query_params,
                header_params,
                body=body_params,
                post_params=form_params,
                files=local_var_files,
                response_type="EmployeeLeaveObject",
                response_model_finder=self.get_model_finder(),
                auth_settings=auth_settings,
                _return_http_data_only=_return_http_data_only,
                _preload_content=_preload_content,
                _request_timeout=_request_timeout,
                collection_formats=collection_formats,
            )
        except exceptions.HTTPStatusException as error:
            raise translate_status_exception(error, self, "create_employee_leave")
    def create_employee_leave_setup(
        self,
        xero_tenant_id,
        employee_id,
        employee_leave_setup,
        _return_http_data_only=True,
        _preload_content=True,
        _request_timeout=None,
    ):
        """Allows you to set-up leave for a specific employee. This is required before viewing, configuring and requesting leave for an employee # noqa: E501
        OAuth2 scope: payroll.employees
        :param str xero_tenant_id: Xero identifier for Tenant (required)
        :param str employee_id: Employee id for single object (required)
        :param EmployeeLeaveSetup employee_leave_setup: (required)
        :param bool _return_http_data_only: return received data only
        :param bool _preload_content: load received data in models
        :param bool _request_timeout: maximum wait time for response
        :return: EmployeeLeaveSetupObject
        """
        # verify the required parameter 'xero_tenant_id' is set
        if xero_tenant_id is None:
            raise ValueError(
                "Missing the required parameter `xero_tenant_id` "
                "when calling `create_employee_leave_setup`"
            )
        # verify the required parameter 'employee_id' is set
        if employee_id is None:
            raise ValueError(
                "Missing the required parameter `employee_id` "
                "when calling `create_employee_leave_setup`"
            )
        # verify the required parameter 'employee_leave_setup' is set
        if employee_leave_setup is None:
            raise ValueError(
                "Missing the required parameter `employee_leave_setup` "
                "when calling `create_employee_leave_setup`"
            )
        # Assemble request components; the EmployeeLeaveSetup model is serialized as the JSON body.
        collection_formats = {}
        path_params = {
            "EmployeeId": employee_id,
        }
        query_params = []
        header_params = {
            "Xero-Tenant-Id": xero_tenant_id,
        }
        local_var_files = {}
        form_params = []
        body_params = employee_leave_setup
        # HTTP header `Accept`
        header_params["Accept"] = self.api_client.select_header_accept(
            ["application/json"]
        )
        # HTTP header `Content-Type`
        header_params["Content-Type"] = self.api_client.select_header_content_type(
            ["application/json"]
        )
        # Authentication setting
        auth_settings = ["OAuth2"]
        url = self.get_resource_url("/Employees/{EmployeeId}/leaveSetup")
        # POST the request; HTTP error statuses are translated into payroll-specific exceptions.
        try:
            return self.api_client.call_api(
                url,
                "POST",
                path_params,
                query_params,
                header_params,
                body=body_params,
                post_params=form_params,
                files=local_var_files,
                response_type="EmployeeLeaveSetupObject",
                response_model_finder=self.get_model_finder(),
                auth_settings=auth_settings,
                _return_http_data_only=_return_http_data_only,
                _preload_content=_preload_content,
                _request_timeout=_request_timeout,
                collection_formats=collection_formats,
            )
        except exceptions.HTTPStatusException as error:
            raise translate_status_exception(error, self, "create_employee_leave_setup")
    def create_employee_leave_type(
        self,
        xero_tenant_id,
        employee_id,
        employee_leave_type,
        _return_http_data_only=True,
        _preload_content=True,
        _request_timeout=None,
    ):
        """creates employee leave type records # noqa: E501
        OAuth2 scope: payroll.employees
        :param str xero_tenant_id: Xero identifier for Tenant (required)
        :param str employee_id: Employee id for single object (required)
        :param EmployeeLeaveType employee_leave_type: (required)
        :param bool _return_http_data_only: return received data only
        :param bool _preload_content: load received data in models
        :param bool _request_timeout: maximum wait time for response
        :return: EmployeeLeaveTypeObject
        """
        # verify the required parameter 'xero_tenant_id' is set
        if xero_tenant_id is None:
            raise ValueError(
                "Missing the required parameter `xero_tenant_id` "
                "when calling `create_employee_leave_type`"
            )
        # verify the required parameter 'employee_id' is set
        if employee_id is None:
            raise ValueError(
                "Missing the required parameter `employee_id` "
                "when calling `create_employee_leave_type`"
            )
        # verify the required parameter 'employee_leave_type' is set
        if employee_leave_type is None:
            raise ValueError(
                "Missing the required parameter `employee_leave_type` "
                "when calling `create_employee_leave_type`"
            )
        # Assemble request components; the EmployeeLeaveType model is serialized as the JSON body.
        collection_formats = {}
        path_params = {
            "EmployeeId": employee_id,
        }
        query_params = []
        header_params = {
            "Xero-Tenant-Id": xero_tenant_id,
        }
        local_var_files = {}
        form_params = []
        body_params = employee_leave_type
        # HTTP header `Accept`
        header_params["Accept"] = self.api_client.select_header_accept(
            ["application/json"]
        )
        # HTTP header `Content-Type`
        header_params["Content-Type"] = self.api_client.select_header_content_type(
            ["application/json"]
        )
        # Authentication setting
        auth_settings = ["OAuth2"]
        url = self.get_resource_url("/Employees/{EmployeeId}/LeaveTypes")
        # POST the request; HTTP error statuses are translated into payroll-specific exceptions.
        try:
            return self.api_client.call_api(
                url,
                "POST",
                path_params,
                query_params,
                header_params,
                body=body_params,
                post_params=form_params,
                files=local_var_files,
                response_type="EmployeeLeaveTypeObject",
                response_model_finder=self.get_model_finder(),
                auth_settings=auth_settings,
                _return_http_data_only=_return_http_data_only,
                _preload_content=_preload_content,
                _request_timeout=_request_timeout,
                collection_formats=collection_formats,
            )
        except exceptions.HTTPStatusException as error:
            raise translate_status_exception(error, self, "create_employee_leave_type")
    def create_employee_opening_balances(
        self,
        xero_tenant_id,
        employee_id,
        employee_opening_balance,
        _return_http_data_only=True,
        _preload_content=True,
        _request_timeout=None,
    ):
        """creates employee opening balances # noqa: E501
        OAuth2 scope: payroll.employees
        :param str xero_tenant_id: Xero identifier for Tenant (required)
        :param str employee_id: Employee id for single object (required)
        :param list[EmployeeOpeningBalance] employee_opening_balance: (required)
        :param bool _return_http_data_only: return received data only
        :param bool _preload_content: load received data in models
        :param bool _request_timeout: maximum wait time for response
        :return: EmployeeOpeningBalancesObject
        """
        # verify the required parameter 'xero_tenant_id' is set
        if xero_tenant_id is None:
            raise ValueError(
                "Missing the required parameter `xero_tenant_id` "
                "when calling `create_employee_opening_balances`"
            )
        # verify the required parameter 'employee_id' is set
        if employee_id is None:
            raise ValueError(
                "Missing the required parameter `employee_id` "
                "when calling `create_employee_opening_balances`"
            )
        # verify the required parameter 'employee_opening_balance' is set
        if employee_opening_balance is None:
            raise ValueError(
                "Missing the required parameter `employee_opening_balance` "
                "when calling `create_employee_opening_balances`"
            )
        # Assemble request components; the list of EmployeeOpeningBalance models is serialized as the JSON body.
        collection_formats = {}
        path_params = {
            "EmployeeId": employee_id,
        }
        query_params = []
        header_params = {
            "Xero-Tenant-Id": xero_tenant_id,
        }
        local_var_files = {}
        form_params = []
        body_params = employee_opening_balance
        # HTTP header `Accept`
        header_params["Accept"] = self.api_client.select_header_accept(
            ["application/json"]
        )
        # HTTP header `Content-Type`
        header_params["Content-Type"] = self.api_client.select_header_content_type(
            ["application/json"]
        )
        # Authentication setting
        auth_settings = ["OAuth2"]
        url = self.get_resource_url("/Employees/{EmployeeId}/openingBalances")
        # POST the request; HTTP error statuses are translated into payroll-specific exceptions.
        try:
            return self.api_client.call_api(
                url,
                "POST",
                path_params,
                query_params,
                header_params,
                body=body_params,
                post_params=form_params,
                files=local_var_files,
                response_type="EmployeeOpeningBalancesObject",
                response_model_finder=self.get_model_finder(),
                auth_settings=auth_settings,
                _return_http_data_only=_return_http_data_only,
                _preload_content=_preload_content,
                _request_timeout=_request_timeout,
                collection_formats=collection_formats,
            )
        except exceptions.HTTPStatusException as error:
            raise translate_status_exception(
                error, self, "create_employee_opening_balances"
            )
    def create_employee_payment_method(
        self,
        xero_tenant_id,
        employee_id,
        payment_method,
        _return_http_data_only=True,
        _preload_content=True,
        _request_timeout=None,
    ):
        """creates employee payment method # noqa: E501
        OAuth2 scope: payroll.employees
        :param str xero_tenant_id: Xero identifier for Tenant (required)
        :param str employee_id: Employee id for single object (required)
        :param PaymentMethod payment_method: (required)
        :param bool _return_http_data_only: return received data only
        :param bool _preload_content: load received data in models
        :param bool _request_timeout: maximum wait time for response
        :return: PaymentMethodObject
        """
        # verify the required parameter 'xero_tenant_id' is set
        if xero_tenant_id is None:
            raise ValueError(
                "Missing the required parameter `xero_tenant_id` "
                "when calling `create_employee_payment_method`"
            )
        # verify the required parameter 'employee_id' is set
        if employee_id is None:
            raise ValueError(
                "Missing the required parameter `employee_id` "
                "when calling `create_employee_payment_method`"
            )
        # verify the required parameter 'payment_method' is set
        if payment_method is None:
            raise ValueError(
                "Missing the required parameter `payment_method` "
                "when calling `create_employee_payment_method`"
            )
        # Assemble request components; the PaymentMethod model is serialized as the JSON body.
        collection_formats = {}
        path_params = {
            "EmployeeId": employee_id,
        }
        query_params = []
        header_params = {
            "Xero-Tenant-Id": xero_tenant_id,
        }
        local_var_files = {}
        form_params = []
        body_params = payment_method
        # HTTP header `Accept`
        header_params["Accept"] = self.api_client.select_header_accept(
            ["application/json"]
        )
        # HTTP header `Content-Type`
        header_params["Content-Type"] = self.api_client.select_header_content_type(
            ["application/json"]
        )
        # Authentication setting
        auth_settings = ["OAuth2"]
        url = self.get_resource_url("/Employees/{EmployeeId}/PaymentMethods")
        # POST the request; HTTP error statuses are translated into payroll-specific exceptions.
        try:
            return self.api_client.call_api(
                url,
                "POST",
                path_params,
                query_params,
                header_params,
                body=body_params,
                post_params=form_params,
                files=local_var_files,
                response_type="PaymentMethodObject",
                response_model_finder=self.get_model_finder(),
                auth_settings=auth_settings,
                _return_http_data_only=_return_http_data_only,
                _preload_content=_preload_content,
                _request_timeout=_request_timeout,
                collection_formats=collection_formats,
            )
        except exceptions.HTTPStatusException as error:
            raise translate_status_exception(
                error, self, "create_employee_payment_method"
            )
    def create_employee_salary_and_wage(
        self,
        xero_tenant_id,
        employee_id,
        salary_and_wage,
        _return_http_data_only=True,
        _preload_content=True,
        _request_timeout=None,
    ):
        """creates employee salary and wage record # noqa: E501
        OAuth2 scope: payroll.employees
        :param str xero_tenant_id: Xero identifier for Tenant (required)
        :param str employee_id: Employee id for single object (required)
        :param SalaryAndWage salary_and_wage: (required)
        :param bool _return_http_data_only: return received data only
        :param bool _preload_content: load received data in models
        :param bool _request_timeout: maximum wait time for response
        :return: SalaryAndWageObject
        """
        # verify the required parameter 'xero_tenant_id' is set
        if xero_tenant_id is None:
            raise ValueError(
                "Missing the required parameter `xero_tenant_id` "
                "when calling `create_employee_salary_and_wage`"
            )
        # verify the required parameter 'employee_id' is set
        if employee_id is None:
            raise ValueError(
                "Missing the required parameter `employee_id` "
                "when calling `create_employee_salary_and_wage`"
            )
        # verify the required parameter 'salary_and_wage' is set
        if salary_and_wage is None:
            raise ValueError(
                "Missing the required parameter `salary_and_wage` "
                "when calling `create_employee_salary_and_wage`"
            )
        # Assemble request components; the SalaryAndWage model is serialized as the JSON body.
        collection_formats = {}
        path_params = {
            "EmployeeId": employee_id,
        }
        query_params = []
        header_params = {
            "Xero-Tenant-Id": xero_tenant_id,
        }
        local_var_files = {}
        form_params = []
        body_params = salary_and_wage
        # HTTP header `Accept`
        header_params["Accept"] = self.api_client.select_header_accept(
            ["application/json"]
        )
        # HTTP header `Content-Type`
        header_params["Content-Type"] = self.api_client.select_header_content_type(
            ["application/json"]
        )
        # Authentication setting
        auth_settings = ["OAuth2"]
        url = self.get_resource_url("/Employees/{EmployeeId}/SalaryAndWages")
        # POST the request; HTTP error statuses are translated into payroll-specific exceptions.
        try:
            return self.api_client.call_api(
                url,
                "POST",
                path_params,
                query_params,
                header_params,
                body=body_params,
                post_params=form_params,
                files=local_var_files,
                response_type="SalaryAndWageObject",
                response_model_finder=self.get_model_finder(),
                auth_settings=auth_settings,
                _return_http_data_only=_return_http_data_only,
                _preload_content=_preload_content,
                _request_timeout=_request_timeout,
                collection_formats=collection_formats,
            )
        except exceptions.HTTPStatusException as error:
            raise translate_status_exception(
                error, self, "create_employee_salary_and_wage"
            )
    def create_employment(
        self,
        xero_tenant_id,
        employee_id,
        employment,
        _return_http_data_only=True,
        _preload_content=True,
        _request_timeout=None,
    ):
        """creates employment # noqa: E501
        OAuth2 scope: payroll.employees
        :param str xero_tenant_id: Xero identifier for Tenant (required)
        :param str employee_id: Employee id for single object (required)
        :param Employment employment: (required)
        :param bool _return_http_data_only: return received data only
        :param bool _preload_content: load received data in models
        :param bool _request_timeout: maximum wait time for response
        :return: EmploymentObject
        """
        # verify the required parameter 'xero_tenant_id' is set
        if xero_tenant_id is None:
            raise ValueError(
                "Missing the required parameter `xero_tenant_id` "
                "when calling `create_employment`"
            )
        # verify the required parameter 'employee_id' is set
        if employee_id is None:
            raise ValueError(
                "Missing the required parameter `employee_id` "
                "when calling `create_employment`"
            )
        # verify the required parameter 'employment' is set
        if employment is None:
            raise ValueError(
                "Missing the required parameter `employment` "
                "when calling `create_employment`"
            )
        # Assemble request components; the Employment model is serialized as the JSON body.
        collection_formats = {}
        path_params = {
            "EmployeeId": employee_id,
        }
        query_params = []
        header_params = {
            "Xero-Tenant-Id": xero_tenant_id,
        }
        local_var_files = {}
        form_params = []
        body_params = employment
        # HTTP header `Accept`
        header_params["Accept"] = self.api_client.select_header_accept(
            ["application/json"]
        )
        # HTTP header `Content-Type`
        header_params["Content-Type"] = self.api_client.select_header_content_type(
            ["application/json"]
        )
        # Authentication setting
        auth_settings = ["OAuth2"]
        url = self.get_resource_url("/Employees/{EmployeeId}/Employment")
        # POST the request; HTTP error statuses are translated into payroll-specific exceptions.
        try:
            return self.api_client.call_api(
                url,
                "POST",
                path_params,
                query_params,
                header_params,
                body=body_params,
                post_params=form_params,
                files=local_var_files,
                response_type="EmploymentObject",
                response_model_finder=self.get_model_finder(),
                auth_settings=auth_settings,
                _return_http_data_only=_return_http_data_only,
                _preload_content=_preload_content,
                _request_timeout=_request_timeout,
                collection_formats=collection_formats,
            )
        except exceptions.HTTPStatusException as error:
            raise translate_status_exception(error, self, "create_employment")
    def create_leave_type(
        self,
        xero_tenant_id,
        leave_type,
        _return_http_data_only=True,
        _preload_content=True,
        _request_timeout=None,
    ):
        """create a new leave type # noqa: E501
        OAuth2 scope: payroll.settings
        :param str xero_tenant_id: Xero identifier for Tenant (required)
        :param LeaveType leave_type: (required)
        :param bool _return_http_data_only: return received data only
        :param bool _preload_content: load received data in models
        :param bool _request_timeout: maximum wait time for response
        :return: LeaveTypeObject
        """
        # verify the required parameter 'xero_tenant_id' is set
        if xero_tenant_id is None:
            raise ValueError(
                "Missing the required parameter `xero_tenant_id` "
                "when calling `create_leave_type`"
            )
        # verify the required parameter 'leave_type' is set
        if leave_type is None:
            raise ValueError(
                "Missing the required parameter `leave_type` "
                "when calling `create_leave_type`"
            )
        # Assemble request components; the LeaveType model is serialized as the JSON body.
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {
            "Xero-Tenant-Id": xero_tenant_id,
        }
        local_var_files = {}
        form_params = []
        body_params = leave_type
        # HTTP header `Accept`
        header_params["Accept"] = self.api_client.select_header_accept(
            ["application/json"]
        )
        # HTTP header `Content-Type`
        header_params["Content-Type"] = self.api_client.select_header_content_type(
            ["application/json"]
        )
        # Authentication setting
        auth_settings = ["OAuth2"]
        url = self.get_resource_url("/LeaveTypes")
        # POST the request; HTTP error statuses are translated into payroll-specific exceptions.
        try:
            return self.api_client.call_api(
                url,
                "POST",
                path_params,
                query_params,
                header_params,
                body=body_params,
                post_params=form_params,
                files=local_var_files,
                response_type="LeaveTypeObject",
                response_model_finder=self.get_model_finder(),
                auth_settings=auth_settings,
                _return_http_data_only=_return_http_data_only,
                _preload_content=_preload_content,
                _request_timeout=_request_timeout,
                collection_formats=collection_formats,
            )
        except exceptions.HTTPStatusException as error:
            raise translate_status_exception(error, self, "create_leave_type")
def create_multiple_employee_earnings_template(
    self,
    xero_tenant_id,
    employee_id,
    earnings_template,
    _return_http_data_only=True,
    _preload_content=True,
    _request_timeout=None,
):
    """Create multiple employee earnings template records.  # noqa: E501
    OAuth2 scope: payroll.employees
    :param str xero_tenant_id: Xero identifier for Tenant (required)
    :param str employee_id: Employee id for single object (required)
    :param list[EarningsTemplate] earnings_template: (required)
    :param bool _return_http_data_only: return received data only
    :param bool _preload_content: load received data in models
    :param bool _request_timeout: maximum wait time for response
    :return: EmployeeEarningsTemplates
    """
    # Fail fast on missing required arguments, in declaration order.
    for name, value in (
        ("xero_tenant_id", xero_tenant_id),
        ("employee_id", employee_id),
        ("earnings_template", earnings_template),
    ):
        if value is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `create_multiple_employee_earnings_template`" % name
            )
    # Tenant header plus negotiated content headers; everything is JSON.
    headers = {
        "Xero-Tenant-Id": xero_tenant_id,
        "Accept": self.api_client.select_header_accept(["application/json"]),
        "Content-Type": self.api_client.select_header_content_type(
            ["application/json"]
        ),
    }
    try:
        return self.api_client.call_api(
            self.get_resource_url("/Employees/{EmployeeId}/paytemplateearnings"),
            "POST",
            {"EmployeeId": employee_id},
            [],  # no query parameters
            headers,
            body=earnings_template,
            post_params=[],
            files={},
            response_type="EmployeeEarningsTemplates",
            response_model_finder=self.get_model_finder(),
            auth_settings=["OAuth2"],
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats={},
        )
    except exceptions.HTTPStatusException as error:
        # Re-raise HTTP failures as the SDK's operation-specific exception.
        raise translate_status_exception(
            error, self, "create_multiple_employee_earnings_template"
        )
def create_pay_run(
    self,
    xero_tenant_id,
    pay_run,
    _return_http_data_only=True,
    _preload_content=True,
    _request_timeout=None,
):
    """Create a pay run.  # noqa: E501
    OAuth2 scope: payroll.payruns
    :param str xero_tenant_id: Xero identifier for Tenant (required)
    :param PayRun pay_run: (required)
    :param bool _return_http_data_only: return received data only
    :param bool _preload_content: load received data in models
    :param bool _request_timeout: maximum wait time for response
    :return: PayRunObject
    """
    # Fail fast on missing required arguments, in declaration order.
    for name, value in (
        ("xero_tenant_id", xero_tenant_id),
        ("pay_run", pay_run),
    ):
        if value is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `create_pay_run`" % name
            )
    # Tenant header plus negotiated content headers; everything is JSON.
    headers = {
        "Xero-Tenant-Id": xero_tenant_id,
        "Accept": self.api_client.select_header_accept(["application/json"]),
        "Content-Type": self.api_client.select_header_content_type(
            ["application/json"]
        ),
    }
    try:
        return self.api_client.call_api(
            self.get_resource_url("/PayRuns"),
            "POST",
            {},  # no path parameters
            [],  # no query parameters
            headers,
            body=pay_run,
            post_params=[],
            files={},
            response_type="PayRunObject",
            response_model_finder=self.get_model_finder(),
            auth_settings=["OAuth2"],
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats={},
        )
    except exceptions.HTTPStatusException as error:
        # Re-raise HTTP failures as the SDK's operation-specific exception.
        raise translate_status_exception(error, self, "create_pay_run")
def create_pay_run_calendar(
    self,
    xero_tenant_id,
    pay_run_calendar,
    _return_http_data_only=True,
    _preload_content=True,
    _request_timeout=None,
):
    """Create a new payrun calendar.  # noqa: E501
    OAuth2 scope: payroll.settings
    :param str xero_tenant_id: Xero identifier for Tenant (required)
    :param PayRunCalendar pay_run_calendar: (required)
    :param bool _return_http_data_only: return received data only
    :param bool _preload_content: load received data in models
    :param bool _request_timeout: maximum wait time for response
    :return: PayRunCalendarObject
    """
    # Fail fast on missing required arguments, in declaration order.
    for name, value in (
        ("xero_tenant_id", xero_tenant_id),
        ("pay_run_calendar", pay_run_calendar),
    ):
        if value is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `create_pay_run_calendar`" % name
            )
    # Tenant header plus negotiated content headers; everything is JSON.
    headers = {
        "Xero-Tenant-Id": xero_tenant_id,
        "Accept": self.api_client.select_header_accept(["application/json"]),
        "Content-Type": self.api_client.select_header_content_type(
            ["application/json"]
        ),
    }
    try:
        return self.api_client.call_api(
            self.get_resource_url("/PayRunCalendars"),
            "POST",
            {},  # no path parameters
            [],  # no query parameters
            headers,
            body=pay_run_calendar,
            post_params=[],
            files={},
            response_type="PayRunCalendarObject",
            response_model_finder=self.get_model_finder(),
            auth_settings=["OAuth2"],
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats={},
        )
    except exceptions.HTTPStatusException as error:
        # Re-raise HTTP failures as the SDK's operation-specific exception.
        raise translate_status_exception(error, self, "create_pay_run_calendar")
def create_reimbursement(
    self,
    xero_tenant_id,
    reimbursement,
    _return_http_data_only=True,
    _preload_content=True,
    _request_timeout=None,
):
    """Create a new reimbursement.  # noqa: E501
    OAuth2 scope: payroll.settings
    :param str xero_tenant_id: Xero identifier for Tenant (required)
    :param Reimbursement reimbursement: (required)
    :param bool _return_http_data_only: return received data only
    :param bool _preload_content: load received data in models
    :param bool _request_timeout: maximum wait time for response
    :return: ReimbursementObject
    """
    # Fail fast on missing required arguments, in declaration order.
    for name, value in (
        ("xero_tenant_id", xero_tenant_id),
        ("reimbursement", reimbursement),
    ):
        if value is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `create_reimbursement`" % name
            )
    # Tenant header plus negotiated content headers; everything is JSON.
    headers = {
        "Xero-Tenant-Id": xero_tenant_id,
        "Accept": self.api_client.select_header_accept(["application/json"]),
        "Content-Type": self.api_client.select_header_content_type(
            ["application/json"]
        ),
    }
    try:
        return self.api_client.call_api(
            self.get_resource_url("/Reimbursements"),
            "POST",
            {},  # no path parameters
            [],  # no query parameters
            headers,
            body=reimbursement,
            post_params=[],
            files={},
            response_type="ReimbursementObject",
            response_model_finder=self.get_model_finder(),
            auth_settings=["OAuth2"],
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats={},
        )
    except exceptions.HTTPStatusException as error:
        # Re-raise HTTP failures as the SDK's operation-specific exception.
        raise translate_status_exception(error, self, "create_reimbursement")
def create_superannuation(
    self,
    xero_tenant_id,
    benefit,
    _return_http_data_only=True,
    _preload_content=True,
    _request_timeout=None,
):
    """Create a new superannuation.  # noqa: E501
    OAuth2 scope: payroll.settings
    :param str xero_tenant_id: Xero identifier for Tenant (required)
    :param Benefit benefit: (required)
    :param bool _return_http_data_only: return received data only
    :param bool _preload_content: load received data in models
    :param bool _request_timeout: maximum wait time for response
    :return: SuperannuationObject
    """
    # Fail fast on missing required arguments, in declaration order.
    for name, value in (
        ("xero_tenant_id", xero_tenant_id),
        ("benefit", benefit),
    ):
        if value is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `create_superannuation`" % name
            )
    # Tenant header plus negotiated content headers; everything is JSON.
    headers = {
        "Xero-Tenant-Id": xero_tenant_id,
        "Accept": self.api_client.select_header_accept(["application/json"]),
        "Content-Type": self.api_client.select_header_content_type(
            ["application/json"]
        ),
    }
    try:
        return self.api_client.call_api(
            # NOTE: lowercase path segment is intentional; it matches the API spec.
            self.get_resource_url("/superannuations"),
            "POST",
            {},  # no path parameters
            [],  # no query parameters
            headers,
            body=benefit,
            post_params=[],
            files={},
            response_type="SuperannuationObject",
            response_model_finder=self.get_model_finder(),
            auth_settings=["OAuth2"],
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats={},
        )
    except exceptions.HTTPStatusException as error:
        # Re-raise HTTP failures as the SDK's operation-specific exception.
        raise translate_status_exception(error, self, "create_superannuation")
def create_timesheet(
    self,
    xero_tenant_id,
    timesheet,
    _return_http_data_only=True,
    _preload_content=True,
    _request_timeout=None,
):
    """Create a new timesheet.  # noqa: E501
    OAuth2 scope: payroll.timesheets
    :param str xero_tenant_id: Xero identifier for Tenant (required)
    :param Timesheet timesheet: (required)
    :param bool _return_http_data_only: return received data only
    :param bool _preload_content: load received data in models
    :param bool _request_timeout: maximum wait time for response
    :return: TimesheetObject
    """
    # Fail fast on missing required arguments, in declaration order.
    for name, value in (
        ("xero_tenant_id", xero_tenant_id),
        ("timesheet", timesheet),
    ):
        if value is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `create_timesheet`" % name
            )
    # Tenant header plus negotiated content headers; everything is JSON.
    headers = {
        "Xero-Tenant-Id": xero_tenant_id,
        "Accept": self.api_client.select_header_accept(["application/json"]),
        "Content-Type": self.api_client.select_header_content_type(
            ["application/json"]
        ),
    }
    try:
        return self.api_client.call_api(
            self.get_resource_url("/Timesheets"),
            "POST",
            {},  # no path parameters
            [],  # no query parameters
            headers,
            body=timesheet,
            post_params=[],
            files={},
            response_type="TimesheetObject",
            response_model_finder=self.get_model_finder(),
            auth_settings=["OAuth2"],
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats={},
        )
    except exceptions.HTTPStatusException as error:
        # Re-raise HTTP failures as the SDK's operation-specific exception.
        raise translate_status_exception(error, self, "create_timesheet")
def create_timesheet_line(
    self,
    xero_tenant_id,
    timesheet_id,
    timesheet_line,
    _return_http_data_only=True,
    _preload_content=True,
    _request_timeout=None,
):
    """Create a new timesheet line.  # noqa: E501
    OAuth2 scope: payroll.timesheets
    :param str xero_tenant_id: Xero identifier for Tenant (required)
    :param str timesheet_id: Identifier for the timesheet (required)
    :param TimesheetLine timesheet_line: (required)
    :param bool _return_http_data_only: return received data only
    :param bool _preload_content: load received data in models
    :param bool _request_timeout: maximum wait time for response
    :return: TimesheetLineObject
    """
    # Fail fast on missing required arguments, in declaration order.
    for name, value in (
        ("xero_tenant_id", xero_tenant_id),
        ("timesheet_id", timesheet_id),
        ("timesheet_line", timesheet_line),
    ):
        if value is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `create_timesheet_line`" % name
            )
    # Tenant header plus negotiated content headers; everything is JSON.
    headers = {
        "Xero-Tenant-Id": xero_tenant_id,
        "Accept": self.api_client.select_header_accept(["application/json"]),
        "Content-Type": self.api_client.select_header_content_type(
            ["application/json"]
        ),
    }
    try:
        return self.api_client.call_api(
            self.get_resource_url("/Timesheets/{TimesheetID}/Lines"),
            "POST",
            {"TimesheetID": timesheet_id},
            [],  # no query parameters
            headers,
            body=timesheet_line,
            post_params=[],
            files={},
            response_type="TimesheetLineObject",
            response_model_finder=self.get_model_finder(),
            auth_settings=["OAuth2"],
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats={},
        )
    except exceptions.HTTPStatusException as error:
        # Re-raise HTTP failures as the SDK's operation-specific exception.
        raise translate_status_exception(error, self, "create_timesheet_line")
def delete_employee_earnings_template(
    self,
    xero_tenant_id,
    employee_id,
    pay_template_earning_id,
    _return_http_data_only=True,
    _preload_content=True,
    _request_timeout=None,
):
    """Delete an employee earnings template record.  # noqa: E501
    OAuth2 scope: payroll.employees
    :param str xero_tenant_id: Xero identifier for Tenant (required)
    :param str employee_id: Employee id for single object (required)
    :param str pay_template_earning_id: Id for single pay template earnings object (required)
    :param bool _return_http_data_only: return received data only
    :param bool _preload_content: load received data in models
    :param bool _request_timeout: maximum wait time for response
    :return: EarningsTemplateObject
    """
    # Fail fast on missing required arguments, in declaration order.
    for name, value in (
        ("xero_tenant_id", xero_tenant_id),
        ("employee_id", employee_id),
        ("pay_template_earning_id", pay_template_earning_id),
    ):
        if value is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `delete_employee_earnings_template`" % name
            )
    # DELETE sends no body, so only tenant and Accept headers are needed.
    headers = {
        "Xero-Tenant-Id": xero_tenant_id,
        "Accept": self.api_client.select_header_accept(["application/json"]),
    }
    try:
        return self.api_client.call_api(
            self.get_resource_url(
                "/Employees/{EmployeeId}/PayTemplates/earnings/{PayTemplateEarningID}"
            ),
            "DELETE",
            {
                "EmployeeId": employee_id,
                "PayTemplateEarningID": pay_template_earning_id,
            },
            [],  # no query parameters
            headers,
            body=None,
            post_params=[],
            files={},
            response_type="EarningsTemplateObject",
            response_model_finder=self.get_model_finder(),
            auth_settings=["OAuth2"],
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats={},
        )
    except exceptions.HTTPStatusException as error:
        # Re-raise HTTP failures as the SDK's operation-specific exception.
        raise translate_status_exception(
            error, self, "delete_employee_earnings_template"
        )
def delete_employee_leave(
    self,
    xero_tenant_id,
    employee_id,
    leave_id,
    _return_http_data_only=True,
    _preload_content=True,
    _request_timeout=None,
):
    """Delete an employee leave record.  # noqa: E501
    OAuth2 scope: payroll.employees
    :param str xero_tenant_id: Xero identifier for Tenant (required)
    :param str employee_id: Employee id for single object (required)
    :param str leave_id: Leave id for single object (required)
    :param bool _return_http_data_only: return received data only
    :param bool _preload_content: load received data in models
    :param bool _request_timeout: maximum wait time for response
    :return: EmployeeLeaveObject
    """
    # Fail fast on missing required arguments, in declaration order.
    for name, value in (
        ("xero_tenant_id", xero_tenant_id),
        ("employee_id", employee_id),
        ("leave_id", leave_id),
    ):
        if value is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `delete_employee_leave`" % name
            )
    # DELETE sends no body, so only tenant and Accept headers are needed.
    headers = {
        "Xero-Tenant-Id": xero_tenant_id,
        "Accept": self.api_client.select_header_accept(["application/json"]),
    }
    try:
        return self.api_client.call_api(
            self.get_resource_url("/Employees/{EmployeeId}/Leave/{LeaveID}"),
            "DELETE",
            {"EmployeeId": employee_id, "LeaveID": leave_id},
            [],  # no query parameters
            headers,
            body=None,
            post_params=[],
            files={},
            response_type="EmployeeLeaveObject",
            response_model_finder=self.get_model_finder(),
            auth_settings=["OAuth2"],
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats={},
        )
    except exceptions.HTTPStatusException as error:
        # Re-raise HTTP failures as the SDK's operation-specific exception.
        raise translate_status_exception(error, self, "delete_employee_leave")
def delete_employee_salary_and_wage(
    self,
    xero_tenant_id,
    employee_id,
    salary_and_wages_id,
    _return_http_data_only=True,
    _preload_content=True,
    _request_timeout=None,
):
    """Delete an employee salary and wages record.  # noqa: E501
    OAuth2 scope: payroll.employees
    :param str xero_tenant_id: Xero identifier for Tenant (required)
    :param str employee_id: Employee id for single object (required)
    :param str salary_and_wages_id: Id for single salary and wages object (required)
    :param bool _return_http_data_only: return received data only
    :param bool _preload_content: load received data in models
    :param bool _request_timeout: maximum wait time for response
    :return: SalaryAndWageObject
    """
    # Fail fast on missing required arguments, in declaration order.
    for name, value in (
        ("xero_tenant_id", xero_tenant_id),
        ("employee_id", employee_id),
        ("salary_and_wages_id", salary_and_wages_id),
    ):
        if value is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `delete_employee_salary_and_wage`" % name
            )
    # DELETE sends no body, so only tenant and Accept headers are needed.
    headers = {
        "Xero-Tenant-Id": xero_tenant_id,
        "Accept": self.api_client.select_header_accept(["application/json"]),
    }
    try:
        return self.api_client.call_api(
            self.get_resource_url(
                "/Employees/{EmployeeId}/SalaryAndWages/{SalaryAndWagesID}"
            ),
            "DELETE",
            {
                "EmployeeId": employee_id,
                "SalaryAndWagesID": salary_and_wages_id,
            },
            [],  # no query parameters
            headers,
            body=None,
            post_params=[],
            files={},
            response_type="SalaryAndWageObject",
            response_model_finder=self.get_model_finder(),
            auth_settings=["OAuth2"],
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats={},
        )
    except exceptions.HTTPStatusException as error:
        # Re-raise HTTP failures as the SDK's operation-specific exception.
        raise translate_status_exception(
            error, self, "delete_employee_salary_and_wage"
        )
def delete_timesheet(
    self,
    xero_tenant_id,
    timesheet_id,
    _return_http_data_only=True,
    _preload_content=True,
    _request_timeout=None,
):
    """Delete a timesheet.  # noqa: E501
    OAuth2 scope: payroll.timesheets
    :param str xero_tenant_id: Xero identifier for Tenant (required)
    :param str timesheet_id: Identifier for the timesheet (required)
    :param bool _return_http_data_only: return received data only
    :param bool _preload_content: load received data in models
    :param bool _request_timeout: maximum wait time for response
    :return: TimesheetLine
    """
    # Fail fast on missing required arguments, in declaration order.
    for name, value in (
        ("xero_tenant_id", xero_tenant_id),
        ("timesheet_id", timesheet_id),
    ):
        if value is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `delete_timesheet`" % name
            )
    # DELETE sends no body, so only tenant and Accept headers are needed.
    headers = {
        "Xero-Tenant-Id": xero_tenant_id,
        "Accept": self.api_client.select_header_accept(["application/json"]),
    }
    try:
        return self.api_client.call_api(
            self.get_resource_url("/Timesheets/{TimesheetID}"),
            "DELETE",
            {"TimesheetID": timesheet_id},
            [],  # no query parameters
            headers,
            body=None,
            post_params=[],
            files={},
            # NOTE: the API spec declares TimesheetLine (not TimesheetObject)
            # as the response model for this operation.
            response_type="TimesheetLine",
            response_model_finder=self.get_model_finder(),
            auth_settings=["OAuth2"],
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats={},
        )
    except exceptions.HTTPStatusException as error:
        # Re-raise HTTP failures as the SDK's operation-specific exception.
        raise translate_status_exception(error, self, "delete_timesheet")
def delete_timesheet_line(
    self,
    xero_tenant_id,
    timesheet_id,
    timesheet_line_id,
    _return_http_data_only=True,
    _preload_content=True,
    _request_timeout=None,
):
    """Delete a timesheet line.  # noqa: E501
    OAuth2 scope: payroll.timesheets
    :param str xero_tenant_id: Xero identifier for Tenant (required)
    :param str timesheet_id: Identifier for the timesheet (required)
    :param str timesheet_line_id: Identifier for the timesheet line (required)
    :param bool _return_http_data_only: return received data only
    :param bool _preload_content: load received data in models
    :param bool _request_timeout: maximum wait time for response
    :return: TimesheetLine
    """
    # Fail fast on missing required arguments, in declaration order.
    for name, value in (
        ("xero_tenant_id", xero_tenant_id),
        ("timesheet_id", timesheet_id),
        ("timesheet_line_id", timesheet_line_id),
    ):
        if value is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `delete_timesheet_line`" % name
            )
    # DELETE sends no body, so only tenant and Accept headers are needed.
    headers = {
        "Xero-Tenant-Id": xero_tenant_id,
        "Accept": self.api_client.select_header_accept(["application/json"]),
    }
    try:
        return self.api_client.call_api(
            self.get_resource_url(
                "/Timesheets/{TimesheetID}/Lines/{TimesheetLineID}"
            ),
            "DELETE",
            {
                "TimesheetID": timesheet_id,
                "TimesheetLineID": timesheet_line_id,
            },
            [],  # no query parameters
            headers,
            body=None,
            post_params=[],
            files={},
            response_type="TimesheetLine",
            response_model_finder=self.get_model_finder(),
            auth_settings=["OAuth2"],
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats={},
        )
    except exceptions.HTTPStatusException as error:
        # Re-raise HTTP failures as the SDK's operation-specific exception.
        raise translate_status_exception(error, self, "delete_timesheet_line")
def get_deduction(
    self,
    xero_tenant_id,
    deduction_id,
    _return_http_data_only=True,
    _preload_content=True,
    _request_timeout=None,
):
    """Retrieve a single deduction by id.  # noqa: E501
    OAuth2 scope: payroll.settings.read, payroll.settings
    :param str xero_tenant_id: Xero identifier for Tenant (required)
    :param str deduction_id: Identifier for the deduction (required)
    :param bool _return_http_data_only: return received data only
    :param bool _preload_content: load received data in models
    :param bool _request_timeout: maximum wait time for response
    :return: DeductionObject
    """
    # Fail fast on missing required arguments, in declaration order.
    for name, value in (
        ("xero_tenant_id", xero_tenant_id),
        ("deduction_id", deduction_id),
    ):
        if value is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `get_deduction`" % name
            )
    # GET sends no body, so only tenant and Accept headers are needed.
    headers = {
        "Xero-Tenant-Id": xero_tenant_id,
        "Accept": self.api_client.select_header_accept(["application/json"]),
    }
    try:
        return self.api_client.call_api(
            self.get_resource_url("/Deductions/{deductionId}"),
            "GET",
            {"deductionId": deduction_id},
            [],  # no query parameters
            headers,
            body=None,
            post_params=[],
            files={},
            response_type="DeductionObject",
            response_model_finder=self.get_model_finder(),
            auth_settings=["OAuth2"],
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats={},
        )
    except exceptions.HTTPStatusException as error:
        # Re-raise HTTP failures as the SDK's operation-specific exception.
        raise translate_status_exception(error, self, "get_deduction")
def get_deductions(
    self,
    xero_tenant_id,
    page=empty,
    _return_http_data_only=True,
    _preload_content=True,
    _request_timeout=None,
):
    """Search deductions.  # noqa: E501
    OAuth2 scope: payroll.settings.read, payroll.settings
    :param str xero_tenant_id: Xero identifier for Tenant (required)
    :param int page: Page number which specifies the set of records to retrieve. By default the number of the records per set is 100.
    :param bool _return_http_data_only: return received data only
    :param bool _preload_content: load received data in models
    :param bool _request_timeout: maximum wait time for response
    :return: Deductions
    """
    # The tenant id is the only required argument.
    if xero_tenant_id is None:
        raise ValueError(
            "Missing the required parameter `xero_tenant_id` "
            "when calling `get_deductions`"
        )
    # `empty` is a sentinel: the page parameter is only sent when the
    # caller actually supplied one.
    query_params = [] if page is empty else [("page", page)]
    # GET sends no body, so only tenant and Accept headers are needed.
    headers = {
        "Xero-Tenant-Id": xero_tenant_id,
        "Accept": self.api_client.select_header_accept(["application/json"]),
    }
    try:
        return self.api_client.call_api(
            self.get_resource_url("/Deductions"),
            "GET",
            {},  # no path parameters
            query_params,
            headers,
            body=None,
            post_params=[],
            files={},
            response_type="Deductions",
            response_model_finder=self.get_model_finder(),
            auth_settings=["OAuth2"],
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats={},
        )
    except exceptions.HTTPStatusException as error:
        # Re-raise HTTP failures as the SDK's operation-specific exception.
        raise translate_status_exception(error, self, "get_deductions")
def get_earnings_rate(
    self,
    xero_tenant_id,
    earnings_rate_id,
    _return_http_data_only=True,
    _preload_content=True,
    _request_timeout=None,
):
    """Retrieve a single earnings rates by id.  # noqa: E501
    OAuth2 scope: payroll.settings.read, payroll.settings
    :param str xero_tenant_id: Xero identifier for Tenant (required)
    :param str earnings_rate_id: Identifier for the earnings rate (required)
    :param bool _return_http_data_only: return received data only
    :param bool _preload_content: load received data in models
    :param bool _request_timeout: maximum wait time for response
    :return: EarningsRateObject
    """
    # Fail fast on missing required arguments, in declaration order.
    for name, value in (
        ("xero_tenant_id", xero_tenant_id),
        ("earnings_rate_id", earnings_rate_id),
    ):
        if value is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `get_earnings_rate`" % name
            )
    # GET sends no body, so only tenant and Accept headers are needed.
    headers = {
        "Xero-Tenant-Id": xero_tenant_id,
        "Accept": self.api_client.select_header_accept(["application/json"]),
    }
    try:
        return self.api_client.call_api(
            self.get_resource_url("/EarningsRates/{EarningsRateID}"),
            "GET",
            {"EarningsRateID": earnings_rate_id},
            [],  # no query parameters
            headers,
            body=None,
            post_params=[],
            files={},
            response_type="EarningsRateObject",
            response_model_finder=self.get_model_finder(),
            auth_settings=["OAuth2"],
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats={},
        )
    except exceptions.HTTPStatusException as error:
        # Re-raise HTTP failures as the SDK's operation-specific exception.
        raise translate_status_exception(error, self, "get_earnings_rate")
def get_earnings_rates(
self,
xero_tenant_id,
page=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""searches earnings rates # noqa: E501
OAuth2 scope: payroll.settings.read, payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param int page: Page number which specifies the set of records to retrieve. By default the number of the records per set is 100.
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EarningsRates
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_earnings_rates`"
)
collection_formats = {}
path_params = {}
query_params = []
if page is not empty:
query_params.append(("page", page))
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/EarningsRates")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EarningsRates",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_earnings_rates")
def get_employee(
self,
xero_tenant_id,
employee_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""searches employees # noqa: E501
OAuth2 scope: payroll.employees.read, payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmployeeObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_employee`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `get_employee`"
)
collection_formats = {}
path_params = {
"EmployeeId": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeId}")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmployeeObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_employee")
def get_employee_leave_balances(
self,
xero_tenant_id,
employee_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""search employee leave balances # noqa: E501
OAuth2 scope: payroll.employees.read, payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmployeeLeaveBalances
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_employee_leave_balances`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `get_employee_leave_balances`"
)
collection_formats = {}
path_params = {
"EmployeeId": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeId}/LeaveBalances")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmployeeLeaveBalances",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_employee_leave_balances")
def get_employee_leave_periods(
self,
xero_tenant_id,
employee_id,
start_date=empty,
end_date=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""searches employee leave periods # noqa: E501
OAuth2 scope: payroll.employees.read, payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param date start_date: Filter by start date
:param date end_date: Filter by end date
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: LeavePeriods
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_employee_leave_periods`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `get_employee_leave_periods`"
)
collection_formats = {}
path_params = {
"EmployeeId": employee_id,
}
query_params = []
if start_date is not empty:
query_params.append(("startDate", start_date))
if end_date is not empty:
query_params.append(("endDate", end_date))
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeId}/LeavePeriods")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="LeavePeriods",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_employee_leave_periods")
def get_employee_leave_types(
self,
xero_tenant_id,
employee_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""searches employee leave types # noqa: E501
OAuth2 scope: payroll.employees.read, payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmployeeLeaveTypes
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_employee_leave_types`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `get_employee_leave_types`"
)
collection_formats = {}
path_params = {
"EmployeeId": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeId}/LeaveTypes")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmployeeLeaveTypes",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_employee_leave_types")
def get_employee_leaves(
self,
xero_tenant_id,
employee_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""search employee leave records # noqa: E501
OAuth2 scope: payroll.employees.read, payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmployeeLeaves
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_employee_leaves`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `get_employee_leaves`"
)
collection_formats = {}
path_params = {
"EmployeeId": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeId}/Leave")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmployeeLeaves",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_employee_leaves")
def get_employee_opening_balances(
self,
xero_tenant_id,
employee_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""retrieve employee openingbalances # noqa: E501
OAuth2 scope: payroll.employees.read, payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmployeeOpeningBalancesObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_employee_opening_balances`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `get_employee_opening_balances`"
)
collection_formats = {}
path_params = {
"EmployeeId": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeId}/openingBalances")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmployeeOpeningBalancesObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "get_employee_opening_balances"
)
def get_employee_pay_templates(
self,
xero_tenant_id,
employee_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""searches employee pay templates # noqa: E501
OAuth2 scope: payroll.employees.read, payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmployeePayTemplates
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_employee_pay_templates`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `get_employee_pay_templates`"
)
collection_formats = {}
path_params = {
"EmployeeId": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeId}/PayTemplates")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmployeePayTemplates",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_employee_pay_templates")
def get_employee_payment_method(
self,
xero_tenant_id,
employee_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""retrieves an employee's payment method # noqa: E501
OAuth2 scope: payroll.employees.read, payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: PaymentMethodObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_employee_payment_method`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `get_employee_payment_method`"
)
collection_formats = {}
path_params = {
"EmployeeId": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeId}/PaymentMethods")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PaymentMethodObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_employee_payment_method")
def get_employee_salary_and_wage(
self,
xero_tenant_id,
employee_id,
salary_and_wages_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""get employee salary and wages record by id # noqa: E501
OAuth2 scope: payroll.employees, payroll.employees.read
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param str salary_and_wages_id: Id for single pay template earnings object (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: SalaryAndWages
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_employee_salary_and_wage`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `get_employee_salary_and_wage`"
)
# verify the required parameter 'salary_and_wages_id' is set
if salary_and_wages_id is None:
raise ValueError(
"Missing the required parameter `salary_and_wages_id` "
"when calling `get_employee_salary_and_wage`"
)
collection_formats = {}
path_params = {
"EmployeeId": employee_id,
"SalaryAndWagesID": salary_and_wages_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url(
"/Employees/{EmployeeId}/SalaryAndWages/{SalaryAndWagesID}"
)
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="SalaryAndWages",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "get_employee_salary_and_wage"
)
def get_employee_salary_and_wages(
self,
xero_tenant_id,
employee_id,
page=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""retrieves an employee's salary and wages # noqa: E501
OAuth2 scope: payroll.employees.read, payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param int page: Page number which specifies the set of records to retrieve. By default the number of the records per set is 100.
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: SalaryAndWages
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_employee_salary_and_wages`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `get_employee_salary_and_wages`"
)
collection_formats = {}
path_params = {
"EmployeeId": employee_id,
}
query_params = []
if page is not empty:
query_params.append(("page", page))
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeId}/SalaryAndWages")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="SalaryAndWages",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "get_employee_salary_and_wages"
)
def get_employee_tax(
self,
xero_tenant_id,
employee_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""searches tax records for an employee # noqa: E501
OAuth2 scope: payroll.employees.read, payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmployeeTaxObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_employee_tax`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `get_employee_tax`"
)
collection_formats = {}
path_params = {
"EmployeeId": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeId}/Tax")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmployeeTaxObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_employee_tax")
def get_employees(
self,
xero_tenant_id,
first_name=empty,
last_name=empty,
page=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""searches employees # noqa: E501
OAuth2 scope: payroll.employees.read
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str first_name: Filter by first name
:param str last_name: Filter by last name
:param int page: Page number which specifies the set of records to retrieve. By default the number of the records per set is 100.
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Employees
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_employees`"
)
collection_formats = {}
path_params = {}
query_params = []
if first_name is not empty:
query_params.append(("firstName", first_name))
if last_name is not empty:
query_params.append(("lastName", last_name))
if page is not empty:
query_params.append(("page", page))
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="Employees",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_employees")
def get_leave_type(
self,
xero_tenant_id,
leave_type_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""retrieve a single leave type by id # noqa: E501
OAuth2 scope: payroll.settings.read, payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str leave_type_id: Identifier for the leave type (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: LeaveTypeObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_leave_type`"
)
# verify the required parameter 'leave_type_id' is set
if leave_type_id is None:
raise ValueError(
"Missing the required parameter `leave_type_id` "
"when calling `get_leave_type`"
)
collection_formats = {}
path_params = {
"LeaveTypeID": leave_type_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/LeaveTypes/{LeaveTypeID}")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="LeaveTypeObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_leave_type")
def get_leave_types(
self,
xero_tenant_id,
page=empty,
active_only=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""searches leave types # noqa: E501
OAuth2 scope: payroll.settings.read, payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param int page: Page number which specifies the set of records to retrieve. By default the number of the records per set is 100.
:param bool active_only: Filters leave types by active status. By default the API returns all leave types.
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: LeaveTypes
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_leave_types`"
)
collection_formats = {}
path_params = {}
query_params = []
if page is not empty:
query_params.append(("page", page))
if active_only is not empty:
query_params.append(("ActiveOnly", active_only))
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/LeaveTypes")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="LeaveTypes",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_leave_types")
def get_pay_run(
self,
xero_tenant_id,
pay_run_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""retrieve a single pay run by id # noqa: E501
OAuth2 scope: payroll.payruns.read, payroll.payruns
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str pay_run_id: Identifier for the pay run (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: PayRunObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_pay_run`"
)
# verify the required parameter 'pay_run_id' is set
if pay_run_id is None:
raise ValueError(
"Missing the required parameter `pay_run_id` "
"when calling `get_pay_run`"
)
collection_formats = {}
path_params = {
"PayRunID": pay_run_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/PayRuns/{PayRunID}")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PayRunObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_pay_run")
def get_pay_run_calendar(
self,
xero_tenant_id,
payroll_calendar_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""retrieve a single payrun calendar by id # noqa: E501
OAuth2 scope: payroll.settings.read, payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str payroll_calendar_id: Identifier for the payrun calendars (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: PayRunCalendarObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_pay_run_calendar`"
)
# verify the required parameter 'payroll_calendar_id' is set
if payroll_calendar_id is None:
raise ValueError(
"Missing the required parameter `payroll_calendar_id` "
"when calling `get_pay_run_calendar`"
)
collection_formats = {}
path_params = {
"PayrollCalendarID": payroll_calendar_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/PayRunCalendars/{PayrollCalendarID}")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PayRunCalendarObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_pay_run_calendar")
def get_pay_run_calendars(
self,
xero_tenant_id,
page=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""searches payrun calendars # noqa: E501
OAuth2 scope: payroll.settings.read, payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param int page: Page number which specifies the set of records to retrieve. By default the number of the records per set is 100.
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: PayRunCalendars
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_pay_run_calendars`"
)
collection_formats = {}
path_params = {}
query_params = []
if page is not empty:
query_params.append(("page", page))
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/PayRunCalendars")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PayRunCalendars",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_pay_run_calendars")
def get_pay_runs(
self,
xero_tenant_id,
page=empty,
status=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""searches pay runs # noqa: E501
OAuth2 scope: payroll.payruns.read, payroll.payruns
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param int page: Page number which specifies the set of records to retrieve. By default the number of the records per set is 100.
:param str status: By default get payruns will return all the payruns for an organization. You can add GET https://api.xero.com/payroll.xro/2.0/payRuns?statu={PayRunStatus} to filter the payruns by status.
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: PayRuns
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_pay_runs`"
)
collection_formats = {}
path_params = {}
query_params = []
if page is not empty:
query_params.append(("page", page))
if status is not empty:
query_params.append(("status", status))
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/PayRuns")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PayRuns",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_pay_runs")
def get_pay_slip(
self,
xero_tenant_id,
pay_slip_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""retrieve a single payslip by id # noqa: E501
OAuth2 scope: payroll.payslip.read, payroll.payslip
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str pay_slip_id: Identifier for the payslip (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: PaySlipObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_pay_slip`"
)
# verify the required parameter 'pay_slip_id' is set
if pay_slip_id is None:
raise ValueError(
"Missing the required parameter `pay_slip_id` "
"when calling `get_pay_slip`"
)
collection_formats = {}
path_params = {
"PaySlipID": pay_slip_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/PaySlips/{PaySlipID}")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PaySlipObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_pay_slip")
def get_pay_slips(
self,
xero_tenant_id,
pay_run_id,
page=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""searches payslips # noqa: E501
OAuth2 scope: payroll.payslip.read, payroll.payslip
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str pay_run_id: PayrunID which specifies the containing payrun of payslips to retrieve. By default, the API does not group payslips by payrun. (required)
:param int page: Page number which specifies the set of records to retrieve. By default the number of the records per set is 100.
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: PaySlips
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_pay_slips`"
)
# verify the required parameter 'pay_run_id' is set
if pay_run_id is None:
raise ValueError(
"Missing the required parameter `pay_run_id` "
"when calling `get_pay_slips`"
)
collection_formats = {}
path_params = {}
query_params = [
("PayRunID", pay_run_id),
]
if page is not empty:
query_params.append(("page", page))
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/PaySlips")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PaySlips",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_pay_slips")
def get_reimbursement(
self,
xero_tenant_id,
reimbursement_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""retrieve a single reimbursement by id # noqa: E501
OAuth2 scope: payroll.settings.read, payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str reimbursement_id: Identifier for the reimbursement (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: ReimbursementObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_reimbursement`"
)
# verify the required parameter 'reimbursement_id' is set
if reimbursement_id is None:
raise ValueError(
"Missing the required parameter `reimbursement_id` "
"when calling `get_reimbursement`"
)
collection_formats = {}
path_params = {
"ReimbursementID": reimbursement_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Reimbursements/{ReimbursementID}")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="ReimbursementObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_reimbursement")
def get_reimbursements(
self,
xero_tenant_id,
page=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""searches reimbursements # noqa: E501
OAuth2 scope: payroll.settings.read, payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param int page: Page number which specifies the set of records to retrieve. By default the number of the records per set is 100.
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Reimbursements
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_reimbursements`"
)
collection_formats = {}
path_params = {}
query_params = []
if page is not empty:
query_params.append(("page", page))
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Reimbursements")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="Reimbursements",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_reimbursements")
def get_settings(
self,
xero_tenant_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""searches settings # noqa: E501
OAuth2 scope: payroll.settings.read, settings.payslip
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Settings
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_settings`"
)
collection_formats = {}
path_params = {}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Settings")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="Settings",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_settings")
def get_statutory_deduction(
self,
xero_tenant_id,
id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""retrieve a single statutory deduction by id # noqa: E501
OAuth2 scope: payroll.settings.read, payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str id: Identifier for the statutory deduction (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: StatutoryDeductionObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_statutory_deduction`"
)
# verify the required parameter 'id' is set
if id is None:
raise ValueError(
"Missing the required parameter `id` "
"when calling `get_statutory_deduction`"
)
collection_formats = {}
path_params = {
"Id": id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/StatutoryDeductions/{Id}")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="StatutoryDeductionObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_statutory_deduction")
def get_statutory_deductions(
self,
xero_tenant_id,
page=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""searches statutory deductions # noqa: E501
OAuth2 scope: payroll.settings.read, payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param int page: Page number which specifies the set of records to retrieve. By default the number of the records per set is 100.
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: StatutoryDeductions
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_statutory_deductions`"
)
collection_formats = {}
path_params = {}
query_params = []
if page is not empty:
query_params.append(("page", page))
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/StatutoryDeductions")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="StatutoryDeductions",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_statutory_deductions")
def get_superannuation(
self,
xero_tenant_id,
superannuation_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""searches for a unique superannuation # noqa: E501
OAuth2 scope: payroll.settings.read, payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str superannuation_id: Identifier for the superannuation (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: SuperannuationObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_superannuation`"
)
# verify the required parameter 'superannuation_id' is set
if superannuation_id is None:
raise ValueError(
"Missing the required parameter `superannuation_id` "
"when calling `get_superannuation`"
)
collection_formats = {}
path_params = {
"SuperannuationID": superannuation_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/superannuations/{SuperannuationID}")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="SuperannuationObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_superannuation")
def get_superannuations(
self,
xero_tenant_id,
page=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""searches statutory deductions # noqa: E501
OAuth2 scope: payroll.settings.read, payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param int page: Page number which specifies the set of records to retrieve. By default the number of the records per set is 100.
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Superannuations
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_superannuations`"
)
collection_formats = {}
path_params = {}
query_params = []
if page is not empty:
query_params.append(("page", page))
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/superannuations")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="Superannuations",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_superannuations")
def get_timesheet(
self,
xero_tenant_id,
timesheet_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""retrieve a single timesheet by id # noqa: E501
OAuth2 scope: payroll.timesheets.read, timesheets.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str timesheet_id: Identifier for the timesheet (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: TimesheetObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_timesheet`"
)
# verify the required parameter 'timesheet_id' is set
if timesheet_id is None:
raise ValueError(
"Missing the required parameter `timesheet_id` "
"when calling `get_timesheet`"
)
collection_formats = {}
path_params = {
"TimesheetID": timesheet_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Timesheets/{TimesheetID}")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="TimesheetObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_timesheet")
def get_timesheets(
self,
xero_tenant_id,
page=empty,
employee_id=empty,
payroll_calendar_id=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""searches timesheets # noqa: E501
OAuth2 scope: payroll.timesheets.read, payroll.timesheets
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param int page: Page number which specifies the set of records to retrieve. By default the number of the records per set is 100.
:param str employee_id: By default get Timesheets will return the timesheets for all employees in an organization. You can add GET https://…/timesheets?filter=employeeId=={EmployeeId} to get only the timesheets of a particular employee.
:param str payroll_calendar_id: By default get Timesheets will return all the timesheets for an organization. You can add GET https://…/timesheets?filter=payrollCalendarId=={PayrollCalendarID} to filter the timesheets by payroll calendar id
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Timesheets
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_timesheets`"
)
collection_formats = {}
path_params = {}
query_params = []
if page is not empty:
query_params.append(("page", page))
if employee_id is not empty:
query_params.append(("employeeId", employee_id))
if payroll_calendar_id is not empty:
query_params.append(("payrollCalendarId", payroll_calendar_id))
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Timesheets")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="Timesheets",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_timesheets")
def get_tracking_categories(
self,
xero_tenant_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""searches tracking categories # noqa: E501
OAuth2 scope: payroll.settings.read, settings.payslip
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: TrackingCategories
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_tracking_categories`"
)
collection_formats = {}
path_params = {}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/settings/trackingCategories")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="TrackingCategories",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_tracking_categories")
def revert_timesheet(
self,
xero_tenant_id,
timesheet_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""revert a timesheet to draft # noqa: E501
OAuth2 scope: payroll.timesheets
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str timesheet_id: Identifier for the timesheet (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: TimesheetObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `revert_timesheet`"
)
# verify the required parameter 'timesheet_id' is set
if timesheet_id is None:
raise ValueError(
"Missing the required parameter `timesheet_id` "
"when calling `revert_timesheet`"
)
collection_formats = {}
path_params = {
"TimesheetID": timesheet_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Timesheets/{TimesheetID}/RevertToDraft")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="TimesheetObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "revert_timesheet")
def update_employee(
self,
xero_tenant_id,
employee_id,
employee,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""updates employee # noqa: E501
OAuth2 scope: payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param Employee employee: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmployeeObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `update_employee`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `update_employee`"
)
# verify the required parameter 'employee' is set
if employee is None:
raise ValueError(
"Missing the required parameter `employee` "
"when calling `update_employee`"
)
collection_formats = {}
path_params = {
"EmployeeId": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = employee
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeId}")
try:
return self.api_client.call_api(
url,
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmployeeObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "update_employee")
def update_employee_earnings_template(
    self,
    xero_tenant_id,
    employee_id,
    pay_template_earning_id,
    earnings_template,
    _return_http_data_only=True,
    _preload_content=True,
    _request_timeout=None,
):
    """updates employee earnings template records  # noqa: E501

    OAuth2 scope: payroll.employees
    :param str xero_tenant_id: Xero identifier for Tenant (required)
    :param str employee_id: Employee id for single object (required)
    :param str pay_template_earning_id: Id for single pay template earnings object (required)
    :param EarningsTemplate earnings_template: (required)
    :param bool _return_http_data_only: return received data only
    :param bool _preload_content: load received data in models
    :param bool _request_timeout: maximum wait time for response
    :return: EmployeeObject wrapper for the earnings template (EarningsTemplateObject)
    """
    # Reject missing required arguments up front, in declaration order,
    # keeping the exact error wording of the generated client.
    for name, value in (
        ("xero_tenant_id", xero_tenant_id),
        ("employee_id", employee_id),
        ("pay_template_earning_id", pay_template_earning_id),
        ("earnings_template", earnings_template),
    ):
        if value is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `update_employee_earnings_template`" % name
            )
    # Tenant routing header plus JSON content negotiation.
    header_params = {
        "Xero-Tenant-Id": xero_tenant_id,
        "Accept": self.api_client.select_header_accept(["application/json"]),
        "Content-Type": self.api_client.select_header_content_type(
            ["application/json"]
        ),
    }
    url = self.get_resource_url(
        "/Employees/{EmployeeId}/PayTemplates/earnings/{PayTemplateEarningID}"
    )
    try:
        return self.api_client.call_api(
            url,
            "PUT",
            {
                "EmployeeId": employee_id,
                "PayTemplateEarningID": pay_template_earning_id,
            },
            [],
            header_params,
            body=earnings_template,
            post_params=[],
            files={},
            response_type="EarningsTemplateObject",
            response_model_finder=self.get_model_finder(),
            auth_settings=["OAuth2"],
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats={},
        )
    except exceptions.HTTPStatusException as error:
        # Re-raise HTTP failures as the SDK's typed exceptions.
        raise translate_status_exception(
            error, self, "update_employee_earnings_template"
        )
def update_employee_leave(
    self,
    xero_tenant_id,
    employee_id,
    leave_id,
    employee_leave,
    _return_http_data_only=True,
    _preload_content=True,
    _request_timeout=None,
):
    """updates employee leave records  # noqa: E501

    OAuth2 scope: payroll.employees
    :param str xero_tenant_id: Xero identifier for Tenant (required)
    :param str employee_id: Employee id for single object (required)
    :param str leave_id: Leave id for single object (required)
    :param EmployeeLeave employee_leave: (required)
    :param bool _return_http_data_only: return received data only
    :param bool _preload_content: load received data in models
    :param bool _request_timeout: maximum wait time for response
    :return: EmployeeLeaveObject
    """
    # Validate required arguments in declaration order with the exact
    # generated-client error wording.
    for name, value in (
        ("xero_tenant_id", xero_tenant_id),
        ("employee_id", employee_id),
        ("leave_id", leave_id),
        ("employee_leave", employee_leave),
    ):
        if value is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `update_employee_leave`" % name
            )
    # Tenant routing header plus JSON content negotiation.
    header_params = {
        "Xero-Tenant-Id": xero_tenant_id,
        "Accept": self.api_client.select_header_accept(["application/json"]),
        "Content-Type": self.api_client.select_header_content_type(
            ["application/json"]
        ),
    }
    url = self.get_resource_url("/Employees/{EmployeeId}/Leave/{LeaveID}")
    try:
        return self.api_client.call_api(
            url,
            "PUT",
            {
                "EmployeeId": employee_id,
                "LeaveID": leave_id,
            },
            [],
            header_params,
            body=employee_leave,
            post_params=[],
            files={},
            response_type="EmployeeLeaveObject",
            response_model_finder=self.get_model_finder(),
            auth_settings=["OAuth2"],
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats={},
        )
    except exceptions.HTTPStatusException as error:
        # Re-raise HTTP failures as the SDK's typed exceptions.
        raise translate_status_exception(error, self, "update_employee_leave")
def update_employee_salary_and_wage(
    self,
    xero_tenant_id,
    employee_id,
    salary_and_wages_id,
    salary_and_wage,
    _return_http_data_only=True,
    _preload_content=True,
    _request_timeout=None,
):
    """updates employee salary and wages record  # noqa: E501

    OAuth2 scope: payroll.employees
    :param str xero_tenant_id: Xero identifier for Tenant (required)
    :param str employee_id: Employee id for single object (required)
    :param str salary_and_wages_id: Id for single pay template earnings object (required)
    :param SalaryAndWage salary_and_wage: (required)
    :param bool _return_http_data_only: return received data only
    :param bool _preload_content: load received data in models
    :param bool _request_timeout: maximum wait time for response
    :return: SalaryAndWageObject
    """
    # Validate required arguments in declaration order with the exact
    # generated-client error wording.
    for name, value in (
        ("xero_tenant_id", xero_tenant_id),
        ("employee_id", employee_id),
        ("salary_and_wages_id", salary_and_wages_id),
        ("salary_and_wage", salary_and_wage),
    ):
        if value is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `update_employee_salary_and_wage`" % name
            )
    # Tenant routing header plus JSON content negotiation.
    header_params = {
        "Xero-Tenant-Id": xero_tenant_id,
        "Accept": self.api_client.select_header_accept(["application/json"]),
        "Content-Type": self.api_client.select_header_content_type(
            ["application/json"]
        ),
    }
    url = self.get_resource_url(
        "/Employees/{EmployeeId}/SalaryAndWages/{SalaryAndWagesID}"
    )
    try:
        return self.api_client.call_api(
            url,
            "PUT",
            {
                "EmployeeId": employee_id,
                "SalaryAndWagesID": salary_and_wages_id,
            },
            [],
            header_params,
            body=salary_and_wage,
            post_params=[],
            files={},
            response_type="SalaryAndWageObject",
            response_model_finder=self.get_model_finder(),
            auth_settings=["OAuth2"],
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats={},
        )
    except exceptions.HTTPStatusException as error:
        # Re-raise HTTP failures as the SDK's typed exceptions.
        raise translate_status_exception(
            error, self, "update_employee_salary_and_wage"
        )
def update_employee_tax(
    self,
    xero_tenant_id,
    employee_id,
    employee_tax,
    _return_http_data_only=True,
    _preload_content=True,
    _request_timeout=None,
):
    """updates the tax records for an employee  # noqa: E501

    OAuth2 scope: payroll.employees.read, payroll.employees
    :param str xero_tenant_id: Xero identifier for Tenant (required)
    :param str employee_id: Employee id for single object (required)
    :param EmployeeTax employee_tax: (required)
    :param bool _return_http_data_only: return received data only
    :param bool _preload_content: load received data in models
    :param bool _request_timeout: maximum wait time for response
    :return: EmployeeTaxObject
    """
    # Validate required arguments in declaration order with the exact
    # generated-client error wording.
    for name, value in (
        ("xero_tenant_id", xero_tenant_id),
        ("employee_id", employee_id),
        ("employee_tax", employee_tax),
    ):
        if value is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `update_employee_tax`" % name
            )
    # Tenant routing header plus JSON content negotiation.
    header_params = {
        "Xero-Tenant-Id": xero_tenant_id,
        "Accept": self.api_client.select_header_accept(["application/json"]),
        "Content-Type": self.api_client.select_header_content_type(
            ["application/json"]
        ),
    }
    url = self.get_resource_url("/Employees/{EmployeeId}/Tax")
    try:
        # NOTE(review): this endpoint uses POST while the sibling update_*
        # calls use PUT — this matches the generated spec; confirm against
        # the upstream OpenAPI definition before changing.
        return self.api_client.call_api(
            url,
            "POST",
            {
                "EmployeeId": employee_id,
            },
            [],
            header_params,
            body=employee_tax,
            post_params=[],
            files={},
            response_type="EmployeeTaxObject",
            response_model_finder=self.get_model_finder(),
            auth_settings=["OAuth2"],
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats={},
        )
    except exceptions.HTTPStatusException as error:
        # Re-raise HTTP failures as the SDK's typed exceptions.
        raise translate_status_exception(error, self, "update_employee_tax")
def update_pay_run(
    self,
    xero_tenant_id,
    pay_run_id,
    pay_run,
    _return_http_data_only=True,
    _preload_content=True,
    _request_timeout=None,
):
    """update a pay run  # noqa: E501

    OAuth2 scope: payroll.payruns
    :param str xero_tenant_id: Xero identifier for Tenant (required)
    :param str pay_run_id: Identifier for the pay run (required)
    :param PayRun pay_run: (required)
    :param bool _return_http_data_only: return received data only
    :param bool _preload_content: load received data in models
    :param bool _request_timeout: maximum wait time for response
    :return: PayRunObject
    """
    # Validate required arguments in declaration order with the exact
    # generated-client error wording.
    for name, value in (
        ("xero_tenant_id", xero_tenant_id),
        ("pay_run_id", pay_run_id),
        ("pay_run", pay_run),
    ):
        if value is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `update_pay_run`" % name
            )
    # Tenant routing header plus JSON content negotiation.
    header_params = {
        "Xero-Tenant-Id": xero_tenant_id,
        "Accept": self.api_client.select_header_accept(["application/json"]),
        "Content-Type": self.api_client.select_header_content_type(
            ["application/json"]
        ),
    }
    url = self.get_resource_url("/PayRuns/{PayRunID}")
    try:
        return self.api_client.call_api(
            url,
            "PUT",
            {
                "PayRunID": pay_run_id,
            },
            [],
            header_params,
            body=pay_run,
            post_params=[],
            files={},
            response_type="PayRunObject",
            response_model_finder=self.get_model_finder(),
            auth_settings=["OAuth2"],
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats={},
        )
    except exceptions.HTTPStatusException as error:
        # Re-raise HTTP failures as the SDK's typed exceptions.
        raise translate_status_exception(error, self, "update_pay_run")
def update_pay_slip_line_items(
    self,
    xero_tenant_id,
    pay_slip_id,
    pay_slip,
    _return_http_data_only=True,
    _preload_content=True,
    _request_timeout=None,
):
    """creates employee pay slip  # noqa: E501

    OAuth2 scope: payroll.employees
    :param str xero_tenant_id: Xero identifier for Tenant (required)
    :param str pay_slip_id: Identifier for the payslip (required)
    :param PaySlip pay_slip: (required)
    :param bool _return_http_data_only: return received data only
    :param bool _preload_content: load received data in models
    :param bool _request_timeout: maximum wait time for response
    :return: PaySlipObject
    """
    # Validate required arguments in declaration order with the exact
    # generated-client error wording.
    for name, value in (
        ("xero_tenant_id", xero_tenant_id),
        ("pay_slip_id", pay_slip_id),
        ("pay_slip", pay_slip),
    ):
        if value is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `update_pay_slip_line_items`" % name
            )
    # Tenant routing header plus JSON content negotiation.
    header_params = {
        "Xero-Tenant-Id": xero_tenant_id,
        "Accept": self.api_client.select_header_accept(["application/json"]),
        "Content-Type": self.api_client.select_header_content_type(
            ["application/json"]
        ),
    }
    url = self.get_resource_url("/PaySlips/{PaySlipID}")
    try:
        return self.api_client.call_api(
            url,
            "PUT",
            {
                "PaySlipID": pay_slip_id,
            },
            [],
            header_params,
            body=pay_slip,
            post_params=[],
            files={},
            response_type="PaySlipObject",
            response_model_finder=self.get_model_finder(),
            auth_settings=["OAuth2"],
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats={},
        )
    except exceptions.HTTPStatusException as error:
        # Re-raise HTTP failures as the SDK's typed exceptions.
        raise translate_status_exception(error, self, "update_pay_slip_line_items")
def update_timesheet_line(
    self,
    xero_tenant_id,
    timesheet_id,
    timesheet_line_id,
    timesheet_line,
    _return_http_data_only=True,
    _preload_content=True,
    _request_timeout=None,
):
    """update a timesheet line  # noqa: E501

    OAuth2 scope: payroll.timesheets
    :param str xero_tenant_id: Xero identifier for Tenant (required)
    :param str timesheet_id: Identifier for the timesheet (required)
    :param str timesheet_line_id: Identifier for the timesheet line (required)
    :param TimesheetLine timesheet_line: (required)
    :param bool _return_http_data_only: return received data only
    :param bool _preload_content: load received data in models
    :param bool _request_timeout: maximum wait time for response
    :return: TimesheetLineObject
    """
    # Validate required arguments in declaration order with the exact
    # generated-client error wording.
    for name, value in (
        ("xero_tenant_id", xero_tenant_id),
        ("timesheet_id", timesheet_id),
        ("timesheet_line_id", timesheet_line_id),
        ("timesheet_line", timesheet_line),
    ):
        if value is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `update_timesheet_line`" % name
            )
    # Tenant routing header plus JSON content negotiation.
    header_params = {
        "Xero-Tenant-Id": xero_tenant_id,
        "Accept": self.api_client.select_header_accept(["application/json"]),
        "Content-Type": self.api_client.select_header_content_type(
            ["application/json"]
        ),
    }
    url = self.get_resource_url("/Timesheets/{TimesheetID}/Lines/{TimesheetLineID}")
    try:
        return self.api_client.call_api(
            url,
            "PUT",
            {
                "TimesheetID": timesheet_id,
                "TimesheetLineID": timesheet_line_id,
            },
            [],
            header_params,
            body=timesheet_line,
            post_params=[],
            files={},
            response_type="TimesheetLineObject",
            response_model_finder=self.get_model_finder(),
            auth_settings=["OAuth2"],
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats={},
        )
    except exceptions.HTTPStatusException as error:
        # Re-raise HTTP failures as the SDK's typed exceptions.
        raise translate_status_exception(error, self, "update_timesheet_line")
| 35.228592 | 248 | 0.593803 |
acf908ad92296d836db4596f76c26cea8805b346 | 2,428 | py | Python | data/p4VQE/R1/benchmark/startQiskit_QC5.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p4VQE/R1/benchmark/startQiskit_QC5.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p4VQE/R1/benchmark/startQiskit_QC5.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=3
# total number=7
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.cx(input_qubit[1],input_qubit[0]) # number=5
prog.cx(input_qubit[1],input_qubit[0]) # number=6
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5200
writefile = open("../data/startQiskit_QC5.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_5_yorktown")
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 26.977778 | 118 | 0.633855 |
acf908dd7eba9c9fd2cc0ef14f13c086c84dbe19 | 954 | py | Python | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/monitor/models/MetricDataList.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 14 | 2018-04-19T09:53:56.000Z | 2022-01-27T06:05:48.000Z | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/monitor/models/MetricDataList.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 15 | 2018-09-11T05:39:54.000Z | 2021-07-02T12:38:02.000Z | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/monitor/models/MetricDataList.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 33 | 2018-04-20T05:29:16.000Z | 2022-02-17T09:10:05.000Z | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class MetricDataList(object):
    """Record describing metric data points that were rejected."""

    def __init__(self, errMetricData=None, errDetail=None):
        """Create a metric-data error record.

        :param errMetricData: (Optional) the error data
        :param errDetail: (Optional) description of the error data
        """
        self.errMetricData = errMetricData
        self.errDetail = errDetail
| 31.8 | 75 | 0.722222 |
acf909de9e3691b5d21f3bf88364c909e67f1c85 | 3,534 | py | Python | delta/layers/ops/kernels/tokenizer_ops_test.py | awesome-archive/delta | 841d853cf0bdb479260be112432813dcb705f859 | [
"Apache-2.0"
] | 1 | 2021-01-11T13:25:19.000Z | 2021-01-11T13:25:19.000Z | delta/layers/ops/kernels/tokenizer_ops_test.py | lhhriver/delta | a916e06f55213dcd1fea39a5950927dfed1483c7 | [
"Apache-2.0"
] | null | null | null | delta/layers/ops/kernels/tokenizer_ops_test.py | lhhriver/delta | a916e06f55213dcd1fea39a5950927dfed1483c7 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tokenizer_ops."""
import time
import tempfile
import tensorflow as tf
from absl import logging
from delta.layers.ops import py_x_ops
class TokenizerOpsTest(tf.test.TestCase):
    """Unit tests for the `sentence_to_ids` tokenizer op."""

    def setUp(self):
        """Materialise a five-token vocabulary in a temporary file."""
        vocab = [
            '</s>',
            '<unk>',
            'hello',
            '你好',
            'world',
        ]
        # NOTE(review): tempfile.mktemp is race-prone; mkstemp would be
        # safer, kept here for behavioural parity.
        self.vocab_filepath = tempfile.mktemp(suffix='vocab.txt')
        # One token per line, newline-terminated, UTF-8 encoded.
        with open(self.vocab_filepath, mode='w', encoding='utf-8') as vocab_file:
            vocab_file.write('\n'.join(vocab))
            vocab_file.write('\n')

    def test_text_to_tokenid(self):
        """sentence_to_ids maps sentences to padded token-id matrices."""
        with self.session(use_gpu=False) as sess:
            # Batched input: ids are vocab indices, pad_id fills to maxlen,
            # and the paddings mask is 1 on padded positions.
            start = time.time()
            batch_op = py_x_ops.sentence_to_ids(
                ['hello world', '你好 hello unknown world'],
                maxlen=10,
                vocab_filepath=self.vocab_filepath,
                load_token_ids_from_vocab=False,
                pad_id=-1)
            token_ids, paddings = sess.run(batch_op)
            elapsed = time.time() - start
            logging.info("Time cost: {:.4f}s".format(elapsed))
            logging.info(token_ids)
            logging.info(paddings)
            logging.info("batch_op: {}".format(batch_op))
            self.assertAllEqual(
                token_ids,
                [[2, 4, -1, -1, -1, -1, -1, -1, -1, -1],
                 [3, 2, 1, 4, -1, -1, -1, -1, -1, -1]])
            self.assertAllEqual(
                paddings,
                [[0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])

            # Single sentence: out-of-vocabulary "unknown" maps to <unk> (id 1).
            single_op = py_x_ops.sentence_to_ids(
                '你好 hello unknown world',
                maxlen=10,
                vocab_filepath=self.vocab_filepath,
                load_token_ids_from_vocab=False,
                pad_id=-1)
            token_ids, paddings = sess.run(single_op)
            logging.info("single_op: {}".format(single_op))
            self.assertAllEqual(token_ids, [3, 2, 1, 4, -1, -1, -1, -1, -1, -1])

            # maxlen shorter than the sentence truncates the output.
            short_single_op = py_x_ops.sentence_to_ids(
                '你好 hello unknown world',
                maxlen=2,
                vocab_filepath=self.vocab_filepath,
                load_token_ids_from_vocab=False,
                pad_id=0)
            token_ids, paddings = sess.run(short_single_op)
            logging.info("short_op: {}".format(short_single_op))
            self.assertAllEqual(token_ids, [3, 2])

            # Truncation also applies per-row in the batched case.
            short_batch_op = py_x_ops.sentence_to_ids(
                ['hello world', '你好 hello unknown world'],
                maxlen=2,
                vocab_filepath=self.vocab_filepath,
                load_token_ids_from_vocab=False,
                pad_id=0)
            token_ids, paddings = sess.run(short_batch_op)
            logging.info("short_op: {}".format(short_batch_op))
            self.assertAllEqual(token_ids, [[2, 4], [3, 2]])
if __name__ == '__main__':
    # Raise verbosity so the op timing logs are visible, then hand off
    # to the TensorFlow test runner.
    logging.set_verbosity(logging.INFO)
    tf.test.main()
| 34.31068 | 80 | 0.607244 |
acf90b355a58b945e549c0b0f8baa6fa38c500a6 | 6,693 | py | Python | A5-Yoga-Pose-Classification/Week 2 and 3/train_2019CS10407_2019CS10349.py | GauravJain28/ML-Assignments | 0de464fe6564a0bad43f7962c92563fe0a988285 | [
"MIT"
] | null | null | null | A5-Yoga-Pose-Classification/Week 2 and 3/train_2019CS10407_2019CS10349.py | GauravJain28/ML-Assignments | 0de464fe6564a0bad43f7962c92563fe0a988285 | [
"MIT"
] | null | null | null | A5-Yoga-Pose-Classification/Week 2 and 3/train_2019CS10407_2019CS10349.py | GauravJain28/ML-Assignments | 0de464fe6564a0bad43f7962c92563fe0a988285 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils, models
from skimage import io, transform
import matplotlib.pyplot as plt # for plotting
import numpy as np
import pandas as pd
import glob
import sys
import os
import PIL
from sklearn.model_selection import KFold
import torchvision.models as models
from IPython.display import Image
from torch.autograd import Variable
from torch.nn import Linear, ReLU, CrossEntropyLoss, Sequential, Conv2d, MaxPool2d, Module, Softmax, BatchNorm2d, Dropout
from torch.optim import Adam, SGD
# Prefer the GPU when one is present; consumers move tensors explicitly.
device = ("cuda" if torch.cuda.is_available() else "cpu")

# CLI contract: argv[1] = training CSV manifest, argv[2] = output
# directory/prefix that "model.pth" is appended to.
trainfile = sys.argv[1]
modelfile = sys.argv[2] + "model.pth"

# Image paths in the CSV are taken relative to the working directory.
img_train_folder = ""
class CustomDataset(torch.utils.data.Dataset):
    """Yoga-pose image dataset backed by a CSV manifest.

    The CSV must have a ``name`` column (image filename) and, for training
    data, a ``category`` column holding one of the 19 pose names below.
    """

    def __init__(self, csv_path, images_folder, transform=None, train=True):
        self.df = pd.read_csv(csv_path)
        self.is_train = train
        self.images_folder = images_folder
        self.transform = transform
        # Pose name -> integer class id (19 classes).
        self.class2index = {
            "Virabhadrasana": 0,
            "Vrikshasana": 1,
            "Utkatasana": 2,
            "Padahastasana": 3,
            "Katichakrasana": 4,
            "TriyakTadasana": 5,
            "Gorakshasana": 6,
            "Tadasana": 7,
            "Natarajasana": 8,
            "Pranamasana": 9,
            "ParivrittaTrikonasana": 10,
            "Tuladandasana": 11,
            "Santolanasana": 12,
            "Still": 13,
            "Natavarasana": 14,
            "Garudasana": 15,
            "Naukasana": 16,
            "Ardhachakrasana": 17,
            "Trikonasana": 18,
        }

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        """Return {'images': tensor/PIL image, 'labels': class id or -1}."""
        img_name = self.df["name"].iloc[idx]
        # Test-time manifests have no category column; use -1 as a dummy.
        label = self.class2index[self.df["category"].iloc[idx]] if self.is_train else -1
        image = PIL.Image.open(os.path.join(self.images_folder, img_name))
        if self.transform is not None:
            image = self.transform(image)
        return {"images": image, "labels": label}
# DataLoader configuration.
BATCH_SIZE = 80
NUM_WORKERS = 20

# Per-channel (mean, std) normalisation tuples — presumably CIFAR-10
# statistics reused for this dataset; confirm before relying on them.
stats = ((0.4914, 0.4822, 0.5065), (0.2023, 0.1994, 0.2010))

# Augmentation + preprocessing: random horizontal flip, resize to the
# 299x299 input Inception-v3 expects, tensor conversion, normalisation.
img_transforms = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.Resize(size=(299, 299)),
    transforms.ToTensor(),
    transforms.Normalize(*stats, inplace=True),
])

train_data = trainfile
train_dataset = CustomDataset(csv_path=train_data,
                              images_folder=img_train_folder,
                              transform=img_transforms,
                              train=True)
#architecture 2
class Net_drop_1(Module):
    """CNN with p=0.2 dropout after every stage and a 19-way classifier.

    Channel progression 3→32→64→128→128→256→512; four 2x2 max-pools reduce
    the spatial size 16x, so the head expects a 4x4 feature map (i.e. a
    64x64 input). The flat Sequential layout (and therefore the state_dict
    key layout) matches the original exactly.
    """

    def __init__(self):
        super(Net_drop_1, self).__init__()
        # Convolutional trunk, built as a flat module list so that the
        # Sequential indices (cnn_layers.0, cnn_layers.1, ...) are stable.
        trunk = [
            Conv2d(3, 32, kernel_size=3, stride=1, padding=1),
            BatchNorm2d(32),
            ReLU(inplace=True),
            Dropout(p=0.2),

            Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            BatchNorm2d(64),
            ReLU(inplace=True),
            MaxPool2d(kernel_size=2, stride=2),
            Dropout(p=0.2),

            Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            BatchNorm2d(128),
            ReLU(inplace=True),
            MaxPool2d(kernel_size=2, stride=2),
            Dropout(p=0.2),

            Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
            BatchNorm2d(128),
            ReLU(inplace=True),
            MaxPool2d(kernel_size=2, stride=2),
            Dropout(p=0.2),

            Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
            BatchNorm2d(256),
            ReLU(inplace=True),
            MaxPool2d(kernel_size=2, stride=2),
            Dropout(p=0.2),

            Conv2d(256, 512, kernel_size=3, stride=1, padding=1),
            ReLU(inplace=True),
            Dropout(p=0.2),
        ]
        self.cnn_layers = Sequential(*trunk)

        # Classifier head: 512*4*4 features -> 512 -> 64 -> 19 logits.
        head = [
            Linear(512 * 4 * 4, 512),
            ReLU(inplace=True),
            Dropout(p=0.2),
            Linear(512, 64),
            ReLU(inplace=True),
            Dropout(p=0.2),
            Linear(64, 19),
        ]
        self.linear_layers = Sequential(*head)

    def forward(self, x):
        features = self.cnn_layers(x)
        flattened = features.view(features.size(0), -1)
        return self.linear_layers(flattened)
class Inception_Model(Module):
    """Inception-v3 backbone with its final FC layer replaced by a
    19-way pose classifier.

    :param pretrained: whether to initialise the backbone with ImageNet
        weights (default True). Bug fix: the original hard-coded
        ``pretrained=True`` and silently ignored this argument; it is now
        honoured, which is backward-compatible for the default call.
    """

    def __init__(self, pretrained=True):
        super(Inception_Model, self).__init__()
        self.m = models.inception_v3(pretrained=pretrained)
        # Swap the 1000-class ImageNet head for the 19 yoga-pose classes.
        self.m.fc = nn.Linear(self.m.fc.in_features, 19)

    def forward(self, xb):
        # In train mode Inception-v3 returns an InceptionOutputs tuple;
        # callers read `.logits` from it (see train()).
        return self.m(xb)
def train(epoch, x, y, criterion, optimizer, model):
    """Run one optimisation step on a single batch and return the loss.

    :param epoch: current epoch number (unused; kept for call compatibility)
    :param x: batch of input images
    :param y: batch of integer class labels
    :param criterion: loss module (e.g. CrossEntropyLoss)
    :param optimizer: optimizer over ``model``'s parameters
    :param model: module whose forward output exposes ``.logits``
        (Inception-v3 in train mode returns an InceptionOutputs tuple)
    :return: the batch loss as a Python float
    """
    model.train()
    # Fix: torch.autograd.Variable has been a deprecated no-op wrapper
    # since PyTorch 0.4 — tensors are used directly.
    if torch.cuda.is_available():
        x = x.cuda()
        y = y.cuda()
    optimizer.zero_grad()
    # Primary classifier output only; auxiliary logits are ignored.
    output = model(x).logits
    loss = criterion(output, y)
    loss.backward()
    optimizer.step()
    return loss.item()
def reset_weights(m):
    """Re-initialise every direct child module of *m* that supports it.

    NOTE(review): ``children()`` is shallow — modules nested inside child
    containers (e.g. a Sequential within a Sequential) are NOT reset;
    confirm that is intended before reusing this helper.
    """
    for layer in m.children():
        reset = getattr(layer, 'reset_parameters', None)
        if reset is not None:
            print(f'Reset trainable parameters of layer = {layer}')
            reset()
# ---- Training driver ----------------------------------------------------
train_loader = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=BATCH_SIZE, num_workers=NUM_WORKERS, shuffle=False)

torch.manual_seed(51)

cnnmodel = Inception_Model()
print(sum(p.numel() for p in cnnmodel.parameters()))

optimizer = SGD(cnnmodel.parameters(), lr=0.1, momentum=0.9, nesterov=True)
criterion = CrossEntropyLoss()
# Bug fix: OneCycleLR is configured for epochs * steps_per_epoch total
# steps, so it must be stepped once per BATCH. The original stepped it
# once per epoch, leaving the one-cycle LR schedule almost entirely
# unused (LR stayed near its initial warm-up value).
scheduler = optim.lr_scheduler.OneCycleLR(
    optimizer, max_lr=0.1, epochs=20, steps_per_epoch=len(train_loader))

if torch.cuda.is_available():
    cnnmodel = cnnmodel.cuda()
    criterion = criterion.cuda()

epochs = 20
for epoch in range(epochs):
    loss_avg = 0.0
    count = 0
    for batch_idx, sample in enumerate(train_loader):
        images = sample['images']
        labels = sample['labels']
        if torch.cuda.is_available():
            images = images.cuda()
            labels = labels.cuda()
        loss = train(epoch, images, labels, criterion, optimizer, cnnmodel)
        scheduler.step()  # advance the LR schedule once per batch
        loss_avg += loss
        count += 1
    # Guard against an empty loader instead of dividing by zero.
    loss_avg = loss_avg / count if count else 0.0
    print("Training loss -> Epoch" + str(epoch), loss_avg)

torch.save(cnnmodel.state_dict(), modelfile)
| 28.480851 | 124 | 0.595099 |
acf90c2087c67bcc8f2637306bfa4612ad2200a0 | 6,015 | py | Python | lovpy/graphs/timestamps.py | dkarageo/lovpy | 85f43c07aeed4b318238c35da606de2dc65ca24f | [
"Apache-2.0"
] | null | null | null | lovpy/graphs/timestamps.py | dkarageo/lovpy | 85f43c07aeed4b318238c35da606de2dc65ca24f | [
"Apache-2.0"
] | null | null | null | lovpy/graphs/timestamps.py | dkarageo/lovpy | 85f43c07aeed4b318238c35da606de2dc65ca24f | [
"Apache-2.0"
] | null | null | null | from copy import copy
import lovpy.monitor.time_source as time_source
PLUS_INFINITE = "inf"
MINUS_INFINITE = "-inf"
class Timestamp:
def __init__(self, value):
self._value = value
def __repr__(self):
return str(self._value)
def __lt__(self, other):
return self.get_validity_interval()[1] < other.get_validity_interval()[1]
def __le__(self, other):
return self.get_validity_interval()[1] <= other.get_validity_interval()[1]
def __gt__(self, other):
return self.get_validity_interval()[1] > other.get_validity_interval()[1]
def __ge__(self, other):
return self.get_validity_interval()[1] >= other.get_validity_interval()[1]
def __eq__(self, other):
return type(self) == type(other) and self._value == other._value
def __hash__(self):
return hash(repr(self))
def __copy__(self):
return type(self)(self._value)
def is_absolute(self):
return type(self) is Timestamp
def get_absolute_value(self):
return self._value
def get_validity_interval(self):
return [self._value, self._value]
def get_shifted_timestamp(self, shift):
shifted = copy(self)
shifted._value += shift
return shifted
def matches(self, other):
"""Checks whether the intervals of current and other timestamps overlap."""
a = self.get_validity_interval()
b = other.get_validity_interval()
# WARNING: [..,-inf] and [inf,..] cases are not supported yet.
if a[0] == MINUS_INFINITE and b[0] != MINUS_INFINITE:
a[0] = b[0] - 1
if b[0] == MINUS_INFINITE and a[0] != MINUS_INFINITE:
b[0] = a[0] - 1
if a[1] == PLUS_INFINITE and b[1] != PLUS_INFINITE:
a[1] = b[1] + 1
if b[1] == PLUS_INFINITE and a[1] != PLUS_INFINITE:
b[1] = a[1] + 1
# Cover the cases where both lower or upper limits are -inf or inf respectively.
if a[0] == MINUS_INFINITE and b[0] == MINUS_INFINITE:
if a[1] != MINUS_INFINITE and b[1] != MINUS_INFINITE:
return True
else:
return False
if a[1] == PLUS_INFINITE and b[1] == PLUS_INFINITE:
if a[0] != PLUS_INFINITE and b[0] != PLUS_INFINITE:
return True
else:
return False
return max(a[0], b[0]) <= min(a[1], b[1])
class RelativeTimestamp(Timestamp):
def __init__(self, value, time_source=None):
super().__init__(value)
self.time_source = time_source
def __eq__(self, other):
if type(self) != type(other):
return False
if self.get_relative_value() != other.get_relative_value():
return False
return True
def __hash__(self):
return hash(repr(self))
def __copy__(self):
return type(self)(super().get_absolute_value(), time_source=self.time_source)
def __repr__(self):
if self.get_relative_value() == 0:
return "t"
else:
return "t" + "{0:+}".format(self.get_relative_value())
def get_absolute_value(self):
"""Returns the absolute time value, calculated using timestamp's time source."""
return self.time_source.get_current_time() + self.get_relative_value()
def get_relative_value(self):
"""Returns the relative offset value associated with timestamp."""
return super().get_absolute_value()
def get_validity_interval(self):
absolute = self.get_absolute_value()
return [absolute, absolute]
def set_time_source(self, time_source):
self.time_source = time_source
def get_time_source(self):
return self.time_source
class LesserThanRelativeTimestamp(RelativeTimestamp):
    """A relative timestamp valid at any instant up to ``t + offset``."""

    def __repr__(self):
        return "<= {}".format(RelativeTimestamp.__repr__(self))

    def get_validity_interval(self):
        # Open-ended towards the past, closed at the absolute point.
        return [MINUS_INFINITE, self.get_absolute_value()]
class GreaterThanRelativeTimestamp(RelativeTimestamp):
    """A relative timestamp valid at any instant from ``t + offset`` onwards."""

    def __repr__(self):
        return ">= {}".format(RelativeTimestamp.__repr__(self))

    def get_validity_interval(self):
        # Closed at the absolute point, open-ended towards the future.
        return [self.get_absolute_value(), PLUS_INFINITE]
def timestamp_sequences_matches(seq1, seq2):
    """Checks if two timestamp sequences match.

    Raises:
        RuntimeError: if the two sequences differ in length.
    """
    if len(seq1) != len(seq2):
        raise RuntimeError("Timestamp sequences lengths should match.")

    # Align the sequences on the last position where one timestamp is
    # absolute and the other relative, scanning from the tail.
    shift = 0
    for first, second in reversed(list(zip(seq1, seq2))):
        if first.is_absolute() and not second.is_absolute():
            shift = second.get_relative_value() - first.get_absolute_value()
            break
        if second.is_absolute() and not first.is_absolute():
            shift = first.get_relative_value() - second.get_absolute_value()
            break

    def _normalized(stamp):
        # Absolute stamps are moved by the alignment shift; relative stamps
        # are copied and pinned to a zero-locked time source so that their
        # intervals are evaluated deterministically.
        if stamp.is_absolute():
            return stamp.get_shifted_timestamp(shift)
        pinned = copy(stamp)
        pinned.set_time_source(time_source.get_zero_locked_timesource())
        return pinned

    # all() short-circuits on the first non-overlapping pair, mirroring the
    # original early break.
    return all(_normalized(a).matches(_normalized(b))
               for a, b in zip(seq1, seq2))
def is_interval_subset(interval1, interval2):
    """Checks whether interval1 is subset of interval2.

    Bounds are numbers, or the strings "-inf" / "inf" for open-ended
    intervals.
    """
    lo1, hi1 = interval1[0], interval1[1]
    lo2, hi2 = interval2[0], interval2[1]
    # Upper bound: ok when the outer interval is unbounded above, or when
    # the inner bound is finite and does not exceed the outer one.
    upper_ok = hi2 == "inf" or (hi1 != "inf" and hi1 <= hi2)
    # Lower bound: symmetric check towards -inf.
    lower_ok = lo2 == "-inf" or (lo1 != "-inf" and lo1 >= lo2)
    return upper_ok and lower_ok
| 30.688776 | 96 | 0.614131 |
acf90c28379f7fbb460a53a802c525a3d8144be0 | 3,556 | py | Python | api/app/resources/theq/citizen/citizen_detail.py | sumesh-aot/queue-management | d8de45c2d94c1a557c8f8d207d73a067709d5abb | [
"Apache-2.0"
] | 1 | 2020-09-08T21:49:04.000Z | 2020-09-08T21:49:04.000Z | api/app/resources/theq/citizen/citizen_detail.py | sumesh-aot/queue-management | d8de45c2d94c1a557c8f8d207d73a067709d5abb | [
"Apache-2.0"
] | 3 | 2020-10-21T15:44:41.000Z | 2020-12-03T17:49:47.000Z | api/app/resources/theq/citizen/citizen_detail.py | acoard-aot/queue-management | 07023246b2adee315d9e954a82c6f0891017e2da | [
"Apache-2.0"
] | null | null | null | '''Copyright 2018 Province of British Columbia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
from flask import request, g
from flask_restx import Resource
from qsystem import api, api_call_with_retry, db, oidc, socketio, my_print
from app.models.theq import Citizen, CSR, Counter
from marshmallow import ValidationError
from app.schemas.theq import CitizenSchema
from sqlalchemy import exc
from app.utilities.snowplow import SnowPlow
from app.utilities.auth_util import Role, has_any_role
@api.route("/citizens/<int:id>/", methods=["GET", "PUT"])
class CitizenDetail(Resource):
    """REST resource for a single citizen: GET returns it, PUT updates it."""

    citizen_schema = CitizenSchema()

    @oidc.accept_token(require_token=True)
    @has_any_role(roles=[Role.internal_user.value])
    def get(self, id):
        """Return the citizen with the given id, serialized by CitizenSchema."""
        try:
            citizen = Citizen.query.filter_by(citizen_id=id).first()
            citizen_ticket = "None"
            if hasattr(citizen, 'ticket_number'):
                citizen_ticket = str(citizen.ticket_number)
            # NOTE(review): if no row matches, `citizen` is None and the
            # attribute access below raises AttributeError (not caught by the
            # SQLAlchemyError handler) — confirm callers always pass valid ids.
            my_print("==> GET /citizens/" + str(citizen.citizen_id) + '/, Ticket: ' + citizen_ticket)
            result = self.citizen_schema.dump(citizen)
            return {'citizen': result.data,
                    'errors': result.errors}
        except exc.SQLAlchemyError as e:
            print(e)
            return {'message': 'API is down'}, 500

    @oidc.accept_token(require_token=True)
    @has_any_role(roles=[Role.internal_user.value])
    @api_call_with_retry
    def put(self, id):
        """Partially update a citizen and notify the CSR's office via socket.

        Returns 400 on an empty body, 422 on schema validation errors,
        200 with the updated citizen otherwise.
        """
        json_data = request.get_json()
        # Bug fix: validate the payload *before* indexing into it. Previously
        # `'counter_id' not in json_data` ran first, so a missing/empty JSON
        # body raised TypeError instead of returning the intended 400.
        if not json_data:
            return {'message': 'No input data received for updating citizen'}, 400
        # Default the counter when the client did not specify one.
        if 'counter_id' not in json_data:
            json_data['counter_id'] = counter_id
        csr = CSR.find_by_username(g.oidc_token_info['username'])
        citizen = Citizen.query.filter_by(citizen_id=id).first()
        my_print("==> PUT /citizens/" + str(citizen.citizen_id) + '/, Ticket: ' + str(citizen.ticket_number))
        try:
            citizen = self.citizen_schema.load(json_data, instance=citizen, partial=True).data
        except ValidationError as err:
            return {'message': err.messages}, 422
        db.session.add(citizen)
        db.session.commit()
        # If this put request is the result of an appointment checkin, make a Snowplow call.
        if ('snowplow_addcitizen' in json_data) and (json_data['snowplow_addcitizen'] == True):
            SnowPlow.add_citizen(citizen, csr)
        result = self.citizen_schema.dump(citizen)
        socketio.emit('update_active_citizen', result.data, room=csr.office_id)
        return {'citizen': result.data,
                'errors': result.errors}, 200
# Module-level default counter id, resolved once at import time and used by
# CitizenDetail.put() when the client omits 'counter_id'.
try:
    counter = Counter.query.filter(Counter.counter_name=="Counter")[0]
    counter_id = counter.counter_id
# NOTE!! There should ONLY be an exception when first building the database
# from a python3 manage.py db upgrade command.
# Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt still
# propagate; any table-not-ready error falls back to counter 1.
except Exception:
    counter_id = 1
    print("==> In citizen_detail.py")
    print(" --> NOTE!! You should only see this if doing a 'python3 manage.py db upgrade'")
| 38.652174 | 109 | 0.683071 |
acf90cab99f4e0c36f26915422500647fe5477b0 | 20,549 | py | Python | flux_mito/model_654.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | flux_mito/model_654.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | flux_mito/model_654.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 77500.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 30000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
| 95.134259 | 798 | 0.804127 |
acf90d69a84fb58c7ca23288c176fbbefdea1c14 | 692 | py | Python | adventofcode/2021/download.py | hacktoolkit/code_challenges | d71f8362496a72963a53abba7bcc9dd4d35a2920 | [
"MIT"
] | 10 | 2015-01-31T09:04:45.000Z | 2022-01-08T04:09:48.000Z | adventofcode/2021/download.py | hacktoolkit/code_challenges | d71f8362496a72963a53abba7bcc9dd4d35a2920 | [
"MIT"
] | 3 | 2016-05-16T07:37:01.000Z | 2016-05-18T14:14:16.000Z | adventofcode/2021/download.py | hacktoolkit/code_challenges | d71f8362496a72963a53abba7bcc9dd4d35a2920 | [
"MIT"
] | 6 | 2015-02-06T06:00:00.000Z | 2020-02-13T16:13:48.000Z | # Python Standard Library Imports
import argparse
import os
# Third Party (PyPI) Imports
import requests
from dotenv import load_dotenv
load_dotenv()
SESSION = os.environ.get('SESSION')
def main():
    """CLI entry point: parse the puzzle day from argv and fetch its input."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('day', type=int)
    parsed = arg_parser.parse_args()
    download_input(parsed.day)
def download_input(day):
    """Download the Advent of Code 2021 input for *day* and save it locally.

    The puzzle input is written to ``NN.in`` (zero-padded day number) in the
    current directory. Requires a valid SESSION cookie in the environment.

    Raises:
        requests.HTTPError: if the server responds with an error status
            (e.g. expired/missing session cookie, or a day not yet open).
        requests.Timeout: if the request takes longer than 30 seconds.
    """
    url = f'https://adventofcode.com/2021/day/{day}/input'
    cookies = {
        'session': SESSION,
    }
    # timeout keeps the CLI from hanging forever on network trouble.
    response = requests.get(url, cookies=cookies, timeout=30)
    # Bug fix: previously a failed request silently wrote the HTML error page
    # into the input file; fail loudly instead.
    response.raise_for_status()
    filename = f'{str(day).zfill(2)}.in'
    with open(filename, 'w') as f:
        f.write(response.content.decode())
# Allow running as a script: python download.py <day>
if __name__ == '__main__':
    main()
| 18.210526 | 58 | 0.666185 |
acf90df0a8dbbc362fd9ed342f1cc321dd930daf | 95,099 | py | Python | Lib/test/test_io.py | ystk/debian-python3.1 | 6241444a6994140621d1b143a2d6b311b184366a | [
"PSF-2.0"
] | null | null | null | Lib/test/test_io.py | ystk/debian-python3.1 | 6241444a6994140621d1b143a2d6b311b184366a | [
"PSF-2.0"
] | null | null | null | Lib/test/test_io.py | ystk/debian-python3.1 | 6241444a6994140621d1b143a2d6b311b184366a | [
"PSF-2.0"
] | null | null | null | """Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as a attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
import os
import sys
import time
import array
import threading
import random
import unittest
import warnings
import weakref
import gc
import abc
import signal
import errno
from itertools import chain, cycle, count
from collections import deque
from test import support
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
    """A RawIO implementation without read(), so as to exercise the default
    RawIO.read() which calls readinto()."""
    def __init__(self, read_stack=()):
        # Chunks to be served, in order, by readinto(); a None entry
        # simulates a non-blocking "would block" result.
        self._read_stack = list(read_stack)
        # Every payload passed to write(), for later inspection by tests.
        self._write_stack = []
        # Number of raw reads issued.
        self._reads = 0
        # Reads issued after the stack was exhausted (should stay 0 when
        # the buffered layer behaves).
        self._extraneous_reads = 0
    def write(self, b):
        self._write_stack.append(bytes(b))
        return len(b)
    def writable(self):
        return True
    def fileno(self):
        return 42
    def readable(self):
        return True
    def seekable(self):
        return True
    def seek(self, pos, whence):
        return 0   # wrong but we gotta return something
    def tell(self):
        return 0   # same comment as above
    def readinto(self, buf):
        # Serve at most len(buf) bytes from the head of the read stack.
        self._reads += 1
        max_len = len(buf)
        try:
            data = self._read_stack[0]
        except IndexError:
            self._extraneous_reads += 1
            return 0
        if data is None:
            # Simulated EWOULDBLOCK: consume the marker, report None.
            del self._read_stack[0]
            return None
        n = len(data)
        if len(data) <= max_len:
            del self._read_stack[0]
            buf[:n] = data
            return n
        else:
            # Partial read: keep the unread tail at the head of the stack.
            buf[:] = data[:max_len]
            self._read_stack[0] = data[max_len:]
            return max_len
    def truncate(self, pos=None):
        return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
    """Mock bound to the C implementation of RawIOBase."""
    pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
    """Mock bound to the pure-Python implementation of RawIOBase."""
    pass
class MockRawIO(MockRawIOWithoutRead):
    """Mock raw stream with an explicit read() that pops from the stack."""
    def read(self, n=None):
        """Return the next prepared chunk, or b"" once the stack is empty.

        Reads past the end of the prepared data are tallied in
        _extraneous_reads so tests can assert that no unnecessary raw
        reads were issued by the buffered layer.
        """
        self._reads += 1
        try:
            return self._read_stack.pop(0)
        except IndexError:
            # A bare `except:` here would also swallow KeyboardInterrupt
            # and SystemExit; only an exhausted stack is expected.
            self._extraneous_reads += 1
            return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
    """MockRawIO bound to the C implementation of RawIOBase."""
    pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
    """MockRawIO bound to the pure-Python implementation of RawIOBase."""
    pass
class MisbehavedRawIO(MockRawIO):
    """Raw stream that deliberately violates the RawIOBase contract.

    Returns doubled write/read results, negative seek/tell positions and
    a readinto() count larger than the buffer, so tests can check that
    the buffered layers detect and reject bogus raw results.
    """
    def write(self, b):
        return super().write(b) * 2
    def read(self, n=None):
        return super().read(n) * 2
    def seek(self, pos, whence):
        return -123
    def tell(self):
        return -456
    def readinto(self, buf):
        super().readinto(buf)
        return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
    """MisbehavedRawIO bound to the C implementation of RawIOBase."""
    pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
    """MisbehavedRawIO bound to the pure-Python RawIOBase."""
    pass
class CloseFailureIO(MockRawIO):
    """Raw stream whose first close() raises IOError.

    Subsequent close() calls are silent no-ops, mimicking a stream that
    fails once during cleanup.
    """
    # 0 until the first close() attempt; then 1.
    closed = 0
    def close(self):
        if not self.closed:
            self.closed = 1
            raise IOError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
    """CloseFailureIO bound to the C implementation of RawIOBase."""
    pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
    """CloseFailureIO bound to the pure-Python RawIOBase."""
    pass
class MockFileIO:
    """Mixin that records the size of each read()/readinto() result.

    Combined with a BytesIO base (see CMockFileIO/PyMockFileIO below);
    read_history logs the byte counts so tests can verify the buffered
    layer's raw read pattern.
    """
    def __init__(self, data):
        self.read_history = []
        super().__init__(data)
    def read(self, n=None):
        res = super().read(n)
        # Record None for a would-block result, the length otherwise.
        self.read_history.append(None if res is None else len(res))
        return res
    def readinto(self, b):
        res = super().readinto(b)
        self.read_history.append(res)
        return res
class CMockFileIO(MockFileIO, io.BytesIO):
    """MockFileIO over the C implementation of BytesIO."""
    pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
    """MockFileIO over the pure-Python implementation of BytesIO."""
    pass
class MockNonBlockWriterIO:
    """Writable raw stream that can simulate a non-blocking partial write.

    After block_on(c) is armed, the next write whose payload contains c
    stores only the bytes before c and raises BlockingIOError with that
    count, as a real non-blocking fd would. Subclasses supply the
    BlockingIOError class attribute (C or pure-Python flavor).
    """
    def __init__(self):
        self._write_stack = []
        # Byte that triggers the simulated block, or None when disarmed.
        self._blocker_char = None
    def pop_written(self):
        # Return and clear everything written so far.
        s = b"".join(self._write_stack)
        self._write_stack[:] = []
        return s
    def block_on(self, char):
        """Block when a given char is encountered."""
        self._blocker_char = char
    def readable(self):
        return True
    def seekable(self):
        return True
    def writable(self):
        return True
    def write(self, b):
        b = bytes(b)
        n = -1
        if self._blocker_char:
            try:
                n = b.index(self._blocker_char)
            except ValueError:
                pass
            else:
                # One-shot trigger: store the prefix, report partial write.
                self._blocker_char = None
                self._write_stack.append(b[:n])
                raise self.BlockingIOError(0, "test blocking", n)
        self._write_stack.append(b)
        return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
    """Non-blocking writer mock raising the C io.BlockingIOError."""
    BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
    """Non-blocking writer mock raising the pure-Python BlockingIOError."""
    BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
    """Generic tests of the io stack as a whole.

    Subclasses (CIOTest/PyIOTest) bind class attributes such as
    self.open, self.FileIO, self.BytesIO to the C or pure-Python
    implementation; every test runs against both.
    """
    def setUp(self):
        support.unlink(support.TESTFN)
    def tearDown(self):
        support.unlink(support.TESTFN)
    # Helper: exercise a writable stream with a fixed script of
    # write/seek/truncate calls and expected return values.
    def write_ops(self, f):
        self.assertEqual(f.write(b"blah."), 5)
        f.truncate(0)
        self.assertEqual(f.tell(), 5)
        f.seek(0)
        self.assertEqual(f.write(b"blah."), 5)
        self.assertEqual(f.seek(0), 0)
        self.assertEqual(f.write(b"Hello."), 6)
        self.assertEqual(f.tell(), 6)
        self.assertEqual(f.seek(-1, 1), 5)
        self.assertEqual(f.tell(), 5)
        self.assertEqual(f.write(bytearray(b" world\n\n\n")), 9)
        self.assertEqual(f.seek(0), 0)
        self.assertEqual(f.write(b"h"), 1)
        self.assertEqual(f.seek(-1, 2), 13)
        self.assertEqual(f.tell(), 13)
        self.assertEqual(f.truncate(12), 12)
        self.assertEqual(f.tell(), 13)
        self.assertRaises(TypeError, f.seek, 0.0)
    # Helper: exercise a readable stream containing b"hello world\n".
    def read_ops(self, f, buffered=False):
        data = f.read(5)
        self.assertEqual(data, b"hello")
        data = bytearray(data)
        self.assertEqual(f.readinto(data), 5)
        self.assertEqual(data, b" worl")
        self.assertEqual(f.readinto(data), 2)
        self.assertEqual(len(data), 5)
        self.assertEqual(data[:2], b"d\n")
        self.assertEqual(f.seek(0), 0)
        self.assertEqual(f.read(20), b"hello world\n")
        self.assertEqual(f.read(1), b"")
        self.assertEqual(f.readinto(bytearray(b"x")), 0)
        self.assertEqual(f.seek(-6, 2), 6)
        self.assertEqual(f.read(5), b"world")
        self.assertEqual(f.read(0), b"")
        self.assertEqual(f.readinto(bytearray()), 0)
        self.assertEqual(f.seek(-6, 1), 5)
        self.assertEqual(f.read(5), b" worl")
        self.assertEqual(f.tell(), 10)
        self.assertRaises(TypeError, f.seek, 0.0)
        if buffered:
            f.seek(0)
            self.assertEqual(f.read(), b"hello world\n")
            f.seek(6)
            self.assertEqual(f.read(), b"world\n")
            self.assertEqual(f.read(), b"")
    LARGE = 2**31
    # Helper: seek/write/truncate around the 2GB boundary.
    def large_file_ops(self, f):
        assert f.readable()
        assert f.writable()
        self.assertEqual(f.seek(self.LARGE), self.LARGE)
        self.assertEqual(f.tell(), self.LARGE)
        self.assertEqual(f.write(b"xxx"), 3)
        self.assertEqual(f.tell(), self.LARGE + 3)
        self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
        self.assertEqual(f.truncate(), self.LARGE + 2)
        self.assertEqual(f.tell(), self.LARGE + 2)
        self.assertEqual(f.seek(0, 2), self.LARGE + 2)
        self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
        self.assertEqual(f.tell(), self.LARGE + 2)
        self.assertEqual(f.seek(0, 2), self.LARGE + 1)
        self.assertEqual(f.seek(-1, 2), self.LARGE)
        self.assertEqual(f.read(2), b"x")
    def test_invalid_operations(self):
        # Try writing on a file opened in read mode and vice-versa.
        for mode in ("w", "wb"):
            with self.open(support.TESTFN, mode) as fp:
                self.assertRaises(IOError, fp.read)
                self.assertRaises(IOError, fp.readline)
        with self.open(support.TESTFN, "rb") as fp:
            self.assertRaises(IOError, fp.write, b"blah")
            self.assertRaises(IOError, fp.writelines, [b"blah\n"])
        with self.open(support.TESTFN, "r") as fp:
            self.assertRaises(IOError, fp.write, "blah")
            self.assertRaises(IOError, fp.writelines, ["blah\n"])
    def test_raw_file_io(self):
        with self.open(support.TESTFN, "wb", buffering=0) as f:
            self.assertEqual(f.readable(), False)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            self.write_ops(f)
        with self.open(support.TESTFN, "rb", buffering=0) as f:
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), False)
            self.assertEqual(f.seekable(), True)
            self.read_ops(f)
    def test_buffered_file_io(self):
        with self.open(support.TESTFN, "wb") as f:
            self.assertEqual(f.readable(), False)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            self.write_ops(f)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), False)
            self.assertEqual(f.seekable(), True)
            self.read_ops(f, True)
    def test_readline(self):
        with self.open(support.TESTFN, "wb") as f:
            f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.readline(), b"abc\n")
            self.assertEqual(f.readline(10), b"def\n")
            self.assertEqual(f.readline(2), b"xy")
            self.assertEqual(f.readline(4), b"zzy\n")
            self.assertEqual(f.readline(), b"foo\x00bar\n")
            self.assertEqual(f.readline(None), b"another line")
            self.assertRaises(TypeError, f.readline, 5.3)
        with self.open(support.TESTFN, "r") as f:
            self.assertRaises(TypeError, f.readline, 5.3)
    def test_raw_bytes_io(self):
        f = self.BytesIO()
        self.write_ops(f)
        data = f.getvalue()
        self.assertEqual(data, b"hello world\n")
        f = self.BytesIO(data)
        self.read_ops(f, True)
    def test_large_file_ops(self):
        # On Windows and Mac OSX this test comsumes large resources; It takes
        # a long time to build the >2GB file and takes >2GB of disk space
        # therefore the resource must be enabled to run this test.
        if sys.platform[:3] == 'win' or sys.platform == 'darwin':
            if not support.is_resource_enabled("largefile"):
                print("\nTesting large file ops skipped on %s." % sys.platform,
                      file=sys.stderr)
                print("It requires %d bytes and a long time." % self.LARGE,
                      file=sys.stderr)
                print("Use 'regrtest.py -u largefile test_io' to run it.",
                      file=sys.stderr)
                return
        with self.open(support.TESTFN, "w+b", 0) as f:
            self.large_file_ops(f)
        with self.open(support.TESTFN, "w+b") as f:
            self.large_file_ops(f)
    def test_with_open(self):
        for bufsize in (0, 1, 100):
            f = None
            with self.open(support.TESTFN, "wb", bufsize) as f:
                f.write(b"xxx")
            self.assertEqual(f.closed, True)
            f = None
            try:
                with self.open(support.TESTFN, "wb", bufsize) as f:
                    1/0
            except ZeroDivisionError:
                self.assertEqual(f.closed, True)
            else:
                self.fail("1/0 didn't raise an exception")
    # issue 5008
    def test_append_mode_tell(self):
        with self.open(support.TESTFN, "wb") as f:
            f.write(b"xxx")
        with self.open(support.TESTFN, "ab", buffering=0) as f:
            self.assertEqual(f.tell(), 3)
        with self.open(support.TESTFN, "ab") as f:
            self.assertEqual(f.tell(), 3)
        with self.open(support.TESTFN, "a") as f:
            self.assertTrue(f.tell() > 0)
    def test_destructor(self):
        record = []
        class MyFileIO(self.FileIO):
            def __del__(self):
                record.append(1)
                try:
                    f = super().__del__
                except AttributeError:
                    pass
                else:
                    f()
            def close(self):
                record.append(2)
                super().close()
            def flush(self):
                record.append(3)
                super().flush()
        f = MyFileIO(support.TESTFN, "wb")
        f.write(b"xxx")
        del f
        support.gc_collect()
        self.assertEqual(record, [1, 2, 3])
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"xxx")
    def _check_base_destructor(self, base):
        record = []
        class MyIO(base):
            def __init__(self):
                # This exercises the availability of attributes on object
                # destruction.
                # (in the C version, close() is called by the tp_dealloc
                # function, not by __del__)
                self.on_del = 1
                self.on_close = 2
                self.on_flush = 3
            def __del__(self):
                record.append(self.on_del)
                try:
                    f = super().__del__
                except AttributeError:
                    pass
                else:
                    f()
            def close(self):
                record.append(self.on_close)
                super().close()
            def flush(self):
                record.append(self.on_flush)
                super().flush()
        f = MyIO()
        del f
        support.gc_collect()
        self.assertEqual(record, [1, 2, 3])
    def test_IOBase_destructor(self):
        self._check_base_destructor(self.IOBase)
    def test_RawIOBase_destructor(self):
        self._check_base_destructor(self.RawIOBase)
    def test_BufferedIOBase_destructor(self):
        self._check_base_destructor(self.BufferedIOBase)
    def test_TextIOBase_destructor(self):
        self._check_base_destructor(self.TextIOBase)
    def test_close_flushes(self):
        with self.open(support.TESTFN, "wb") as f:
            f.write(b"xxx")
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"xxx")
    def test_array_writes(self):
        a = array.array('i', range(10))
        n = len(a.tostring())
        with self.open(support.TESTFN, "wb", 0) as f:
            self.assertEqual(f.write(a), n)
        with self.open(support.TESTFN, "wb") as f:
            self.assertEqual(f.write(a), n)
    def test_closefd(self):
        self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
                          closefd=False)
    def test_read_closed(self):
        with self.open(support.TESTFN, "w") as f:
            f.write("egg\n")
        with self.open(support.TESTFN, "r") as f:
            file = self.open(f.fileno(), "r", closefd=False)
            self.assertEqual(file.read(), "egg\n")
            file.seek(0)
            file.close()
            self.assertRaises(ValueError, file.read)
    def test_no_closefd_with_filename(self):
        # can't use closefd in combination with a file name
        self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
    def test_closefd_attr(self):
        with self.open(support.TESTFN, "wb") as f:
            f.write(b"egg\n")
        with self.open(support.TESTFN, "r") as f:
            self.assertEqual(f.buffer.raw.closefd, True)
            file = self.open(f.fileno(), "r", closefd=False)
            self.assertEqual(file.buffer.raw.closefd, False)
    def test_garbage_collection(self):
        # FileIO objects are collected, and collecting them flushes
        # all data to disk.
        f = self.FileIO(support.TESTFN, "wb")
        f.write(b"abcxxx")
        f.f = f
        wr = weakref.ref(f)
        del f
        support.gc_collect()
        self.assertTrue(wr() is None, wr)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"abcxxx")
    def test_unbounded_file(self):
        # Issue #1174606: reading from an unbounded stream such as /dev/zero.
        zero = "/dev/zero"
        if not os.path.exists(zero):
            self.skipTest("{0} does not exist".format(zero))
        if sys.maxsize > 0x7FFFFFFF:
            self.skipTest("test can only run in a 32-bit address space")
        if support.real_max_memuse < support._2G:
            self.skipTest("test requires at least 2GB of memory")
        with self.open(zero, "rb", buffering=0) as f:
            self.assertRaises(OverflowError, f.read)
        with self.open(zero, "rb") as f:
            self.assertRaises(OverflowError, f.read)
        with self.open(zero, "r") as f:
            self.assertRaises(OverflowError, f.read)
    def test_flush_error_on_close(self):
        f = self.open(support.TESTFN, "wb", buffering=0)
        def bad_flush():
            raise IOError()
        f.flush = bad_flush
        self.assertRaises(IOError, f.close) # exception not swallowed
    def test_multi_close(self):
        f = self.open(support.TESTFN, "wb", buffering=0)
        f.close()
        f.close()
        f.close()
        self.assertRaises(ValueError, f.flush)
    def test_RawIOBase_read(self):
        # Exercise the default RawIOBase.read() implementation (which calls
        # readinto() internally).
        rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
        self.assertEqual(rawio.read(2), b"ab")
        self.assertEqual(rawio.read(2), b"c")
        self.assertEqual(rawio.read(2), b"d")
        self.assertEqual(rawio.read(2), None)
        self.assertEqual(rawio.read(2), b"ef")
        self.assertEqual(rawio.read(2), b"g")
        self.assertEqual(rawio.read(2), None)
        self.assertEqual(rawio.read(2), b"")
class CIOTest(IOTest):
    """IOTest run against the C implementation (attributes bound below)."""
    pass
class PyIOTest(IOTest):
    """IOTest run against the pure-Python implementation."""
    pass
class CommonBufferedTests:
    # Tests common to BufferedReader, BufferedWriter and BufferedRandom
    # Subclasses provide self.tp (the buffered class under test) and the
    # Mock* raw stream classes for the matching implementation.
    def test_detach(self):
        raw = self.MockRawIO()
        buf = self.tp(raw)
        self.assertIs(buf.detach(), raw)
        self.assertRaises(ValueError, buf.detach)
    def test_fileno(self):
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        self.assertEqual(42, bufio.fileno())
    def test_no_fileno(self):
        # XXX will we always have fileno() function? If so, kill
        # this test. Else, write it.
        pass
    def test_invalid_args(self):
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        # Invalid whence
        self.assertRaises(ValueError, bufio.seek, 0, -1)
        self.assertRaises(ValueError, bufio.seek, 0, 3)
    def test_override_destructor(self):
        tp = self.tp
        record = []
        class MyBufferedIO(tp):
            def __del__(self):
                record.append(1)
                try:
                    f = super().__del__
                except AttributeError:
                    pass
                else:
                    f()
            def close(self):
                record.append(2)
                super().close()
            def flush(self):
                record.append(3)
                super().flush()
        rawio = self.MockRawIO()
        bufio = MyBufferedIO(rawio)
        writable = bufio.writable()
        del bufio
        support.gc_collect()
        if writable:
            self.assertEqual(record, [1, 2, 3])
        else:
            self.assertEqual(record, [1, 2])
    def test_context_manager(self):
        # Test usability as a context manager
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        def _with():
            with bufio:
                pass
        _with()
        # bufio should now be closed, and using it a second time should raise
        # a ValueError.
        self.assertRaises(ValueError, _with)
    def test_error_through_destructor(self):
        # Test that the exception state is not modified by a destructor,
        # even if close() fails.
        rawio = self.CloseFailureIO()
        def f():
            self.tp(rawio).xyzzy
        with support.captured_output("stderr") as s:
            self.assertRaises(AttributeError, f)
        s = s.getvalue().strip()
        if s:
            # The destructor *may* have printed an unraisable error, check it
            self.assertEqual(len(s.splitlines()), 1)
            self.assertTrue(s.startswith("Exception IOError: "), s)
            self.assertTrue(s.endswith(" ignored"), s)
    def test_repr(self):
        raw = self.MockRawIO()
        b = self.tp(raw)
        clsname = "%s.%s" % (self.tp.__module__, self.tp.__name__)
        self.assertEqual(repr(b), "<%s>" % clsname)
        raw.name = "dummy"
        self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
        raw.name = b"dummy"
        self.assertEqual(repr(b), "<%s name=b'dummy'>" % clsname)
    def test_flush_error_on_close(self):
        raw = self.MockRawIO()
        def bad_flush():
            raise IOError()
        raw.flush = bad_flush
        b = self.tp(raw)
        self.assertRaises(IOError, b.close) # exception not swallowed
    def test_multi_close(self):
        raw = self.MockRawIO()
        b = self.tp(raw)
        b.close()
        b.close()
        b.close()
        self.assertRaises(ValueError, b.flush)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
    """Behavioral tests for BufferedReader; self.tp is bound by subclasses."""
    read_mode = "rb"
    def test_constructor(self):
        rawio = self.MockRawIO([b"abc"])
        bufio = self.tp(rawio)
        bufio.__init__(rawio)
        bufio.__init__(rawio, buffer_size=1024)
        bufio.__init__(rawio, buffer_size=16)
        self.assertEqual(b"abc", bufio.read())
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        rawio = self.MockRawIO([b"abc"])
        bufio.__init__(rawio)
        self.assertEqual(b"abc", bufio.read())
    def test_read(self):
        for arg in (None, 7):
            rawio = self.MockRawIO((b"abc", b"d", b"efg"))
            bufio = self.tp(rawio)
            self.assertEqual(b"abcdefg", bufio.read(arg))
        # Invalid args
        self.assertRaises(ValueError, bufio.read, -2)
    def test_read1(self):
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"a", bufio.read(1))
        self.assertEqual(b"b", bufio.read1(1))
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(b"c", bufio.read1(100))
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(b"d", bufio.read1(100))
        self.assertEqual(rawio._reads, 2)
        self.assertEqual(b"efg", bufio.read1(100))
        self.assertEqual(rawio._reads, 3)
        self.assertEqual(b"", bufio.read1(100))
        self.assertEqual(rawio._reads, 4)
        # Invalid args
        self.assertRaises(ValueError, bufio.read1, -1)
    def test_readinto(self):
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        b = bytearray(2)
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ab")
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"cd")
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ef")
        self.assertEqual(bufio.readinto(b), 1)
        self.assertEqual(b, b"gf")
        self.assertEqual(bufio.readinto(b), 0)
        self.assertEqual(b, b"gf")
    def test_readlines(self):
        def bufio():
            rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
            return self.tp(rawio)
        self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
        self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
        self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
    def test_buffering(self):
        data = b"abcdefghi"
        dlen = len(data)
        # Each entry: [buffer size, buffered read sizes, expected raw reads].
        tests = [
            [ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
            [ 100, [ 3, 3, 3], [ dlen ] ],
            [ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
        ]
        for bufsize, buf_read_sizes, raw_read_sizes in tests:
            rawio = self.MockFileIO(data)
            bufio = self.tp(rawio, buffer_size=bufsize)
            pos = 0
            for nbytes in buf_read_sizes:
                self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
                pos += nbytes
            # this is mildly implementation-dependent
            self.assertEqual(rawio.read_history, raw_read_sizes)
    def test_read_non_blocking(self):
        # Inject some None's in there to simulate EWOULDBLOCK
        rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
        bufio = self.tp(rawio)
        self.assertEqual(b"abcd", bufio.read(6))
        self.assertEqual(b"e", bufio.read(1))
        self.assertEqual(b"fg", bufio.read())
        self.assertEqual(b"", bufio.peek(1))
        self.assertTrue(None is bufio.read())
        self.assertEqual(b"", bufio.read())
    def test_read_past_eof(self):
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"abcdefg", bufio.read(9000))
    def test_read_all(self):
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"abcdefg", bufio.read())
    @support.requires_resource('cpu')
    def test_threads(self):
        try:
            # Write out many bytes with exactly the same number of 0's,
            # 1's... 255's. This will help us check that concurrent reading
            # doesn't duplicate or forget contents.
            N = 1000
            l = list(range(256)) * N
            random.shuffle(l)
            s = bytes(bytearray(l))
            with self.open(support.TESTFN, "wb") as f:
                f.write(s)
            with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
                bufio = self.tp(raw, 8)
                errors = []
                results = []
                def f():
                    try:
                        # Intra-buffer read then buffer-flushing read
                        for n in cycle([1, 19]):
                            s = bufio.read(n)
                            if not s:
                                break
                            # list.append() is atomic
                            results.append(s)
                    except Exception as e:
                        errors.append(e)
                        raise
                threads = [threading.Thread(target=f) for x in range(20)]
                for t in threads:
                    t.start()
                time.sleep(0.02) # yield
                for t in threads:
                    t.join()
                self.assertFalse(errors,
                                 "the following exceptions were caught: %r" % errors)
                s = b''.join(results)
                for i in range(256):
                    c = bytes(bytearray([i]))
                    self.assertEqual(s.count(c), N)
        finally:
            support.unlink(support.TESTFN)
    def test_misbehaved_io(self):
        rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertRaises(IOError, bufio.seek, 0)
        self.assertRaises(IOError, bufio.tell)
    def test_no_extraneous_read(self):
        # Issue #9550; when the raw IO object has satisfied the read request,
        # we should not issue any additional reads, otherwise it may block
        # (e.g. socket).
        bufsize = 16
        for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
            rawio = self.MockRawIO([b"x" * n])
            bufio = self.tp(rawio, bufsize)
            self.assertEqual(bufio.read(n), b"x" * n)
            # Simple case: one raw read is enough to satisfy the request.
            self.assertEqual(rawio._extraneous_reads, 0,
                             "failed for {}: {} != 0".format(n, rawio._extraneous_reads))
            # A more complex case where two raw reads are needed to satisfy
            # the request.
            rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
            bufio = self.tp(rawio, bufsize)
            self.assertEqual(bufio.read(n), b"x" * n)
            self.assertEqual(rawio._extraneous_reads, 0,
                             "failed for {}: {} != 0".format(n, rawio._extraneous_reads))
class CBufferedReaderTest(BufferedReaderTest):
    """BufferedReaderTest against the C implementation, plus C-only checks."""
    tp = io.BufferedReader
    def test_constructor(self):
        BufferedReaderTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2GB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                              bufio.__init__, rawio, sys.maxsize)
    def test_initialization(self):
        rawio = self.MockRawIO([b"abc"])
        bufio = self.tp(rawio)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.read)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.read)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        self.assertRaises(ValueError, bufio.read)
    def test_misbehaved_io_read(self):
        rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading different, so that
        # checking this is not so easy.
        self.assertRaises(IOError, bufio.read, 10)
    def test_garbage_collection(self):
        # C BufferedReader objects are collected.
        # The Python version has __del__, so it ends into gc.garbage instead
        rawio = self.FileIO(support.TESTFN, "w+b")
        f = self.tp(rawio)
        f.f = f
        wr = weakref.ref(f)
        del f
        support.gc_collect()
        self.assertTrue(wr() is None, wr)
class PyBufferedReaderTest(BufferedReaderTest):
    """BufferedReaderTest against the pure-Python implementation."""
    tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
    """Behavioral tests for BufferedWriter; self.tp is bound by subclasses."""
    write_mode = "wb"
    def test_constructor(self):
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        bufio.__init__(rawio)
        bufio.__init__(rawio, buffer_size=1024)
        bufio.__init__(rawio, buffer_size=16)
        self.assertEqual(3, bufio.write(b"abc"))
        bufio.flush()
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        bufio.__init__(rawio)
        self.assertEqual(3, bufio.write(b"ghi"))
        bufio.flush()
        self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
    def test_detach_flush(self):
        raw = self.MockRawIO()
        buf = self.tp(raw)
        buf.write(b"howdy!")
        self.assertFalse(raw._write_stack)
        buf.detach()
        self.assertEqual(raw._write_stack, [b"howdy!"])
    def test_write(self):
        # Write to the buffered IO but don't overflow the buffer.
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.write(b"abc")
        self.assertFalse(writer._write_stack)
    def test_write_overflow(self):
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        contents = b"abcdefghijklmnop"
        for n in range(0, len(contents), 3):
            bufio.write(contents[n:n+3])
        flushed = b"".join(writer._write_stack)
        # At least (total - 8) bytes were implicitly flushed, perhaps more
        # depending on the implementation.
        self.assertTrue(flushed.startswith(contents[:-8]), flushed)
    def check_writes(self, intermediate_func):
        # Lots of writes, test the flushed output is as expected.
        contents = bytes(range(256)) * 1000
        n = 0
        writer = self.MockRawIO()
        bufio = self.tp(writer, 13)
        # Generator of write sizes: repeat each N 15 times then proceed to N+1
        def gen_sizes():
            for size in count(1):
                for i in range(15):
                    yield size
        sizes = gen_sizes()
        while n < len(contents):
            size = min(next(sizes), len(contents) - n)
            self.assertEqual(bufio.write(contents[n:n+size]), size)
            intermediate_func(bufio)
            n += size
        bufio.flush()
        self.assertEqual(contents, b"".join(writer._write_stack))
    def test_writes(self):
        self.check_writes(lambda bufio: None)
    def test_writes_and_flushes(self):
        self.check_writes(lambda bufio: bufio.flush())
    def test_writes_and_seeks(self):
        def _seekabs(bufio):
            pos = bufio.tell()
            bufio.seek(pos + 1, 0)
            bufio.seek(pos - 1, 0)
            bufio.seek(pos, 0)
        self.check_writes(_seekabs)
        def _seekrel(bufio):
            pos = bufio.seek(0, 1)
            bufio.seek(+1, 1)
            bufio.seek(-1, 1)
            bufio.seek(pos, 0)
        self.check_writes(_seekrel)
    def test_writes_and_truncates(self):
        self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
    def test_write_non_blocking(self):
        raw = self.MockNonBlockWriterIO()
        bufio = self.tp(raw, 8)
        self.assertEqual(bufio.write(b"abcd"), 4)
        self.assertEqual(bufio.write(b"efghi"), 5)
        # 1 byte will be written, the rest will be buffered
        raw.block_on(b"k")
        self.assertEqual(bufio.write(b"jklmn"), 5)
        # 8 bytes will be written, 8 will be buffered and the rest will be lost
        raw.block_on(b"0")
        try:
            bufio.write(b"opqrwxyz0123456789")
        except self.BlockingIOError as e:
            written = e.characters_written
        else:
            self.fail("BlockingIOError should have been raised")
        self.assertEqual(written, 16)
        self.assertEqual(raw.pop_written(),
                         b"abcdefghijklmnopqrwxyz")
        self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
        s = raw.pop_written()
        # Previously buffered bytes were flushed
        self.assertTrue(s.startswith(b"01234567A"), s)
    def test_write_and_rewind(self):
        raw = io.BytesIO()
        bufio = self.tp(raw, 4)
        self.assertEqual(bufio.write(b"abcdef"), 6)
        self.assertEqual(bufio.tell(), 6)
        bufio.seek(0, 0)
        self.assertEqual(bufio.write(b"XY"), 2)
        bufio.seek(6, 0)
        self.assertEqual(raw.getvalue(), b"XYcdef")
        self.assertEqual(bufio.write(b"123456"), 6)
        bufio.flush()
        self.assertEqual(raw.getvalue(), b"XYcdef123456")
    def test_flush(self):
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.write(b"abc")
        bufio.flush()
        self.assertEqual(b"abc", writer._write_stack[0])
    def test_destructor(self):
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.write(b"abc")
        del bufio
        support.gc_collect()
        self.assertEqual(b"abc", writer._write_stack[0])
    def test_truncate(self):
        # Truncate implicitly flushes the buffer.
        with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
            bufio = self.tp(raw, 8)
            bufio.write(b"abcdef")
            self.assertEqual(bufio.truncate(3), 3)
            self.assertEqual(bufio.tell(), 6)
        with self.open(support.TESTFN, "rb", buffering=0) as f:
            self.assertEqual(f.read(), b"abc")
    @support.requires_resource('cpu')
    def test_threads(self):
        try:
            # Write out many bytes from many threads and test they were
            # all flushed.
            N = 1000
            contents = bytes(range(256)) * N
            sizes = cycle([1, 19])
            n = 0
            queue = deque()
            while n < len(contents):
                size = next(sizes)
                queue.append(contents[n:n+size])
                n += size
            del contents
            # We use a real file object because it allows us to
            # exercise situations where the GIL is released before
            # writing the buffer to the raw streams. This is in addition
            # to concurrency issues due to switching threads in the middle
            # of Python code.
            with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
                bufio = self.tp(raw, 8)
                errors = []
                def f():
                    try:
                        while True:
                            try:
                                s = queue.popleft()
                            except IndexError:
                                return
                            bufio.write(s)
                    except Exception as e:
                        errors.append(e)
                        raise
                threads = [threading.Thread(target=f) for x in range(20)]
                for t in threads:
                    t.start()
                time.sleep(0.02) # yield
                for t in threads:
                    t.join()
                self.assertFalse(errors,
                                 "the following exceptions were caught: %r" % errors)
                bufio.close()
            with self.open(support.TESTFN, "rb") as f:
                s = f.read()
            for i in range(256):
                self.assertEqual(s.count(bytes([i])), N)
        finally:
            support.unlink(support.TESTFN)
    def test_misbehaved_io(self):
        rawio = self.MisbehavedRawIO()
        bufio = self.tp(rawio, 5)
        self.assertRaises(IOError, bufio.seek, 0)
        self.assertRaises(IOError, bufio.tell)
        self.assertRaises(IOError, bufio.write, b"abcdef")
    def test_max_buffer_size_deprecation(self):
        with support.check_warnings() as w:
            warnings.simplefilter("always", DeprecationWarning)
            self.tp(self.MockRawIO(), 8, 12)
            self.assertEqual(len(w.warnings), 1)
            warning = w.warnings[0]
            self.assertTrue(warning.category is DeprecationWarning)
            self.assertEqual(str(warning.message),
                             "max_buffer_size is deprecated")
class CBufferedWriterTest(BufferedWriterTest):
    """BufferedWriterTest against the C implementation, plus C-only checks."""
    tp = io.BufferedWriter
    def test_constructor(self):
        BufferedWriterTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2GB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                              bufio.__init__, rawio, sys.maxsize)
    def test_initialization(self):
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.write, b"def")
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.write, b"def")
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        self.assertRaises(ValueError, bufio.write, b"def")
    def test_garbage_collection(self):
        # C BufferedWriter objects are collected, and collecting them flushes
        # all data to disk.
        # The Python version has __del__, so it ends into gc.garbage instead
        rawio = self.FileIO(support.TESTFN, "w+b")
        f = self.tp(rawio)
        f.write(b"123xxx")
        f.x = f
        wr = weakref.ref(f)
        del f
        support.gc_collect()
        self.assertTrue(wr() is None, wr)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"123xxx")
class PyBufferedWriterTest(BufferedWriterTest):
    """BufferedWriterTest against the pure-Python implementation."""
    tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
    """Tests for BufferedRWPair, which couples an independent reader and
    writer raw stream into one buffered read/write object.  Subclasses set
    ``tp`` to the C or pure-Python implementation under test."""

    def test_constructor(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.closed)
    def test_detach(self):
        # A pair has two underlying raw streams, so detach() is unsupported.
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertRaises(self.UnsupportedOperation, pair.detach)
    def test_constructor_max_buffer_size_deprecation(self):
        with support.check_warnings() as w:
            warnings.simplefilter("always", DeprecationWarning)
            self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
            self.assertEqual(len(w.warnings), 1)
            warning = w.warnings[0]
            self.assertTrue(warning.category is DeprecationWarning)
            self.assertEqual(str(warning.message),
                             "max_buffer_size is deprecated")
    def test_constructor_with_not_readable(self):
        class NotReadable(MockRawIO):
            def readable(self):
                return False
        self.assertRaises(IOError, self.tp, NotReadable(), self.MockRawIO())
    def test_constructor_with_not_writeable(self):
        class NotWriteable(MockRawIO):
            def writable(self):
                return False
        self.assertRaises(IOError, self.tp, self.MockRawIO(), NotWriteable())
    def test_read(self):
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        self.assertEqual(pair.read(3), b"abc")
        self.assertEqual(pair.read(1), b"d")
        self.assertEqual(pair.read(), b"ef")
        pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
        self.assertEqual(pair.read(None), b"abc")
    def test_readlines(self):
        pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
        self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
        self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
        self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
    def test_read1(self):
        # .read1() is delegated to the underlying reader object, so this test
        # can be shallow.
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        self.assertEqual(pair.read1(3), b"abc")
    def test_readinto(self):
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        data = bytearray(5)
        self.assertEqual(pair.readinto(data), 5)
        self.assertEqual(data, b"abcde")
    def test_write(self):
        w = self.MockRawIO()
        pair = self.tp(self.MockRawIO(), w)
        pair.write(b"abc")
        pair.flush()
        pair.write(b"def")
        pair.flush()
        self.assertEqual(w._write_stack, [b"abc", b"def"])
    def test_peek(self):
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        self.assertTrue(pair.peek(3).startswith(b"abc"))
        self.assertEqual(pair.read(3), b"abc")
    def test_readable(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertTrue(pair.readable())
    def test_writeable(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertTrue(pair.writable())
    def test_seekable(self):
        # BufferedRWPairs are never seekable, even if their readers and writers
        # are.
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.seekable())
    # .flush() is delegated to the underlying writer object and has been
    # tested in the test_write method.
    def test_close_and_closed(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.closed)
        pair.close()
        self.assertTrue(pair.closed)
    def test_isatty(self):
        # The pair is a tty if EITHER of its halves is a tty.
        class SelectableIsAtty(MockRawIO):
            def __init__(self, isatty):
                MockRawIO.__init__(self)
                self._isatty = isatty
            def isatty(self):
                return self._isatty
        pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
        self.assertFalse(pair.isatty())
        pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
        self.assertTrue(pair.isatty())
        pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
        self.assertTrue(pair.isatty())
        pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
        self.assertTrue(pair.isatty())
class CBufferedRWPairTest(BufferedRWPairTest):
    """Run the BufferedRWPair tests against the C implementation."""
    tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
    """Run the BufferedRWPair tests against the pure-Python implementation."""
    tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
    """Tests for BufferedRandom: inherits the reader and writer suites and
    adds tests for mixed read/write/seek interactions on one object."""
    read_mode = "rb+"
    write_mode = "wb+"
    def test_constructor(self):
        BufferedReaderTest.test_constructor(self)
        BufferedWriterTest.test_constructor(self)
    def test_read_and_write(self):
        raw = self.MockRawIO((b"asdf", b"ghjk"))
        rw = self.tp(raw, 8)
        self.assertEqual(b"as", rw.read(2))
        rw.write(b"ddd")
        rw.write(b"eee")
        self.assertFalse(raw._write_stack) # Buffer writes
        self.assertEqual(b"ghjk", rw.read())
        self.assertEqual(b"dddeee", raw._write_stack[0])
    def test_seek_and_tell(self):
        raw = self.BytesIO(b"asdfghjkl")
        rw = self.tp(raw)
        self.assertEqual(b"as", rw.read(2))
        self.assertEqual(2, rw.tell())
        rw.seek(0, 0)
        self.assertEqual(b"asdf", rw.read(4))
        rw.write(b"asdf")
        rw.seek(0, 0)
        self.assertEqual(b"asdfasdfl", rw.read())
        self.assertEqual(9, rw.tell())
        rw.seek(-4, 2)
        self.assertEqual(5, rw.tell())
        rw.seek(2, 1)
        self.assertEqual(7, rw.tell())
        self.assertEqual(b"fl", rw.read(11))
        self.assertRaises(TypeError, rw.seek, 0.0)
    def check_flush_and_read(self, read_func):
        # Shared driver: interleave writes with the given read callable and
        # check that flush() keeps position/data consistent.
        raw = self.BytesIO(b"abcdefghi")
        bufio = self.tp(raw)
        self.assertEqual(b"ab", read_func(bufio, 2))
        bufio.write(b"12")
        self.assertEqual(b"ef", read_func(bufio, 2))
        self.assertEqual(6, bufio.tell())
        bufio.flush()
        self.assertEqual(6, bufio.tell())
        self.assertEqual(b"ghi", read_func(bufio))
        raw.seek(0, 0)
        raw.write(b"XYZ")
        # flush() resets the read buffer
        bufio.flush()
        bufio.seek(0, 0)
        self.assertEqual(b"XYZ", read_func(bufio, 3))
    def test_flush_and_read(self):
        self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
    def test_flush_and_readinto(self):
        def _readinto(bufio, n=-1):
            b = bytearray(n if n >= 0 else 9999)
            n = bufio.readinto(b)
            return bytes(b[:n])
        self.check_flush_and_read(_readinto)
    def test_flush_and_peek(self):
        def _peek(bufio, n=-1):
            # This relies on the fact that the buffer can contain the whole
            # raw stream, otherwise peek() can return less.
            b = bufio.peek(n)
            if n != -1:
                b = b[:n]
            bufio.seek(len(b), 1)
            return b
        self.check_flush_and_read(_peek)
    def test_flush_and_write(self):
        raw = self.BytesIO(b"abcdefghi")
        bufio = self.tp(raw)
        bufio.write(b"123")
        bufio.flush()
        bufio.write(b"45")
        bufio.flush()
        bufio.seek(0, 0)
        self.assertEqual(b"12345fghi", raw.getvalue())
        self.assertEqual(b"12345fghi", bufio.read())
    def test_threads(self):
        BufferedReaderTest.test_threads(self)
        BufferedWriterTest.test_threads(self)
    def test_writes_and_peek(self):
        def _peek(bufio):
            bufio.peek(1)
        self.check_writes(_peek)
        def _peek(bufio):
            pos = bufio.tell()
            bufio.seek(-1, 1)
            bufio.peek(1)
            bufio.seek(pos, 0)
        self.check_writes(_peek)
    def test_writes_and_reads(self):
        def _read(bufio):
            bufio.seek(-1, 1)
            bufio.read(1)
        self.check_writes(_read)
    def test_writes_and_read1s(self):
        def _read1(bufio):
            bufio.seek(-1, 1)
            bufio.read1(1)
        self.check_writes(_read1)
    def test_writes_and_readintos(self):
        def _read(bufio):
            bufio.seek(-1, 1)
            bufio.readinto(bytearray(1))
        self.check_writes(_read)
    def test_write_after_readahead(self):
        # Issue #6629: writing after the buffer was filled by readahead should
        # first rewind the raw stream.
        for overwrite_size in [1, 5]:
            raw = self.BytesIO(b"A" * 10)
            bufio = self.tp(raw, 4)
            # Trigger readahead
            self.assertEqual(bufio.read(1), b"A")
            self.assertEqual(bufio.tell(), 1)
            # Overwriting should rewind the raw stream if it needs so
            bufio.write(b"B" * overwrite_size)
            self.assertEqual(bufio.tell(), overwrite_size + 1)
            # If the write size was smaller than the buffer size, flush() and
            # check that rewind happens.
            bufio.flush()
            self.assertEqual(bufio.tell(), overwrite_size + 1)
            s = raw.getvalue()
            self.assertEqual(s,
                b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
    def test_truncate_after_read_or_write(self):
        raw = self.BytesIO(b"A" * 10)
        bufio = self.tp(raw, 100)
        self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
        self.assertEqual(bufio.truncate(), 2)
        self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
        self.assertEqual(bufio.truncate(), 4)
    def test_misbehaved_io(self):
        BufferedReaderTest.test_misbehaved_io(self)
        BufferedWriterTest.test_misbehaved_io(self)
class CBufferedRandomTest(BufferedRandomTest):
    """Run the BufferedRandom tests against the C implementation, plus
    C-specific constructor-overflow and garbage-collection checks."""
    tp = io.BufferedRandom
    def test_constructor(self):
        BufferedRandomTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2GB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                              bufio.__init__, rawio, sys.maxsize)
    def test_garbage_collection(self):
        # Reuse the C-specific GC checks of the reader and writer variants.
        CBufferedReaderTest.test_garbage_collection(self)
        CBufferedWriterTest.test_garbage_collection(self)
class PyBufferedRandomTest(BufferedRandomTest):
    """Run the BufferedRandom tests against the pure-Python implementation."""
    tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
    """
    A deliberately stateful, buffering decoder for exercising seek/tell.

    The byte stream is a sequence of words, either fixed-length (the
    current input length I) or, when I == 0, variable-length and
    terminated by a period (consecutive periods are skipped).  Control
    words:

    - 'i' followed by digits sets the input length I (capped at 99;
      I == 0 switches to period-terminated words).
    - 'o' followed by digits sets the output length O (capped at 99).

    Any other word is emitted followed by a period, padded with hyphens
    or truncated so its length equals O (emitted verbatim when O == 0).
    Both I and O start at 1.  EOF flushes whatever word is pending.
    """

    # Toggled by the tests that want 'test_decoder' to resolve (see
    # lookupTestDecoder below).
    codecEnabled = False

    def __init__(self, errors='strict'):
        codecs.IncrementalDecoder.__init__(self, errors)
        self.reset()

    def __repr__(self):
        return '<SID %x>' % id(self)

    def reset(self):
        self.i = 1
        self.o = 1
        self.buffer = bytearray()

    def getstate(self):
        # XOR the lengths with 1 so that a freshly reset decoder reports
        # flags == 0, as the seek/tell machinery expects.
        flags = (self.i ^ 1) * 100 + (self.o ^ 1)
        return bytes(self.buffer), flags

    def setstate(self, state):
        pending, flags = state
        self.buffer = bytearray(pending)
        i, o = divmod(flags, 100)
        self.i = i ^ 1
        self.o = o ^ 1

    def decode(self, input, final=False):
        pieces = []
        for byte in input:
            if self.i == 0:
                # Variable-length mode: '.' terminates a word; empty
                # words (extra periods) produce nothing.
                if byte != ord('.'):
                    self.buffer.append(byte)
                elif self.buffer:
                    pieces.append(self.process_word())
            else:
                # Fixed-length mode: flush after exactly self.i bytes.
                self.buffer.append(byte)
                if len(self.buffer) == self.i:
                    pieces.append(self.process_word())
        if final and self.buffer:
            # EOF terminates the last, possibly short, word.
            pieces.append(self.process_word())
        return ''.join(pieces)

    def process_word(self):
        # Consume the buffered word: either a control word ('i'/'o') that
        # updates the lengths, or a payload word that is formatted out.
        output = ''
        first = self.buffer[0]
        if first == ord('i'):
            self.i = min(99, int(self.buffer[1:] or 0))    # new input length
        elif first == ord('o'):
            self.o = min(99, int(self.buffer[1:] or 0))    # new output length
        else:
            output = self.buffer.decode('ascii')
            if len(output) < self.o:
                output += '-' * self.o    # pad; truncated just below
            if self.o:
                output = output[:self.o]
            output += '.'
        self.buffer = bytearray()
        return output

    @classmethod
    def lookupTestDecoder(cls, name):
        if cls.codecEnabled and name == 'test_decoder':
            latin1 = codecs.lookup('latin-1')
            return codecs.CodecInfo(
                name='test_decoder', encode=latin1.encode, decode=None,
                incrementalencoder=None,
                streamreader=None, streamwriter=None,
                incrementaldecoder=cls)
# Register the above decoder so that opening a stream with
# encoding='test_decoder' works.  It resolves to None (disabled) until a
# test sets StatefulIncrementalDecoder.codecEnabled.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
    """
    Make sure the StatefulIncrementalDecoder actually works.
    """

    # Each entry is (input bytes, final flag, expected decoded output).
    test_cases = [
        # I=1, O=1 (fixed-length input == fixed-length output)
        (b'abcd', False, 'a.b.c.d.'),
        # I=0, O=0 (variable-length input, variable-length output)
        (b'oiabcd', True, 'abcd.'),
        # I=0, O=0 (should ignore extra periods)
        (b'oi...abcd...', True, 'abcd.'),
        # I=0, O=6 (variable-length input, fixed-length output)
        (b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
        # I=2, O=6 (fixed-length input < fixed-length output)
        (b'i.i2.o6xyz', True, 'xy----.z-----.'),
        # I=6, O=3 (fixed-length input > fixed-length output)
        (b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
        # I=0, then 3; O=29, then 15 (with longer output)
        (b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
            'a----------------------------.' +
            'b----------------------------.' +
            'cde--------------------------.' +
            'abcdefghijabcde.' +
            'a.b------------.' +
            '.c.------------.' +
            'd.e------------.' +
            'k--------------.' +
            'l--------------.' +
            'm--------------.')
    ]

    def test_decoder(self):
        # Try a few one-shot test cases.
        for input, eof, output in self.test_cases:
            d = StatefulIncrementalDecoder()
            self.assertEqual(d.decode(input, eof), output)
        # Also test an unfinished decode, followed by forcing EOF.
        d = StatefulIncrementalDecoder()
        self.assertEqual(d.decode(b'oiabcd'), '')
        self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
    """Tests for TextIOWrapper: construction, encodings and error handlers,
    newline translation, seek/tell cookies, destructors and threading.
    Subclasses bind the C or pure-Python implementation via self.TextIOWrapper
    and friends."""

    def setUp(self):
        # Raw test data mixes \r, \n and \r\n line endings; 'normalized' is
        # what universal-newline translation should produce from it.
        self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
        self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
        support.unlink(support.TESTFN)
    def tearDown(self):
        support.unlink(support.TESTFN)
    def test_constructor(self):
        r = self.BytesIO(b"\xc3\xa9\n\n")
        b = self.BufferedReader(r, 1000)
        t = self.TextIOWrapper(b)
        t.__init__(b, encoding="latin1", newline="\r\n")
        self.assertEqual(t.encoding, "latin1")
        self.assertEqual(t.line_buffering, False)
        t.__init__(b, encoding="utf8", line_buffering=True)
        self.assertEqual(t.encoding, "utf8")
        self.assertEqual(t.line_buffering, True)
        self.assertEqual("\xe9\n", t.readline())
        self.assertRaises(TypeError, t.__init__, b, newline=42)
        self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
    def test_detach(self):
        r = self.BytesIO()
        b = self.BufferedWriter(r)
        t = self.TextIOWrapper(b)
        self.assertIs(t.detach(), b)
        t = self.TextIOWrapper(b, encoding="ascii")
        t.write("howdy")
        self.assertFalse(r.getvalue())
        t.detach()
        self.assertEqual(r.getvalue(), b"howdy")
        self.assertRaises(ValueError, t.detach)
    def test_repr(self):
        raw = self.BytesIO("hello".encode("utf-8"))
        b = self.BufferedReader(raw)
        t = self.TextIOWrapper(b, encoding="utf-8")
        modname = self.TextIOWrapper.__module__
        self.assertEqual(repr(t),
                         "<%s.TextIOWrapper encoding='utf-8'>" % modname)
        raw.name = "dummy"
        self.assertEqual(repr(t),
                         "<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
        raw.name = b"dummy"
        self.assertEqual(repr(t),
                         "<%s.TextIOWrapper name=b'dummy' encoding='utf-8'>" % modname)
    def test_line_buffering(self):
        r = self.BytesIO()
        b = self.BufferedWriter(r, 1000)
        t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
        t.write("X")
        self.assertEqual(r.getvalue(), b"")  # No flush happened
        t.write("Y\nZ")
        self.assertEqual(r.getvalue(), b"XY\nZ")  # All got flushed
        t.write("A\rB")
        self.assertEqual(r.getvalue(), b"XY\nZA\rB")
    def test_encoding(self):
        # Check the encoding attribute is always set, and valid
        b = self.BytesIO()
        t = self.TextIOWrapper(b, encoding="utf8")
        self.assertEqual(t.encoding, "utf8")
        t = self.TextIOWrapper(b)
        self.assertTrue(t.encoding is not None)
        codecs.lookup(t.encoding)
    def test_encoding_errors_reading(self):
        # (1) default
        b = self.BytesIO(b"abc\n\xff\n")
        t = self.TextIOWrapper(b, encoding="ascii")
        self.assertRaises(UnicodeError, t.read)
        # (2) explicit strict
        b = self.BytesIO(b"abc\n\xff\n")
        t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
        self.assertRaises(UnicodeError, t.read)
        # (3) ignore
        b = self.BytesIO(b"abc\n\xff\n")
        t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
        self.assertEqual(t.read(), "abc\n\n")
        # (4) replace
        b = self.BytesIO(b"abc\n\xff\n")
        t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
        self.assertEqual(t.read(), "abc\n\ufffd\n")
    def test_encoding_errors_writing(self):
        # (1) default
        b = self.BytesIO()
        t = self.TextIOWrapper(b, encoding="ascii")
        self.assertRaises(UnicodeError, t.write, "\xff")
        # (2) explicit strict
        b = self.BytesIO()
        t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
        self.assertRaises(UnicodeError, t.write, "\xff")
        # (3) ignore
        b = self.BytesIO()
        t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
                               newline="\n")
        t.write("abc\xffdef\n")
        t.flush()
        self.assertEqual(b.getvalue(), b"abcdef\n")
        # (4) replace
        b = self.BytesIO()
        t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
                               newline="\n")
        t.write("abc\xffdef\n")
        t.flush()
        self.assertEqual(b.getvalue(), b"abc?def\n")
    def test_newlines(self):
        input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
        tests = [
            [ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
            [ '', input_lines ],
            [ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
            [ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
            [ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
        ]
        encodings = (
            'utf-8', 'latin-1',
            'utf-16', 'utf-16-le', 'utf-16-be',
            'utf-32', 'utf-32-le', 'utf-32-be',
        )
        # Try a range of buffer sizes to test the case where \r is the last
        # character in TextIOWrapper._pending_line.
        for encoding in encodings:
            # XXX: str.encode() should return bytes
            data = bytes(''.join(input_lines).encode(encoding))
            for do_reads in (False, True):
                for bufsize in range(1, 10):
                    for newline, exp_lines in tests:
                        bufio = self.BufferedReader(self.BytesIO(data), bufsize)
                        textio = self.TextIOWrapper(bufio, newline=newline,
                                                    encoding=encoding)
                        if do_reads:
                            got_lines = []
                            while True:
                                c2 = textio.read(2)
                                if c2 == '':
                                    break
                                self.assertEqual(len(c2), 2)
                                got_lines.append(c2 + textio.readline())
                        else:
                            got_lines = list(textio)
                        for got_line, exp_line in zip(got_lines, exp_lines):
                            self.assertEqual(got_line, exp_line)
                        self.assertEqual(len(got_lines), len(exp_lines))
    def test_newlines_input(self):
        testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
        normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
        for newline, expected in [
            (None, normalized.decode("ascii").splitlines(True)),
            ("", testdata.decode("ascii").splitlines(True)),
            ("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
            ("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
            ("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
            ]:
            buf = self.BytesIO(testdata)
            txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
            self.assertEqual(txt.readlines(), expected)
            txt.seek(0)
            self.assertEqual(txt.read(), "".join(expected))
    def test_newlines_output(self):
        testdict = {
            "": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
            "\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
            "\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
            "\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
            }
        tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
        for newline, expected in tests:
            buf = self.BytesIO()
            txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
            txt.write("AAA\nB")
            txt.write("BB\nCCC\n")
            txt.write("X\rY\r\nZ")
            txt.flush()
            self.assertEqual(buf.closed, False)
            self.assertEqual(buf.getvalue(), expected)
    def test_destructor(self):
        # Dropping the wrapper must flush pending text before the underlying
        # buffer is closed.
        l = []
        base = self.BytesIO
        class MyBytesIO(base):
            def close(self):
                l.append(self.getvalue())
                base.close(self)
        b = MyBytesIO()
        t = self.TextIOWrapper(b, encoding="ascii")
        t.write("abc")
        del t
        support.gc_collect()
        self.assertEqual([b"abc"], l)
    def test_override_destructor(self):
        # __del__ must trigger close(), which must trigger flush().
        record = []
        class MyTextIO(self.TextIOWrapper):
            def __del__(self):
                record.append(1)
                try:
                    f = super().__del__
                except AttributeError:
                    pass
                else:
                    f()
            def close(self):
                record.append(2)
                super().close()
            def flush(self):
                record.append(3)
                super().flush()
        b = self.BytesIO()
        t = MyTextIO(b, encoding="ascii")
        del t
        support.gc_collect()
        self.assertEqual(record, [1, 2, 3])
    def test_error_through_destructor(self):
        # Test that the exception state is not modified by a destructor,
        # even if close() fails.
        rawio = self.CloseFailureIO()
        def f():
            self.TextIOWrapper(rawio).xyzzy
        with support.captured_output("stderr") as s:
            self.assertRaises(AttributeError, f)
        s = s.getvalue().strip()
        if s:
            # The destructor *may* have printed an unraisable error, check it
            self.assertEqual(len(s.splitlines()), 1)
            self.assertTrue(s.startswith("Exception IOError: "), s)
            self.assertTrue(s.endswith(" ignored"), s)
    # Systematic tests of the text I/O API
    def test_basic_io(self):
        for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
            for enc in "ascii", "latin1", "utf8" :# , "utf-16-be", "utf-16-le":
                f = self.open(support.TESTFN, "w+", encoding=enc)
                f._CHUNK_SIZE = chunksize
                self.assertEqual(f.write("abc"), 3)
                f.close()
                f = self.open(support.TESTFN, "r+", encoding=enc)
                f._CHUNK_SIZE = chunksize
                self.assertEqual(f.tell(), 0)
                self.assertEqual(f.read(), "abc")
                cookie = f.tell()
                self.assertEqual(f.seek(0), 0)
                self.assertEqual(f.read(None), "abc")
                f.seek(0)
                self.assertEqual(f.read(2), "ab")
                self.assertEqual(f.read(1), "c")
                self.assertEqual(f.read(1), "")
                self.assertEqual(f.read(), "")
                self.assertEqual(f.tell(), cookie)
                self.assertEqual(f.seek(0), 0)
                self.assertEqual(f.seek(0, 2), cookie)
                self.assertEqual(f.write("def"), 3)
                self.assertEqual(f.seek(cookie), cookie)
                self.assertEqual(f.read(), "def")
                if enc.startswith("utf"):
                    self.multi_line_test(f, enc)
                f.close()
    def multi_line_test(self, f, enc):
        # Helper: write lines of varying length and check that the (pos, line)
        # pairs seen while reading back match what was written.
        f.seek(0)
        f.truncate()
        sample = "s\xff\u0fff\uffff"
        wlines = []
        for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
            chars = []
            for i in range(size):
                chars.append(sample[i % len(sample)])
            line = "".join(chars) + "\n"
            wlines.append((f.tell(), line))
            f.write(line)
        f.seek(0)
        rlines = []
        while True:
            pos = f.tell()
            line = f.readline()
            if not line:
                break
            rlines.append((pos, line))
        self.assertEqual(rlines, wlines)
    def test_telling(self):
        f = self.open(support.TESTFN, "w+", encoding="utf8")
        p0 = f.tell()
        f.write("\xff\n")
        p1 = f.tell()
        f.write("\xff\n")
        p2 = f.tell()
        f.seek(0)
        self.assertEqual(f.tell(), p0)
        self.assertEqual(f.readline(), "\xff\n")
        self.assertEqual(f.tell(), p1)
        self.assertEqual(f.readline(), "\xff\n")
        self.assertEqual(f.tell(), p2)
        f.seek(0)
        # tell() is not usable while iterating the file directly.
        for line in f:
            self.assertEqual(line, "\xff\n")
            self.assertRaises(IOError, f.tell)
        self.assertEqual(f.tell(), p2)
        f.close()
    def test_seeking(self):
        chunk_size = _default_chunk_size()
        prefix_size = chunk_size - 2
        u_prefix = "a" * prefix_size
        prefix = bytes(u_prefix.encode("utf-8"))
        self.assertEqual(len(u_prefix), len(prefix))
        u_suffix = "\u8888\n"
        suffix = bytes(u_suffix.encode("utf-8"))
        line = prefix + suffix
        f = self.open(support.TESTFN, "wb")
        f.write(line*2)
        f.close()
        f = self.open(support.TESTFN, "r", encoding="utf-8")
        s = f.read(prefix_size)
        self.assertEqual(s, str(prefix, "ascii"))
        self.assertEqual(f.tell(), prefix_size)
        self.assertEqual(f.readline(), u_suffix)
    def test_seeking_too(self):
        # Regression test for a specific bug
        data = b'\xe0\xbf\xbf\n'
        f = self.open(support.TESTFN, "wb")
        f.write(data)
        f.close()
        f = self.open(support.TESTFN, "r", encoding="utf-8")
        f._CHUNK_SIZE  # Just test that it exists
        f._CHUNK_SIZE = 2
        f.readline()
        f.tell()
    def test_seek_and_tell(self):
        #Test seek/tell using the StatefulIncrementalDecoder.
        # Make test faster by doing smaller seeks
        CHUNK_SIZE = 128
        def test_seek_and_tell_with_data(data, min_pos=0):
            """Tell/seek to various points within a data stream and ensure
            that the decoded data returned by read() is consistent."""
            f = self.open(support.TESTFN, 'wb')
            f.write(data)
            f.close()
            f = self.open(support.TESTFN, encoding='test_decoder')
            f._CHUNK_SIZE = CHUNK_SIZE
            decoded = f.read()
            f.close()
            for i in range(min_pos, len(decoded) + 1): # seek positions
                for j in [1, 5, len(decoded) - i]: # read lengths
                    f = self.open(support.TESTFN, encoding='test_decoder')
                    self.assertEqual(f.read(i), decoded[:i])
                    cookie = f.tell()
                    self.assertEqual(f.read(j), decoded[i:i + j])
                    f.seek(cookie)
                    self.assertEqual(f.read(), decoded[i:])
                    f.close()
        # Enable the test decoder.
        StatefulIncrementalDecoder.codecEnabled = 1
        # Run the tests.
        try:
            # Try each test case.
            for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
                test_seek_and_tell_with_data(input)
            # Position each test case so that it crosses a chunk boundary.
            for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
                offset = CHUNK_SIZE - len(input)//2
                prefix = b'.'*offset
                # Don't bother seeking into the prefix (takes too long).
                min_pos = offset*2
                test_seek_and_tell_with_data(prefix + input, min_pos)
        # Ensure our test decoder won't interfere with subsequent tests.
        finally:
            StatefulIncrementalDecoder.codecEnabled = 0
    def test_encoded_writes(self):
        data = "1234567890"
        tests = ("utf-16",
                 "utf-16-le",
                 "utf-16-be",
                 "utf-32",
                 "utf-32-le",
                 "utf-32-be")
        for encoding in tests:
            buf = self.BytesIO()
            f = self.TextIOWrapper(buf, encoding=encoding)
            # Check if the BOM is written only once (see issue1753).
            f.write(data)
            f.write(data)
            f.seek(0)
            self.assertEqual(f.read(), data * 2)
            f.seek(0)
            self.assertEqual(f.read(), data * 2)
            self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
    def test_unreadable(self):
        class UnReadable(self.BytesIO):
            def readable(self):
                return False
        txt = self.TextIOWrapper(UnReadable())
        self.assertRaises(IOError, txt.read)
    def test_read_one_by_one(self):
        txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
        reads = ""
        while True:
            c = txt.read(1)
            if not c:
                break
            reads += c
        self.assertEqual(reads, "AA\nBB")
    def test_readlines(self):
        txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
        self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
        txt.seek(0)
        self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
        txt.seek(0)
        self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
    # read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
    def test_read_by_chunk(self):
        # make sure "\r\n" straddles 128 char boundary.
        txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
        reads = ""
        while True:
            c = txt.read(128)
            if not c:
                break
            reads += c
        self.assertEqual(reads, "A"*127+"\nB")
    def test_issue1395_1(self):
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        # read one char at a time
        reads = ""
        while True:
            c = txt.read(1)
            if not c:
                break
            reads += c
        self.assertEqual(reads, self.normalized)
    def test_issue1395_2(self):
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        txt._CHUNK_SIZE = 4
        reads = ""
        while True:
            c = txt.read(4)
            if not c:
                break
            reads += c
        self.assertEqual(reads, self.normalized)
    def test_issue1395_3(self):
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        txt._CHUNK_SIZE = 4
        reads = txt.read(4)
        reads += txt.read(4)
        reads += txt.readline()
        reads += txt.readline()
        reads += txt.readline()
        self.assertEqual(reads, self.normalized)
    def test_issue1395_4(self):
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        txt._CHUNK_SIZE = 4
        reads = txt.read(4)
        reads += txt.read()
        self.assertEqual(reads, self.normalized)
    def test_issue1395_5(self):
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        txt._CHUNK_SIZE = 4
        reads = txt.read(4)
        pos = txt.tell()
        txt.seek(0)
        txt.seek(pos)
        self.assertEqual(txt.read(4), "BBB\n")
    def test_issue2282(self):
        buffer = self.BytesIO(self.testdata)
        txt = self.TextIOWrapper(buffer, encoding="ascii")
        self.assertEqual(buffer.seekable(), txt.seekable())
    def test_append_bom(self):
        # The BOM is not written again when appending to a non-empty file
        filename = support.TESTFN
        for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
            with self.open(filename, 'w', encoding=charset) as f:
                f.write('aaa')
                pos = f.tell()
            with self.open(filename, 'rb') as f:
                self.assertEqual(f.read(), 'aaa'.encode(charset))
            with self.open(filename, 'a', encoding=charset) as f:
                f.write('xxx')
            with self.open(filename, 'rb') as f:
                self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
    def test_seek_bom(self):
        # Same test, but when seeking manually
        filename = support.TESTFN
        for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
            with self.open(filename, 'w', encoding=charset) as f:
                f.write('aaa')
                pos = f.tell()
            with self.open(filename, 'r+', encoding=charset) as f:
                f.seek(pos)
                f.write('zzz')
                f.seek(0)
                f.write('bbb')
            with self.open(filename, 'rb') as f:
                self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
    def test_errors_property(self):
        with self.open(support.TESTFN, "w") as f:
            self.assertEqual(f.errors, "strict")
        with self.open(support.TESTFN, "w", errors="replace") as f:
            self.assertEqual(f.errors, "replace")
    def test_threads_write(self):
        # Issue6750: concurrent writes could duplicate data
        event = threading.Event()
        with self.open(support.TESTFN, "w", buffering=1) as f:
            def run(n):
                text = "Thread%03d\n" % n
                event.wait()
                f.write(text)
            threads = [threading.Thread(target=lambda n=x: run(n))
                       for x in range(20)]
            for t in threads:
                t.start()
            time.sleep(0.02)
            event.set()
            for t in threads:
                t.join()
        with self.open(support.TESTFN) as f:
            content = f.read()
            for n in range(20):
                self.assertEqual(content.count("Thread%03d\n" % n), 1)
    def test_flush_error_on_close(self):
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        def bad_flush():
            raise IOError()
        txt.flush = bad_flush
        self.assertRaises(IOError, txt.close)  # exception not swallowed
    def test_multi_close(self):
        # close() must be idempotent, but flush() after close must fail.
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        txt.close()
        txt.close()
        txt.close()
        self.assertRaises(ValueError, txt.flush)
class CTextIOWrapperTest(TextIOWrapperTest):
    """Run the TextIOWrapper tests against the C implementation, plus
    C-specific initialization and garbage-collection checks."""

    def test_initialization(self):
        r = self.BytesIO(b"\xc3\xa9\n\n")
        b = self.BufferedReader(r, 1000)
        t = self.TextIOWrapper(b)
        # A failed __init__ must leave the object unusable for reading.
        self.assertRaises(TypeError, t.__init__, b, newline=42)
        self.assertRaises(ValueError, t.read)
        self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
        self.assertRaises(ValueError, t.read)
    def test_garbage_collection(self):
        # C TextIOWrapper objects are collected, and collecting them flushes
        # all data to disk.
        # The Python version has __del__, so it ends in gc.garbage instead.
        rawio = io.FileIO(support.TESTFN, "wb")
        b = self.BufferedWriter(rawio)
        t = self.TextIOWrapper(b, encoding="ascii")
        t.write("456def")
        t.x = t  # reference cycle: only the cyclic GC can reclaim the object
        wr = weakref.ref(t)
        del t
        support.gc_collect()
        self.assertTrue(wr() is None, wr)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"456def")
class PyTextIOWrapperTest(TextIOWrapperTest):
    """Run the TextIOWrapper tests against the pure-Python implementation."""
    pass
class IncrementalNewlineDecoderTest(unittest.TestCase):
    """Tests for IncrementalNewlineDecoder: universal-newline translation,
    the ``newlines`` attribute, and getstate()/setstate() round-trips."""

    def check_newline_decoding_utf8(self, decoder):
        # UTF-8 specific tests for a newline decoder
        def _check_decode(b, s, **kwargs):
            # We exercise getstate() / setstate() as well as decode()
            state = decoder.getstate()
            self.assertEqual(decoder.decode(b, **kwargs), s)
            decoder.setstate(state)
            self.assertEqual(decoder.decode(b, **kwargs), s)
        # Multi-byte characters fed whole and one byte at a time.
        _check_decode(b'\xe8\xa2\x88', "\u8888")
        _check_decode(b'\xe8', "")
        _check_decode(b'\xa2', "")
        _check_decode(b'\x88', "\u8888")
        _check_decode(b'\xe8', "")
        _check_decode(b'\xa2', "")
        _check_decode(b'\x88', "\u8888")
        _check_decode(b'\xe8', "")
        self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
        decoder.reset()
        # A pending '\r' is held back until the next byte (or EOF) shows
        # whether it starts a '\r\n' pair.
        _check_decode(b'\n', "\n")
        _check_decode(b'\r', "")
        _check_decode(b'', "\n", final=True)
        _check_decode(b'\r', "\n", final=True)
        _check_decode(b'\r', "")
        _check_decode(b'a', "\na")
        _check_decode(b'\r\r\n', "\n\n")
        _check_decode(b'\r', "")
        _check_decode(b'\r', "\n")
        _check_decode(b'\na', "\na")
        _check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
        _check_decode(b'\xe8\xa2\x88', "\u8888")
        _check_decode(b'\n', "\n")
        _check_decode(b'\xe8\xa2\x88\r', "\u8888")
        _check_decode(b'\n', "\n")
    def check_newline_decoding(self, decoder, encoding):
        result = []
        if encoding is not None:
            encoder = codecs.getincrementalencoder(encoding)()
            def _decode_bytewise(s):
                # Decode one byte at a time
                for b in encoder.encode(s):
                    result.append(decoder.decode(bytes([b])))
        else:
            encoder = None
            def _decode_bytewise(s):
                # Decode one char at a time
                for c in s:
                    result.append(decoder.decode(c))
        # The 'newlines' attribute accumulates the kinds of line endings seen.
        self.assertEqual(decoder.newlines, None)
        _decode_bytewise("abc\n\r")
        self.assertEqual(decoder.newlines, '\n')
        _decode_bytewise("\nabc")
        self.assertEqual(decoder.newlines, ('\n', '\r\n'))
        _decode_bytewise("abc\r")
        self.assertEqual(decoder.newlines, ('\n', '\r\n'))
        _decode_bytewise("abc")
        self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
        _decode_bytewise("abc\r")
        self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
        decoder.reset()
        input = "abc"
        if encoder is not None:
            encoder.reset()
            input = encoder.encode(input)
        self.assertEqual(decoder.decode(input), "abc")
        self.assertEqual(decoder.newlines, None)
    def test_newline_decoder(self):
        encodings = (
            # None meaning the IncrementalNewlineDecoder takes unicode input
            # rather than bytes input
            None, 'utf-8', 'latin-1',
            'utf-16', 'utf-16-le', 'utf-16-be',
            'utf-32', 'utf-32-le', 'utf-32-be',
        )
        for enc in encodings:
            decoder = enc and codecs.getincrementaldecoder(enc)()
            decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
            self.check_newline_decoding(decoder, enc)
        decoder = codecs.getincrementaldecoder("utf-8")()
        decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
        self.check_newline_decoding_utf8(decoder)
    def test_newline_bytes(self):
        # Issue 5433: Excessive optimization in IncrementalNewlineDecoder
        def _check(dec):
            self.assertEqual(dec.newlines, None)
            self.assertEqual(dec.decode("\u0D00"), "\u0D00")
            self.assertEqual(dec.newlines, None)
            self.assertEqual(dec.decode("\u0A00"), "\u0A00")
            self.assertEqual(dec.newlines, None)
        dec = self.IncrementalNewlineDecoder(None, translate=False)
        _check(dec)
        dec = self.IncrementalNewlineDecoder(None, translate=True)
        _check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertTrue(obj is not None, name)
if name == "open":
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
self.assertRaises(TypeError, self.BlockingIOError)
self.assertRaises(TypeError, self.BlockingIOError, 1)
self.assertRaises(TypeError, self.BlockingIOError, 1, 2, 3, 4)
self.assertRaises(TypeError, self.BlockingIOError, 1, "", None)
b = self.BlockingIOError(1, "")
self.assertEqual(b.characters_written, 0)
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertTrue(wr() is None, wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertTrue(isinstance(self.IOBase, abc.ABCMeta))
self.assertTrue(isinstance(self.RawIOBase, abc.ABCMeta))
self.assertTrue(isinstance(self.BufferedIOBase, abc.ABCMeta))
self.assertTrue(isinstance(self.TextIOBase, abc.ABCMeta))
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertTrue(isinstance(f, abcmodule.IOBase))
self.assertTrue(isinstance(f, abcmodule.RawIOBase))
self.assertFalse(isinstance(f, abcmodule.BufferedIOBase))
self.assertFalse(isinstance(f, abcmodule.TextIOBase))
with self.open(support.TESTFN, "wb") as f:
self.assertTrue(isinstance(f, abcmodule.IOBase))
self.assertFalse(isinstance(f, abcmodule.RawIOBase))
self.assertTrue(isinstance(f, abcmodule.BufferedIOBase))
self.assertFalse(isinstance(f, abcmodule.TextIOBase))
with self.open(support.TESTFN, "w") as f:
self.assertTrue(isinstance(f, abcmodule.IOBase))
self.assertFalse(isinstance(f, abcmodule.RawIOBase))
self.assertFalse(isinstance(f, abcmodule.BufferedIOBase))
self.assertTrue(isinstance(f, abcmodule.TextIOBase))
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
class CMiscIOTest(MiscIOTest):
io = io
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1/0
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler."""
read_results = []
def _read():
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
try:
wio = self.io.open(w, **fdopen_kwargs)
t.start()
signal.alarm(1)
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
self.assertRaises(ZeroDivisionError,
wio.write, item * (1024 * 1024))
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except IOError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
def test_main():
tests = (CIOTest, PyIOTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockRawIOWithoutRead)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
| 36.478328 | 89 | 0.569701 |
acf90f7f29d9669f81138d1efc0d462c8103a326 | 341 | py | Python | astropixie/astropixie/__init__.py | lsst-epo/epoc | 8e17ebec509be5c3cc2063f4645dfe9e26b49c18 | [
"MIT"
] | 1 | 2018-11-27T10:15:27.000Z | 2018-11-27T10:15:27.000Z | astropixie/astropixie/__init__.py | lsst-epo/epoc | 8e17ebec509be5c3cc2063f4645dfe9e26b49c18 | [
"MIT"
] | 7 | 2018-04-02T18:02:21.000Z | 2018-07-13T15:41:56.000Z | astropixie/astropixie/__init__.py | lsst-epo/vela | 8e17ebec509be5c3cc2063f4645dfe9e26b49c18 | [
"MIT"
] | null | null | null | import sys
import warnings
if not sys.warnoptions:
warnings.simplefilter("ignore")
from . import catalog_service
from . import sample_catalog_provider
from . import data
def hello_universe():
return "Hello universe!"
provider = sample_catalog_provider.SampleCatalogProvider()
Catalog = catalog_service.CatalogService(provider)
| 18.944444 | 58 | 0.797654 |
acf91029b0ea77c16c218484be50447596a3ecf7 | 2,533 | py | Python | tempest/services/volume/xml/admin/volume_quotas_client.py | queria/my-tempest | a9cdee0201bb956c7502fd372dab467b056ba67f | [
"Apache-2.0"
] | 1 | 2021-06-12T14:54:52.000Z | 2021-06-12T14:54:52.000Z | tempest/services/volume/xml/admin/volume_quotas_client.py | queria/my-tempest | a9cdee0201bb956c7502fd372dab467b056ba67f | [
"Apache-2.0"
] | null | null | null | tempest/services/volume/xml/admin/volume_quotas_client.py | queria/my-tempest | a9cdee0201bb956c7502fd372dab467b056ba67f | [
"Apache-2.0"
] | 1 | 2017-07-11T13:54:27.000Z | 2017-07-11T13:54:27.000Z | # Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Baubeau <sylvain.baubeau@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
from lxml import etree
from tempest.common import xml_utils as xml
from tempest import config
from tempest.services.volume.json.admin import volume_quotas_client
CONF = config.CONF
class VolumeQuotasClientXML(volume_quotas_client.VolumeQuotasClientJSON):
"""
Client class to send CRUD Volume Quotas API requests to a Cinder endpoint
"""
TYPE = "xml"
def _format_quota(self, q):
quota = {}
for k, v in q.items():
try:
v = ast.literal_eval(v)
except (ValueError, SyntaxError):
pass
quota[k] = v
return quota
def get_quota_usage(self, tenant_id):
"""List the quota set for a tenant."""
resp, body = self.get_quota_set(tenant_id, params={'usage': True})
self.expected_success(200, resp.status)
return resp, self._format_quota(body)
def update_quota_set(self, tenant_id, gigabytes=None, volumes=None,
snapshots=None):
post_body = {}
element = xml.Element("quota_set")
if gigabytes is not None:
post_body['gigabytes'] = gigabytes
if volumes is not None:
post_body['volumes'] = volumes
if snapshots is not None:
post_body['snapshots'] = snapshots
xml.deep_dict_to_xml(element, post_body)
resp, body = self.put('os-quota-sets/%s' % tenant_id,
str(xml.Document(element)))
body = xml.xml_to_json(etree.fromstring(body))
self.expected_success(200, resp.status)
return resp, self._format_quota(body)
def delete_quota_set(self, tenant_id):
"""Delete the tenant's quota set."""
resp, body = self.delete('os-quota-sets/%s' % tenant_id)
self.expected_success(200, resp.status)
| 32.063291 | 78 | 0.647848 |
acf910863c9f411f0d60c3b5c44a34fb91e15e20 | 1,283 | py | Python | api/migrations/0012_auto_20190702_1743.py | yuxiaoYX/xiaoshuo | 5652703521aa99774e8e0667c5e6b9f24a6d90ac | [
"MIT"
] | null | null | null | api/migrations/0012_auto_20190702_1743.py | yuxiaoYX/xiaoshuo | 5652703521aa99774e8e0667c5e6b9f24a6d90ac | [
"MIT"
] | null | null | null | api/migrations/0012_auto_20190702_1743.py | yuxiaoYX/xiaoshuo | 5652703521aa99774e8e0667c5e6b9f24a6d90ac | [
"MIT"
] | null | null | null | # Generated by Django 2.2.1 on 2019-07-02 09:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0011_booklist'),
]
operations = [
migrations.RemoveField(
model_name='booklist',
name='list_key',
),
migrations.RemoveField(
model_name='booklist',
name='list_value',
),
migrations.AddField(
model_name='booklist',
name='list_kv',
field=models.TextField(default=111, verbose_name='源小说列表url和章节名'),
preserve_default=False,
),
migrations.AlterField(
model_name='bookintroduce',
name='requests_method',
field=models.CharField(default='get', max_length=10, verbose_name='访问方式post或get'),
),
migrations.AlterField(
model_name='booklist',
name='requests_method',
field=models.CharField(default='get', max_length=10, verbose_name='访问方式post或get'),
),
migrations.AlterField(
model_name='booksearch',
name='requests_method',
field=models.CharField(default='get', max_length=10, verbose_name='访问方式post或get'),
),
]
| 29.837209 | 94 | 0.577553 |
acf91160635cdc2fd9d0fd3c3aae99f2f324708e | 113 | py | Python | uniquepaths/uniquePaths.py | evansMeja/Leetcode | dac2e00090afad47eb02b30e56848fbc0ea8b57f | [
"MIT"
] | null | null | null | uniquepaths/uniquePaths.py | evansMeja/Leetcode | dac2e00090afad47eb02b30e56848fbc0ea8b57f | [
"MIT"
] | null | null | null | uniquepaths/uniquePaths.py | evansMeja/Leetcode | dac2e00090afad47eb02b30e56848fbc0ea8b57f | [
"MIT"
] | null | null | null | class Solution:
def uniquePaths(self, m: int, n: int) -> int:
return math.comb(m - 1+ n - 1 , m - 1)
| 28.25 | 49 | 0.548673 |
acf912aa206669a4a80c31fd6e46bcd3733cf3b5 | 1,433 | py | Python | pretix_newsletter_ml/views.py | pretix-unofficial/pretix-newsletter-ml | 649cc1c1fbf3b7111f60ca935026ca871893391b | [
"Apache-2.0"
] | 3 | 2018-11-19T04:07:15.000Z | 2019-12-14T08:34:24.000Z | pretix_newsletter_ml/views.py | pretix/pretix-newsletter-ml | 649cc1c1fbf3b7111f60ca935026ca871893391b | [
"Apache-2.0"
] | 2 | 2022-02-08T12:17:06.000Z | 2022-02-09T17:34:52.000Z | pretix_newsletter_ml/views.py | pretix-unofficial/pretix-newsletter-ml | 649cc1c1fbf3b7111f60ca935026ca871893391b | [
"Apache-2.0"
] | 4 | 2017-07-15T23:45:33.000Z | 2018-11-19T04:07:16.000Z | import logging
from django import forms
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from i18nfield.forms import I18nFormField, I18nTextInput
from pretix.base.forms import SettingsForm
from pretix.base.models import Event
from pretix.control.views.event import (
EventSettingsFormView, EventSettingsViewMixin,
)
logger = logging.getLogger(__name__)
class NewsletterSettingsForm(SettingsForm):
newsletter_ml_subscribe_address = forms.EmailField(
label=_("Subscribe address"),
required=False,
)
newsletter_ml_add_automatically = forms.BooleanField(
label=_("Add emails to the list without asking users in the frontend"),
help_text=_("Not recommended, might be considered illegal/unfair business practice in your legislation."),
required=False,
)
newsletter_ml_text = I18nFormField(
label=_("Checkbox label"),
required=True,
widget=I18nTextInput,
)
class MLSettings(EventSettingsViewMixin, EventSettingsFormView):
model = Event
form_class = NewsletterSettingsForm
template_name = 'pretix_newsletter_ml/settings.html'
permission = 'can_change_settings'
def get_success_url(self) -> str:
return reverse('plugins:pretix_newsletter_ml:settings', kwargs={
'organizer': self.request.event.organizer.slug,
'event': self.request.event.slug
})
| 32.568182 | 114 | 0.734124 |
acf9148eb322a79110149f4b26cd8ec327323d40 | 12,605 | py | Python | python/ccxt/bl3p.py | Joukahainen/ccxt | 82823a85b96cee336853f0deb353474df2122b88 | [
"MIT"
] | 2 | 2022-03-10T15:21:49.000Z | 2022-03-10T15:22:01.000Z | python/ccxt/bl3p.py | Joukahainen/ccxt | 82823a85b96cee336853f0deb353474df2122b88 | [
"MIT"
] | 4 | 2021-12-14T06:19:10.000Z | 2022-03-19T02:39:29.000Z | python/ccxt/bl3p.py | Joukahainen/ccxt | 82823a85b96cee336853f0deb353474df2122b88 | [
"MIT"
] | 2 | 2022-03-15T22:31:00.000Z | 2022-03-23T06:08:29.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.precise import Precise
class bl3p(Exchange):
def describe(self):
return self.deep_extend(super(bl3p, self).describe(), {
'id': 'bl3p',
'name': 'BL3P',
'countries': ['NL'], # Netherlands
'rateLimit': 1000,
'version': '1',
'comment': 'An exchange market by BitonicNL',
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchIsolatedPositions': False,
'fetchLeverage': False,
'fetchMarkOHLCV': False,
'fetchOrderBook': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/28501752-60c21b82-6feb-11e7-818b-055ee6d0e754.jpg',
'api': 'https://api.bl3p.eu',
'www': 'https://bl3p.eu', # 'https://bitonic.nl'
'doc': [
'https://github.com/BitonicNL/bl3p-api/tree/master/docs',
'https://bl3p.eu/api',
'https://bitonic.nl/en/api',
],
},
'api': {
'public': {
'get': [
'{market}/ticker',
'{market}/orderbook',
'{market}/trades',
],
},
'private': {
'post': [
'{market}/money/depth/full',
'{market}/money/order/add',
'{market}/money/order/cancel',
'{market}/money/order/result',
'{market}/money/orders',
'{market}/money/orders/history',
'{market}/money/trades/fetch',
'GENMKT/money/info',
'GENMKT/money/deposit_address',
'GENMKT/money/new_deposit_address',
'GENMKT/money/wallet/history',
'GENMKT/money/withdraw',
],
},
},
'markets': {
'BTC/EUR': {'id': 'BTCEUR', 'symbol': 'BTC/EUR', 'base': 'BTC', 'quote': 'EUR', 'baseId': 'BTC', 'quoteId': 'EUR', 'maker': 0.0025, 'taker': 0.0025, 'type': 'spot', 'spot': True},
'LTC/EUR': {'id': 'LTCEUR', 'symbol': 'LTC/EUR', 'base': 'LTC', 'quote': 'EUR', 'baseId': 'LTC', 'quoteId': 'EUR', 'maker': 0.0025, 'taker': 0.0025, 'type': 'spot', 'spot': True},
},
})
def parse_balance(self, response):
data = self.safe_value(response, 'data', {})
wallets = self.safe_value(data, 'wallets')
result = {'info': data}
codes = list(self.currencies.keys())
for i in range(0, len(codes)):
code = codes[i]
currency = self.currency(code)
currencyId = currency['id']
wallet = self.safe_value(wallets, currencyId, {})
available = self.safe_value(wallet, 'available', {})
balance = self.safe_value(wallet, 'balance', {})
account = self.account()
account['free'] = self.safe_string(available, 'value')
account['total'] = self.safe_string(balance, 'value')
result[code] = account
return self.safe_balance(result)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostGENMKTMoneyInfo(params)
return self.parse_balance(response)
def parse_bid_ask(self, bidask, priceKey=0, amountKey=1):
price = self.safe_number(bidask, priceKey)
size = self.safe_number(bidask, amountKey)
return [
price / 100000.0,
size / 100000000.0,
]
def fetch_order_book(self, symbol, limit=None, params={}):
market = self.market(symbol)
request = {
'market': market['id'],
}
response = self.publicGetMarketOrderbook(self.extend(request, params))
orderbook = self.safe_value(response, 'data')
return self.parse_order_book(orderbook, symbol, None, 'bids', 'asks', 'price_int', 'amount_int')
def parse_ticker(self, ticker, market=None):
#
# {
# "currency":"BTC",
# "last":32654.55595,
# "bid":32552.3642,
# "ask":32703.58231,
# "high":33500,
# "low":31943,
# "timestamp":1643372789,
# "volume":{
# "24h":2.27372413,
# "30d":320.79375456
# }
# }
#
symbol = self.safe_symbol(None, market)
timestamp = self.safe_timestamp(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
volume = self.safe_value(ticker, 'volume', {})
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_string(ticker, 'ask'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_string(volume, '24h'),
'quoteVolume': None,
'info': ticker,
}, market, False)
def fetch_ticker(self, symbol, params={}):
market = self.market(symbol)
request = {
'market': market['id'],
}
ticker = self.publicGetMarketTicker(self.extend(request, params))
#
# {
# "currency":"BTC",
# "last":32654.55595,
# "bid":32552.3642,
# "ask":32703.58231,
# "high":33500,
# "low":31943,
# "timestamp":1643372789,
# "volume":{
# "24h":2.27372413,
# "30d":320.79375456
# }
# }
#
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market=None):
id = self.safe_string(trade, 'trade_id')
timestamp = self.safe_integer(trade, 'date')
price = self.safe_string(trade, 'price_int')
amount = self.safe_string(trade, 'amount_int')
market = self.safe_market(None, market)
return self.safe_trade({
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'side': None,
'order': None,
'takerOrMaker': None,
'price': Precise.string_div(price, '100000'),
'amount': Precise.string_div(amount, '100000000'),
'cost': None,
'fee': None,
}, market)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
market = self.market(symbol)
response = self.publicGetMarketTrades(self.extend({
'market': market['id'],
}, params))
result = self.parse_trades(response['data']['trades'], market, since, limit)
return result
def fetch_trading_fees(self, params={}):
self.load_markets()
response = self.privatePostGENMKTMoneyInfo(params)
#
# {
# result: 'success',
# data: {
# user_id: '13396',
# wallets: {
# BTC: {
# balance: {
# value_int: '0',
# display: '0.00000000 BTC',
# currency: 'BTC',
# value: '0.00000000',
# display_short: '0.00 BTC'
# },
# available: {
# value_int: '0',
# display: '0.00000000 BTC',
# currency: 'BTC',
# value: '0.00000000',
# display_short: '0.00 BTC'
# }
# },
# ...
# },
# trade_fee: '0.25'
# }
# }
#
data = self.safe_value(response, 'data', {})
feeString = self.safe_string(data, 'trade_fee')
fee = self.parse_number(Precise.string_div(feeString, '100'))
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': data,
'symbol': symbol,
'maker': fee,
'taker': fee,
'percentage': True,
'tierBased': False,
}
return result
def create_order(self, symbol, type, side, amount, price=None, params={}):
market = self.market(symbol)
order = {
'market': market['id'],
'amount_int': int(amount * 100000000),
'fee_currency': market['quote'],
'type': 'bid' if (side == 'buy') else 'ask',
}
if type == 'limit':
order['price_int'] = int(price * 100000.0)
response = self.privatePostMarketMoneyOrderAdd(self.extend(order, params))
orderId = self.safe_string(response['data'], 'order_id')
return {
'info': response,
'id': orderId,
}
def cancel_order(self, id, symbol=None, params={}):
request = {
'order_id': id,
}
return self.privatePostMarketMoneyOrderCancel(self.extend(request, params))
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
request = self.implode_params(path, params)
url = self.urls['api'] + '/' + self.version + '/' + request
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = self.nonce()
body = self.urlencode(self.extend({'nonce': nonce}, query))
secret = self.base64_to_binary(self.secret)
# eslint-disable-next-line quotes
auth = request + "\0" + body
signature = self.hmac(self.encode(auth), secret, hashlib.sha512, 'base64')
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Rest-Key': self.apiKey,
'Rest-Sign': signature,
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
| 38.429878 | 195 | 0.470131 |
acf914a8c5a8867466583b2220cc43a1099be9a5 | 2,340 | py | Python | apps/banlist/resources.py | tarvitz/icu | 9a7cdac9d26ea224539f68f678b90bf70084374d | [
"BSD-3-Clause"
] | 1 | 2022-03-12T23:44:21.000Z | 2022-03-12T23:44:21.000Z | apps/banlist/resources.py | tarvitz/icu | 9a7cdac9d26ea224539f68f678b90bf70084374d | [
"BSD-3-Clause"
] | null | null | null | apps/banlist/resources.py | tarvitz/icu | 9a7cdac9d26ea224539f68f678b90bf70084374d | [
"BSD-3-Clause"
] | null | null | null | import re
from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS
from apps.banlist.models import ServerBanList
from tastypie.authorization import Authorization
from tastypie.throttle import BaseThrottle, CacheThrottle
from tastypie.authentication import ApiKeyAuthentication
from tastypie.validation import Validation, FormValidation
from tastypie import fields
from apps.accounts.resources import UserResource
from django.db.models import Q, F
from django.conf import settings
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.forms.util import ErrorList
from django.conf.urls.defaults import url
from tastypie.http import HttpGone, HttpMultipleChoices
from tastypie.utils import trailing_slash
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned, ImproperlyConfigured
from apps.core.helpers import get_object_or_404
class ServerBanListResource(ModelResource):
owner = fields.ForeignKey(UserResource, attribute='owner', readonly=True)
#read-onlies
id = fields.IntegerField(readonly=True, attribute='id')
created_on = fields.DateTimeField(readonly=True, attribute='created_on',
help_text=_('Shows when spot was created'))
#creates and associates instance.owner with request.user
def obj_create(self, bundle, request=None, **kwargs):
return super(ServerBanListResource, self).obj_create(bundle, request,
owner=request.user)
#retrieve only objects we own or permitted to alter
def apply_authorization_limits(self, request, object_list):
return object_list.filter(Q(owner=request.user))
class Meta:
queryset = ServerBanList.objects.all()
allowed_methods = ['get', 'post', 'put', 'patch', 'delete']
resource_name = 'server'
authorization = Authorization()
authentication = ApiKeyAuthentication()
throttle = BaseThrottle(throttle_at=settings.THROTTLE_AT)
if settings.ENABLE_THROTTLING:
throttle = CacheThrottle(throttle_at=settings.THROTTLE_AT)
#validation = FormValidation(form_class=RepSpotForm)
filtering = {
'owner': ALL_WITH_RELATIONS,
'server_name': ALL,
'ip_address': ALL,
'reason': ALL,
'plain_type': ALL,
}
| 40.344828 | 100 | 0.741453 |
acf916e7654ab093f681772f5a2817d1f1dd0515 | 11,976 | py | Python | applications/geoclaw/bowl_radial/make_plots.py | MelodyShih/forestclaw | 2abaab636e6e93f5507a6f231490144a3f805b59 | [
"BSD-2-Clause"
] | 1 | 2021-03-09T23:06:42.000Z | 2021-03-09T23:06:42.000Z | applications/geoclaw/bowl_radial/make_plots.py | scottaiton/forestclaw | 2abaab636e6e93f5507a6f231490144a3f805b59 | [
"BSD-2-Clause"
] | null | null | null | applications/geoclaw/bowl_radial/make_plots.py | scottaiton/forestclaw | 2abaab636e6e93f5507a6f231490144a3f805b59 | [
"BSD-2-Clause"
] | null | null | null |
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
"""
try:
from setplotfg import setplotfg
except:
print("Did not find setplotfg.py")
setplotfg = None
#--------------------------
def setplot(plotdata):
#--------------------------
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of pyclaw.plotters.data.ClawPlotData.
Output: a modified version of plotdata.
"""
from clawpack.visclaw import colormaps, geoplot
plotdata.clearfigures() # clear any old figures,axes,items data
def set_drytol(current_data):
# The drytol parameter is used in masking land and water and
# affects what color map is used for cells with small water depth h.
# The cell will be plotted as dry if h < drytol.
# The best value to use often depends on the application and can
# be set here (measured in meters):
current_data.user['drytol'] = 1.e-2
plotdata.beforeframe = set_drytol
# To plot gauge locations on pcolor or contour plot, use this as
# an afteraxis function:
def addgauges(current_data):
from clawpack.visclaw import gaugetools
gaugetools.plot_gauge_locations(current_data.plotdata, \
gaugenos='all', format_string='ko', add_labels=True)
#-----------------------------------------
# Figure for pcolor plot
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='pcolor', figno=0)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes('pcolor')
plotaxes.title = 'Surface'
plotaxes.scaled = True
# Water
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
#plotitem.plot_var = geoplot.surface
plotitem.plot_var = geoplot.surface_or_depth
plotitem.pcolor_cmap = geoplot.tsunami_colormap
plotitem.pcolor_cmin = -0.9
plotitem.pcolor_cmax = 0.9
plotitem.add_colorbar = True
plotitem.amr_celledges_show = [1,1,0]
plotitem.amr_patchedges_show = [1]
# Land
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = geoplot.land
plotitem.pcolor_cmap = geoplot.land_colors
plotitem.pcolor_cmin = 0.0
plotitem.pcolor_cmax = 100.0
plotitem.add_colorbar = False
plotitem.amr_celledges_show = [1,1,0]
plotaxes.xlimits = [-100,100]
plotaxes.ylimits = [-100,100]
#-----------------------------------------
# Figure for zoom
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='Zoom', figno=10)
#plotfigure.show = False
plotfigure.kwargs = {'figsize':[12,7]}
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes('diag zoom')
plotaxes.axescmd = 'axes([0.0,0.1,0.6,0.6])'
plotaxes.title = 'On diagonal'
plotaxes.scaled = True
plotaxes.xlimits = [55,66]
plotaxes.ylimits = [55,66]
plotaxes.afteraxes = addgauges
# Water
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
#plotitem.plot_var = geoplot.surface
plotitem.plot_var = geoplot.surface_or_depth
plotitem.pcolor_cmap = geoplot.tsunami_colormap
plotitem.pcolor_cmin = -0.9
plotitem.pcolor_cmax = 0.9
plotitem.add_colorbar = True
plotitem.amr_celledges_show = [1,1,0]
plotitem.amr_patchedges_show = [1]
# Land
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = geoplot.land
plotitem.pcolor_cmap = geoplot.land_colors
plotitem.pcolor_cmin = 0.0
plotitem.pcolor_cmax = 100.0
plotitem.add_colorbar = False
plotitem.amr_celledges_show = [1,1,0]
# Add contour lines of bathymetry:
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
plotitem.plot_var = geoplot.topo
from numpy import arange, linspace
plotitem.contour_levels = arange(-10., 0., 1.)
plotitem.amr_contour_colors = ['k'] # color on each level
plotitem.kwargs = {'linestyles':'solid'}
plotitem.amr_contour_show = [0,0,1] # show contours only on finest level
plotitem.celledges_show = 0
plotitem.patchedges_show = 0
plotitem.show = True
# Add contour lines of topography:
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
plotitem.plot_var = geoplot.topo
from numpy import arange, linspace
plotitem.contour_levels = arange(0., 11., 1.)
plotitem.amr_contour_colors = ['g'] # color on each level
plotitem.kwargs = {'linestyles':'solid'}
plotitem.amr_contour_show = [0,0,1] # show contours only on finest level
plotitem.celledges_show = 0
plotitem.patchedges_show = 0
plotitem.show = True
# Add dashed contour line for shoreline
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
plotitem.plot_var = geoplot.topo
plotitem.contour_levels = [0.]
plotitem.amr_contour_colors = ['k'] # color on each level
plotitem.kwargs = {'linestyles':'dashed'}
plotitem.amr_contour_show = [0,0,1] # show contours only on finest level
plotitem.celledges_show = 0
plotitem.patchedges_show = 0
plotitem.show = True
#-----------------------------------------
# Figure for zoom near axis
#-----------------------------------------
#plotfigure = plotdata.new_plotfigure(name='Zoom2', figno=11)
# now included in same figure as zoom on diagonal
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes('x zoom')
plotaxes.show = True
plotaxes.axescmd = 'axes([0.5,0.1,0.6,0.6])'
plotaxes.title = 'On x-axis'
plotaxes.scaled = True
plotaxes.xlimits = [82,93]
plotaxes.ylimits = [-5,6]
plotaxes.afteraxes = addgauges
# Water
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
#plotitem.plot_var = geoplot.surface
plotitem.plot_var = geoplot.surface_or_depth
plotitem.pcolor_cmap = geoplot.tsunami_colormap
plotitem.pcolor_cmin = -0.9
plotitem.pcolor_cmax = 0.9
plotitem.add_colorbar = True
plotitem.amr_celledges_show = [1,1,0]
plotitem.amr_patchedges_show = [1]
# Land
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = geoplot.land
plotitem.pcolor_cmap = geoplot.land_colors
plotitem.pcolor_cmin = 0.0
plotitem.pcolor_cmax = 100.0
plotitem.add_colorbar = False
plotitem.amr_celledges_show = [1,1,0]
# Add contour lines of bathymetry:
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
plotitem.plot_var = geoplot.topo
from numpy import arange, linspace
plotitem.contour_levels = arange(-10., 0., 1.)
plotitem.amr_contour_colors = ['k'] # color on each level
plotitem.kwargs = {'linestyles':'solid'}
plotitem.amr_contour_show = [0,0,1] # show contours only on finest level
plotitem.celledges_show = 0
plotitem.patchedges_show = 0
plotitem.show = True
# Add contour lines of topography:
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
plotitem.plot_var = geoplot.topo
from numpy import arange, linspace
plotitem.contour_levels = arange(0., 11., 1.)
plotitem.amr_contour_colors = ['g'] # color on each level
plotitem.kwargs = {'linestyles':'solid'}
plotitem.amr_contour_show = [0,0,1] # show contours only on finest level
plotitem.celledges_show = 0
plotitem.patchedges_show = 0
plotitem.show = True
# Add dashed contour line for shoreline
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
plotitem.plot_var = geoplot.topo
plotitem.contour_levels = [0.]
plotitem.amr_contour_colors = ['k'] # color on each level
plotitem.kwargs = {'linestyles':'dashed'}
plotitem.amr_contour_show = [0,0,1] # show contours only on finest level
plotitem.celledges_show = 0
plotitem.patchedges_show = 0
plotitem.show = True
#-----------------------------------------
# Figures for gauges
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='Surface & topo', figno=300, \
type='each_gauge')
plotfigure.clf_each_gauge = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = 'auto'
plotaxes.ylimits = [-2.0, 2.0]
plotaxes.title = 'Surface'
# Plot surface as blue curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = 3
plotitem.plotstyle = 'b-'
# Plot topo as green curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
def gaugetopo(current_data):
q = current_data.q
h = q[0,:]
eta = q[3,:]
topo = eta - h
return topo
plotitem.plot_var = gaugetopo
plotitem.plotstyle = 'g-'
def add_zeroline(current_data):
from pylab import plot, legend
t = current_data.t
legend(('surface','topography'),loc='lower left')
plot(t, 0*t, 'k')
plotaxes.afteraxes = add_zeroline
#-----------------------------------------
# Figure for patches alone
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='patches', figno=2)
plotfigure.show = False
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = [0,1]
plotaxes.ylimits = [0,1]
plotaxes.title = 'patches'
plotaxes.scaled = True
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(plot_type='2d_patch')
plotitem.amr_patch_bgcolor = ['#ffeeee', '#eeeeff', '#eeffee']
plotitem.amr_celledges_show = [1,1,0]
plotitem.amr_patchedges_show = [1]
#-----------------------------------------
# Scatter plot of surface for radially symmetric
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='Scatter', figno=200)
plotfigure.show = False
# Note: will not look very good unless more of domain is refined
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = [0., 100.]
plotaxes.ylimits = [-1.5, 2.]
plotaxes.title = 'Scatter plot of surface'
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(plot_type='1d_from_2d_data')
plotitem.plot_var = geoplot.surface
def q_vs_radius(current_data):
from numpy import sqrt
x = current_data.x
y = current_data.y
r = sqrt(x**2 + y**2)
q = current_data.var
return r,q
plotitem.map_2d_to_1d = q_vs_radius
plotitem.plotstyle = 'o'
plotitem.amr_color=['b','r','g']
plotaxes.afteraxes = "import pylab; pylab.legend(['Level 1','Level 2'])"
#-----------------------------------------
# Parameters used only when creating html and/or latex hardcopy
# e.g., via pyclaw.plotters.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_gaugenos = 'all' # list of gauges to print
plotdata.print_fignos = [300] # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.html_movie = 'JSAnimation'
plotdata.latex = False # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
plotdata.format = 'forestclaw' # Format of output
return plotdata
# When run directly as a script, generate the plots for the current
# output directory using the setplot configuration defined above.
if __name__=="__main__":
    from clawpack.visclaw.plotclaw import plotclaw
    plotclaw(outdir='.',setplot=setplot,plotdir='_plots',format='forestclaw')
| 34.512968 | 77 | 0.642368 |
acf917bc26055517fa42be53053db6280e3405f0 | 4,081 | py | Python | test/Fortran/F77COM.py | EmanueleCannizzaro/scons | 6baa4e65cdf4df6951473545b69435711864e509 | [
"MIT"
] | 1 | 2019-09-18T06:37:02.000Z | 2019-09-18T06:37:02.000Z | test/Fortran/F77COM.py | EmanueleCannizzaro/scons | 6baa4e65cdf4df6951473545b69435711864e509 | [
"MIT"
] | null | null | null | test/Fortran/F77COM.py | EmanueleCannizzaro/scons | 6baa4e65cdf4df6951473545b69435711864e509 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/Fortran/F77COM.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"

import TestSCons

from common import write_fake_link

_python_ = TestSCons._python_
_exe = TestSCons._exe

test = TestSCons.TestSCons()

write_fake_link(test)

# Stand-in "compiler": copies input to output, dropping every line that
# starts with '#' + the tool name given as argv[1].  The marker lines
# remaining in each output therefore record which $*COM variable ran.
test.write('myfortran.py', r"""
import sys
comment = '#' + sys.argv[1]
outfile = open(sys.argv[2], 'wb')
infile = open(sys.argv[3], 'rb')
for l in infile.readlines():
    if l[:len(comment)] != comment:
        outfile.write(l)
sys.exit(0)
""")

test.write('SConstruct', """
env = Environment(LINK = r'%(_python_)s mylink.py',
                  LINKFLAGS = [],
                  F77COM = r'%(_python_)s myfortran.py f77 $TARGET $SOURCES',
                  F77PPCOM = r'%(_python_)s myfortran.py f77pp $TARGET $SOURCES',
                  FORTRANCOM = r'%(_python_)s myfortran.py fortran $TARGET $SOURCES',
                  FORTRANPPCOM = r'%(_python_)s myfortran.py fortranpp $TARGET $SOURCES')
env.Program(target = 'test01', source = 'test01.f')
env.Program(target = 'test02', source = 'test02.F')
env.Program(target = 'test03', source = 'test03.for')
env.Program(target = 'test04', source = 'test04.FOR')
env.Program(target = 'test05', source = 'test05.ftn')
env.Program(target = 'test06', source = 'test06.FTN')
env.Program(target = 'test07', source = 'test07.fpp')
env.Program(target = 'test08', source = 'test08.FPP')
env.Program(target = 'test09', source = 'test09.f77')
env.Program(target = 'test10', source = 'test10.F77')
""" % locals())

# One (source file, contents) pair per Fortran suffix.  The '#<tool>'
# marker names the fake-compiler tool expected to process that suffix.
fortran_sources = [
    ('test01.f', "This is a .f file.\n#link\n#fortran\n"),
    ('test02.F', "This is a .F file.\n#link\n#fortranpp\n"),
    ('test03.for', "This is a .for file.\n#link\n#fortran\n"),
    ('test04.FOR', "This is a .FOR file.\n#link\n#fortranpp\n"),
    ('test05.ftn', "This is a .ftn file.\n#link\n#fortran\n"),
    ('test06.FTN', "This is a .FTN file.\n#link\n#fortranpp\n"),
    ('test07.fpp', "This is a .fpp file.\n#link\n#fortranpp\n"),
    ('test08.FPP', "This is a .FPP file.\n#link\n#fortranpp\n"),
    ('test09.f77', "This is a .f77 file.\n#link\n#f77\n"),
    ('test10.F77', "This is a .F77 file.\n#link\n#f77pp\n"),
]

for name, contents in fortran_sources:
    test.write(name, contents)

test.run(arguments = '.', stderr = None)

# After the build, each "executable" should hold only the first line of
# its source: the fake compile and link steps strip the marker lines.
for name, contents in fortran_sources:
    executable = name.split('.')[0] + _exe
    test.must_match(executable, contents.split('\n')[0] + '\n')

test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 41.222222 | 96 | 0.691007 |
acf919568896d477c17e43d42e6340bd51218aaa | 3,018 | py | Python | netplan/capfore/migrations/0001_initial.py | VisionUnchange/CCD-Via-celery-DRF-ReactJS | ab29060ad3931f323fb1cc89d4e118a9b649e234 | [
"MIT"
] | null | null | null | netplan/capfore/migrations/0001_initial.py | VisionUnchange/CCD-Via-celery-DRF-ReactJS | ab29060ad3931f323fb1cc89d4e118a9b649e234 | [
"MIT"
] | 9 | 2020-02-11T23:47:47.000Z | 2022-01-13T00:41:42.000Z | netplan/capfore/migrations/0001_initial.py | VisionUnchange/CCD-Via-celery-DRF-ReactJS | ab29060ad3931f323fb1cc89d4e118a9b649e234 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-01 15:35
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial schema for the `capfore` app.  Migrations
    # are recorded by name once applied, so do not edit the operations
    # below after deployment; create follow-up migrations instead.
    initial = True
    dependencies = [
        # Task.owner (below) references the swappable auth user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='CityAttribute',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('city', models.CharField(max_length=100)),
                # NOTE(review): 'user_activty' looks like a typo for
                # 'user_activity'; fixing it now requires a new
                # RenameField migration, not an edit here.
                ('user_activty', models.FloatField()),
            ],
        ),
        migrations.CreateModel(
            name='PackageThreshold',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('packagethreshold_text', models.CharField(max_length=100)),
                ('threshold_rrc', models.FloatField()),
                ('threshold_prb_utilization', models.FloatField()),
                ('threshold_upoct', models.FloatField()),
            ],
        ),
        migrations.CreateModel(
            name='SceneAttribute',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('city', models.CharField(max_length=100)),
                ('scene', models.CharField(max_length=100)),
                ('guaranteed_bandwidth', models.FloatField()),
                ('rrc_growth_rate', models.FloatField()),
            ],
        ),
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('name', models.CharField(blank=True, default='Quick Task', max_length=100)),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='snippets', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('created',),
            },
        ),
        # Task FKs are added after CreateModel because capfore.Task is
        # itself created within this same migration.
        migrations.AddField(
            model_name='sceneattribute',
            name='task',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='task_scene', to='capfore.Task'),
        ),
        migrations.AddField(
            model_name='packagethreshold',
            name='task',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='task_package', to='capfore.Task'),
        ),
        migrations.AddField(
            model_name='cityattribute',
            name='task',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='task_city', to='capfore.Task'),
        ),
    ]
| 40.24 | 144 | 0.585156 |
acf91a43ed167f12339bd9f88a0e31cb76002e6e | 257,651 | py | Python | zerver/lib/actions.py | mtrame098/zulip | ce474ee8cf1df880b374eb044d0ed59237c8b7e9 | [
"Apache-2.0"
] | null | null | null | zerver/lib/actions.py | mtrame098/zulip | ce474ee8cf1df880b374eb044d0ed59237c8b7e9 | [
"Apache-2.0"
] | null | null | null | zerver/lib/actions.py | mtrame098/zulip | ce474ee8cf1df880b374eb044d0ed59237c8b7e9 | [
"Apache-2.0"
] | null | null | null | from typing import (
AbstractSet, Any, Callable, Dict, Iterable, List, Mapping, MutableMapping,
Optional, Sequence, Set, Tuple, Union, cast
)
from typing_extensions import TypedDict
import django.db.utils
from django.db.models import Count
from django.contrib.contenttypes.models import ContentType
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.conf import settings
from django.core import validators
from django.core.files import File
from analytics.lib.counts import COUNT_STATS, do_increment_logging_stat, \
RealmCount
from zerver.lib.bugdown import (
version as bugdown_version,
url_embed_preview_enabled,
convert as bugdown_convert,
)
from zerver.lib.addressee import Addressee
from zerver.lib.bot_config import (
ConfigError,
get_bot_config,
get_bot_configs,
set_bot_config,
)
from zerver.lib.cache import (
bot_dict_fields,
display_recipient_cache_key,
delete_user_profile_caches,
to_dict_cache_key_id,
user_profile_by_api_key_cache_key,
)
from zerver.lib.context_managers import lockfile
from zerver.lib.email_mirror_helpers import encode_email_address, encode_email_address_helper
from zerver.lib.emoji import emoji_name_to_emoji_code, get_emoji_file_name
from zerver.lib.exceptions import StreamDoesNotExistError, \
StreamWithIDDoesNotExistError
from zerver.lib.export import get_realm_exports_serialized
from zerver.lib.external_accounts import DEFAULT_EXTERNAL_ACCOUNTS
from zerver.lib.hotspots import get_next_hotspots
from zerver.lib.message import (
access_message,
MessageDict,
render_markdown,
update_first_visible_message_id,
)
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.realm_logo import get_realm_logo_data
from zerver.lib.retention import move_messages_to_archive
from zerver.lib.send_email import send_email, FromAddress, send_email_to_admins, \
clear_scheduled_emails, clear_scheduled_invitation_emails
from zerver.lib.storage import static_path
from zerver.lib.stream_subscription import (
get_active_subscriptions_for_stream_id,
get_active_subscriptions_for_stream_ids,
get_bulk_stream_subscriber_info,
get_stream_subscriptions_for_user,
get_stream_subscriptions_for_users,
num_subscribers_for_stream_id,
)
from zerver.lib.stream_topic import StreamTopicTarget
from zerver.lib.topic import (
filter_by_exact_message_topic,
filter_by_topic_name_via_message,
save_message_for_edit_use_case,
update_messages_for_topic_edit,
ORIG_TOPIC,
LEGACY_PREV_TOPIC,
TOPIC_LINKS,
TOPIC_NAME,
)
from zerver.lib.topic_mutes import (
get_topic_mutes,
add_topic_mute,
remove_topic_mute,
)
from zerver.lib.users import (
bulk_get_users,
check_bot_name_available,
check_full_name,
get_api_key,
)
from zerver.lib.user_status import (
update_user_status,
)
from zerver.lib.user_groups import create_user_group, access_user_group_by_id
from zerver.models import Realm, RealmEmoji, Stream, UserProfile, UserActivity, \
RealmDomain, Service, SubMessage, \
Subscription, Recipient, Message, Attachment, UserMessage, RealmAuditLog, \
UserHotspot, MultiuseInvite, ScheduledMessage, UserStatus, \
Client, DefaultStream, DefaultStreamGroup, UserPresence, \
ScheduledEmail, MAX_TOPIC_NAME_LENGTH, \
MAX_MESSAGE_LENGTH, get_client, get_stream, get_personal_recipient, \
get_user_profile_by_id, PreregistrationUser, \
bulk_get_recipients, get_stream_recipient, get_stream_recipients, \
email_allowed_for_realm, email_to_username, \
get_user_by_delivery_email, get_stream_cache_key, active_non_guest_user_ids, \
UserActivityInterval, active_user_ids, get_active_streams, \
realm_filters_for_realm, RealmFilter, stream_name_in_use, \
get_old_unclaimed_attachments, is_cross_realm_bot_email, \
Reaction, EmailChangeStatus, CustomProfileField, \
custom_profile_fields_for_realm, get_huddle_user_ids, \
CustomProfileFieldValue, validate_attachment_request, get_system_bot, \
query_for_ids, get_huddle_recipient, \
UserGroup, UserGroupMembership, get_default_stream_groups, \
get_bot_services, get_bot_dicts_in_realm, DomainNotAllowedForRealmError, \
DisposableEmailError, EmailContainsPlusError, \
get_user_including_cross_realm, get_user_by_id_in_realm_including_cross_realm, \
get_stream_by_id_in_realm
from zerver.lib.alert_words import get_alert_word_automaton
from zerver.lib.avatar import avatar_url, avatar_url_from_dict
from zerver.lib.stream_recipient import StreamRecipientMap
from zerver.lib.validator import check_widget_content
from zerver.lib.widget import do_widget_post_save_actions
from django.db import transaction, IntegrityError, connection
from django.db.models import F, Q, Max, Sum
from django.db.models.query import QuerySet
from django.core.exceptions import ValidationError
from django.utils.timezone import now as timezone_now
from confirmation.models import Confirmation, create_confirmation_link, generate_key, \
confirmation_url
from confirmation import settings as confirmation_settings
from zerver.lib.bulk_create import bulk_create_users
from zerver.lib.timestamp import timestamp_to_datetime, datetime_to_timestamp
from zerver.lib.queue import queue_json_publish
from zerver.lib.utils import generate_api_key
from zerver.lib.create_user import create_user, get_display_email_address
from zerver.lib import bugdown
from zerver.lib.cache import cache_with_key, cache_set, \
user_profile_by_email_cache_key, \
cache_set_many, cache_delete, cache_delete_many
from zerver.decorator import statsd_increment
from zerver.lib.utils import log_statsd_event, statsd
from zerver.lib.i18n import get_language_name
from zerver.lib.alert_words import add_user_alert_words, \
remove_user_alert_words, set_user_alert_words
from zerver.lib.email_notifications import enqueue_welcome_emails
from zerver.lib.exceptions import JsonableError, ErrorCode, BugdownRenderingException
from zerver.lib.sessions import delete_user_sessions
from zerver.lib.upload import attachment_url_re, attachment_url_to_path_id, \
claim_attachment, delete_message_image, upload_emoji_image, delete_avatar_image, \
delete_export_tarball
from zerver.lib.video_calls import request_zoom_video_call_url
from zerver.tornado.event_queue import send_event
from zerver.lib.types import ProfileFieldData
from analytics.models import StreamCount
if settings.BILLING_ENABLED:
from corporate.lib.stripe import update_license_ledger_if_needed
import ujson
import time
import datetime
import os
import platform
import logging
import itertools
from collections import defaultdict
from operator import itemgetter
# This will be used to type annotate parameters in a function if the function
# works on both str and unicode in python 2 but in python 3 it only works on str.
SizedTextIterable = Union[Sequence[str], AbstractSet[str]]
# How many recent public-stream messages are copied into a brand-new
# user's history (see add_new_user_history), and how many of the newest
# of those are left unread.
ONBOARDING_TOTAL_MESSAGES = 1000
ONBOARDING_UNREAD_MESSAGES = 20
# Palette of hex colors for stream subscriptions; presumably cycled
# through when auto-assigning a color to a new subscription -- usage is
# outside this chunk, so confirm before relying on that.
STREAM_ASSIGNMENT_COLORS = [
    "#76ce90", "#fae589", "#a6c7e5", "#e79ab5",
    "#bfd56f", "#f4ae55", "#b0a5fd", "#addfe5",
    "#f5ce6e", "#c2726a", "#94c849", "#bd86e5",
    "#ee7e4a", "#a6dcbf", "#95a5fd", "#53a063",
    "#9987e1", "#e4523d", "#c2c2c2", "#4f8de4",
    "#c6a8ad", "#e7cc4d", "#c8bebf", "#a47462"]
# Store an event in the log for re-importing messages
def log_event(event: MutableMapping[str, Any]) -> None:
    """Append `event` as one JSON line to the per-host, per-day event log.

    Does nothing unless ``settings.EVENT_LOG_DIR`` is configured.  A
    sidecar lock file serializes writers so that concurrent processes
    don't interleave partial lines.
    """
    if settings.EVENT_LOG_DIR is None:
        return
    if "timestamp" not in event:
        event["timestamp"] = time.time()
    # makedirs(exist_ok=True) avoids the check-then-create race that the
    # previous os.path.exists() + os.mkdir() pair had when two processes
    # logged their first event at the same time.
    os.makedirs(settings.EVENT_LOG_DIR, exist_ok=True)
    template = os.path.join(settings.EVENT_LOG_DIR,
                            '%s.' + platform.node() +
                            timezone_now().strftime('.%Y-%m-%d'))
    with lockfile(template % ('lock',)):
        with open(template % ('events',), 'a') as log:
            log.write(ujson.dumps(event) + '\n')
def can_access_stream_user_ids(stream: Stream) -> Set[int]:
    """Return the ids of users who can access this stream's attributes
    (e.g. its name and description)."""
    if not stream.is_public():
        # Private stream: its subscribers, plus the realm's admins.
        admin_ids = {user.id for user in stream.realm.get_admin_users_and_bots()}
        return private_stream_user_ids(stream.id) | admin_ids
    # Public stream: everyone in the realm except unsubscribed guests.
    return public_stream_user_ids(stream)
def private_stream_user_ids(stream_id: int) -> Set[int]:
    """Return the ids of all users actively subscribed to this stream."""
    # TODO: Find similar queries elsewhere and de-duplicate this code.
    rows = get_active_subscriptions_for_stream_id(stream_id).values('user_profile_id')
    return {row['user_profile_id'] for row in rows}
def public_stream_user_ids(stream: Stream) -> Set[int]:
    """Return ids of users with access to a public stream: every
    non-guest user in the realm, plus any guests who are subscribed."""
    guest_rows = get_active_subscriptions_for_stream_id(stream.id).filter(
        user_profile__role=UserProfile.ROLE_GUEST).values('user_profile_id')
    subscribed_guest_ids = {row['user_profile_id'] for row in guest_rows}
    return subscribed_guest_ids | set(active_non_guest_user_ids(stream.realm_id))
def bot_owner_user_ids(user_profile: UserProfile) -> Set[int]:
    """Return the ids of users who should hear about this bot: only the
    owner when either of the bot's default streams is private, otherwise
    the owner plus the realm's human administrators."""
    uses_private_stream = bool(
        (user_profile.default_sending_stream and
         user_profile.default_sending_stream.invite_only) or
        (user_profile.default_events_register_stream and
         user_profile.default_events_register_stream.invite_only))
    if uses_private_stream:
        return {user_profile.bot_owner_id}
    recipient_ids = {admin.id for admin in user_profile.realm.get_human_admin_users()}
    recipient_ids.add(user_profile.bot_owner_id)
    return recipient_ids
def realm_user_count(realm: Realm) -> int:
    """Count the realm's active human (non-bot) users."""
    return UserProfile.objects.filter(realm=realm, is_bot=False, is_active=True).count()
def realm_user_count_by_role(realm: Realm) -> Dict[str, Any]:
    """Return active-user counts by role, in the shape stored in
    RealmAuditLog extra_data."""
    human_counts = {
        UserProfile.ROLE_REALM_ADMINISTRATOR: 0,
        UserProfile.ROLE_MEMBER: 0,
        UserProfile.ROLE_GUEST: 0,
    }
    role_rows = UserProfile.objects.filter(
        realm=realm, is_bot=False, is_active=True,
    ).values('role').annotate(Count('role'))
    for row in role_rows:
        human_counts[row['role']] = row['role__count']
    bot_count = UserProfile.objects.filter(realm=realm, is_bot=True, is_active=True).count()
    return {
        RealmAuditLog.ROLE_COUNT_HUMANS: human_counts,
        RealmAuditLog.ROLE_COUNT_BOTS: bot_count,
    }
def send_signup_message(sender: UserProfile, admin_realm_signup_notifications_stream: str,
                        user_profile: UserProfile, internal: bool=False,
                        realm: Optional[Realm]=None) -> None:
    """Announce user_profile's signup on the new user's realm's signup
    notifications stream (if configured) and on the server admin realm's
    signups stream (if it exists)."""
    # TODO: `internal` should mean "created via manage.py rather than the
    # web interface", but recent refactorings mean the flag isn't passed
    # through properly.
    internal_blurb = " **INTERNAL SIGNUP** " if internal else " "
    user_count = realm_user_count(user_profile.realm)
    signup_notifications_stream = user_profile.realm.get_signup_notifications_stream()
    # Announce within the user's own realm, except for its very first user.
    if signup_notifications_stream is not None and user_count > 1:
        realm_message = "@_**%s|%s** just signed up for Zulip. (total: %i)" % (
            user_profile.full_name, user_profile.id, user_count
        )
        internal_send_message(
            user_profile.realm,
            sender,
            "stream",
            signup_notifications_stream.name,
            "signups",
            realm_message,
        )
    # Mirror the announcement into the Zulip administrative realm.
    admin_realm = get_system_bot(sender).realm
    try:
        # Probe whether the stream exists; we never auto-create it here.
        get_stream(admin_realm_signup_notifications_stream, admin_realm)
    except Stream.DoesNotExist:
        return
    admin_message = "%s <`%s`> just signed up for Zulip!%s(total: **%i**)" % (
        user_profile.full_name,
        user_profile.email,
        internal_blurb,
        user_count,
    )
    internal_send_message(
        admin_realm,
        sender,
        "stream",
        admin_realm_signup_notifications_stream,
        user_profile.realm.display_subdomain,
        admin_message,
    )
def notify_invites_changed(user_profile: UserProfile) -> None:
    """Tell the realm's admins (who manage invitations) that the set of
    pending invitations changed."""
    admin_ids = [admin.id for admin in user_profile.realm.get_admin_users_and_bots()]
    send_event(user_profile.realm, dict(type="invites_changed"), admin_ids)
def notify_new_user(user_profile: UserProfile, internal: bool=False) -> None:
    """Announce this signup via the notification bot on the "signups" streams."""
    send_signup_message(settings.NOTIFICATION_BOT, "signups", user_profile,
                        internal=internal)
def add_new_user_history(user_profile: UserProfile, streams: Iterable[Stream]) -> None:
    """Give you the last ONBOARDING_TOTAL_MESSAGES messages on your public
    streams, so you have something to look at in your home view once
    you finish the tutorial.  The most recent ONBOARDING_UNREAD_MESSAGES
    are marked unread.
    """
    one_week_ago = timezone_now() - datetime.timedelta(weeks=1)
    public_stream_ids = [stream.id for stream in streams if not stream.invite_only]
    recipients = get_stream_recipients(public_stream_ids)
    recent_messages = Message.objects.filter(
        recipient_id__in=recipients, date_sent__gt=one_week_ago).order_by("-id")
    # Oldest-to-newest list of the most recent message ids.
    message_ids_to_use = list(reversed(
        recent_messages.values_list('id', flat=True)[0:ONBOARDING_TOTAL_MESSAGES]))
    if not message_ids_to_use:
        return
    # Guard against the race where a message arrived between the
    # subscription creation (bulk_add_subscriptions) and the query above.
    already_seen_ids = set(UserMessage.objects.filter(
        message_id__in=message_ids_to_use,
        user_profile=user_profile).values_list("message_id", flat=True))
    # Walk newest-to-oldest, leaving the newest ONBOARDING_UNREAD_MESSAGES
    # unread and marking everything older as read.
    unread_count = 0
    ums_to_create = []
    for message_id in reversed(message_ids_to_use):
        if message_id in already_seen_ids:
            continue
        um = UserMessage(user_profile=user_profile, message_id=message_id)
        if unread_count < ONBOARDING_UNREAD_MESSAGES:
            unread_count += 1
        else:
            um.flags = UserMessage.flags.read
        ums_to_create.append(um)
    # Insert rows in ascending message-id order.
    UserMessage.objects.bulk_create(reversed(ums_to_create))
# Does the processing for a new user account:
# * Subscribes to default/invitation streams
# * Fills in some recent historical messages
# * Notifies other users in realm and Zulip about the signup
# * Deactivates PreregistrationUser objects
# * subscribe the user to newsletter if newsletter_data is specified
def process_new_human_user(user_profile: UserProfile,
                           prereg_user: Optional[PreregistrationUser]=None,
                           newsletter_data: Optional[Dict[str, str]]=None,
                           default_stream_groups: List[DefaultStreamGroup]=[],
                           realm_creation: bool=False) -> None:
    """One-time setup for a freshly created human account.

    NOTE(review): the mutable `[]` default for default_stream_groups is
    safe here only because the list is iterated, never mutated.
    """
    mit_beta_user = user_profile.realm.is_zephyr_mirror_realm
    if prereg_user is not None:
        # Invited user: subscribe to the streams picked by the inviter.
        streams = prereg_user.streams.all()
        acting_user = prereg_user.referred_by  # type: Optional[UserProfile]
    else:
        streams = []
        acting_user = None
    # If the user's invitation didn't explicitly list some streams, we
    # add the default streams
    if len(streams) == 0:
        streams = get_default_subs(user_profile)
    # Merge in streams from any requested default stream groups,
    # skipping duplicates.
    for default_stream_group in default_stream_groups:
        default_stream_group_streams = default_stream_group.streams.all()
        for stream in default_stream_group_streams:
            if stream not in streams:
                streams.append(stream)
    bulk_add_subscriptions(streams, [user_profile], acting_user=acting_user)
    # Seed the new user's feed with recent public-stream history.
    add_new_user_history(user_profile, streams)
    # mit_beta_users don't have a referred_by field
    if not mit_beta_user and prereg_user is not None and prereg_user.referred_by is not None:
        # This is a cross-realm private message.
        internal_send_private_message(
            user_profile.realm,
            get_system_bot(settings.NOTIFICATION_BOT),
            prereg_user.referred_by,
            "%s <`%s`> accepted your invitation to join Zulip!" % (
                user_profile.full_name,
                user_profile.email,
            )
        )
    # Mark any other PreregistrationUsers that are STATUS_ACTIVE as
    # inactive so we can keep track of the PreregistrationUser we
    # actually used for analytics
    if prereg_user is not None:
        PreregistrationUser.objects.filter(email__iexact=user_profile.delivery_email).exclude(
            id=prereg_user.id).update(status=0)
        if prereg_user.referred_by is not None:
            notify_invites_changed(user_profile)
    else:
        PreregistrationUser.objects.filter(email__iexact=user_profile.delivery_email).update(status=0)
    notify_new_user(user_profile)
    # Clear any scheduled invitation emails to prevent them
    # from being sent after the user is created.
    clear_scheduled_invitation_emails(user_profile.delivery_email)
    if user_profile.realm.send_welcome_emails:
        enqueue_welcome_emails(user_profile, realm_creation)
    # We have an import loop here; it's intentional, because we want
    # to keep all the onboarding code in zerver/lib/onboarding.py.
    from zerver.lib.onboarding import send_initial_pms
    send_initial_pms(user_profile)
    if newsletter_data is not None:
        # If the user was created automatically via the API, we may
        # not want to register them for the newsletter
        queue_json_publish(
            "signups",
            {
                'email_address': user_profile.delivery_email,
                'user_id': user_profile.id,
                'merge_fields': {
                    'NAME': user_profile.full_name,
                    'REALM_ID': user_profile.realm_id,
                    'OPTIN_IP': newsletter_data["IP"],
                    'OPTIN_TIME': datetime.datetime.isoformat(timezone_now().replace(microsecond=0)),
                },
            },
            lambda event: None)
def notify_created_user(user_profile: UserProfile) -> None:
    """Broadcast a realm_user/add event describing a newly created user."""
    person = dict(email=user_profile.email,
                  user_id=user_profile.id,
                  is_admin=user_profile.is_realm_admin,
                  full_name=user_profile.full_name,
                  avatar_url=avatar_url(user_profile),
                  timezone=user_profile.timezone,
                  date_joined=user_profile.date_joined.isoformat(),
                  is_guest=user_profile.is_guest,
                  is_bot=user_profile.is_bot)  # type: Dict[str, Any]
    if user_profile.is_bot:
        person["bot_type"] = user_profile.bot_type
        if user_profile.bot_owner_id is not None:
            person["bot_owner_id"] = user_profile.bot_owner_id
    else:
        # Humans carry an (initially empty) custom profile data mapping.
        person["profile_data"] = {}
    event = dict(type="realm_user", op="add", person=person)  # type: Dict[str, Any]
    send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def created_bot_event(user_profile: UserProfile) -> Dict[str, Any]:
    """Build the realm_bot/add event payload for a newly created bot."""
    def stream_name(stream: Optional[Stream]) -> Optional[str]:
        # Map a missing stream to None instead of raising.
        if not stream:
            return None
        return stream.name
    default_sending_stream_name = stream_name(user_profile.default_sending_stream)
    default_events_register_stream_name = stream_name(user_profile.default_events_register_stream)
    bot = dict(email=user_profile.email,
               user_id=user_profile.id,
               full_name=user_profile.full_name,
               bot_type=user_profile.bot_type,
               is_active=user_profile.is_active,
               api_key=get_api_key(user_profile),
               default_sending_stream=default_sending_stream_name,
               default_events_register_stream=default_events_register_stream_name,
               default_all_public_streams=user_profile.default_all_public_streams,
               avatar_url=avatar_url(user_profile),
               services = get_service_dicts_for_bot(user_profile.id),
               )
    # Set the owner key only when the bot has an owner.
    # The default bots don't have an owner. So don't
    # set the owner key while reactivating them.
    if user_profile.bot_owner is not None:
        bot['owner'] = user_profile.bot_owner.email
    return dict(type="realm_bot", op="add", bot=bot)
def notify_created_bot(user_profile: UserProfile) -> None:
    """Send the realm_bot/add event for a new bot to its owner(s)/admins."""
    event = created_bot_event(user_profile)
    send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))
def create_users(realm: Realm, name_list: Iterable[Tuple[str, str]], bot_type: Optional[int]=None) -> None:
    """Bulk-create accounts in ``realm`` from (full_name, email) pairs.

    The short name is derived from the email's local part; all accounts
    are created active (the trailing True in each tuple).
    """
    user_set = {
        (email, full_name, email_to_username(email), True)
        for full_name, email in name_list
    }
    bulk_create_users(realm, user_set, bot_type)
def do_create_user(email: str, password: Optional[str], realm: Realm, full_name: str,
                   short_name: str, bot_type: Optional[int]=None,
                   is_realm_admin: bool=False, is_guest: bool=False,
                   bot_owner: Optional[UserProfile]=None, tos_version: Optional[str]=None,
                   timezone: str="", avatar_source: str=UserProfile.AVATAR_FROM_GRAVATAR,
                   default_sending_stream: Optional[Stream]=None,
                   default_events_register_stream: Optional[Stream]=None,
                   default_all_public_streams: Optional[bool]=None,
                   prereg_user: Optional[PreregistrationUser]=None,
                   newsletter_data: Optional[Dict[str, str]]=None,
                   default_stream_groups: List[DefaultStreamGroup]=[],
                   source_profile: Optional[UserProfile]=None,
                   realm_creation: bool=False) -> UserProfile:
    """Create a user (human or bot), record audit-log and analytics rows,
    and send the relevant client notification events.

    NOTE(review): default_stream_groups has a mutable default ([]); it is
    only passed through here, but callers must not mutate it.
    """
    user_profile = create_user(email=email, password=password, realm=realm,
                               full_name=full_name, short_name=short_name,
                               is_realm_admin=is_realm_admin, is_guest=is_guest,
                               bot_type=bot_type, bot_owner=bot_owner,
                               tos_version=tos_version, timezone=timezone, avatar_source=avatar_source,
                               default_sending_stream=default_sending_stream,
                               default_events_register_stream=default_events_register_stream,
                               default_all_public_streams=default_all_public_streams,
                               source_profile=source_profile)
    event_time = user_profile.date_joined
    # Audit log entry with a snapshot of per-role user counts.
    RealmAuditLog.objects.create(
        realm=user_profile.realm, modified_user=user_profile,
        event_type=RealmAuditLog.USER_CREATED, event_time=event_time,
        extra_data=ujson.dumps({
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm)
        }))
    do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
                              user_profile.is_bot, event_time)
    if settings.BILLING_ENABLED:
        update_license_ledger_if_needed(user_profile.realm, event_time)
    notify_created_user(user_profile)
    if bot_type:
        notify_created_bot(user_profile)
    else:
        # Humans get the onboarding flow (invitation cleanup, welcome
        # emails, newsletter signup, etc.).
        process_new_human_user(user_profile, prereg_user=prereg_user,
                               newsletter_data=newsletter_data,
                               default_stream_groups=default_stream_groups,
                               realm_creation=realm_creation)
    return user_profile
def do_activate_user(user_profile: UserProfile) -> None:
    """Activate a pre-created (e.g. mirror-dummy) account for first use:
    resets the join date, password, and terms-of-service version."""
    user_profile.is_active = True
    user_profile.is_mirror_dummy = False
    # Force the user through a password(-setup) flow; any old password
    # on the row is invalidated.
    user_profile.set_unusable_password()
    user_profile.date_joined = timezone_now()
    user_profile.tos_version = settings.TOS_VERSION
    user_profile.save(update_fields=["is_active", "date_joined", "password",
                                     "is_mirror_dummy", "tos_version"])
    event_time = user_profile.date_joined
    RealmAuditLog.objects.create(
        realm=user_profile.realm, modified_user=user_profile,
        event_type=RealmAuditLog.USER_ACTIVATED, event_time=event_time,
        extra_data=ujson.dumps({
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm)
        }))
    do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
                              user_profile.is_bot, event_time)
    if settings.BILLING_ENABLED:
        update_license_ledger_if_needed(user_profile.realm, event_time)
    notify_created_user(user_profile)
def do_reactivate_user(user_profile: UserProfile, acting_user: Optional[UserProfile]=None) -> None:
    # Unlike do_activate_user, this is meant for re-activating existing users,
    # so it doesn't reset their password, etc.
    user_profile.is_active = True
    user_profile.save(update_fields=["is_active"])
    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
        event_type=RealmAuditLog.USER_REACTIVATED, event_time=event_time,
        extra_data=ujson.dumps({
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm)
        }))
    do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
                              user_profile.is_bot, event_time)
    if settings.BILLING_ENABLED:
        update_license_ledger_if_needed(user_profile.realm, event_time)
    # Reactivation looks like creation to clients: re-announce the user
    # (and, for bots, the bot record) so they reappear in user lists.
    notify_created_user(user_profile)
    if user_profile.is_bot:
        notify_created_bot(user_profile)
def active_humans_in_realm(realm: Realm) -> Sequence[UserProfile]:
    """Return a queryset of the realm's active, non-bot users."""
    return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False)
def do_set_realm_property(realm: Realm, name: str, value: Any) -> None:
    """Takes in a realm object, the name of an attribute to update, and the
    value to update, saves the change, and notifies clients.

    The attribute must appear in Realm.property_types; the value is
    type-checked against the declared property type before saving.
    """
    property_type = Realm.property_types[name]
    assert isinstance(value, property_type), (
        'Cannot update %s: %s is not an instance of %s' % (
            name, value, property_type,))
    setattr(realm, name, value)
    realm.save(update_fields=[name])
    if name == 'zoom_api_secret':
        # Send '' as the value through the API for the API secret
        value = ''
    event = dict(
        type='realm',
        op='update',
        property=name,
        value=value,
    )
    send_event(realm, event, active_user_ids(realm.id))
    if name == "email_address_visibility":
        # Changing visibility can change every human user's visible
        # .email address, so recompute and persist them all.
        for user_profile in UserProfile.objects.filter(realm=realm, is_bot=False):
            # TODO: This does linear queries in the number of users
            # and thus is potentially very slow. Probably not super
            # important since this is a feature few folks will toggle,
            # but as a policy matter, we don't do linear queries
            # ~anywhere in Zulip.
            old_email = user_profile.email
            user_profile.email = get_display_email_address(user_profile, realm)
            user_profile.save(update_fields=["email"])
            # TODO: Design a bulk event for this or force-reload all clients
            if user_profile.email != old_email:
                send_user_email_update_event(user_profile)
def do_set_realm_authentication_methods(realm: Realm,
                                        authentication_methods: Dict[str, bool]) -> None:
    """Enable/disable the realm's authentication backends per the given
    {backend_name: enabled} mapping and notify clients.

    realm.authentication_methods is a bitfield; each backend name maps
    to a bit index via its .number attribute.
    """
    for key, value in list(authentication_methods.items()):
        index = getattr(realm.authentication_methods, key).number
        realm.authentication_methods.set_bit(index, int(value))
    realm.save(update_fields=['authentication_methods'])
    event = dict(
        type="realm",
        op="update_dict",
        property='default',
        data=dict(authentication_methods=realm.authentication_methods_dict())
    )
    send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_message_editing(realm: Realm,
                                 allow_message_editing: bool,
                                 message_content_edit_limit_seconds: int,
                                 allow_community_topic_editing: bool) -> None:
    """Update the realm's message-editing policy and notify clients."""
    realm.allow_message_editing = allow_message_editing
    realm.message_content_edit_limit_seconds = message_content_edit_limit_seconds
    realm.allow_community_topic_editing = allow_community_topic_editing
    changed_fields = [
        'allow_message_editing',
        'allow_community_topic_editing',
        'message_content_edit_limit_seconds',
    ]
    realm.save(update_fields=changed_fields)
    data = dict(
        allow_message_editing=allow_message_editing,
        message_content_edit_limit_seconds=message_content_edit_limit_seconds,
        allow_community_topic_editing=allow_community_topic_editing,
    )
    event = dict(type="realm", op="update_dict", property="default", data=data)
    send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_message_deleting(realm: Realm,
                                  message_content_delete_limit_seconds: int) -> None:
    """Update the realm's message-deletion time limit and notify clients."""
    realm.message_content_delete_limit_seconds = message_content_delete_limit_seconds
    realm.save(update_fields=['message_content_delete_limit_seconds'])
    payload = dict(
        type="realm",
        op="update_dict",
        property="default",
        data=dict(message_content_delete_limit_seconds=message_content_delete_limit_seconds),
    )
    send_event(realm, payload, active_user_ids(realm.id))
def do_set_realm_notifications_stream(realm: Realm, stream: Stream, stream_id: int) -> None:
    """Point the realm's notifications stream at `stream` and notify clients.

    The separately-passed stream_id is what gets broadcast to clients.
    """
    realm.notifications_stream = stream
    realm.save(update_fields=['notifications_stream'])
    send_event(
        realm,
        dict(type="realm", op="update",
             property="notifications_stream_id", value=stream_id),
        active_user_ids(realm.id),
    )
def do_set_realm_signup_notifications_stream(realm: Realm, stream: Stream,
                                             stream_id: int) -> None:
    """Point the realm's signup-notifications stream at `stream` and
    notify clients (stream_id is the value broadcast to clients)."""
    realm.signup_notifications_stream = stream
    realm.save(update_fields=['signup_notifications_stream'])
    send_event(
        realm,
        dict(type="realm", op="update",
             property="signup_notifications_stream_id", value=stream_id),
        active_user_ids(realm.id),
    )
def do_deactivate_realm(realm: Realm, acting_user: Optional[UserProfile]=None) -> None:
    """
    Deactivate this realm. Do NOT deactivate the users -- we need to be able to
    tell the difference between users that were intentionally deactivated,
    e.g. by a realm admin, and users who can't currently use Zulip because their
    realm has been deactivated.
    """
    if realm.deactivated:
        # Idempotent: a second deactivation is a no-op.
        return
    realm.deactivated = True
    realm.save(update_fields=["deactivated"])
    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=realm, event_type=RealmAuditLog.REALM_DEACTIVATED, event_time=event_time,
        acting_user=acting_user, extra_data=ujson.dumps({
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm)
        }))
    # Cancel any pending outgoing emails for this realm.
    ScheduledEmail.objects.filter(realm=realm).delete()
    for user in active_humans_in_realm(realm):
        # Don't deactivate the users, but do delete their sessions so they get
        # bumped to the login screen, where they'll get a realm deactivation
        # notice when they try to log in.
        delete_user_sessions(user)
    event = dict(type="realm", op="deactivated",
                 realm_id=realm.id)
    send_event(realm, event, active_user_ids(realm.id))
def do_reactivate_realm(realm: Realm) -> None:
    """Reactivate a deactivated realm and write an audit-log entry."""
    realm.deactivated = False
    realm.save(update_fields=["deactivated"])
    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=realm, event_type=RealmAuditLog.REALM_REACTIVATED, event_time=event_time,
        extra_data=ujson.dumps({
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm)
        }))
def do_change_realm_subdomain(realm: Realm, new_subdomain: str) -> None:
    """Rename the realm's subdomain (its string_id) to new_subdomain."""
    realm.string_id = new_subdomain
    realm.save(update_fields=["string_id"])
def do_scrub_realm(realm: Realm) -> None:
    """Irreversibly scrub personal data from a realm: delete every user's
    messages, avatar and attachments, randomize their names and emails,
    and remove custom profile fields.  Records a REALM_SCRUBBED audit row."""
    users = UserProfile.objects.filter(realm=realm)
    for user in users:
        do_delete_messages_by_sender(user)
        do_delete_avatar_image(user)
        # Replace identifying fields with random placeholders.
        user.full_name = "Scrubbed {}".format(generate_key()[:15])
        scrubbed_email = "scrubbed-{}@{}".format(generate_key()[:15], realm.host)
        user.email = scrubbed_email
        user.delivery_email = scrubbed_email
        user.save(update_fields=["full_name", "email", "delivery_email"])
    do_remove_realm_custom_profile_fields(realm)
    Attachment.objects.filter(realm=realm).delete()
    RealmAuditLog.objects.create(realm=realm, event_time=timezone_now(),
                                 event_type=RealmAuditLog.REALM_SCRUBBED)
def do_deactivate_user(user_profile: UserProfile,
                       acting_user: Optional[UserProfile]=None,
                       _cascade: bool=True) -> None:
    """Deactivate a user: mark them inactive, kill their sessions, cancel
    their scheduled emails, record audit/analytics data, and notify
    clients.  When _cascade is True (the default), bots owned by the user
    are deactivated as well.  Idempotent for already-inactive users."""
    if not user_profile.is_active:
        return
    if user_profile.realm.is_zephyr_mirror_realm:  # nocoverage
        # For zephyr mirror users, we need to make them a mirror dummy
        # again; otherwise, other users won't get the correct behavior
        # when trying to send messages to this person inside Zulip.
        #
        # Ideally, we need to also ensure their zephyr mirroring bot
        # isn't running, but that's a separate issue.
        user_profile.is_mirror_dummy = True
    user_profile.is_active = False
    # Bug fix: include is_mirror_dummy in update_fields; previously only
    # is_active was saved, so the mirror-dummy flag set above was
    # silently dropped for zephyr mirror realms.
    user_profile.save(update_fields=["is_active", "is_mirror_dummy"])
    delete_user_sessions(user_profile)
    clear_scheduled_emails([user_profile.id])
    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
        event_type=RealmAuditLog.USER_DEACTIVATED, event_time=event_time,
        extra_data=ujson.dumps({
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm)
        }))
    do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
                              user_profile.is_bot, event_time, increment=-1)
    if settings.BILLING_ENABLED:
        update_license_ledger_if_needed(user_profile.realm, event_time)
    event = dict(type="realm_user", op="remove",
                 person=dict(email=user_profile.email,
                             user_id=user_profile.id,
                             full_name=user_profile.full_name))
    send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
    if user_profile.is_bot:
        event = dict(type="realm_bot", op="remove",
                     bot=dict(email=user_profile.email,
                              user_id=user_profile.id,
                              full_name=user_profile.full_name))
        send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))
    if _cascade:
        # Deactivate all active bots owned by this user (non-cascading to
        # avoid unbounded recursion through bot ownership chains).
        bot_profiles = UserProfile.objects.filter(is_bot=True, is_active=True,
                                                  bot_owner=user_profile)
        for profile in bot_profiles:
            do_deactivate_user(profile, acting_user=acting_user, _cascade=False)
def do_deactivate_stream(stream: Stream, log: bool=True) -> None:
    """Deactivate a stream: unsubscribe everyone, rename it out of the
    way, make it invite-only, and notify affected clients."""
    # Get the affected user ids *before* we deactivate everybody.
    affected_user_ids = can_access_stream_user_ids(stream)
    get_active_subscriptions_for_stream_id(stream.id).update(active=False)
    was_invite_only = stream.invite_only
    stream.deactivated = True
    stream.invite_only = True
    # Preserve as much as possible the original stream name while giving it a
    # special prefix that both indicates that the stream is deactivated and
    # frees up the original name for reuse.
    old_name = stream.name
    new_name = ("!DEACTIVATED:" + old_name)[:Stream.MAX_NAME_LENGTH]
    for i in range(20):
        if stream_name_in_use(new_name, stream.realm_id):
            # This stream has already been deactivated, keep prepending !s until
            # we have a unique stream name or you've hit a rename limit.
            new_name = ("!" + new_name)[:Stream.MAX_NAME_LENGTH]
        else:
            break
    # If you don't have a unique name at this point, this will fail later in the
    # code path.
    stream.name = new_name[:Stream.MAX_NAME_LENGTH]
    stream.save(update_fields=['name', 'deactivated', 'invite_only'])
    # If this is a default stream, remove it, properly sending a
    # notification to browser clients.
    if DefaultStream.objects.filter(realm_id=stream.realm_id, stream_id=stream.id).exists():
        do_remove_default_stream(stream)
    # Remove the old stream information from remote cache.
    old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
    cache_delete(old_cache_key)
    # Clients are told about the stream under its pre-deactivation name
    # and privacy setting.
    stream_dict = stream.to_dict()
    stream_dict.update(dict(name=old_name, invite_only=was_invite_only))
    event = dict(type="stream", op="delete",
                 streams=[stream_dict])
    send_event(stream.realm, event, affected_user_ids)
def send_user_email_update_event(user_profile: UserProfile) -> None:
    """Notify all active realm users that this user's visible .email changed."""
    event = dict(
        type='realm_user',
        op='update',
        person=dict(user_id=user_profile.id,
                    new_email=user_profile.email),
    )
    send_event(user_profile.realm, event,
               active_user_ids(user_profile.realm_id))
def do_change_user_delivery_email(user_profile: UserProfile, new_email: str) -> None:
    """Change a user's delivery (real) email address, updating the visible
    .email too when realm policy makes it public, and notify clients."""
    delete_user_profile_caches([user_profile])
    user_profile.delivery_email = new_email
    if user_profile.email_address_is_realm_public():
        # With a public email policy, the visible address tracks the
        # delivery address.
        user_profile.email = new_email
        user_profile.save(update_fields=["email", "delivery_email"])
    else:
        user_profile.save(update_fields=["delivery_email"])
    # We notify just the target user (and eventually org admins) about
    # their new delivery email, since that field is private.
    payload = dict(user_id=user_profile.id,
                   delivery_email=new_email)
    event = dict(type='realm_user', op='update', person=payload)
    send_event(user_profile.realm, event, [user_profile.id])
    if user_profile.avatar_source == UserProfile.AVATAR_FROM_GRAVATAR:
        # If the user is using Gravatar to manage their email address,
        # their Gravatar just changed, and we need to notify other
        # clients.
        notify_avatar_url_change(user_profile)
    if user_profile.email_address_is_realm_public():
        # Additionally, if we're also changing the publicly visible
        # email, we send a new_email event as well.
        send_user_email_update_event(user_profile)
    event_time = timezone_now()
    RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
                                 modified_user=user_profile, event_type=RealmAuditLog.USER_EMAIL_CHANGED,
                                 event_time=event_time)
def do_start_email_change_process(user_profile: UserProfile, new_email: str) -> None:
    """Begin an email-change flow: record the pending change and send a
    confirmation link to the *new* address."""
    old_email = user_profile.delivery_email
    obj = EmailChangeStatus.objects.create(new_email=new_email, old_email=old_email,
                                           user_profile=user_profile, realm=user_profile.realm)
    activation_url = create_confirmation_link(obj, user_profile.realm.host, Confirmation.EMAIL_CHANGE)
    # Imported here (not at module level) -- presumably to avoid an
    # import cycle; TODO confirm.
    from zerver.context_processors import common_context
    context = common_context(user_profile)
    context.update({
        'old_email': old_email,
        'new_email': new_email,
        'activate_url': activation_url
    })
    send_email('zerver/emails/confirm_new_email', to_emails=[new_email],
               from_name='Zulip Account Security', from_address=FromAddress.tokenized_no_reply_address(),
               language=user_profile.default_language, context=context)
def compute_irc_user_fullname(email: str) -> str:
    """Derive a display name for an IRC mirror user from their email:
    the local part of the address plus an " (IRC)" suffix."""
    local_part, _, _ = email.partition("@")
    return local_part + " (IRC)"
def compute_jabber_user_fullname(email: str) -> str:
    """Derive a display name for a Jabber/XMPP mirror user from their
    email: the local part of the address plus an " (XMPP)" suffix."""
    return "{} (XMPP)".format(email.partition("@")[0])
# Cached by email for a week, so repeated mirror traffic for the same
# address skips the database entirely.
@cache_with_key(lambda realm, email, f: user_profile_by_email_cache_key(email),
                timeout=3600*24*7)
def create_mirror_user_if_needed(realm: Realm, email: str,
                                 email_to_fullname: Callable[[str], str]) -> UserProfile:
    """Fetch or lazily create an inactive mirror-dummy user for `email`."""
    try:
        return get_user_by_delivery_email(email, realm)
    except UserProfile.DoesNotExist:
        try:
            # Forge a user for this person
            return create_user(
                email=email,
                password=None,
                realm=realm,
                full_name=email_to_fullname(email),
                short_name=email_to_username(email),
                active=False,
                is_mirror_dummy=True,
            )
        except IntegrityError:
            # A concurrent request created the same user; fetch that row.
            return get_user_by_delivery_email(email, realm)
def send_welcome_bot_response(message: MutableMapping[str, Any]) -> None:
    """Have the welcome bot congratulate a user on their first reply.

    Only fires while the welcome bot has sent fewer than 2 messages to
    this user -- presumably the initial onboarding PM plus this one, so
    the congratulation is sent at most once; verify against onboarding.
    """
    welcome_bot = get_system_bot(settings.WELCOME_BOT)
    human_recipient = get_personal_recipient(message['message'].sender.id)
    if Message.objects.filter(sender=welcome_bot, recipient=human_recipient).count() < 2:
        internal_send_private_message(
            message['realm'], welcome_bot, message['message'].sender,
            "Congratulations on your first reply! :tada:\n\n"
            "Feel free to continue using this space to practice your new messaging "
            "skills. Or, try clicking on some of the stream names to your left!")
def render_incoming_message(message: Message,
                            content: str,
                            user_ids: Set[int],
                            realm: Realm,
                            mention_data: Optional[bugdown.MentionData]=None,
                            email_gateway: bool=False) -> str:
    """Render a message's markdown to HTML, mapping rendering failures to
    a user-facing JsonableError."""
    realm_alert_words_automaton = get_alert_word_automaton(realm)
    try:
        rendered_content = render_markdown(
            message=message,
            content=content,
            realm=realm,
            realm_alert_words_automaton = realm_alert_words_automaton,
            user_ids=user_ids,
            mention_data=mention_data,
            email_gateway=email_gateway,
        )
    except BugdownRenderingException:
        raise JsonableError(_('Unable to render message'))
    return rendered_content
def get_typing_user_profiles(recipient: Recipient, sender_id: int) -> List[UserProfile]:
    """Return the users who should receive typing notifications for this
    recipient; raises ValueError for streams and unknown recipient types."""
    if recipient.type == Recipient.STREAM:
        # Typing indicators are intentionally unsupported for streams:
        # they are expensive and initial user feedback found them too
        # distracting.
        raise ValueError('Typing indicators not supported for streams')
    if recipient.type == Recipient.PERSONAL:
        # A self-PM has sender == recipient, so a set de-duplicates them.
        user_ids = list({recipient.type_id, sender_id})
        assert(len(user_ids) in [1, 2])
    elif recipient.type == Recipient.HUDDLE:
        user_ids = get_huddle_user_ids(recipient)
    else:
        raise ValueError('Bad recipient type')
    return [get_user_profile_by_id(user_id) for user_id in user_ids]
# The user-id sets computed for delivering a single message (recipients,
# notification targets, bot bookkeeping); produced by get_recipient_info.
RecipientInfoResult = TypedDict('RecipientInfoResult', {
    'active_user_ids': Set[int],
    'push_notify_user_ids': Set[int],
    'stream_email_user_ids': Set[int],
    'stream_push_user_ids': Set[int],
    'wildcard_mention_user_ids': Set[int],
    'um_eligible_user_ids': Set[int],
    'long_term_idle_user_ids': Set[int],
    'default_bot_user_ids': Set[int],
    'service_bot_tuples': List[Tuple[int, int]],
})
def get_recipient_info(recipient: Recipient,
                       sender_id: int,
                       stream_topic: Optional[StreamTopicTarget],
                       possibly_mentioned_user_ids: Optional[Set[int]]=None,
                       possible_wildcard_mention: bool=True) -> RecipientInfoResult:
    """Compute the user-id sets needed to deliver one message: who
    receives it, who gets push/email/wildcard-mention notifications,
    which users get UserMessage rows, and which bots are involved.
    See RecipientInfoResult for the returned keys.
    """
    stream_push_user_ids = set()  # type: Set[int]
    stream_email_user_ids = set()  # type: Set[int]
    wildcard_mention_user_ids = set()  # type: Set[int]
    if recipient.type == Recipient.PERSONAL:
        # The sender and recipient may be the same id, so
        # de-duplicate using a set.
        message_to_user_ids = list({recipient.type_id, sender_id})
        assert(len(message_to_user_ids) in [1, 2])
    elif recipient.type == Recipient.STREAM:
        # Anybody calling us w/r/t a stream message needs to supply
        # stream_topic. We may eventually want to have different versions
        # of this function for different message types.
        assert(stream_topic is not None)
        user_ids_muting_topic = stream_topic.user_ids_muting_topic()
        # One query fetching both the per-subscription notification
        # settings and the subscriber's profile-level defaults.
        subscription_rows = stream_topic.get_active_subscriptions().annotate(
            user_profile_email_notifications=F('user_profile__enable_stream_email_notifications'),
            user_profile_push_notifications=F('user_profile__enable_stream_push_notifications'),
            user_profile_wildcard_mentions_notify=F(
                'user_profile__wildcard_mentions_notify'),
        ).values(
            'user_profile_id',
            'push_notifications',
            'email_notifications',
            'wildcard_mentions_notify',
            'user_profile_email_notifications',
            'user_profile_push_notifications',
            'user_profile_wildcard_mentions_notify',
            'is_muted',
        ).order_by('user_profile_id')
        message_to_user_ids = [
            row['user_profile_id']
            for row in subscription_rows
        ]
        def should_send(setting: str, row: Dict[str, Any]) -> bool:
            # This implements the structure that the UserProfile stream notification settings
            # are defaults, which can be overridden by the stream-level settings (if those
            # values are not null).
            if row['is_muted']:
                return False
            if row['user_profile_id'] in user_ids_muting_topic:
                return False
            if row[setting] is not None:
                return row[setting]
            return row['user_profile_' + setting]
        stream_push_user_ids = {
            row['user_profile_id']
            for row in subscription_rows
            # Note: muting a stream overrides stream_push_notify
            if should_send('push_notifications', row)
        }
        stream_email_user_ids = {
            row['user_profile_id']
            for row in subscription_rows
            # Note: muting a stream overrides stream_email_notify
            if should_send('email_notifications', row)
        }
        if possible_wildcard_mention:
            # If there's a possible wildcard mention, we need to
            # determine which users would receive a wildcard mention
            # notification for this message should the message indeed
            # contain a wildcard mention.
            #
            # We don't have separate values for push/email
            # notifications here; at this stage, we're just
            # determining whether this wildcard mention should be
            # treated as a mention (and follow the user's mention
            # notification preferences) or a normal message.
            wildcard_mention_user_ids = {
                row['user_profile_id']
                for row in subscription_rows
                if should_send("wildcard_mentions_notify", row)
            }
    elif recipient.type == Recipient.HUDDLE:
        message_to_user_ids = get_huddle_user_ids(recipient)
    else:
        raise ValueError('Bad recipient type')
    message_to_user_id_set = set(message_to_user_ids)
    user_ids = set(message_to_user_id_set)
    if possibly_mentioned_user_ids:
        # Important note: Because we haven't rendered bugdown yet, we
        # don't yet know which of these possibly-mentioned users was
        # actually mentioned in the message (in other words, the
        # mention syntax might have been in a code block or otherwise
        # escaped). `get_ids_for` will filter these extra user rows
        # for our data structures not related to bots
        user_ids |= possibly_mentioned_user_ids
    if user_ids:
        query = UserProfile.objects.filter(
            is_active=True,
        ).values(
            'id',
            'enable_online_push_notifications',
            'is_bot',
            'bot_type',
            'long_term_idle',
        )
        # query_for_ids is fast highly optimized for large queries, and we
        # need this codepath to be fast (it's part of sending messages)
        query = query_for_ids(
            query=query,
            user_ids=sorted(list(user_ids)),
            field='id'
        )
        rows = list(query)
    else:
        # TODO: We should always have at least one user_id as a recipient
        #       of any message we send. Right now the exception to this
        #       rule is `notify_new_user`, which, at least in a possibly
        #       contrived test scenario, can attempt to send messages
        #       to an inactive bot. When we plug that hole, we can avoid
        #       this `else` clause and just `assert(user_ids)`.
        rows = []
    def get_ids_for(f: Callable[[Dict[str, Any]], bool]) -> Set[int]:
        """Only includes users on the explicit message to line"""
        return {
            row['id']
            for row in rows
            if f(row)
        } & message_to_user_id_set
    def is_service_bot(row: Dict[str, Any]) -> bool:
        return row['is_bot'] and (row['bot_type'] in UserProfile.SERVICE_BOT_TYPES)
    active_user_ids = get_ids_for(lambda r: True)
    push_notify_user_ids = get_ids_for(
        lambda r: r['enable_online_push_notifications']
    )
    # Service bots don't get UserMessage rows.
    um_eligible_user_ids = get_ids_for(
        lambda r: not is_service_bot(r)
    )
    long_term_idle_user_ids = get_ids_for(
        lambda r: r['long_term_idle']
    )
    # These two bot data structures need to filter from the full set
    # of users who either are receiving the message or might have been
    # mentioned in it, and so can't use get_ids_for.
    #
    # Further in the do_send_messages code path, once
    # `mentioned_user_ids` has been computed via bugdown, we'll filter
    # these data structures for just those users who are either a
    # direct recipient or were mentioned; for now, we're just making
    # sure we have the data we need for that without extra database
    # queries.
    default_bot_user_ids = set([
        row['id']
        for row in rows
        if row['is_bot'] and row['bot_type'] == UserProfile.DEFAULT_BOT
    ])
    service_bot_tuples = [
        (row['id'], row['bot_type'])
        for row in rows
        if is_service_bot(row)
    ]
    info = dict(
        active_user_ids=active_user_ids,
        push_notify_user_ids=push_notify_user_ids,
        stream_push_user_ids=stream_push_user_ids,
        stream_email_user_ids=stream_email_user_ids,
        wildcard_mention_user_ids=wildcard_mention_user_ids,
        um_eligible_user_ids=um_eligible_user_ids,
        long_term_idle_user_ids=long_term_idle_user_ids,
        default_bot_user_ids=default_bot_user_ids,
        service_bot_tuples=service_bot_tuples
    )  # type: RecipientInfoResult
    return info
def get_service_bot_events(sender: UserProfile, service_bot_tuples: List[Tuple[int, int]],
                           mentioned_user_ids: Set[int], active_user_ids: Set[int],
                           recipient_type: int) -> Dict[str, List[Dict[str, Any]]]:
    """Build per-queue event lists ('outgoing_webhooks'/'embedded_bots')
    for the service bots that should be triggered by this message."""
    event_dict = defaultdict(list)  # type: Dict[str, List[Dict[str, Any]]]
    # Avoid infinite loops by preventing messages sent by bots from generating
    # Service events.
    if sender.is_bot:
        return event_dict
    def maybe_add_event(user_profile_id: int, bot_type: int) -> None:
        # Route each bot type to its worker queue; unknown types are
        # logged and skipped.
        if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
            queue_name = 'outgoing_webhooks'
        elif bot_type == UserProfile.EMBEDDED_BOT:
            queue_name = 'embedded_bots'
        else:
            logging.error(
                'Unexpected bot_type for Service bot id=%s: %s' %
                (user_profile_id, bot_type))
            return
        is_stream = (recipient_type == Recipient.STREAM)
        # Important note: service_bot_tuples may contain service bots
        # who were not actually mentioned in the message (e.g. if
        # mention syntax for that bot appeared in a code block).
        # Thus, it is important to filter any users who aren't part of
        # either mentioned_user_ids (the actual mentioned users) or
        # active_user_ids (the actual recipients).
        #
        # So even though this is implied by the logic below, we filter
        # these not-actually-mentioned users here, to help keep this
        # function future-proof.
        if user_profile_id not in mentioned_user_ids and user_profile_id not in active_user_ids:
            return
        # Mention triggers, for stream messages
        if is_stream and user_profile_id in mentioned_user_ids:
            trigger = 'mention'
        # PM triggers for personal and huddle messages
        elif (not is_stream) and (user_profile_id in active_user_ids):
            trigger = 'private_message'
        else:
            return
        event_dict[queue_name].append({
            'trigger': trigger,
            'user_profile_id': user_profile_id,
        })
    for user_profile_id, bot_type in service_bot_tuples:
        maybe_add_event(
            user_profile_id=user_profile_id,
            bot_type=bot_type,
        )
    return event_dict
def do_schedule_messages(messages: Sequence[Mapping[str, Any]]) -> List[int]:
    """Persist pre-built message dicts as ScheduledMessage rows for later
    delivery (send-later or reminder), returning the new row ids."""
    scheduled_messages = []  # type: List[ScheduledMessage]
    for message in messages:
        scheduled_message = ScheduledMessage()
        # Copy delivery details from the in-memory Message object.
        scheduled_message.sender = message['message'].sender
        scheduled_message.recipient = message['message'].recipient
        topic_name = message['message'].topic_name()
        scheduled_message.set_topic_name(topic_name=topic_name)
        scheduled_message.content = message['message'].content
        scheduled_message.sending_client = message['message'].sending_client
        scheduled_message.stream = message['stream']
        scheduled_message.realm = message['realm']
        scheduled_message.scheduled_timestamp = message['deliver_at']
        if message['delivery_type'] == 'send_later':
            scheduled_message.delivery_type = ScheduledMessage.SEND_LATER
        elif message['delivery_type'] == 'remind':
            scheduled_message.delivery_type = ScheduledMessage.REMIND
        scheduled_messages.append(scheduled_message)
    # Single bulk INSERT for all rows.
    ScheduledMessage.objects.bulk_create(scheduled_messages)
    return [scheduled_message.id for scheduled_message in scheduled_messages]
def do_send_messages(messages_maybe_none: Sequence[Optional[MutableMapping[str, Any]]],
                     email_gateway: Optional[bool]=False,
                     mark_as_read: List[int]=[]) -> List[int]:
    """See
    https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
    for high-level documentation on this subsystem.

    Renders, saves, and delivers messages that already passed message
    checking, returning the ids of the messages sent (plus the ids of
    any zephyr-mirrored messages that had already been sent).

    NOTE(review): mark_as_read uses a mutable default list; it is only
    read below (passed through to create_user_messages), never mutated,
    so the shared default is safe in practice.
    """
    # Filter out messages which didn't pass internal_prep_message properly
    messages = [message for message in messages_maybe_none if message is not None]

    # Filter out zephyr mirror anomalies where the message was already sent.
    # The mirroring codepath signals "already sent" by replacing the Message
    # object with the existing message id (an int).
    already_sent_ids = []  # type: List[int]
    new_messages = []  # type: List[MutableMapping[str, Any]]
    for message in messages:
        if isinstance(message['message'], int):
            already_sent_ids.append(message['message'])
        else:
            new_messages.append(message)
    messages = new_messages

    links_for_embed = set()  # type: Set[str]
    # For consistency, changes to the default values for these gets should also be applied
    # to the default args in do_send_message
    for message in messages:
        message['rendered_content'] = message.get('rendered_content', None)
        message['stream'] = message.get('stream', None)
        message['local_id'] = message.get('local_id', None)
        message['sender_queue_id'] = message.get('sender_queue_id', None)
        message['realm'] = message.get('realm', message['message'].sender.realm)

        mention_data = bugdown.MentionData(
            realm_id=message['realm'].id,
            content=message['message'].content,
        )
        message['mention_data'] = mention_data

        if message['message'].is_stream_message():
            stream_id = message['message'].recipient.type_id
            stream_topic = StreamTopicTarget(
                stream_id=stream_id,
                topic_name=message['message'].topic_name()
            )  # type: Optional[StreamTopicTarget]
        else:
            stream_topic = None

        # Compute the audience and per-category notification-eligibility
        # sets for this message.
        info = get_recipient_info(
            recipient=message['message'].recipient,
            sender_id=message['message'].sender_id,
            stream_topic=stream_topic,
            possibly_mentioned_user_ids=mention_data.get_user_ids(),
            possible_wildcard_mention=mention_data.message_has_wildcards(),
        )

        message['active_user_ids'] = info['active_user_ids']
        message['push_notify_user_ids'] = info['push_notify_user_ids']
        message['stream_push_user_ids'] = info['stream_push_user_ids']
        message['stream_email_user_ids'] = info['stream_email_user_ids']
        message['um_eligible_user_ids'] = info['um_eligible_user_ids']
        message['long_term_idle_user_ids'] = info['long_term_idle_user_ids']
        message['default_bot_user_ids'] = info['default_bot_user_ids']
        message['service_bot_tuples'] = info['service_bot_tuples']

        # Render our messages.
        assert message['message'].rendered_content is None
        rendered_content = render_incoming_message(
            message['message'],
            message['message'].content,
            message['active_user_ids'],
            message['realm'],
            mention_data=message['mention_data'],
            email_gateway=email_gateway,
        )
        message['message'].rendered_content = rendered_content
        message['message'].rendered_content_version = bugdown_version
        links_for_embed |= message['message'].links_for_preview

        # Add members of the mentioned user groups into `mentions_user_ids`.
        for group_id in message['message'].mentions_user_group_ids:
            members = message['mention_data'].get_group_members(group_id)
            message['message'].mentions_user_ids.update(members)

        # Only send data to Tornado about wildcard mentions if message
        # rendering determined the message had an actual wildcard
        # mention in it (and not e.g. wildcard mention syntax inside a
        # code block).
        if message['message'].mentions_wildcard:
            message['wildcard_mention_user_ids'] = info['wildcard_mention_user_ids']
        else:
            message['wildcard_mention_user_ids'] = []

        '''
        Once we have the actual list of mentioned ids from message
        rendering, we can patch in "default bots" (aka normal bots)
        who were directly mentioned in this message as eligible to
        get UserMessage rows.
        '''
        mentioned_user_ids = message['message'].mentions_user_ids
        default_bot_user_ids = message['default_bot_user_ids']
        mentioned_bot_user_ids = default_bot_user_ids & mentioned_user_ids
        message['um_eligible_user_ids'] |= mentioned_bot_user_ids

        # Update calculated fields of the message
        message['message'].update_calculated_fields()

    # Save the message receipts in the database
    user_message_flags = defaultdict(dict)  # type: Dict[int, Dict[int, List[str]]]
    with transaction.atomic():
        Message.objects.bulk_create([message['message'] for message in messages])
        ums = []  # type: List[UserMessageLite]
        for message in messages:
            # Service bots (outgoing webhook bots and embedded bots) don't store UserMessage rows;
            # they will be processed later.
            mentioned_user_ids = message['message'].mentions_user_ids
            user_messages = create_user_messages(
                message=message['message'],
                um_eligible_user_ids=message['um_eligible_user_ids'],
                long_term_idle_user_ids=message['long_term_idle_user_ids'],
                stream_push_user_ids = message['stream_push_user_ids'],
                stream_email_user_ids = message['stream_email_user_ids'],
                mentioned_user_ids=mentioned_user_ids,
                mark_as_read=mark_as_read
            )

            for um in user_messages:
                user_message_flags[message['message'].id][um.user_profile_id] = um.flags_list()

            ums.extend(user_messages)

            message['message'].service_queue_events = get_service_bot_events(
                sender=message['message'].sender,
                service_bot_tuples=message['service_bot_tuples'],
                mentioned_user_ids=mentioned_user_ids,
                active_user_ids=message['active_user_ids'],
                recipient_type=message['message'].recipient.type,
            )

        # Single raw-SQL bulk insert of all the UserMessage rows.
        bulk_insert_ums(ums)

        # Claim attachments in message
        for message in messages:
            if Message.content_has_attachment(message['message'].content):
                do_claim_attachments(message['message'])

        for message in messages:
            do_widget_post_save_actions(message)

    for message in messages:
        # Deliver events to the real-time push system, as well as
        # enqueuing any additional processing triggered by the message.
        wide_message_dict = MessageDict.wide_dict(message['message'])

        user_flags = user_message_flags.get(message['message'].id, {})
        sender = message['message'].sender
        message_type = wide_message_dict['type']

        presence_idle_user_ids = get_active_presence_idle_user_ids(
            realm=sender.realm,
            sender_id=sender.id,
            message_type=message_type,
            active_user_ids=message['active_user_ids'],
            user_flags=user_flags,
        )

        event = dict(
            type='message',
            message=message['message'].id,
            message_dict=wide_message_dict,
            presence_idle_user_ids=presence_idle_user_ids,
        )

        '''
        TODO:  We may want to limit user_ids to only those users who have
               UserMessage rows, if only for minor performance reasons.

               For now we queue events for all subscribers/sendees of the
               message, since downstream code may still do notifications
               that don't require UserMessage rows.

               Our automated tests have gotten better on this codepath,
               but we may have coverage gaps, so we should be careful
               about changing the next line.
        '''
        user_ids = message['active_user_ids'] | set(user_flags.keys())

        users = [
            dict(
                id=user_id,
                flags=user_flags.get(user_id, []),
                always_push_notify=(user_id in message['push_notify_user_ids']),
                stream_push_notify=(user_id in message['stream_push_user_ids']),
                stream_email_notify=(user_id in message['stream_email_user_ids']),
                wildcard_mention_notify=(user_id in message['wildcard_mention_user_ids']),
            )
            for user_id in user_ids
        ]

        if message['message'].is_stream_message():
            # Note: This is where authorization for single-stream
            # get_updates happens! We only attach stream data to the
            # notify new_message request if it's a public stream,
            # ensuring that in the tornado server, non-public stream
            # messages are only associated to their subscribed users.
            if message['stream'] is None:
                stream_id = message['message'].recipient.type_id
                message['stream'] = Stream.objects.select_related("realm").get(id=stream_id)
            assert message['stream'] is not None  # assert needed because stubs for django are missing
            if message['stream'].is_public():
                event['realm_id'] = message['stream'].realm_id
                event['stream_name'] = message['stream'].name
            if message['stream'].invite_only:
                event['invite_only'] = True
            if message['stream'].first_message_id is None:
                # First message ever on this stream; record it.
                message['stream'].first_message_id = message['message'].id
                message['stream'].save(update_fields=["first_message_id"])
        if message['local_id'] is not None:
            event['local_id'] = message['local_id']
        if message['sender_queue_id'] is not None:
            event['sender_queue_id'] = message['sender_queue_id']
        send_event(message['realm'], event, users)

        if url_embed_preview_enabled(message['message']) and links_for_embed:
            event_data = {
                'message_id': message['message'].id,
                'message_content': message['message'].content,
                'message_realm_id': message['realm'].id,
                'urls': links_for_embed}
            queue_json_publish('embed_links', event_data)

        if (settings.ENABLE_FEEDBACK and settings.FEEDBACK_BOT and
                message['message'].recipient.type == Recipient.PERSONAL):
            feedback_bot_id = get_system_bot(email=settings.FEEDBACK_BOT).id
            if feedback_bot_id in message['active_user_ids']:
                queue_json_publish(
                    'feedback_messages',
                    wide_message_dict,
                )

        if message['message'].recipient.type == Recipient.PERSONAL:
            welcome_bot_id = get_system_bot(settings.WELCOME_BOT).id
            if (welcome_bot_id in message['active_user_ids'] and
                    welcome_bot_id != message['message'].sender_id):
                send_welcome_bot_response(message)

        # Fan out to the service-bot queues (outgoing webhooks,
        # embedded bots) computed earlier by get_service_bot_events.
        for queue_name, events in message['message'].service_queue_events.items():
            for event in events:
                queue_json_publish(
                    queue_name,
                    {
                        "message": wide_message_dict,
                        "trigger": event['trigger'],
                        "user_profile_id": event["user_profile_id"],
                    }
                )

    # Note that this does not preserve the order of message ids
    # returned.  In practice, this shouldn't matter, as we only
    # mirror single zephyr messages at a time and don't otherwise
    # intermingle sending zephyr messages with other messages.
    return already_sent_ids + [message['message'].id for message in messages]
class UserMessageLite:
    '''
    The Django ORM is too slow for bulk operations.  This class
    is optimized for the simple use case of inserting a bunch of
    rows into zerver_usermessage.

    It mirrors only the three columns bulk_insert_ums writes:
    user_profile_id, message_id, and the flags bitfield.
    '''

    def __init__(self, user_profile_id: int, message_id: int, flags: int) -> None:
        self.user_profile_id = user_profile_id
        self.message_id = message_id
        self.flags = flags

    def flags_list(self) -> List[str]:
        # Decode the flags bitfield into the usual list of flag names.
        return UserMessage.flags_list_for_flags(self.flags)
def create_user_messages(message: Message,
                         um_eligible_user_ids: Set[int],
                         long_term_idle_user_ids: Set[int],
                         stream_push_user_ids: Set[int],
                         stream_email_user_ids: Set[int],
                         mentioned_user_ids: Set[int],
                         mark_as_read: Optional[List[int]]=None) -> List[UserMessageLite]:
    """Build the UserMessageLite rows for `message`.

    Computes per-user flags (read / mentioned / alert-word / private /
    wildcard) for every eligible user, then drops rows for
    soft-deactivated users where they can be lazily recreated later.

    mark_as_read: user ids whose copy should be pre-marked as read.
    (The parameter previously used a mutable default list; `None` now
    stands in for "no extra users", which is backward-compatible.)
    """
    if mark_as_read is None:
        mark_as_read = []
    ums_to_create = []
    for user_profile_id in um_eligible_user_ids:
        um = UserMessageLite(
            user_profile_id=user_profile_id,
            message_id=message.id,
            flags=0,
        )
        ums_to_create.append(um)

    # These properties on the Message are set via
    # render_markdown by code in the bugdown inline patterns
    wildcard = message.mentions_wildcard
    ids_with_alert_words = message.user_ids_with_alert_words

    for um in ums_to_create:
        # Human senders see their own messages as read immediately.
        if (um.user_profile_id == message.sender.id and
                message.sent_by_human()) or \
                um.user_profile_id in mark_as_read:
            um.flags |= UserMessage.flags.read
        if wildcard:
            um.flags |= UserMessage.flags.wildcard_mentioned
        if um.user_profile_id in mentioned_user_ids:
            um.flags |= UserMessage.flags.mentioned
        if um.user_profile_id in ids_with_alert_words:
            um.flags |= UserMessage.flags.has_alert_word
        if message.recipient.type in [Recipient.HUDDLE, Recipient.PERSONAL]:
            um.flags |= UserMessage.flags.is_private

    # For long_term_idle (aka soft-deactivated) users, we are allowed
    # to optimize by lazily not creating UserMessage rows that would
    # have the default 0 flag set (since the soft-reactivation logic
    # knows how to create those when the user comes back).  We need to
    # create the UserMessage rows for these long_term_idle users
    # non-lazily in a few cases:
    #
    # * There are nonzero flags (e.g. the user was mentioned), since
    #   that case is rare and this saves a lot of complexity in
    #   soft-reactivation.
    #
    # * If the user is going to be notified (e.g. they get push/email
    #   notifications for every message on a stream), since in that
    #   case the notifications code will call `access_message` on the
    #   message to re-verify permissions, and for private streams,
    #   will get an error if the UserMessage row doesn't exist yet.
    #
    # See https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html#soft-deactivation
    # for details on this system.
    user_messages = []
    for um in ums_to_create:
        if (um.user_profile_id in long_term_idle_user_ids and
                um.user_profile_id not in stream_push_user_ids and
                um.user_profile_id not in stream_email_user_ids and
                message.is_stream_message() and
                int(um.flags) == 0):
            continue
        user_messages.append(um)

    return user_messages
def bulk_insert_ums(ums: List[UserMessageLite]) -> None:
    '''
    Insert UserMessage rows with a single raw SQL statement.

    Doing bulk inserts this way is much faster than using Django,
    since we don't have any ORM overhead.  Profiling with 1000
    users shows a speedup of 0.436 -> 0.027 seconds, so we're
    talking about a 15x speedup.
    '''
    if not ums:
        return

    # All three columns are plain integers, so %d interpolation here is
    # injection-safe.
    values_sql = ','.join(
        '(%d, %d, %d)' % (um.user_profile_id, um.message_id, um.flags)
        for um in ums
    )

    query = '''
    INSERT into
        zerver_usermessage (user_profile_id, message_id, flags)
    VALUES
''' + values_sql

    with connection.cursor() as cur:
        cur.execute(query)
def do_add_submessage(realm: Realm,
                      sender_id: int,
                      message_id: int,
                      msg_type: str,
                      content: str,
                      ) -> None:
    """Store a SubMessage row and broadcast it to everyone who has a
    UserMessage row for the parent message."""
    row = SubMessage(
        sender_id=sender_id,
        message_id=message_id,
        msg_type=msg_type,
        content=content,
    )
    row.save()

    notice = dict(
        type="submessage",
        msg_type=msg_type,
        message_id=message_id,
        submessage_id=row.id,
        sender_id=sender_id,
        content=content,
    )
    recipient_ids = [
        um.user_profile_id
        for um in UserMessage.objects.filter(message_id=message_id)
    ]
    send_event(realm, notice, recipient_ids)
def notify_reaction_update(user_profile: UserProfile, message: Message,
                           reaction: Reaction, op: str) -> None:
    """Broadcast a reaction add/remove event for `message`, refreshing
    the message cache first."""
    event = {
        'type': 'reaction',
        'op': op,
        'user': {
            'user_id': user_profile.id,
            'email': user_profile.email,
            'full_name': user_profile.full_name,
        },
        'message_id': message.id,
        'emoji_name': reaction.emoji_name,
        'emoji_code': reaction.emoji_code,
        'reaction_type': reaction.reaction_type,
    }  # type: Dict[str, Any]

    # Update the cached message since new reaction is added.
    update_to_dict_cache([message])

    # Recipients for message update events, including reactions, are
    # everyone who got the original message.  This means reactions
    # won't live-update in preview narrows, but it's the right
    # performance tradeoff, since otherwise we'd need to send all
    # reactions to public stream messages to every browser for every
    # client in the organization, which doesn't scale.
    #
    # However, to ensure that reactions do live-update for any user
    # who has actually participated in reacting to a message, we add a
    # "historical" UserMessage row for any user who reacts to message,
    # subscribing them to future notifications.
    ums = UserMessage.objects.filter(message=message.id)
    send_event(user_profile.realm, event, [um.user_profile_id for um in ums])
def do_add_reaction_legacy(user_profile: UserProfile, message: Message, emoji_name: str) -> None:
    """Legacy (emoji-name based) path: record a reaction and notify."""
    emoji_code, reaction_type = emoji_name_to_emoji_code(user_profile.realm, emoji_name)
    fields = dict(user_profile=user_profile, message=message,
                  emoji_name=emoji_name, emoji_code=emoji_code,
                  reaction_type=reaction_type)
    reaction = Reaction(**fields)
    try:
        reaction.save()
    except django.db.utils.IntegrityError:  # nocoverage
        # A race can slip a duplicate past the check in the views code,
        # or the emoji_name/emoji_code mapping may be busted.
        raise JsonableError(_("Reaction already exists."))

    notify_reaction_update(user_profile, message, reaction, "add")
def do_remove_reaction_legacy(user_profile: UserProfile, message: Message, emoji_name: str) -> None:
    """Legacy (emoji-name based) path: delete a reaction and notify."""
    reaction = Reaction.objects.get(user_profile=user_profile,
                                    message=message,
                                    emoji_name=emoji_name)
    reaction.delete()

    notify_reaction_update(user_profile, message, reaction, "remove")
def do_add_reaction(user_profile: UserProfile, message: Message,
                    emoji_name: str, emoji_code: str, reaction_type: str) -> None:
    """Record a reaction (caller supplies the emoji code) and notify."""
    fields = dict(user_profile=user_profile, message=message,
                  emoji_name=emoji_name, emoji_code=emoji_code,
                  reaction_type=reaction_type)
    reaction = Reaction(**fields)
    try:
        reaction.save()
    except django.db.utils.IntegrityError:  # nocoverage
        # A race can slip a duplicate past the check in the views code,
        # or the emoji_name/emoji_code mapping may be busted.
        raise JsonableError(_("Reaction already exists."))

    notify_reaction_update(user_profile, message, reaction, "add")
def do_remove_reaction(user_profile: UserProfile, message: Message,
                       emoji_code: str, reaction_type: str) -> None:
    """Delete a reaction (looked up by emoji code) and notify."""
    reaction = Reaction.objects.get(user_profile=user_profile,
                                    message=message,
                                    emoji_code=emoji_code,
                                    reaction_type=reaction_type)
    reaction.delete()

    notify_reaction_update(user_profile, message, reaction, "remove")
def do_send_typing_notification(realm: Realm, notification: Dict[str, Any]) -> None:
    """Fan a typing start/stop notification out to active recipients."""
    profiles = get_typing_user_profiles(notification['recipient'],
                                        notification['sender'].id)
    # Only deliver the notification to active user recipients.
    active_ids = [profile.id for profile in profiles if profile.is_active]

    event = dict(
        type='typing',
        op=notification['op'],
        sender={'user_id': notification['sender'].id,
                'email': notification['sender'].email},
        # The full recipient list helps clients identify where the
        # typing is happening.
        recipients=[{'user_id': profile.id, 'email': profile.email}
                    for profile in profiles],
    )
    send_event(realm, event, active_ids)
# check_send_typing_notification:
# Validates the typing notification, then delivers it.
def check_send_typing_notification(sender: UserProfile, notification_to: Union[Sequence[str], Sequence[int]],
                                   operator: str) -> None:
    notification = check_typing_notification(sender, notification_to, operator)
    do_send_typing_notification(sender.realm, notification)
# check_typing_notification:
# Validates the inputs and resolves the recipient, returning a dict
# ready for do_send_typing_notification.  Raises JsonableError on any
# invalid input (it does not return an error string).
def check_typing_notification(sender: UserProfile,
                              notification_to: Union[Sequence[str], Sequence[int]],
                              operator: str) -> Dict[str, Any]:
    if not notification_to:
        raise JsonableError(_('Missing parameter: \'to\' (recipient)'))
    if operator not in ('start', 'stop'):
        raise JsonableError(_('Invalid \'op\' value (should be start or stop)'))

    # The list is either all emails or all user ids; dispatch on the
    # type of the first element.
    try:
        if isinstance(notification_to[0], str):
            recipient = recipient_for_emails(cast(Sequence[str], notification_to),
                                             False, sender, sender)
        elif isinstance(notification_to[0], int):
            recipient = recipient_for_user_ids(cast(Sequence[int], notification_to),
                                               sender)
    except ValidationError as e:
        assert isinstance(e.messages[0], str)
        raise JsonableError(e.messages[0])

    # Typing notifications are only for private messages.
    assert recipient.type != Recipient.STREAM
    return {'sender': sender, 'recipient': recipient, 'op': operator}
def send_stream_creation_event(stream: Stream, user_ids: List[int]) -> None:
    """Notify the given users that `stream` now exists."""
    creation_event = dict(type="stream", op="create",
                          streams=[stream.to_dict()])
    send_event(stream.realm, creation_event, user_ids)
def get_default_value_for_history_public_to_subscribers(
        realm: Realm,
        invite_only: bool,
        history_public_to_subscribers: Optional[bool]
) -> bool:
    """Resolve the effective history_public_to_subscribers policy for a
    new stream, filling in the default when the caller passed None."""
    if realm.is_zephyr_mirror_realm:
        # In the Zephyr mirroring model, history is unconditionally
        # not public to subscribers, even for public streams.
        return False

    if not invite_only:
        # Public streams always have public history; if we later decide
        # to support public streams without history, this is the code
        # path to change.
        return True

    # A private stream's history is non-public by default.
    if history_public_to_subscribers is None:
        return False
    return history_public_to_subscribers
def render_stream_description(text: str) -> str:
    """Render a stream description to HTML, with link previews disabled."""
    rendered = bugdown_convert(text, no_previews=True)
    return rendered
def create_stream_if_needed(realm: Realm,
                            stream_name: str,
                            *,
                            invite_only: bool=False,
                            is_announcement_only: bool=False,
                            history_public_to_subscribers: Optional[bool]=None,
                            stream_description: str="") -> Tuple[Stream, bool]:
    """Fetch the stream with this name (matched case-insensitively),
    creating it if necessary.  Returns (stream, created)."""
    history_public_to_subscribers = get_default_value_for_history_public_to_subscribers(
        realm, invite_only, history_public_to_subscribers)

    (stream, created) = Stream.objects.get_or_create(
        realm=realm,
        name__iexact=stream_name,
        defaults=dict(
            name=stream_name,
            description=stream_description,
            invite_only=invite_only,
            is_announcement_only=is_announcement_only,
            history_public_to_subscribers=history_public_to_subscribers,
            is_in_zephyr_realm=realm.is_zephyr_mirror_realm,
        ),
    )
    if not created:
        return stream, False

    # Newly created: render the description, attach the Recipient row,
    # and announce the stream to the users allowed to see it.
    stream.rendered_description = render_stream_description(stream_description)
    stream.save(update_fields=["rendered_description"])
    Recipient.objects.create(type_id=stream.id, type=Recipient.STREAM)
    if stream.is_public():
        notify_user_ids = active_non_guest_user_ids(stream.realm_id)
    else:
        # Private streams are only announced to organization admins.
        notify_user_ids = [user.id for user in
                           stream.realm.get_admin_users_and_bots()]
    send_stream_creation_event(stream, notify_user_ids)
    return stream, True
def ensure_stream(realm: Realm,
                  stream_name: str,
                  invite_only: bool=False,
                  stream_description: str="") -> Stream:
    """Like create_stream_if_needed, but return just the Stream."""
    stream, _created = create_stream_if_needed(
        realm, stream_name,
        invite_only=invite_only,
        stream_description=stream_description)
    return stream
def create_streams_if_needed(realm: Realm,
                             stream_dicts: List[Mapping[str, Any]]) -> Tuple[List[Stream], List[Stream]]:
    """Create any streams in stream_dicts that don't already exist.

    Note that stream_dict["name"] is assumed to already be stripped of
    whitespace.  Returns (newly created streams, pre-existing streams).
    """
    added = []  # type: List[Stream]
    existing = []  # type: List[Stream]
    for stream_dict in stream_dicts:
        stream, created = create_stream_if_needed(
            realm,
            stream_dict["name"],
            invite_only=stream_dict.get("invite_only", False),
            is_announcement_only=stream_dict.get("is_announcement_only", False),
            history_public_to_subscribers=stream_dict.get("history_public_to_subscribers"),
            stream_description=stream_dict.get("description", ""),
        )
        (added if created else existing).append(stream)

    return added, existing
def get_recipient_from_user_ids(recipient_profile_ids: Set[int],
                                forwarded_mirror_message: bool,
                                forwarder_user_profile: Optional[UserProfile],
                                sender: UserProfile) -> Recipient:
    """Map a set of PM recipient ids to a personal or huddle Recipient.

    A two-person conversation that includes the sender is normalized to
    a personal (1:1) recipient; larger groups become a huddle that
    always includes the sender.
    """
    # Work on a copy so the caller's set is not mutated.
    profile_ids = set(recipient_profile_ids)

    if forwarded_mirror_message:
        # In our mirroring integrations with some third-party
        # protocols, bots subscribed to the third-party protocol
        # forward to Zulip messages that they received in the
        # third-party service.  The permissions model for that
        # forwarding is that users can only submit to Zulip private
        # messages they personally received, and here we do the check
        # for whether forwarder_user_profile is among the private
        # message recipients of the message.
        assert forwarder_user_profile is not None
        if forwarder_user_profile.id not in profile_ids:
            raise ValidationError(_("User not authorized for this query"))

    # A PM between the sender and exactly one other person is a
    # personal message, not a huddle.
    if len(profile_ids) == 2 and sender.id in profile_ids:
        profile_ids.remove(sender.id)

    if len(profile_ids) > 1:
        # Make sure the sender is included in huddle messages.
        profile_ids.add(sender.id)
        return get_huddle_recipient(profile_ids)
    return get_personal_recipient(list(profile_ids)[0])
def validate_recipient_user_profiles(user_profiles: Sequence[UserProfile],
                                     sender: UserProfile,
                                     allow_deactivated: bool=False) -> Set[int]:
    """Check that every recipient can legally receive a PM from sender,
    returning the set of their ids.

    Raises ValidationError for deactivated recipients (unless
    allow_deactivated) and for cross-organization messages; cross-realm
    bots are exempt from the same-realm requirement.
    """
    recipient_ids = set()  # type: Set[int]
    realm_ids = set()  # type: Set[int]
    if not is_cross_realm_bot_email(sender.email):
        realm_ids.add(sender.realm_id)

    for profile in user_profiles:
        unusable = not profile.is_active and not profile.is_mirror_dummy
        if (unusable and not allow_deactivated) or profile.realm.deactivated:
            raise ValidationError(_("'%s' is no longer using Zulip.") % (profile.email,))
        recipient_ids.add(profile.id)
        if not is_cross_realm_bot_email(profile.email):
            realm_ids.add(profile.realm_id)

    if len(realm_ids) > 1:
        raise ValidationError(_("You can't send private messages outside of your organization."))

    return recipient_ids
def recipient_for_emails(emails: Iterable[str], forwarded_mirror_message: bool,
                         forwarder_user_profile: Optional[UserProfile],
                         sender: UserProfile) -> Recipient:
    # This helper should only be used for searches.
    # Other features are moving toward supporting ids.
    profiles = []  # type: List[UserProfile]
    for email in emails:
        try:
            profile = get_user_including_cross_realm(email, sender.realm)
        except UserProfile.DoesNotExist:
            raise ValidationError(_("Invalid email '%s'") % (email,))
        profiles.append(profile)

    return recipient_for_user_profiles(
        user_profiles=profiles,
        forwarded_mirror_message=forwarded_mirror_message,
        forwarder_user_profile=forwarder_user_profile,
        sender=sender,
    )
def recipient_for_user_ids(user_ids: Iterable[int], sender: UserProfile) -> Recipient:
    """Resolve a list of user ids to a PM Recipient, raising
    ValidationError on any unknown id."""
    profiles = []  # type: List[UserProfile]
    for user_id in user_ids:
        try:
            profile = get_user_by_id_in_realm_including_cross_realm(
                user_id, sender.realm)
        except UserProfile.DoesNotExist:
            raise ValidationError(_("Invalid user ID {}").format(user_id))
        profiles.append(profile)

    return recipient_for_user_profiles(
        user_profiles=profiles,
        forwarded_mirror_message=False,
        forwarder_user_profile=None,
        sender=sender,
    )
def recipient_for_user_profiles(user_profiles: Sequence[UserProfile], forwarded_mirror_message: bool,
                                forwarder_user_profile: Optional[UserProfile],
                                sender: UserProfile, allow_deactivated: bool=False) -> Recipient:
    """Validate the recipient profiles, then resolve them to a personal
    or huddle Recipient."""
    validated_ids = validate_recipient_user_profiles(
        user_profiles, sender, allow_deactivated=allow_deactivated)
    return get_recipient_from_user_ids(validated_ids, forwarded_mirror_message,
                                       forwarder_user_profile, sender)
def already_sent_mirrored_message_id(message: Message) -> Optional[int]:
    """Return the id of an identical, already-sent mirrored message, if
    one exists; otherwise None.

    Used by the zephyr mirroring codepath to deduplicate copies of the
    same message delivered by multiple mirrors.
    """
    if message.recipient.type == Recipient.HUDDLE:
        # For huddle messages, we use a 10-second window because the
        # timestamps aren't guaranteed to actually match between two
        # copies of the same message.
        time_window = datetime.timedelta(seconds=10)
    else:
        time_window = datetime.timedelta(seconds=0)

    candidates = filter_by_exact_message_topic(
        query=Message.objects.filter(
            sender=message.sender,
            recipient=message.recipient,
            content=message.content,
            sending_client=message.sending_client,
            date_sent__gte=message.date_sent - time_window,
            date_sent__lte=message.date_sent + time_window),
        message=message,
    )

    if candidates.exists():
        return candidates[0].id
    return None
def extract_recipients(
        s: Union[str, Iterable[str], Iterable[int]]
) -> Union[List[str], List[int]]:
    """Normalize the various incoming recipient formats into a
    de-duplicated list of emails or of user ids.

    We try to accept multiple incoming formats for recipients: a
    JSON-encoded list, a comma-separated string, or an already-parsed
    list.  See test_extract_recipients() for examples of what we allow.

    Raises ValueError for non-list JSON payloads, and TypeError when
    the list contains neither strings nor ints (or mixes them).
    """
    if isinstance(s, str):
        try:
            data = ujson.loads(s)
        except (ValueError, TypeError):
            data = s
    else:
        data = s

    if isinstance(data, str):
        data = data.split(',')

    if not isinstance(data, list):
        raise ValueError("Invalid data type for recipients")

    if not data:
        # We don't complain about empty message recipients here
        return data

    if isinstance(data[0], str):
        recipients = extract_emails(data)  # type: Union[List[str], List[int]]
    elif isinstance(data[0], int):
        recipients = extract_user_ids(data)
    else:
        # Previously this case fell through and crashed with a
        # NameError on `recipients`; fail with the same error the
        # per-element validators raise.
        raise TypeError("Recipient lists may contain emails or user IDs, but not both.")

    # Remove any duplicates.
    return list(set(recipients))  # type: ignore # mypy gets confused about what's passed to set()
def extract_user_ids(user_ids: Iterable[int]) -> List[int]:
    """Validate that every element is an int and return them as a list.

    Raises TypeError if any element is not an integer.
    """
    validated = []  # type: List[int]
    for candidate in user_ids:
        if isinstance(candidate, int):
            validated.append(candidate)
        else:
            raise TypeError("Recipient lists may contain emails or user IDs, but not both.")
    return validated
def extract_emails(emails: Iterable[str]) -> List[str]:
    """Strip whitespace from each email and drop any empty entries.

    Raises TypeError if any element is not a string.
    """
    cleaned = []  # type: List[str]
    for item in emails:
        if not isinstance(item, str):
            raise TypeError("Recipient lists may contain emails or user IDs, but not both.")
        stripped = item.strip()
        if stripped:
            cleaned.append(stripped)
    return cleaned
def check_send_stream_message(sender: UserProfile, client: Client, stream_name: str,
                              topic: str, body: str, realm: Optional[Realm]=None) -> int:
    """Validate and send a stream message, returning the message id."""
    addressee = Addressee.for_stream_name(stream_name, topic)
    prepared = check_message(sender, client, addressee, body, realm)
    return do_send_messages([prepared])[0]
def check_send_private_message(sender: UserProfile, client: Client,
                               receiving_user: UserProfile, body: str) -> int:
    """Validate and send a 1:1 private message, returning the message id."""
    addressee = Addressee.for_user_profile(receiving_user)
    prepared = check_message(sender, client, addressee, body)
    return do_send_messages([prepared])[0]
# check_send_message:
# Returns the id of the sent message.  Has same argspec as check_message.
def check_send_message(sender: UserProfile, client: Client, message_type_name: str,
                       message_to: Union[Sequence[int], Sequence[str]],
                       topic_name: Optional[str],
                       message_content: str, realm: Optional[Realm]=None,
                       forged: bool=False, forged_timestamp: Optional[float]=None,
                       forwarder_user_profile: Optional[UserProfile]=None,
                       local_id: Optional[str]=None,
                       sender_queue_id: Optional[str]=None,
                       widget_content: Optional[str]=None) -> int:
    addressee = Addressee.legacy_build(
        sender, message_type_name, message_to, topic_name)

    prepared = check_message(sender, client, addressee,
                             message_content, realm, forged, forged_timestamp,
                             forwarder_user_profile, local_id, sender_queue_id,
                             widget_content)
    return do_send_messages([prepared])[0]
def check_schedule_message(sender: UserProfile, client: Client,
                           message_type_name: str,
                           message_to: Union[Sequence[str], Sequence[int]],
                           topic_name: Optional[str], message_content: str,
                           delivery_type: str, deliver_at: datetime.datetime,
                           realm: Optional[Realm]=None,
                           forwarder_user_profile: Optional[UserProfile]=None
                           ) -> int:
    """Validate a scheduled message and persist it, returning its id.

    Raises JsonableError when a 'remind' is requested for anything other
    than a stream message or a PM to oneself.
    """
    addressee = Addressee.legacy_build(
        sender, message_type_name, message_to, topic_name)

    prepared = check_message(sender, client, addressee,
                             message_content, realm=realm,
                             forwarder_user_profile=forwarder_user_profile)
    prepared['deliver_at'] = deliver_at
    prepared['delivery_type'] = delivery_type

    recipient = prepared['message'].recipient
    is_stream = recipient.type == Recipient.STREAM
    is_self_pm = recipient.type_id == sender.id
    if delivery_type == 'remind' and not (is_stream or is_self_pm):
        raise JsonableError(_("Reminders can only be set for streams."))

    return do_schedule_messages([prepared])[0]
def check_stream_name(stream_name: str) -> None:
    """Raise JsonableError if stream_name is empty/whitespace, exceeds
    Stream.MAX_NAME_LENGTH, or contains NULL (0x00) characters."""
    if stream_name.strip() == "":
        raise JsonableError(_("Invalid stream name '%s'") % (stream_name,))
    if len(stream_name) > Stream.MAX_NAME_LENGTH:
        raise JsonableError(_("Stream name too long (limit: %s characters).") % (Stream.MAX_NAME_LENGTH,))
    # Substring containment replaces the old per-character ord() scan.
    if '\x00' in stream_name:
        raise JsonableError(_("Stream name '%s' contains NULL (0x00) characters.") % (stream_name,))
def check_default_stream_group_name(group_name: str) -> None:
    """Raise JsonableError if group_name is empty/whitespace, exceeds
    DefaultStreamGroup.MAX_NAME_LENGTH, or contains NULL characters."""
    if group_name.strip() == "":
        raise JsonableError(_("Invalid default stream group name '%s'") % (group_name,))
    if len(group_name) > DefaultStreamGroup.MAX_NAME_LENGTH:
        raise JsonableError(_("Default stream group name too long (limit: %s characters)")
                            % (DefaultStreamGroup.MAX_NAME_LENGTH,))
    # Substring containment replaces the old per-character ord() scan.
    if '\x00' in group_name:
        raise JsonableError(_("Default stream group name '%s' contains NULL (0x00) characters.")
                            % (group_name,))
def send_rate_limited_pm_notification_to_bot_owner(sender: UserProfile,
                                                   realm: Realm,
                                                   content: str) -> None:
    """
    Sends a PM error notification to a bot's owner if one hasn't already
    been sent in the last 5 minutes.
    """
    # Skip zephyr-mirror and deactivated realms, and anything that
    # isn't a bot with an owner to notify.
    if sender.realm.is_zephyr_mirror_realm or sender.realm.deactivated:
        return
    if not sender.is_bot or sender.bot_owner is None:
        return

    # Don't send these notifications for cross-realm bot messages
    # (e.g. from EMAIL_GATEWAY_BOT) since the owner for
    # EMAIL_GATEWAY_BOT is probably the server administrator, not
    # the owner of the bot who could potentially fix the problem.
    if sender.realm != realm:
        return

    # We warn the user once every 5 minutes to avoid a flood of
    # PMs on a misconfigured integration, re-using the
    # UserProfile.last_reminder field, which is not used for bots.
    waitperiod = datetime.timedelta(minutes=UserProfile.BOT_OWNER_STREAM_ALERT_WAITPERIOD)
    last_reminder = sender.last_reminder
    if last_reminder and timezone_now() - last_reminder <= waitperiod:
        return

    internal_send_private_message(realm, get_system_bot(settings.NOTIFICATION_BOT),
                                  sender.bot_owner, content)

    sender.last_reminder = timezone_now()
    sender.save(update_fields=['last_reminder'])
def send_pm_if_empty_stream(stream: Optional[Stream],
                            realm: Realm,
                            sender: UserProfile,
                            stream_name: Optional[str]=None,
                            stream_id: Optional[int]=None) -> None:
    """If a bot sends a message to a stream that doesn't exist or has no
    subscribers, sends a notification to the bot owner (if not a
    cross-realm bot) so that the owner can correct the issue."""
    if not sender.is_bot or sender.bot_owner is None:
        return

    # An existing stream with at least one subscriber needs no warning.
    if stream is not None and num_subscribers_for_stream_id(stream.id) > 0:
        return

    arg_dict = {
        "bot_identity": sender.delivery_email,
        "stream_id": stream_id,
        "stream_name": stream_name,
    }

    if stream is not None:
        content = _("Your bot `%(bot_identity)s` tried to send a message to "
                    "stream #**%(stream_name)s**. The stream exists but "
                    "does not have any subscribers.") % arg_dict
    elif stream_id is not None:
        content = _("Your bot `%(bot_identity)s` tried to send a message to stream ID "
                    "%(stream_id)s, but there is no stream with that ID.") % arg_dict
    else:
        assert stream_name is not None
        content = _("Your bot `%(bot_identity)s` tried to send a message to stream "
                    "#**%(stream_name)s**, but that stream does not exist. "
                    "Click [here](#streams/new) to create it.") % arg_dict

    send_rate_limited_pm_notification_to_bot_owner(sender, realm, content)
def validate_sender_can_write_to_stream(sender: UserProfile,
                                        stream: Stream,
                                        forwarder_user_profile: Optional[UserProfile]) -> None:
    """Raise JsonableError unless `sender` is allowed to post to `stream`.

    Our caller is responsible for making sure that `stream` actually
    matches the realm of the sender.
    """
    if stream.is_announcement_only:
        # Announcement-only streams accept posts from realm admins,
        # cross-realm bots, and bots owned by a realm admin.
        if not (sender.is_realm_admin or
                is_cross_realm_bot_email(sender.delivery_email) or
                (sender.is_bot and sender.bot_owner is not None and
                 sender.bot_owner.is_realm_admin)):
            raise JsonableError(_("Only organization administrators can send to this stream."))

    if not stream.invite_only and not sender.is_guest:
        # A public stream and a non-guest sender: always allowed.
        return

    if subscribed_to_stream(sender, stream.id):
        # The stream is private, but the sender is subscribed.
        return

    if sender.is_api_super_user:
        return
    if forwarder_user_profile is not None and forwarder_user_profile.is_api_super_user:
        return

    if (sender.is_bot and sender.bot_owner is not None and
            subscribed_to_stream(sender.bot_owner, stream.id)):
        # Bots can send to any stream their owner can.
        return

    if sender.delivery_email in (settings.WELCOME_BOT, settings.NOTIFICATION_BOT):
        # The welcome/notification bots may post anywhere.
        return

    # All other cases are an error.
    raise JsonableError(_("Not authorized to send to stream '%s'") % (stream.name,))
def validate_stream_name_with_pm_notification(stream_name: str, realm: Realm,
                                              sender: UserProfile) -> Stream:
    """Resolve `stream_name` within `realm`, raising StreamDoesNotExistError
    if it does not exist.

    As a side effect, warns a bot sender's owner (rate-limited) when the
    stream is missing or has no subscribers.
    """
    stream_name = stream_name.strip()
    check_stream_name(stream_name)

    try:
        target = get_stream(stream_name, realm)
        send_pm_if_empty_stream(target, realm, sender)
    except Stream.DoesNotExist:
        send_pm_if_empty_stream(None, realm, sender, stream_name=stream_name)
        raise StreamDoesNotExistError(escape(stream_name))

    return target
def validate_stream_id_with_pm_notification(stream_id: int, realm: Realm,
                                            sender: UserProfile) -> Stream:
    """Resolve `stream_id` within `realm`, raising StreamWithIDDoesNotExistError
    if it does not exist.

    As a side effect, warns a bot sender's owner (rate-limited) when the
    stream is missing or has no subscribers.
    """
    try:
        target = get_stream_by_id_in_realm(stream_id, realm)
        send_pm_if_empty_stream(target, realm, sender)
    except Stream.DoesNotExist:
        send_pm_if_empty_stream(None, realm, sender, stream_id=stream_id)
        raise StreamWithIDDoesNotExistError(stream_id)

    return target
# check_message:
# Returns message ready for sending with do_send_message on success or the error message (string) on error.
# NOTE(review): the "error message (string)" claim above looks stale -- in the
# code below, all error paths raise JsonableError/AssertionError rather than
# returning a string. Confirm before relying on a string return.
def check_message(sender: UserProfile, client: Client, addressee: Addressee,
                  message_content_raw: str, realm: Optional[Realm]=None, forged: bool=False,
                  forged_timestamp: Optional[float]=None,
                  forwarder_user_profile: Optional[UserProfile]=None,
                  local_id: Optional[str]=None,
                  sender_queue_id: Optional[str]=None,
                  widget_content: Optional[str]=None) -> Dict[str, Any]:
    """See
    https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
    for high-level documentation on this subsystem.
    """
    stream = None
    # Phase 1: basic content validation and normalization.
    message_content = message_content_raw.rstrip()
    if len(message_content) == 0:
        raise JsonableError(_("Message must not be empty"))
    if '\x00' in message_content:
        raise JsonableError(_("Message must not contain null bytes"))
    message_content = truncate_body(message_content)
    if realm is None:
        realm = sender.realm
    # Phase 2: resolve the recipient and check authorization.
    if addressee.is_stream():
        topic_name = addressee.topic()
        topic_name = truncate_topic(topic_name)
        stream_name = addressee.stream_name()
        stream_id = addressee.stream_id()
        # The stream may be addressed by name, by id, or as an already
        # resolved Stream object; exactly one of these paths applies.
        if stream_name is not None:
            stream = validate_stream_name_with_pm_notification(stream_name, realm, sender)
        elif stream_id is not None:
            stream = validate_stream_id_with_pm_notification(stream_id, realm, sender)
        else:
            stream = addressee.stream()
        assert stream is not None
        recipient = get_stream_recipient(stream.id)
        # This will raise JsonableError if there are problems.
        validate_sender_can_write_to_stream(
            sender=sender,
            stream=stream,
            forwarder_user_profile=forwarder_user_profile
        )
    elif addressee.is_private():
        user_profiles = addressee.user_profiles()
        mirror_message = client and client.name in ["zephyr_mirror", "irc_mirror",
                                                    "jabber_mirror", "JabberMirror"]
        # API Super-users who set the `forged` flag are allowed to
        # forge messages sent by any user, so we disable the
        # `forwarded_mirror_message` security check in that case.
        forwarded_mirror_message = mirror_message and not forged
        try:
            recipient = recipient_for_user_profiles(user_profiles,
                                                    forwarded_mirror_message,
                                                    forwarder_user_profile, sender)
        except ValidationError as e:
            assert isinstance(e.messages[0], str)
            raise JsonableError(e.messages[0])
    else:
        # This is defensive code--Addressee already validates
        # the message type.
        raise AssertionError("Invalid message type")
    # Phase 3: build the (unsaved) Message object.
    message = Message()
    message.sender = sender
    message.content = message_content
    message.recipient = recipient
    if addressee.is_stream():
        message.set_topic_name(topic_name)
    if forged and forged_timestamp is not None:
        # Forged messages come with a timestamp
        message.date_sent = timestamp_to_datetime(forged_timestamp)
    else:
        message.date_sent = timezone_now()
    message.sending_client = client
    # We render messages later in the process.
    assert message.rendered_content is None
    if client.name == "zephyr_mirror":
        # Deduplicate mirrored messages: if an identical message was
        # already mirrored, return its id instead of a new Message.
        id = already_sent_mirrored_message_id(message)
        if id is not None:
            return {'message': id}
    # Phase 4: optional widget payload validation.
    if widget_content is not None:
        try:
            widget_content = ujson.loads(widget_content)
        except Exception:
            raise JsonableError(_('Widgets: API programmer sent invalid JSON content'))
        error_msg = check_widget_content(widget_content)
        if error_msg:
            raise JsonableError(_('Widgets: %s') % (error_msg,))
    return {'message': message, 'stream': stream, 'local_id': local_id,
            'sender_queue_id': sender_queue_id, 'realm': realm,
            'widget_content': widget_content}
def _internal_prep_message(realm: Realm,
                           sender: UserProfile,
                           addressee: Addressee,
                           content: str) -> Optional[Dict[str, Any]]:
    """
    Create a message object and checks it, but doesn't send it or save it to the database.
    The internal function that calls this can therefore batch send a bunch of created
    messages together as one database query.
    Call do_send_messages with a list of the return values of this method.

    Returns None (after logging) if check_message rejects the message.
    """
    # Truncate over-long content, appending an explanatory marker.
    # NOTE(review): the 3900 cut-off is hard-coded rather than derived from
    # MAX_MESSAGE_LENGTH; presumably it leaves headroom for the truncation
    # notice, but confirm before changing either constant.
    if len(content) > MAX_MESSAGE_LENGTH:
        content = content[0:3900] + "\n\n[message was too long and has been truncated]"
    if realm is None:
        raise RuntimeError("None is not a valid realm for internal_prep_message!")
    # If we have a stream name, and the stream doesn't exist, we
    # create it here (though this code path should probably be removed
    # eventually, moving that responsibility to the caller). If
    # addressee.stream_name() is None (i.e. we're sending to a stream
    # by ID), we skip this, as the stream object must already exist.
    if addressee.is_stream():
        stream_name = addressee.stream_name()
        if stream_name is not None:
            ensure_stream(realm, stream_name)
    try:
        return check_message(sender, get_client("Internal"), addressee,
                             content, realm=realm)
    except JsonableError as e:
        # Internal messages are best-effort: log and swallow the error so a
        # bad message never breaks the calling code path.
        logging.exception("Error queueing internal message by %s: %s" % (
            sender.delivery_email, e))
    return None
def internal_prep_stream_message(
        realm: Realm, sender: UserProfile,
        stream: Stream, topic: str, content: str
) -> Optional[Dict[str, Any]]:
    """
    See _internal_prep_message for details of how this works.
    """
    return _internal_prep_message(
        realm=realm,
        sender=sender,
        addressee=Addressee.for_stream(stream, topic),
        content=content,
    )
def internal_prep_stream_message_by_name(
        realm: Realm, sender: UserProfile,
        stream_name: str, topic: str, content: str
) -> Optional[Dict[str, Any]]:
    """
    See _internal_prep_message for details of how this works.
    """
    return _internal_prep_message(
        realm=realm,
        sender=sender,
        addressee=Addressee.for_stream_name(stream_name, topic),
        content=content,
    )
def internal_prep_private_message(realm: Realm,
                                  sender: UserProfile,
                                  recipient_user: UserProfile,
                                  content: str) -> Optional[Dict[str, Any]]:
    """
    See _internal_prep_message for details of how this works.
    """
    return _internal_prep_message(
        realm=realm,
        sender=sender,
        addressee=Addressee.for_user_profile(recipient_user),
        content=content,
    )
def internal_send_message(realm: Realm, sender_email: str, recipient_type_name: str,
                          recipients: str, topic_name: str, content: str,
                          email_gateway: Optional[bool]=False) -> Optional[int]:
    """internal_send_message should only be used where `sender_email` is a
    system bot.

    Returns the id of the sent message, or None if preparation failed.
    """
    # Verify the user is in fact a system bot
    assert(is_cross_realm_bot_email(sender_email) or sender_email == settings.ERROR_BOT)
    sender = get_system_bot(sender_email)
    # `recipients` arrives as a single string; extract_recipients parses
    # it into a list for the legacy Addressee builder below.
    parsed_recipients = extract_recipients(recipients)
    addressee = Addressee.legacy_build(
        sender,
        recipient_type_name,
        parsed_recipients,
        topic_name,
        realm=realm)
    msg = _internal_prep_message(
        realm=realm,
        sender=sender,
        addressee=addressee,
        content=content,
    )
    if msg is None:
        return None
    message_ids = do_send_messages([msg], email_gateway=email_gateway)
    return message_ids[0]
def internal_send_private_message(realm: Realm,
                                  sender: UserProfile,
                                  recipient_user: UserProfile,
                                  content: str) -> Optional[int]:
    """Prepare and send a private message; return its id, or None on failure."""
    msg = internal_prep_private_message(realm, sender, recipient_user, content)
    if msg is None:
        return None
    return do_send_messages([msg])[0]
def internal_send_stream_message(
        realm: Realm, sender: UserProfile,
        stream: Stream, topic: str, content: str
) -> Optional[int]:
    """Prepare and send a stream message; return its id, or None on failure."""
    msg = internal_prep_stream_message(realm, sender, stream, topic, content)
    if msg is None:
        return None
    return do_send_messages([msg])[0]
def internal_send_stream_message_by_name(
        realm: Realm, sender: UserProfile,
        stream_name: str, topic: str, content: str
) -> Optional[int]:
    """Prepare and send a stream message by stream name; return its id, or None on failure."""
    msg = internal_prep_stream_message_by_name(realm, sender, stream_name, topic, content)
    if msg is None:
        return None
    return do_send_messages([msg])[0]
def internal_send_huddle_message(realm: Realm, sender: UserProfile, emails: List[str],
                                 content: str) -> Optional[int]:
    """Prepare and send a group private message; return its id, or None on failure."""
    msg = _internal_prep_message(
        realm=realm,
        sender=sender,
        addressee=Addressee.for_private(emails, realm),
        content=content,
    )
    if msg is None:
        return None
    return do_send_messages([msg])[0]
def pick_color(user_profile: UserProfile, subs: Iterable[Subscription]) -> str:
    """Choose a color for a new subscription.

    Prefers the first palette color (shared with the palette in subs.js)
    not used by any active subscription; once the palette is exhausted,
    cycles through it deterministically.
    """
    # Keep this a list: duplicates matter for the modulo fallback below.
    used_colors = [sub.color for sub in subs if sub.active]
    for candidate in STREAM_ASSIGNMENT_COLORS:
        if candidate not in used_colors:
            return candidate
    return STREAM_ASSIGNMENT_COLORS[len(used_colors) % len(STREAM_ASSIGNMENT_COLORS)]
def validate_user_access_to_subscribers(user_profile: Optional[UserProfile],
                                        stream: Stream) -> None:
    """ Validates whether the user can view the subscribers of a stream. Raises a JsonableError if:
        * The user and the stream are in different realms
        * The realm is MIT and the stream is not invite only.
        * The stream is invite only, requesting_user is passed, and that user
          does not subscribe to the stream.
    """
    stream_dict = {
        "realm_id": stream.realm_id,
        "invite_only": stream.invite_only,
    }

    # Named closure (instead of a lambda) so the subscription lookup only
    # hits the database if the helper actually needs it.
    def check_user_subscribed() -> bool:
        return subscribed_to_stream(cast(UserProfile, user_profile), stream.id)

    validate_user_access_to_subscribers_helper(user_profile, stream_dict,
                                               check_user_subscribed)
def validate_user_access_to_subscribers_helper(user_profile: Optional[UserProfile],
                                               stream_dict: Mapping[str, Any],
                                               check_user_subscribed: Callable[[], bool]) -> None:
    """Helper for validate_user_access_to_subscribers that doesn't require
    a full stream object.  This function is a bit hard to read,
    because it is carefully optimized for performance in the two code
    paths we call it from:

    * In `bulk_get_subscriber_user_ids`, we already know whether the
    user was subscribed via `sub_dict`, and so we want to avoid a
    database query at all (especially since it calls this in a loop);
    * In `validate_user_access_to_subscribers`, we want to only check
    if the user is subscribed when we absolutely have to, since it
    costs a database query.

    The `check_user_subscribed` argument is a function that reports
    whether the user is subscribed to the stream.

    Note also that we raise a ValidationError in cases where the
    caller is doing the wrong thing (maybe these should be
    AssertionErrors), and JsonableError for 400 type errors.
    """
    if user_profile is None:
        raise ValidationError("Missing user to validate access for")
    if user_profile.realm_id != stream_dict["realm_id"]:
        raise ValidationError("Requesting user not in given realm")
    # Guest users can access subscribed public stream's subscribers
    if user_profile.is_guest:
        if check_user_subscribed():
            return
        # We could put an AssertionError here; in that we don't have
        # any code paths that would allow a guest user to access other
        # streams in the first place.
        # (A non-subscribed guest deliberately falls through to the
        # checks below rather than returning here.)
    if not user_profile.can_access_public_streams() and not stream_dict["invite_only"]:
        raise JsonableError(_("Subscriber data is not available for this stream"))
    # Organization administrators can view subscribers for all streams.
    if user_profile.is_realm_admin:
        return
    if (stream_dict["invite_only"] and not check_user_subscribed()):
        raise JsonableError(_("Unable to retrieve subscribers for private stream"))
def bulk_get_subscriber_user_ids(stream_dicts: Iterable[Mapping[str, Any]],
                                 user_profile: UserProfile,
                                 sub_dict: Mapping[int, bool],
                                 stream_recipient: StreamRecipientMap) -> Dict[int, List[int]]:
    """sub_dict maps stream_id => whether the user is subscribed to that stream.

    Returns a mapping from stream_id to the sorted-by-recipient list of
    active subscriber user ids; streams the user may not access map to [].
    """
    target_stream_dicts = []
    for stream_dict in stream_dicts:
        try:
            # The lambda late-binds `stream_dict`, but it is only invoked
            # synchronously inside this same loop iteration, so that's safe.
            validate_user_access_to_subscribers_helper(user_profile, stream_dict,
                                                       lambda: sub_dict[stream_dict["id"]])
        except JsonableError:
            # Streams the user can't access are silently skipped; they
            # keep their empty-list entry in `result` below.
            continue
        target_stream_dicts.append(stream_dict)
    stream_ids = [stream['id'] for stream in target_stream_dicts]
    stream_recipient.populate_for_stream_ids(stream_ids)
    recipient_ids = sorted([
        stream_recipient.recipient_id_for(stream_id)
        for stream_id in stream_ids
    ])
    result = dict((stream["id"], []) for stream in stream_dicts)  # type: Dict[int, List[int]]
    if not recipient_ids:
        return result
    '''
    The raw SQL below leads to more than a 2x speedup when tested with
    20k+ total subscribers.  (For large realms with lots of default
    streams, this function deals with LOTS of data, so it is important
    to optimize.)
    '''
    # Safe string interpolation: recipient_ids are integers that came from
    # the database, never user-controlled strings.
    id_list = ', '.join(str(recipient_id) for recipient_id in recipient_ids)
    query = '''
        SELECT
            zerver_subscription.recipient_id,
            zerver_subscription.user_profile_id
        FROM
            zerver_subscription
        INNER JOIN zerver_userprofile ON
            zerver_userprofile.id = zerver_subscription.user_profile_id
        WHERE
            zerver_subscription.recipient_id in (%s) AND
            zerver_subscription.active AND
            zerver_userprofile.is_active
        ORDER BY
            zerver_subscription.recipient_id,
            zerver_subscription.user_profile_id
        ''' % (id_list,)
    cursor = connection.cursor()
    cursor.execute(query)
    rows = cursor.fetchall()
    cursor.close()
    recip_to_stream_id = stream_recipient.recipient_to_stream_id_dict()
    '''
    Using groupby/itemgetter here is important for performance, at scale.
    It makes it so that all interpreter overhead is just O(N) in nature.
    '''
    # The SQL ORDER BY recipient_id above is what makes groupby correct:
    # groupby only merges *consecutive* rows with equal keys.
    for recip_id, recip_rows in itertools.groupby(rows, itemgetter(0)):
        user_profile_ids = [r[1] for r in recip_rows]
        stream_id = recip_to_stream_id[recip_id]
        result[stream_id] = list(user_profile_ids)
    return result
def get_subscribers_query(stream: Stream, requesting_user: Optional[UserProfile]) -> QuerySet:
    # TODO: Make a generic stub for QuerySet
    """ Build a query to get the subscribers list for a stream, raising a JsonableError if:
    'realm' is optional in stream.

    The caller can refine this query with select_related(), values(), etc. depending
    on whether it wants objects or just certain fields
    """
    validate_user_access_to_subscribers(requesting_user, stream)

    # Non-active users may still have "active" subscriptions, so that we can
    # easily reactivate them with their old subscriptions intact; hence the
    # explicit filter on UserProfile.is_active here.
    return get_active_subscriptions_for_stream_id(stream.id).filter(
        user_profile__is_active=True,
    )
def get_subscriber_emails(stream: Stream,
                          requesting_user: Optional[UserProfile]=None) -> List[str]:
    """Return the email addresses of a stream's active subscribers."""
    rows = get_subscribers_query(stream, requesting_user).values('user_profile__email')
    return [row['user_profile__email'] for row in rows]
def notify_subscriptions_added(user_profile: UserProfile,
                               sub_pairs: Iterable[Tuple[Subscription, Stream]],
                               stream_user_ids: Callable[[Stream], List[int]],
                               recent_traffic: Dict[int, int],
                               no_log: bool=False) -> None:
    """Send `user_profile` a subscription/add event describing the streams
    in `sub_pairs` it was just subscribed to.

    `stream_user_ids` supplies the subscriber ids to embed per stream;
    `recent_traffic` feeds the stream_weekly_traffic field.
    """
    if not no_log:
        log_event({'type': 'subscription_added',
                   'user': user_profile.email,
                   'names': [stream.name for sub, stream in sub_pairs],
                   'realm': user_profile.realm.string_id})
    # Send a notification to the user who subscribed.
    payload = [dict(name=stream.name,
                    stream_id=stream.id,
                    # in_home_view is the legacy (inverted) name for is_muted.
                    in_home_view=not subscription.is_muted,
                    is_muted=subscription.is_muted,
                    invite_only=stream.invite_only,
                    is_web_public=stream.is_web_public,
                    is_announcement_only=stream.is_announcement_only,
                    color=subscription.color,
                    email_address=encode_email_address(stream, show_sender=True),
                    desktop_notifications=subscription.desktop_notifications,
                    audible_notifications=subscription.audible_notifications,
                    push_notifications=subscription.push_notifications,
                    email_notifications=subscription.email_notifications,
                    description=stream.description,
                    rendered_description=stream.rendered_description,
                    pin_to_top=subscription.pin_to_top,
                    is_old_stream=is_old_stream(stream.date_created),
                    first_message_id=stream.first_message_id,
                    stream_weekly_traffic=get_average_weekly_stream_traffic(
                        stream.id, stream.date_created, recent_traffic),
                    subscribers=stream_user_ids(stream),
                    history_public_to_subscribers=stream.history_public_to_subscribers)
               for (subscription, stream) in sub_pairs]
    event = dict(type="subscription", op="add",
                 subscriptions=payload)
    send_event(user_profile.realm, event, [user_profile.id])
def get_peer_user_ids_for_stream_change(stream: Stream,
                                        altered_user_ids: Iterable[int],
                                        subscribed_user_ids: Iterable[int]) -> Set[int]:
    '''
    altered_user_ids is the user_ids that we are adding/removing
    subscribed_user_ids is the already-subscribed user_ids

    Based on stream policy, we notify the correct bystanders, while
    not notifying altered_users (who get subscribers via another event)
    '''
    if stream.invite_only:
        # PRIVATE STREAMS
        # Realm admins can access all private stream subscribers, so they
        # get an event even when not subscribed to the stream.
        admin_ids = {user.id for user in stream.realm.get_admin_users_and_bots()}
        bystanders = admin_ids | set(subscribed_user_ids)
    else:
        # PUBLIC STREAMS
        # We now do "peer_add" or "peer_remove" events even for streams
        # users were never subscribed to, in order for the neversubscribed
        # structure to stay up-to-date.
        bystanders = set(active_non_guest_user_ids(stream.realm_id))

    return bystanders - set(altered_user_ids)
def get_user_ids_for_streams(streams: Iterable[Stream]) -> Dict[int, List[int]]:
    """Map each stream's id to the list of its active subscribers' user ids.

    Streams with no active subscribers simply have no key (defaultdict).
    """
    stream_ids = [stream.id for stream in streams]

    # The order_by must match the groupby key below: groupby only merges
    # consecutive rows with equal keys.
    all_subs = get_active_subscriptions_for_stream_ids(stream_ids).filter(
        user_profile__is_active=True,
    ).values(
        'recipient__type_id',
        'user_profile_id',
    ).order_by(
        'recipient__type_id',
    )

    stream_id_of = itemgetter('recipient__type_id')
    all_subscribers_by_stream = defaultdict(list)  # type: Dict[int, List[int]]
    for stream_id, rows in itertools.groupby(all_subs, stream_id_of):
        all_subscribers_by_stream[stream_id] = [row['user_profile_id'] for row in rows]
    return all_subscribers_by_stream
def get_last_message_id() -> int:
    """Return the highest message id systemwide, or -1 if no messages exist.

    We generally use this function to populate RealmAuditLog; the max id
    here is systemwide, not per-realm. I assume there's some advantage
    in not filtering by realm.
    """
    last_id = Message.objects.aggregate(Max('id'))['id__max']
    if last_id is not None:
        return last_id
    # During initial realm creation, there might be 0 messages in the
    # database; in that case, the `aggregate` query returns None. Since
    # we want an int for "beginning of time", use -1.
    return -1
# (new_subscriptions, already_subscribed) pairs of (user, stream).
SubT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
def bulk_add_subscriptions(streams: Iterable[Stream],
                           users: Iterable[UserProfile],
                           color_map: Optional[Dict[str, str]]=None,
                           from_stream_creation: bool=False,
                           acting_user: Optional[UserProfile]=None) -> SubT:
    """Subscribe each of `users` to each of `streams` in bulk.

    Creates/reactivates Subscription rows inside one transaction, writes
    RealmAuditLog entries, and sends the occupy/create/subscription/peer_add
    events the various clients need.  Returns (subscribed, already_subscribed)
    lists of (user_profile, stream) pairs.
    """
    users = list(users)
    recipients_map = bulk_get_recipients(Recipient.STREAM, [stream.id for stream in streams])  # type: Mapping[int, Recipient]
    recipients = [recipient.id for recipient in recipients_map.values()]  # type: List[int]
    stream_map = {}  # type: Dict[int, Stream]
    for stream in streams:
        stream_map[recipients_map[stream.id].id] = stream
    subs_by_user = defaultdict(list)  # type: Dict[int, List[Subscription]]
    all_subs_query = get_stream_subscriptions_for_users(users).select_related('user_profile')
    for sub in all_subs_query:
        subs_by_user[sub.user_profile_id].append(sub)
    realm = users[0].realm
    # Partition the requested (user, stream) pairs into: already active,
    # existing-but-inactive (to reactivate), and brand new.
    already_subscribed = []  # type: List[Tuple[UserProfile, Stream]]
    subs_to_activate = []  # type: List[Tuple[Subscription, Stream]]
    new_subs = []  # type: List[Tuple[UserProfile, int, Stream]]
    for user_profile in users:
        needs_new_sub = set(recipients)  # type: Set[int]
        for sub in subs_by_user[user_profile.id]:
            if sub.recipient_id in needs_new_sub:
                needs_new_sub.remove(sub.recipient_id)
                if sub.active:
                    already_subscribed.append((user_profile, stream_map[sub.recipient_id]))
                else:
                    subs_to_activate.append((sub, stream_map[sub.recipient_id]))
                    # Mark the sub as active, without saving, so that
                    # pick_color will consider this to be an active
                    # subscription when picking colors
                    sub.active = True
        for recipient_id in needs_new_sub:
            new_subs.append((user_profile, recipient_id, stream_map[recipient_id]))
    subs_to_add = []  # type: List[Tuple[Subscription, Stream]]
    for (user_profile, recipient_id, stream) in new_subs:
        if color_map is not None and stream.name in color_map:
            color = color_map[stream.name]
        else:
            color = pick_color(user_profile, subs_by_user[user_profile.id])
        sub_to_add = Subscription(user_profile=user_profile, active=True,
                                  color=color, recipient_id=recipient_id)
        subs_by_user[user_profile.id].append(sub_to_add)
        subs_to_add.append((sub_to_add, stream))
    # TODO: XXX: This transaction really needs to be done at the serializeable
    # transaction isolation level.
    with transaction.atomic():
        occupied_streams_before = list(get_occupied_streams(realm))
        Subscription.objects.bulk_create([sub for (sub, stream) in subs_to_add])
        sub_ids = [sub.id for (sub, stream) in subs_to_activate]
        Subscription.objects.filter(id__in=sub_ids).update(active=True)
        occupied_streams_after = list(get_occupied_streams(realm))
    # Log Subscription Activities in RealmAuditLog
    event_time = timezone_now()
    event_last_message_id = get_last_message_id()
    all_subscription_logs = []  # type: (List[RealmAuditLog])
    for (sub, stream) in subs_to_add:
        all_subscription_logs.append(RealmAuditLog(realm=realm,
                                                   acting_user=acting_user,
                                                   modified_user=sub.user_profile,
                                                   modified_stream=stream,
                                                   event_last_message_id=event_last_message_id,
                                                   event_type=RealmAuditLog.SUBSCRIPTION_CREATED,
                                                   event_time=event_time))
    for (sub, stream) in subs_to_activate:
        all_subscription_logs.append(RealmAuditLog(realm=realm,
                                                   acting_user=acting_user,
                                                   modified_user=sub.user_profile,
                                                   modified_stream=stream,
                                                   event_last_message_id=event_last_message_id,
                                                   event_type=RealmAuditLog.SUBSCRIPTION_ACTIVATED,
                                                   event_time=event_time))
    # Now since we have all log objects generated we can do a bulk insert
    RealmAuditLog.objects.bulk_create(all_subscription_logs)
    new_occupied_streams = [stream for stream in
                            set(occupied_streams_after) - set(occupied_streams_before)
                            if not stream.invite_only]
    if new_occupied_streams and not from_stream_creation:
        event = dict(type="stream", op="occupy",
                     streams=[stream.to_dict()
                              for stream in new_occupied_streams])
        send_event(realm, event, active_user_ids(realm.id))
    # Notify all existing users on streams that users have joined
    # First, get all users subscribed to the streams that we care about
    # We fetch all subscription information upfront, as it's used throughout
    # the following code and we want to minimize DB queries
    all_subscribers_by_stream = get_user_ids_for_streams(streams=streams)
    def fetch_stream_subscriber_user_ids(stream: Stream) -> List[int]:
        # Public zephyr-mirror streams don't expose subscriber lists.
        if stream.is_in_zephyr_realm and not stream.invite_only:
            return []
        user_ids = all_subscribers_by_stream[stream.id]
        return user_ids
    sub_tuples_by_user = defaultdict(list)  # type: Dict[int, List[Tuple[Subscription, Stream]]]
    new_streams = set()  # type: Set[Tuple[int, int]]
    for (sub, stream) in subs_to_add + subs_to_activate:
        sub_tuples_by_user[sub.user_profile.id].append((sub, stream))
        new_streams.add((sub.user_profile.id, stream.id))
    # We now send several types of events to notify browsers.  The
    # first batch is notifications to users on invite-only streams
    # that the stream exists.
    for stream in streams:
        if not stream.is_public():
            # Users newly added to invite-only streams
            # need a `create` notification.  The former, because
            # they need the stream to exist before
            # they get the "subscribe" notification, and the latter so
            # they can manage the new stream.
            # Realm admins already have all created private streams.
            realm_admin_ids = [user.id for user in realm.get_admin_users_and_bots()]
            new_users_ids = [user.id for user in users if (user.id, stream.id) in new_streams and
                             user.id not in realm_admin_ids]
            send_stream_creation_event(stream, new_users_ids)
    stream_ids = {stream.id for stream in streams}
    recent_traffic = get_streams_traffic(stream_ids=stream_ids)
    # The second batch is events for the users themselves that they
    # were subscribed to the new streams.
    for user_profile in users:
        if len(sub_tuples_by_user[user_profile.id]) == 0:
            continue
        sub_pairs = sub_tuples_by_user[user_profile.id]
        notify_subscriptions_added(user_profile, sub_pairs, fetch_stream_subscriber_user_ids,
                                   recent_traffic)
    # The third batch is events for other users who are tracking the
    # subscribers lists of streams in their browser; everyone for
    # public streams and only existing subscribers for private streams.
    for stream in streams:
        if stream.is_in_zephyr_realm and not stream.invite_only:
            continue
        new_user_ids = [user.id for user in users if (user.id, stream.id) in new_streams]
        subscribed_user_ids = all_subscribers_by_stream[stream.id]
        peer_user_ids = get_peer_user_ids_for_stream_change(
            stream=stream,
            altered_user_ids=new_user_ids,
            subscribed_user_ids=subscribed_user_ids,
        )
        if peer_user_ids:
            for new_user_id in new_user_ids:
                event = dict(type="subscription", op="peer_add",
                             subscriptions=[stream.name],
                             user_id=new_user_id)
                send_event(realm, event, peer_user_ids)
    return ([(user_profile, stream) for (user_profile, recipient_id, stream) in new_subs] +
            [(sub.user_profile, stream) for (sub, stream) in subs_to_activate],
            already_subscribed)
def get_available_notification_sounds() -> List[str]:
    """List the notification sound names (.ogg basenames) bundled in static assets."""
    sounds_dir = static_path('audio/notification_sounds')

    sound_names = []
    for file_name in os.listdir(sounds_dir):
        base_name, extension = os.path.splitext(file_name)

        if '.' in base_name:  # nocoverage
            # Exclude e.g. zulip.abcd1234.ogg (generated by production hash-naming)
            # to avoid spurious duplicates.
            continue

        if extension == '.ogg':
            sound_names.append(base_name)

    return sound_names
def notify_subscriptions_removed(user_profile: UserProfile, streams: Iterable[Stream],
                                 no_log: bool=False) -> None:
    """Send `user_profile` a subscription/remove event for `streams`."""
    if not no_log:
        log_event({'type': 'subscription_removed',
                   'user': user_profile.email,
                   'names': [stream.name for stream in streams],
                   'realm': user_profile.realm.string_id})

    removals = [dict(name=stream.name, stream_id=stream.id) for stream in streams]
    event = dict(type="subscription", op="remove", subscriptions=removals)
    send_event(user_profile.realm, event, [user_profile.id])
# (removed, not_subscribed) pairs of (user, stream).
SubAndRemovedT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
def bulk_remove_subscriptions(users: Iterable[UserProfile],
                              streams: Iterable[Stream],
                              acting_client: Client,
                              acting_user: Optional[UserProfile]=None) -> SubAndRemovedT:
    """Unsubscribe each of `users` from each of `streams` in bulk.

    Deactivates Subscription rows inside one transaction, writes
    RealmAuditLog entries, sends remove/peer_remove/vacate events, and
    deactivates any private streams left with no subscribers.  Returns
    (removed, not_subscribed) lists of (user_profile, stream) pairs.
    """
    users = list(users)
    streams = list(streams)
    stream_dict = {stream.id: stream for stream in streams}
    existing_subs_by_user = get_bulk_stream_subscriber_info(users, stream_dict)
    # Collect the (user, stream) pairs that were requested but where the
    # user has no existing subscription to remove.
    def get_non_subscribed_tups() -> List[Tuple[UserProfile, Stream]]:
        stream_ids = {stream.id for stream in streams}
        not_subscribed = []  # type: List[Tuple[UserProfile, Stream]]
        for user_profile in users:
            user_sub_stream_info = existing_subs_by_user[user_profile.id]
            subscribed_stream_ids = {
                stream.id
                for (sub, stream) in user_sub_stream_info
            }
            not_subscribed_stream_ids = stream_ids - subscribed_stream_ids
            for stream_id in not_subscribed_stream_ids:
                stream = stream_dict[stream_id]
                not_subscribed.append((user_profile, stream))
        return not_subscribed
    not_subscribed = get_non_subscribed_tups()
    subs_to_deactivate = []  # type: List[Tuple[Subscription, Stream]]
    sub_ids_to_deactivate = []  # type: List[int]
    # This loop just flattens out our data into big lists for
    # bulk operations.
    for tup_list in existing_subs_by_user.values():
        for (sub, stream) in tup_list:
            subs_to_deactivate.append((sub, stream))
            sub_ids_to_deactivate.append(sub.id)
    our_realm = users[0].realm
    # TODO: XXX: This transaction really needs to be done at the serializeable
    # transaction isolation level.
    with transaction.atomic():
        occupied_streams_before = list(get_occupied_streams(our_realm))
        Subscription.objects.filter(
            id__in=sub_ids_to_deactivate,
        ) .update(active=False)
        occupied_streams_after = list(get_occupied_streams(our_realm))
    # Log Subscription Activities in RealmAuditLog
    event_time = timezone_now()
    event_last_message_id = get_last_message_id()
    all_subscription_logs = []  # type: (List[RealmAuditLog])
    for (sub, stream) in subs_to_deactivate:
        all_subscription_logs.append(RealmAuditLog(realm=sub.user_profile.realm,
                                                   modified_user=sub.user_profile,
                                                   modified_stream=stream,
                                                   event_last_message_id=event_last_message_id,
                                                   event_type=RealmAuditLog.SUBSCRIPTION_DEACTIVATED,
                                                   event_time=event_time))
    # Now since we have all log objects generated we can do a bulk insert
    RealmAuditLog.objects.bulk_create(all_subscription_logs)
    altered_user_dict = defaultdict(list)  # type: Dict[int, List[UserProfile]]
    streams_by_user = defaultdict(list)  # type: Dict[int, List[Stream]]
    for (sub, stream) in subs_to_deactivate:
        streams_by_user[sub.user_profile_id].append(stream)
        altered_user_dict[stream.id].append(sub.user_profile)
    for user_profile in users:
        if len(streams_by_user[user_profile.id]) == 0:
            continue
        notify_subscriptions_removed(user_profile, streams_by_user[user_profile.id])
        # NOTE(review): this marks messages as read in *all* requested
        # streams, not just the ones this user was actually unsubscribed
        # from (streams_by_user[user_profile.id]) -- confirm intended.
        event = {'type': 'mark_stream_messages_as_read',
                 'client_id': acting_client.id,
                 'user_profile_id': user_profile.id,
                 'stream_ids': [stream.id for stream in streams]}
        queue_json_publish("deferred_work", event)
    all_subscribers_by_stream = get_user_ids_for_streams(streams=streams)
    def send_peer_remove_event(stream: Stream) -> None:
        # Public zephyr-mirror streams don't expose subscriber changes.
        if stream.is_in_zephyr_realm and not stream.invite_only:
            return
        altered_users = altered_user_dict[stream.id]
        altered_user_ids = [u.id for u in altered_users]
        subscribed_user_ids = all_subscribers_by_stream[stream.id]
        peer_user_ids = get_peer_user_ids_for_stream_change(
            stream=stream,
            altered_user_ids=altered_user_ids,
            subscribed_user_ids=subscribed_user_ids,
        )
        if peer_user_ids:
            for removed_user in altered_users:
                event = dict(type="subscription",
                             op="peer_remove",
                             subscriptions=[stream.name],
                             user_id=removed_user.id)
                send_event(our_realm, event, peer_user_ids)
    for stream in streams:
        send_peer_remove_event(stream=stream)
    new_vacant_streams = [stream for stream in
                          set(occupied_streams_before) - set(occupied_streams_after)]
    new_vacant_private_streams = [stream for stream in new_vacant_streams
                                  if stream.invite_only]
    new_vacant_public_streams = [stream for stream in new_vacant_streams
                                 if not stream.invite_only]
    if new_vacant_public_streams:
        event = dict(type="stream", op="vacate",
                     streams=[stream.to_dict()
                              for stream in new_vacant_public_streams])
        send_event(our_realm, event, active_user_ids(our_realm.id))
    if new_vacant_private_streams:
        # Deactivate any newly-vacant private streams
        for stream in new_vacant_private_streams:
            do_deactivate_stream(stream)
    return (
        [(sub.user_profile, stream) for (sub, stream) in subs_to_deactivate],
        not_subscribed,
    )
def log_subscription_property_change(user_email: str, stream_name: str, property: str,
                                     value: Any) -> None:
    """Record a subscription-property change in the server's event log."""
    log_event({
        'type': 'subscription_property',
        'property': property,
        'user': user_email,
        'stream_name': stream_name,
        'value': value,
    })
def do_change_subscription_property(user_profile: UserProfile, sub: Subscription,
                                    stream: Stream, property_name: str, value: Any
                                    ) -> None:
    """Update one per-stream subscription setting and notify the user.

    The ``is_muted`` database column and the legacy ``in_home_view`` API
    name are two views of the same flag with inverted polarity, so we
    translate in both directions here (the events API has not yet been
    migrated to the new name).
    """
    database_property_name = property_name
    event_property_name = property_name
    database_value = value
    event_value = value

    if property_name == "in_home_view":
        # API name -> database column (polarity inverted).
        database_property_name = "is_muted"
        database_value = not value
    if property_name == "is_muted":
        # Database column -> API name (polarity inverted).
        event_property_name = "in_home_view"
        event_value = not value

    setattr(sub, database_property_name, database_value)
    sub.save(update_fields=[database_property_name])
    log_subscription_property_change(user_profile.email, stream.name,
                                     database_property_name, database_value)

    send_event(user_profile.realm,
               {'type': "subscription",
                'op': "update",
                'email': user_profile.email,
                'property': event_property_name,
                'value': event_value,
                'stream_id': stream.id,
                'name': stream.name},
               [user_profile.id])
def do_change_password(user_profile: UserProfile, password: str, commit: bool=True) -> None:
    """Set the user's password (hashed), optionally saving immediately,
    and record the change in the realm audit log."""
    user_profile.set_password(password)
    if commit:
        user_profile.save(update_fields=["password"])
    RealmAuditLog.objects.create(
        realm=user_profile.realm,
        acting_user=user_profile,
        modified_user=user_profile,
        event_type=RealmAuditLog.USER_PASSWORD_CHANGED,
        event_time=timezone_now(),
    )
def do_change_full_name(user_profile: UserProfile, full_name: str,
                        acting_user: Optional[UserProfile]) -> None:
    """Rename the user, audit-log the old name, and broadcast the change.

    Bots additionally get a realm_bot event so their owners' UIs update.
    """
    old_name = user_profile.full_name
    user_profile.full_name = full_name
    user_profile.save(update_fields=["full_name"])
    RealmAuditLog.objects.create(
        realm=user_profile.realm,
        acting_user=acting_user,
        modified_user=user_profile,
        event_type=RealmAuditLog.USER_FULL_NAME_CHANGED,
        event_time=timezone_now(),
        extra_data=old_name,
    )
    payload = {'email': user_profile.email,
               'user_id': user_profile.id,
               'full_name': user_profile.full_name}
    send_event(user_profile.realm,
               dict(type='realm_user', op='update', person=payload),
               active_user_ids(user_profile.realm_id))
    if user_profile.is_bot:
        send_event(user_profile.realm,
                   dict(type='realm_bot', op='update', bot=payload),
                   bot_owner_user_ids(user_profile))
def check_change_full_name(user_profile: UserProfile, full_name_raw: str,
                           acting_user: UserProfile) -> str:
    """Validate and apply a proposed full name, returning the cleaned name.

    Permission checks are the caller's responsibility. The returned name
    may differ from the input because validation strips whitespace.
    """
    new_full_name = check_full_name(full_name_raw)
    do_change_full_name(user_profile, new_full_name, acting_user)
    return new_full_name
def check_change_bot_full_name(user_profile: UserProfile, full_name_raw: str,
                               acting_user: UserProfile) -> None:
    """Validate and apply a new name for a bot, ignoring no-op renames."""
    new_full_name = check_full_name(full_name_raw)
    if new_full_name == user_profile.full_name:
        # The web app may PATCH full_name even when the user didn't edit
        # it in the form; silently ignore such no-op requests.
        return
    check_bot_name_available(
        realm_id=user_profile.realm_id,
        full_name=new_full_name,
    )
    do_change_full_name(user_profile, new_full_name, acting_user)
def do_change_bot_owner(user_profile: UserProfile, bot_owner: UserProfile,
                        acting_user: UserProfile) -> None:
    """Transfer ownership of a bot to ``bot_owner``.

    Audit-logs the change and sends the events each affected client
    needs: a ``delete`` to a non-admin previous owner, an ``add`` to a
    non-admin new owner, and an ``update`` to everyone else who tracks
    this bot (admins see all bots, so they only ever get updates).
    """
    previous_owner = user_profile.bot_owner
    user_profile.bot_owner = bot_owner
    user_profile.save()  # Can't use update_fields because of how the foreign key works.
    event_time = timezone_now()
    RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
                                 modified_user=user_profile, event_type=RealmAuditLog.USER_BOT_OWNER_CHANGED,
                                 event_time=event_time)
    # Start from everyone who should know about this bot's state.
    update_users = bot_owner_user_ids(user_profile)
    # For admins, update event is sent instead of delete/add
    # event. bot_data of admin contains all the
    # bots and none of them should be removed/(added again).
    # Delete the bot from previous owner's bot data.
    if previous_owner and not previous_owner.is_realm_admin:
        send_event(user_profile.realm,
                   dict(type='realm_bot',
                        op="delete",
                        bot=dict(email=user_profile.email,
                                 user_id=user_profile.id,
                                 )),
                   {previous_owner.id, })
        # Do not send update event for previous bot owner.
        update_users = update_users - {previous_owner.id, }
    # Notify the new owner that the bot has been added.
    if not bot_owner.is_realm_admin:
        add_event = created_bot_event(user_profile)
        send_event(user_profile.realm, add_event, {bot_owner.id, })
        # Do not send update event for bot_owner.
        update_users = update_users - {bot_owner.id, }
    send_event(user_profile.realm,
               dict(type='realm_bot',
                    op='update',
                    bot=dict(email=user_profile.email,
                             user_id=user_profile.id,
                             owner_id=user_profile.bot_owner.id,
                             )),
               update_users)
    # Since `bot_owner_id` is included in the user profile dict we need
    # to update the users dict with the new bot owner id
    event = dict(
        type="realm_user",
        op="update",
        person=dict(
            user_id=user_profile.id,
            bot_owner_id=user_profile.bot_owner.id,
        ),
    )  # type: Dict[str, Any]
    send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def do_change_tos_version(user_profile: UserProfile, tos_version: str) -> None:
    """Record that the user has agreed to the given Terms of Service version."""
    user_profile.tos_version = tos_version
    user_profile.save(update_fields=["tos_version"])
    RealmAuditLog.objects.create(
        realm=user_profile.realm,
        acting_user=user_profile,
        modified_user=user_profile,
        event_type=RealmAuditLog.USER_TOS_VERSION_CHANGED,
        event_time=timezone_now(),
    )
def do_regenerate_api_key(user_profile: UserProfile, acting_user: UserProfile) -> str:
    """Replace the user's API key and return the new one.

    Also flushes the old key's cache entry, audit-logs the change,
    notifies bot owners for bot users, and queues a deferred job to
    clear push device tokens registered under the old key.
    """
    old_api_key = user_profile.api_key
    new_api_key = generate_api_key()
    user_profile.api_key = new_api_key
    user_profile.save(update_fields=["api_key"])
    # We need to explicitly delete the old API key from our caches,
    # because the on-save handler for flushing the UserProfile object
    # in zerver/lib/cache.py only has access to the new API key.
    cache_delete(user_profile_by_api_key_cache_key(old_api_key))
    event_time = timezone_now()
    RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
                                 modified_user=user_profile, event_type=RealmAuditLog.USER_API_KEY_CHANGED,
                                 event_time=event_time)
    if user_profile.is_bot:
        # Bot owners need the new key to keep using the bot.
        send_event(user_profile.realm,
                   dict(type='realm_bot',
                        op='update',
                        bot=dict(email=user_profile.email,
                                 user_id=user_profile.id,
                                 api_key=new_api_key,
                                 )),
                   bot_owner_user_ids(user_profile))
    # Push tokens registered with the old key are now invalid; clean
    # them up asynchronously.
    event = {'type': 'clear_push_device_tokens',
             'user_profile_id': user_profile.id}
    queue_json_publish("deferred_work", event)
    return new_api_key
def notify_avatar_url_change(user_profile: UserProfile) -> None:
    """Broadcast the user's current avatar URLs to clients in the realm."""
    if user_profile.is_bot:
        send_event(user_profile.realm,
                   dict(type='realm_bot',
                        op='update',
                        bot=dict(email=user_profile.email,
                                 user_id=user_profile.id,
                                 avatar_url=avatar_url(user_profile),
                                 )),
                   bot_owner_user_ids(user_profile))

    payload = {
        'email': user_profile.email,
        'avatar_source': user_profile.avatar_source,
        'avatar_url': avatar_url(user_profile),
        'avatar_url_medium': avatar_url(user_profile, medium=True),
        'user_id': user_profile.id,
    }
    send_event(user_profile.realm,
               dict(type='realm_user',
                    op='update',
                    person=payload),
               active_user_ids(user_profile.realm_id))
def do_change_avatar_fields(user_profile: UserProfile, avatar_source: str) -> None:
    """Switch the user's avatar source, bump the cache-busting version,
    audit-log the change, and notify clients."""
    user_profile.avatar_source = avatar_source
    user_profile.avatar_version += 1
    user_profile.save(update_fields=["avatar_source", "avatar_version"])
    RealmAuditLog.objects.create(
        realm=user_profile.realm,
        modified_user=user_profile,
        event_type=RealmAuditLog.USER_AVATAR_SOURCE_CHANGED,
        extra_data={'avatar_source': avatar_source},
        event_time=timezone_now(),
    )
    notify_avatar_url_change(user_profile)
def do_delete_avatar_image(user: UserProfile) -> None:
    """Remove the user's uploaded avatar: revert them to Gravatar (which
    also notifies clients), then delete the stored image."""
    do_change_avatar_fields(user, UserProfile.AVATAR_FROM_GRAVATAR)
    delete_avatar_image(user)
def do_change_icon_source(realm: Realm, icon_source: str, log: bool=True) -> None:
    """Change where the realm icon comes from and broadcast the new URL."""
    realm.icon_source = icon_source
    realm.icon_version += 1  # cache-busting version bump
    realm.save(update_fields=["icon_source", "icon_version"])

    if log:
        log_event({'type': 'realm_change_icon',
                   'realm': realm.string_id,
                   'icon_source': icon_source})

    event = dict(type='realm',
                 op='update_dict',
                 property="icon",
                 data=dict(icon_source=realm.icon_source,
                           icon_url=realm_icon_url(realm)))
    send_event(realm, event, active_user_ids(realm.id))
def do_change_logo_source(realm: Realm, logo_source: str, night: bool) -> None:
    """Change the realm's day or night-theme logo source and notify clients."""
    if night:
        realm.night_logo_source = logo_source
        realm.night_logo_version += 1
        realm.save(update_fields=["night_logo_source", "night_logo_version"])
    else:
        realm.logo_source = logo_source
        realm.logo_version += 1
        realm.save(update_fields=["logo_source", "logo_version"])

    RealmAuditLog.objects.create(event_type=RealmAuditLog.REALM_LOGO_CHANGED,
                                 realm=realm, event_time=timezone_now())

    send_event(realm,
               dict(type='realm',
                    op='update_dict',
                    property="night_logo" if night else "logo",
                    data=get_realm_logo_data(realm, night)),
               active_user_ids(realm.id))
def do_change_plan_type(realm: Realm, plan_type: int) -> None:
    """Switch the realm's billing plan and apply that plan's limits
    (daily invite cap, message-history visibility, upload quota)."""
    old_value = realm.plan_type
    realm.plan_type = plan_type
    realm.save(update_fields=['plan_type'])
    RealmAuditLog.objects.create(event_type=RealmAuditLog.REALM_PLAN_TYPE_CHANGED,
                                 realm=realm, event_time=timezone_now(),
                                 extra_data={'old_value': old_value, 'new_value': plan_type})
    if plan_type == Realm.STANDARD:
        realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX
        realm.message_visibility_limit = None
        realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD
    elif plan_type == Realm.STANDARD_FREE:
        realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX
        realm.message_visibility_limit = None
        realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD
    elif plan_type == Realm.LIMITED:
        realm.max_invites = settings.INVITES_DEFAULT_REALM_DAILY_MAX
        realm.message_visibility_limit = Realm.MESSAGE_VISIBILITY_LIMITED
        realm.upload_quota_gb = Realm.UPLOAD_QUOTA_LIMITED
        # Older messages may become hidden under the new limit.
        update_first_visible_message_id(realm)
    # NOTE(review): we assign realm.max_invites above but save
    # '_max_invites' here — presumably max_invites is a property backed
    # by a _max_invites column; confirm against the Realm model.
    # NOTE(review): an unrecognized plan_type falls through and saves
    # the previous limit values unchanged — confirm that's intended.
    realm.save(update_fields=['_max_invites', 'message_visibility_limit', 'upload_quota_gb'])
    event = {'type': 'realm', 'op': 'update', 'property': 'plan_type', 'value': plan_type,
             'extra_data': {'upload_quota': realm.upload_quota_bytes()}}
    send_event(realm, event, active_user_ids(realm.id))
def do_change_default_sending_stream(user_profile: UserProfile, stream: Optional[Stream],
                                     log: bool=True) -> None:
    """Set (or clear, with None) the stream a user's messages default to."""
    user_profile.default_sending_stream = stream
    user_profile.save(update_fields=['default_sending_stream'])

    if log:
        log_event({'type': 'user_change_default_sending_stream',
                   'user': user_profile.email,
                   'stream': str(stream)})

    if user_profile.is_bot:
        stream_name = stream.name if stream else None  # type: Optional[str]
        send_event(user_profile.realm,
                   dict(type='realm_bot',
                        op='update',
                        bot=dict(email=user_profile.email,
                                 user_id=user_profile.id,
                                 default_sending_stream=stream_name,
                                 )),
                   bot_owner_user_ids(user_profile))
def do_change_default_events_register_stream(user_profile: UserProfile,
                                             stream: Optional[Stream],
                                             log: bool=True) -> None:
    """Set (or clear, with None) the stream a bot registers for events on."""
    user_profile.default_events_register_stream = stream
    user_profile.save(update_fields=['default_events_register_stream'])

    if log:
        log_event({'type': 'user_change_default_events_register_stream',
                   'user': user_profile.email,
                   'stream': str(stream)})

    if user_profile.is_bot:
        stream_name = stream.name if stream else None  # type: Optional[str]
        send_event(user_profile.realm,
                   dict(type='realm_bot',
                        op='update',
                        bot=dict(email=user_profile.email,
                                 user_id=user_profile.id,
                                 default_events_register_stream=stream_name,
                                 )),
                   bot_owner_user_ids(user_profile))
def do_change_default_all_public_streams(user_profile: UserProfile, value: bool,
                                         log: bool=True) -> None:
    """Toggle whether the user (typically a bot) follows all public streams."""
    user_profile.default_all_public_streams = value
    user_profile.save(update_fields=['default_all_public_streams'])

    if log:
        log_event({'type': 'user_change_default_all_public_streams',
                   'user': user_profile.email,
                   'value': str(value)})

    if user_profile.is_bot:
        bot_payload = dict(email=user_profile.email,
                           user_id=user_profile.id,
                           default_all_public_streams=user_profile.default_all_public_streams,
                           )
        send_event(user_profile.realm,
                   dict(type='realm_bot', op='update', bot=bot_payload),
                   bot_owner_user_ids(user_profile))
def do_change_is_admin(user_profile: UserProfile, value: bool,
                       permission: str='administer') -> None:
    """Grant (value=True) or revoke the given permission for the user.

    ``permission`` is either "administer" (toggles the realm-admin role)
    or "api_super_user"; any other value raises AssertionError. Role
    changes are audit-logged; both paths broadcast an is_admin update.
    """
    # TODO: This function and do_change_is_guest should be merged into
    # a single do_change_user_role function in a future refactor.
    if permission == "administer":
        old_value = user_profile.role
        if value:
            user_profile.role = UserProfile.ROLE_REALM_ADMINISTRATOR
        else:
            user_profile.role = UserProfile.ROLE_MEMBER
        user_profile.save(update_fields=["role"])
        RealmAuditLog.objects.create(
            realm=user_profile.realm, modified_user=user_profile,
            event_type=RealmAuditLog.USER_ROLE_CHANGED, event_time=timezone_now(),
            extra_data=ujson.dumps({
                RealmAuditLog.OLD_VALUE: old_value,
                # Log the role actually assigned; previously this
                # unconditionally recorded ROLE_REALM_ADMINISTRATOR,
                # which was wrong when revoking admin rights.
                RealmAuditLog.NEW_VALUE: user_profile.role,
                RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
            }))
    elif permission == "api_super_user":
        user_profile.is_api_super_user = value
        user_profile.save(update_fields=["is_api_super_user"])
    else:
        raise AssertionError("Invalid admin permission")
    event = dict(type="realm_user", op="update",
                 person=dict(email=user_profile.email,
                             user_id=user_profile.id,
                             is_admin=value))
    send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def do_change_is_guest(user_profile: UserProfile, value: bool) -> None:
    """Grant (value=True) or revoke guest status for the user, with an
    audit-log entry and a realm-wide is_guest update event."""
    # TODO: This function and do_change_is_admin should be merged into
    # a single do_change_user_role function in a future refactor.
    old_value = user_profile.role
    if value:
        user_profile.role = UserProfile.ROLE_GUEST
    else:
        user_profile.role = UserProfile.ROLE_MEMBER
    user_profile.save(update_fields=["role"])
    RealmAuditLog.objects.create(
        realm=user_profile.realm, modified_user=user_profile,
        event_type=RealmAuditLog.USER_ROLE_CHANGED, event_time=timezone_now(),
        extra_data=ujson.dumps({
            RealmAuditLog.OLD_VALUE: old_value,
            # Log the role actually assigned; previously this
            # unconditionally recorded ROLE_GUEST, which was wrong when
            # revoking guest status (role becomes ROLE_MEMBER).
            RealmAuditLog.NEW_VALUE: user_profile.role,
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
        }))
    event = dict(type="realm_user", op="update",
                 person=dict(email=user_profile.email,
                             user_id=user_profile.id,
                             is_guest=value))
    send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def do_change_stream_invite_only(stream: Stream, invite_only: bool,
                                 history_public_to_subscribers: Optional[bool]=None) -> None:
    """Toggle a stream between public and invite-only, resolving the
    history-visibility policy, and notify users who can see the stream."""
    history_public_to_subscribers = get_default_value_for_history_public_to_subscribers(
        stream.realm,
        invite_only,
        history_public_to_subscribers
    )
    stream.invite_only = invite_only
    stream.history_public_to_subscribers = history_public_to_subscribers
    stream.save(update_fields=['invite_only', 'history_public_to_subscribers'])
    send_event(stream.realm,
               dict(op="update",
                    type="stream",
                    property="invite_only",
                    value=invite_only,
                    history_public_to_subscribers=history_public_to_subscribers,
                    stream_id=stream.id,
                    name=stream.name),
               can_access_stream_user_ids(stream))
def do_change_stream_web_public(stream: Stream, is_web_public: bool) -> None:
    """Persist whether the stream is web-public (no client event is sent here)."""
    stream.is_web_public = is_web_public
    stream.save(update_fields=['is_web_public'])
def do_change_stream_announcement_only(stream: Stream, is_announcement_only: bool) -> None:
    """Restrict (or unrestrict) posting on the stream and notify viewers."""
    stream.is_announcement_only = is_announcement_only
    stream.save(update_fields=['is_announcement_only'])
    send_event(stream.realm,
               dict(op="update",
                    type="stream",
                    property="is_announcement_only",
                    value=is_announcement_only,
                    stream_id=stream.id,
                    name=stream.name),
               can_access_stream_user_ids(stream))
def do_rename_stream(stream: Stream,
                     new_name: str,
                     user_profile: UserProfile,
                     log: bool=True) -> Dict[str, str]:
    """Rename a stream: update the row, fix up caches, notify clients,
    and announce the rename inside the stream itself.

    Returns a dict containing the stream's new email forwarding address,
    which the web client needs to refresh its display.
    """
    old_name = stream.name
    stream.name = new_name
    stream.save(update_fields=["name"])
    if log:
        log_event({'type': 'stream_name_change',
                   'realm': stream.realm.string_id,
                   'new_name': new_name})
    recipient = get_stream_recipient(stream.id)
    messages = Message.objects.filter(recipient=recipient).only("id")
    # Update the display recipient and stream, which are easy single
    # items to set.
    old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
    new_cache_key = get_stream_cache_key(stream.name, stream.realm_id)
    if old_cache_key != new_cache_key:
        cache_delete(old_cache_key)
        cache_set(new_cache_key, stream)
    cache_set(display_recipient_cache_key(recipient.id), stream.name)
    # Delete cache entries for everything else, which is cheaper and
    # clearer than trying to set them. display_recipient is the out of
    # date field in all cases.
    cache_delete_many(
        to_dict_cache_key_id(message.id) for message in messages)
    new_email = encode_email_address(stream, show_sender=True)
    # We will tell our users to essentially
    # update stream.name = new_name where name = old_name
    # and update stream.email = new_email where name = old_name.
    # We could optimize this by trying to send one message, but the
    # client code really wants one property update at a time, and
    # updating stream names is a pretty infrequent operation.
    # More importantly, we want to key these updates by id, not name,
    # since id is the immutable primary key, and obviously name is not.
    data_updates = [
        ['email_address', new_email],
        ['name', new_name],
    ]
    for property, value in data_updates:
        event = dict(
            op="update",
            type="stream",
            property=property,
            value=value,
            stream_id=stream.id,
            name=old_name,
        )
        send_event(stream.realm, event, can_access_stream_user_ids(stream))
    # Announce the rename in the stream via the notification bot.
    sender = get_system_bot(settings.NOTIFICATION_BOT)
    internal_send_stream_message(
        stream.realm,
        sender,
        stream,
        Realm.STREAM_EVENTS_NOTIFICATION_TOPIC,
        _('@_**%(user_name)s|%(user_id)d** renamed stream **%(old_stream_name)s** to '
          '**%(new_stream_name)s**.') % {
            'user_name': user_profile.full_name,
            'user_id': user_profile.id,
            'old_stream_name': old_name,
            'new_stream_name': new_name}
    )
    # Even though the token doesn't change, the web client needs to update the
    # email forwarding address to display the correctly-escaped new name.
    return {"email_address": new_email}
def do_change_stream_description(stream: Stream, new_description: str) -> None:
    """Update a stream's description (raw and rendered) and notify viewers."""
    stream.description = new_description
    stream.rendered_description = render_stream_description(new_description)
    stream.save(update_fields=['description', 'rendered_description'])
    send_event(stream.realm,
               dict(type='stream',
                    op='update',
                    property='description',
                    name=stream.name,
                    stream_id=stream.id,
                    value=new_description,
                    rendered_description=stream.rendered_description),
               can_access_stream_user_ids(stream))
def do_create_realm(string_id: str, name: str,
                    emails_restricted_to_domains: Optional[bool]=None) -> Realm:
    """Create a new realm with its two initial streams and announce it.

    Raises AssertionError if a realm with this subdomain already exists.
    Returns the saved Realm.
    """
    if Realm.objects.filter(string_id=string_id).exists():
        raise AssertionError("Realm %s already exists!" % (string_id,))
    kwargs = {}  # type: Dict[str, Any]
    if emails_restricted_to_domains is not None:
        kwargs['emails_restricted_to_domains'] = emails_restricted_to_domains
    realm = Realm(string_id=string_id, name=name, **kwargs)
    realm.save()
    # Create stream once Realm object has been saved
    notifications_stream = ensure_stream(
        realm, Realm.DEFAULT_NOTIFICATION_STREAM_NAME,
        stream_description="Everyone is added to this stream by default. Welcome! :octopus:")
    realm.notifications_stream = notifications_stream
    # With the current initial streams situation, the only public
    # stream is the notifications_stream.
    DefaultStream.objects.create(stream=notifications_stream, realm=realm)
    signup_notifications_stream = ensure_stream(
        realm, Realm.INITIAL_PRIVATE_STREAM_NAME, invite_only=True,
        stream_description="A private stream for core team members.")
    realm.signup_notifications_stream = signup_notifications_stream
    realm.save(update_fields=['notifications_stream', 'signup_notifications_stream'])
    # Billing-enabled servers start new realms on the limited plan.
    if settings.BILLING_ENABLED:
        do_change_plan_type(realm, Realm.LIMITED)
    # Log the event
    log_event({"type": "realm_created",
               "string_id": string_id,
               "emails_restricted_to_domains": emails_restricted_to_domains})
    # Send a notification to the admin realm
    signup_message = "Signups enabled"
    admin_realm = get_system_bot(settings.NOTIFICATION_BOT).realm
    internal_send_message(admin_realm, settings.NOTIFICATION_BOT, "stream",
                          "signups", realm.display_subdomain, signup_message)
    return realm
def do_change_notification_settings(user_profile: UserProfile, name: str,
                                    value: Union[bool, int, str], log: bool=True) -> None:
    """Update one global notification preference for the user.

    ``name`` must be a key of UserProfile.notification_setting_types;
    the value's type is checked against that registry before saving.
    """
    notification_setting_type = UserProfile.notification_setting_types[name]
    assert isinstance(value, notification_setting_type), (
        'Cannot update %s: %s is not an instance of %s' % (
            name, value, notification_setting_type,))

    setattr(user_profile, name, value)
    if name == 'enable_digest_emails' and not value:
        # Disabling digest emails should clear a user's email queue
        clear_scheduled_emails([user_profile.id], ScheduledEmail.DIGEST)
    user_profile.save(update_fields=[name])

    event = {'type': 'update_global_notifications',
             'user': user_profile.email,
             'notification_name': name,
             'setting': value}
    if log:
        log_event(event)
    send_event(user_profile.realm, event, [user_profile.id])
def do_change_enter_sends(user_profile: UserProfile, enter_sends: bool) -> None:
    """Persist whether pressing Enter sends the composed message (no event sent)."""
    user_profile.enter_sends = enter_sends
    user_profile.save(update_fields=["enter_sends"])
def do_set_user_display_setting(user_profile: UserProfile,
                                setting_name: str,
                                setting_value: Union[bool, str, int]) -> None:
    """Update one display setting and notify the user; timezone changes
    are additionally broadcast realm-wide, since other users see them."""
    property_type = UserProfile.property_types[setting_name]
    assert isinstance(setting_value, property_type)

    setattr(user_profile, setting_name, setting_value)
    user_profile.save(update_fields=[setting_name])

    event = {'type': 'update_display_settings',
             'user': user_profile.email,
             'setting_name': setting_name,
             'setting': setting_value}
    if setting_name == "default_language":
        assert isinstance(setting_value, str)
        event['language_name'] = get_language_name(setting_value)
    send_event(user_profile.realm, event, [user_profile.id])

    if setting_name == "timezone":
        payload = dict(email=user_profile.email,
                       user_id=user_profile.id,
                       timezone=user_profile.timezone)
        send_event(user_profile.realm,
                   dict(type='realm_user', op='update', person=payload),
                   active_user_ids(user_profile.realm_id))
def lookup_default_stream_groups(default_stream_group_names: List[str],
                                 realm: Realm) -> List[DefaultStreamGroup]:
    """Resolve group names to DefaultStreamGroup rows for this realm,
    raising JsonableError on any unknown name."""
    groups = []
    for group_name in default_stream_group_names:
        try:
            groups.append(
                DefaultStreamGroup.objects.get(name=group_name, realm=realm))
        except DefaultStreamGroup.DoesNotExist:
            raise JsonableError(_('Invalid default stream group %s') % (group_name,))
    return groups
def notify_default_streams(realm: Realm) -> None:
    """Push the realm's current default-stream list to non-guest users."""
    default_streams = get_default_streams_for_realm(realm.id)
    event = {
        'type': "default_streams",
        'default_streams': streams_to_dicts_sorted(default_streams),
    }
    send_event(realm, event, active_non_guest_user_ids(realm.id))
def notify_default_stream_groups(realm: Realm) -> None:
    """Push the realm's default-stream-group list to non-guest users."""
    groups = get_default_stream_groups(realm)
    event = {
        'type': "default_stream_groups",
        'default_stream_groups': default_stream_groups_to_dicts_sorted(groups),
    }
    send_event(realm, event, active_non_guest_user_ids(realm.id))
def do_add_default_stream(stream: Stream) -> None:
    """Mark a stream as a realm default, notifying clients; no-op if it
    already is one."""
    realm_id = stream.realm_id
    stream_id = stream.id
    if DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).exists():
        return
    DefaultStream.objects.create(realm_id=realm_id, stream_id=stream_id)
    notify_default_streams(stream.realm)
def do_remove_default_stream(stream: Stream) -> None:
    """Remove a stream from the realm's defaults and notify clients."""
    DefaultStream.objects.filter(
        realm_id=stream.realm_id, stream_id=stream.id).delete()
    notify_default_streams(stream.realm)
def do_create_default_stream_group(realm: Realm, group_name: str,
                                   description: str, streams: List[Stream]) -> None:
    """Create a named group of streams new users can opt into.

    Rejects streams that are already realm-wide defaults, invalid group
    names, and duplicate group names.
    """
    default_streams = get_default_streams_for_realm(realm.id)
    for stream in streams:
        if stream in default_streams:
            raise JsonableError(_(
                "'%(stream_name)s' is a default stream and cannot be added to '%(group_name)s'")
                % {'stream_name': stream.name, 'group_name': group_name})

    check_default_stream_group_name(group_name)
    (group, created) = DefaultStreamGroup.objects.get_or_create(
        name=group_name, realm=realm, description=description)
    if not created:
        raise JsonableError(_("Default stream group '%(group_name)s' already exists")
                            % {'group_name': group_name})

    group.streams.set(streams)
    notify_default_stream_groups(realm)
def do_add_streams_to_default_stream_group(realm: Realm, group: DefaultStreamGroup,
                                           streams: List[Stream]) -> None:
    """Add streams to an existing default stream group, validating each
    stream (not a realm default, not already in the group) as it is added."""
    default_streams = get_default_streams_for_realm(realm.id)
    for stream in streams:
        if stream in default_streams:
            raise JsonableError(_(
                "'%(stream_name)s' is a default stream and cannot be added to '%(group_name)s'")
                % {'stream_name': stream.name, 'group_name': group.name})
        if stream in group.streams.all():
            raise JsonableError(_(
                "Stream '%(stream_name)s' is already present in default stream group '%(group_name)s'")
                % {'stream_name': stream.name, 'group_name': group.name})
        group.streams.add(stream)
    group.save()
    notify_default_stream_groups(realm)
def do_remove_streams_from_default_stream_group(realm: Realm, group: DefaultStreamGroup,
                                                streams: List[Stream]) -> None:
    """Remove streams from a default stream group, rejecting any stream
    that is not currently in the group."""
    for stream in streams:
        if stream not in group.streams.all():
            raise JsonableError(_(
                "Stream '%(stream_name)s' is not present in default stream group '%(group_name)s'")
                % {'stream_name': stream.name, 'group_name': group.name})
        group.streams.remove(stream)
    group.save()
    notify_default_stream_groups(realm)
def do_change_default_stream_group_name(realm: Realm, group: DefaultStreamGroup,
                                        new_group_name: str) -> None:
    """Rename a default stream group, rejecting no-op renames and name
    collisions with existing groups in the realm."""
    if group.name == new_group_name:
        raise JsonableError(_("This default stream group is already named '%s'") % (new_group_name,))
    if DefaultStreamGroup.objects.filter(name=new_group_name, realm=realm).exists():
        raise JsonableError(_("Default stream group '%s' already exists") % (new_group_name,))
    group.name = new_group_name
    group.save()
    notify_default_stream_groups(realm)
def do_change_default_stream_group_description(realm: Realm, group: DefaultStreamGroup,
                                               new_description: str) -> None:
    """Update a default stream group's description and notify clients."""
    group.description = new_description
    group.save()
    notify_default_stream_groups(realm)
def do_remove_default_stream_group(realm: Realm, group: DefaultStreamGroup) -> None:
    """Delete a default stream group entirely and notify clients."""
    group.delete()
    notify_default_stream_groups(realm)
def get_default_streams_for_realm(realm_id: int) -> List[Stream]:
    """Return the realm's default streams, prefetching the stream and
    realm rows to avoid per-row queries."""
    rows = DefaultStream.objects.select_related(
        "stream", "stream__realm").filter(realm_id=realm_id)
    return [row.stream for row in rows]
def get_default_subs(user_profile: UserProfile) -> List[Stream]:
    """Return the default streams a new user should be subscribed to."""
    # Right now default streams are realm-wide. This wrapper gives us flexibility
    # to some day further customize how we set up default streams for new users.
    return get_default_streams_for_realm(user_profile.realm_id)
def streams_to_dicts_sorted(streams: List[Stream]) -> List[Dict[str, Any]]:
    """Convert streams to JSON-serializable dicts, sorted by stream name."""
    dicts = [stream.to_dict() for stream in streams]
    dicts.sort(key=lambda elt: elt["name"])
    return dicts
def default_stream_groups_to_dicts_sorted(groups: List[DefaultStreamGroup]) -> List[Dict[str, Any]]:
    """Convert default stream groups to dicts, sorted by group name."""
    dicts = [group.to_dict() for group in groups]
    dicts.sort(key=lambda elt: elt["name"])
    return dicts
def do_update_user_activity_interval(user_profile: UserProfile,
                                     log_time: datetime.datetime) -> None:
    """Extend the user's most recent activity interval to cover
    [log_time, log_time + MIN_INTERVAL_LENGTH], or create a fresh
    interval if the two don't overlap."""
    effective_end = log_time + UserActivityInterval.MIN_INTERVAL_LENGTH
    # This code isn't perfect, because with various races we might end
    # up creating two overlapping intervals, but that shouldn't happen
    # often, and can be corrected for in post-processing
    try:
        # Most recent interval by end time; IndexError if none exist.
        last = UserActivityInterval.objects.filter(user_profile=user_profile).order_by("-end")[0]
        # There are two ways our intervals could overlap:
        # (1) The start of the new interval could be inside the old interval
        # (2) The end of the new interval could be inside the old interval
        # In either case, we just extend the old interval to include the new interval.
        if ((log_time <= last.end and log_time >= last.start) or
            (effective_end <= last.end and effective_end >= last.start)):
            last.end = max(last.end, effective_end)
            last.start = min(last.start, log_time)
            last.save(update_fields=["start", "end"])
            return
    except IndexError:
        pass
    # Otherwise, the intervals don't overlap, so we should make a new one
    UserActivityInterval.objects.create(user_profile=user_profile, start=log_time,
                                        end=effective_end)
@statsd_increment('user_activity')
def do_update_user_activity(user_profile_id: int,
                            client_id: int,
                            query: str,
                            count: int,
                            log_time: datetime.datetime) -> None:
    """Upsert a UserActivity row for (user, client, query), accumulating
    the request count and advancing the last-visit timestamp."""
    (activity, created) = UserActivity.objects.get_or_create(
        user_profile_id=user_profile_id,
        client_id=client_id,
        query=query,
        defaults={'last_visit': log_time, 'count': count})

    if created:
        # A fresh row already carries the defaults above.
        return
    activity.count += count
    activity.last_visit = log_time
    activity.save(update_fields=["last_visit", "count"])
def send_presence_changed(user_profile: UserProfile, presence: UserPresence) -> None:
    """Broadcast the user's updated presence (for one client) to the realm."""
    presence_dict = presence.to_dict()
    send_event(user_profile.realm,
               dict(type="presence",
                    email=user_profile.email,
                    server_timestamp=time.time(),
                    presence={presence_dict['client']: presence_dict}),
               active_user_ids(user_profile.realm_id))
def consolidate_client(client: Client) -> Client:
    """Fold the desktop app's client into 'website' for activity stats.

    The web app reports as 'website' while the desktop app reports as
    ZulipDesktop (via a custom user agent); we want both counted as web
    users, so ZulipDesktop is aliased to website. Other clients pass
    through unchanged.
    """
    if client.name in ['ZulipDesktop']:
        return get_client('website')
    return client
@statsd_increment('user_presence')
def do_update_user_presence(user_profile: UserProfile,
                            client: Client,
                            log_time: datetime.datetime,
                            status: int) -> None:
    """Record a presence ping from (user, client) and maybe notify the realm.

    `status` is a UserPresence status constant (ACTIVE/IDLE).  Creates the
    per-(user, client) UserPresence row on first sight; otherwise updates its
    timestamp (and status, when it changed) subject to the anti-flap rule
    documented below.
    """
    # Alias equivalent clients (e.g. ZulipDesktop -> website) before lookup.
    client = consolidate_client(client)
    (presence, created) = UserPresence.objects.get_or_create(
        user_profile = user_profile,
        client = client,
        defaults = {'timestamp': log_time,
                    'status': status})
    # "Stale" means the last ping from this client is older than 70s.
    stale_status = (log_time - presence.timestamp) > datetime.timedelta(minutes=1, seconds=10)
    was_idle = presence.status == UserPresence.IDLE
    became_online = (status == UserPresence.ACTIVE) and (stale_status or was_idle)
    # If an object was created, it has already been saved.
    #
    # We suppress changes from ACTIVE to IDLE before stale_status is reached;
    # this protects us from the user having two clients open: one active, the
    # other idle. Without this check, we would constantly toggle their status
    # between the two states.
    #
    # NOTE(review): `and` binds tighter than `or`, so this condition parses as
    # ((not created and stale_status) or was_idle or status == presence.status)
    # — i.e. the `not created` guard does NOT cover the last two clauses.
    # This matches the comment's intent for the stale case, but confirm the
    # precedence is deliberate before restructuring.
    if not created and stale_status or was_idle or status == presence.status:
        # The following block attempts to only update the "status"
        # field in the event that it actually changed. This is
        # important to avoid flushing the UserPresence cache when the
        # data it would return to a client hasn't actually changed
        # (see the UserPresence post_save hook for details).
        presence.timestamp = log_time
        update_fields = ["timestamp"]
        if presence.status != status:
            presence.status = status
            update_fields.append("status")
        presence.save(update_fields=update_fields)
    if not user_profile.realm.presence_disabled and (created or became_online):
        # Push event to all users in the realm so they see the new user
        # appear in the presence list immediately, or the newly online
        # user without delay. Note that we won't send an update here for a
        # timestamp update, because we rely on the browser to ping us every 50
        # seconds for realm-wide status updates, and those updates should have
        # recent timestamps, which means the browser won't think active users
        # have gone idle. If we were more aggressive in this function about
        # sending timestamp updates, we could eliminate the ping responses, but
        # that's not a high priority for now, considering that most of our non-MIT
        # realms are pretty small.
        send_presence_changed(user_profile, presence)
def update_user_activity_interval(user_profile: UserProfile, log_time: datetime.datetime) -> None:
    """Queue an event recording that user_profile was active at log_time."""
    queue_json_publish("user_activity_interval", {
        'user_profile_id': user_profile.id,
        'time': datetime_to_timestamp(log_time),
    })
def update_user_presence(user_profile: UserProfile, client: Client, log_time: datetime.datetime,
                         status: int, new_user_input: bool) -> None:
    """Queue a presence update; on genuine user input, also log an activity interval."""
    payload = {
        'user_profile_id': user_profile.id,
        'status': status,
        'time': datetime_to_timestamp(log_time),
        'client': client.name,
    }
    queue_json_publish("user_presence", payload)
    if new_user_input:
        update_user_activity_interval(user_profile, log_time)
def do_update_pointer(user_profile: UserProfile, client: Client,
                      pointer: int, update_flags: bool=False) -> None:
    """Move the user's pointer to `pointer` and notify their clients.

    When update_flags is True (legacy Android compatibility), additionally
    marks all messages between the old and new pointer as read and clears
    their active mobile push notifications.
    """
    prev_pointer = user_profile.pointer
    user_profile.pointer = pointer
    user_profile.save(update_fields=["pointer"])
    if update_flags: # nocoverage
        # This block of code is compatibility code for the
        # legacy/original Zulip Android app natively. It's a shim
        # that will mark as read any messages up until the pointer
        # move; we expect to remove this feature entirely before long,
        # when we drop support for the old Android app entirely.
        #
        # Collect, *before* the update below, the ids that still have an
        # active push notification so we can clear them afterwards.
        app_message_ids = UserMessage.objects.filter(
            user_profile=user_profile,
            message__id__gt=prev_pointer,
            message__id__lte=pointer).extra(where=[
                UserMessage.where_unread(),
                UserMessage.where_active_push_notification(),
            ]).values_list("message_id", flat=True)
        UserMessage.objects.filter(user_profile=user_profile,
                                   message__id__gt=prev_pointer,
                                   message__id__lte=pointer).extra(where=[UserMessage.where_unread()]) \
                           .update(flags=F('flags').bitor(UserMessage.flags.read))
        do_clear_mobile_push_notifications_for_ids(user_profile, app_message_ids)
    event = dict(type='pointer', pointer=pointer)
    send_event(user_profile.realm, event, [user_profile.id])
def do_update_user_status(user_profile: UserProfile,
                          away: Optional[bool],
                          status_text: Optional[str],
                          client_id: int) -> None:
    """Persist the user's away flag / status text and notify active realm users.

    A None value for `away` or `status_text` means "leave that field out of
    the event"; note that away=None also maps to UserStatus.NORMAL in storage.
    """
    status = UserStatus.AWAY if away else UserStatus.NORMAL
    update_user_status(
        user_profile_id=user_profile.id,
        status=status,
        status_text=status_text,
        client_id=client_id,
    )
    realm = user_profile.realm
    event = dict(type='user_status', user_id=user_profile.id)
    # Only include the fields the caller actually supplied.
    for key, value in (('away', away), ('status_text', status_text)):
        if value is not None:
            event[key] = value
    send_event(realm, event, active_user_ids(realm.id))
def do_mark_all_as_read(user_profile: UserProfile, client: Client) -> int:
    """Mark every unread message for user_profile as read ("bankruptcy").

    Returns the number of UserMessage rows updated.  Also clears active
    mobile push notifications for up to 10000 affected messages.
    """
    log_statsd_event('bankruptcy')
    msgs = UserMessage.objects.filter(
        user_profile=user_profile
    ).extra(
        where=[UserMessage.where_unread()]
    )
    count = msgs.update(
        flags=F('flags').bitor(UserMessage.flags.read)
    )
    event = dict(
        type='update_message_flags',
        operation='add',
        flag='read',
        messages=[],  # we don't send messages, since the client reloads anyway
        all=True
    )
    send_event(user_profile.realm, event, [user_profile.id])
    statsd.incr("mark_all_as_read", count)
    # Bound the push-notification cleanup work; 10000 is an arbitrary cap.
    all_push_message_ids = UserMessage.objects.filter(
        user_profile=user_profile,
    ).extra(
        where=[UserMessage.where_active_push_notification()],
    ).values_list("message_id", flat=True)[0:10000]
    do_clear_mobile_push_notifications_for_ids(user_profile, all_push_message_ids)
    return count
def do_mark_stream_messages_as_read(user_profile: UserProfile,
                                    client: Client,
                                    stream: Stream,
                                    topic_name: Optional[str]=None) -> int:
    """Mark the user's unread messages in `stream` (optionally one topic) as read.

    Returns the number of UserMessage rows updated; notifies the user's
    clients and clears mobile push notifications for the affected messages.
    """
    log_statsd_event('mark_stream_as_read')
    msgs = UserMessage.objects.filter(
        user_profile=user_profile
    )
    recipient = get_stream_recipient(stream.id)
    msgs = msgs.filter(message__recipient=recipient)
    if topic_name:
        msgs = filter_by_topic_name_via_message(
            query=msgs,
            topic_name=topic_name,
        )
    msgs = msgs.extra(
        where=[UserMessage.where_unread()]
    )
    # Snapshot the ids before the update clears the unread flag.
    message_ids = list(msgs.values_list('message__id', flat=True))
    count = msgs.update(
        flags=F('flags').bitor(UserMessage.flags.read)
    )
    event = dict(
        type='update_message_flags',
        operation='add',
        flag='read',
        messages=message_ids,
        all=False,
    )
    send_event(user_profile.realm, event, [user_profile.id])
    do_clear_mobile_push_notifications_for_ids(user_profile, message_ids)
    statsd.incr("mark_stream_as_read", count)
    return count
def do_clear_mobile_push_notifications_for_ids(user_profile: UserProfile,
                                               message_ids: List[int]) -> None:
    """Queue "remove" push-notification events for the given message ids.

    Only ids that still have an active push notification are considered.
    The first few ids are sent as individual events for compatibility with
    older clients (pre-2019-02-13) that only read the first id per event;
    the remainder are batched into a single event.
    """
    filtered_message_ids = list(UserMessage.objects.filter(
        message_id__in=message_ids,
        user_profile=user_profile,
    ).extra(
        where=[UserMessage.where_active_push_notification()],
    ).values_list('message_id', flat=True))
    num_detached = settings.MAX_UNBATCHED_REMOVE_NOTIFICATIONS - 1
    for message_id in filtered_message_ids[:num_detached]:
        # Older clients (all clients older than 2019-02-13) will only
        # see the first message ID in a given notification-message.
        # To help them out, send a few of these separately.
        queue_json_publish("missedmessage_mobile_notifications", {
            "type": "remove",
            "user_profile_id": user_profile.id,
            "message_ids": [message_id],
        })
    if filtered_message_ids[num_detached:]:
        queue_json_publish("missedmessage_mobile_notifications", {
            "type": "remove",
            "user_profile_id": user_profile.id,
            "message_ids": filtered_message_ids[num_detached:],
        })
def do_update_message_flags(user_profile: UserProfile,
                            client: Client,
                            operation: str,
                            flag: str,
                            messages: List[int]) -> int:
    """Add or remove a per-user message flag (e.g. 'read', 'starred').

    `operation` is 'add' or 'remove'.  Raises JsonableError for invalid or
    non-editable flags, or for invalid message ids.  Returns the number of
    UserMessage rows updated, and notifies the user's clients.
    """
    valid_flags = [item for item in UserMessage.flags
                   if item not in UserMessage.NON_API_FLAGS]
    if flag not in valid_flags:
        raise JsonableError(_("Invalid flag: '%s'") % (flag,))
    if flag in UserMessage.NON_EDITABLE_FLAGS:
        raise JsonableError(_("Flag not editable: '%s'") % (flag,))
    flagattr = getattr(UserMessage.flags, flag)
    assert messages is not None
    msgs = UserMessage.objects.filter(user_profile=user_profile,
                                      message__id__in=messages)
    # This next block allows you to star any message, even those you
    # didn't receive (e.g. because you're looking at a public stream
    # you're not subscribed to, etc.).  The problem is that starring
    # is a flag boolean on UserMessage, and UserMessage rows are
    # normally created only when you receive a message to support
    # searching your personal history.  So we need to create one.  We
    # add UserMessage.flags.historical, so that features that need
    # "messages you actually received" can exclude these UserMessages.
    if msgs.count() == 0:
        # Only single-message 'starred' requests get the historical-row path.
        if not len(messages) == 1:
            raise JsonableError(_("Invalid message(s)"))
        if flag != "starred":
            raise JsonableError(_("Invalid message(s)"))
        # Validate that the user could have read the relevant message
        message = access_message(user_profile, messages[0])[0]
        # OK, this is a message that you legitimately have access
        # to via narrowing to the stream it is on, even though you
        # didn't actually receive it.  So we create a historical,
        # read UserMessage message row for you to star.
        UserMessage.objects.create(user_profile=user_profile,
                                   message=message,
                                   flags=UserMessage.flags.historical | UserMessage.flags.read)
    if operation == 'add':
        count = msgs.update(flags=F('flags').bitor(flagattr))
    elif operation == 'remove':
        count = msgs.update(flags=F('flags').bitand(~flagattr))
    else:
        raise AssertionError("Invalid message flags operation")
    event = {'type': 'update_message_flags',
             'operation': operation,
             'flag': flag,
             'messages': messages,
             'all': False}
    send_event(user_profile.realm, event, [user_profile.id])
    if flag == "read" and operation == "add":
        # Messages just marked read no longer need mobile notifications.
        do_clear_mobile_push_notifications_for_ids(user_profile, messages)
    statsd.incr("flags.%s.%s" % (flag, operation), count)
    return count
def subscribed_to_stream(user_profile: UserProfile, stream_id: int) -> bool:
    """Return True iff user_profile has an active subscription to the stream."""
    query = Subscription.objects.filter(
        user_profile=user_profile,
        active=True,
        recipient__type=Recipient.STREAM,
        recipient__type_id=stream_id,
    )
    return query.exists()
def truncate_content(content: str, max_length: int, truncation_message: str) -> str:
    """Truncate `content` to at most max_length characters.

    When truncation happens, the result ends with `truncation_message` and is
    exactly max_length characters long.
    """
    if len(content) <= max_length:
        return content
    keep = max_length - len(truncation_message)
    return content[:keep] + truncation_message
def truncate_body(body: str) -> str:
    """Truncate a message body to the maximum allowed message length."""
    suffix = "\n[message truncated]"
    return truncate_content(body, MAX_MESSAGE_LENGTH, suffix)
def truncate_topic(topic: str) -> str:
    """Truncate a topic name to the maximum allowed topic length."""
    suffix = "..."
    return truncate_content(topic, MAX_TOPIC_NAME_LENGTH, suffix)
# Return type of get_user_info_for_message_updates: the ids of the users who
# received a message, plus the subset of them that were mentioned in it.
MessageUpdateUserInfoResult = TypedDict('MessageUpdateUserInfoResult', {
    'message_user_ids': Set[int],
    'mention_user_ids': Set[int],
})
def get_user_info_for_message_updates(message_id: int) -> MessageUpdateUserInfoResult:
    """Collect recipient and mention user ids for a message being edited.

    We exclude UserMessage.flags.historical rows since those users did not
    receive the message originally, and thus probably are not relevant for
    reprocessed alert_words, mentions and similar rendering features.  This
    may be a decision we change in the future.
    """
    rows = list(UserMessage.objects.filter(
        message=message_id,
        flags=~UserMessage.flags.historical
    ).values('user_profile_id', 'flags'))
    mention_mask = UserMessage.flags.mentioned | UserMessage.flags.wildcard_mentioned
    message_user_ids = set()  # type: Set[int]
    mention_user_ids = set()  # type: Set[int]
    for row in rows:
        user_id = row['user_profile_id']
        message_user_ids.add(user_id)
        if int(row['flags']) & mention_mask:
            mention_user_ids.add(user_id)
    return dict(
        message_user_ids=message_user_ids,
        mention_user_ids=mention_user_ids,
    )
def update_user_message_flags(message: Message, ums: Iterable[UserMessage]) -> None:
    """Recompute alert-word/mention flags on the given UserMessage rows.

    Only rows whose flags actually changed are saved, to avoid needless
    database writes.
    """
    wildcard = message.mentions_wildcard
    mentioned_ids = message.mentions_user_ids
    ids_with_alert_words = message.user_ids_with_alert_words
    changed_ums = set()  # type: Set[UserMessage]
    def sync_flag(um: UserMessage, wanted: bool, flag: int) -> None:
        # Flip `flag` on/off to match `wanted`, tracking rows that changed.
        currently_set = bool(um.flags & flag)
        if wanted == currently_set:
            return
        if wanted:
            um.flags |= flag
        else:
            um.flags &= ~flag
        changed_ums.add(um)
    for um in ums:
        sync_flag(um, um.user_profile_id in ids_with_alert_words,
                  UserMessage.flags.has_alert_word)
        sync_flag(um, um.user_profile_id in mentioned_ids,
                  UserMessage.flags.mentioned)
        sync_flag(um, wildcard, UserMessage.flags.wildcard_mentioned)
    for um in changed_ums:
        um.save(update_fields=['flags'])
def update_to_dict_cache(changed_messages: List[Message]) -> List[int]:
    """Refresh the to_dict cache entries (for serving messages) for the
    given messages; return their ids."""
    message_ids = [message.id for message in changed_messages]
    items_for_remote_cache = {
        to_dict_cache_key_id(message.id): (MessageDict.to_dict_uncached(message),)
        for message in changed_messages
    }
    cache_set_many(items_for_remote_cache)
    return message_ids
# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic
def do_update_embedded_data(user_profile: UserProfile,
                            message: Message,
                            content: Optional[str],
                            rendered_content: Optional[str]) -> None:
    """Update a message's content/rendering (e.g. after embed processing)
    without creating an edit-history entry, and notify recipients."""
    event = {
        'type': 'update_message',
        'sender': user_profile.email,
        'message_id': message.id}  # type: Dict[str, Any]
    changed_messages = [message]
    ums = UserMessage.objects.filter(message=message.id)
    if content is not None:
        update_user_message_flags(message, ums)
        message.content = content
        message.rendered_content = rendered_content
        message.rendered_content_version = bugdown_version
        event["content"] = content
        event["rendered_content"] = rendered_content
        # NOTE(review): rendered_content_version is set above but not listed
        # in update_fields, so it is not persisted here — confirm intended.
        message.save(update_fields=["content", "rendered_content"])
    event['message_ids'] = update_to_dict_cache(changed_messages)
    def user_info(um: UserMessage) -> Dict[str, Any]:
        # Per-recipient payload: user id plus their flags for this message.
        return {
            'id': um.user_profile_id,
            'flags': um.flags_list()
        }
    send_event(user_profile.realm, event, list(map(user_info, ums)))
# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic
def do_update_message(user_profile: UserProfile, message: Message, topic_name: Optional[str],
                      propagate_mode: str, content: Optional[str],
                      rendered_content: Optional[str], prior_mention_user_ids: Set[int],
                      mention_user_ids: Set[int], mention_data: Optional[bugdown.MentionData]=None) -> int:
    """
    The main function for message editing. A message edit event can
    modify:
    * the message's content (in which case the caller will have
    set both content and rendered_content),
    * the topic, in which case the caller will have set topic_name
    * or both
    With topic edits, propagate_mode determines whether other messages
    also have their topics edited.

    Returns the number of messages changed: 1 plus any additional
    messages updated by topic-edit propagation.
    """
    event = {'type': 'update_message',
             # TODO: We probably want to remove the 'sender' field
             # after confirming it isn't used by any consumers.
             'sender': user_profile.email,
             'user_id': user_profile.id,
             'message_id': message.id}  # type: Dict[str, Any]
    edit_history_event = {
        'user_id': user_profile.id,
    }  # type: Dict[str, Any]
    changed_messages = [message]
    stream_being_edited = None
    if message.is_stream_message():
        stream_id = message.recipient.type_id
        stream_being_edited = Stream.objects.get(id=stream_id)
        event['stream_name'] = stream_being_edited.name
    ums = UserMessage.objects.filter(message=message.id)
    if content is not None:
        assert rendered_content is not None
        update_user_message_flags(message, ums)
        # mention_data is required if there's a content edit.
        assert mention_data is not None
        # One could imagine checking realm.allow_edit_history here and
        # modifying the events based on that setting, but doing so
        # doesn't really make sense.  We need to send the edit event
        # to clients regardless, and a client already had access to
        # the original/pre-edit content of the message anyway.  That
        # setting must be enforced on the client side, and making a
        # change here simply complicates the logic for clients parsing
        # edit history events.
        event['orig_content'] = message.content
        event['orig_rendered_content'] = message.rendered_content
        edit_history_event["prev_content"] = message.content
        edit_history_event["prev_rendered_content"] = message.rendered_content
        edit_history_event["prev_rendered_content_version"] = message.rendered_content_version
        message.content = content
        message.rendered_content = rendered_content
        message.rendered_content_version = bugdown_version
        event["content"] = content
        event["rendered_content"] = rendered_content
        # NOTE(review): rendered_content_version was just overwritten above,
        # so this records the *new* version under a 'prev_' key — confirm
        # whether consumers rely on this long-standing behavior.
        event['prev_rendered_content_version'] = message.rendered_content_version
        event['is_me_message'] = Message.is_status_message(content, rendered_content)
        prev_content = edit_history_event['prev_content']
        if Message.content_has_attachment(prev_content) or Message.content_has_attachment(message.content):
            check_attachment_reference_change(prev_content, message)
        if message.is_stream_message():
            if topic_name is not None:
                new_topic_name = topic_name
            else:
                new_topic_name = message.topic_name()
            stream_topic = StreamTopicTarget(
                stream_id=stream_id,
                topic_name=new_topic_name,
            )  # type: Optional[StreamTopicTarget]
        else:
            stream_topic = None
        # Recompute who should be notified/pushed based on the new content.
        info = get_recipient_info(
            recipient=message.recipient,
            sender_id=message.sender_id,
            stream_topic=stream_topic,
            possible_wildcard_mention=mention_data.message_has_wildcards(),
        )
        event['push_notify_user_ids'] = list(info['push_notify_user_ids'])
        event['stream_push_user_ids'] = list(info['stream_push_user_ids'])
        event['stream_email_user_ids'] = list(info['stream_email_user_ids'])
        event['prior_mention_user_ids'] = list(prior_mention_user_ids)
        event['mention_user_ids'] = list(mention_user_ids)
        event['presence_idle_user_ids'] = filter_presence_idle_user_ids(info['active_user_ids'])
        if message.mentions_wildcard:
            event['wildcard_mention_user_ids'] = list(info['wildcard_mention_user_ids'])
        else:
            event['wildcard_mention_user_ids'] = []
    if topic_name is not None:
        orig_topic_name = message.topic_name()
        topic_name = truncate_topic(topic_name)
        event["propagate_mode"] = propagate_mode
        message.set_topic_name(topic_name)
        event["stream_id"] = message.recipient.type_id
        # These fields have legacy field names.
        event[ORIG_TOPIC] = orig_topic_name
        event[TOPIC_NAME] = topic_name
        event[TOPIC_LINKS] = bugdown.topic_links(message.sender.realm_id, topic_name)
        edit_history_event[LEGACY_PREV_TOPIC] = orig_topic_name
        if propagate_mode in ["change_later", "change_all"]:
            messages_list = update_messages_for_topic_edit(
                message=message,
                propagate_mode=propagate_mode,
                orig_topic_name=orig_topic_name,
                topic_name=topic_name,
            )
            changed_messages += messages_list
    message.last_edit_time = timezone_now()
    assert message.last_edit_time is not None  # assert needed because stubs for django are missing
    event['edit_timestamp'] = datetime_to_timestamp(message.last_edit_time)
    edit_history_event['timestamp'] = event['edit_timestamp']
    # Newest edit first; edit_history is stored as a JSON-encoded list.
    if message.edit_history is not None:
        edit_history = ujson.loads(message.edit_history)
        edit_history.insert(0, edit_history_event)
    else:
        edit_history = [edit_history_event]
    message.edit_history = ujson.dumps(edit_history)
    # This does message.save(update_fields=[...])
    save_message_for_edit_use_case(message=message)
    event['message_ids'] = update_to_dict_cache(changed_messages)
    def user_info(um: UserMessage) -> Dict[str, Any]:
        # Notification payload for users with a UserMessage row.
        return {
            'id': um.user_profile_id,
            'flags': um.flags_list()
        }
    def subscriber_info(user_id: int) -> Dict[str, Any]:
        # Notification payload for subscribers without a UserMessage row.
        return {
            'id': user_id,
            'flags': ['read']
        }
    # The following blocks arranges that users who are subscribed to a
    # stream and can see history from before they subscribed get
    # live-update when old messages are edited (e.g. if the user does
    # a topic edit themself).
    #
    # We still don't send an update event to users who are not
    # subscribed to this stream and don't have a UserMessage row. This
    # means if a non-subscriber is viewing the narrow, they won't get
    # a real-time updates. This is a balance between sending
    # message-edit notifications for every public stream to every user
    # in the organization (too expansive, and also not what we do for
    # newly sent messages anyway) and having magical live-updates
    # where possible.
    users_to_be_notified = list(map(user_info, ums))
    if stream_being_edited is not None:
        if stream_being_edited.is_history_public_to_subscribers:
            subscribers = get_active_subscriptions_for_stream_id(stream_id)
            # We exclude long-term idle users, since they by definition have no active clients.
            subscribers = subscribers.exclude(user_profile__long_term_idle=True)
            # Remove duplicates by excluding the id of users already in users_to_be_notified list.
            # This is the case where a user both has a UserMessage row and is a current Subscriber
            subscribers = subscribers.exclude(user_profile_id__in=[um.user_profile_id for um in ums])
            # All users that are subscribed to the stream must be notified when a message is edited
            subscribers_ids = [user.user_profile_id for user in subscribers]
            users_to_be_notified += list(map(subscriber_info, subscribers_ids))
    send_event(user_profile.realm, event, users_to_be_notified)
    return len(changed_messages)
def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:
    """Archive each of `messages` and notify its recipients of the deletion.

    For each message we build a 'delete_message' event (with stream/topic
    info for stream messages, recipient_id for private ones), move the
    message to the archive, and send the event to the users who had a
    UserMessage row for it.
    """
    # NOTE: the original code also accumulated message.id values into an
    # unused `message_ids` list; that dead accumulation has been removed.
    for message in messages:
        message_type = "stream"
        if not message.is_stream_message():
            message_type = "private"
        event = {
            'type': 'delete_message',
            'sender': message.sender.email,
            'sender_id': message.sender_id,
            'message_id': message.id,
            'message_type': message_type, }  # type: Dict[str, Any]
        if message_type == "stream":
            event['stream_id'] = message.recipient.type_id
            event['topic'] = message.topic_name()
        else:
            event['recipient_id'] = message.recipient_id
        # TODO: Each part of the following should be changed to bulk
        # queries, since right now if you delete 1000 messages, you'll
        # end up doing 1000 database queries in a loop and timing out.
        ums = [{'id': um.user_profile_id} for um in
               UserMessage.objects.filter(message=message.id)]
        move_messages_to_archive([message.id])
        send_event(realm, event, ums)
def do_delete_messages_by_sender(user: UserProfile) -> None:
    """Archive every message sent by `user`, in id order."""
    ids = Message.objects.filter(sender=user).values_list('id', flat=True).order_by('id')
    if not ids:
        return
    move_messages_to_archive(ids)
def get_streams_traffic(stream_ids: Set[int]) -> Dict[int, int]:
    """Total message counts per stream over the trailing 28 days.

    Streams with no recorded traffic are absent from the returned dict.
    """
    stat = COUNT_STATS['messages_in_stream:is_bot:day']
    traffic_from = timezone_now() - datetime.timedelta(days=28)
    rows = StreamCount.objects.filter(
        property=stat.property,
        end_time__gt=traffic_from,
        stream_id__in=stream_ids,
    ).values('stream_id').annotate(value=Sum('value'))
    return {row['stream_id']: row['value'] for row in rows}
def round_to_2_significant_digits(number: int) -> int:
    """Round a non-negative integer to at most two significant digits.

    Uses Python's round(), so ties follow banker's rounding
    (e.g. 1250 -> 1200).
    """
    digits_to_drop = len(str(number)) - 2
    return int(round(number, -digits_to_drop))
# Streams younger than this many days get no traffic estimate; see
# get_average_weekly_stream_traffic and is_old_stream below.
STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS = 7
def get_average_weekly_stream_traffic(stream_id: int, stream_date_created: datetime.datetime,
                                      recent_traffic: Dict[int, int]) -> Optional[int]:
    """Estimate a stream's messages-per-week from 28-day traffic totals.

    recent_traffic maps stream_id -> message count over the last 28 days
    (missing keys are treated as zero traffic).  Returns None for streams
    younger than STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS; otherwise a value
    rounded to two significant digits, with nonzero traffic never rounded
    all the way down to 0.
    """
    stream_traffic = recent_traffic.get(stream_id, 0)
    stream_age = (timezone_now() - stream_date_created).days
    if stream_age >= 28:
        # Full four weeks of data: simple weekly average.
        average_weekly_traffic = int(stream_traffic // 4)
    elif stream_age >= STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS:
        # Partial data: extrapolate from the stream's lifetime.
        average_weekly_traffic = int(stream_traffic * 7 // stream_age)
    else:
        return None
    if average_weekly_traffic == 0 and stream_traffic > 0:
        average_weekly_traffic = 1
    return round_to_2_significant_digits(average_weekly_traffic)
def is_old_stream(stream_date_created: datetime.datetime) -> bool:
    """Whether a stream is old enough for traffic estimates to be meaningful."""
    age_in_days = (timezone_now() - stream_date_created).days
    return age_in_days >= STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS
# (subscribed, unsubscribed, never_subscribed) stream-dict lists, as returned
# by gather_subscriptions_helper and get_web_public_subs.
SubHelperT = Tuple[List[Dict[str, Any]], List[Dict[str, Any]], List[Dict[str, Any]]]
def get_web_public_subs(realm: Realm) -> SubHelperT:
    """Build synthetic subscription dicts for the realm's web-public streams.

    Used for logged-out viewers; returns (subscribed, [], []) in the same
    shape as gather_subscriptions_helper, with default notification settings
    and colors assigned round-robin from the standard palette.
    """
    palette_size = len(STREAM_ASSIGNMENT_COLORS)
    streams = Stream.objects.filter(realm=realm, is_web_public=True, deactivated=False)
    subscribed = []
    for index, stream in enumerate(streams):
        subscribed.append(
            {'name': stream.name,
             'is_muted': False,
             'invite_only': False,
             'is_announcement_only': stream.is_announcement_only,
             'color': STREAM_ASSIGNMENT_COLORS[index % palette_size],
             'desktop_notifications': True,
             'audible_notifications': True,
             'push_notifications': False,
             'pin_to_top': False,
             'stream_id': stream.id,
             'description': stream.description,
             'rendered_description': stream.rendered_description,
             'is_old_stream': is_old_stream(stream.date_created),
             'first_message_id': stream.first_message_id,
             'stream_weekly_traffic': get_average_weekly_stream_traffic(stream.id,
                                                                        stream.date_created,
                                                                        {}),
             'email_address': ''})
    return (subscribed, [], [])
# In general, it's better to avoid using .values() because it makes
# the code pretty ugly, but in this case, it has significant
# performance impact for loading / for users with large numbers of
# subscriptions, so it's worth optimizing.
def gather_subscriptions_helper(user_profile: UserProfile,
                                include_subscribers: bool=True) -> SubHelperT:
    """Build the user's stream data for page load / the streams API.

    Returns (subscribed, unsubscribed, never_subscribed): lists of stream
    dicts sorted by name.  Deactivated streams are omitted.  When
    include_subscribers is False, no 'subscribers' key is populated.
    """
    sub_dicts = get_stream_subscriptions_for_user(user_profile).values(
        "recipient_id", "is_muted", "color", "desktop_notifications",
        "audible_notifications", "push_notifications", "email_notifications",
        "active", "pin_to_top"
    ).order_by("recipient_id")
    sub_dicts = list(sub_dicts)
    sub_recipient_ids = [
        sub['recipient_id']
        for sub in sub_dicts
    ]
    # Bulk-resolve recipient_id -> stream_id to avoid per-row queries.
    stream_recipient = StreamRecipientMap()
    stream_recipient.populate_for_recipient_ids(sub_recipient_ids)
    stream_ids = set()  # type: Set[int]
    for sub in sub_dicts:
        sub['stream_id'] = stream_recipient.stream_id_for(sub['recipient_id'])
        stream_ids.add(sub['stream_id'])
    recent_traffic = get_streams_traffic(stream_ids=stream_ids)
    all_streams = get_active_streams(user_profile.realm).select_related(
        "realm").values("id", "name", "invite_only", "is_announcement_only", "realm_id",
                        "email_token", "description", "rendered_description", "date_created",
                        "history_public_to_subscribers", "first_message_id", "is_web_public")
    stream_dicts = [stream for stream in all_streams if stream['id'] in stream_ids]
    stream_hash = {}
    for stream in stream_dicts:
        stream_hash[stream["id"]] = stream
    all_streams_id = [stream["id"] for stream in all_streams]
    subscribed = []
    unsubscribed = []
    never_subscribed = []
    # Deactivated streams aren't in stream_hash.
    streams = [stream_hash[sub["stream_id"]] for sub in sub_dicts
               if sub["stream_id"] in stream_hash]
    streams_subscribed_map = dict((sub["stream_id"], sub["active"]) for sub in sub_dicts)
    # Add never subscribed streams to streams_subscribed_map
    streams_subscribed_map.update({stream['id']: False for stream in all_streams if stream not in streams})
    if include_subscribers:
        subscriber_map = bulk_get_subscriber_user_ids(
            all_streams,
            user_profile,
            streams_subscribed_map,
            stream_recipient
        )  # type: Mapping[int, Optional[List[int]]]
    else:
        # If we're not including subscribers, always return None,
        # which the below code needs to check for anyway.
        subscriber_map = defaultdict(lambda: None)
    sub_unsub_stream_ids = set()
    for sub in sub_dicts:
        sub_unsub_stream_ids.add(sub["stream_id"])
        stream = stream_hash.get(sub["stream_id"])
        if not stream:
            # This stream has been deactivated, don't include it.
            continue
        subscribers = subscriber_map[stream["id"]]  # type: Optional[List[int]]
        # Important: don't show the subscribers if the stream is invite only
        # and this user isn't on it anymore (or a realm administrator).
        if stream["invite_only"] and not (sub["active"] or user_profile.is_realm_admin):
            subscribers = None
        # Guest users lose access to subscribers when they are unsubscribed.
        if not sub["active"] and user_profile.is_guest:
            subscribers = None
        email_address = encode_email_address_helper(stream["name"], stream["email_token"],
                                                    show_sender=True)
        stream_dict = {'name': stream["name"],
                       'in_home_view': not sub["is_muted"],
                       'is_muted': sub["is_muted"],
                       'invite_only': stream["invite_only"],
                       'is_web_public': stream["is_web_public"],
                       'is_announcement_only': stream["is_announcement_only"],
                       'color': sub["color"],
                       'desktop_notifications': sub["desktop_notifications"],
                       'audible_notifications': sub["audible_notifications"],
                       'push_notifications': sub["push_notifications"],
                       'email_notifications': sub["email_notifications"],
                       'pin_to_top': sub["pin_to_top"],
                       'stream_id': stream["id"],
                       'first_message_id': stream["first_message_id"],
                       'description': stream["description"],
                       'rendered_description': stream["rendered_description"],
                       'is_old_stream': is_old_stream(stream["date_created"]),
                       'stream_weekly_traffic': get_average_weekly_stream_traffic(stream["id"],
                                                                                  stream["date_created"],
                                                                                  recent_traffic),
                       'email_address': email_address,
                       'history_public_to_subscribers': stream['history_public_to_subscribers']}
        if subscribers is not None:
            stream_dict['subscribers'] = subscribers
        if sub["active"]:
            subscribed.append(stream_dict)
        else:
            unsubscribed.append(stream_dict)
    all_streams_id_set = set(all_streams_id)
    # Guests can't access public streams they never subscribed to.
    if user_profile.can_access_public_streams():
        never_subscribed_stream_ids = all_streams_id_set - sub_unsub_stream_ids
    else:
        never_subscribed_stream_ids = set()
    never_subscribed_streams = [ns_stream_dict for ns_stream_dict in all_streams
                                if ns_stream_dict['id'] in never_subscribed_stream_ids]
    for stream in never_subscribed_streams:
        is_public = (not stream['invite_only'])
        if is_public or user_profile.is_realm_admin:
            stream_dict = {'name': stream['name'],
                           'invite_only': stream['invite_only'],
                           'is_web_public': stream['is_web_public'],
                           'is_announcement_only': stream['is_announcement_only'],
                           'stream_id': stream['id'],
                           'first_message_id': stream["first_message_id"],
                           'is_old_stream': is_old_stream(stream["date_created"]),
                           'stream_weekly_traffic': get_average_weekly_stream_traffic(stream["id"],
                                                                                      stream["date_created"],
                                                                                      recent_traffic),
                           'description': stream['description'],
                           'rendered_description': stream["rendered_description"],
                           'history_public_to_subscribers': stream['history_public_to_subscribers']}
            # NOTE(review): this inner check duplicates the enclosing
            # condition exactly, so it is always true here — likely a
            # historical artifact; confirm before simplifying.
            if is_public or user_profile.is_realm_admin:
                subscribers = subscriber_map[stream["id"]]
                if subscribers is not None:
                    stream_dict['subscribers'] = subscribers
            never_subscribed.append(stream_dict)
    return (sorted(subscribed, key=lambda x: x['name']),
            sorted(unsubscribed, key=lambda x: x['name']),
            sorted(never_subscribed, key=lambda x: x['name']))
def gather_subscriptions(
        user_profile: UserProfile,
        include_subscribers: bool=False,
) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
    """Return (subscribed, unsubscribed) stream dicts for the user.

    When include_subscribers is True, each dict's 'subscribers' list is
    converted from user ids to sorted email addresses.
    """
    subscribed, unsubscribed, _ = gather_subscriptions_helper(
        user_profile, include_subscribers=include_subscribers)
    if include_subscribers:
        user_ids = {
            user_id
            for sub in subscribed + unsubscribed
            if 'subscribers' in sub
            for user_id in sub['subscribers']
        }
        email_dict = get_emails_from_user_ids(list(user_ids))
        for sub in subscribed + unsubscribed:
            if 'subscribers' in sub:
                sub['subscribers'] = sorted(
                    email_dict[user_id] for user_id in sub['subscribers']
                )
    return (subscribed, unsubscribed)
def get_active_presence_idle_user_ids(realm: Realm,
                                      sender_id: int,
                                      message_type: str,
                                      active_user_ids: Set[int],
                                      user_flags: Dict[int, List[str]]) -> List[int]:
    '''
    Given a list of active_user_ids, we build up a subset
    of those users who fit these criteria:
        * They are likely to need notifications (either due
          to mentions, alert words, or being PM'ed).
        * They are no longer "present" according to the
          UserPresence table.
    '''
    if realm.presence_disabled:
        return []
    is_pm = message_type == 'private'
    def likely_needs_notification(user_id: int) -> bool:
        flags = user_flags.get(user_id, [])  # type: Iterable[str]
        if 'mentioned' in flags or 'wildcard_mentioned' in flags:
            return True
        if 'has_alert_word' in flags:
            return True
        # PM recipients (other than the sender) also count.
        return is_pm and user_id != sender_id
    user_ids = {user_id for user_id in active_user_ids
                if likely_needs_notification(user_id)}
    return filter_presence_idle_user_ids(user_ids)
def filter_presence_idle_user_ids(user_ids: Set[int]) -> List[int]:
    """Return, sorted, the subset of user_ids with no recent ACTIVE presence."""
    if not user_ids:
        return []
    # 140 seconds is consistent with presence.js:OFFLINE_THRESHOLD_SECS
    cutoff = timezone_now() - datetime.timedelta(seconds=140)
    rows = UserPresence.objects.filter(
        user_profile_id__in=user_ids,
        status=UserPresence.ACTIVE,
        timestamp__gte=cutoff,
    ).distinct('user_profile_id').values('user_profile_id')
    recently_active_ids = {row['user_profile_id'] for row in rows}
    return sorted(user_ids - recently_active_ids)
def get_status_dict(requesting_user_profile: UserProfile) -> Dict[str, Dict[str, Dict[str, Any]]]:
    """Presence data for the requester's realm; empty when presence is disabled."""
    realm = requesting_user_profile.realm
    if realm.presence_disabled:
        # Return an empty dict if presence is disabled in this realm
        return defaultdict(dict)
    return UserPresence.get_status_dict_by_realm(requesting_user_profile.realm_id)
def get_cross_realm_dicts() -> List[Dict[str, Any]]:
    """Return client-facing dicts for the system bots living in the special
    system bot realm (these bots can interact across realms)."""
    users = bulk_get_users(list(settings.CROSS_REALM_BOT_EMAILS), None,
                           base_query=UserProfile.objects.filter(
                               realm__string_id=settings.SYSTEM_BOT_REALM)).values()
    return [{'email': user.email,
             'user_id': user.id,
             'is_admin': user.is_realm_admin,
             'is_bot': user.is_bot,
             'avatar_url': avatar_url(user),
             'timezone': user.timezone,
             'date_joined': user.date_joined.isoformat(),
             'full_name': user.full_name}
            for user in users
            # Important: We filter here, in addition to in
            # `base_query`, because of how bulk_get_users shares its
            # cache with other UserProfile caches.
            if user.realm.string_id == settings.SYSTEM_BOT_REALM]
def do_send_confirmation_email(invitee: PreregistrationUser,
                               referrer: UserProfile) -> str:
    """
    Send the confirmation/welcome e-mail to an invited user.

    Returns the activation URL embedded in the email (useful to callers
    for logging/testing).
    """
    activation_url = create_confirmation_link(invitee, referrer.realm.host, Confirmation.INVITATION)
    context = {'referrer_full_name': referrer.full_name, 'referrer_email': referrer.delivery_email,
               'activate_url': activation_url, 'referrer_realm_name': referrer.realm.name}
    from_name = "%s (via Zulip)" % (referrer.full_name,)
    send_email('zerver/emails/invitation', to_emails=[invitee.email], from_name=from_name,
               from_address=FromAddress.tokenized_no_reply_address(),
               language=referrer.realm.default_language, context=context)
    return activation_url
def email_not_system_bot(email: str) -> None:
    """Raise ValidationError if `email` belongs to a reserved system bot."""
    if not is_cross_realm_bot_email(email):
        return
    raise ValidationError('%s is reserved for system bots' % (email,))
def validate_email_for_realm(target_realm: Realm, email: str) -> None:
    """Raise ValidationError unless `email` is free to register in the realm.

    Returning silently means either no account exists for the address, or
    only an inactive mirror-dummy account does (which registration is
    allowed to take over).
    """
    email_not_system_bot(email)
    try:
        existing_user_profile = get_user_by_delivery_email(email, target_realm)
    except UserProfile.DoesNotExist:
        # No account at all: the address is available.
        return
    if existing_user_profile.is_active:
        if existing_user_profile.is_mirror_dummy:
            raise AssertionError("Mirror dummy user is already active!")
        # Other users should not already exist at all.
        raise ValidationError(_('%s already has an account') %
                              (email,), code = _("Already has an account."))
    elif not existing_user_profile.is_mirror_dummy:
        raise ValidationError('The account for %s has been deactivated' % (email,),
                              code = _("Account has been deactivated."))
def validate_email(user_profile: UserProfile, email: str) -> Tuple[Optional[str], Optional[str]]:
    """Check whether `email` may be invited by `user_profile`.

    Returns a (error, skipped_reason) pair: `error` means the address is
    invalid or disallowed for the realm; `skipped_reason` means an account
    already exists, so the invite should be skipped rather than rejected.
    Both are None when the address is fine to invite.
    """
    try:
        validators.validate_email(email)
    except ValidationError:
        return _("Invalid address."), None
    try:
        email_allowed_for_realm(email, user_profile.realm)
    except DomainNotAllowedForRealmError:
        return _("Outside your domain."), None
    except DisposableEmailError:
        return _("Please use your real email address."), None
    except EmailContainsPlusError:
        return _("Email addresses containing + are not allowed."), None
    try:
        validate_email_for_realm(user_profile.realm, email)
    except ValidationError as error:
        return None, (error.code)
    return None, None
class InvitationError(JsonableError):
    # Raised when some or all invitations could not be sent.  `errors` carries
    # per-email (address, reason) failures; `sent_invitations` records whether
    # any invitations did go out despite the failure.
    code = ErrorCode.INVITATION_FAILED
    data_fields = ['errors', 'sent_invitations']
    def __init__(self, msg: str, errors: List[Tuple[str, str]], sent_invitations: bool) -> None:
        self._msg = msg  # type: str
        self.errors = errors  # type: List[Tuple[str, str]]
        self.sent_invitations = sent_invitations  # type: bool
def estimate_recent_invites(realms: Iterable[Realm], *, days: int) -> int:
    '''An upper bound on the number of invites sent in the last `days` days'''
    window_start = timezone_now() - datetime.timedelta(days=days)
    aggregate = RealmCount.objects.filter(
        realm__in=realms,
        property='invites_sent::day',
        end_time__gte=window_start,
    ).aggregate(Sum('value'))
    total = aggregate['value__sum']
    # aggregate() yields None when no rows matched the filter.
    if total is None:
        return 0
    return total
def check_invite_limit(realm: Realm, num_invitees: int) -> None:
    '''Discourage using invitation emails as a vector for carrying spam.

    Raises InvitationError if sending num_invitees more invites would exceed
    either the realm's daily limit or (for newly created realms) an aggregate
    limit shared across all new realms.  Only enforced on installations with
    OPEN_REALM_CREATION.
    '''
    msg = _("You do not have enough remaining invites. "
            "Please contact %s to have your limit raised. "
            "No invitations were sent.") % (settings.ZULIP_ADMINISTRATOR,)
    if not settings.OPEN_REALM_CREATION:
        return
    # Per-realm daily limit.
    recent_invites = estimate_recent_invites([realm], days=1)
    if num_invitees + recent_invites > realm.max_invites:
        raise InvitationError(msg, [], sent_invitations=False)
    default_max = settings.INVITES_DEFAULT_REALM_DAILY_MAX
    newrealm_age = datetime.timedelta(days=settings.INVITES_NEW_REALM_DAYS)
    if realm.date_created <= timezone_now() - newrealm_age:
        # If this isn't a "newly-created" realm, we're done. The
        # remaining code applies an aggregate limit across all
        # "new" realms, to address sudden bursts of spam realms.
        return
    if realm.max_invites > default_max:
        # If a user is on a realm where we've bumped up
        # max_invites, then we exempt them from invite limits.
        return
    new_realms = Realm.objects.filter(
        date_created__gte=timezone_now() - newrealm_age,
        _max_invites__lte=default_max,
    ).all()
    for days, count in settings.INVITES_NEW_REALM_LIMIT_DAYS:
        recent_invites = estimate_recent_invites(new_realms, days=days)
        if num_invitees + recent_invites > count:
            raise InvitationError(msg, [], sent_invitations=False)
def do_invite_users(user_profile: UserProfile,
                    invitee_emails: SizedTextIterable,
                    streams: Iterable[Stream],
                    invite_as: Optional[int]=PreregistrationUser.INVITE_AS['MEMBER']) -> None:
    """Validate and send email invitations on behalf of `user_profile`.

    Enforces invite rate limits, validates each address, records
    PreregistrationUser rows, and queues the actual invitation emails.
    Raises InvitationError when any address fails validation or when every
    address was skipped; the error's `sent_invitations` flag says whether
    any invites did go out.
    """
    check_invite_limit(user_profile.realm, len(invitee_emails))
    realm = user_profile.realm
    if not realm.invite_required:
        # Inhibit joining an open realm to send spam invitations.
        min_age = datetime.timedelta(days=settings.INVITES_MIN_USER_AGE_DAYS)
        if (user_profile.date_joined > timezone_now() - min_age
                and not user_profile.is_realm_admin):
            raise InvitationError(
                _("Your account is too new to send invites for this organization. "
                  "Ask an organization admin, or a more experienced user."),
                [], sent_invitations=False)
    validated_emails = []  # type: List[str]
    errors = []  # type: List[Tuple[str, str]]
    skipped = []  # type: List[Tuple[str, str]]
    for email in invitee_emails:
        if email == '':
            continue
        email_error, email_skipped = validate_email(user_profile, email)
        if not (email_error or email_skipped):
            validated_emails.append(email)
        elif email_error:
            errors.append((email, email_error))
        elif email_skipped:
            skipped.append((email, email_skipped))
    if errors:
        raise InvitationError(
            _("Some emails did not validate, so we didn't send any invitations."),
            errors + skipped, sent_invitations=False)
    if skipped and len(skipped) == len(invitee_emails):
        # All e-mails were skipped, so we didn't actually invite anyone.
        raise InvitationError(_("We weren't able to invite anyone."),
                              skipped, sent_invitations=False)
    # We do this here rather than in the invite queue processor since this
    # is used for rate limiting invitations, rather than keeping track of
    # when exactly invitations were sent
    do_increment_logging_stat(user_profile.realm, COUNT_STATS['invites_sent::day'],
                              None, timezone_now(), increment=len(validated_emails))
    # Now that we are past all the possible errors, we actually create
    # the PreregistrationUser objects and trigger the email invitations.
    for email in validated_emails:
        # The logged in user is the referrer.
        prereg_user = PreregistrationUser(email=email, referred_by=user_profile,
                                          invited_as=invite_as,
                                          realm=user_profile.realm)
        prereg_user.save()
        stream_ids = [stream.id for stream in streams]
        prereg_user.streams.set(stream_ids)
        event = {"prereg_id": prereg_user.id, "referrer_id": user_profile.id}
        queue_json_publish("invites", event)
    if skipped:
        raise InvitationError(_("Some of those addresses are already using Zulip, "
                                "so we didn't send them an invitation. We did send "
                                "invitations to everyone else!"),
                              skipped, sent_invitations=True)
    notify_invites_changed(user_profile)
def do_get_user_invites(user_profile: UserProfile) -> List[Dict[str, Any]]:
    """Return all unexpired, unaccepted invitations in the user's realm,
    covering both single-use email invites and multiuse invite links."""
    days_to_activate = settings.INVITATION_LINK_VALIDITY_DAYS
    active_value = getattr(confirmation_settings, 'STATUS_ACTIVE', 1)
    lowest_datetime = timezone_now() - datetime.timedelta(days=days_to_activate)
    # Single-use invites that are neither accepted nor expired.
    prereg_users = PreregistrationUser.objects.exclude(status=active_value).filter(
        invited_at__gte=lowest_datetime,
        referred_by__realm=user_profile.realm)
    invites = []
    for invitee in prereg_users:
        invites.append(dict(email=invitee.email,
                            ref=invitee.referred_by.email,
                            invited=datetime_to_timestamp(invitee.invited_at),
                            id=invitee.id,
                            invited_as=invitee.invited_as,
                            is_multiuse=False))
    # Multiuse invite links that have not expired yet.
    multiuse_confirmation_objs = Confirmation.objects.filter(realm=user_profile.realm,
                                                             type=Confirmation.MULTIUSE_INVITE,
                                                             date_sent__gte=lowest_datetime)
    for confirmation_obj in multiuse_confirmation_objs:
        invite = confirmation_obj.content_object
        invites.append(dict(ref=invite.referred_by.email,
                            invited=datetime_to_timestamp(confirmation_obj.date_sent),
                            id=invite.id,
                            link_url=confirmation_url(confirmation_obj.confirmation_key,
                                                      user_profile.realm.host,
                                                      Confirmation.MULTIUSE_INVITE),
                            invited_as=invite.invited_as,
                            is_multiuse=True))
    return invites
def do_create_multiuse_invite_link(referred_by: UserProfile, invited_as: int,
                                   streams: Optional[List[Stream]]=None) -> str:
    """Create a MultiuseInvite and return its confirmation URL.

    `streams` lists the streams invitees will be auto-subscribed to; None
    (or an empty list) means no extra subscriptions.  The default was
    previously the mutable literal `[]`, a classic Python pitfall; `None`
    is backward-compatible since the only use is the `if streams:` check.
    """
    realm = referred_by.realm
    invite = MultiuseInvite.objects.create(realm=realm, referred_by=referred_by)
    if streams:
        invite.streams.set(streams)
    invite.invited_as = invited_as
    invite.save()
    notify_invites_changed(referred_by)
    return create_confirmation_link(invite, realm.host, Confirmation.MULTIUSE_INVITE)
def do_revoke_user_invite(prereg_user: PreregistrationUser) -> None:
    """Cancel a pending email invitation: remove its confirmation links, the
    PreregistrationUser row, and any scheduled reminder emails."""
    email = prereg_user.email
    # Delete both the confirmation objects and the prereg_user object.
    # TODO: Probably we actually want to set the confirmation objects
    # to a "revoked" status so that we can give the invited user a better
    # error message.
    content_type = ContentType.objects.get_for_model(PreregistrationUser)
    Confirmation.objects.filter(content_type=content_type,
                                object_id=prereg_user.id).delete()
    prereg_user.delete()
    clear_scheduled_invitation_emails(email)
    notify_invites_changed(prereg_user)
def do_revoke_multi_use_invite(multiuse_invite: MultiuseInvite) -> None:
    """Delete a multiuse invite link and its confirmation, then notify clients."""
    referrer = multiuse_invite.referred_by
    invite_content_type = ContentType.objects.get_for_model(MultiuseInvite)
    Confirmation.objects.filter(content_type=invite_content_type,
                                object_id=multiuse_invite.id).delete()
    multiuse_invite.delete()
    notify_invites_changed(referrer)
def do_resend_user_invite_email(prereg_user: PreregistrationUser) -> int:
    """Re-send an invitation email, refreshing invited_at, and return the new
    invited_at as a UNIX timestamp."""
    # Both are guaranteed structurally by the caller's code path; the asserts
    # narrow the Optional types for mypy.
    assert prereg_user.referred_by is not None
    assert prereg_user.realm is not None
    check_invite_limit(prereg_user.referred_by.realm, 1)
    prereg_user.invited_at = timezone_now()
    prereg_user.save()
    do_increment_logging_stat(prereg_user.realm, COUNT_STATS['invites_sent::day'],
                              None, prereg_user.invited_at)
    clear_scheduled_invitation_emails(prereg_user.email)
    # We don't store the custom email body, so just set it to None
    event = {"prereg_id": prereg_user.id, "referrer_id": prereg_user.referred_by.id, "email_body": None}
    queue_json_publish("invites", event)
    return datetime_to_timestamp(prereg_user.invited_at)
def notify_realm_emoji(realm: Realm) -> None:
    """Broadcast the realm's current custom emoji set to all active users."""
    event = dict(
        type="realm_emoji",
        op="update",
        realm_emoji=realm.get_emoji(),
    )
    send_event(realm, event, active_user_ids(realm.id))
def check_add_realm_emoji(realm: Realm,
                          name: str,
                          author: UserProfile,
                          image_file: File) -> Optional[RealmEmoji]:
    """Validate and register a custom realm emoji.

    Returns the saved RealmEmoji on success, or None if uploading the
    image failed (the partially-created database row is cleaned up).
    Raises django.core.exceptions.ValidationError via full_clean() for
    an invalid name.
    """
    realm_emoji = RealmEmoji(realm=realm, name=name, author=author)
    realm_emoji.full_clean()
    realm_emoji.save()
    # The file name depends on the row id, so the row must exist first.
    emoji_file_name = get_emoji_file_name(image_file.name, realm_emoji.id)
    try:
        upload_emoji_image(image_file, emoji_file_name, author)
    except Exception:
        # Upload failed: roll back the database row.  The previous
        # implementation did `return None` inside a `finally:` block,
        # which silently discarded *every* in-flight exception (even
        # KeyboardInterrupt); catching Exception keeps the None-on-failure
        # contract without swallowing BaseException.
        realm_emoji.delete()
        return None
    realm_emoji.file_name = emoji_file_name
    realm_emoji.save(update_fields=['file_name'])
    notify_realm_emoji(realm_emoji.realm)
    return realm_emoji
def do_remove_realm_emoji(realm: Realm, name: str) -> None:
    """Soft-delete (deactivate) a realm emoji by name and broadcast the update."""
    target = RealmEmoji.objects.get(realm=realm, name=name, deactivated=False)
    target.deactivated = True
    target.save(update_fields=['deactivated'])
    notify_realm_emoji(realm)
def notify_alert_words(user_profile: UserProfile, words: Iterable[str]) -> None:
    """Push the user's current alert-word list to their own clients."""
    send_event(user_profile.realm,
               dict(type="alert_words", alert_words=words),
               [user_profile.id])
def do_add_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
    """Add alert words for the user, then sync the resulting list to clients."""
    updated_words = add_user_alert_words(user_profile, alert_words)
    notify_alert_words(user_profile, updated_words)
def do_remove_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
    """Remove alert words for the user, then sync the resulting list to clients."""
    remaining_words = remove_user_alert_words(user_profile, alert_words)
    notify_alert_words(user_profile, remaining_words)
def do_set_alert_words(user_profile: UserProfile, alert_words: List[str]) -> None:
    """Replace the user's alert words wholesale, then sync the list to clients."""
    set_user_alert_words(user_profile, alert_words)
    notify_alert_words(user_profile, alert_words)
def do_mute_topic(user_profile: UserProfile, stream: Stream, recipient: Recipient, topic: str) -> None:
    """Mute a stream topic for the user and sync the full mute list to clients."""
    add_topic_mute(user_profile, stream.id, recipient.id, topic)
    muted = get_topic_mutes(user_profile)
    send_event(user_profile.realm,
               dict(type="muted_topics", muted_topics=muted),
               [user_profile.id])
def do_unmute_topic(user_profile: UserProfile, stream: Stream, topic: str) -> None:
    """Unmute a stream topic for the user and sync the full mute list to clients."""
    remove_topic_mute(user_profile, stream.id, topic)
    muted = get_topic_mutes(user_profile)
    send_event(user_profile.realm,
               dict(type="muted_topics", muted_topics=muted),
               [user_profile.id])
def do_mark_hotspot_as_read(user: UserProfile, hotspot: str) -> None:
    """Record that the user has seen a tutorial hotspot; push the remaining ones."""
    UserHotspot.objects.get_or_create(user=user, hotspot=hotspot)
    send_event(user.realm,
               dict(type="hotspots", hotspots=get_next_hotspots(user)),
               [user.id])
def notify_realm_filters(realm: Realm) -> None:
    """Broadcast the realm's current linkifier (realm filter) list."""
    send_event(realm,
               dict(type="realm_filters",
                    realm_filters=realm_filters_for_realm(realm.id)),
               active_user_ids(realm.id))
# NOTE: Regexes must be simple enough that they can be easily translated to JavaScript
# RegExp syntax. In addition to JS-compatible syntax, the following features are available:
# * Named groups will be converted to numbered groups automatically
# * Inline-regex flags will be stripped, and where possible translated to RegExp-wide flags
def do_add_realm_filter(realm: Realm, pattern: str, url_format_string: str) -> int:
    """Create a linkifier (RealmFilter), broadcast the change, return its id."""
    realm_filter = RealmFilter(
        realm=realm,
        pattern=pattern.strip(),
        url_format_string=url_format_string.strip())
    # full_clean() validates the regex/URL format before we persist it.
    realm_filter.full_clean()
    realm_filter.save()
    notify_realm_filters(realm)

    return realm_filter.id
def do_remove_realm_filter(realm: Realm, pattern: Optional[str]=None,
                           id: Optional[int]=None) -> None:
    """Delete a linkifier, looked up by `pattern` when given, else by `id`."""
    if pattern is None:
        RealmFilter.objects.get(realm=realm, pk=id).delete()
    else:
        RealmFilter.objects.get(realm=realm, pattern=pattern).delete()
    notify_realm_filters(realm)
def get_emails_from_user_ids(user_ids: Sequence[int]) -> Dict[int, str]:
    """Map each user id in `user_ids` to its email address."""
    # We may eventually use memcached to speed this up, but the DB is fast.
    return UserProfile.emails_from_ids(user_ids)
def do_add_realm_domain(realm: Realm, domain: str, allow_subdomains: bool) -> RealmDomain:
    """Add an allowed email domain to the realm, broadcast it, return the row."""
    realm_domain = RealmDomain.objects.create(realm=realm, domain=domain,
                                              allow_subdomains=allow_subdomains)
    event = dict(
        type="realm_domains",
        op="add",
        realm_domain=dict(domain=realm_domain.domain,
                          allow_subdomains=realm_domain.allow_subdomains),
    )
    send_event(realm, event, active_user_ids(realm.id))

    return realm_domain
def do_change_realm_domain(realm_domain: RealmDomain, allow_subdomains: bool) -> None:
    """Toggle whether subdomains of an allowed realm domain are accepted."""
    realm_domain.allow_subdomains = allow_subdomains
    realm_domain.save(update_fields=['allow_subdomains'])
    event = dict(
        type="realm_domains",
        op="change",
        realm_domain=dict(domain=realm_domain.domain,
                          allow_subdomains=realm_domain.allow_subdomains),
    )
    send_event(realm_domain.realm, event, active_user_ids(realm_domain.realm_id))
def do_remove_realm_domain(realm_domain: RealmDomain) -> None:
    """Delete an allowed realm domain and broadcast the removal; if it was
    the last domain, also disable emails_restricted_to_domains."""
    realm = realm_domain.realm
    domain = realm_domain.domain
    realm_domain.delete()
    if RealmDomain.objects.filter(realm=realm).count() == 0 and realm.emails_restricted_to_domains:
        # If this was the last realm domain, we mark the realm as no
        # longer restricted to domain, because the feature doesn't do
        # anything if there are no domains, and this is probably less
        # confusing than the alternative.
        do_set_realm_property(realm, 'emails_restricted_to_domains', False)
    event = dict(type="realm_domains", op="remove", domain=domain)
    send_event(realm, event, active_user_ids(realm.id))
def get_occupied_streams(realm: Realm) -> QuerySet:
    # TODO: Make a generic stub for QuerySet
    """ Get streams with subscribers """
    # Recipient rows with at least one active subscription from an active user.
    subs_filter = Subscription.objects.filter(active=True, user_profile__realm=realm,
                                              user_profile__is_active=True).values('recipient_id')
    stream_ids = Recipient.objects.filter(
        type=Recipient.STREAM, id__in=subs_filter).values('type_id')
    return Stream.objects.filter(id__in=stream_ids, realm=realm, deactivated=False)
def get_web_public_streams(realm: Realm) -> List[Dict[str, Any]]:
    """Return dicts for every active, web-public stream in the realm."""
    rows = Stream.objects.filter(realm=realm, deactivated=False, is_web_public=True)
    return [stream.to_dict() for stream in rows]
def do_get_streams(
        user_profile: UserProfile, include_public: bool=True,
        include_subscribed: bool=True, include_all_active: bool=False,
        include_default: bool=False, include_owner_subscribed: bool=False
) -> List[Dict[str, Any]]:
    """Return stream dicts visible to `user_profile`, per the include_* flags,
    sorted by name.  Raises JsonableError if include_all_active is requested
    without API-super-user privileges.
    """
    if include_all_active and not user_profile.is_api_super_user:
        raise JsonableError(_("User not authorized for this query"))
    # Guests and similar cannot see public streams they aren't subscribed to.
    include_public = include_public and user_profile.can_access_public_streams()
    # Start out with all streams in the realm with subscribers
    query = get_occupied_streams(user_profile.realm)
    if not include_all_active:
        user_subs = get_stream_subscriptions_for_user(user_profile).filter(
            active=True,
        ).select_related('recipient')
        # We construct a query as the or (|) of the various sources
        # this user requested streams from.
        query_filter = None  # type: Optional[Q]
        def add_filter_option(option: Q) -> None:
            nonlocal query_filter
            if query_filter is None:
                query_filter = option
            else:
                query_filter |= option
        if include_subscribed:
            recipient_check = Q(id__in=[sub.recipient.type_id for sub in user_subs])
            add_filter_option(recipient_check)
        if include_public:
            invite_only_check = Q(invite_only=False)
            add_filter_option(invite_only_check)
        if include_owner_subscribed and user_profile.is_bot:
            assert user_profile.bot_owner is not None
            owner_subs = get_stream_subscriptions_for_user(user_profile.bot_owner).filter(
                active=True,
            ).select_related('recipient')
            owner_subscribed_check = Q(id__in=[sub.recipient.type_id for sub in owner_subs])
            add_filter_option(owner_subscribed_check)
        if query_filter is not None:
            query = query.filter(query_filter)
        else:
            # Don't bother going to the database with no valid sources
            query = []
    streams = [(row.to_dict()) for row in query]
    streams.sort(key=lambda elt: elt["name"])
    if include_default:
        is_default = {}
        default_streams = get_default_streams_for_realm(user_profile.realm_id)
        for default_stream in default_streams:
            is_default[default_stream.id] = True
        for stream in streams:
            stream['is_default'] = is_default.get(stream["stream_id"], False)
    return streams
def notify_attachment_update(user_profile: UserProfile, op: str,
                             attachment_dict: Dict[str, Any]) -> None:
    """Notify the user's own clients of an attachment change, including the
    realm's current upload-quota usage."""
    realm = user_profile.realm
    event = {
        'type': 'attachment',
        'op': op,
        'attachment': attachment_dict,
        "upload_space_used": realm.currently_used_upload_space_bytes(),
    }
    send_event(realm, event, [user_profile.id])
def do_claim_attachments(message: Message) -> None:
    """Link every upload referenced in `message.content` to the message.

    Uploads the sender lacks permission on (or that don't exist) are skipped
    with a warning rather than failing the send.
    """
    attachment_url_list = attachment_url_re.findall(message.content)
    for url in attachment_url_list:
        path_id = attachment_url_to_path_id(url)
        user_profile = message.sender
        is_message_realm_public = False
        if message.is_stream_message():
            is_message_realm_public = Stream.objects.get(id=message.recipient.type_id).is_public()
        if not validate_attachment_request(user_profile, path_id):
            # Technically, there are 2 cases here:
            # * The user put something in their message that has the form
            #   of an upload, but doesn't correspond to a file that
            #   actually exists.  validate_attachment_request will return None.
            # * The user is trying to send a link to a file they don't have permission to
            #   access themselves.  validate_attachment_request will return False.
            #
            # Either case is unusual and suggests a UI bug that got
            # the user in this situation, so we log in these cases.
            logging.warning("User %s tried to share upload %s in message %s, but lacks permission" % (
                user_profile.id, path_id, message.id))
            continue
        attachment = claim_attachment(user_profile, path_id, message, is_message_realm_public)
        notify_attachment_update(user_profile, "update", attachment.to_dict())
def do_delete_old_unclaimed_attachments(weeks_ago: int) -> None:
    """Purge attachments unclaimed for at least `weeks_ago` weeks, deleting
    both the stored file and the database row."""
    for old_attachment in get_old_unclaimed_attachments(weeks_ago):
        delete_message_image(old_attachment.path_id)
        old_attachment.delete()
def check_attachment_reference_change(prev_content: str, message: Message) -> None:
    """After a message edit, unlink uploads that are no longer referenced and
    claim any newly referenced ones."""
    old_urls = set(attachment_url_re.findall(prev_content))
    new_urls = set(attachment_url_re.findall(message.content))

    removed_path_ids = [attachment_url_to_path_id(url)
                        for url in old_urls - new_urls]
    stale_attachments = Attachment.objects.filter(
        path_id__in=removed_path_ids).select_for_update()
    message.attachment_set.remove(*stale_attachments)

    if new_urls - old_urls:
        do_claim_attachments(message)
def notify_realm_custom_profile_fields(realm: Realm, operation: str) -> None:
    """Broadcast the realm's full custom-profile-field list to active users."""
    current_fields = custom_profile_fields_for_realm(realm.id)
    event = dict(
        type="custom_profile_fields",
        op=operation,
        fields=[field.as_dict() for field in current_fields],
    )
    send_event(realm, event, active_user_ids(realm.id))
def try_add_realm_default_custom_profile_field(realm: Realm,
                                               field_subtype: str) -> CustomProfileField:
    """Create one of the predefined EXTERNAL_ACCOUNT profile fields (from
    DEFAULT_EXTERNAL_ACCOUNTS, keyed by `field_subtype`) and notify clients."""
    field_data = DEFAULT_EXTERNAL_ACCOUNTS[field_subtype]
    field = CustomProfileField(realm=realm, name=field_data['name'],
                               field_type=CustomProfileField.EXTERNAL_ACCOUNT,
                               hint=field_data['hint'],
                               field_data=ujson.dumps(dict(subtype=field_subtype)))
    field.save()
    # New fields sort last: order is keyed off the (monotonic) row id.
    field.order = field.id
    field.save(update_fields=['order'])
    notify_realm_custom_profile_fields(realm, 'add')
    return field
def try_add_realm_custom_profile_field(realm: Realm, name: str, field_type: int,
                                       hint: str='',
                                       field_data: Optional[ProfileFieldData]=None) -> CustomProfileField:
    """Create a custom profile field for the realm and notify clients."""
    field = CustomProfileField(realm=realm, name=name, field_type=field_type)
    field.hint = hint
    # CHOICE and EXTERNAL_ACCOUNT fields carry extra configuration in field_data.
    if (field.field_type == CustomProfileField.CHOICE or
            field.field_type == CustomProfileField.EXTERNAL_ACCOUNT):
        field.field_data = ujson.dumps(field_data or {})
    field.save()
    # New fields sort last: order is keyed off the (monotonic) row id.
    field.order = field.id
    field.save(update_fields=['order'])
    notify_realm_custom_profile_fields(realm, 'add')
    return field
def do_remove_realm_custom_profile_field(realm: Realm, field: CustomProfileField) -> None:
    """Delete one custom profile field and broadcast the change.

    Deleting the field cascades to the per-user values stored in the
    CustomProfileFieldValue model.
    """
    field.delete()
    notify_realm_custom_profile_fields(realm, 'delete')
def do_remove_realm_custom_profile_fields(realm: Realm) -> None:
    """Drop every custom profile field in the realm (cascades to user values)."""
    CustomProfileField.objects.filter(realm=realm).delete()
def try_update_realm_custom_profile_field(realm: Realm, field: CustomProfileField,
                                          name: str, hint: str='',
                                          field_data: Optional[ProfileFieldData]=None) -> None:
    """Update a custom profile field's name/hint/config and notify clients."""
    field.name = name
    field.hint = hint
    # CHOICE and EXTERNAL_ACCOUNT fields carry extra configuration in field_data.
    if (field.field_type == CustomProfileField.CHOICE or
            field.field_type == CustomProfileField.EXTERNAL_ACCOUNT):
        field.field_data = ujson.dumps(field_data or {})
    field.save()
    notify_realm_custom_profile_fields(realm, 'update')
def try_reorder_realm_custom_profile_fields(realm: Realm, order: List[int]) -> None:
    """Re-number field.order so fields sort per `order` (a list of field ids).

    Raises JsonableError, before saving anything, if any realm field is
    missing from `order`.
    """
    position_of = {field_id: position for position, field_id in enumerate(order)}
    fields = CustomProfileField.objects.filter(realm=realm)
    # Validate the full mapping up front so we never apply a partial reorder.
    if any(field.id not in position_of for field in fields):
        raise JsonableError(_("Invalid order mapping."))
    for field in fields:
        field.order = position_of[field.id]
        field.save(update_fields=['order'])
    notify_realm_custom_profile_fields(realm, 'update')
def notify_user_update_custom_profile_data(user_profile: UserProfile,
                                           field: Dict[str, Union[int, str, List[int], None]]) -> None:
    """Broadcast one user's changed custom-profile-field value realm-wide."""
    data = dict(id=field['id'])
    if field['type'] == CustomProfileField.USER:
        # USER fields hold a list of user ids; serialize for the wire format.
        data["value"] = ujson.dumps(field['value'])
    else:
        data['value'] = field['value']
    if field['rendered_value']:
        data['rendered_value'] = field['rendered_value']
    payload = dict(user_id=user_profile.id, custom_profile_field=data)
    event = dict(type="realm_user", op="update", person=payload)
    send_event(user_profile.realm, event, active_user_ids(user_profile.realm.id))
def do_update_user_custom_profile_data_if_changed(user_profile: UserProfile,
                                                  data: List[Dict[str, Union[int, str, List[int]]]]
                                                  ) -> None:
    """Atomically write changed custom-profile-field values for the user,
    rendering renderable fields, and notify the realm about each change.
    Unchanged values are skipped entirely (no write, no event)."""
    with transaction.atomic():
        for field in data:
            field_value, created = CustomProfileFieldValue.objects.get_or_create(
                user_profile=user_profile,
                field_id=field['id'])
            if not created and field_value.value == str(field['value']):
                # If the field value isn't actually being changed to a different one,
                # and always_notify is disabled, we have nothing to do here for this field.
                # Note: field_value.value is a TextField() so we need to cast field['value']
                # to a string for the comparison in this if.
                continue
            field_value.value = field['value']
            if field_value.field.is_renderable():
                field_value.rendered_value = render_stream_description(str(field['value']))
                field_value.save(update_fields=['value', 'rendered_value'])
            else:
                field_value.save(update_fields=['value'])
            notify_user_update_custom_profile_data(user_profile, {
                "id": field_value.field_id,
                "value": field_value.value,
                "rendered_value": field_value.rendered_value,
                "type": field_value.field.field_type})
def check_remove_custom_profile_field_value(user_profile: UserProfile,
                                            field_id: Union[int, str, List[int]]
                                            ) -> None:
    """Delete the user's value for one custom profile field and notify clients.

    Raises JsonableError for an unknown field id; silently succeeds when the
    user simply had no value set for the field.
    """
    try:
        field = CustomProfileField.objects.get(realm=user_profile.realm, id=field_id)
        field_value = CustomProfileFieldValue.objects.get(field=field, user_profile=user_profile)
        field_value.delete()
        notify_user_update_custom_profile_data(user_profile, {'id': field_id,
                                                              'value': None,
                                                              'rendered_value': None,
                                                              'type': field.field_type})
    except CustomProfileField.DoesNotExist:
        raise JsonableError(_('Field id {id} not found.').format(id=field_id))
    except CustomProfileFieldValue.DoesNotExist:
        pass
def do_send_create_user_group_event(user_group: UserGroup, members: List[UserProfile]) -> None:
    """Announce a newly created user group to all active users in its realm."""
    group_payload = dict(
        id=user_group.id,
        name=user_group.name,
        description=user_group.description,
        members=[member.id for member in members],
    )
    event = dict(type="user_group", op="add", group=group_payload)
    send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def check_add_user_group(realm: Realm, name: str, initial_members: List[UserProfile],
                         description: str) -> None:
    """Create a user group and announce it; raise JsonableError on name clash."""
    try:
        user_group = create_user_group(name, initial_members, realm, description=description)
        do_send_create_user_group_event(user_group, initial_members)
    except django.db.utils.IntegrityError:
        # Unique constraint on (realm, name) was violated.
        raise JsonableError(_("User group '%s' already exists.") % (name,))
def do_send_user_group_update_event(user_group: UserGroup, data: Dict[str, Any]) -> None:
    """Broadcast a partial update (`data`) for a user group to its realm."""
    send_event(user_group.realm,
               dict(type="user_group", op='update',
                    group_id=user_group.id, data=data),
               active_user_ids(user_group.realm_id))
def do_update_user_group_name(user_group: UserGroup, name: str) -> None:
    """Rename a user group and broadcast the update; raise JsonableError on
    name clash."""
    try:
        user_group.name = name
        user_group.save(update_fields=['name'])
    except django.db.utils.IntegrityError:
        # Unique constraint on (realm, name) was violated.
        raise JsonableError(_("User group '%s' already exists.") % (name,))
    do_send_user_group_update_event(user_group, dict(name=name))
def do_update_user_group_description(user_group: UserGroup, description: str) -> None:
    """Change a user group's description and broadcast the update."""
    user_group.description = description
    user_group.save(update_fields=['description'])
    do_send_user_group_update_event(user_group, dict(description=description))
def do_update_outgoing_webhook_service(bot_profile: UserProfile,
                                       service_interface: int,
                                       service_payload_url: str) -> None:
    """Update an outgoing-webhook bot's service URL and interface, and
    notify the bot's owners."""
    # TODO: First service is chosen because currently one bot can only have one service.
    # Update this once multiple services are supported.
    service = get_bot_services(bot_profile.id)[0]
    service.base_url = service_payload_url
    service.interface = service_interface
    service.save()
    send_event(bot_profile.realm,
               dict(type='realm_bot',
                    op='update',
                    bot=dict(email=bot_profile.email,
                             user_id=bot_profile.id,
                             services = [dict(base_url=service.base_url,
                                              interface=service.interface,
                                              token=service.token,)],
                             ),
                    ),
               bot_owner_user_ids(bot_profile))
def do_update_bot_config_data(bot_profile: UserProfile,
                              config_data: Dict[str, str]) -> None:
    """Merge `config_data` into an embedded bot's config and notify owners
    with the full resulting config."""
    for key, value in config_data.items():
        set_bot_config(bot_profile, key, value)
    updated_config_data = get_bot_config(bot_profile)
    send_event(bot_profile.realm,
               dict(type='realm_bot',
                    op='update',
                    bot=dict(email=bot_profile.email,
                             user_id=bot_profile.id,
                             services = [dict(config_data=updated_config_data)],
                             ),
                    ),
               bot_owner_user_ids(bot_profile))
def get_service_dicts_for_bot(user_profile_id: int) -> List[Dict[str, Any]]:
    """Return the service dicts (outgoing-webhook details or embedded-bot
    config) for a single bot, identified by its user id.

    Note: the parameter was previously mis-annotated as `str`; every use
    treats it as an integer user id.
    """
    user_profile = get_user_profile_by_id(user_profile_id)
    services = get_bot_services(user_profile_id)
    service_dicts = []  # type: List[Dict[str, Any]]
    if user_profile.bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
        service_dicts = [{'base_url': service.base_url,
                          'interface': service.interface,
                          'token': service.token,
                          }
                         for service in services]
    elif user_profile.bot_type == UserProfile.EMBEDDED_BOT:
        try:
            service_dicts = [{'config_data': get_bot_config(user_profile),
                              'service_name': services[0].name
                              }]
        # A ConfigError just means that there are no config entries for user_profile.
        except ConfigError:
            pass
    return service_dicts
def get_service_dicts_for_bots(bot_dicts: List[Dict[str, Any]],
                               realm: Realm) -> Dict[int, List[Dict[str, Any]]]:
    """Bulk variant of get_service_dicts_for_bot: map bot id -> service dicts,
    fetching all Service rows and embedded-bot configs in bulk queries."""
    bot_profile_ids = [bot_dict['id'] for bot_dict in bot_dicts]
    bot_services_by_uid = defaultdict(list)  # type: Dict[int, List[Service]]
    for service in Service.objects.filter(user_profile_id__in=bot_profile_ids):
        bot_services_by_uid[service.user_profile_id].append(service)
    embedded_bot_ids = [bot_dict['id'] for bot_dict in bot_dicts
                        if bot_dict['bot_type'] == UserProfile.EMBEDDED_BOT]
    embedded_bot_configs = get_bot_configs(embedded_bot_ids)
    service_dicts_by_uid = {}  # type: Dict[int, List[Dict[str, Any]]]
    for bot_dict in bot_dicts:
        bot_profile_id = bot_dict["id"]
        bot_type = bot_dict["bot_type"]
        services = bot_services_by_uid[bot_profile_id]
        service_dicts = []  # type: List[Dict[str, Any]]
        if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
            service_dicts = [{'base_url': service.base_url,
                              'interface': service.interface,
                              'token': service.token,
                              }
                             for service in services]
        elif bot_type == UserProfile.EMBEDDED_BOT:
            # Embedded bots without a stored config get an empty list.
            if bot_profile_id in embedded_bot_configs.keys():
                bot_config = embedded_bot_configs[bot_profile_id]
                service_dicts = [{'config_data': bot_config,
                                  'service_name': services[0].name
                                  }]
        service_dicts_by_uid[bot_profile_id] = service_dicts
    return service_dicts_by_uid
def get_owned_bot_dicts(user_profile: UserProfile,
                        include_all_realm_bots_if_admin: bool=True) -> List[Dict[str, Any]]:
    """Return API-format dicts for the bots the user owns (or, for realm
    admins, optionally every bot in the realm), including service configs."""
    if user_profile.is_realm_admin and include_all_realm_bots_if_admin:
        result = get_bot_dicts_in_realm(user_profile.realm)
    else:
        result = UserProfile.objects.filter(realm=user_profile.realm, is_bot=True,
                                            bot_owner=user_profile).values(*bot_dict_fields)
    services_by_ids = get_service_dicts_for_bots(result, user_profile.realm)
    return [{'email': botdict['email'],
             'user_id': botdict['id'],
             'full_name': botdict['full_name'],
             'bot_type': botdict['bot_type'],
             'is_active': botdict['is_active'],
             'api_key': botdict['api_key'],
             'default_sending_stream': botdict['default_sending_stream__name'],
             'default_events_register_stream': botdict['default_events_register_stream__name'],
             'default_all_public_streams': botdict['default_all_public_streams'],
             'owner': botdict['bot_owner__email'],
             'avatar_url': avatar_url_from_dict(botdict),
             'services': services_by_ids[botdict['id']],
             }
            for botdict in result]
def do_send_user_group_members_update_event(event_name: str,
                                            user_group: UserGroup,
                                            user_ids: List[int]) -> None:
    """Broadcast a membership change (`event_name` is 'add_members' or
    'remove_members') for a user group to its realm."""
    event = dict(
        type="user_group",
        op=event_name,
        group_id=user_group.id,
        user_ids=user_ids,
    )
    send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def bulk_add_members_to_user_group(user_group: UserGroup,
                                   user_profiles: List[UserProfile]) -> None:
    """Add all of ``user_profiles`` to the group in one query, then notify."""
    memberships = []
    for profile in user_profiles:
        memberships.append(UserGroupMembership(user_group_id=user_group.id,
                                               user_profile=profile))
    UserGroupMembership.objects.bulk_create(memberships)
    member_ids = [profile.id for profile in user_profiles]
    do_send_user_group_members_update_event('add_members', user_group, member_ids)
def remove_members_from_user_group(user_group: UserGroup,
                                   user_profiles: List[UserProfile]) -> None:
    """Delete the users' memberships in the group, then notify the realm."""
    memberships = UserGroupMembership.objects.filter(
        user_group_id=user_group.id,
        user_profile__in=user_profiles)
    memberships.delete()
    member_ids = [profile.id for profile in user_profiles]
    do_send_user_group_members_update_event('remove_members', user_group, member_ids)
def do_send_delete_user_group_event(realm: Realm, user_group_id: int,
                                    realm_id: int) -> None:
    """Broadcast the removal of a user group to every active realm user."""
    payload = {
        'type': "user_group",
        'op': "remove",
        'group_id': user_group_id,
    }
    send_event(realm, payload, active_user_ids(realm_id))
def check_delete_user_group(user_group_id: int, user_profile: UserProfile) -> None:
    """Delete a user group after an access check, then notify the realm."""
    group = access_user_group_by_id(user_group_id, user_profile)
    group.delete()
    realm = user_profile.realm
    do_send_delete_user_group_event(realm, user_group_id, realm.id)
def missing_any_realm_internal_bots() -> bool:
    """Return True if any realm is missing one of the shared internal bots.

    Each internal bot should exist once per realm; we compare the per-email
    account count against the total number of realms.
    """
    emails = []
    for bot in settings.REALM_INTERNAL_BOTS:
        emails.append(bot['email_template'] % (settings.INTERNAL_BOT_DOMAIN,))
    rows = (UserProfile.objects.filter(email__in=emails)
            .values_list('email')
            .annotate(Count('id')))
    counts_by_email = dict(rows)
    num_realms = Realm.objects.count()
    for email in emails:
        if counts_by_email.get(email, 0) < num_realms:
            return True
    return False
def do_send_realm_reactivation_email(realm: Realm) -> None:
    """Email the realm's administrators a confirmation link to reactivate it."""
    confirmation_url = create_confirmation_link(realm, realm.host,
                                                Confirmation.REALM_REACTIVATION)
    context = {
        'confirmation_url': confirmation_url,
        'realm_uri': realm.uri,
        'realm_name': realm.name,
    }
    send_email_to_admins(
        'zerver/emails/realm_reactivation', realm,
        from_address=FromAddress.tokenized_no_reply_address(),
        from_name="Zulip Account Security", context=context)
def get_zoom_video_call_url(realm: Realm) -> str:
    """Create a Zoom meeting for the realm; return its join URL, or '' on failure."""
    response = request_zoom_video_call_url(realm.zoom_user_id,
                                           realm.zoom_api_key,
                                           realm.zoom_api_secret)
    if response is not None:
        return response['join_url']
    return ''
def notify_realm_export(user_profile: UserProfile) -> None:
    """Push the current export list to the requesting user.

    In the future, we may want to send this event to all realm admins.
    """
    payload = {
        'type': 'realm_export',
        'exports': get_realm_exports_serialized(user_profile),
    }
    send_event(user_profile.realm, payload, [user_profile.id])
def do_delete_realm_export(user_profile: UserProfile, export: RealmAuditLog) -> None:
    """Delete an export tarball, record the deletion time, and notify the user."""
    extra_data = export.extra_data
    # Hint for mypy: ujson.loads must not be handed an Optional[str].
    assert extra_data is not None
    export_data = ujson.loads(extra_data)
    delete_export_tarball(export_data.get('export_path'))
    export_data['deleted_timestamp'] = timezone_now().timestamp()
    export.extra_data = ujson.dumps(export_data)
    export.save(update_fields=['extra_data'])
    notify_realm_export(user_profile)
| 43.952747 | 126 | 0.656508 |
acf91a76e7e3bf392f88f6ebd6d13018e146766d | 263 | py | Python | backend/api/volunteer.py | covmunity/Covmunity | b8a642724decd591c01baa1374acfa813596120f | [
"MIT"
] | 4 | 2020-03-28T15:16:50.000Z | 2020-05-13T22:56:53.000Z | backend/api/volunteer.py | covmunity/Covmunity | b8a642724decd591c01baa1374acfa813596120f | [
"MIT"
] | null | null | null | backend/api/volunteer.py | covmunity/Covmunity | b8a642724decd591c01baa1374acfa813596120f | [
"MIT"
] | null | null | null | from flask import Blueprint
# API for volunteer management
bp = Blueprint('volunteer', __name__, url_prefix='/volunteer')
@bp.route('/request', methods=('GET', 'POST'))
def request():
    """Volunteer help-request endpoint (stub).

    NOTE(review): not implemented yet — returning None makes Flask raise at
    runtime. The name would also shadow flask.request if that is ever
    imported in this module; consider renaming before implementing.
    """
    pass
@bp.route('/offer', methods=('GET', 'POST'))
def offer():
    """Volunteer help-offer endpoint (stub).

    NOTE(review): not implemented yet — returning None makes Flask raise at
    runtime; implement before wiring into production.
    """
    pass
| 20.230769 | 62 | 0.669202 |
acf91a7b3d6ce117982b93b0fa8f9e9860d67819 | 15,626 | py | Python | WebServer.py | roykim98/PlexConnect | 51a781d7ce0566b2a4766bb53e7ce2604c056c0a | [
"MIT"
] | null | null | null | WebServer.py | roykim98/PlexConnect | 51a781d7ce0566b2a4766bb53e7ce2604c056c0a | [
"MIT"
] | null | null | null | WebServer.py | roykim98/PlexConnect | 51a781d7ce0566b2a4766bb53e7ce2604c056c0a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Sources:
http://fragments.turtlemeat.com/pythonwebserver.php
http://www.linuxjournal.com/content/tech-tip-really-simple-http-server-python
...stackoverflow.com and such
after 27Aug - Apple's switch to https:
- added https WebServer with SSL encryption - needs valid (private) vertificate on aTV and server
- for additional information see http://langui.sh/2013/08/27/appletv-ssl-plexconnect/
Thanks to reaperhulk for showing this solution!
"""
import sys
import string, cgi, time
from os import sep, path
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn
import ssl
from multiprocessing import Pipe # inter process communication
import urllib, StringIO, gzip
import signal
import traceback
import datetime
import Settings, ATVSettings
from Debug import * # dprint()
import XMLConverter # XML_PMS2aTV, XML_PlayVideo
import re
import Localize
import Subtitle
# Directory the PlexConnect settings files are read from (working directory).
CONFIG_PATH = "."
# Process-wide parameter dict (IP addresses, settings objects); filled by setParams().
g_param = {}
def setParams(param):
    # Store the shared runtime parameters for this process (used by the
    # request handler and JSConverter below).
    global g_param
    g_param = param
def JSConverter(file, options):
    """Load a JavaScript asset and expand PlexConnect placeholders.

    Every ``{{URL(<path>)}}`` token is replaced with the configured baseURL
    plus <path>; the result is then localized for the requesting aTV's
    language and returned as a UTF-8 encoded byte string.
    """
    # 'with' guarantees the handle is closed even if read() raises
    # (the original leaked the descriptor on error).
    with open(sys.path[0] + "/assets/js/" + file) as f:
        JS = f.read()
    # PlexConnect {{URL()}}->baseURL
    # Loop variable renamed from 'path' so it no longer shadows os.path,
    # which this module imports at top level.
    for url_path in set(re.findall(r'\{\{URL\((.*?)\)\}\}', JS)):
        JS = JS.replace('{{URL(%s)}}' % url_path, g_param['baseURL'] + url_path)
    # localization
    JS = Localize.replaceTEXT(JS, options['aTVLanguage']).encode('utf-8')
    return JS
class MyHandler(BaseHTTPRequestHandler):
    """Request handler translating aTV HTTP requests into PlexConnect responses.

    Serves static assets (certificates, JavaScript, icons, images), forwards
    subtitle requests, and hands everything else to XMLConverter for
    PMS-to-aTV XML translation. Non-AppleTV clients get a simple up page.
    """

    # Fixes slow serving speed under Windows
    def address_string(self):
        # Skip the reverse-DNS lookup the base class would perform per request.
        host, port = self.client_address[:2]
        #return socket.getfqdn(host)
        return host

    def log_message(self, format, *args):
        # Silence the default per-request stderr logging; dprint() is used instead.
        pass

    def compress(self, data):
        """Gzip-compress *data* in memory and return the compressed bytes."""
        buf = StringIO.StringIO()
        zfile = gzip.GzipFile(mode='wb', fileobj=buf, compresslevel=9)
        zfile.write(data)
        zfile.close()
        return buf.getvalue()

    def sendResponse(self, data, type, enableGzip):
        """Send a 200 response of the given content type.

        The body is gzip-encoded only when the caller allows it, the
        'allow_gzip_atv' setting is enabled, and the client accepts gzip.
        """
        self.send_response(200)
        self.send_header('Server', 'PlexConnect')
        self.send_header('Content-type', type)
        try:
            accept_encoding = map(string.strip, string.split(self.headers["accept-encoding"], ","))
        except KeyError:
            accept_encoding = []
        if enableGzip and \
           g_param['CSettings'].getSetting('allow_gzip_atv')=='True' and \
           'gzip' in accept_encoding:
            self.send_header('Content-encoding', 'gzip')
            self.end_headers()
            self.wfile.write(self.compress(data))
        else:
            self.end_headers()
            self.wfile.write(data)

    def do_GET(self):
        """Parse the request path/query and dispatch to the matching handler.

        Path layout: an optional '/PMS(<address>)' prefix selects the Plex
        Media Server; 'PlexConnect*' query parameters become *options*,
        while all other parameters are re-assembled into *query* to be
        forwarded upstream.
        """
        global g_param
        try:
            dprint(__name__, 2, "http request header:\n{0}", self.headers)
            dprint(__name__, 2, "http request path:\n{0}", self.path)

            # check for PMS address
            PMSaddress = ''
            pms_end = self.path.find(')')
            if self.path.startswith('/PMS(') and pms_end>-1:
                PMSaddress = urllib.unquote_plus(self.path[5:pms_end])
                self.path = self.path[pms_end+1:]

            # break up path, separate PlexConnect options
            # clean path needed for filetype decoding
            parts = re.split(r'[?&]', self.path, 1)  # should be '?' only, but we do some things different :-)
            if len(parts)==1:
                self.path = parts[0]
                options = {}
                query = ''
            else:
                self.path = parts[0]

                # break up query string
                options = {}
                query = ''
                parts = parts[1].split('&')
                for part in parts:
                    if part.startswith('PlexConnect'):
                        # get options[]
                        opt = part.split('=', 1)
                        if len(opt)==1:
                            options[opt[0]] = ''
                        else:
                            options[opt[0]] = urllib.unquote(opt[1])
                    else:
                        # recreate query string (non-PlexConnect) - has to be merged back when forwarded
                        if query=='':
                            query = '?' + part
                        else:
                            query += '&' + part

            # get aTV language setting
            options['aTVLanguage'] = Localize.pickLanguage(self.headers.get('Accept-Language', 'en'))
            query = query.replace("yyltyy", "<").replace("yygtyy", ">")

            # add client address - to be used in case UDID is unknown
            if 'X-Forwarded-For' in self.headers:
                options['aTVAddress'] = self.headers['X-Forwarded-For'].split(',', 1)[0]
            else:
                options['aTVAddress'] = self.client_address[0]

            # get aTV hard-/software parameters
            options['aTVFirmwareVersion'] = self.headers.get('X-Apple-TV-Version', '5.1')
            options['aTVScreenResolution'] = self.headers.get('X-Apple-TV-Resolution', '720')

            dprint(__name__, 2, "pms address:\n{0}", PMSaddress)
            dprint(__name__, 2, "cleaned path:\n{0}", self.path)
            dprint(__name__, 2, "PlexConnect options:\n{0}", options)
            dprint(__name__, 2, "additional arguments:\n{0}", query)

            # Only AppleTV clients get the real content; see the else branch.
            if 'User-Agent' in self.headers and \
               'AppleTV' in self.headers['User-Agent']:

                # recieve simple logging messages from the ATV
                if 'PlexConnectATVLogLevel' in options:
                    dprint('ATVLogger', int(options['PlexConnectATVLogLevel']), options['PlexConnectLog'])
                    self.send_response(200)
                    self.send_header('Content-type', 'text/plain')
                    self.end_headers()
                    return

                # serve "*.cer" - Serve up certificate file to atv
                if self.path.endswith(".cer"):
                    dprint(__name__, 1, "serving *.cer: "+self.path)
                    if g_param['CSettings'].getSetting('certfile').startswith('.'):
                        # relative to current path
                        cfg_certfile = sys.path[0] + sep + g_param['CSettings'].getSetting('certfile')
                    else:
                        # absolute path
                        cfg_certfile = g_param['CSettings'].getSetting('certfile')
                    cfg_certfile = path.normpath(cfg_certfile)
                    cfg_certfile = path.splitext(cfg_certfile)[0] + '.cer'

                    try:
                        f = open(cfg_certfile, "rb")
                    except:
                        dprint(__name__, 0, "Failed to access certificate: {0}", cfg_certfile)
                        return

                    self.sendResponse(f.read(), 'text/xml', False)
                    f.close()
                    return

                # serve .js files to aTV
                # application, main: ignore path, send /assets/js/application.js
                # otherwise: path should be '/js', send /assets/js/*.js
                dirname = path.dirname(self.path)
                basename = path.basename(self.path)
                if basename in ("application.js", "main.js", "javascript-packed.js", "bootstrap.js") or \
                   basename.endswith(".js") and dirname == '/js':
                    if basename in ("main.js", "javascript-packed.js", "bootstrap.js"):
                        basename = "application.js"
                    dprint(__name__, 1, "serving /js/{0}", basename)
                    JS = JSConverter(basename, options)
                    self.sendResponse(JS, 'text/javascript', True)
                    return

                # proxy phobos.apple.com to support PlexConnect main icon
                if "a1.phobos.apple.com" in self.headers['Host']:
                    resource = self.headers['Host']+self.path
                    icon = g_param['CSettings'].getSetting('icon')
                    if basename.startswith(icon):
                        icon_res = basename[len(icon):]  # cut string from settings, keeps @720.png/@1080.png
                        resource = sys.path[0] + '/assets/icons/icon'+icon_res
                        dprint(__name__, 1, "serving "+self.headers['Host']+self.path+" with "+resource)
                        r = open(resource, "rb")
                    else:
                        r = urllib.urlopen('http://'+resource)
                    self.sendResponse(r.read(), 'image/png', False)
                    r.close()
                    return

                # serve "*.jpg" - thumbnails for old-style mainpage
                if self.path.endswith(".jpg"):
                    dprint(__name__, 1, "serving *.jpg: "+self.path)
                    f = open(sys.path[0] + sep + "assets" + self.path, "rb")
                    self.sendResponse(f.read(), 'image/jpeg', False)
                    f.close()
                    return

                # serve "*.png" - only png's support transparent colors
                if self.path.endswith(".png"):
                    dprint(__name__, 1, "serving *.png: "+self.path)
                    f = open(sys.path[0] + sep + "assets" + self.path, "rb")
                    self.sendResponse(f.read(), 'image/png', False)
                    f.close()
                    return

                # serve subtitle file - transcoded to aTV subtitle json
                if 'PlexConnect' in options and \
                   options['PlexConnect']=='Subtitle':
                    dprint(__name__, 1, "serving subtitle: "+self.path)
                    XML = Subtitle.getSubtitleJSON(PMSaddress, self.path + query, options)
                    self.sendResponse(XML, 'application/json', True)
                    return

                # get everything else from XMLConverter - formerly limited to trailing "/" and &PlexConnect Cmds
                if True:
                    dprint(__name__, 1, "serving .xml: "+self.path)
                    XML = XMLConverter.XML_PMS2aTV(PMSaddress, self.path + query, options)
                    self.sendResponse(XML, 'text/xml', True)
                    return

                """
                # unexpected request
                self.send_error(403,"Access denied: %s" % self.path)
                """
            else:
                """
                Added Up Page for docker helthcheck
                self.send_error(403,"Not Serving Client %s" % self.client_address[0])
                """
                dprint(__name__, 1, "serving *.html: "+self.path)
                f = open(sys.path[0] + sep + "assets/templates/up.html")
                self.sendResponse(f.read(), 'text/html', False)
                f.close()

        except IOError:
            dprint(__name__, 0, 'File Not Found:\n{0}', traceback.format_exc())
            self.send_error(404,"File Not Found: %s" % self.path)
        except:
            dprint(__name__, 0, 'Internal Server Error:\n{0}', traceback.format_exc())
            self.send_error(500,"Internal Server Error: %s" % self.path)
# Mixing ThreadingMixIn into HTTPServer makes every request run on its own
# worker thread, so one slow aTV client cannot stall the others.
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """Handle requests in a separate thread."""
def Run(cmdPipe, param):
    """Serve plain HTTP until 'shutdown' arrives on *cmdPipe* (or Ctrl-C).

    Intended to run as a child process; *param* carries the shared settings
    (CSettings/CATVSettings objects, IP, ports).
    """
    if not __name__ == '__main__':
        signal.signal(signal.SIGINT, signal.SIG_IGN)
    dinit(__name__, param)  # init logging, WebServer process

    cfg_IP_WebServer = param['IP_self']
    cfg_Port_WebServer = param['CSettings'].getSetting('port_webserver')
    try:
        server = ThreadedHTTPServer((cfg_IP_WebServer,int(cfg_Port_WebServer)), MyHandler)
        # 1s timeout so the serve loop below can poll the command pipe regularly.
        server.timeout = 1
    except Exception, e:
        dprint(__name__, 0, "Failed to connect to HTTP on {0} port {1}: {2}", cfg_IP_WebServer, cfg_Port_WebServer, e)
        sys.exit(1)

    socketinfo = server.socket.getsockname()

    dprint(__name__, 0, "***")
    dprint(__name__, 0, "WebServer: Serving HTTP on {0} port {1}.", socketinfo[0], socketinfo[1])
    dprint(__name__, 0, "***")

    setParams(param)
    XMLConverter.setParams(param)
    XMLConverter.setATVSettings(param['CATVSettings'])

    try:
        while True:
            # check command
            if cmdPipe.poll():
                cmd = cmdPipe.recv()
                if cmd=='shutdown':
                    break

            # do your work (with timeout)
            server.handle_request()

    except KeyboardInterrupt:
        signal.signal(signal.SIGINT, signal.SIG_IGN)  # we heard you!
        dprint(__name__, 0,"^C received.")
    finally:
        dprint(__name__, 0, "Shutting down (HTTP).")
        server.socket.close()
def Run_SSL(cmdPipe, param):
    """Serve HTTPS until 'shutdown' arrives on *cmdPipe* (or Ctrl-C).

    Same loop as Run(), but wraps the listening socket with SSL using the
    certificate configured under the 'certfile' setting.
    """
    if not __name__ == '__main__':
        signal.signal(signal.SIGINT, signal.SIG_IGN)
    dinit(__name__, param)  # init logging, WebServer process

    cfg_IP_WebServer = param['IP_self']
    cfg_Port_SSL = param['CSettings'].getSetting('port_ssl')

    if param['CSettings'].getSetting('certfile').startswith('.'):
        # relative to current path
        cfg_certfile = sys.path[0] + sep + param['CSettings'].getSetting('certfile')
    else:
        # absolute path
        cfg_certfile = param['CSettings'].getSetting('certfile')
    cfg_certfile = path.normpath(cfg_certfile)

    # Probe the certificate for readability up front so we can fail fast
    # with a clear message instead of an SSL handshake error later.
    try:
        certfile = open(cfg_certfile, 'r')
    except:
        dprint(__name__, 0, "Failed to access certificate: {0}", cfg_certfile)
        sys.exit(1)
    certfile.close()

    try:
        server = ThreadedHTTPServer((cfg_IP_WebServer,int(cfg_Port_SSL)), MyHandler)
        server.socket = ssl.wrap_socket(server.socket, certfile=cfg_certfile, server_side=True)
        # 1s timeout so the serve loop below can poll the command pipe regularly.
        server.timeout = 1
    except Exception, e:
        dprint(__name__, 0, "Failed to connect to HTTPS on {0} port {1}: {2}", cfg_IP_WebServer, cfg_Port_SSL, e)
        sys.exit(1)

    socketinfo = server.socket.getsockname()

    dprint(__name__, 0, "***")
    dprint(__name__, 0, "WebServer: Serving HTTPS on {0} port {1}.", socketinfo[0], socketinfo[1])
    dprint(__name__, 0, "***")

    setParams(param)
    XMLConverter.setParams(param)
    XMLConverter.setATVSettings(param['CATVSettings'])

    try:
        while True:
            # check command
            if cmdPipe.poll():
                cmd = cmdPipe.recv()
                if cmd=='shutdown':
                    break

            # do your work (with timeout)
            server.handle_request()

    except KeyboardInterrupt:
        signal.signal(signal.SIGINT, signal.SIG_IGN)  # we heard you!
        dprint(__name__, 0,"^C received.")
    finally:
        dprint(__name__, 0, "Shutting down (HTTPS).")
        server.socket.close()
if __name__=="__main__":
    # Standalone entry point: build the parameter dict normally supplied by
    # the PlexConnect supervisor, then run HTTP (default) or HTTPS ('SSL' arg).
    cmdPipe = Pipe()

    cfg = Settings.CSettings(CONFIG_PATH)
    param = {}
    param['CSettings'] = cfg

    param['CATVSettings'] = ATVSettings.CATVSettings(CONFIG_PATH)

    # NOTE(review): 'os' is not imported directly in this module — it appears
    # to rely on being re-exported via 'from Debug import *'. Confirm, or add
    # an explicit 'import os'.
    param['IP_self'] = os.getenv("IP_SELF", "192.168.1.18")
    param['baseURL'] = 'http://'+ param['IP_self'] +':'+ cfg.getSetting('port_webserver')
    param['HostToIntercept'] = cfg.getSetting('hosttointercept')

    if len(sys.argv)==1:
        Run(cmdPipe[1], param)
    elif len(sys.argv)==2 and sys.argv[1]=='SSL':
        Run_SSL(cmdPipe[1], param)
| 38.774194 | 118 | 0.539357 |
acf91a95be128b62fe1539e16d0465b7bc9845a8 | 434 | py | Python | mmdet/models/detectors/tood.py | VietDunghacker/mmdetection | 9e97878b2c5247bebe8ec406752941ffc8083871 | [
"Apache-2.0"
] | null | null | null | mmdet/models/detectors/tood.py | VietDunghacker/mmdetection | 9e97878b2c5247bebe8ec406752941ffc8083871 | [
"Apache-2.0"
] | null | null | null | mmdet/models/detectors/tood.py | VietDunghacker/mmdetection | 9e97878b2c5247bebe8ec406752941ffc8083871 | [
"Apache-2.0"
] | null | null | null | from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class TOOD(SingleStageDetector):
"""Implementation of `TOOD`_."""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg= None):
super(TOOD, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
| 22.842105 | 66 | 0.700461 |
acf91bd1f2ed0e9797f8b0c757912d6705f59627 | 3,286 | py | Python | station/settings.py | kmes055/AutoDrawer_server | b6fd31ba448834c75a06762e92a3e64ddace5034 | [
"MIT"
] | null | null | null | station/settings.py | kmes055/AutoDrawer_server | b6fd31ba448834c75a06762e92a3e64ddace5034 | [
"MIT"
] | null | null | null | station/settings.py | kmes055/AutoDrawer_server | b6fd31ba448834c75a06762e92a3e64ddace5034 | [
"MIT"
] | null | null | null | """
Django settings for station project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from an environment variable before any production deployment.
SECRET_KEY = '!+oiw-=+v_8p53lwwuf$mtf%dktucp#flq%hm=&=gjqx1aqsx-'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# NOTE(review): pinned to a single LAN IP — adjust per deployment.
ALLOWED_HOSTS = ['172.16.20.133']

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'station.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'station.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'

# Upload/Download files
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')

# Metadata for AutoDrawer (MAX_USER is defined on the following line).
MAX_USER = 15 | 25.083969 | 91 | 0.695374 |
acf91c9a63c97cc6cf4ca07b7eabd4a150fe7b8e | 3,716 | py | Python | testBool.py | palvaro/ldfi-py | d19db77ba2d942cb1e37a2f3eb544b89877bcd63 | [
"MIT"
] | 4 | 2016-06-27T11:19:57.000Z | 2021-07-13T09:11:30.000Z | testBool.py | palvaro/ldfi-py | d19db77ba2d942cb1e37a2f3eb544b89877bcd63 | [
"MIT"
] | null | null | null | testBool.py | palvaro/ldfi-py | d19db77ba2d942cb1e37a2f3eb544b89877bcd63 | [
"MIT"
] | 3 | 2017-11-10T21:39:57.000Z | 2021-02-10T17:54:31.000Z | import unittest2
import pbool
import psat
import pilp
from pytest import *
class MyTest(unittest2.TestCase):
    """Exercises formula generation, CNF conversion, and the SAT/ILP solvers.

    Methods whose names start with 'Ntest'/'NNNtest' instead of 'test' are
    deliberately disabled (unittest2 only discovers 'test*' methods).
    Note: this file uses Python 2 print statements.
    """

    def basic(self):
        # Fixture: (a AND b) OR (c AND d)
        return pbool.OrFormula(pbool.AndFormula(pbool.Literal("a"), pbool.Literal("b")), pbool.AndFormula(pbool.Literal("c"), pbool.Literal("d")))

    def bare_disj(self):
        # Fixture: A OR B OR C OR D, built as nested binary disjunctions.
        return pbool.OrFormula(pbool.Literal("A"), pbool.OrFormula(pbool.Literal("B"), pbool.OrFormula(pbool.Literal("C"), pbool.Literal("D"))))

    def test_size(self):
        # Deeper generated formulas should have more clauses; depth and
        # variable count are fixed by the generator parameters.
        fg = pbool.FormulaGenerator(10, 8)
        fst = fg.formula(10)
        snd = fg.formula(20)
        #nprint "snd " + str(len(snd.variables()))
        assert(snd.clauses() > fst.clauses())
        assert(snd.depth() == 20)
        assert(fst.depth() == 10)
        assert(len(snd.variables()) == 11)

    def test_simpleConvert(self):
        # CNF conversion of the basic fixture must produce a CNF formula.
        base = self.basic()
        #pbool.OrFormula(pbool.AndFormula(pbool.Literal("a"), pbool.Literal("b")), pbool.AndFormula(pbool.Literal("c"), pbool.Literal("d")))
        cnf = pbool.CNFFormula(base)
        print str(cnf.formula)
        assert(cnf.formula.isCNF())

    def test_convert(self):
        # CNF conversion preserves the variable set and yields CNF form.
        fg = pbool.FormulaGenerator(10, 8)
        fst = fg.formula(7)
        c = pbool.CNFFormula(fst)
        cnf = c.formula
        assert(cnf.variables() == fst.variables())
        #assert(cnf.clauses() > fst.clauses())
        #assert(cnf.depth() > fst.depth())
        assert(not fst.isCNF())
        assert(cnf.isCNF())
        c = cnf.conjuncts()

    def Ntest_solve(self):
        # Disabled: prints minimal SAT solutions for a generated formula.
        fg = pbool.FormulaGenerator(50, 8)
        fst = fg.formula(4)
        cnf = pbool.CNFFormula(fst)
        s = psat.Solver(cnf)
        for soln in s.minimal_solutions():
            print "SOLN1 " + str(soln)

    def Ntest_basic_solve(self):
        # Disabled: prints minimal SAT solutions for the basic fixture.
        base = self.basic()
        cnf = pbool.CNFFormula(base)
        s = psat.Solver(cnf)
        for soln in s.minimal_solutions():
            print "SOLN " + str(soln)

    def Ntest_basic_ilp(self):
        # Disabled: prints ILP solutions for the basic fixture.
        base = self.basic()
        cnf = pbool.CNFFormula(base)
        s = pilp.Solver(cnf)
        for soln in s.solutions():
            print "SOL: " + str(soln)

    def test_disj(self):
        # A 4-way disjunction is a single clause with 2^4 - 1 satisfying
        # assignments.
        disj = self.bare_disj()
        assert(disj.clauses() == 7)
        cnf = pbool.CNFFormula(disj)
        assert(len(cnf.conjuncts()) == 1)
        s = pilp.Solver(cnf)
        assert(len(list(s.solutions())) == 15)

    def NNNtest_big_ilp(self):
        # Disabled: large ILP instance, kept for manual benchmarking.
        fg = pbool.FormulaGenerator(100, 8)
        fst = fg.formula(8)
        cnf = pbool.CNFFormula(fst)
        s = pilp.Solver(cnf)
        for soln in s.solutions():
            print "SOL: " + str(soln)

    def test_ilp_sat_equivalence(self):
        # Compares SAT vs ILP solutions; the final equivalence assertion is
        # commented out, so this currently only prints the comparison.
        fg = pbool.FormulaGenerator(20, 8)
        fst = fg.formula(4)
        cnf = pbool.CNFFormula(fst)
        s = psat.Solver(cnf)
        i = pilp.Solver(cnf)
        ssols = sorted(list(s.solutions()))
        isols = sorted(list(i.solutions()))
        print "LENs " + str(len(ssols)) + " vs " + str(len(isols))
        print "SOLs " + str(ssols) + " vs " + str(isols)
        #assert(ssols == isols)

    def test_prob(self):
        # With an empty probability map, ProbSolver must agree with the
        # plain ILP solver; then exercise it with explicit probabilities.
        fg = pbool.FormulaGenerator(20, 8)
        fst = fg.formula(4)
        cnf = pbool.CNFFormula(fst)
        i = pilp.Solver(cnf)
        p = pilp.ProbSolver(cnf, {})
        isols = list(i.solutions())
        psols = list(p.solutions())
        print("SOL1 " + str(isols))
        print("SOL2 " + str(psols))
        assert(isols == psols)
        p2 = pilp.ProbSolver(cnf, {"I2":0.0001, "I9":0.000001, "I4":0.000000000001})
        p2sols = list(p2.solutions())
        print("SOL3 " + str(p2sols))
if __name__ == '__main__':
    # Run the suite directly; unittest2 discovers the test_* methods above.
    unittest2.main()
| 27.731343 | 148 | 0.561356 |
acf91cc12552ceef4227e5f9e038a03ac2ae6a09 | 2,247 | py | Python | Algorithms/classification/Fuzzy_C_Means.py | L4RBI/python-language | 12bf522b2c0c6df5fd05e09b0a46c3aab4f4361b | [
"MIT"
] | 15 | 2020-10-03T19:28:51.000Z | 2022-02-16T23:41:27.000Z | Algorithms/classification/Fuzzy_C_Means.py | L4RBI/python-language | 12bf522b2c0c6df5fd05e09b0a46c3aab4f4361b | [
"MIT"
] | 12 | 2020-10-15T18:48:22.000Z | 2020-11-02T15:55:27.000Z | Algorithms/classification/Fuzzy_C_Means.py | L4RBI/python-language | 12bf522b2c0c6df5fd05e09b0a46c3aab4f4361b | [
"MIT"
] | 31 | 2020-10-12T07:40:01.000Z | 2021-05-16T18:52:57.000Z | from matplotlib.image import imread
import matplotlib.pyplot as plt
from math import sqrt
import math
import random
import numpy
import operator
from scipy.spatial.distance import cdist
from scipy.linalg import norm
import datetime
# https://en.wikipedia.org/wiki/Fuzzy_clustering
class FuzzyCMeans:
    """Fuzzy C-Means clustering (soft k-means with fuzziness exponent m).

    See https://en.wikipedia.org/wiki/Fuzzy_clustering for the algorithm.
    """

    def __init__(self, n_clusters, initial_centers, data, max_iter=250, m=2, error=1e-5):
        # m == 1 degenerates to hard k-means and breaks the 2/(m-1)
        # exponent used in membership(), so require m strictly above 1.
        assert m > 1
        #assert initial_centers.shape[0] == n_clusters
        self.U = None                   # membership matrix, filled in by compute()
        self.centers = initial_centers
        self.max_iter = max_iter
        self.m = m
        self.error = error
        self.data = data

    def membership(self, data, centers):
        """Return the (n_samples, n_clusters) fuzzy membership matrix.

        U[i, j] = 1 / sum_k (d_ij / d_ik)^(2/(m-1)) with Euclidean d.
        """
        exponent = 2.0 / (self.m - 1)
        dist_pow = numpy.power(cdist(data, centers, 'euclidean'), exponent)
        # Broadcasting builds ratio[i, j, k] = dist_pow[i, j] / dist_pow[i, k].
        ratio = dist_pow[:, :, numpy.newaxis] / dist_pow[:, numpy.newaxis, :]
        return 1.0 / ratio.sum(axis=2)

    def Centers(self, data, U):
        """Recompute cluster centers as membership^m-weighted means of data."""
        weights = U ** self.m
        return (weights.T @ data) / weights.sum(axis=0)[:, numpy.newaxis]

    def newImage(self, U, centers, im):
        """Map each gray level (0..255) of *im* onto its best cluster center.

        NOTE(review): uses self.U (presumably one membership row per gray
        level — confirm against the caller), not the U argument, mirroring
        the original implementation.
        """
        best = numpy.argmax(self.U, axis=-1)
        # print(best)
        # numpy.round()
        result = im.astype(int)
        for gray in range(256):
            # Sequential replacement: later gray levels may rewrite pixels
            # already replaced by earlier iterations (intentionally kept).
            result = numpy.where(result == float(gray), centers[best[gray]][0], result)
        return result

    def compute(self):
        """Alternate membership/center updates until the membership matrix
        stops changing (Frobenius norm below self.error) or max_iter is hit.

        Returns (centers, membership matrix U, elapsed wall-clock time).
        """
        self.U = self.membership(self.data, self.centers)
        previous_U = numpy.copy(self.U)
        started = datetime.datetime.now()
        for _ in range(self.max_iter):
            self.centers = self.Centers(self.data, self.U)
            self.U = self.membership(self.data, self.centers)
            if norm(self.U - previous_U) < self.error:
                break
            previous_U = numpy.copy(self.U)
        elapsed = datetime.datetime.now() - started
        return self.centers, self.U, elapsed
# that's how you run it, data being your data, and the other parameters being the basic FCM parameters such as numbe rof cluseters, degree of fuzziness and so on
# f = FuzzyCMeans(n_clusters=C, initial_centers=Initial_centers,
# data=data m=2, max_iter=1000, error=1e-5)
# centers, U, time = f.compute()
| 33.044118 | 161 | 0.624388 |
acf91d03b0fd8000d547924feffe57c8450bd0a4 | 4,008 | py | Python | source/components/menu.py | janzmazek/Wave-propagation | 9176555f4b1b8a93be3fcc502f4f5094c9bb927b | [
"MIT"
] | 1 | 2019-12-17T19:20:11.000Z | 2019-12-17T19:20:11.000Z | source/components/menu.py | janzmazek/Wave-propagation | 9176555f4b1b8a93be3fcc502f4f5094c9bb927b | [
"MIT"
] | null | null | null | source/components/menu.py | janzmazek/Wave-propagation | 9176555f4b1b8a93be3fcc502f4f5094c9bb927b | [
"MIT"
] | null | null | null | import tkinter as tk
class Menu(tk.Menu):
    """Menu bar of the "view" part of the MVC pattern.

    Builds File/Window/Tools/Help cascades whose entries forward user
    choices to the controller reachable through the view.
    """

    def __init__(self, view, *args, **kwargs):
        super(Menu, self).__init__(view, *args, **kwargs)
        self.view = view

        self.file_menu = tk.Menu(self, tearoff=0)
        self.add_cascade(label="File", menu=self.file_menu)
        self.add_file_menu()

        self.window_menu = tk.Menu(self, tearoff=0)
        self.add_cascade(label="Window", menu=self.window_menu)
        self.add_window_menu()

        self.tools_menu = tk.Menu(self, tearoff=0)
        self.add_cascade(label="Tools", menu=self.tools_menu)
        self.add_tools_menu()

        self.help_menu = tk.Menu(self, tearoff=0)
        self.add_cascade(label="Help", menu=self.help_menu)
        self.add_help_menu()

    def add_file_menu(self):
        """Populate the File menu (import/export/draw/background actions)."""
        entries = (("Import network", "import_network"),
                   ("Export network", "export_network"),
                   ("Draw network", "draw_network"),
                   ("Set background", "set_background"),
                   ("Remove background", "remove_background"))
        for label, action in entries:
            # Bind 'action' as a default argument so each lambda keeps its
            # own value instead of closing over the loop variable.
            self.file_menu.add_command(
                label=label,
                command=lambda action=action: self.view.controller.file_click(action))

    def add_window_menu(self):
        """Populate the Window menu with the three canvas sizes."""
        for label, size in (("Small", "small"),
                            ("Medium", "medium"),
                            ("Large", "large")):
            self.window_menu.add_command(
                label=label,
                command=lambda size=size: self.view.controller.window_click(size))

    def add_tools_menu(self):
        """Populate the Tools menu with the editing tool sets."""
        entries = (("Create network", "CreationTools"),
                   ("Move streets", "MovingTools"),
                   ("Delete street", "DeletingTools"),
                   ("Modify network", "ModifyingTools"),
                   ("Customise streets", "CustomisingTools"))
        for label, tools in entries:
            self.tools_menu.add_command(
                label=label,
                command=lambda tools=tools: self.view.controller.tools_click(tools))

    def add_help_menu(self):
        """Populate the Help menu."""
        self.help_menu.add_command(
            label="About",
            command=lambda: self.view.controller.about_click())
        self.help_menu.add_command(
            label="I don't like background",
            command=self.view.change_background)
acf91d181364786fdbc3262edd55859875e782a8 | 195 | py | Python | roscraco/router/tplink/wr720n.py | spantaleev/roscraco | 87a5a7c54931d5586fd7d30c8c67a699bef69c1f | [
"BSD-3-Clause"
] | 13 | 2015-03-01T00:39:43.000Z | 2020-09-06T09:32:52.000Z | roscraco/router/tplink/wr720n.py | spantaleev/roscraco | 87a5a7c54931d5586fd7d30c8c67a699bef69c1f | [
"BSD-3-Clause"
] | 3 | 2015-08-08T01:34:35.000Z | 2017-05-14T11:07:50.000Z | roscraco/router/tplink/wr720n.py | spantaleev/roscraco | 87a5a7c54931d5586fd7d30c8c67a699bef69c1f | [
"BSD-3-Clause"
] | 11 | 2015-01-29T03:21:08.000Z | 2020-06-30T17:05:19.000Z | from .wr740n import Tplink_WR740N
class Tplink_WR720N(Tplink_WR740N):
    """TP-Link TL-WR720N support; reuses the WR740N implementation entirely."""
    def confirm_identity(self):
        # Identify the device by checking the router's WWW-Authenticate header
        # against the TL-WR720N Basic realm string.
        self._ensure_www_auth_header('Basic realm="150Mbps Wireless N Router TL-WR720N"')
| 24.375 | 89 | 0.769231 |
acf91d93eec3c2da4ba6701bfb4f0c0b920aafcc | 21 | py | Python | mycode/NRMS-Pytorch/__init__.py | ConnollyLeon/recommenders | 6ada3b6b71380660fec353c11db752b4637aebf5 | [
"MIT"
] | null | null | null | mycode/NRMS-Pytorch/__init__.py | ConnollyLeon/recommenders | 6ada3b6b71380660fec353c11db752b4637aebf5 | [
"MIT"
] | null | null | null | mycode/NRMS-Pytorch/__init__.py | ConnollyLeon/recommenders | 6ada3b6b71380660fec353c11db752b4637aebf5 | [
"MIT"
] | null | null | null | from .net import NPA
| 10.5 | 20 | 0.761905 |
acf91e61e254457e7e56a0048770ae181da431a8 | 10,335 | py | Python | testscripts/RDKB/component/ccspcommon_mbus/TS_CCSPCOMMON_MBUS_BusCheck.py | cablelabs/tools-tdkb | 1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69 | [
"Apache-2.0"
] | null | null | null | testscripts/RDKB/component/ccspcommon_mbus/TS_CCSPCOMMON_MBUS_BusCheck.py | cablelabs/tools-tdkb | 1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69 | [
"Apache-2.0"
] | null | null | null | testscripts/RDKB/component/ccspcommon_mbus/TS_CCSPCOMMON_MBUS_BusCheck.py | cablelabs/tools-tdkb | 1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69 | [
"Apache-2.0"
] | null | null | null | ##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2016 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>4</version>
<name>TS_CCSPCOMMON_MBUS_BusCheck</name>
<primitive_test_id/>
<primitive_test_name>CCSPMBUS_BusCheck</primitive_test_name>
<primitive_test_version>1</primitive_test_version>
<status>FREE</status>
<synopsis/>
<groups_id/>
<execution_time>1</execution_time>
<long_duration>false</long_duration>
<remarks>RDKB-108 Currently there is no support in RDK-B stack for other CCSP components to invoke this API</remarks>
<skip>false</skip>
<box_types>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_CCSPMBUS_17</test_case_id>
<test_objective>To Validate Ccsp Base Interface Bus Check Function</test_objective>
<test_type>Positive</test_type>
<test_setup>Emulator,
XB3</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state else invoke cosa_start.sh manually that includes all the ccsp components and TDK Component.
2.TDK Agent should be in running state or invoke it through StartTdk.sh script.</pre_requisite>
<api_or_interface_used>None</api_or_interface_used>
<input_parameters>Json Interface:
API Name
CCSPMBUS_BusCheck
Input
N/A</input_parameters>
<automation_approch>1.Configure the Function info in Test Manager GUI which needs to be tested
(CCSPMBUS_BusCheck - func name - "If not exists already"
ccspcommon_mbus - module name
Necessary I/P args as Mentioned in Input)
2.Python Script will be generated/overridden automatically by Test Manager with provided arguments in configure page (TS_CCSPCOMMON_MBUS_BusCheck.py)
3.Execute the generated Script(TS_CCSPCOMMON_MBUS_BusCheck.py) using execution page of Test Manager GUI
4.mbusstub which is a part of TDK Agent process, will be in listening mode to execute TDK Component function named CCSPMBUS_BusCheck through registered TDK mbusstub function along with necessary Entry Values as arguments
5.CCSPMBUS_BusCheck function will call ssp_mbus_init and other neccessary init functions,then it calls ssp_mbus_bus_check function that inturn will call CCSP Base Interface Function named CcspBaseIf_busCheck along with necessary input arguments which is under test to return success status from Component Registry (CR) through Message Bus
6.Responses(printf) from TDK Component,Ccsp Library function and mbusstub would be logged in Agent Console log based on the debug info redirected to agent console
7.mbusstub will validate the available result (from ssp_mbus_bus_check as CCSP_Message_Bus_OK [100] ) with expected result (CCSP_Message_Bus_OK [100]) and the result is updated in agent console log and json output variable
8.ssp_mbus_exit function is invoked by CCSPMBUS_BusCheck to close the bus handle created by ssp_mbus_init and returns the updated results to Test Manager
9.TestManager will publish the result in GUI as PASS/FAILURE based on the response from CCSPMBUS_BusCheck function</automation_approch>
<except_output>CheckPoint 1:
Return Status from DUT should be available in Agent Console Log
CheckPoint 2:
TDK agent Test Function will log the test case result as PASS based on API response
CheckPoint 3:
TestManager GUI will publish the result as PASS in Execution page</except_output>
<priority>High</priority>
<test_stub_interface>None</test_stub_interface>
<test_script>TS_CCSPCOMMON_MBUS_BusCheck</test_script>
<skipped>Yes</skipped>
<release_version/>
<remarks/>
</test_cases>
</xml>
'''
#use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("ccspcommon_mbus","RDKB");
#IP and Port of box, No need to change,
#This will be replaced with correspoing Box Ip and port while executing script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_CCSPCOMMON_MBUS_BusCheck');
#Get the result of connection with test component and STB
loadModuleresult =obj.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s\n" %loadModuleresult;
loadStatusExpected = "SUCCESS"
if loadStatusExpected not in loadModuleresult.upper():
print "[Failed To Load MBUS Agent Stub from env TDK_PATH]"
print "[Exiting the Script]"
exit();
#Primitive test case which associated to this Script
tdkTestObj = obj.createTestStep('CCSPMBUS_LoadCfg');
#Input Parameters
tdkTestObj.addParameter("cmpCfgFile","TDKB.cfg");
expectedresult = "SUCCESS";
#Execute the test case in STB
tdkTestObj.executeTestCase(expectedresult);
#Get the result of execution
actualresult = tdkTestObj.getResult();
print "\n[TEST ACTUAL RESULT] : %s" %actualresult ;
resultDetails = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution as success
tdkTestObj.setResultStatus("SUCCESS");
print "\nMessage Bus Load Config is SUCCESS"
else:
#Set the result status of execution as failure
tdkTestObj.setResultStatus("FAILURE");
print "\nMessage Bus Load Config is FAILURE"
obj.unloadModule("ccspcommon_mbus");
exit();
print "\n[TEST EXECUTION RESULT] : %s\n" %resultDetails ;
#Primitive test case which associated to this Script
tdkTestObj = obj.createTestStep('CCSPMBUS_Init');
#Input Parameters
tdkTestObj.addParameter("cfgfileName","/tmp/ccsp_msg.cfg");
#Execute the test case in STB
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
print "\n[TEST ACTUAL RESULT] : %s" %actualresult ;
resultDetails = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution as success
tdkTestObj.setResultStatus("SUCCESS");
print "\nMessage Bus Initialization is SUCCESS"
else:
#Set the result status of execution as failure
tdkTestObj.setResultStatus("FAILURE");
print "\nMessage Bus Initialization is FAILURE"
obj.unloadModule("ccspcommon_mbus");
exit();
print "\n[TEST EXECUTION RESULT] : %s\n" %resultDetails ;
#Primitive test case which associated to this Script
tdkTestObj = obj.createTestStep('CCSPMBUS_RegisterPath');
#Input Parameters - Nil
#Execute the test case in STB
tdkTestObj.executeTestCase(expectedresult);
#Get the result of execution
actualresult = tdkTestObj.getResult();
print "\n[TEST ACTUAL RESULT] : %s" %actualresult ;
resultDetails = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution as success
tdkTestObj.setResultStatus("SUCCESS");
print "\nMessage Bus Register Path Function is Success"
else:
#Set the result status of execution as failure
tdkTestObj.setResultStatus("FAILURE");
print "\nMessage Bus Register Path Function is FAILURE"
obj.unloadModule("ccspcommon_mbus");
exit();
print "\n[TEST EXECUTION RESULT] : %s\n" %resultDetails ;
#Primitive test case which associated to this Script
tdkTestObj = obj.createTestStep('CCSPMBUS_RegisterCapabilities');
#Input Parameters
#Nil
#Execute the test case in STB
tdkTestObj.executeTestCase(expectedresult);
#Get the result of execution
actualresult = tdkTestObj.getResult();
print "\n[TEST ACTUAL RESULT] : %s" %actualresult ;
resultDetails = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution as success
tdkTestObj.setResultStatus("SUCCESS");
print "\nCcsp Base Interface Register Capabilities Function is SUCCESS"
else:
#Set the result status of execution as failure
tdkTestObj.setResultStatus("FAILURE");
print "\nCcsp Base Interface Register Capabilities Function is FAILURE"
obj.unloadModule("ccspcommon_mbus");
exit();
print "\n[TEST EXECUTION RESULT] : %s\n" %resultDetails ;
#Primitive test case which associated to this Script
tdkTestObj = obj.createTestStep('CCSPMBUS_BusCheck');
#Input Parameters
#Nil
#Execute the test case in STB
tdkTestObj.executeTestCase(expectedresult);
#Get the result of execution
actualresult = tdkTestObj.getResult();
print "\n[TEST ACTUAL RESULT] : %s" %actualresult ;
resultDetails = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution as success
tdkTestObj.setResultStatus("SUCCESS");
print "\nCcsp Base API for Bus Check Function is SUCCESS"
else:
#Set the result status of execution as failure
tdkTestObj.setResultStatus("FAILURE");
print "\nCcsp Base API for Bus Check Function is FAILURE"
obj.unloadModule("ccspcommon_mbus");
exit();
print "\n[TEST EXECUTION RESULT] : %s\n" %resultDetails ;
#Primitive test case which associated to this Script
tdkTestObj = obj.createTestStep('CCSPMBUS_Exit');
#Input Parameters - Nil
#Execute the test case in STB
tdkTestObj.executeTestCase(expectedresult);
#Get the result of execution
actualresult = tdkTestObj.getResult();
print "\n[TEST ACTUAL RESULT] : %s" %actualresult ;
resultDetails = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution as success
print "\nMessage Bus De-Initialization/Exit is SUCCESS"
else:
#Set the result status of execution as failure
print "\nMessage Bus De-Initialization/Exit is FAILURE"
print "\n[TEST EXECUTION RESULT] : %s\n" %resultDetails ;
obj.unloadModule("ccspcommon_mbus");
| 37.718978 | 338 | 0.747654 |
acf91ea53f630b94107877809b475ee2116867be | 7,788 | py | Python | trainer/text_summarization_trainer.py | bhadreshpsavani/TAPER-EHR | ab938749756fcaaef52a7002a074421f483e3562 | [
"MIT"
] | 12 | 2020-04-10T02:24:20.000Z | 2021-11-09T22:52:24.000Z | trainer/text_summarization_trainer.py | bhadreshpsavani/TAPER-EHR | ab938749756fcaaef52a7002a074421f483e3562 | [
"MIT"
] | 7 | 2020-05-03T10:03:29.000Z | 2022-02-09T23:38:21.000Z | trainer/text_summarization_trainer.py | bhadreshpsavani/TAPER-EHR | ab938749756fcaaef52a7002a074421f483e3562 | [
"MIT"
] | 10 | 2020-06-14T09:37:35.000Z | 2022-02-04T22:21:16.000Z | import numpy as np
import torch
from torchvision.utils import make_grid
from base import BaseTrainer
from model.gru_ae import *
import random
class TextSummTrainer(BaseTrainer):
"""
Trainer class
Note:
Inherited from BaseTrainer.
"""
def __init__(self, model, loss, metrics, optimizer, resume, config,
data_loader, valid_data_loader=None, lr_scheduler=None, train_logger=None):
super(TextSummTrainer, self).__init__(model, loss, metrics, optimizer, resume, config, train_logger)
self.config = config
self.data_loader = data_loader
self.valid_data_loader = valid_data_loader
self.do_validation = self.valid_data_loader is not None
self.lr_scheduler = lr_scheduler
self.log_step = config['trainer'].get('log_step', int(np.sqrt(data_loader.batch_size)))
def _eval_metrics(self, output, target):
acc_metrics = np.zeros(len(self.metrics))
for i, metric in enumerate(self.metrics):
acc_metrics[i] += metric(output, target)
self.writer.add_scalar(f'{metric.__name__}', acc_metrics[i])
return acc_metrics
def _train_epoch(self, epoch):
"""
Training logic for an epoch
:param epoch: Current training epoch.
:return: A log that contains all information you want to save.
Note:
If you have additional information to record, for example:
> additional_log = {"x": x, "y": y}
merge it with log before return. i.e.
> log = {**log, **additional_log}
> return log
The metrics in log must have the key 'metrics'.
"""
self.model.train()
total_loss = 0
total_metrics = np.zeros(len(self.metrics))
self.model.encoder.set_device(self.device)
self.model.decoder.set_device(self.device)
for batch_idx, data in enumerate(self.data_loader):
x, i_s, b_is = list(map(lambda x: x.to(self.device), data))
#x, i_s, b_is = x.to(self.device), i_s.to(self.device), b_is.to(self.device)
self.optimizer.zero_grad()
encoder_outputs = torch.zeros(x.shape[0], self.model.encoder.hidden_size, device=self.device)
encoder_hidden = self.model.encoder.init_hidden(x.shape[1])
encoder_outputs = self.model.encoder(x, encoder_hidden, i_s, b_is)
decoder_hidden = self.model.decoder.init_hidden(x.shape[1])
decoder_output = self.model.decoder(encoder_outputs, decoder_hidden)
#use_teacher_forcing = True if random.random() < self.model.teacher_forcing_ratio else False
#if use_teacher_forcing:
# # Teacher forcing: Feed the target as the next input
# for di in range(target_length):
# decoder_output, decoder_hidden, attn_weights = self.model.decoder(
# decoder_input, decoder_hidden, encoder_outputs)
# loss += self.loss(decoder_output, x[di])
# decoder_input = x[di]#encoder_outputs[di] # Teacher forcing
#else:
# # Without teacher forcing: use its own predictions as the next input
# for di in range(target_length):
# decoder_output, decoder_hidden, attn_weights = self.model.decoder(
# decoder_input, decoder_hidden, encoder_outputs)
# #topv, topi = decoder_output.topk(1)
# decoder_input = decoder_output.detach()#decoder_output.detach()#topi.squeeze().detach() # detach from history as input
# loss += self.loss(decoder_output, x[di])
loss = self.loss(decoder_output, x.detach(), i_s.detach())
loss.backward()
self.optimizer.step()
self.writer.set_step((epoch - 1) * len(self.data_loader) + batch_idx)
self.writer.add_scalar('loss', loss.item())
loss = loss.detach()
total_loss += loss
if self.verbosity >= 2 and batch_idx % self.log_step == 0:
self.logger.info('Train Epoch: {} [{}/{} ({:.0f}%)] {}: {:.6f}'.format(
epoch,
batch_idx * self.data_loader.batch_size,
self.data_loader.n_samples,
100.0 * batch_idx / len(self.data_loader),
'loss', loss))
log = {
'loss': total_loss / len(self.data_loader),
'metrics': total_metrics / len(self.data_loader),
}
if self.do_validation:
val_log = self._valid_epoch(epoch)
log = {**log, **val_log}
if self.lr_scheduler is not None:
self.lr_scheduler.step()
return log
def _valid_epoch(self, epoch):
"""
Validate after training an epoch
:return: A log that contains information about validation
Note:
The validation metrics in log must have the key 'val_metrics'.
"""
self.model.eval()
total_val_loss = 0
total_val_metrics = np.zeros(len(self.metrics))
with torch.no_grad():
for batch_idx, data in enumerate(self.valid_data_loader):
x, i_s, b_is = list(map(lambda x: x.to(self.device), data))
#x, i_s, b_is = x.to(self.device), i_s.to(self.device), b_is.to(self.device)
#encoder_optimizer.zero_grad()
#decoder_optimizer.zero_grad()
#input_length = i_s.item()
#target_length = input_length #target_tensor.size(0)
#max length x.shape[0]
encoder_outputs = torch.zeros(x.shape[0], self.model.encoder.hidden_size, device=self.device)
self.model.encoder.set_device(self.device)
self.model.decoder.set_device(self.device)
encoder_hidden = self.model.encoder.init_hidden(x.shape[1])
encoder_outputs = self.model.encoder(x, encoder_hidden, i_s, b_is)
decoder_input = torch.zeros_like(x[0])#encoder_outputs[ei]
decoder_hidden = self.model.decoder.init_hidden(x.shape[1])
output = self.model.decoder(encoder_outputs, decoder_hidden)
#for ei in range(input_length):
# encoder_output, encoder_hidden = self.model.encoder(
# x[ei], encoder_hidden)
# encoder_outputs[ei] = encoder_output[0, 0]
#decoder_input = torch.zeros_like(x[0])#encoder_hidden #encoder_outputs[ei]
#decoder_hidden = self.model.decoder.init_hidden()
## Without teacher forcing: use its own predictions as the next input
#for di in range(target_length):
# decoder_output, decoder_hidden, _ = self.model.decoder(
# decoder_input, decoder_hidden, encoder_outputs)
# #topv, topi = decoder_output.topk(1)
# decoder_input = decoder_output.detach()#decoder_output.detach()#topi.squeeze().detach() # detach from history as input
#loss += self.loss(decoder_output, x[di])
#loss = (1.0 / i_s.item()) * loss
loss = self.loss(output, x, i_s)
self.writer.set_step((epoch - 1) * len(self.valid_data_loader) + batch_idx, 'valid')
self.writer.add_scalar('loss', loss.item())
total_val_loss += loss.item()
return {
'val_loss': total_val_loss / len(self.valid_data_loader),
'val_metrics': (total_val_metrics / len(self.valid_data_loader)).tolist()
}
| 44 | 140 | 0.587314 |
acf91f8806361d7c6043781256bb6aa783914be1 | 3,615 | py | Python | ensemble_linear_predict.py | artyompal/imet | 75d708392237a1392ac5fa2f598a747408a88f19 | [
"Apache-2.0"
] | 5 | 2019-06-11T09:11:56.000Z | 2020-05-06T16:05:26.000Z | ensemble_linear_predict.py | artyompal/imet | 75d708392237a1392ac5fa2f598a747408a88f19 | [
"Apache-2.0"
] | null | null | null | ensemble_linear_predict.py | artyompal/imet | 75d708392237a1392ac5fa2f598a747408a88f19 | [
"Apache-2.0"
] | 2 | 2019-06-12T14:14:35.000Z | 2019-07-18T15:06:14.000Z | #!/usr/bin/python3.6
import os
import pickle
import re
import sys
import yaml
from glob import glob
from collections import OrderedDict
from typing import Any, List
import numpy as np
import pandas as pd
from scipy import optimize
from scipy.stats import describe
from tqdm import tqdm
from sklearn.metrics import fbeta_score
from debug import dprint
# True when running inside a Kaggle kernel (selects input paths and silences tqdm).
IN_KERNEL = os.environ.get('KAGGLE_WORKING_DIR') is not None
INPUT_PATH = '../input/imet-2019-fgvc6/' if IN_KERNEL else '../input/'
# NOTE(review): NUM_ATTEMPTS appears unused in this script -- confirm before removing.
NUM_ATTEMPTS = 100
# Number of cross-validation folds expected per model.
NUM_FOLDS = 5
# Number of iMet attribute classes (one weight vector per class).
NUM_CLASSES = 1103
# Directory holding the per-model threshold .yml files.
THRESHOLDS_PATH = '../yml/' if not IN_KERNEL else '../input/imet-yaml/yml/'
# If True, shift each model's predictions by its tuned threshold before blending.
ADD_THRESHOLD = True
def parse_labels(s: str, num_classes=None) -> np.ndarray:
    """Convert a space-separated string of class indices into a 0/1 indicator vector.

    Args:
        s: whitespace-separated class indices, e.g. ``"12 907"`` (may be empty,
            which yields an all-zero vector).
        num_classes: length of the returned vector. Defaults to the module-level
            ``NUM_CLASSES`` constant, preserving the original behavior; exposed
            as a parameter so the helper is not tied to this script's dataset.

    Returns:
        A float vector of shape ``(num_classes,)`` with 1.0 at each listed index.
    """
    # Note: the original annotated the return as ``np.array`` (a function,
    # not a type); ``np.ndarray`` is the correct annotation.
    if num_classes is None:
        num_classes = NUM_CLASSES
    res = np.zeros(num_classes)
    res[list(map(int, s.split()))] = 1
    return res
def load_weights(weights_file: str, num_predicts=None, num_classes=None) -> List[np.ndarray]:
    """Parse the per-class blending weights produced by the ensemble search.

    The file contains one entry per class of the form
    ``class=K weights=[w0 w1 ... wM b] f2=...``; an entry may wrap across
    physical lines, so lines without ``[`` are glued onto the previous entry.

    Args:
        weights_file: path to the text file with the fitted coefficients.
        num_predicts: expected number of level-1 models; each weight vector
            must have ``num_predicts + 1`` entries (the last one is the bias).
            Defaults to ``level1_predicts.shape[2]`` for backward compatibility
            with the original code -- NOTE that this global is only defined in
            the ``__main__`` block, which was a hidden dependency; prefer
            passing it explicitly.
        num_classes: expected number of entries in the file; defaults to the
            module-level ``NUM_CLASSES``.

    Returns:
        A list of ``num_classes`` 1-D float arrays, each of length
        ``num_predicts + 1``.
    """
    if num_classes is None:
        num_classes = NUM_CLASSES

    lines = []
    with open(weights_file) as f:
        for line in f:
            if '[' in line:
                lines.append(line)
            else:
                # Continuation of a wrapped weights vector.
                lines[-1] += line

    assert len(lines) == num_classes

    if num_predicts is None:
        # Legacy behavior: infer the model count from the global level-1
        # predictions tensor built by the __main__ block.
        num_predicts = level1_predicts.shape[2]

    weights = []
    for class_, line in enumerate(lines):
        m = re.match(r'class=\d+ weights=\[((.|\n)*)\] f2=.+', line)
        assert m

        w = np.array(list(map(float, m.group(1).split())))
        assert w.size == num_predicts + 1
        weights.append(w)

    return weights
if __name__ == '__main__':
    np.set_printoptions(linewidth=120)
    # Expected CLI: result.npy coeffs.txt predict1.npy [predict2.npy ...]
    if len(sys.argv) < 5:
        print(f'usage: {sys.argv[0]} result.npy coeffs.txt predict1.npy ...')
        sys.exit()

    all_predicts_list, all_thresholds = [], []
    # Only the fold-0 prediction file of each model is passed on the command
    # line; the remaining folds are located on disk via glob below.
    predicts = sorted(sys.argv[3:])
    # NOTE(review): test_df appears unused below -- confirm before removing.
    test_df = pd.read_csv(INPUT_PATH + 'sample_submission.csv')

    for filename in predicts:
        assert 'level1_test_' in filename and '_f0_' in filename
        # Strip the _f<fold>_e<epoch> suffix to recover the model path prefix.
        m = re.match(r'(.*)_f(\d)_e\d+.*\.npy', filename)
        assert m
        model_path = m.group(1)

        fold_predicts = []
        for fold in range(NUM_FOLDS):
            filenames = glob(f'{model_path}_f{fold}_*.npy')
            if len(filenames) != 1:
                dprint(filenames)
                assert False # the model must be unique in this fold

            filename = filenames[0]
            print('reading', filename)
            # load data: this fold's test-set predictions
            data = np.load(filename)

            if ADD_THRESHOLD:
                # read this model's tuned threshold from the matching .yml and
                # shift the fold's predictions by it before averaging
                filename = os.path.basename(filename)
                assert filename.startswith('level1_test_') and filename.endswith('.npy')
                # filename[12:-4] drops the 'level1_test_' prefix and '.npy' suffix
                with open(os.path.join(THRESHOLDS_PATH, filename[12:-4] + '.yml')) as f:
                    threshold = yaml.load(f, Loader=yaml.SafeLoader)['threshold']
                all_thresholds.append(threshold)
                data = data + threshold

            fold_predicts.append(data)

        # Average the folds of this model into a single prediction matrix.
        predict = np.mean(np.dstack(fold_predicts), axis=2)
        all_predicts_list.append(predict)

    # Stack per-model predictions along axis 2: (samples, classes, models).
    level1_predicts = np.dstack(all_predicts_list)
    dprint(level1_predicts.shape)

    # One weight vector per class; the last element is the bias term.
    weights = load_weights(sys.argv[2])
    assert len(weights) == NUM_CLASSES

    # Blend the level-1 model scores into a single level-2 score per class.
    level2_predicts = np.zeros((level1_predicts.shape[0], NUM_CLASSES))
    for sample in tqdm(range(level1_predicts.shape[0]), disable=IN_KERNEL):
        for class_ in range(NUM_CLASSES):
            x = level1_predicts[sample, class_]
            w = weights[class_]
            level2_predicts[sample, class_] = np.dot(w[:-1], x) + w[-1]

    # Undo the mean threshold shift that was applied to the level-1 inputs.
    gold_threshold = np.mean(all_thresholds) if ADD_THRESHOLD else 0
    level2_predicts -= gold_threshold
    np.save(sys.argv[1], level2_predicts)
| 29.153226 | 88 | 0.619364 |
acf91ffad4458a41342b03b5ec36518960780c42 | 206,646 | py | Python | pyboto3/codepipeline.py | gehad-shaat/pyboto3 | 4a0c2851a8bc04fb1c71c36086f7bb257e48181d | [
"MIT"
] | 91 | 2016-12-31T11:38:37.000Z | 2021-09-16T19:33:23.000Z | pyboto3/codepipeline.py | gehad-shaat/pyboto3 | 4a0c2851a8bc04fb1c71c36086f7bb257e48181d | [
"MIT"
] | 7 | 2017-01-02T18:54:23.000Z | 2020-08-11T13:54:02.000Z | pyboto3/codepipeline.py | gehad-shaat/pyboto3 | 4a0c2851a8bc04fb1c71c36086f7bb257e48181d | [
"MIT"
] | 26 | 2016-12-31T13:11:00.000Z | 2022-03-03T21:01:12.000Z | '''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def acknowledge_job(jobId=None, nonce=None):
    """
    Returns information about a specified job and whether that job has been received by the job worker. Used for custom actions only.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.acknowledge_job(
    jobId='string',
    nonce='string'
    )
    :type jobId: string
    :param jobId: [REQUIRED]\nThe unique system-generated ID of the job for which you want to confirm receipt.\n
    :type nonce: string
    :param nonce: [REQUIRED]\nA system-generated random number that AWS CodePipeline uses to ensure that the job is being worked on by only one job worker. Get this number from the response of the PollForJobs request that returned this job.\n
    :rtype: dict
    ReturnsResponse Syntax
    {
        'status': 'Created'|'Queued'|'Dispatched'|'InProgress'|'TimedOut'|'Succeeded'|'Failed'
    }
    Response Structure
    (dict) --
    Represents the output of an AcknowledgeJob action.
    status (string) --
    Whether the job worker has received the specified job.
    Exceptions
    CodePipeline.Client.exceptions.ValidationException
    CodePipeline.Client.exceptions.InvalidNonceException
    CodePipeline.Client.exceptions.JobNotFoundException
    :return: {
    'status': 'Created'|'Queued'|'Dispatched'|'InProgress'|'TimedOut'|'Succeeded'|'Failed'
    }
    :returns:
    CodePipeline.Client.exceptions.ValidationException
    CodePipeline.Client.exceptions.InvalidNonceException
    CodePipeline.Client.exceptions.JobNotFoundException
    """
    # Documentation stub: the body is intentionally empty (returns None); the
    # docstring above mirrors the AWS CodePipeline AcknowledgeJob API reference.
    pass
def acknowledge_third_party_job(jobId=None, nonce=None, clientToken=None):
    """
    Confirms a job worker has received the specified job. Used for partner actions only.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.acknowledge_third_party_job(
    jobId='string',
    nonce='string',
    clientToken='string'
    )
    :type jobId: string
    :param jobId: [REQUIRED]\nThe unique system-generated ID of the job.\n
    :type nonce: string
    :param nonce: [REQUIRED]\nA system-generated random number that AWS CodePipeline uses to ensure that the job is being worked on by only one job worker. Get this number from the response to a GetThirdPartyJobDetails request.\n
    :type clientToken: string
    :param clientToken: [REQUIRED]\nThe clientToken portion of the clientId and clientToken pair used to verify that the calling entity is allowed access to the job and its details.\n
    :rtype: dict
    ReturnsResponse Syntax
    {
        'status': 'Created'|'Queued'|'Dispatched'|'InProgress'|'TimedOut'|'Succeeded'|'Failed'
    }
    Response Structure
    (dict) --
    Represents the output of an AcknowledgeThirdPartyJob action.
    status (string) --
    The status information for the third party job, if any.
    Exceptions
    CodePipeline.Client.exceptions.ValidationException
    CodePipeline.Client.exceptions.InvalidNonceException
    CodePipeline.Client.exceptions.JobNotFoundException
    CodePipeline.Client.exceptions.InvalidClientTokenException
    :return: {
    'status': 'Created'|'Queued'|'Dispatched'|'InProgress'|'TimedOut'|'Succeeded'|'Failed'
    }
    :returns:
    CodePipeline.Client.exceptions.ValidationException
    CodePipeline.Client.exceptions.InvalidNonceException
    CodePipeline.Client.exceptions.JobNotFoundException
    CodePipeline.Client.exceptions.InvalidClientTokenException
    """
    # Documentation stub: the body is intentionally empty (returns None); the
    # docstring above mirrors the AWS CodePipeline AcknowledgeThirdPartyJob API reference.
    pass
def can_paginate(operation_name=None):
    """
    Check if an operation can be paginated.
    :type operation_name: string
    :param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
    """
    # Documentation stub: the body is intentionally empty (returns None).
    pass
def create_custom_action_type(category=None, provider=None, version=None, settings=None, configurationProperties=None, inputArtifactDetails=None, outputArtifactDetails=None, tags=None):
    """
    Creates a new custom action that can be used in all pipelines associated
    with the AWS account. Only used for custom actions.

    See also: AWS API Documentation

    Example::

        response = client.create_custom_action_type(
            category='Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval',
            provider='string',
            version='string',
            settings={...},
            configurationProperties=[{...}],
            inputArtifactDetails={'minimumCount': 123, 'maximumCount': 123},
            outputArtifactDetails={'minimumCount': 123, 'maximumCount': 123},
            tags=[{'key': 'string', 'value': 'string'}]
        )

    :type category: string
    :param category: [REQUIRED] The category of the custom action, such as a
        build action or a test action. One of
        ``'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval'``. Although
        Source and Approval are listed as valid values, they are not currently
        functional; those values are reserved for future use.
    :type provider: string
    :param provider: [REQUIRED] The provider of the service used in the custom
        action, such as AWS CodeDeploy.
    :type version: string
    :param version: [REQUIRED] The version identifier of the custom action.
    :type settings: dict
    :param settings: URLs that provide users information about this custom
        action. Keys:

        - ``thirdPartyConfigurationUrl`` (string) -- The URL of a sign-up page
          where users can sign up for an external service and perform initial
          configuration of the action provided by that service.
        - ``entityUrlTemplate`` (string) -- The URL returned to the AWS
          CodePipeline console that provides a deep link to the resources of
          the external system, such as the configuration page for an AWS
          CodeDeploy deployment group. This link is provided as part of the
          action display in the pipeline.
        - ``executionUrlTemplate`` (string) -- The URL returned to the AWS
          CodePipeline console that contains a link to the top-level landing
          page for the external system, such as the console page for AWS
          CodeDeploy. This link is shown on the pipeline view page in the AWS
          CodePipeline console and provides a link to the execution entity of
          the external action.
        - ``revisionUrlTemplate`` (string) -- The URL returned to the AWS
          CodePipeline console that contains a link to the page where
          customers can update or change the configuration of the external
          action.
    :type configurationProperties: list
    :param configurationProperties: The configuration properties for the
        custom action. You can refer to a name in the configuration properties
        of the custom action within the URL templates by following the format
        of ``{Config:name}``, as long as the configuration property is both
        required and not secret. Each element is a dict with keys:

        - ``name`` (string) -- [REQUIRED] The name of the action configuration
          property.
        - ``required`` (boolean) -- [REQUIRED] Whether the configuration
          property is a required value.
        - ``key`` (boolean) -- [REQUIRED] Whether the configuration property
          is a key.
        - ``secret`` (boolean) -- [REQUIRED] Whether the configuration
          property is secret. Secrets are hidden from all calls except for
          GetJobDetails, GetThirdPartyJobDetails, PollForJobs, and
          PollForThirdPartyJobs. When updating a pipeline, passing ``*****``
          without changing any other values of the action preserves the
          previous value of the secret.
        - ``queryable`` (boolean) -- Indicates that the property is used with
          PollForJobs. When creating a custom action, an action can have up to
          one queryable property, which must be both required and not secret.
          A queryable property's value must be 20 characters or less and can
          contain only alphanumeric characters, underscores, and hyphens.
        - ``description`` (string) -- The description of the action
          configuration property that is displayed to users.
        - ``type`` (string) -- The type of the configuration property, one of
          ``'String'|'Number'|'Boolean'``.
    :type inputArtifactDetails: dict
    :param inputArtifactDetails: [REQUIRED] The details of the input artifact
        for the action, such as its commit ID. Keys ``minimumCount`` and
        ``maximumCount`` (both [REQUIRED] integers) give the minimum and
        maximum number of artifacts allowed for the action type.
    :type outputArtifactDetails: dict
    :param outputArtifactDetails: [REQUIRED] The details of the output
        artifact of the action, such as its commit ID. Keys ``minimumCount``
        and ``maximumCount`` (both [REQUIRED] integers) give the minimum and
        maximum number of artifacts allowed for the action type.
    :type tags: list
    :param tags: The tags for the custom action. Each tag is a key-value pair
        used to manage the resource: ``key`` (string, [REQUIRED]) and
        ``value`` (string, [REQUIRED]).
    :rtype: dict
    :return: Represents the output of a CreateCustomActionType operation::

        {
            'actionType': {
                'id': {
                    'category': 'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval',
                    'owner': 'AWS'|'ThirdParty'|'Custom',
                    'provider': 'string',
                    'version': 'string'
                },
                'settings': {
                    'thirdPartyConfigurationUrl': 'string',
                    'entityUrlTemplate': 'string',
                    'executionUrlTemplate': 'string',
                    'revisionUrlTemplate': 'string'
                },
                'actionConfigurationProperties': [
                    {
                        'name': 'string',
                        'required': True|False,
                        'key': True|False,
                        'secret': True|False,
                        'queryable': True|False,
                        'description': 'string',
                        'type': 'String'|'Number'|'Boolean'
                    },
                ],
                'inputArtifactDetails': {
                    'minimumCount': 123,
                    'maximumCount': 123
                },
                'outputArtifactDetails': {
                    'minimumCount': 123,
                    'maximumCount': 123
                }
            },
            'tags': [
                {
                    'key': 'string',
                    'value': 'string'
                },
            ]
        }

    Exceptions:

    - CodePipeline.Client.exceptions.ValidationException
    - CodePipeline.Client.exceptions.LimitExceededException
    - CodePipeline.Client.exceptions.TooManyTagsException
    - CodePipeline.Client.exceptions.InvalidTagsException
    - CodePipeline.Client.exceptions.ConcurrentModificationException
    """
    pass
def create_pipeline(pipeline=None, tags=None):
    """
    Creates a pipeline.

    See also: AWS API Documentation

    Example::

        response = client.create_pipeline(
            pipeline={...},
            tags=[{'key': 'string', 'value': 'string'}]
        )

    :type pipeline: dict
    :param pipeline: [REQUIRED] Represents the structure of actions and stages
        to be performed in the pipeline. Keys:

        - ``name`` (string) -- [REQUIRED] The name of the action to be
          performed.
        - ``roleArn`` (string) -- [REQUIRED] The Amazon Resource Name (ARN)
          for AWS CodePipeline to use to either perform actions with no
          actionRoleArn, or to use to assume roles for actions with an
          actionRoleArn.
        - ``artifactStore`` (dict) -- Represents information about the S3
          bucket where artifacts are stored for the pipeline. You must include
          either ``artifactStore`` or ``artifactStores`` in your pipeline, but
          you cannot use both. If you create a cross-region action in your
          pipeline, you must use ``artifactStores``. Keys:

          - ``type`` (string) -- [REQUIRED] The type of the artifact store,
            such as S3.
          - ``location`` (string) -- [REQUIRED] The S3 bucket used for storing
            the artifacts for a pipeline. You can specify the name of an S3
            bucket but not a folder in the bucket; a folder to contain the
            pipeline artifacts is created for you based on the name of the
            pipeline. The bucket must be in the same AWS Region as the
            pipeline.
          - ``encryptionKey`` (dict) -- The encryption key used to encrypt the
            data in the artifact store, such as an AWS KMS key. If undefined,
            the default key for Amazon S3 is used. ``id`` (string, [REQUIRED])
            is the key ID, key ARN, or alias ARN (aliases are recognized only
            in the account that created the CMK; for cross-account actions use
            the key ID or key ARN). ``type`` (string, [REQUIRED]) must be set
            to 'KMS' when creating or updating a pipeline.
        - ``artifactStores`` (dict) -- A mapping of artifactStore objects and
          their corresponding AWS Regions. There must be an artifact store for
          the pipeline Region and for each cross-region action in the
          pipeline. Each value has the same structure as ``artifactStore``.
        - ``stages`` (list) -- [REQUIRED] The stage in which to perform the
          action. Each stage dict contains:

          - ``name`` (string) -- [REQUIRED] The name of the stage.
          - ``blockers`` (list) -- Reserved for future use. Each element has
            ``name`` and ``type`` (both [REQUIRED], reserved for future use).
          - ``actions`` (list) -- [REQUIRED] The actions included in a stage.
            Each action declaration contains:

            - ``name`` (string) -- [REQUIRED] The action declaration's name.
            - ``actionTypeId`` (dict) -- [REQUIRED] Specifies the action type
              and the provider of the action: ``category``, ``owner``,
              ``provider``, and ``version`` (all [REQUIRED]).
            - ``runOrder`` (integer) -- The order in which actions are run.
            - ``configuration`` (dict) -- The action's configuration: key-value
              pairs that specify input values for an action. See Action
              Structure Requirements in CodePipeline. The values can be
              represented in either JSON or YAML format, for example
              ``JSON: 'Configuration' : { Key : Value }``.
            - ``outputArtifacts`` (list) -- The name or ID of the result of
              the action declaration, such as a test or build artifact. Each
              element's ``name`` (string, [REQUIRED]) must exactly match the
              input artifact declared in a following action that consumes it;
              output artifact names must be unique within a pipeline.
            - ``inputArtifacts`` (list) -- The name or ID of the artifact
              consumed by the action, such as a test or build artifact. Each
              element's ``name`` (string, [REQUIRED]) must exactly match an
              output artifact declared in a preceding action, which does not
              have to be the immediately preceding action.
            - ``roleArn`` (string) -- The ARN of the IAM service role that
              performs the declared action, assumed through the roleArn for
              the pipeline.
            - ``region`` (string) -- The action declaration's AWS Region, such
              as us-east-1.
            - ``namespace`` (string) -- The variable namespace associated with
              the action. All variables produced as output by this action fall
              under this namespace.
        - ``version`` (integer) -- The version number of the pipeline. A new
          pipeline always has a version number of 1; this number is
          incremented when a pipeline is updated.
    :type tags: list
    :param tags: The tags for the pipeline. Each tag is a key-value pair used
        to manage the resource: ``key`` (string, [REQUIRED]) and ``value``
        (string, [REQUIRED]).
    :rtype: dict
    :return: Represents the output of a CreatePipeline action::

        {
            'pipeline': {
                'name': 'string',
                'roleArn': 'string',
                'artifactStore': {
                    'type': 'S3',
                    'location': 'string',
                    'encryptionKey': {
                        'id': 'string',
                        'type': 'KMS'
                    }
                },
                'artifactStores': {
                    'string': {
                        'type': 'S3',
                        'location': 'string',
                        'encryptionKey': {
                            'id': 'string',
                            'type': 'KMS'
                        }
                    }
                },
                'stages': [
                    {
                        'name': 'string',
                        'blockers': [
                            {
                                'name': 'string',
                                'type': 'Schedule'
                            },
                        ],
                        'actions': [
                            {
                                'name': 'string',
                                'actionTypeId': {
                                    'category': 'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval',
                                    'owner': 'AWS'|'ThirdParty'|'Custom',
                                    'provider': 'string',
                                    'version': 'string'
                                },
                                'runOrder': 123,
                                'configuration': {
                                    'string': 'string'
                                },
                                'outputArtifacts': [
                                    {
                                        'name': 'string'
                                    },
                                ],
                                'inputArtifacts': [
                                    {
                                        'name': 'string'
                                    },
                                ],
                                'roleArn': 'string',
                                'region': 'string',
                                'namespace': 'string'
                            },
                        ]
                    },
                ],
                'version': 123
            },
            'tags': [
                {
                    'key': 'string',
                    'value': 'string'
                },
            ]
        }

    Exceptions:

    - CodePipeline.Client.exceptions.ValidationException
    - CodePipeline.Client.exceptions.PipelineNameInUseException
    - CodePipeline.Client.exceptions.InvalidStageDeclarationException
    - CodePipeline.Client.exceptions.InvalidActionDeclarationException
    - CodePipeline.Client.exceptions.InvalidBlockerDeclarationException
    - CodePipeline.Client.exceptions.InvalidStructureException
    - CodePipeline.Client.exceptions.LimitExceededException
    - CodePipeline.Client.exceptions.TooManyTagsException
    - CodePipeline.Client.exceptions.InvalidTagsException
    - CodePipeline.Client.exceptions.ConcurrentModificationException
    """
    pass
def delete_custom_action_type(category=None, provider=None, version=None):
    """
    Marks a custom action as deleted. PollForJobs for the custom action fails
    after the action is marked for deletion. Used for custom actions only.

    See also: AWS API Documentation

    Example::

        response = client.delete_custom_action_type(
            category='Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval',
            provider='string',
            version='string'
        )

    :type category: string
    :param category: [REQUIRED] The category of the custom action that you
        want to delete, such as source or deploy.
    :type provider: string
    :param provider: [REQUIRED] The provider of the service used in the custom
        action, such as AWS CodeDeploy.
    :type version: string
    :param version: [REQUIRED] The version of the custom action to delete.

    Exceptions:

    - CodePipeline.Client.exceptions.ValidationException
    - CodePipeline.Client.exceptions.ConcurrentModificationException
    """
    pass
def delete_pipeline(name=None):
    """
    Deletes the specified pipeline.

    See also: AWS API Documentation

    Example::

        response = client.delete_pipeline(
            name='string'
        )

    :type name: string
    :param name: [REQUIRED] The name of the pipeline to be deleted.
    """
    pass
def delete_webhook(name=None):
    """Delete a previously created webhook by name.

    Deleting the webhook stops AWS CodePipeline from starting a pipeline
    every time an external event occurs. The API returns successfully when
    trying to delete a webhook that is already deleted. If a deleted
    webhook is re-created by calling PutWebhook with the same name, it
    will have a different URL. See also: AWS API Documentation.

    Example:
        response = client.delete_webhook(name='string')

    Args:
        name (string): [REQUIRED] The name of the webhook you want to
            delete.

    Returns:
        dict: An empty response, i.e. ``{}``.

    Raises:
        CodePipeline.Client.exceptions.ValidationException
        CodePipeline.Client.exceptions.ConcurrentModificationException
    """
    pass
def deregister_webhook_with_third_party(webhookName=None):
    """Remove the connection between a CodePipeline webhook and the external tool.

    Removes the connection between the webhook that was created by
    CodePipeline and the external tool with events to be detected.
    Currently supported only for webhooks that target an action type of
    GitHub. See also: AWS API Documentation.

    Example:
        response = client.deregister_webhook_with_third_party(
            webhookName='string'
        )

    Args:
        webhookName (string): The name of the webhook you want to
            deregister.

    Returns:
        dict: An empty response, i.e. ``{}``.

    Raises:
        CodePipeline.Client.exceptions.ValidationException
        CodePipeline.Client.exceptions.WebhookNotFoundException
    """
    pass
def disable_stage_transition(pipelineName=None, stageName=None, transitionType=None, reason=None):
    """Prevent artifacts in a pipeline from transitioning to the next stage.

    See also: AWS API Documentation.

    Example:
        response = client.disable_stage_transition(
            pipelineName='string',
            stageName='string',
            transitionType='Inbound'|'Outbound',
            reason='string'
        )

    Args:
        pipelineName (string): [REQUIRED] The name of the pipeline in
            which you want to disable the flow of artifacts from one stage
            to another.
        stageName (string): [REQUIRED] The name of the stage where you
            want to disable the inbound or outbound transition of
            artifacts.
        transitionType (string): [REQUIRED] Specifies whether artifacts
            are prevented from transitioning into the stage and being
            processed by the actions in that stage (inbound), or prevented
            from transitioning from the stage after they have been
            processed by the actions in that stage (outbound).
        reason (string): [REQUIRED] The reason given to the user that a
            stage is disabled, such as waiting for manual approval or
            manual tests. This message is displayed in the pipeline
            console UI.

    Raises:
        CodePipeline.Client.exceptions.ValidationException
        CodePipeline.Client.exceptions.PipelineNotFoundException
        CodePipeline.Client.exceptions.StageNotFoundException
    """
    pass
def enable_stage_transition(pipelineName=None, stageName=None, transitionType=None):
    """Enable artifacts in a pipeline to transition to a stage in a pipeline.

    See also: AWS API Documentation.

    Example:
        response = client.enable_stage_transition(
            pipelineName='string',
            stageName='string',
            transitionType='Inbound'|'Outbound'
        )

    Args:
        pipelineName (string): [REQUIRED] The name of the pipeline in
            which you want to enable the flow of artifacts from one stage
            to another.
        stageName (string): [REQUIRED] The name of the stage where you
            want to enable the transition of artifacts, either into the
            stage (inbound) or from that stage to the next stage
            (outbound).
        transitionType (string): [REQUIRED] Specifies whether artifacts
            are allowed to enter the stage and be processed by the actions
            in that stage (inbound) or whether already processed artifacts
            are allowed to transition to the next stage (outbound).

    Raises:
        CodePipeline.Client.exceptions.ValidationException
        CodePipeline.Client.exceptions.PipelineNotFoundException
        CodePipeline.Client.exceptions.StageNotFoundException
    """
    pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
    """Generate a presigned url given a client, its method, and arguments.

    Args:
        ClientMethod (string): The client method to presign for.
        Params (dict): The parameters normally passed to ClientMethod.
        ExpiresIn (int): The number of seconds the presigned url is valid
            for. By default it expires in an hour (3600 seconds).
        HttpMethod (string): The http method to use on the generated url.
            By default, the http method is whatever is used in the
            method's model.
    """
    pass
def get_job_details(jobId=None):
    """Return information about a job (custom actions only).

    If AWSSessionCredentials is used, a long-running job can call
    GetJobDetails again to obtain new credentials.
    See also: AWS API Documentation.

    Example:
        response = client.get_job_details(jobId='string')

    Args:
        jobId (string): [REQUIRED] The unique system-generated ID for the
            job.

    Returns:
        dict: Represents the output of a GetJobDetails action, shaped as::

            {
                'jobDetails': {
                    'id': 'string',          # unique system-generated job ID
                    'data': {
                        'actionTypeId': {
                            'category': 'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval',
                            'owner': 'AWS'|'ThirdParty'|'Custom',
                            'provider': 'string',
                            'version': 'string'
                        },
                        'actionConfiguration': {
                            'configuration': {'string': 'string'}
                        },
                        'pipelineContext': {
                            'pipelineName': 'string',
                            'stage': {'name': 'string'},
                            'action': {
                                'name': 'string',
                                'actionExecutionId': 'string'
                            },
                            'pipelineArn': 'string',
                            'pipelineExecutionId': 'string'
                        },
                        'inputArtifacts': [
                            {
                                'name': 'string',
                                'revision': 'string',
                                'location': {
                                    'type': 'S3',
                                    's3Location': {
                                        'bucketName': 'string',
                                        'objectKey': 'string'
                                    }
                                }
                            },
                        ],
                        'outputArtifacts': [
                            # same structure as inputArtifacts entries
                        ],
                        'artifactCredentials': {
                            # temporary AWS STS credentials for artifact
                            # access in the pipeline's S3 bucket
                            'accessKeyId': 'string',
                            'secretAccessKey': 'string',
                            'sessionToken': 'string'
                        },
                        'continuationToken': 'string',
                        'encryptionKey': {
                            # key used to encrypt data in the artifact
                            # store; 'type' must be 'KMS'
                            'id': 'string',
                            'type': 'KMS'
                        }
                    },
                    'accountId': 'string'    # AWS account ID for the job
                }
            }

        Notes on selected fields:
            - ``actionTypeId.provider``: the provider of the service being
              called by the action; valid providers are determined by the
              action category (e.g. CodeDeploy for the Deploy category).
            - ``inputArtifacts``/``outputArtifacts``: the artifacts
              supplied to / produced by the job; ``revision`` may be a
              commit ID (GitHub) or a revision ID (Amazon S3).
            - ``continuationToken``: a system-generated token, such as a
              AWS CodeDeploy deployment ID, required by a job to continue
              the job asynchronously.
            - ``encryptionKey.id``: a KMS key ID, key ARN, or alias ARN.
              Aliases are recognized only in the account that created the
              CMK; for cross-account actions use the key ID or key ARN.

    Raises:
        CodePipeline.Client.exceptions.ValidationException
        CodePipeline.Client.exceptions.JobNotFoundException
    """
    pass
def get_paginator(operation_name=None):
    """Create a paginator for an operation.

    Args:
        operation_name (string): The operation name. This is the same name
            as the method name on the client. For example, if the method
            name is create_foo, and you'd normally invoke the operation as
            client.create_foo(**kwargs), if the create_foo operation can
            be paginated, you can use the call
            client.get_paginator('create_foo').

    Returns:
        L{botocore.paginate.Paginator}: A paginator object.
    """
    pass
def get_pipeline(name=None, version=None):
    """Return the metadata, structure, stages, and actions of a pipeline.

    Can be used to return the entire structure of a pipeline in JSON
    format, which can then be modified and used to update the pipeline
    structure with UpdatePipeline. See also: AWS API Documentation.

    Example:
        response = client.get_pipeline(
            name='string',
            version=123
        )

    Args:
        name (string): [REQUIRED] The name of the pipeline for which you
            want to get information. Pipeline names must be unique under
            an AWS user account.
        version (integer): The version number of the pipeline. If you do
            not specify a version, defaults to the current version.

    Returns:
        dict: Represents the output of a GetPipeline action, shaped as::

            {
                'pipeline': {
                    'name': 'string',
                    'roleArn': 'string',
                    'artifactStore': {
                        'type': 'S3',
                        'location': 'string',      # S3 bucket name
                        'encryptionKey': {
                            'id': 'string',        # KMS key ID, ARN, or alias ARN
                            'type': 'KMS'
                        }
                    },
                    'artifactStores': {
                        # mapping of AWS Region -> artifact store; include
                        # either artifactStore or artifactStores, not both
                        'string': {
                            'type': 'S3',
                            'location': 'string',
                            'encryptionKey': {'id': 'string', 'type': 'KMS'}
                        }
                    },
                    'stages': [
                        {
                            'name': 'string',
                            'blockers': [
                                # reserved for future use
                                {'name': 'string', 'type': 'Schedule'},
                            ],
                            'actions': [
                                {
                                    'name': 'string',
                                    'actionTypeId': {
                                        'category': 'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval',
                                        'owner': 'AWS'|'ThirdParty'|'Custom',
                                        'provider': 'string',
                                        'version': 'string'
                                    },
                                    'runOrder': 123,
                                    'configuration': {
                                        # key-value input values for the
                                        # action ("Configuration" : { Key : Value })
                                        'string': 'string'
                                    },
                                    'outputArtifacts': [
                                        # names must be unique within a pipeline
                                        {'name': 'string'},
                                    ],
                                    'inputArtifacts': [
                                        # must exactly match an output
                                        # artifact of a preceding action
                                        {'name': 'string'},
                                    ],
                                    'roleArn': 'string',
                                    'region': 'string',     # e.g. us-east-1
                                    'namespace': 'string'   # variable namespace
                                },
                            ]
                        },
                    ],
                    # a new pipeline is version 1; incremented on update
                    'version': 123
                },
                'metadata': {
                    'pipelineArn': 'string',
                    'created': datetime(2015, 1, 1),
                    'updated': datetime(2015, 1, 1)
                }
            }

    Raises:
        CodePipeline.Client.exceptions.ValidationException
        CodePipeline.Client.exceptions.PipelineNotFoundException
        CodePipeline.Client.exceptions.PipelineVersionNotFoundException
    """
    pass
def get_pipeline_execution(pipelineName=None, pipelineExecutionId=None):
    """Return information about an execution of a pipeline.

    Includes details about artifacts, the pipeline execution ID, and the
    name, version, and status of the pipeline.
    See also: AWS API Documentation.

    Example:
        response = client.get_pipeline_execution(
            pipelineName='string',
            pipelineExecutionId='string'
        )

    Args:
        pipelineName (string): [REQUIRED] The name of the pipeline about
            which you want to get execution details.
        pipelineExecutionId (string): [REQUIRED] The ID of the pipeline
            execution about which you want to get execution details.

    Returns:
        dict: Represents the output of a GetPipelineExecution action,
        shaped as::

            {
                'pipelineExecution': {
                    'pipelineName': 'string',
                    'pipelineVersion': 123,
                    'pipelineExecutionId': 'string',
                    'status': 'InProgress'|'Stopped'|'Stopping'|'Succeeded'|'Superseded'|'Failed',
                    'artifactRevisions': [
                        {
                            'name': 'string',
                            'revisionId': 'string',
                            'revisionChangeIdentifier': 'string',
                            'revisionSummary': 'string',
                            'created': datetime(2015, 1, 1),
                            'revisionUrl': 'string'
                        },
                    ]
                }
            }

        Status values:
            - InProgress: The pipeline execution is currently running.
            - Stopped: The pipeline execution was manually stopped.
            - Stopping: The pipeline execution received a request to be
              manually stopped; depending on the selected stop mode, the
              execution is either completing or abandoning in-progress
              actions.
            - Succeeded: The pipeline execution was completed successfully.
            - Superseded: While this pipeline execution was waiting for
              the next stage to be completed, a newer pipeline execution
              advanced and continued through the pipeline instead.
            - Failed: The pipeline execution was not completed
              successfully.

    Raises:
        CodePipeline.Client.exceptions.ValidationException
        CodePipeline.Client.exceptions.PipelineNotFoundException
        CodePipeline.Client.exceptions.PipelineExecutionNotFoundException
    """
    pass
def get_pipeline_state(name=None):
    """Return information about the state of a pipeline, including stages and actions.

    See also: AWS API Documentation.

    Example:
        response = client.get_pipeline_state(name='string')

    Args:
        name (string): [REQUIRED] The name of the pipeline about which you
            want to get information.

    Returns:
        dict: Represents the output of a GetPipelineState action, shaped
        as::

            {
                'pipelineName': 'string',
                # a newly created pipeline is always version 1
                'pipelineVersion': 123,
                'stageStates': [
                    {
                        'stageName': 'string',
                        'inboundTransitionState': {
                            'enabled': True|False,
                            'lastChangedBy': 'string',
                            'lastChangedAt': datetime(2015, 1, 1),
                            'disabledReason': 'string'
                        },
                        'actionStates': [
                            {
                                'actionName': 'string',
                                'currentRevision': {
                                    'revisionId': 'string',
                                    'revisionChangeId': 'string',
                                    'created': datetime(2015, 1, 1)
                                },
                                'latestExecution': {
                                    'status': 'InProgress'|'Abandoned'|'Succeeded'|'Failed',
                                    'summary': 'string',
                                    'lastStatusChange': datetime(2015, 1, 1),
                                    # approval-request token, obtainable
                                    # via GetPipelineState
                                    'token': 'string',
                                    'lastUpdatedBy': 'string',
                                    'externalExecutionId': 'string',
                                    'externalExecutionUrl': 'string',
                                    'percentComplete': 123,
                                    'errorDetails': {
                                        'code': 'string',
                                        'message': 'string'
                                    }
                                },
                                'entityUrl': 'string',
                                'revisionUrl': 'string'
                            },
                        ],
                        'latestExecution': {
                            'pipelineExecutionId': 'string',
                            'status': 'InProgress'|'Failed'|'Stopped'|'Stopping'|'Succeeded'
                        }
                    },
                ],
                'created': datetime(2015, 1, 1),
                'updated': datetime(2015, 1, 1)
            }

    Raises:
        CodePipeline.Client.exceptions.ValidationException
        CodePipeline.Client.exceptions.PipelineNotFoundException
    """
    pass
def get_third_party_job_details(jobId=None, clientToken=None):
    """Request the details of a job for a third party action.

    Used for partner actions only.
    See also: AWS API Documentation.

    :example: response = client.get_third_party_job_details(
        jobId='string',
        clientToken='string'
    )

    :type jobId: string
    :param jobId: [REQUIRED] The unique system-generated ID used for
        identifying the job.

    :type clientToken: string
    :param clientToken: [REQUIRED] The clientToken portion of the clientId
        and clientToken pair used to verify that the calling entity is
        allowed access to the job and its details.

    :rtype: dict
    :return: A dict of the form ``{'jobDetails': {...}}``, representing the
        output of a GetThirdPartyJobDetails action. ``jobDetails`` contains:

        - ``id`` (string): the identifier of the job details in AWS
          CodePipeline.
        - ``data`` (dict): the data to be returned by the third party job
          worker, including:

          - ``actionTypeId``: the action's category
            (``'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval'``),
            owner (``'AWS'|'ThirdParty'|'Custom'``), provider, and version.
          - ``actionConfiguration``: the configuration data for the action
            (a string-to-string map).
          - ``pipelineContext``: pipeline name, stage name, action
            name/actionExecutionId, pipeline ARN, and pipeline execution ID.
            Note: does not include pipelineArn and pipelineExecutionId for
            ThirdParty jobs.
          - ``inputArtifacts`` / ``outputArtifacts``: lists of artifacts
            with ``name``, ``revision``, and an S3 ``location``
            (``bucketName``, ``objectKey``).
          - ``artifactCredentials``: temporary AWS STS credentials
            (``accessKeyId``, ``secretAccessKey``, ``sessionToken``) used to
            access artifacts in the pipeline's S3 artifact store.
          - ``continuationToken``: a system-generated token, such as an AWS
            CodeDeploy deployment ID, that a job requires to continue the
            job asynchronously.
          - ``encryptionKey``: the key used to encrypt/decrypt data in the
            artifact store (``id`` and ``type``, which must be ``'KMS'``).
            For an AWS KMS key, the ``id`` can be the key ID, key ARN, or
            alias ARN; aliases are recognized only in the account that
            created the CMK.
        - ``nonce`` (string): a system-generated random number that AWS
          CodePipeline uses to ensure the job is worked on by only one job
          worker. Use it in an AcknowledgeThirdPartyJob request.

    Exceptions:
        CodePipeline.Client.exceptions.JobNotFoundException
        CodePipeline.Client.exceptions.ValidationException
        CodePipeline.Client.exceptions.InvalidClientTokenException
        CodePipeline.Client.exceptions.InvalidJobException
    """
    pass
def get_waiter(waiter_name=None):
    """Return an object that can wait for some condition.

    :type waiter_name: str
    :param waiter_name: The name of the waiter to get. See the waiters
        section of the service docs for a list of available waiters.

    :rtype: botocore.waiter.Waiter
    """
    pass
def list_action_executions(pipelineName=None, filter=None, maxResults=None, nextToken=None):
    """List the action executions that have occurred in a pipeline.

    See also: AWS API Documentation.

    :example: response = client.list_action_executions(
        pipelineName='string',
        filter={
            'pipelineExecutionId': 'string'
        },
        maxResults=123,
        nextToken='string'
    )

    :type pipelineName: string
    :param pipelineName: [REQUIRED] The name of the pipeline for which you
        want to list action execution history.

    :type filter: dict
    :param filter: Input information used to filter action execution
        history. Supports ``pipelineExecutionId`` (string), the pipeline
        execution ID used to filter action execution history.

    :type maxResults: integer
    :param maxResults: The maximum number of results to return in a single
        call. To retrieve the remaining results, make another call with the
        returned nextToken value. Action execution history is retained for
        up to 12 months, based on action execution start times. Default
        value is 100. Note: detailed execution history is available for
        executions run on or after February 21, 2019.

    :type nextToken: string
    :param nextToken: The token that was returned from the previous
        ListActionExecutions call, which can be used to return the next set
        of action executions in the list.

    :rtype: dict
    :return: A dict of the form
        ``{'actionExecutionDetails': [...], 'nextToken': 'string'}``.
        Each entry in ``actionExecutionDetails`` describes one action
        execution:

        - ``pipelineExecutionId``, ``actionExecutionId``,
          ``pipelineVersion``, ``stageName``, ``actionName``.
        - ``startTime`` / ``lastUpdateTime`` (datetime).
        - ``status``: one of ``'InProgress'|'Abandoned'|'Succeeded'|'Failed'``.
        - ``input`` (dict): role ARN, AWS Region, ``actionTypeId``
          (category/owner/provider/version), ``configuration`` and
          ``resolvedConfiguration`` (string maps; resolvedConfiguration has
          variable references replaced with their real values),
          ``inputArtifacts`` (each with ``name`` and an ``s3location`` of
          ``bucket``/``key``), and ``namespace`` (the variable namespace
          under which all variables output by this action fall).
        - ``output`` (dict): ``outputArtifacts`` (same shape as input
          artifacts), ``executionResult`` (the action provider's
          ``externalExecutionId``, ``externalExecutionSummary``, and
          ``externalExecutionUrl``), and ``outputVariables`` (the key-value
          pairs output as part of that execution).

        ``nextToken`` is returned when the result set is significantly
        large and can be used in a subsequent call.

    Exceptions:
        CodePipeline.Client.exceptions.ValidationException
        CodePipeline.Client.exceptions.PipelineNotFoundException
        CodePipeline.Client.exceptions.InvalidNextTokenException
        CodePipeline.Client.exceptions.PipelineExecutionNotFoundException
    """
    pass
def list_action_types(actionOwnerFilter=None, nextToken=None):
    """Get a summary of all AWS CodePipeline action types for your account.

    See also: AWS API Documentation.

    :example: response = client.list_action_types(
        actionOwnerFilter='AWS'|'ThirdParty'|'Custom',
        nextToken='string'
    )

    :type actionOwnerFilter: string
    :param actionOwnerFilter: Filters the list of action types to those
        created by a specified entity.

    :type nextToken: string
    :param nextToken: An identifier that was returned from the previous
        list action types call, which can be used to return the next set of
        action types in the list.

    :rtype: dict
    :return: A dict of the form
        ``{'actionTypes': [...], 'nextToken': 'string'}``, representing the
        output of a ListActionTypes action. Each entry in ``actionTypes``
        describes one action type:

        - ``id``: the action type identifier — ``category``
          (``'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval'``),
          ``owner`` (``'AWS'|'ThirdParty'|'Custom'``), ``provider``, and
          ``version``.
        - ``settings``: URLs for the action type —
          ``thirdPartyConfigurationUrl`` (sign-up/initial configuration
          page for an external service), ``entityUrlTemplate`` (deep link
          to the external system's resources shown in the pipeline
          display), ``executionUrlTemplate`` (link to the execution entity
          of the external action), and ``revisionUrlTemplate`` (page where
          customers can update or change the external action's
          configuration).
        - ``actionConfigurationProperties``: a list of configuration
          properties, each with ``name``, ``required``, ``key``, ``secret``
          (secrets are hidden from all calls except GetJobDetails,
          GetThirdPartyJobDetails, PollForJobs, and PollForThirdPartyJobs;
          passing ``* * * * *`` on update preserves the previous secret
          value), ``queryable`` (used with PollForJobs; at most one per
          custom action, must be required and not secret, value <= 20
          alphanumeric/underscore/hyphen characters), ``description``, and
          ``type`` (``'String'|'Number'|'Boolean'``).
        - ``inputArtifactDetails`` / ``outputArtifactDetails``: the
          ``minimumCount`` and ``maximumCount`` of artifacts allowed for
          the action type.

        ``nextToken`` is returned when the result set is significantly
        large and can be used in a subsequent call.

    Exceptions:
        CodePipeline.Client.exceptions.ValidationException
        CodePipeline.Client.exceptions.InvalidNextTokenException
    """
    pass
def list_pipeline_executions(pipelineName=None, maxResults=None, nextToken=None):
    """Get a summary of the most recent executions for a pipeline.

    See also: AWS API Documentation.

    :example: response = client.list_pipeline_executions(
        pipelineName='string',
        maxResults=123,
        nextToken='string'
    )

    :type pipelineName: string
    :param pipelineName: [REQUIRED] The name of the pipeline for which you
        want to get execution summary information.

    :type maxResults: integer
    :param maxResults: The maximum number of results to return in a single
        call. To retrieve the remaining results, make another call with the
        returned nextToken value. Pipeline history is limited to the most
        recent 12 months, based on pipeline execution start times. Default
        value is 100.

    :type nextToken: string
    :param nextToken: The token that was returned from the previous
        ListPipelineExecutions call, which can be used to return the next
        set of pipeline executions in the list.

    :rtype: dict
    :return: A dict of the form
        ``{'pipelineExecutionSummaries': [...], 'nextToken': 'string'}``,
        representing the output of a ListPipelineExecutions action. Each
        summary contains:

        - ``pipelineExecutionId``: the ID of the pipeline execution.
        - ``status``: one of
          ``'InProgress'|'Stopped'|'Stopping'|'Succeeded'|'Superseded'|'Failed'``:

          - InProgress: the pipeline execution is currently running.
          - Stopped: the execution was manually stopped (see Stopped
            Executions).
          - Stopping: the execution received a request to be manually
            stopped; depending on the selected stop mode, it is either
            completing or abandoning in-progress actions.
          - Succeeded: the execution was completed successfully.
          - Superseded: while this execution was waiting for the next stage
            to be completed, a newer execution advanced and continued
            through the pipeline instead (see Superseded Executions).
          - Failed: the execution was not completed successfully.
        - ``startTime`` / ``lastUpdateTime`` (datetime).
        - ``sourceRevisions``: the source artifact revisions that initiated
          the execution, each with ``actionName``, ``revisionId``,
          ``revisionSummary`` (for GitHub/CodeCommit, the commit message;
          for S3, the user-provided codepipeline-artifact-revision-summary
          object metadata), and ``revisionUrl`` (linked to a commit details
          page for GitHub/CodeCommit artifacts).
        - ``trigger``: what started the execution — ``triggerType`` (one of
          ``'CreatePipeline'|'StartPipelineExecution'|'PollForSourceChanges'|'Webhook'|'CloudWatchEvent'|'PutActionRevision'``)
          and ``triggerDetail`` (e.g. the webhook ARN or the user ARN for a
          user-initiated start-pipeline-execution CLI command).
        - ``stopTrigger``: the interaction that stopped the execution, with
          the user-specified ``reason``.

        ``nextToken`` can be used in the next ListPipelineExecutions call;
        continue calling with each subsequent token until no more nextToken
        values are returned.

    Exceptions:
        CodePipeline.Client.exceptions.ValidationException
        CodePipeline.Client.exceptions.PipelineNotFoundException
        CodePipeline.Client.exceptions.InvalidNextTokenException
    """
    pass
def list_pipelines(nextToken=None):
    """Get a summary of all of the pipelines associated with your account.

    See also: AWS API Documentation.

    :example: response = client.list_pipelines(
        nextToken='string'
    )

    :type nextToken: string
    :param nextToken: An identifier that was returned from the previous
        list pipelines call. It can be used to return the next set of
        pipelines in the list.

    :rtype: dict
    :return: A dict of the form
        ``{'pipelines': [...], 'nextToken': 'string'}``, representing the
        output of a ListPipelines action. Each entry in ``pipelines`` is a
        pipeline summary containing:

        - ``name`` (string): the name of the pipeline.
        - ``version`` (integer): the version number of the pipeline.
        - ``created`` (datetime): when the pipeline was created, in
          timestamp format.
        - ``updated`` (datetime): the date and time of the last update to
          the pipeline, in timestamp format.

        ``nextToken`` is returned when the result set is significantly
        large and can be used in a subsequent list pipelines call.

    Exceptions:
        CodePipeline.Client.exceptions.ValidationException
        CodePipeline.Client.exceptions.InvalidNextTokenException
    """
    pass
def list_tags_for_resource(resourceArn=None, nextToken=None, maxResults=None):
    """Get the set of key-value pairs (metadata) used to manage a resource.

    See also: AWS API Documentation.

    :example: response = client.list_tags_for_resource(
        resourceArn='string',
        nextToken='string',
        maxResults=123
    )

    :type resourceArn: string
    :param resourceArn: [REQUIRED] The Amazon Resource Name (ARN) of the
        resource to get tags for.

    :type nextToken: string
    :param nextToken: The token that was returned from the previous API
        call, which would be used to return the next page of the list. The
        ListTagsforResource call lists all available tags in one call and
        does not use pagination.

    :type maxResults: integer
    :param maxResults: The maximum number of results to return in a single
        call.

    :rtype: dict
    :return: A dict of the form
        ``{'tags': [...], 'nextToken': 'string'}``. Each entry in ``tags``
        is a key-value pair used to manage the resource, with:

        - ``key`` (string): the tag's key.
        - ``value`` (string): the tag's value.

        ``nextToken`` is returned when the result set is significantly
        large; note the ListTagsforResource call lists all available tags
        in one call and does not use pagination.

    Exceptions:
        CodePipeline.Client.exceptions.ValidationException
        CodePipeline.Client.exceptions.ResourceNotFoundException
        CodePipeline.Client.exceptions.InvalidNextTokenException
        CodePipeline.Client.exceptions.InvalidArnException
    """
    pass
def list_webhooks(NextToken=None, MaxResults=None):
    """
    List all webhooks in this AWS Region for this account, including each
    webhook's URL, ARN, and configuration.

    See also: AWS API Documentation.

    :type NextToken: string
    :param NextToken: The token returned from the previous ListWebhooks call,
        used to return the next set of webhooks in the list.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to return in a single
        call. To retrieve the remaining results, make another call with the
        returned nextToken value.
    :rtype: dict
    :return: {
            'webhooks': [
                {
                    'definition': {
                        'name': 'string',
                        'targetPipeline': 'string',
                        'targetAction': 'string',
                        'filters': [
                            {
                                'jsonPath': 'string',
                                'matchEquals': 'string'
                            },
                        ],
                        'authentication': 'GITHUB_HMAC'|'IP'|'UNAUTHENTICATED',
                        'authenticationConfiguration': {
                            'AllowedIPRange': 'string',
                            'SecretToken': 'string'
                        }
                    },
                    'url': 'string',
                    'errorMessage': 'string',
                    'errorCode': 'string',
                    'lastTriggered': datetime(2015, 1, 1),
                    'arn': 'string',
                    'tags': [
                        {
                            'key': 'string',
                            'value': 'string'
                        },
                    ]
                },
            ],
            'NextToken': 'string'
        }

        Each webhook entry carries:

        - ``definition``: the webhook's name, target pipeline, target action
          (which must be in the source/first stage of the pipeline), payload
          filter rules (a JsonPath expression whose selected value must match
          ``matchEquals``; target-action configuration properties may be used
          as ``{placeholders}``), and authentication settings. Supported
          authentication options are GITHUB_HMAC (requires ``SecretToken``),
          IP (requires ``AllowedIPRange`` as a valid CIDR range), and
          UNAUTHENTICATED (accepts all trigger requests; no properties set).
        - ``url``: a unique URL generated by CodePipeline; a POST to it starts
          the pipeline when the request satisfies the authentication and
          filtering conditions. Deleting and re-creating a webhook invalidates
          the old URL.
        - ``errorMessage`` / ``errorCode``: details of the last webhook error.
        - ``lastTriggered``: when the webhook was last successfully triggered.
        - ``arn`` and ``tags``: the webhook's ARN and applied tags.

        ``NextToken`` is returned when the output is large and can be supplied
        in a subsequent ListWebhooks call.

    :raises:
        CodePipeline.Client.exceptions.ValidationException
        CodePipeline.Client.exceptions.InvalidNextTokenException
    """
    # Documentation-only stub: the real implementation lives in the boto3 client.
    pass
def poll_for_jobs(actionTypeId=None, maxBatchSize=None, queryParam=None):
    """
    Return information about any jobs for AWS CodePipeline to act on.

    PollForJobs is valid only for action types with "Custom" in the owner
    field. If the action type contains "AWS" or "ThirdParty" in the owner
    field, the PollForJobs action returns an error.

    See also: AWS API Documentation.

    :type actionTypeId: dict
    :param actionTypeId: [REQUIRED] Represents information about an action type:

        - category (string) -- [REQUIRED] Defines what kind of action can be
          taken in the stage and constrains the provider type. One of
          'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval'.
        - owner (string) -- [REQUIRED] The creator of the action being called
          ('AWS'|'ThirdParty'|'Custom').
        - provider (string) -- [REQUIRED] The provider of the service being
          called by the action (e.g. CodeDeploy for the Deploy category).
        - version (string) -- [REQUIRED] A string that describes the action
          version.
    :type maxBatchSize: integer
    :param maxBatchSize: The maximum number of jobs to return in a poll for
        jobs call.
    :type queryParam: dict
    :param queryParam: A map of property names and values. For an action type
        with no queryable properties, this value must be null or an empty map.
        For an action type with a queryable property, you must supply that
        property as a key in the map. Only jobs whose action configuration
        matches the mapped value are returned.
    :rtype: dict
    :return: {
            'jobs': [
                {
                    'id': 'string',
                    'data': {
                        'actionTypeId': {
                            'category': 'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval',
                            'owner': 'AWS'|'ThirdParty'|'Custom',
                            'provider': 'string',
                            'version': 'string'
                        },
                        'actionConfiguration': {
                            'configuration': {
                                'string': 'string'
                            }
                        },
                        'pipelineContext': {
                            'pipelineName': 'string',
                            'stage': {
                                'name': 'string'
                            },
                            'action': {
                                'name': 'string',
                                'actionExecutionId': 'string'
                            },
                            'pipelineArn': 'string',
                            'pipelineExecutionId': 'string'
                        },
                        'inputArtifacts': [
                            {
                                'name': 'string',
                                'revision': 'string',
                                'location': {
                                    'type': 'S3',
                                    's3Location': {
                                        'bucketName': 'string',
                                        'objectKey': 'string'
                                    }
                                }
                            },
                        ],
                        'outputArtifacts': [
                            {
                                'name': 'string',
                                'revision': 'string',
                                'location': {
                                    'type': 'S3',
                                    's3Location': {
                                        'bucketName': 'string',
                                        'objectKey': 'string'
                                    }
                                }
                            },
                        ],
                        'artifactCredentials': {
                            'accessKeyId': 'string',
                            'secretAccessKey': 'string',
                            'sessionToken': 'string'
                        },
                        'continuationToken': 'string',
                        'encryptionKey': {
                            'id': 'string',
                            'type': 'KMS'
                        }
                    },
                    'nonce': 'string',
                    'accountId': 'string'
                },
            ]
        }

        Each job carries:

        - ``id``: the unique system-generated ID of the job.
        - ``data.actionTypeId``: the action type the job belongs to.
        - ``data.actionConfiguration.configuration``: the action's
          configuration data as a string-to-string map.
        - ``data.pipelineContext``: the pipeline, stage, and action context for
          the job worker; includes pipelineArn and pipelineExecutionId for
          custom jobs.
        - ``data.inputArtifacts`` / ``data.outputArtifacts``: the artifacts
          supplied to and produced by the job; ``revision`` may be a commit ID
          (GitHub) or a revision ID (Amazon S3), and ``location`` identifies
          the S3 bucket and object key.
        - ``data.artifactCredentials``: temporary AWS STS credentials used to
          access input and output artifacts in the pipeline's artifact store
          S3 bucket.
        - ``data.continuationToken``: a system-generated token (such as an AWS
          CodeDeploy deployment ID) required by a job to continue
          asynchronously.
        - ``data.encryptionKey``: the key used to encrypt data in the artifact
          store; for an AWS KMS key the ``id`` can be the key ID, key ARN, or
          alias ARN (aliases are recognized only in the account that created
          the CMK), and ``type`` must be 'KMS'.
        - ``nonce``: a system-generated random number ensuring the job is
          worked on by only one job worker; used in an AcknowledgeJob request.
        - ``accountId``: the AWS account ID to use when performing the job.

    :raises:
        CodePipeline.Client.exceptions.ValidationException
        CodePipeline.Client.exceptions.ActionTypeNotFoundException
    """
    # Documentation-only stub: the real implementation lives in the boto3 client.
    pass
def poll_for_third_party_jobs(actionTypeId=None, maxBatchSize=None):
    """
    Determine whether there are any third party jobs for a job worker to act
    on. Used for partner actions only.

    See also: AWS API Documentation.

    :type actionTypeId: dict
    :param actionTypeId: [REQUIRED] Represents information about an action type:

        - category (string) -- [REQUIRED] One of
          'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval'.
        - owner (string) -- [REQUIRED] The creator of the action being called
          ('AWS'|'ThirdParty'|'Custom').
        - provider (string) -- [REQUIRED] The provider of the service being
          called by the action (e.g. CodeDeploy for the Deploy category).
        - version (string) -- [REQUIRED] A string that describes the action
          version.
    :type maxBatchSize: integer
    :param maxBatchSize: The maximum number of jobs to return in a poll for
        jobs call.
    :rtype: dict
    :return: {
            'jobs': [
                {
                    'clientId': 'string',
                    'jobId': 'string'
                },
            ]
        }

        Each entry is a job to be worked on by a partner action:

        - ``clientId``: part of the clientId/clientToken pair used to verify
          that the calling entity is allowed access to the job and its details.
        - ``jobId``: the identifier used to identify the job in AWS
          CodePipeline.

    :raises:
        CodePipeline.Client.exceptions.ActionTypeNotFoundException
        CodePipeline.Client.exceptions.ValidationException
    """
    # Documentation-only stub: the real implementation lives in the boto3 client.
    pass
def put_action_revision(pipelineName=None, stageName=None, actionName=None, actionRevision=None):
    """
    Provide information to AWS CodePipeline about new revisions to a source.

    See also: AWS API Documentation.

    :type pipelineName: string
    :param pipelineName: [REQUIRED] The name of the pipeline that starts
        processing the revision to the source.
    :type stageName: string
    :param stageName: [REQUIRED] The name of the stage that contains the
        action that acts on the revision.
    :type actionName: string
    :param actionName: [REQUIRED] The name of the action that processes the
        revision.
    :type actionRevision: dict
    :param actionRevision: [REQUIRED] Represents information about the version
        (or revision) of an action:

        - revisionId (string) -- [REQUIRED] The system-generated unique ID
          that identifies the revision number of the action.
        - revisionChangeId (string) -- [REQUIRED] The unique identifier of the
          change that set the state to this revision (for example, a
          deployment ID or timestamp).
        - created (datetime) -- [REQUIRED] The date and time when the most
          recent version of the action was created, in timestamp format.
    :rtype: dict
    :return: {
            'newRevision': True|False,
            'pipelineExecutionId': 'string'
        }

        ``newRevision`` indicates whether the artifact revision was previously
        used in an execution of the specified pipeline.
        ``pipelineExecutionId`` is the ID of the current workflow state of the
        pipeline.

    :raises:
        CodePipeline.Client.exceptions.PipelineNotFoundException
        CodePipeline.Client.exceptions.StageNotFoundException
        CodePipeline.Client.exceptions.ActionNotFoundException
        CodePipeline.Client.exceptions.ValidationException
    """
    # Documentation-only stub: the real implementation lives in the boto3 client.
    pass
def put_approval_result(pipelineName=None, stageName=None, actionName=None, result=None, token=None):
    """
    Provide the response to a manual approval request to AWS CodePipeline.
    Valid responses include Approved and Rejected.

    See also: AWS API Documentation.

    :type pipelineName: string
    :param pipelineName: [REQUIRED] The name of the pipeline that contains the
        action.
    :type stageName: string
    :param stageName: [REQUIRED] The name of the stage that contains the
        action.
    :type actionName: string
    :param actionName: [REQUIRED] The name of the action for which approval is
        requested.
    :type result: dict
    :param result: [REQUIRED] Represents information about the result of the
        approval request:

        - summary (string) -- [REQUIRED] The summary of the current status of
          the approval request.
        - status (string) -- [REQUIRED] The response submitted by a reviewer
          assigned to an approval action request ('Approved'|'Rejected').
    :type token: string
    :param token: [REQUIRED] The system-generated token used to identify a
        unique approval request. The token for each open approval request can
        be obtained using the GetPipelineState action. It is used to validate
        that the approval request corresponding to this token is still valid.
    :rtype: dict
    :return: {
            'approvedAt': datetime(2015, 1, 1)
        }

        ``approvedAt`` is the timestamp showing when the approval or rejection
        was submitted.

    :raises:
        CodePipeline.Client.exceptions.InvalidApprovalTokenException
        CodePipeline.Client.exceptions.ApprovalAlreadyCompletedException
        CodePipeline.Client.exceptions.PipelineNotFoundException
        CodePipeline.Client.exceptions.StageNotFoundException
        CodePipeline.Client.exceptions.ActionNotFoundException
        CodePipeline.Client.exceptions.ValidationException
    """
    # Documentation-only stub: the real implementation lives in the boto3 client.
    pass
def put_job_failure_result(jobId=None, failureDetails=None):
    """
    Represent the failure of a job as returned to the pipeline by a job
    worker. Used for custom actions only.

    See also: AWS API Documentation.

    :type jobId: string
    :param jobId: [REQUIRED] The unique system-generated ID of the job that
        failed. This is the same ID returned from PollForJobs.
    :type failureDetails: dict
    :param failureDetails: [REQUIRED] The details about the failure of a job:

        - type (string) -- [REQUIRED] The type of the failure. One of
          'JobFailed'|'ConfigurationError'|'PermissionError'|
          'RevisionOutOfSync'|'RevisionUnavailable'|'SystemUnavailable'.
        - message (string) -- [REQUIRED] The message about the failure.
        - externalExecutionId (string) -- The external ID of the run of the
          action that failed.

    :raises:
        CodePipeline.Client.exceptions.ValidationException
        CodePipeline.Client.exceptions.JobNotFoundException
        CodePipeline.Client.exceptions.InvalidJobStateException
    """
    # Documentation-only stub: the real implementation lives in the boto3 client.
    pass
def put_job_success_result(jobId=None, currentRevision=None, continuationToken=None, executionDetails=None, outputVariables=None):
    """
    Represent the success of a job as returned to the pipeline by a job
    worker. Used for custom actions only.

    See also: AWS API Documentation.

    :type jobId: string
    :param jobId: [REQUIRED] The unique system-generated ID of the job that
        succeeded. This is the same ID returned from PollForJobs.
    :type currentRevision: dict
    :param currentRevision: The ID of the current revision of the artifact
        successfully worked on by the job:

        - revision (string) -- [REQUIRED] The revision ID of the current
          version of an artifact.
        - changeIdentifier (string) -- [REQUIRED] The change identifier for
          the current revision.
        - created (datetime) -- The date and time when the most recent
          revision of the artifact was created, in timestamp format.
        - revisionSummary (string) -- The summary of the most recent revision
          of the artifact.
    :type continuationToken: string
    :param continuationToken: A token generated by a job worker, such as an
        AWS CodeDeploy deployment ID, that a successful job provides to
        identify a custom action in progress. Future jobs use this token to
        identify the running instance of the action. It can be reused to
        return more information about the progress of the custom action. When
        the action is complete, no continuation token should be supplied.
    :type executionDetails: dict
    :param executionDetails: The execution details of the successful job, such
        as the actions taken by the job worker:

        - summary (string) -- The summary of the current status of the actions.
        - externalExecutionId (string) -- The system-generated unique ID of
          this action used to identify this job worker in any external
          systems, such as AWS CodeDeploy.
        - percentComplete (integer) -- The percentage of work completed on the
          action, on a scale of 0 to 100 percent.
    :type outputVariables: dict
    :param outputVariables: Key-value pairs produced as output by a job worker
        that can be made available to a downstream action configuration.
        outputVariables can be included only when there is no continuation
        token on the request.

    :raises:
        CodePipeline.Client.exceptions.ValidationException
        CodePipeline.Client.exceptions.JobNotFoundException
        CodePipeline.Client.exceptions.InvalidJobStateException
        CodePipeline.Client.exceptions.OutputVariablesSizeExceededException
    """
    # Documentation-only stub: the real implementation lives in the boto3 client.
    pass
def put_third_party_job_failure_result(jobId=None, clientToken=None, failureDetails=None):
    """
    Represent the failure of a third party job as returned to the pipeline by
    a job worker. Used for partner actions only.

    See also: AWS API Documentation.

    :type jobId: string
    :param jobId: [REQUIRED] The ID of the job that failed. This is the same
        ID returned from PollForThirdPartyJobs.
    :type clientToken: string
    :param clientToken: [REQUIRED] The clientToken portion of the clientId and
        clientToken pair used to verify that the calling entity is allowed
        access to the job and its details.
    :type failureDetails: dict
    :param failureDetails: [REQUIRED] Represents information about failure
        details:

        - type (string) -- [REQUIRED] The type of the failure. One of
          'JobFailed'|'ConfigurationError'|'PermissionError'|
          'RevisionOutOfSync'|'RevisionUnavailable'|'SystemUnavailable'.
        - message (string) -- [REQUIRED] The message about the failure.
        - externalExecutionId (string) -- The external ID of the run of the
          action that failed.

    :raises:
        CodePipeline.Client.exceptions.ValidationException
        CodePipeline.Client.exceptions.JobNotFoundException
        CodePipeline.Client.exceptions.InvalidJobStateException
        CodePipeline.Client.exceptions.InvalidClientTokenException
    """
    # Documentation-only stub: the real implementation lives in the boto3 client.
    pass
def put_third_party_job_success_result(jobId=None, clientToken=None, currentRevision=None, continuationToken=None, executionDetails=None):
    """
    Represent the success of a third party job as returned to the pipeline by
    a job worker. Used for partner actions only.

    See also: AWS API Documentation.

    :type jobId: string
    :param jobId: [REQUIRED] The ID of the job that successfully completed.
        This is the same ID returned from PollForThirdPartyJobs.
    :type clientToken: string
    :param clientToken: [REQUIRED] The clientToken portion of the clientId and
        clientToken pair used to verify that the calling entity is allowed
        access to the job and its details.
    :type currentRevision: dict
    :param currentRevision: Represents information about a current revision:

        - revision (string) -- [REQUIRED] The revision ID of the current
          version of an artifact.
        - changeIdentifier (string) -- [REQUIRED] The change identifier for
          the current revision.
        - created (datetime) -- The date and time when the most recent
          revision of the artifact was created, in timestamp format.
        - revisionSummary (string) -- The summary of the most recent revision
          of the artifact.
    :type continuationToken: string
    :param continuationToken: A token generated by a job worker, such as an
        AWS CodeDeploy deployment ID, that a successful job provides to
        identify a partner action in progress. Future jobs use this token to
        identify the running instance of the action. It can be reused to
        return more information about the progress of the partner action. When
        the action is complete, no continuation token should be supplied.
    :type executionDetails: dict
    :param executionDetails: The details of the actions taken and results
        produced on an artifact as it passes through stages in the pipeline:

        - summary (string) -- The summary of the current status of the actions.
        - externalExecutionId (string) -- The system-generated unique ID of
          this action used to identify this job worker in any external
          systems, such as AWS CodeDeploy.
        - percentComplete (integer) -- The percentage of work completed on the
          action, on a scale of 0 to 100 percent.

    :raises:
        CodePipeline.Client.exceptions.ValidationException
        CodePipeline.Client.exceptions.JobNotFoundException
        CodePipeline.Client.exceptions.InvalidJobStateException
        CodePipeline.Client.exceptions.InvalidClientTokenException
    """
    # Documentation-only stub: the real implementation lives in the boto3 client.
    pass
def put_webhook(webhook=None, tags=None):
    """
    Define a webhook and return a unique webhook URL generated by CodePipeline.

    The generated URL can be supplied to third-party source hosting providers,
    which call it on every code change. When CodePipeline receives a POST on
    the URL, the pipeline defined in the webhook is started as long as the
    request satisfies the webhook's authentication and filtering rules. The
    RegisterWebhookWithThirdParty and DeregisterWebhookWithThirdParty APIs can
    be used to configure supported third parties automatically.

    See also: AWS API Documentation.

    :type webhook: dict
    :param webhook: [REQUIRED] The webhook definition:

        - ``name`` (string, required) -- unique webhook name.
        - ``targetPipeline`` (string, required) -- pipeline to connect to.
        - ``targetAction`` (string, required) -- action to connect to; must be
          in the source (first) stage of the pipeline.
        - ``filters`` (list, required) -- rules applied to the POST body; all
          must pass for the pipeline to start. Each rule is a dict with
          ``jsonPath`` (required JsonPath expression applied to the payload)
          and ``matchEquals`` (value the selected payload field must equal;
          may contain ``{ConfigKey}`` placeholders resolved from the target
          action configuration, e.g. ``'refs/heads/{Branch}'``).
        - ``authentication`` (string, required) -- ``'GITHUB_HMAC'``, ``'IP'``
          or ``'UNAUTHENTICATED'``.
        - ``authenticationConfiguration`` (dict, required) -- for GITHUB_HMAC
          set only ``SecretToken``; for IP set only ``AllowedIPRange`` (a
          valid CIDR range); for UNAUTHENTICATED set no properties.
    :type tags: list
    :param tags: Tags for the webhook; each is a dict with required ``key``
        and ``value`` strings.
    :rtype: dict
    :return: ``{'webhook': {...}}`` -- the stored ``definition`` (same shape
        as the ``webhook`` argument) plus the generated ``url`` (deleting and
        re-creating a webhook invalidates the old URL and generates a new
        one), ``errorMessage``, ``errorCode``, ``lastTriggered`` timestamp,
        ``arn`` and ``tags``.

    Exceptions: CodePipeline.Client.exceptions.ValidationException,
    LimitExceededException, InvalidWebhookFilterPatternException,
    InvalidWebhookAuthenticationParametersException,
    PipelineNotFoundException, TooManyTagsException, InvalidTagsException,
    ConcurrentModificationException.
    """
    # Generated documentation stub -- no client behavior is implemented here.
    return None
def register_webhook_with_third_party(webhookName=None):
    """
    Configure a connection between an existing webhook and the external tool
    with events to be detected.

    See also: AWS API Documentation.

    :type webhookName: string
    :param webhookName: The name of an existing webhook created with
        PutWebhook to register with a supported third party.
    :rtype: dict
    :return: ``{}`` (empty response on success).

    Exceptions: CodePipeline.Client.exceptions.ValidationException,
    CodePipeline.Client.exceptions.WebhookNotFoundException.
    """
    # Generated documentation stub -- no client behavior is implemented here.
    return None
def retry_stage_execution(pipelineName=None, stageName=None, pipelineExecutionId=None, retryMode=None):
    """
    Resume a pipeline execution by retrying the last failed actions in a
    stage.

    A stage can be retried immediately if any of its actions fail. On retry,
    actions still in progress keep working and failed actions are triggered
    again.

    See also: AWS API Documentation.

    :type pipelineName: string
    :param pipelineName: [REQUIRED] The name of the pipeline that contains
        the failed stage.
    :type stageName: string
    :param stageName: [REQUIRED] The name of the failed stage to be retried.
    :type pipelineExecutionId: string
    :param pipelineExecutionId: [REQUIRED] The ID of the pipeline execution
        in the failed stage to be retried. Use GetPipelineState to retrieve
        the current pipelineExecutionId of the failed stage.
    :type retryMode: string
    :param retryMode: [REQUIRED] The scope of the retry attempt. Currently
        the only supported value is ``'FAILED_ACTIONS'``.
    :rtype: dict
    :return: ``{'pipelineExecutionId': 'string'}`` -- the ID of the current
        workflow execution in the failed stage.

    Exceptions: CodePipeline.Client.exceptions.ValidationException,
    PipelineNotFoundException, StageNotFoundException,
    StageNotRetryableException, NotLatestPipelineExecutionException.
    """
    # Generated documentation stub -- no client behavior is implemented here.
    return None
def start_pipeline_execution(name=None, clientRequestToken=None):
    """
    Start the specified pipeline.

    Specifically, begins processing the latest commit to the source location
    specified as part of the pipeline.

    See also: AWS API Documentation.

    :type name: string
    :param name: [REQUIRED] The name of the pipeline to start.
    :type clientRequestToken: string
    :param clientRequestToken: The system-generated unique ID used to
        identify a unique execution request. Autopopulated if not provided.
    :rtype: dict
    :return: ``{'pipelineExecutionId': 'string'}`` -- the unique
        system-generated ID of the pipeline execution that was started.

    Exceptions: CodePipeline.Client.exceptions.ValidationException,
    CodePipeline.Client.exceptions.PipelineNotFoundException.
    """
    # Generated documentation stub -- no client behavior is implemented here.
    return None
def stop_pipeline_execution(pipelineName=None, pipelineExecutionId=None, abandon=None, reason=None):
    """
    Stop the specified pipeline execution.

    You choose to either stop by completing in-progress actions without
    starting subsequent actions, or by abandoning in-progress actions. While
    completing or abandoning, the execution is in a Stopping state; once all
    in-progress actions are completed or abandoned it is Stopped.

    See also: AWS API Documentation.

    :type pipelineName: string
    :param pipelineName: [REQUIRED] The name of the pipeline to stop.
    :type pipelineExecutionId: string
    :param pipelineExecutionId: [REQUIRED] The ID of the pipeline execution
        to be stopped in the current stage. Use GetPipelineState to retrieve
        the current pipelineExecutionId.
    :type abandon: boolean
    :param abandon: Stop the execution by abandoning, rather than finishing,
        in-progress actions. Note: this option can lead to failed or
        out-of-sequence tasks.
    :type reason: string
    :param reason: Comments, such as the reason the pipeline was stopped.
    :rtype: dict
    :return: ``{'pipelineExecutionId': 'string'}`` -- the unique
        system-generated ID of the pipeline execution that was stopped.

    Exceptions: CodePipeline.Client.exceptions.ValidationException,
    PipelineNotFoundException, PipelineExecutionNotStoppableException,
    DuplicatedStopRequestException.
    """
    # Generated documentation stub -- no client behavior is implemented here.
    return None
def tag_resource(resourceArn=None, tags=None):
    """
    Add to or modify the tags of the given resource.

    Tags are metadata that can be used to manage a resource.

    See also: AWS API Documentation.

    :type resourceArn: string
    :param resourceArn: [REQUIRED] The Amazon Resource Name (ARN) of the
        resource you want to add tags to.
    :type tags: list
    :param tags: [REQUIRED] The tags you want to modify or add to the
        resource; each is a dict with required ``key`` and ``value`` strings.
    :rtype: dict
    :return: ``{}`` (empty response on success).

    Exceptions: CodePipeline.Client.exceptions.ValidationException,
    ResourceNotFoundException, InvalidArnException, TooManyTagsException,
    InvalidTagsException, ConcurrentModificationException.
    """
    # Generated documentation stub -- no client behavior is implemented here.
    return None
def untag_resource(resourceArn=None, tagKeys=None):
    """
    Remove tags from an AWS resource.

    See also: AWS API Documentation.

    :type resourceArn: string
    :param resourceArn: [REQUIRED] The Amazon Resource Name (ARN) of the
        resource to remove tags from.
    :type tagKeys: list
    :param tagKeys: [REQUIRED] The list of keys (strings) for the tags to be
        removed from the resource.
    :rtype: dict
    :return: ``{}`` (empty response on success).

    Exceptions: CodePipeline.Client.exceptions.ValidationException,
    ResourceNotFoundException, InvalidArnException, InvalidTagsException,
    ConcurrentModificationException.
    """
    # Generated documentation stub -- no client behavior is implemented here.
    return None
def update_pipeline(pipeline=None):
"""
Updates a specified pipeline with edits or changes to its structure. Use a JSON file with the pipeline structure and UpdatePipeline to provide the full structure of the pipeline. Updating the pipeline increases the version number of the pipeline by 1.
See also: AWS API Documentation
Exceptions
:example: response = client.update_pipeline(
pipeline={
'name': 'string',
'roleArn': 'string',
'artifactStore': {
'type': 'S3',
'location': 'string',
'encryptionKey': {
'id': 'string',
'type': 'KMS'
}
},
'artifactStores': {
'string': {
'type': 'S3',
'location': 'string',
'encryptionKey': {
'id': 'string',
'type': 'KMS'
}
}
},
'stages': [
{
'name': 'string',
'blockers': [
{
'name': 'string',
'type': 'Schedule'
},
],
'actions': [
{
'name': 'string',
'actionTypeId': {
'category': 'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval',
'owner': 'AWS'|'ThirdParty'|'Custom',
'provider': 'string',
'version': 'string'
},
'runOrder': 123,
'configuration': {
'string': 'string'
},
'outputArtifacts': [
{
'name': 'string'
},
],
'inputArtifacts': [
{
'name': 'string'
},
],
'roleArn': 'string',
'region': 'string',
'namespace': 'string'
},
]
},
],
'version': 123
}
)
:type pipeline: dict
:param pipeline: [REQUIRED]\nThe name of the pipeline to be updated.\n\nname (string) -- [REQUIRED]The name of the action to be performed.\n\nroleArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) for AWS CodePipeline to use to either perform actions with no actionRoleArn , or to use to assume roles for actions with an actionRoleArn .\n\nartifactStore (dict) --Represents information about the S3 bucket where artifacts are stored for the pipeline.\n\nNote\nYou must include either artifactStore or artifactStores in your pipeline, but you cannot use both. If you create a cross-region action in your pipeline, you must use artifactStores .\n\n\ntype (string) -- [REQUIRED]The type of the artifact store, such as S3.\n\nlocation (string) -- [REQUIRED]The S3 bucket used for storing the artifacts for a pipeline. You can specify the name of an S3 bucket but not a folder in the bucket. A folder to contain the pipeline artifacts is created for you based on the name of the pipeline. You can use any S3 bucket in the same AWS Region as the pipeline to store your pipeline artifacts.\n\nencryptionKey (dict) --The encryption key used to encrypt the data in the artifact store, such as an AWS Key Management Service (AWS KMS) key. If this is undefined, the default key for Amazon S3 is used.\n\nid (string) -- [REQUIRED]The ID used to identify the key. For an AWS KMS key, you can use the key ID, the key ARN, or the alias ARN.\n\nNote\nAliases are recognized only in the account that created the customer master key (CMK). For cross-account actions, you can only use the key ID or key ARN to identify the key.\n\n\ntype (string) -- [REQUIRED]The type of encryption key, such as an AWS Key Management Service (AWS KMS) key. When creating or updating a pipeline, the value must be set to \'KMS\'.\n\n\n\n\n\nartifactStores (dict) --A mapping of artifactStore objects and their corresponding AWS Regions. 
There must be an artifact store for the pipeline Region and for each cross-region action in the pipeline.\n\nNote\nYou must include either artifactStore or artifactStores in your pipeline, but you cannot use both. If you create a cross-region action in your pipeline, you must use artifactStores .\n\n\n(string) --\n(dict) --The S3 bucket where artifacts for the pipeline are stored.\n\nNote\nYou must include either artifactStore or artifactStores in your pipeline, but you cannot use both. If you create a cross-region action in your pipeline, you must use artifactStores .\n\n\ntype (string) -- [REQUIRED]The type of the artifact store, such as S3.\n\nlocation (string) -- [REQUIRED]The S3 bucket used for storing the artifacts for a pipeline. You can specify the name of an S3 bucket but not a folder in the bucket. A folder to contain the pipeline artifacts is created for you based on the name of the pipeline. You can use any S3 bucket in the same AWS Region as the pipeline to store your pipeline artifacts.\n\nencryptionKey (dict) --The encryption key used to encrypt the data in the artifact store, such as an AWS Key Management Service (AWS KMS) key. If this is undefined, the default key for Amazon S3 is used.\n\nid (string) -- [REQUIRED]The ID used to identify the key. For an AWS KMS key, you can use the key ID, the key ARN, or the alias ARN.\n\nNote\nAliases are recognized only in the account that created the customer master key (CMK). For cross-account actions, you can only use the key ID or key ARN to identify the key.\n\n\ntype (string) -- [REQUIRED]The type of encryption key, such as an AWS Key Management Service (AWS KMS) key. 
When creating or updating a pipeline, the value must be set to \'KMS\'.\n\n\n\n\n\n\n\n\n\nstages (list) -- [REQUIRED]The stage in which to perform the action.\n\n(dict) --Represents information about a stage and its definition.\n\nname (string) -- [REQUIRED]The name of the stage.\n\nblockers (list) --Reserved for future use.\n\n(dict) --Reserved for future use.\n\nname (string) -- [REQUIRED]Reserved for future use.\n\ntype (string) -- [REQUIRED]Reserved for future use.\n\n\n\n\n\nactions (list) -- [REQUIRED]The actions included in a stage.\n\n(dict) --Represents information about an action declaration.\n\nname (string) -- [REQUIRED]The action declaration\'s name.\n\nactionTypeId (dict) -- [REQUIRED]Specifies the action type and the provider of the action.\n\ncategory (string) -- [REQUIRED]A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the following values.\n\nowner (string) -- [REQUIRED]The creator of the action being called.\n\nprovider (string) -- [REQUIRED]The provider of the service being called by the action. Valid providers are determined by the action category. For example, an action in the Deploy category type might have a provider of AWS CodeDeploy, which would be specified as CodeDeploy. For more information, see Valid Action Types and Providers in CodePipeline .\n\nversion (string) -- [REQUIRED]A string that describes the action version.\n\n\n\nrunOrder (integer) --The order in which actions are run.\n\nconfiguration (dict) --The action\'s configuration. These are key-value pairs that specify input values for an action. For more information, see Action Structure Requirements in CodePipeline . For the list of configuration properties for the AWS CloudFormation action type in CodePipeline, see Configuration Properties Reference in the AWS CloudFormation User Guide . 
For template snippets with examples, see Using Parameter Override Functions with CodePipeline Pipelines in the AWS CloudFormation User Guide .\nThe values can be represented in either JSON or YAML format. For example, the JSON configuration item format is as follows:\n\nJSON:'Configuration' : { Key : Value },\n\n\n(string) --\n(string) --\n\n\n\n\noutputArtifacts (list) --The name or ID of the result of the action declaration, such as a test or build artifact.\n\n(dict) --Represents information about the output of an action.\n\nname (string) -- [REQUIRED]The name of the output of an artifact, such as 'My App'.\nThe input artifact of an action must exactly match the output artifact declared in a preceding action, but the input artifact does not have to be the next action in strict sequence from the action that provided the output artifact. Actions in parallel can declare different output artifacts, which are in turn consumed by different following actions.\nOutput artifact names must be unique within a pipeline.\n\n\n\n\n\ninputArtifacts (list) --The name or ID of the artifact consumed by the action, such as a test or build artifact.\n\n(dict) --Represents information about an artifact to be worked on, such as a test or build artifact.\n\nname (string) -- [REQUIRED]The name of the artifact to be worked on (for example, 'My App').\nThe input artifact of an action must exactly match the output artifact declared in a preceding action, but the input artifact does not have to be the next action in strict sequence from the action that provided the output artifact. Actions in parallel can declare different output artifacts, which are in turn consumed by different following actions.\n\n\n\n\n\nroleArn (string) --The ARN of the IAM service role that performs the declared action. 
This is assumed through the roleArn for the pipeline.\n\nregion (string) --The action declaration\'s AWS Region, such as us-east-1.\n\nnamespace (string) --The variable namespace associated with the action. All variables produced as output by this action fall under this namespace.\n\n\n\n\n\n\n\n\n\nversion (integer) --The version number of the pipeline. A new pipeline always has a version number of 1. This number is incremented when a pipeline is updated.\n\n\n
:rtype: dict
ReturnsResponse Syntax{
'pipeline': {
'name': 'string',
'roleArn': 'string',
'artifactStore': {
'type': 'S3',
'location': 'string',
'encryptionKey': {
'id': 'string',
'type': 'KMS'
}
},
'artifactStores': {
'string': {
'type': 'S3',
'location': 'string',
'encryptionKey': {
'id': 'string',
'type': 'KMS'
}
}
},
'stages': [
{
'name': 'string',
'blockers': [
{
'name': 'string',
'type': 'Schedule'
},
],
'actions': [
{
'name': 'string',
'actionTypeId': {
'category': 'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval',
'owner': 'AWS'|'ThirdParty'|'Custom',
'provider': 'string',
'version': 'string'
},
'runOrder': 123,
'configuration': {
'string': 'string'
},
'outputArtifacts': [
{
'name': 'string'
},
],
'inputArtifacts': [
{
'name': 'string'
},
],
'roleArn': 'string',
'region': 'string',
'namespace': 'string'
},
]
},
],
'version': 123
}
}
Response Structure
(dict) --Represents the output of an UpdatePipeline action.
pipeline (dict) --The structure of the updated pipeline.
name (string) --The name of the action to be performed.
roleArn (string) --The Amazon Resource Name (ARN) for AWS CodePipeline to use to either perform actions with no actionRoleArn , or to use to assume roles for actions with an actionRoleArn .
artifactStore (dict) --Represents information about the S3 bucket where artifacts are stored for the pipeline.
Note
You must include either artifactStore or artifactStores in your pipeline, but you cannot use both. If you create a cross-region action in your pipeline, you must use artifactStores .
type (string) --The type of the artifact store, such as S3.
location (string) --The S3 bucket used for storing the artifacts for a pipeline. You can specify the name of an S3 bucket but not a folder in the bucket. A folder to contain the pipeline artifacts is created for you based on the name of the pipeline. You can use any S3 bucket in the same AWS Region as the pipeline to store your pipeline artifacts.
encryptionKey (dict) --The encryption key used to encrypt the data in the artifact store, such as an AWS Key Management Service (AWS KMS) key. If this is undefined, the default key for Amazon S3 is used.
id (string) --The ID used to identify the key. For an AWS KMS key, you can use the key ID, the key ARN, or the alias ARN.
Note
Aliases are recognized only in the account that created the customer master key (CMK). For cross-account actions, you can only use the key ID or key ARN to identify the key.
type (string) --The type of encryption key, such as an AWS Key Management Service (AWS KMS) key. When creating or updating a pipeline, the value must be set to \'KMS\'.
artifactStores (dict) --A mapping of artifactStore objects and their corresponding AWS Regions. There must be an artifact store for the pipeline Region and for each cross-region action in the pipeline.
Note
You must include either artifactStore or artifactStores in your pipeline, but you cannot use both. If you create a cross-region action in your pipeline, you must use artifactStores .
(string) --
(dict) --The S3 bucket where artifacts for the pipeline are stored.
Note
You must include either artifactStore or artifactStores in your pipeline, but you cannot use both. If you create a cross-region action in your pipeline, you must use artifactStores .
type (string) --The type of the artifact store, such as S3.
location (string) --The S3 bucket used for storing the artifacts for a pipeline. You can specify the name of an S3 bucket but not a folder in the bucket. A folder to contain the pipeline artifacts is created for you based on the name of the pipeline. You can use any S3 bucket in the same AWS Region as the pipeline to store your pipeline artifacts.
encryptionKey (dict) --The encryption key used to encrypt the data in the artifact store, such as an AWS Key Management Service (AWS KMS) key. If this is undefined, the default key for Amazon S3 is used.
id (string) --The ID used to identify the key. For an AWS KMS key, you can use the key ID, the key ARN, or the alias ARN.
Note
Aliases are recognized only in the account that created the customer master key (CMK). For cross-account actions, you can only use the key ID or key ARN to identify the key.
type (string) --The type of encryption key, such as an AWS Key Management Service (AWS KMS) key. When creating or updating a pipeline, the value must be set to \'KMS\'.
stages (list) --The stage in which to perform the action.
(dict) --Represents information about a stage and its definition.
name (string) --The name of the stage.
blockers (list) --Reserved for future use.
(dict) --Reserved for future use.
name (string) --Reserved for future use.
type (string) --Reserved for future use.
actions (list) --The actions included in a stage.
(dict) --Represents information about an action declaration.
name (string) --The action declaration\'s name.
actionTypeId (dict) --Specifies the action type and the provider of the action.
category (string) --A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the following values.
owner (string) --The creator of the action being called.
provider (string) --The provider of the service being called by the action. Valid providers are determined by the action category. For example, an action in the Deploy category type might have a provider of AWS CodeDeploy, which would be specified as CodeDeploy. For more information, see Valid Action Types and Providers in CodePipeline .
version (string) --A string that describes the action version.
runOrder (integer) --The order in which actions are run.
configuration (dict) --The action\'s configuration. These are key-value pairs that specify input values for an action. For more information, see Action Structure Requirements in CodePipeline . For the list of configuration properties for the AWS CloudFormation action type in CodePipeline, see Configuration Properties Reference in the AWS CloudFormation User Guide . For template snippets with examples, see Using Parameter Override Functions with CodePipeline Pipelines in the AWS CloudFormation User Guide .
The values can be represented in either JSON or YAML format. For example, the JSON configuration item format is as follows:
JSON:"Configuration" : { Key : Value },
(string) --
(string) --
outputArtifacts (list) --The name or ID of the result of the action declaration, such as a test or build artifact.
(dict) --Represents information about the output of an action.
name (string) --The name of the output of an artifact, such as "My App".
The input artifact of an action must exactly match the output artifact declared in a preceding action, but the input artifact does not have to be the next action in strict sequence from the action that provided the output artifact. Actions in parallel can declare different output artifacts, which are in turn consumed by different following actions.
Output artifact names must be unique within a pipeline.
inputArtifacts (list) --The name or ID of the artifact consumed by the action, such as a test or build artifact.
(dict) --Represents information about an artifact to be worked on, such as a test or build artifact.
name (string) --The name of the artifact to be worked on (for example, "My App").
The input artifact of an action must exactly match the output artifact declared in a preceding action, but the input artifact does not have to be the next action in strict sequence from the action that provided the output artifact. Actions in parallel can declare different output artifacts, which are in turn consumed by different following actions.
roleArn (string) --The ARN of the IAM service role that performs the declared action. This is assumed through the roleArn for the pipeline.
region (string) --The action declaration\'s AWS Region, such as us-east-1.
namespace (string) --The variable namespace associated with the action. All variables produced as output by this action fall under this namespace.
version (integer) --The version number of the pipeline. A new pipeline always has a version number of 1. This number is incremented when a pipeline is updated.
Exceptions
CodePipeline.Client.exceptions.ValidationException
CodePipeline.Client.exceptions.InvalidStageDeclarationException
CodePipeline.Client.exceptions.InvalidActionDeclarationException
CodePipeline.Client.exceptions.InvalidBlockerDeclarationException
CodePipeline.Client.exceptions.InvalidStructureException
CodePipeline.Client.exceptions.LimitExceededException
:return: {
'pipeline': {
'name': 'string',
'roleArn': 'string',
'artifactStore': {
'type': 'S3',
'location': 'string',
'encryptionKey': {
'id': 'string',
'type': 'KMS'
}
},
'artifactStores': {
'string': {
'type': 'S3',
'location': 'string',
'encryptionKey': {
'id': 'string',
'type': 'KMS'
}
}
},
'stages': [
{
'name': 'string',
'blockers': [
{
'name': 'string',
'type': 'Schedule'
},
],
'actions': [
{
'name': 'string',
'actionTypeId': {
'category': 'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval',
'owner': 'AWS'|'ThirdParty'|'Custom',
'provider': 'string',
'version': 'string'
},
'runOrder': 123,
'configuration': {
'string': 'string'
},
'outputArtifacts': [
{
'name': 'string'
},
],
'inputArtifacts': [
{
'name': 'string'
},
],
'roleArn': 'string',
'region': 'string',
'namespace': 'string'
},
]
},
],
'version': 123
}
}
:returns:
(string) --
(string) --
"""
pass
| 37.039971 | 7,786 | 0.636383 |
acf920b7ead3e75e2837565431b8feff1c8e0618 | 31,276 | py | Python | packit/api.py | lbarcziova/packit | 0c022a5008c7efd7578e1a132b48004af2e3dd55 | [
"MIT"
] | null | null | null | packit/api.py | lbarcziova/packit | 0c022a5008c7efd7578e1a132b48004af2e3dd55 | [
"MIT"
] | null | null | null | packit/api.py | lbarcziova/packit | 0c022a5008c7efd7578e1a132b48004af2e3dd55 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2019 Red Hat, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This is the official python interface for packit.
"""
import asyncio
import logging
import os
import shutil
import sys
from datetime import datetime
from pathlib import Path
from typing import Sequence, Callable, List, Tuple, Dict, Iterable, Optional
from ogr.abstract import PullRequest
from tabulate import tabulate
from packit import utils
from packit.actions import ActionName
from packit.config import Config
from packit.config.common_package_config import CommonPackageConfig
from packit.config.package_config import find_packit_yaml, load_packit_yaml
from packit.config.package_config_validator import PackageConfigValidator
from packit.constants import SYNCING_NOTE
from packit.copr_helper import CoprHelper
from packit.distgit import DistGit
from packit.exceptions import (
PackitException,
PackitSRPMException,
PackitSRPMNotFoundException,
PackitRPMException,
PackitRPMNotFoundException,
PackitCoprException,
)
from packit.local_project import LocalProject
from packit.status import Status
from packit.sync import sync_files
from packit.upstream import Upstream
from packit.utils import assert_existence, get_packit_version
logger = logging.getLogger(__name__)
class PackitAPI:
    """High-level entry point for packit operations.

    Ties together the upstream repository, the dist-git repository and the
    Copr build service for a single package.  The heavyweight collaborators
    (``Upstream``, ``DistGit``, ``CoprHelper``) are created lazily through
    properties, so constructing a ``PackitAPI`` instance stays cheap.
    """

    def __init__(
        self,
        config: Config,
        package_config: Optional[
            CommonPackageConfig
        ],  # validate doesn't want PackageConfig
        upstream_local_project: LocalProject = None,
        downstream_local_project: LocalProject = None,
    ) -> None:
        """
        :param config: global user/packit configuration
        :param package_config: per-package configuration; may be None when the
            instance is used only for config validation
        :param upstream_local_project: checkout of the upstream git repository
        :param downstream_local_project: checkout of the dist-git repository
        """
        self.config = config
        self.package_config: CommonPackageConfig = package_config
        self.upstream_local_project = upstream_local_project
        self.downstream_local_project = downstream_local_project
        # caches for the lazily-built handles, see the `up`/`dg`/`copr_helper` properties
        self._up = None
        self._dg = None
        self._copr_helper: Optional[CoprHelper] = None
        # ensures `kinit` runs at most once per instance (see init_kerberos_ticket)
        self._kerberos_initialized = False
def __repr__(self):
return (
"PackitAPI("
f"config='{self.config}', "
f"package_config='{self.package_config}', "
f"upstream_local_project='{self.upstream_local_project}', "
f"downstream_local_project='{self.downstream_local_project}', "
f"up='{self.up}', "
f"dg='{self.dg}', "
f"copr_helper='{self.copr_helper}')"
)
@property
def up(self):
if self._up is None:
self._up = Upstream(
config=self.config,
package_config=self.package_config,
local_project=self.upstream_local_project,
)
return self._up
@property
def dg(self):
if self._dg is None:
self.init_kerberos_ticket()
self._dg = DistGit(
config=self.config,
package_config=self.package_config,
local_project=self.downstream_local_project,
)
return self._dg
@property
def copr_helper(self) -> CoprHelper:
if self._copr_helper is None:
self._copr_helper = CoprHelper(
upstream_local_project=self.upstream_local_project
)
return self._copr_helper
def sync_release(
self,
dist_git_branch: str,
use_local_content=False,
version: str = None,
force_new_sources=False,
upstream_ref: str = None,
create_pr: bool = True,
force: bool = False,
) -> Optional[PullRequest]:
"""
Update given package in Fedora
:param dist_git_branch: branch in dist-git
:param use_local_content: don't check out anything
:param version: upstream version to update in Fedora
:param force_new_sources: don't check the lookaside cache and perform new-sources
:param upstream_ref: for a source-git repo, use this ref as the latest upstream commit
:param create_pr: create a pull request if set to True
:param force: ignore changes in the git index
:return created PullRequest if create_pr is True, else None
"""
assert_existence(self.up.local_project)
assert_existence(self.dg.local_project)
if self.dg.is_dirty():
raise PackitException(
f"The distgit repository {self.dg.local_project.working_dir} is dirty."
f"This is not supported."
)
if not force and self.up.is_dirty() and not use_local_content:
raise PackitException(
"The repository is dirty, will not discard the changes. Use --force to bypass."
)
# do not add anything between distgit clone and saving gpg keys!
self.up.allowed_gpg_keys = self.dg.get_allowed_gpg_keys_from_downstream_config()
upstream_ref = upstream_ref or self.package_config.upstream_ref
create_pr = create_pr and self.package_config.create_pr
self.up.run_action(actions=ActionName.post_upstream_clone)
full_version = version or self.up.get_version()
if not full_version:
raise PackitException(
"Could not figure out version of latest upstream release."
)
current_up_branch = self.up.active_branch
try:
upstream_tag = self.up.package_config.upstream_tag_template.format(
version=full_version
)
if not use_local_content:
self.up.local_project.checkout_release(upstream_tag)
self.dg.check_last_commit()
self.up.run_action(actions=ActionName.pre_sync)
self.dg.create_branch(
dist_git_branch,
base=f"remotes/origin/{dist_git_branch}",
setup_tracking=True,
)
# fetch and reset --hard upstream/$branch?
logger.info(f"Using {dist_git_branch!r} dist-git branch.")
self.dg.update_branch(dist_git_branch)
self.dg.checkout_branch(dist_git_branch)
if create_pr:
local_pr_branch = f"{full_version}-{dist_git_branch}-update"
self.dg.create_branch(local_pr_branch)
self.dg.checkout_branch(local_pr_branch)
description = (
f"Upstream tag: {upstream_tag}\n"
f"Upstream commit: {self.up.local_project.commit_hexsha}\n"
)
path = os.path.join(self.dg.local_project.working_dir, "README.packit")
logger.debug(f"Path of README: {path}")
with open(path, "w") as f:
f.write(SYNCING_NOTE.format(packit_version=get_packit_version()))
files_to_sync = self.package_config.get_all_files_to_sync()
if self.up.with_action(action=ActionName.prepare_files):
comment = f"- new upstream release: {full_version}"
try:
self.dg.set_specfile_content(
self.up.specfile, full_version, comment
)
except FileNotFoundError as ex:
# no downstream spec file: this is either a mistake or
# there is no spec file in dist-git yet, hence warning
logger.warning(
f"There is not spec file downstream: {ex}, copying the one from upstream."
)
shutil.copy2(
self.up.absolute_specfile_path,
self.dg.get_absolute_specfile_path(),
)
raw_sync_files = files_to_sync.get_raw_files_to_sync(
Path(self.up.local_project.working_dir),
Path(self.dg.local_project.working_dir),
)
# exclude spec, we have special plans for it
raw_sync_files = [
x for x in raw_sync_files if x.src != self.up.absolute_specfile_path
]
sync_files(raw_sync_files)
if upstream_ref:
if self.up.with_action(action=ActionName.create_patches):
patches = self.up.create_patches(
upstream=upstream_ref,
destination=str(self.dg.absolute_specfile_dir),
)
self.dg.specfile_add_patches(patches)
self._handle_sources(
add_new_sources=True, force_new_sources=force_new_sources
)
# when the action is defined, we still need to copy the files
if self.up.has_action(action=ActionName.prepare_files):
raw_sync_files = files_to_sync.get_raw_files_to_sync(
Path(self.up.local_project.working_dir),
Path(self.dg.local_project.working_dir),
)
sync_files(raw_sync_files)
self.dg.commit(title=f"{full_version} upstream release", msg=description)
new_pr = None
if create_pr:
new_pr = self.push_and_create_pr(
pr_title=f"Update to upstream release {full_version}",
pr_description=description,
dist_git_branch=dist_git_branch,
)
else:
self.dg.push(refspec=f"HEAD:{dist_git_branch}")
finally:
if not use_local_content:
self.up.local_project.git_repo.git.checkout(current_up_branch)
self.dg.refresh_specfile()
return new_pr
    def sync_from_downstream(
        self,
        dist_git_branch: str,
        upstream_branch: str,
        no_pr: bool = False,
        fork: bool = True,
        remote_name: str = None,
        exclude_files: Iterable[str] = None,
        force: bool = False,
    ):
        """
        Sync content of Fedora dist-git repo back to upstream

        :param exclude_files: files that will be excluded from the sync
        :param dist_git_branch: branch in dist-git
        :param upstream_branch: upstream branch
        :param no_pr: won't create a pull request if set to True
        :param fork: forks the project if set to True
        :param remote_name: name of remote where we should push; if None, try to find a ssh_url
        :param force: ignore changes in the git index
        :raises PackitException: when a branch is not set or the upstream
            checkout is dirty (and ``force`` is not given)
        """
        exclude_files = exclude_files or []
        if not dist_git_branch:
            raise PackitException("Dist-git branch is not set.")
        if not upstream_branch:
            raise PackitException("Upstream branch is not set.")
        logger.info(f"Upstream active branch: {self.up.active_branch}")
        if not force and self.up.is_dirty():
            raise PackitException(
                "The repository is dirty, will not discard the changes. Use --force to bypass."
            )
        self.dg.update_branch(dist_git_branch)
        self.dg.checkout_branch(dist_git_branch)
        logger.info(f"Using {dist_git_branch!r} dist-git branch.")
        # either work directly on the requested upstream branch, or on a
        # dedicated local branch which will become the PR source
        if no_pr:
            self.up.checkout_branch(upstream_branch)
        else:
            local_pr_branch = f"{dist_git_branch}-downstream-sync"
            self.up.create_branch(local_pr_branch)
            self.up.checkout_branch(local_pr_branch)
        raw_sync_files = self.package_config.synced_files.get_raw_files_to_sync(
            dest_dir=Path(self.dg.local_project.working_dir),
            src_dir=Path(self.up.local_project.working_dir),
        )
        # the configured sync direction is upstream->downstream; reverse each
        # mapping to copy dist-git content back into the upstream checkout
        reverse_raw_sync_files = [
            raw_file.reversed()
            for raw_file in raw_sync_files
            if Path(raw_file.dest).name not in exclude_files
        ]
        sync_files(reverse_raw_sync_files, fail_on_missing=False)
        if not no_pr:
            description = f"Downstream commit: {self.dg.local_project.commit_hexsha}\n"
            commit_msg = f"Sync from downstream branch {dist_git_branch!r}"
            pr_title = f"Update from downstream branch {dist_git_branch!r}"
            self.up.commit(title=commit_msg, msg=description)
            # the branch may already be up, let's push forcefully
            source_branch, fork_username = self.up.push_to_fork(
                self.up.local_project.ref,
                fork=fork,
                force=True,
                remote_name=remote_name,
            )
            self.up.create_pull(
                pr_title,
                description,
                source_branch=source_branch,
                target_branch=upstream_branch,
                fork_username=fork_username,
            )
def push_and_create_pr(
self, pr_title: str, pr_description: str, dist_git_branch: str
) -> PullRequest:
# the branch may already be up, let's push forcefully
self.dg.push_to_fork(self.dg.local_project.ref, force=True)
return self.dg.create_pull(
pr_title,
pr_description,
source_branch=self.dg.local_project.ref,
target_branch=dist_git_branch,
)
def _handle_sources(self, add_new_sources, force_new_sources):
if not (add_new_sources or force_new_sources):
return
make_new_sources = False
# btw this is really naive: the name could be the same but the hash can be different
# TODO: we should do something when such situation happens
if force_new_sources or not self.dg.is_archive_in_lookaside_cache(
self.dg.upstream_archive_name
):
make_new_sources = True
else:
sources_file = Path(self.dg.local_project.working_dir) / "sources"
if self.dg.upstream_archive_name not in sources_file.read_text():
make_new_sources = True
if make_new_sources:
archive = self.dg.download_upstream_archive()
self.init_kerberos_ticket()
self.dg.upload_to_lookaside_cache(str(archive))
    def build(
        self,
        dist_git_branch: str,
        scratch: bool = False,
        nowait: bool = False,
        koji_target: Optional[str] = None,
        from_upstream: bool = False,
    ):
        """
        Build component in Fedora infra (defaults to koji)

        :param dist_git_branch: ref in dist-git
        :param scratch: should the build be a scratch build?
        :param nowait: don't wait on build?
        :param koji_target: koji target to pick (see `koji list-targets`)
        :param from_upstream: build directly from the upstream checkout?
        """
        logger.info(f"Using {dist_git_branch!r} dist-git branch")
        # koji needs an authenticated session
        self.init_kerberos_ticket()
        if from_upstream:
            # build an SRPM from the upstream checkout and submit it directly
            srpm_path = self.create_srpm(srpm_dir=self.up.local_project.working_dir)
            return self.up.koji_build(
                scratch=scratch,
                nowait=nowait,
                koji_target=koji_target,
                srpm_path=srpm_path,
            )
        # otherwise build from dist-git: make sure the branch exists,
        # is tracked, up to date and checked out before submitting
        self.dg.create_branch(
            dist_git_branch,
            base=f"remotes/origin/{dist_git_branch}",
            setup_tracking=True,
        )
        self.dg.update_branch(dist_git_branch)
        self.dg.checkout_branch(dist_git_branch)
        self.dg.build(scratch=scratch, nowait=nowait, koji_target=koji_target)
def create_update(
self,
dist_git_branch: str,
update_type: str,
update_notes: str,
koji_builds: Sequence[str] = None,
):
"""
Create bodhi update
:param dist_git_branch: git ref
:param update_type: type of the update, check CLI
:param update_notes: documentation about the update
:param koji_builds: list of koji builds or None (and pick latest)
"""
logger.debug(
f"Create bodhi update, "
f"builds={koji_builds}, dg_branch={dist_git_branch}, type={update_type}"
)
self.dg.create_bodhi_update(
koji_builds=koji_builds,
dist_git_branch=dist_git_branch,
update_notes=update_notes,
update_type=update_type,
)
    def create_srpm(
        self, output_file: str = None, upstream_ref: str = None, srpm_dir: str = None
    ) -> Path:
        """
        Create srpm from the upstream repo

        :param upstream_ref: git ref to upstream commit
        :param output_file: path + filename where the srpm should be written, defaults to cwd
        :param srpm_dir: path to the directory where the srpm is meant to be placed
        :return: a path to the srpm
        :raises PackitSRPMException: when preparation or the build itself fails
        :raises PackitSRPMNotFoundException: when the build succeeded but no
            file exists at the reported path
        """
        self.up.run_action(actions=ActionName.post_upstream_clone)
        try:
            self.up.prepare_upstream_for_srpm_creation(upstream_ref=upstream_ref)
        except Exception as ex:
            # wrap any failure so callers only deal with Packit* exceptions;
            # `from ex` keeps the original traceback chained
            raise PackitSRPMException(
                f"Preparing of the upstream to the SRPM build failed: {ex}"
            ) from ex
        try:
            srpm_path = self.up.create_srpm(srpm_path=output_file, srpm_dir=srpm_dir)
        except PackitSRPMException:
            # already the right type - propagate unchanged
            raise
        except Exception as ex:
            raise PackitSRPMException(
                f"An unexpected error occurred when creating the SRPM: {ex}"
            ) from ex
        if not srpm_path.exists():
            raise PackitSRPMNotFoundException(
                f"SRPM was created successfully, but can't be found at {srpm_path}"
            )
        return srpm_path
    def create_rpms(self, upstream_ref: str = None, rpm_dir: str = None) -> List[Path]:
        """
        Create rpms from the upstream repo

        :param upstream_ref: git ref to upstream commit
        :param rpm_dir: path to the directory where the rpm is meant to be placed
        :return: a path to the rpm
        :raises PackitRPMException: when preparation or the build itself fails
        :raises PackitRPMNotFoundException: when the build succeeded but a
            reported rpm path does not exist
        """
        self.up.run_action(actions=ActionName.post_upstream_clone)
        try:
            self.up.prepare_upstream_for_srpm_creation(upstream_ref=upstream_ref)
        except Exception as ex:
            # wrap any failure so callers only deal with Packit* exceptions;
            # `from ex` keeps the original traceback chained
            raise PackitRPMException(
                f"Preparing of the upstream to the RPM build failed: {ex}"
            ) from ex
        try:
            rpm_paths = self.up.create_rpms(rpm_dir=rpm_dir)
        except PackitRPMException:
            # already the right type - propagate unchanged
            raise
        except Exception as ex:
            raise PackitRPMException(
                f"An unexpected error occurred when creating the RPMs: {ex}"
            ) from ex
        # sanity check: every reported rpm must actually exist on disk
        for rpm_path in rpm_paths:
            if not rpm_path.exists():
                raise PackitRPMNotFoundException(
                    f"RPM was created successfully, but can't be found at {rpm_path}"
                )
        return rpm_paths
@staticmethod
async def status_get_downstream_prs(status) -> List[Tuple[int, str, str]]:
try:
await asyncio.sleep(0)
return status.get_downstream_prs()
except Exception as exc:
# https://github.com/packit-service/ogr/issues/67 work-around
logger.error(f"Failed when getting downstream PRs: {exc}")
return []
@staticmethod
async def status_get_dg_versions(status) -> Dict:
try:
await asyncio.sleep(0)
return status.get_dg_versions()
except Exception as exc:
logger.error(f"Failed when getting Dist-git versions: {exc}")
return {}
@staticmethod
async def status_get_up_releases(status) -> List:
try:
await asyncio.sleep(0)
return status.get_up_releases()
except Exception as exc:
logger.error(f"Failed when getting upstream releases: {exc}")
return []
@staticmethod
async def status_get_koji_builds(status) -> Dict:
try:
await asyncio.sleep(0)
return status.get_koji_builds()
except Exception as exc:
logger.error(f"Failed when getting Koji builds: {exc}")
return {}
@staticmethod
async def status_get_copr_builds(status) -> List:
try:
await asyncio.sleep(0)
return status.get_copr_builds()
except Exception as exc:
logger.error(f"Failed when getting Copr builds: {exc}")
return []
@staticmethod
async def status_get_updates(status) -> List:
try:
await asyncio.sleep(0)
return status.get_updates()
except Exception as exc:
logger.error(f"Failed when getting Bodhi updates: {exc}")
return []
@staticmethod
async def status_main(status: Status) -> List:
"""
Schedule repository data retrieval calls concurrently.
:param status: status of the package
:return: awaitable tasks
"""
res = await asyncio.gather(
PackitAPI.status_get_downstream_prs(status),
PackitAPI.status_get_dg_versions(status),
PackitAPI.status_get_up_releases(status),
PackitAPI.status_get_koji_builds(status),
PackitAPI.status_get_copr_builds(status),
PackitAPI.status_get_updates(status),
)
return res
    def status(self) -> None:
        """Gather and log the package status (PRs, versions, builds, updates)."""
        status = Status(self.config, self.package_config, self.up, self.dg)
        if sys.version_info >= (3, 7, 0):
            res = asyncio.run(self.status_main(status))
        else:
            # backward compatibility for Python 3.6
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            try:
                # gather order must match the unpacking below
                res = loop.run_until_complete(
                    asyncio.gather(
                        self.status_get_downstream_prs(status),
                        self.status_get_dg_versions(status),
                        self.status_get_up_releases(status),
                        self.status_get_koji_builds(status),
                        self.status_get_copr_builds(status),
                        self.status_get_updates(status),
                    )
                )
            finally:
                asyncio.set_event_loop(None)
                loop.close()
        (ds_prs, dg_versions, up_releases, koji_builds, copr_builds, updates) = res
        if ds_prs:
            logger.info("\nDownstream PRs:")
            logger.info(tabulate(ds_prs, headers=["ID", "Title", "URL"]))
        else:
            logger.info("\nNo downstream PRs found.")
        if dg_versions:
            logger.info("\nDist-git versions:")
            for branch, dg_version in dg_versions.items():
                logger.info(f"{branch}: {dg_version}")
        else:
            logger.info("\nNo Dist-git versions found.")
        if up_releases:
            logger.info("\nUpstream releases:")
            upstream_releases_str = "\n".join(
                f"{release.tag_name}" for release in up_releases
            )
            logger.info(upstream_releases_str)
        else:
            logger.info("\nNo upstream releases found.")
        if updates:
            logger.info("\nLatest Bodhi updates:")
            logger.info(tabulate(updates, headers=["Update", "Karma", "status"]))
        else:
            logger.info("\nNo Bodhi updates found.")
        if koji_builds:
            logger.info("\nLatest Koji builds:")
            for branch, branch_builds in koji_builds.items():
                logger.info(f"{branch}: {branch_builds}")
        else:
            logger.info("\nNo Koji builds found.")
        if copr_builds:
            logger.info("\nLatest Copr builds:")
            logger.info(
                tabulate(copr_builds, headers=["Build ID", "Project name", "Status"])
            )
        else:
            logger.info("\nNo Copr builds found.")
    def run_copr_build(
        self,
        project: str,
        chroots: List[str],
        owner: str = None,
        description: str = None,
        instructions: str = None,
        upstream_ref: str = None,
        list_on_homepage: bool = False,
        preserve_project: bool = False,
        additional_packages: List[str] = None,
        additional_repos: List[str] = None,
    ) -> Tuple[int, str]:
        """
        Submit a build to copr build system using an SRPM using the current checkout.

        :param project: name of the copr project to build
                        inside (defaults to something long and ugly)
        :param chroots: a list of COPR chroots (targets) e.g. fedora-rawhide-x86_64
        :param owner: defaults to username from copr config file
        :param description: description of the project
        :param instructions: installation instructions for the project
        :param upstream_ref: git ref to upstream commit
        :param list_on_homepage: if set, created copr project will be visible on copr's home-page
        :param preserve_project: if set, project will not be created as temporary
        :param list additional_packages: buildroot packages for the chroot [DOES NOT WORK YET]
        :param list additional_repos: buildroot additional additional_repos
        :return: id of the created build and url to the build web page
        :raises PackitCoprException: when no copr owner can be determined
        """
        # build the SRPM first - inside the upstream working dir
        srpm_path = self.create_srpm(
            upstream_ref=upstream_ref, srpm_dir=self.up.local_project.working_dir
        )
        owner = owner or self.copr_helper.configured_owner
        if not owner:
            raise PackitCoprException(
                "Copr owner not set. Use Copr config file or `--owner` when calling packit CLI."
            )
        # make sure the target project exists (and has the requested settings)
        self.copr_helper.create_copr_project_if_not_exists(
            project=project,
            chroots=chroots,
            owner=owner,
            description=description,
            instructions=instructions,
            list_on_homepage=list_on_homepage,
            preserve_project=preserve_project,
            additional_packages=additional_packages,
            additional_repos=additional_repos,
        )
        logger.debug(
            f"Submitting a build to copr build system,"
            f"owner={owner}, project={project}, path={srpm_path}"
        )
        build = self.copr_helper.copr_client.build_proxy.create_from_file(
            ownername=owner, projectname=project, path=srpm_path
        )
        return build.id, self.copr_helper.copr_web_build_url(build)
def watch_copr_build(
self, build_id: int, timeout: int, report_func: Callable = None
) -> str:
""" returns copr build state """
return self.copr_helper.watch_copr_build(
build_id=build_id, timeout=timeout, report_func=report_func
)
    @staticmethod
    def push_bodhi_update(update_alias: str):
        """Request that the given Bodhi update be pushed to stable.

        Logs the outcome; a missing update is logged as an error, not raised.
        """
        # imported lazily so bodhi is only required when this feature is used
        from bodhi.client.bindings import BodhiClient, UpdateNotFound
        b = BodhiClient()
        try:
            response = b.request(update=update_alias, request="stable")
            logger.debug(f"Bodhi response:\n{response}")
            logger.info(
                f"Bodhi update {response['alias']} pushed to stable:\n"
                f"- {response['url']}\n"
                f"- stable_karma: {response['stable_karma']}\n"
                f"- unstable_karma: {response['unstable_karma']}\n"
                f"- notes:\n{response['notes']}\n"
            )
        except UpdateNotFound:
            logger.error("Update was not found.")
def get_testing_updates(self, update_alias: Optional[str]) -> List:
from bodhi.client.bindings import BodhiClient
b = BodhiClient()
results = b.query(
alias=update_alias,
packages=self.dg.package_config.downstream_package_name,
status="testing",
)["updates"]
logger.debug("Bodhi updates with status 'testing' fetched.")
return results
@staticmethod
def days_in_testing(update) -> int:
if update.get("date_testing"):
date_testing = datetime.strptime(
update["date_testing"], "%Y-%m-%d %H:%M:%S"
)
return (datetime.utcnow() - date_testing).days
return 0
def push_updates(self, update_alias: Optional[str] = None):
updates = self.get_testing_updates(update_alias)
if not updates:
logger.info("No testing updates found.")
for update in updates:
if self.days_in_testing(update) >= update["stable_days"]:
self.push_bodhi_update(update["alias"])
else:
logger.debug(f"{update['alias']} is not ready to be pushed to stable")
def init_kerberos_ticket(self) -> None:
"""
Initialize the kerberos ticket if we have fas_user and keytab_path configured.
The `kinit` command is run only once when called multiple times.
"""
if self._kerberos_initialized:
return
self._run_kinit()
self._kerberos_initialized = True
    def _run_kinit(self) -> None:
        """
        Run `kinit` if we have fas_user and keytab_path configured.

        Silently skips (with an info log) when credentials are incomplete
        or the keytab file does not exist.
        """
        if (
            not self.config.fas_user
            or not self.config.keytab_path
            or not Path(self.config.keytab_path).is_file()
        ):
            logger.info("Won't be doing kinit, no credentials provided.")
            return
        # authenticate non-interactively (-k) using the keytab file (-t)
        cmd = [
            "kinit",
            f"{self.config.fas_user}@FEDORAPROJECT.ORG",
            "-k",
            "-t",
            self.config.keytab_path,
        ]
        utils.run_command_remote(
            cmd=cmd,
            error_message="Failed to init kerberos ticket:",
            fail=True,
            # this prints debug logs from kerberos to stdout
            env={"KRB5_TRACE": "/dev/stdout"},
        )
    def clean(self):
        """Clean up stuff once all the work is done; currently a no-op."""
        # command handlers have nothing to clean
        logger.debug("PackitAPI.cleanup (there are no objects to clean)")
@staticmethod
def validate_package_config(working_dir: Path) -> str:
""" validate .packit.yaml on the provided path and return human readable report """
config_path = find_packit_yaml(working_dir, try_local_dir_last=True,)
config_content = load_packit_yaml(config_path)
v = PackageConfigValidator(config_path, config_content)
return v.validate()
| 37.636582 | 98 | 0.609669 |
acf9233af1c30b1b42d997cfea857beae6aada23 | 5,949 | py | Python | docs/conf.py | Corey-Zumar/clipper-db-queries | e60f8d8b11c0ccc5f0287b63fe5cb86d128b72f0 | [
"Apache-2.0"
] | null | null | null | docs/conf.py | Corey-Zumar/clipper-db-queries | e60f8d8b11c0ccc5f0287b63fe5cb86d128b72f0 | [
"Apache-2.0"
] | null | null | null | docs/conf.py | Corey-Zumar/clipper-db-queries | e60f8d8b11c0ccc5f0287b63fe5cb86d128b72f0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Clipper documentation build configuration file, created by
# sphinx-quickstart on Tue May 30 15:49:22 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
import sys
import os
import shlex
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
import recommonmark
from recommonmark.parser import CommonMarkParser
from recommonmark.transform import AutoStructify
source_parsers = {'.md': CommonMarkParser}
source_suffix = ['.rst', '.md']
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx',
'sphinx.ext.todo', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode',
'sphinx.ext.napoleon'
]
autodoc_member_order = 'bysource'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Clipper'
copyright = u'2017, Dan Crankshaw'
author = u'Dan Crankshaw'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Clipperdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Clipper.tex', u'Clipper Documentation', u'Dan Crankshaw',
'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'clipper', u'Clipper Documentation', [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Clipper', u'Clipper Documentation', author, 'Clipper',
'One line description of project.', 'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
github_doc_root = 'https://github.com/ucbrise/clipper/tree/develop/docs/'
# app setup hook
def setup(app):
    """Sphinx extension hook: register recommonmark's AutoStructify transform."""
    recommonmark_settings = {
        'url_resolver': lambda url: github_doc_root + url,
        'auto_toc_tree_section': 'Contents',
        'enable_eval_rst': True,
        'enable_auto_doc_ref': True,
    }
    # Third argument True: rebuild the docs when this value changes.
    app.add_config_value('recommonmark_config', recommonmark_settings, True)
    app.add_transform(AutoStructify)
| 32.686813 | 79 | 0.697764 |
acf923444100a37036ba4453bc551dc28ccc2305 | 564 | py | Python | nomadgram/images/migrations/0005_image_tags.py | sirius0630/nomadgram | bff27bbcc0d8e75ffd1d1b4005a57d1b3025beb8 | [
"MIT"
] | null | null | null | nomadgram/images/migrations/0005_image_tags.py | sirius0630/nomadgram | bff27bbcc0d8e75ffd1d1b4005a57d1b3025beb8 | [
"MIT"
] | 9 | 2021-03-10T10:00:53.000Z | 2022-02-18T22:00:51.000Z | nomadgram/images/migrations/0005_image_tags.py | sirius0630/nomadgram | bff27bbcc0d8e75ffd1d1b4005a57d1b3025beb8 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.10 on 2020-02-24 10:13
from django.db import migrations
import taggit.managers
class Migration(migrations.Migration):
    # Auto-generated migration: attaches a taggit TaggableManager ('tags')
    # to the Image model. Don't hand-edit applied migrations; add a new one.
    dependencies = [
        ('taggit', '0003_taggeditem_add_unique_index'),
        ('images', '0004_auto_20200222_0327'),
    ]
    operations = [
        migrations.AddField(
            model_name='image',
            name='tags',
            field=taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'),
        ),
    ]
| 26.857143 | 162 | 0.64539 |
acf923c8f8f5c590af8a3086b7215a15bb63dcbe | 4,077 | py | Python | bot.py | Stepheny755/ClanStatsBot | 6aeb733fed7c90f9e6a60bf4c16a4289de849037 | [
"MIT"
] | 2 | 2019-02-04T08:21:20.000Z | 2019-03-27T05:11:04.000Z | bot.py | Stepheny755/ClanStatsBot | 6aeb733fed7c90f9e6a60bf4c16a4289de849037 | [
"MIT"
] | null | null | null | bot.py | Stepheny755/ClanStatsBot | 6aeb733fed7c90f9e6a60bf4c16a4289de849037 | [
"MIT"
] | 2 | 2019-03-27T16:04:46.000Z | 2019-03-28T04:03:28.000Z | import discord
import asyncio
import time
import sys
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from stats import Stats
from API import API
from update import Update
from util import Util
from post import Post
# Bot token is read from an untracked local file so it never lands in VCS.
token = open('token.txt',"r").read().strip()
client = discord.Client()
@client.event
async def on_ready():
    """Log the bot's identity once the Discord connection is established.

    Fix: the handler was named ``on_read``, which is not a discord.py event
    name, so it was never dispatched. The connection event discord.py
    dispatches is ``on_ready``.
    """
    print('Logged in as')
    print(client.user.name)
    print(client.user.id)
    print('------')
@client.event
async def on_message(message):
    """Dispatch chat commands based on the message prefix.

    Supported commands: !test, !sleep, cv, !stop, !getID, !stats, !embed.
    Every incoming message (command or not) is echoed to stdout at the end.
    """
    if message.content.startswith('!test'):
        # Count how many of the last 100 channel messages were sent by the author.
        counter = 0
        tmp = await client.send_message(message.channel, 'Calculating messages...')
        async for log in client.logs_from(message.channel, limit=100):
            if log.author == message.author:
                counter += 1
        await client.edit_message(tmp, 'You have {} messages.'.format(counter))
    elif message.content.startswith('!sleep'):
        await asyncio.sleep(5)
        await client.send(message.channel, 'Done sleeping')
    #elif message.content.find("cv"):
        #await client.send_message(message.channel,'ree')
    #elif(int(message.content.find('cv'))>=0):
        #print(str(message.channel)+": "+message.content+" "+str(message.timestamp))
        #await client.send_message(message.channel,'ree')
    elif(message.content.startswith('cv')):
        await client.send_message(message.channel,'reee')
    elif(message.content.startswith('!stop')):
        # exit() raises SystemExit, so the sys.exit() below is unreachable.
        exit()
        sys.exit()
    elif(message.content.startswith('!getID')):
        # '!getID <name>' -> look up and post the player's numeric ID.
        temp = message.content
        inputname = temp[7:]  # strip the 7-char prefix "!getID "
        api = API()
        playerID = api.getPlayerID(inputname)
        playername = api.getPlayerName(playerID)
        ret = playername+"'s ID: "+str(playerID)
        await client.send_message(message.channel,ret)
    elif(message.content.startswith("!stats")):
        # '!stats <name>' -> dump raw player statistics, truncated to
        # Discord's 2000-character message limit.
        temp = message.content
        inputname = temp[7:]  # strip "!stats "
        api = API()
        playerID = api.getPlayerID(inputname)
        playername = api.getPlayerName(playerID)
        playerstats = api.getPlayerStats(playerID)
        print(playerstats)
        # NOTE(review): getPlayerStats is called a second time here.
        ret = str(api.getPlayerStats(playerID))[:2000]
        await client.send_message(message.channel,ret)
    elif(message.content.startswith('!embed')):
        # '!embed <clan tag>' -> post clan statistics embed(s); clans with
        # more than 25 members are split over two embeds.
        temp = message.content
        input = temp[7:]  # NOTE: shadows the input() builtin in this branch
        await client.send_message(message.channel,str('Processing '+input+' Statistics'))
        a = API()
        if(len(a.getClanMembers(a.getClanID(input)))>25):
            embed0 = await postValues(input,0,24)
            await client.send_message(message.channel,embed=embed0)
            embed1 = await postValues(input,25,50)
            await client.send_message(message.channel,embed=embed1)
        else:
            embed = await postValues(input,0,25)
            await client.send_message(message.channel,embed=embed)
    print(str(message.channel)+": "+message.content)
async def postValues(clanname,start,end):
    """Build a Discord embed with clan statistics.

    start/end are forwarded to Post.createEmbed (presumably a member index
    range — verify there). A footer records the wall-clock build time.
    """
    start_time=time.time()
    embed = discord.Embed()
    #TODO: Check member size to determine if multiple embeds are required
    p = Post()
    a = API()
    u = Util()
    embed = p.createEmbed(clanname,embed,start,end)
    postname = "["+str(clanname)+"] "+a.getClanName(a.getClanID(clanname))+" Statistics"
    embed.set_author(name=postname, icon_url=client.user.default_avatar_url)
    runtime = "Runtime: "+str(u.round3(time.time()-start_time))+" Seconds"
    embed.set_footer(text=str(runtime))
    return embed
def scheduled_job():
    """Cron job: refresh cached exp values and stats via the Update helper.

    A fresh asyncio loop is created to drive the async Update coroutines.
    NOTE(review): with AsyncIOScheduler this callback runs on the bot's loop
    thread, so blocking here via run_until_complete may stall the bot while
    the update runs — confirm.
    """
    print("Updated Started")
    #TODO: send a discord message
    u = Update()
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    loop.run_until_complete(u.saveExpValues())
    loop.run_until_complete(u.saveStats())
    t = Util()
    print("Update Finished: "+str(t.getGMTTime()))
if(__name__=="__main__"):
    # Schedule the daily stats refresh at 05:37 UTC, then start the bot.
    sched = AsyncIOScheduler()
    sched.add_job(scheduled_job,'cron',hour=5,minute=37,timezone='UTC')
    sched.start()
    print("Scheduler Started")
    print("Bot Started")
    # client.run() blocks until the bot shuts down; the two lines below only
    # execute after disconnect and look like leftover debugging output.
    client.run(token)
    a = API()
    print(a.getClanMembers('MIA-E'))
| 31.604651 | 89 | 0.656365 |
acf923e1f3657c5a88d4e04b355d73138a850a56 | 185 | py | Python | output/models/ms_data/datatypes/facets/hex_binary/hex_binary_min_length001_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/ms_data/datatypes/facets/hex_binary/hex_binary_min_length001_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/ms_data/datatypes/facets/hex_binary/hex_binary_min_length001_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.ms_data.datatypes.facets.hex_binary.hex_binary_min_length001_xsd.hex_binary_min_length001 import (
FooType,
Test,
)
# Explicit public API of this generated-bindings package.
__all__ = [
    "FooType",
    "Test",
]
| 18.5 | 117 | 0.72973 |
acf9246c24c79ee104b671d826799e0de9a43051 | 438 | py | Python | meiduo_mall/meiduo_mall/apps/ouath/models.py | JianChengBai/Django | a81f71ef431df5e2b0cdb43af11366feb9bfd346 | [
"MIT"
] | null | null | null | meiduo_mall/meiduo_mall/apps/ouath/models.py | JianChengBai/Django | a81f71ef431df5e2b0cdb43af11366feb9bfd346 | [
"MIT"
] | null | null | null | meiduo_mall/meiduo_mall/apps/ouath/models.py | JianChengBai/Django | a81f71ef431df5e2b0cdb43af11366feb9bfd346 | [
"MIT"
] | null | null | null | from django.db import models
from meiduo_mall.utils.models import BaseModel
class OAuthQQUser(BaseModel):
    """QQ-login user: binds a QQ OAuth OpenID to a local user account."""
    # Local account this QQ identity is linked to; rows die with the user.
    user = models.ForeignKey('users.User', on_delete=models.CASCADE, verbose_name='用户')
    # OpenID issued by QQ OAuth; indexed because logins look users up by it.
    openid = models.CharField(max_length=64, verbose_name='openid', db_index=True)
    class Meta:
        db_table = 'tb_oauth_qq'
        verbose_name = 'QQ登录用户数据'
        verbose_name_plural = verbose_name
| 20.857143 | 87 | 0.691781 |
acf9248ebe020ad5e2deeeba5a88d0df2f404f47 | 468 | py | Python | week_3/06_lesson.py | SekachVitaliy/Python | 4d199a571cfedca51ec6e724e4d8c98e086e984d | [
"Unlicense"
] | null | null | null | week_3/06_lesson.py | SekachVitaliy/Python | 4d199a571cfedca51ec6e724e4d8c98e086e984d | [
"Unlicense"
] | null | null | null | week_3/06_lesson.py | SekachVitaliy/Python | 4d199a571cfedca51ec6e724e4d8c98e086e984d | [
"Unlicense"
] | null | null | null | """
Цена товара обозначена в рублях с точностью до копеек, то есть действительным числом с двумя цифрами после
десятичной точки. Запишите в две целочисленные переменные стоимость товара в виде целого числа рублей и целого числа
копеек и выведите их на экран. При решении этой задачи нельзя пользоваться условными инструкциями и циклами.
Формат ввода
Вводится неотрицательное действительное число.
"""
# Read the price (rubles with two decimal digits) and print the whole-ruble
# and kopeck parts separated by a space, without conditionals or loops.
total_kopecks = round(float(input()) * 100)
rubles, kopecks = divmod(total_kopecks, 100)
print(rubles, kopecks)
| 36 | 116 | 0.794872 |
acf9250930826982cafb709cf7855d170d300f47 | 1,481 | py | Python | commands/mecdriveteleopdefaultfps.py | FRCTeam279/2019mule | fc06aa082711f7f92321c6a1cd2cf425d7e746a4 | [
"MIT"
] | null | null | null | commands/mecdriveteleopdefaultfps.py | FRCTeam279/2019mule | fc06aa082711f7f92321c6a1cd2cf425d7e746a4 | [
"MIT"
] | null | null | null | commands/mecdriveteleopdefaultfps.py | FRCTeam279/2019mule | fc06aa082711f7f92321c6a1cd2cf425d7e746a4 | [
"MIT"
] | null | null | null | import math
from wpilib.command import Command
import robotmap
import subsystems
import oi
import utility.navhelper as navhelper
class MecDriveTeleopDefaultFPS(Command):
    '''
    Default teleop drive command: one joystick controls rotation, and the
    other translation (field-oriented mecanum drive).
    '''
    def __init__(self):
        super().__init__('MecDriveTeleopDefaultFPS')
        # Claim the driveline so the scheduler can interrupt other drive commands.
        self.requires(subsystems.driveline)
        self.setInterruptible(True)
        self.setRunWhenDisabled(False)
    def execute(self):
        # Bail out if the joysticks have not been initialized yet.
        if oi.leftDriverStick is None or oi.rightDriverStick is None:
            return
        # Left stick: translation (x/y); right stick X axis: rotation (z).
        y = oi.leftDriverStick.getY()
        x = oi.leftDriverStick.getX()
        z = oi.rightDriverStick.getX()
        if oi.btnDriveSlow.get():
            # Half-speed mode. NOTE(review): scaling happens before the
            # null-zone filter, which shrinks the effective deadband — confirm
            # this is intended.
            y = y/2
            x = x/2
        x = oi.filterInput(x, oi.config.leftDriverStickNullZone, 1)
        y = oi.filterInput(y, oi.config.leftDriverStickNullZone, 1)
        z = oi.filterInput(z, oi.config.rightDriverStickNullZone, 1)
        magnitude = math.sqrt((x * x) + (y * y))
        #Robot oriented
        #roboDrive.mecanumDrive_Polar(magnitude, oi.leftDriverStick.getDirectionDegrees(), z);
        #Field Oriented
        # Subtract the gyro heading so the stick direction is field-relative.
        subsystems.driveline.mecanumDrive.drivePolar(magnitude, navhelper.addDegrees(oi.leftDriverStick.getDirectionDegrees(), -1 * robotmap.sensors.ahrs.getAngle()), z)
    def isFinished(self):
        # default commands never "finish", they're just interrupted by other commands
        return False
| 29.039216 | 169 | 0.66104 |
acf925953c392ed3f5a62a781474c446d37574e4 | 653 | py | Python | Python/test/test_bqs/test_stack_array.py | mnk400/INFO6205 | 1a30679b9a7ebe8ec6b22b385091b24aa5a67720 | [
"Apache-2.0"
] | null | null | null | Python/test/test_bqs/test_stack_array.py | mnk400/INFO6205 | 1a30679b9a7ebe8ec6b22b385091b24aa5a67720 | [
"Apache-2.0"
] | null | null | null | Python/test/test_bqs/test_stack_array.py | mnk400/INFO6205 | 1a30679b9a7ebe8ec6b22b385091b24aa5a67720 | [
"Apache-2.0"
] | null | null | null | import unittest
from bqs.stack_array import Stack
class TestStack(unittest.TestCase):
    """Unit tests for the array-backed Stack."""
    def test_stack(self):
        # A freshly constructed stack reports itself empty.
        stack = Stack()
        self.assertTrue(stack.is_empty())
    def test_stack2(self):
        # Pushing a single item makes the stack non-empty.
        stack = Stack()
        stack.push(1)
        self.assertFalse(stack.is_empty())
    def test_stack3(self):
        # push/pop round-trip: the popped item equals the pushed one.
        stack = Stack()
        self.assertTrue(stack.is_empty())
        stack.push(1)
        self.assertFalse(stack.is_empty())
        popped = stack.pop()
        self.assertEqual(popped, 1)
if __name__ == "__main__":
unittest.main()
| 21.766667 | 46 | 0.572741 |
acf925d0ffcd994bc657de5ca51b74cc357a5bda | 8,564 | py | Python | tests/sync/test_fetch_browser_context.py | axelande/playwright-python | f2c31090d6235014045ad6915d935d6908b1f592 | [
"Apache-2.0"
] | 2 | 2021-02-26T03:04:19.000Z | 2021-02-26T05:17:50.000Z | tests/sync/test_fetch_browser_context.py | axelande/playwright-python | f2c31090d6235014045ad6915d935d6908b1f592 | [
"Apache-2.0"
] | 37 | 2020-12-10T05:11:10.000Z | 2021-05-24T06:25:48.000Z | tests/sync/test_fetch_browser_context.py | axelande/playwright-python | f2c31090d6235014045ad6915d935d6908b1f592 | [
"Apache-2.0"
] | 1 | 2022-01-29T10:35:58.000Z | 2022-01-29T10:35:58.000Z | # Copyright (c) Microsoft Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from typing import Any, Dict, List
from urllib.parse import parse_qs
import pytest
from playwright.sync_api import BrowserContext, Error, FilePayload, Page
from tests.server import Server
def test_get_should_work(context: BrowserContext, server: Server) -> None:
    """context.request.get resolves URL, status, headers and body."""
    response = context.request.get(server.PREFIX + "/simple.json")
    assert response.url == server.PREFIX + "/simple.json"
    assert response.status == 200
    assert response.status_text == "OK"
    assert response.ok is True
    assert response.headers["content-type"] == "application/json"
    assert {
        "name": "Content-Type",
        "value": "application/json",
    } in response.headers_array
    assert response.text() == '{"foo": "bar"}\n'
def test_fetch_should_work(context: BrowserContext, server: Server) -> None:
    """context.request.fetch behaves the same as get for a plain GET."""
    response = context.request.fetch(server.PREFIX + "/simple.json")
    assert response.url == server.PREFIX + "/simple.json"
    assert response.status == 200
    assert response.status_text == "OK"
    assert response.ok is True
    assert response.headers["content-type"] == "application/json"
    assert {
        "name": "Content-Type",
        "value": "application/json",
    } in response.headers_array
    assert response.text() == '{"foo": "bar"}\n'
def test_should_throw_on_network_error(context: BrowserContext, server: Server) -> None:
    """A dropped TCP connection surfaces as a playwright Error."""
    # loseConnection() closes the socket before any response is written.
    server.set_route("/test", lambda request: request.transport.loseConnection())
    with pytest.raises(Error, match="socket hang up"):
        context.request.fetch(server.PREFIX + "/test")
def test_should_add_session_cookies_to_request(
    context: BrowserContext, server: Server
) -> None:
    """Cookies stored on the context are sent with context.request calls."""
    context.add_cookies(
        [
            {
                "name": "username",
                "value": "John Doe",
                "url": server.EMPTY_PAGE,
                "expires": -1,
                "httpOnly": False,
                "secure": False,
                "sameSite": "Lax",
            }
        ]
    )
    with server.expect_request("/empty.html") as server_req:
        # Fix: removed a stray trailing comma that wrapped this call in a
        # throwaway 1-tuple (leftover from the async variant of this test).
        context.request.get(server.EMPTY_PAGE)
    assert server_req.value.getHeader("Cookie") == "username=John Doe"
@pytest.mark.parametrize(
    "method", ["fetch", "delete", "get", "head", "patch", "post", "put"]
)
def test_should_support_query_params(
    context: BrowserContext, server: Server, method: str
) -> None:
    """params= replaces URL query values (no duplicate p1) and round-trips
    non-ASCII parameter names and values."""
    expected_params = {"p1": "v1", "парам2": "знач2"}
    with server.expect_request("/empty.html") as server_req:
        getattr(context.request, method)(
            server.EMPTY_PAGE + "?p1=foo", params=expected_params
        )
    assert server_req.value.args["p1".encode()][0].decode() == "v1"
    assert len(server_req.value.args["p1".encode()]) == 1
    assert server_req.value.args["парам2".encode()][0].decode() == "знач2"
@pytest.mark.parametrize(
    "method", ["fetch", "delete", "get", "head", "patch", "post", "put"]
)
def test_should_support_fail_on_status_code(
    context: BrowserContext, server: Server, method: str
) -> None:
    """fail_on_status_code=True turns a 404 response into a raised Error."""
    with pytest.raises(Error, match="404 Not Found"):
        getattr(context.request, method)(
            server.PREFIX + "/this-does-clearly-not-exist.html",
            fail_on_status_code=True,
        )
@pytest.mark.parametrize(
    "method", ["fetch", "delete", "get", "head", "patch", "post", "put"]
)
def test_should_support_ignore_https_errors_option(
    context: BrowserContext, https_server: Server, method: str
) -> None:
    """ignore_https_errors=True suppresses TLS certificate validation errors."""
    response = getattr(context.request, method)(
        https_server.EMPTY_PAGE, ignore_https_errors=True
    )
    assert response.ok
    assert response.status == 200
def test_should_not_add_context_cookie_if_cookie_header_passed_as_parameter(
    context: BrowserContext, server: Server
) -> None:
    """An explicit Cookie header overrides the context's stored cookies."""
    context.add_cookies(
        [
            {
                "name": "username",
                "value": "John Doe",
                "url": server.EMPTY_PAGE,
                "expires": -1,
                "httpOnly": False,
                "secure": False,
                "sameSite": "Lax",
            }
        ]
    )
    with server.expect_request("/empty.html") as server_req:
        context.request.get(server.EMPTY_PAGE, headers={"Cookie": "foo=bar"})
    assert server_req.value.getHeader("Cookie") == "foo=bar"
@pytest.mark.parametrize("method", ["delete", "patch", "post", "put"])
def test_should_support_post_data(
    context: BrowserContext, method: str, server: Server
) -> None:
    """data= accepts str/bytes/list/dict payloads; other types raise an Error."""
    def support_post_data(fetch_data: Any, request_post_data: Any) -> None:
        # Issue one request and verify method, body and Content-Length.
        with server.expect_request("/simple.json") as request:
            response = getattr(context.request, method)(
                server.PREFIX + "/simple.json", data=fetch_data
            )
        assert request.value.method.decode() == method.upper()
        assert request.value.post_body == request_post_data # type: ignore
        assert response.status == 200
        assert response.url == server.PREFIX + "/simple.json"
        assert request.value.getHeader("Content-Length") == str(
            len(request.value.post_body) # type: ignore
        )
    support_post_data("My request", "My request".encode())
    support_post_data(b"My request", "My request".encode())
    # list/dict payloads are serialized as compact JSON.
    support_post_data(
        ["my", "request"], json.dumps(["my", "request"], separators=(",", ":")).encode()
    )
    support_post_data(
        {"my": "request"}, json.dumps({"my": "request"}, separators=(",", ":")).encode()
    )
    with pytest.raises(Error, match="Unsupported 'data' type: <class 'function'>"):
        support_post_data(lambda: None, None)
def test_should_support_application_x_www_form_urlencoded(
    context: BrowserContext, server: Server
) -> None:
    """form= sends an application/x-www-form-urlencoded body."""
    with server.expect_request("/empty.html") as server_req:
        context.request.post(
            server.PREFIX + "/empty.html",
            form={
                "firstName": "John",
                "lastName": "Doe",
                "file": "f.js",
            },
        )
    assert server_req.value.method == b"POST"
    assert (
        server_req.value.getHeader("Content-Type")
        == "application/x-www-form-urlencoded"
    )
    body = server_req.value.post_body.decode() # type: ignore
    assert server_req.value.getHeader("Content-Length") == str(len(body))
    params: Dict[bytes, List[bytes]] = parse_qs(server_req.value.post_body) # type: ignore
    assert params[b"firstName"] == [b"John"]
    assert params[b"lastName"] == [b"Doe"]
    assert params[b"file"] == [b"f.js"]
def test_should_support_multipart_form_data(
    context: BrowserContext, server: Server
) -> None:
    """multipart= sends plain fields and file payloads as multipart/form-data."""
    file: FilePayload = {
        "name": "f.js",
        "mimeType": "text/javascript",
        "buffer": b"var x = 10;\r\n;console.log(x);",
    }
    with server.expect_request("/empty.html") as server_req:
        context.request.post(
            server.PREFIX + "/empty.html",
            multipart={
                "firstName": "John",
                "lastName": "Doe",
                "file": file,
            },
        )
    assert server_req.value.method == b"POST"
    assert server_req.value.getHeader("Content-Type").startswith(
        "multipart/form-data; "
    )
    assert server_req.value.getHeader("Content-Length") == str(
        len(server_req.value.post_body) # type: ignore
    )
    assert server_req.value.args[b"firstName"] == [b"John"]
    assert server_req.value.args[b"lastName"] == [b"Doe"]
    assert server_req.value.args[b"file"][0] == file["buffer"]
def test_should_add_default_headers(
    context: BrowserContext, page: Page, server: Server
) -> None:
    """Requests carry default Accept/Accept-Encoding/User-Agent headers."""
    with server.expect_request("/empty.html") as server_req:
        context.request.get(server.EMPTY_PAGE)
    assert server_req.value.getHeader("Accept") == "*/*"
    assert server_req.value.getHeader("Accept-Encoding") == "gzip,deflate,br"
    # The API's User-Agent matches the browser page's own UA string.
    assert server_req.value.getHeader("User-Agent") == page.evaluate(
        "() => navigator.userAgent"
    )
acf926a8a48404317aff960ea4cd0d2266d3944b | 5,924 | py | Python | game.py | caiovini/pixel-cannon | 4596c0db411e952b46bd5b67e66e85a67808322d | [
"MIT"
] | null | null | null | game.py | caiovini/pixel-cannon | 4596c0db411e952b46bd5b67e66e85a67808322d | [
"MIT"
] | null | null | null | game.py | caiovini/pixel-cannon | 4596c0db411e952b46bd5b67e66e85a67808322d | [
"MIT"
] | null | null | null | import pygame as pg
import sys
import math
import random
from assets import (Background,
Ground,
CannonBase,
Cannon,
Ball)
from plane import Plane
from os.path import join
# Index aliases into pygame's rect.size tuple: (width, height).
sprite_width, sprite_height = 0, 1
clock = pg.time.Clock()
plane_sign = u'\u2708' # Unicode for plane
SCREEN_WIDTH, SCREEN_HEIGHT = 854, 480
BROWN = pg.Color(40, 26, 14)
YELLOW = pg.Color(255, 255, 0)
BLACK = pg.Color(0, 0, 0)
def main():
    """Run the Pixel Cannon game loop until the window is closed."""
    pg.init() # Init pygame
    screen = pg.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
    pg.display.set_caption("Pixel cannon")
    font_msg = pg.font.SysFont("Comic Sans MS", 20)
    font_plane = pg.font.Font(join("fonts", "segoe-ui-symbol.ttf"), 20)
    # Semi-transparent overlay used to dim the screen on game over.
    alpha_bg = pg.Surface((SCREEN_WIDTH, SCREEN_HEIGHT))
    alpha_bg.set_alpha(128)
    alpha_bg.fill((BLACK))
    background = Background()
    ground = Ground()
    cannon_base = CannonBase()
    cannon = Cannon()
    ball = Ball()
    plane = Plane()
    # Place the cannon base on top of the ground strip, barrel above it.
    cannon_base.set_position(100, SCREEN_HEIGHT - (ground.image.get_rect()
                            .size[sprite_height] + cannon_base.image.get_rect().size[sprite_height]))
    cannon.set_position(115, cannon_base.rect.y -
                        cannon_base.image.get_rect().size[sprite_height] / 1.8)
    def build_ground():
        # Build ground according to the size of screen width
        for i in range(math.ceil(SCREEN_WIDTH / ground.image.get_rect().size[sprite_width])):
            ground.set_position(
                i * ground.image.get_rect().size[sprite_width], SCREEN_HEIGHT - ground.image.get_rect().size[sprite_height])
            screen.blit(ground.image, ground.rect)
    # planes_missed doubles as the player's remaining "lives".
    planes_missed = plane_speed = 8
    game_over = fly_airplane = is_shooting = done = False
    score = plane_altitude = x_ball = y_ball = speed_ball = 0
    plane_position_x = SCREEN_WIDTH + \
        plane.flying_image.get_rect(
        ).size[sprite_width] # Initial position for the plane
    while not done:
        if not fly_airplane:
            plane_altitude = random.randrange(0, 5000)
            if 0 < plane_altitude < 100: # Randomically generates planes
                fly_airplane = True
        screen.blit(background.image, background.rect)
        screen.blit(cannon_base.image, cannon_base.rect)
        # Shooting validation
        if not cannon.check_collision(ball) and not game_over:
            keys = pg.key.get_pressed()
            if keys[pg.K_LEFT]:
                if cannon.angle < 90:
                    cannon.rotate(1)
            if keys[pg.K_RIGHT]:
                if cannon.angle > 0:
                    cannon.rotate(-1)
        for event in pg.event.get():
            if event.type == pg.QUIT:
                done = True
            if event.type == pg.KEYDOWN:
                if event.key == pg.K_ESCAPE:
                    done = True
                if event.key == pg.K_SPACE and not is_shooting and not game_over:
                    ball.angle = cannon.angle
                    is_shooting = True
        if is_shooting:
            # Only allow shooting when ball is out of screen boundaries
            if x_ball > SCREEN_WIDTH or y_ball < -ball.image.get_rect().size[sprite_height]:
                is_shooting = False
                x_ball = y_ball = speed_ball = 0
                ball.set_initial_position()
            else:
                # Advance the ball along the barrel's firing angle.
                speed_ball += 10
                x_ball: float = cannon.rect.x + cannon_base.image.get_rect().size[sprite_height] / 4 + \
                    math.cos(math.radians(360 - ball.angle)) * speed_ball
                y_ball: float = cannon.rect.y + cannon_base.image.get_rect().size[sprite_height] / 4 + \
                    math.sin(math.radians(360 - ball.angle)) * speed_ball
                ball.set_position(x_ball, y_ball)
                screen.blit(ball.image, ball.rect)
        screen.blit(cannon.rotate_image, cannon.rotate_image.get_rect(center=cannon.image.get_rect
                    (topleft=(cannon.rect.x - cannon.angle/2, cannon.rect.y - cannon.angle/2)).center).topleft)
        if fly_airplane:
            plane.set_position(plane_position_x, plane_altitude)
            plane.animate()
            screen.blit(plane.flying_image, plane.rect)
            plane_position_x -= plane_speed
            if not plane.is_alive: # If hit, lose altitude
                plane_altitude += 2
            if plane.check_collision(ball):
                # Hit: reset the ball; score scales with remaining distance.
                ball.set_initial_position()
                is_shooting = False
                x_ball = y_ball = speed_ball = 0
                plane.is_alive = False
                score += plane_position_x / 100
            if plane_position_x < -plane.flying_image.get_rect().size[sprite_width]:
                # Plane left the screen: speed up after a kill, otherwise
                # deduct a life; then respawn at the right edge.
                if not plane.is_alive:
                    plane_speed += .1
                else:
                    if planes_missed > 0:
                        planes_missed -= 1
                        if planes_missed == 0:
                            game_over = True
                fly_airplane = False
                plane.is_alive = True
                plane_position_x = SCREEN_WIDTH + \
                    plane.flying_image.get_rect().size[sprite_width]
        label = font_msg.render(f"SCORE: {score:.2f}", 1, BROWN)
        screen.blit(label, (10, 0))
        label = font_plane.render(
            plane_sign * planes_missed, 1, BROWN)
        screen.blit(label, (SCREEN_WIDTH / 2.5, 0))
        build_ground()
        if game_over:
            screen.blit(alpha_bg, (0, 0))
            label = font_msg.render(
                "GAME OVER", 1, YELLOW)
            screen.blit(label, (SCREEN_WIDTH / 2.5, SCREEN_HEIGHT / 2.5))
        pg.display.flip()
        clock.tick(60) # FPS
if __name__ == '__main__':
    # main() returns None, so this exits with status code 0.
    sys.exit(main())
| 34.44186 | 161 | 0.564652 |
acf927173b139fc9fbc0c01b8e5d1b7cdb917d48 | 827 | py | Python | tardis/default_settings/apps.py | keithschulze/mytardis | 8ed3562574ce990d42bfe96133185a82c31c27d4 | [
"Apache-2.0"
] | null | null | null | tardis/default_settings/apps.py | keithschulze/mytardis | 8ed3562574ce990d42bfe96133185a82c31c27d4 | [
"Apache-2.0"
] | null | null | null | tardis/default_settings/apps.py | keithschulze/mytardis | 8ed3562574ce990d42bfe96133185a82c31c27d4 | [
"Apache-2.0"
] | null | null | null | # A tuple of strings designating all applications that are enabled in
# this Django installation.
# Package prefix under which optional MyTardis apps live.
TARDIS_APP_ROOT = 'tardis.apps'
INSTALLED_APPS = (
    'django_extensions',
    # Django built-ins.
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'django.contrib.admindocs',
    'django.contrib.humanize',
    # Third-party applications.
    'registration',
    'djcelery',
    'kombu.transport.django',
    'bootstrapform',
    'mustachejs',
    'tastypie',
    'tastypie_swagger',
    # MyTardis core applications.
    'tardis.tardis_portal',
    'tardis.tardis_portal.templatetags',
    'tardis.search',
    'tardis.analytics',
    # these optional apps, may require extra settings
    'tardis.apps.publication_forms',
    'tardis.apps.oaipmh',
    # 'tardis.apps.push_to',
)
| 27.566667 | 69 | 0.683192 |
acf9273aa0b4de2a6da94a5348b81fdb2fc6c7fd | 1,201 | py | Python | pinakes/main/auth/tests/functional/test_me_endpoint.py | Alex-Izquierdo/pinakes | dfeb855662b47d29a6e976e87fd7c090a262cf3f | [
"Apache-2.0"
] | 2 | 2022-03-17T18:53:58.000Z | 2022-03-17T22:04:22.000Z | pinakes/main/auth/tests/functional/test_me_endpoint.py | Alex-Izquierdo/pinakes | dfeb855662b47d29a6e976e87fd7c090a262cf3f | [
"Apache-2.0"
] | 9 | 2022-03-18T08:22:57.000Z | 2022-03-30T17:14:49.000Z | pinakes/main/auth/tests/functional/test_me_endpoint.py | Alex-Izquierdo/pinakes | dfeb855662b47d29a6e976e87fd7c090a262cf3f | [
"Apache-2.0"
] | 7 | 2022-03-17T22:03:08.000Z | 2022-03-28T21:28:34.000Z | """Module to test CurrentUser end points"""
import json
import pytest
from django.contrib.auth import get_user_model
User = get_user_model()
@pytest.mark.django_db
def test_current_me_authenticated(api_request):
"""Retrieve currently logged in user"""
fred = User(
username="fred",
is_superuser=False,
password="normal",
first_name="Fred",
last_name="Sample",
)
fred.save()
response = api_request("get", "auth:me", None, None, fred)
assert response.status_code == 200
content = json.loads(response.content)
assert content["username"] == "fred"
assert content["last_name"] == "Sample"
assert content["first_name"] == "Fred"
assert sorted(content["roles"]) == ["approval-admin", "catalog-admin"]
@pytest.mark.django_db
def test_current_me_unauthenticated(api_request):
"""Unauthenticated user should return 403"""
fred = User(
username="fred",
is_superuser=False,
password="normal",
first_name="Fred",
last_name="Sample",
)
fred.save()
response = api_request("get", "auth:me", None, None, fred, "json", False)
assert response.status_code == 403
| 25.553191 | 77 | 0.651957 |
acf92744ec3260c18e89364f888d05f31502f0df | 2,403 | py | Python | src/train/trainer.py | Sushentsev/DapStep | 772312085d5ba0e2877c05b79b0b33df25276011 | [
"Apache-2.0"
] | 1 | 2022-01-11T15:08:38.000Z | 2022-01-11T15:08:38.000Z | src/train/trainer.py | Sushentsev/DapStep | 772312085d5ba0e2877c05b79b0b33df25276011 | [
"Apache-2.0"
] | null | null | null | src/train/trainer.py | Sushentsev/DapStep | 772312085d5ba0e2877c05b79b0b33df25276011 | [
"Apache-2.0"
] | null | null | null | from typing import Optional
import torch
from torch import optim
from src.models.base import NeuralModel
from src.models.losses import Loss
from src.train.loaders.sampler import DataSampler
from src.train.metric_tracker import MetricTracker
from src.utils import set_seed, device
class Trainer:
    """Mini-batch training loop with gradient accumulation.

    Optimizer steps are taken every ``update_every`` batches; raw outputs,
    targets and losses are forwarded to a MetricTracker for epoch logging.
    """
    def __init__(self, model: NeuralModel, optimizer: optim.Optimizer, loss_function: Loss,
                 epochs: int, update_every: int = 1):
        self._model = model
        self._epochs = epochs
        self._optimizer = optimizer
        self._loss_function = loss_function
        self._metric_tracker = MetricTracker()
        # Global batch counter used for gradient-accumulation bookkeeping.
        self._step = 0
        self._update_every = update_every
    def _eval_epoch(self, epoch: int, val_loader: DataSampler, norm: float = 1.):
        """Run one validation pass (no gradients) and log epoch metrics."""
        self._model.eval()
        for batch in val_loader.batches():
            # Last element of the batch is the target; the rest is model input.
            data, y = batch[:-1], torch.tensor(batch[-1]).to(device)
            with torch.no_grad():
                out = self._model.raw_predict(*data)
                loss = self._loss_function.get_loss(out, y)
            # get_loss may return None; such batches are skipped for metrics.
            if loss is not None:
                self._metric_tracker.add_step(out.cpu(), y.cpu(), loss.cpu())
        self._metric_tracker.log_epoch(epoch, train=False, norm=norm)
    def _train_epoch(self, epoch: int, train_loader: DataSampler):
        """Run one training pass with gradient accumulation."""
        self._model.train()
        for batch in train_loader.batches():
            data, y = batch[:-1], torch.tensor(batch[-1]).to(device)
            out = self._model.raw_predict(*data)
            loss = self._loss_function.get_loss(out, y)
            if loss is not None:
                # Scale so accumulated gradients average over update_every batches.
                (loss / self._update_every).backward()
                if (self._step + 1) % self._update_every == 0:
                    self._optimizer.step()
                    self._model.zero_grad()
                self._step += 1
                self._metric_tracker.add_step(out.cpu(), y.cpu(), loss.cpu())
        self._metric_tracker.log_epoch(epoch, train=True)
    def run_train(self, train_loader: DataSampler, val_loader: Optional[DataSampler] = None, val_norm: float = 1.):
        """Train for the configured number of epochs, validating after each."""
        set_seed()
        self._step = 0
        for epoch in range(1, self._epochs + 1):
            print(f"Epoch {epoch} of {self._epochs}:")
            self._train_epoch(epoch, train_loader)
            if val_loader and len(val_loader) > 0:
                self._eval_epoch(epoch, val_loader, val_norm)
| 35.338235 | 115 | 0.617978 |
acf9279d485697d0c04c86cb34517c95e054e427 | 440 | py | Python | gui/account/migrations/0025_auto_20151123_1113.py | alpha-zou/TAMP | 91f0e7b08e2d6a03b541b07dd4768bf5222044dd | [
"MIT"
] | 1 | 2020-03-20T06:52:07.000Z | 2020-03-20T06:52:07.000Z | gui/account/migrations/0025_auto_20151123_1113.py | alpha-zou/TAMP | 91f0e7b08e2d6a03b541b07dd4768bf5222044dd | [
"MIT"
] | 1 | 2021-11-12T15:20:56.000Z | 2021-11-12T15:20:56.000Z | gui/account/migrations/0025_auto_20151123_1113.py | alpha-zou/TAMP | 91f0e7b08e2d6a03b541b07dd4768bf5222044dd | [
"MIT"
] | 3 | 2019-03-10T19:56:17.000Z | 2020-03-20T07:00:10.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0024_auto_20151120_1328'),
]
operations = [
migrations.RemoveField(
model_name='userprofile',
name='application_field',
),
migrations.DeleteModel(
name='appField',
),
]
| 20 | 47 | 0.593182 |
acf92992b1b8d52eace69a58e80194d479326ac4 | 496 | py | Python | pset_functions/basic_function_ops/tests/test_p5.py | mottaquikarim/pydev-psets | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | [
"MIT"
] | 5 | 2019-04-08T20:05:37.000Z | 2019-12-04T20:48:45.000Z | pset_functions/basic_function_ops/tests/test_p5.py | mottaquikarim/pydev-psets | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | [
"MIT"
] | 8 | 2019-04-15T15:16:05.000Z | 2022-02-12T10:33:32.000Z | pset_functions/basic_function_ops/tests/test_p5.py | mottaquikarim/pydev-psets | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | [
"MIT"
] | 2 | 2019-04-10T00:14:42.000Z | 2020-02-26T20:35:21.000Z | """
Function Basics V - Indeterminate Arguments
"""
import io
import pytest
from unittest import TestCase
from unittest.mock import patch
from p5 import *
@pytest.mark.describe('it returns highest and lowest value of input numbers')
def test_high_low():
    assert high_low(15, 4, 8, 21, 11) == (21, 4)


# BUG FIX: this test was also named `test_high_low`, so it redefined (and
# silently replaced) the one above — pytest only ever collected the second
# definition. Renamed so both tests run.
@pytest.mark.describe('it returns an error if argument pass is not int type')
def test_high_low_non_int_error():
    assert high_low(15, [21, 3], 6, 11) == ('Error: ', 'Please enter only individual integers.')
acf92a523f9bc92811fb97c9fced269b72ec1110 | 1,011 | py | Python | app/buyers/models.py | kanaygupta/Shopping-portal | 8cc8dfd7f386169f81d2e9eed6f077416d5a1c33 | [
"MIT"
] | null | null | null | app/buyers/models.py | kanaygupta/Shopping-portal | 8cc8dfd7f386169f81d2e9eed6f077416d5a1c33 | [
"MIT"
] | 4 | 2020-04-29T22:57:37.000Z | 2021-12-13T19:39:38.000Z | app/buyers/models.py | obliviateandsurrender/Shop-em-all | c738ad34f0c4a9a610f50929d2195ddb71f78d64 | [
"Apache-2.0"
] | null | null | null | from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager, UserMixin, login_user, login_required, logout_user, current_user
from app import db
class Buyers(UserMixin,db.Model):
__tablename__ = 'buyers'
# Define the fields here
id = db.Column(db.Integer, primary_key = True)
username = db.Column(db.String(200), unique=True)
email = db.Column(db.String(3), unique=True)
phone_Number = db.Column(db.String(20))
address = db.Column(db.String(200))
birthdate = db.Column(db.Date)
password = db.Column(db.String(800))
def __init__(self, username, email, password, phone_Number, address, birthdate):
self.username = username
self.email = email
self.phone_Number = phone_Number
self.address = address
self.birthdate = birthdate
self.password= password
def __repr__(self):
return '<Buyer\'s Id is: %d, Buyer\'s Name is: %r & Buyer\'s Email is: %r>>' %(self.id, self.username, self.email)
| 38.884615 | 122 | 0.678536 |
acf92cd31ca42b8ef28531a1440a54a355ce0063 | 3,550 | py | Python | MyBot/utils/paginator.py | Nipa-Code/NipaBot | f3996ea19105db4f71a89d8c42f167292b0008ab | [
"MIT"
] | 1 | 2021-12-28T16:57:58.000Z | 2021-12-28T16:57:58.000Z | MyBot/utils/paginator.py | lucid-coding/NipaBot | f3996ea19105db4f71a89d8c42f167292b0008ab | [
"MIT"
] | null | null | null | MyBot/utils/paginator.py | lucid-coding/NipaBot | f3996ea19105db4f71a89d8c42f167292b0008ab | [
"MIT"
] | 1 | 2021-12-26T17:59:24.000Z | 2021-12-26T17:59:24.000Z | from typing import Any, Dict, Optional, List, Union
import asyncio
import disnake
from disnake import ApplicationCommandInteraction, MessageInteraction
# https://github.com/Kraots/ViHillCorner/blob/master/utils/paginator.py#L367-L448
class EmbedPaginator(disnake.ui.View):
    """Button-based pagination over a list of embeds.

    Only the invoking user (or the bot itself) may press the buttons; anyone
    else receives an ephemeral refusal. Call :meth:`start` to send the first
    page with this view attached.
    """

    def __init__(
        self,
        ctx,
        embeds: List[disnake.Embed],
        *,
        timeout: float = 180.0
    ):
        super().__init__(timeout=timeout)
        self.ctx = ctx
        self.embeds = embeds
        self.current_page = 0
        # BUG FIX: `message` used to be assigned only inside start(), so
        # on_timeout() raised AttributeError whenever the view timed out
        # before start() had run (or was never called).
        self.message: Optional[disnake.Message] = None

    async def interaction_check(self, interaction: MessageInteraction) -> bool:
        """Allow only the command author (or the bot) to control the menu."""
        if interaction.user and interaction.user.id in (self.ctx.bot.user.id, self.ctx.author.id):
            return True
        await interaction.response.send_message('This pagination menu cannot be controlled by you, sorry!', ephemeral=True)
        return False

    async def on_timeout(self) -> None:
        # Strip the buttons from the message once the view expires.
        if self.message:
            await self.message.edit(view=None)

    async def show_page(self, inter: MessageInteraction, page_number: int):
        """Switch to `page_number`; out-of-range requests are acknowledged
        (so the client does not show "interaction failed") but ignored."""
        if (
            (page_number < 0) or
            (page_number > len(self.embeds) - 1)
        ):
            if not inter.response.is_done():
                await inter.response.defer()
            return

        self.current_page = page_number
        embed = self.embeds[page_number]
        embed.set_footer(text=f'Page {self.current_page + 1}/{len(self.embeds)}')

        if inter.response.is_done():
            await self.message.edit(embed=embed)
        else:
            await inter.response.edit_message(embed=embed)

    @disnake.ui.button(label='≪', style=disnake.ButtonStyle.grey)
    async def go_to_first_page(self, button: disnake.ui.Button, interaction: MessageInteraction):
        """Go to the first page."""
        await self.show_page(interaction, 0)

    @disnake.ui.button(label='Back', style=disnake.ButtonStyle.blurple)
    async def go_to_previous_page(self, button: disnake.ui.Button, interaction: MessageInteraction):
        """Go to the previous page."""
        await self.show_page(interaction, self.current_page - 1)

    @disnake.ui.button(label='Next', style=disnake.ButtonStyle.blurple)
    async def go_to_next_page(self, button: disnake.ui.Button, interaction: MessageInteraction):
        """Go to the next page."""
        await self.show_page(interaction, self.current_page + 1)

    @disnake.ui.button(label='≫', style=disnake.ButtonStyle.grey)
    async def go_to_last_page(self, button: disnake.ui.Button, interaction: MessageInteraction):
        """Go to the last page."""
        await self.show_page(interaction, len(self.embeds) - 1)

    @disnake.ui.button(label='Quit', style=disnake.ButtonStyle.red)
    async def stop_pages(self, button: disnake.ui.Button, interaction: MessageInteraction):
        """Stops the pagination session."""
        await interaction.response.defer()
        await interaction.delete_original_message()
        self.stop()

    async def start(self):
        """Start paginating over the embeds."""
        embed = self.embeds[0]
        embed.set_footer(text=f'Page 1/{len(self.embeds)}')

        if isinstance(self.ctx, ApplicationCommandInteraction):
            if not self.ctx.response.is_done():
                self.message = await self.ctx.response.send_message(embed=embed, view=self)
            else:
                self.message = await self.ctx.followup.send(embed=embed, view=self)
            return
        self.message = await self.ctx.send(embed=embed, view=self)
acf92d64db2c1ff55d58a858ee9a9e6885316ac2 | 36,394 | py | Python | readtextfromimage_23082020.py | adelekeluqman/ic4_pro_ocr | 99fc9084c150c3b1f0a14e9d03583fcb79ab7cb3 | [
"Apache-2.0"
] | null | null | null | readtextfromimage_23082020.py | adelekeluqman/ic4_pro_ocr | 99fc9084c150c3b1f0a14e9d03583fcb79ab7cb3 | [
"Apache-2.0"
] | null | null | null | readtextfromimage_23082020.py | adelekeluqman/ic4_pro_ocr | 99fc9084c150c3b1f0a14e9d03583fcb79ab7cb3 | [
"Apache-2.0"
] | null | null | null | # USAGE
# python readtextfromimage.py --image Input-2.jpg
# import the necessary packages
import cv2
import pytesseract, re
import numpy as np
import argparse
from skimage.segmentation import clear_border
from imutils import contours
import imutils
# from bank_check_ocr import extract_digits_and_symbols
pytesseract.pytesseract.tesseract_cmd = r'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'
import os, sys
from PIL import Image
import shutil
from datetime import datetime
from pymongo import MongoClient
client = MongoClient()
db = client.ic4pro
# from mongoengine import *
# connect('ic4pro', host='localhost', port=27017)
# class ic4_ocr(Document):
# _id = StringField(required=True, max_length=10)
# ticket_image = StringField(required=True, max_length=50)
# ticket_name = StringField(required=True, max_length=50)
# ticket_date = DateTimeField(default=datetime.datetime.now)
# account_no = StringField(default='null', max_length=10)
# account_name = StringField(default='null', max_length=50)
# amount = DecimalField(default='null')
# amount_word = StringField(default='null', max_length=200)
# cheque_no = StringField(default='null', max_length=15)
# bank_name = StringField(default='null', max_length=50)
# signature = StringField(default='null', max_length=20)
# stamp = StringField(default='null', max_length=20)
# extractn_date = DateTimeField(default=datetime.datetime.now().date().strftime("%Y%d%m"))
# extractn_time = DateTimeField(default=datetime.datetime.now().time().strftime("%H:%M:%S"))
# remark = StringField(default='null', max_length=100)
# comment = StringField(default='null', max_length=100)
# rejectn_reason = StringField(default='null', max_length=100)
# class ic4_ocr_ex(Document):
# ticket_image = StringField(required=True, max_length=50)
# extractn_date = DateTimeField(default=datetime.datetime.now().date().strftime("%Y%d%m"))
# extractn_time = DateTimeField(default=datetime.datetime.now().time().strftime("%H:%M:%S"))
# rejectn_reason = StringField(default=null, max_length=100)
# # construct the argument parser and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("-i", "--image", required=True,
# help="path to input image to be OCR'd")
# # ap.add_argument("-c", "--min-conf", type=int, default=0,
# # help="mininum confidence value to filter weak text detection")
# args = vars(ap.parse_args())
# # def extract_digits_and_symbols(image, charCnts, minW=5, minH=15): # initial line of code
# def extract_digits_and_symbols(image, charCnts, minW=5, minH=10): # modified line that enhance detection of micr xter
# # grab the internal Python iterator for the list of character
# # contours, then initialize the character ROI and location
# # lists, respectively
# charIter = charCnts.__iter__()
# rois = []
# locs = []
# # keep looping over the character contours until we reach the end
# # of the list
# while True:
# try:
# # grab the next character contour from the list, compute
# # its bounding box, and initialize the ROI
# c = next(charIter)
# (cX, cY, cW, cH) = cv2.boundingRect(c)
# roi = None
# # check to see if the width and height are sufficiently
# # large, indicating that we have found a digit
# if cW >= minW and cH >= minH:
# # extract the ROI
# roi = image[cY:cY + cH, cX:cX + cW]
# rois.append(roi)
# locs.append((cX, cY, cX + cW, cY + cH))
# # otherwise, we are examining one of the special symbols
# else:
# # MICR symbols include three separate parts, so we
# # need to grab the next two parts from our iterator,
# # followed by initializing the bounding box
# # coordinates for the symbol
# parts = [c, next(charIter), next(charIter)]
# (sXA, sYA, sXB, sYB) = (np.inf, np.inf, -np.inf,
# -np.inf)
# # loop over the parts
# for p in parts:
# # compute the bounding box for the part, then
# # update our bookkeeping variables
# (pX, pY, pW, pH) = cv2.boundingRect(p)
# sXA = min(sXA, pX)
# sYA = min(sYA, pY)
# sXB = max(sXB, pX + pW)
# sYB = max(sYB, pY + pH)
# # extract the ROI
# roi = image[sYA:sYB, sXA:sXB]
# rois.append(roi)
# locs.append((sXA, sYA, sXB, sYB))
# # we have reached the end of the iterator; gracefully break
# # from the loop
# except StopIteration:
# break
# # return a tuple of the ROIs and locations
# return (rois, locs)
# --- OCR pipeline path setup ---------------------------------------------
# NOTE(review): every path below is a hard-coded absolute Windows path;
# consider moving these to configuration.
os.makedirs('withImage', exist_ok=True) # create withImage folder intended for storing successfully extracted images, but not yet used for the purpose
# source = './inimg/' # Source path
source = "D:\\iC4_Pro_Project\\ic4_pro_ocr\\input\\" # Source path
# source = './' # Source path
# destinatn = './outimg/' # Destination path
destinatn = "D:\\iC4_Pro_Project\\ic4_pro_ocr\\processed\\" # Destination path
# exceptn = './eximg/' # Exception path
exceptn = "D:\\iC4_Pro_Project\\ic4_pro_ocr\\exceptn\\" # Exception path
# filepath = "./inimg/"
# filepath = "."
micrfont = "D:/iC4_Pro_Project/ic4_pro_ocr/micrfont/templateMicr.png" # MICR font image file path
micrpath = "D:\\iC4_Pro_Project\\ic4_pro_ocr\\micrfolder\\" # MICR digit reference file path
# micrfile = "D:\\iC4_Pro_Project\\ic4_pro_ocr\\micrfolder\\micr_e13b_reference.png"
micrfile = " "
# Pick the MICR reference image from micrpath.
# NOTE(review): if the folder contains several .png/.jpg files, only the
# last one returned by os.listdir wins — confirm that is intended.
for filename2 in os.listdir(micrpath):
    if (filename2.endswith('.png') or filename2.endswith('.jpg')):
        micrfile = filename2
# print('micrfile string: ' + micrfile)

# change the current working directory to a newly created one before doing any operations in it
os.chdir("D:\\iC4_Pro_Project\\ic4_pro_ocr\\input")

# Get the path of current working directory
filepath = os.getcwd()
for filename in os.listdir(filepath): # Loop over all files in the working directory.
# if not (filename.endswith('.png') or filename.endswith('.jpg')) \ # initial code with break line format
# or filename == LOGO_FILENAME: # initial code with break line format
if not (filename.endswith('.png') or filename.endswith('.jpg')):
print("----------- file moved to exceptn folder 1 -----------")
shutil.move(source + filename, exceptn + filename) # Move the content of source to destination
posts_ex = db.ic4_ocr_ex
post_ex_data = {
'ticket_image': filename,
'extractn_date': datetime.now().date().strftime("%Y%d%m"),
'extractn_time': datetime.now().time().strftime("%H:%M:%S"),
'rejectn_reason': 'incompatible image format'
}
result = posts_ex.insert_one(post_ex_data)
print('One rejected: {0}, {1}'.format(result.inserted_id, 'incompatible image format'))
# post_ic4_ocr_ex = ic4_ocr_ex(
# ticket_image = filename
# rejectn_reason = 'incompatible image format'
# )
# post_ic4_ocr_ex.save() # This will perform an insert
# print('One rejected: {0}, {1}'.format(post_ic4_ocr_ex._id, 'incompatible image format'))
continue # skip non-image files and the logo file itself
# im = Image.open(filename) # using "from PIL import Image" module
# width, height = im.size # using "from PIL import Image" module
im = filename
# ---------------------- check if im is cheque -------------------------------
# # initialize the list of reference character names, in the same
# # order as they appear in the reference image where the digits
# # their names and:
# # T = Transit (delimit bank branch routing transit #)
# # U = On-us (delimit customer account number)
# # A = Amount (delimit transaction amount)
# # D = Dash (delimit parts of numbers, such as routing or account)
# charNames = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "0",
# "T", "U", "A", "D"]
# # load the reference MICR image from disk, convert it to grayscale,
# # and threshold it, such that the digits appear as *white* on a
# # *black* background # "./out/output-image.png"
# ref = cv2.imread(micrpath + micrfile, 0)
# # ref = cv2.imread("D:\\iC4_Pro_Project\\ic4_pro_ocr\\micrfolder\\micr_e13b_reference.png")
# # ref = cv2.cvtColor(ref, cv2.COLOR_BGR2GRAY)
# ref = imutils.resize(ref, width=400)
# ref = cv2.threshold(ref, 0, 255, cv2.THRESH_BINARY_INV |
# cv2.THRESH_OTSU)[1]
# # find contours in the MICR image (i.e,. the outlines of the
# # characters) and sort them from left to right
# refCnts = cv2.findContours(ref.copy(), cv2.RETR_EXTERNAL,
# cv2.CHAIN_APPROX_SIMPLE)
# refCnts = imutils.grab_contours(refCnts)
# refCnts = contours.sort_contours(refCnts, method="left-to-right")[0]
# # extract the digits and symbols from the list of contours, then
# # initialize a dictionary to map the character name to the ROI
# # refROIs = extract_digits_and_symbols(ref, refCnts, # initial line of code
# # minW=10, minH=20)[0] # initial line of code
# refROIs = extract_digits_and_symbols(ref, refCnts, minW=10, minH=20)[0]
# chars = {}
# # loop over the reference ROIs
# for (name, roi) in zip(charNames, refROIs):
# # resize the ROI to a fixed size, then update the characters
# # dictionary, mapping the character name to the ROI
# # roi = cv2.resize(roi, (36, 36)) # initial line of code
# roi = cv2.resize(roi, (36, 36))
# chars[name] = roi
# # initialize a rectangular kernel (wider than it is tall) along with
# # an empty list to store the output of the check OCR
# # rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (17, 7)) # initial line of code
# rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (15, 5)) # better segmented rectangular kernel
# output = []
# load the input image, grab its dimensions, and apply array slicing
# to keep only the bottom 20% of the image (that's where the account
# information is)
# # image = cv2.imread(im, 0)
# image = cv2.imread(im)
# # image = cv2.imread('D:/iC4_Pro_Project/ic4_pro_ocr/input/sample1c.png', 0)
# (h, w,) = image.shape[:2]
# delta = int(h - (h * 0.2)) # initial line of code
# # delta = int(h - (h * 0.3)) # adjust the bottom % of the cheque image captured for scanning
# bottom = image[delta:h, 0:w]
# # convert the bottom image to grayscale, then apply a blackhat
# # morphological operator to find dark regions against a light
# # background (i.e., the routing and account numbers)
# # gray = cv2.cvtColor(bottom, cv2.COLOR_BGR2GRAY) # initial code line, throws error: (Invalid number of channels in input image:'VScn::contains(scn)' where 'scn' is 1)
# gray = bottom
# # gray = cv2.cvtColor(bottom, cv2.CV_8UC1)
# # gray = cv2.cvtColor(bottom, cv2.CV_32SC1)
# blackhat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, rectKernel)
# # compute the Scharr gradient of the blackhat image, then scale
# # the rest back into the range [0, 255]
# gradX = cv2.Sobel(blackhat, ddepth=cv2.CV_32F, dx=1, dy=0,
# ksize=-1)
# gradX = np.absolute(gradX)
# (minVal, maxVal) = (np.min(gradX), np.max(gradX))
# gradX = (255 * ((gradX - minVal) / (maxVal - minVal)))
# gradX = gradX.astype("uint8")
# # apply a closing operation using the rectangular kernel to help
# # close gaps in between rounting and account digits, then apply
# # Otsu's thresholding method to binarize the image
# gradX = cv2.morphologyEx(gradX, cv2.MORPH_CLOSE, rectKernel)
# thresh = cv2.threshold(gradX, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1] # initial code line
# # thresh = cv2.threshold(gradX, 0, 255, cv2.CV_8UC1)[1]
# # thresh = cv2.threshold(gradX, 0, 255, cv2.CV_32SC1)[1]
# # remove any pixels that are touching the borders of the image (this
# # simply helps us in the next step when we prune contours)
# thresh = clear_border(thresh)
# # find contours in the thresholded image, then initialize the
# # list of group locations
# # groupCnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# groupCnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# # ------ code modification on line 164-107, initial line is 104 ------ # 18-05-2020
# # groupCnts = groupCnts[0] if imutils.is_cv2() else groupCnts[1]
# groupCnts = groupCnts[0] #if imutils.is_cv2() else groupCnts[1]
# if groupCnts :
# # os.system('python bank_check_ocr.py --image D:/iC4_Pro_Project/ic4_pro_ocr/input/sample1c.png --reference D:/iC4_Pro_Project/towardsdatascience/micrfolder/micr_e13b_reference.png')
# # os.system('python bank_check_ocr.py --image D:/iC4_Pro_Project/ic4_pro_ocr/input/sample1c.png --reference D:/iC4_Pro_Project/towardsdatascience/micrfolder/micr_e13b_reference.png')
# import bank_check_ocr
# shutil.move(source + filename, destinatn + filename) # Move successful image from source to destination
# print("----------- enter this line1 -----------")
# continue
import numpy as np_
from PIL import Image as Image_
# im_haystack = Image_.open(r"lenna.png")
# im_needle = Image_.open(r"eye.png")
im_haystack = Image_.open(im)
im_needle = Image_.open(micrfont)
# found = False
def find_matches(haystack, needle):
arr_h = np_.asarray(haystack)
arr_n = np_.asarray(needle)
# arr_h = np_.array(haystack)
# arr_n = np_.array(needle)
y_h, x_h = arr_h.shape[:2]
y_n, x_n = arr_n.shape[:2]
xstop = x_h - x_n + 1
ystop = y_h - y_n + 1
matches = []
for xmin in range(0, xstop):
for ymin in range(0, ystop):
xmax = xmin + x_n
ymax = ymin + y_n
arr_s = arr_h[ymin:ymax, xmin:xmax] # Extract subimage
arr_t = (arr_s == arr_n) # Create test matrix
# if arr_t.all(): # Only consider exact matches
if arr_t: # Only consider exact matches
matches.append((xmin,ymin))
# return matches
return matches
# return False
# print(find_matches(im_haystack, im_needle))
if find_matches(im_haystack, im_needle) :
# if found :
# print(find_matches(im_haystack, im_needle))
print("----------- enter this line3 -----------")
import bank_check_ocr
print("----------- enter this line4 -----------")
shutil.move(source + im, destinatn + im) # Move successful image from source to destination
print("----------- enter this line1 -----------")
continue
# display information to the screen
print("----------- enter this line2 -----------")
try:
########## Use numpy slicing to extract Voucher Name ##########
# img1 = cv2.imread(args["image"])
# img1 = cv2.imread(args["image"], cv2.IMREAD_GRAYSCALE) # read image in grayscale
img1 = cv2.imread(im, cv2.IMREAD_GRAYSCALE) # read image in grayscale
x = 10
y = 50
h = 105
w = 380
# x = 12
# y = 46
# h = 45
# w = 320
# x = 5
# y = 40
# h = 120
# w = 410
global crop_img1
crop_img1 = img1[y:y+h, x:x+w]
# cv2.imshow("Voucher Name Before Morph", crop_img1)
# crop_img1 = cv2.threshold(crop_img1, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1] # convert image to binary format
# morphological operations to remove noise around the characters using erosion and dilation
kernel = np.ones((2, 1), np.uint8)
crop_img1 = cv2.erode(crop_img1, kernel, iterations=1)
crop_img1 = cv2.dilate(crop_img1, kernel, iterations=1)
cv2.imshow("Voucher Name", crop_img1)
except FileNotFoundError as fnf_error:
print(fnf_error)
print("----------- file moved to exceptn folder 2 -----------")
shutil.move(source + filename, exceptn + filename) # Move unsuccessful image from source to exception
posts_ex = db.ic4_ocr_ex
post_ex_data = {
'ticket_image': filename,
'extractn_date': datetime.now().date().strftime("%Y%d%m"),
'extractn_time': datetime.now().time().strftime("%H:%M:%S"),
'rejectn_reason': 'FileNotFoundError'
}
result = posts_ex.insert_one(post_ex_data)
print('One rejected: {0}, {1}'.format(result.inserted_id, 'FileNotFoundError'))
# post_ic4_ocr_ex = ic4_ocr_ex(
# ticket_image = filename
# rejectn_reason = 'FileNotFoundError'
# )
# post_ic4_ocr_ex.save() # This will perform an insert
# print('One rejected: {0}, {1}'.format(post_ic4_ocr_ex._id, 'FileNotFoundError'))
continue
except Exception as e:
print(e)
print("----------- file moved to exceptn folder 2 -----------")
shutil.move(source + filename, exceptn + filename) # Move unsuccessful image from source to exception
posts_ex = db.ic4_ocr_ex
post_ex_data = {
'ticket_image': filename,
'extractn_date': datetime.now().date().strftime("%Y%d%m"),
'extractn_time': datetime.now().time().strftime("%H:%M:%S"),
'rejectn_reason': e
}
result = posts_ex.insert_one(post_ex_data)
print('One rejected: {0}, {1}'.format(result.inserted_id, e))
# post_ic4_ocr_ex = ic4_ocr_ex(
# ticket_image = filename
# rejectn_reason = e
# )
# post_ic4_ocr_ex.save() # This will perform an insert
# print('One rejected: {0}, {1}'.format(post_ic4_ocr_ex._id, e))
continue
except:
print("Exception3 raised while extracting")
print("----------- file moved to exceptn folder 3 -----------")
shutil.move(source + filename, exceptn + filename) # Move unsuccessful image from source to exception
posts_ex = db.ic4_ocr_ex
post_ex_data = {
'ticket_image': filename,
'extractn_date': datetime.now().date().strftime("%Y%d%m"),
'extractn_time': datetime.now().time().strftime("%H:%M:%S"),
'rejectn_reason': "Exception3 raised while extracting"
}
result = posts_ex.insert_one(post_ex_data)
print('One rejected: {0}, {1}'.format(result.inserted_id, "Exception3 raised while extracting"))
# post_ic4_ocr_ex = ic4_ocr_ex(
# ticket_image = filename
# rejectn_reason = "Exception3 raised while extracting"
# )
# post_ic4_ocr_ex.save() # This will perform an insert
# print('One rejected: {0}, {1}'.format(post_ic4_ocr_ex._id, "Exception3 raised while extracting"))
continue
finally:
cv2.waitKey(0)
# text1 = pytesseract.image_to_string(crop_img1)
text1 = pytesseract.image_to_string(crop_img1, lang='eng', config='--oem 3 --psm 6')
searchlist1 = re.findall(r"[A-Z]{3,20}[ ]?", text1) # first rank
# searchlist1 = re.findall(r"([A-Z]{3,20}\s?)", text1) # first rank
# print(text1)
if searchlist1:
# print("ACCOUNT NAME:", "".join(str(item) for item in searchlist))
# print("ACCOUNT NAME:", "".join(searchtext))
searchstring1 = "".join(str(item) for item in searchlist1)
# creating substring from start of string
# define length upto which substring required
substring1 = searchstring1[:20]
# substring1 = substring1[:-1] # remove linebreak which is the last character of the string
else:
substring1 = ""
print('Voucher Name: ' + substring1)
print("----------- file moved to exceptn folder 3 -----------")
shutil.move(source + filename, exceptn + filename) # Move unsuccessful image from source to exception
posts_ex = db.ic4_ocr_ex
post_ex_data = {
'ticket_image': filename,
'extractn_date': datetime.now().date().strftime("%Y%d%m"),
'extractn_time': datetime.now().time().strftime("%H:%M:%S"),
'rejectn_reason': "Exception3 raised while extracting"
}
result = posts_ex.insert_one(post_ex_data)
print('One rejected: {0}, {1}'.format(result.inserted_id, "Exception3 raised while extracting"))
# post_ic4_ocr_ex = ic4_ocr_ex(
# ticket_image = filename
# rejectn_reason = "Exception3 raised while extracting"
# )
# post_ic4_ocr_ex.save() # This will perform an insert
# print('One rejected: {0}, {1}'.format(post_ic4_ocr_ex._id, "Exception3 raised while extracting"))
continue
print('Voucher Name: ' + substring1)
try:
########## Use numpy slicing to extract Voucher Number ##########
# img2 = cv2.imread(args["image"])
# img2 = cv2.imread(args["image"], cv2.IMREAD_GRAYSCALE) # read image in grayscale
img2 = cv2.imread(im, cv2.IMREAD_GRAYSCALE) # read image in grayscale
if substring1 == 'CASH DEPOSIT SLIP':
x2 = 420 # Cash Deposit Slip
y2 = 155 # Cash Deposit Slip
h2 = 130 # Cash Deposit Slip
w2 = 470 # Cash Deposit Slip
elif substring1 == 'CASH DEPOSIT SLIP ':
x2 = 410 # Cash Deposit Slip
y2 = 140 # Cash Deposit Slip
h2 = 150 # Cash Deposit Slip
w2 = 500 # Cash Deposit Slip
substring1 = 'CASH DEPOSIT SLIP'
elif substring1 == 'CHEQUE DEPOSIT SLIP':
x2 = 460 # Cheque Deposit Slip
y2 = 110 # Cheque Deposit Slip
h2 = 140 # Cheque Deposit Slip
w2 = 520 # Cheque Deposit Slip
elif substring1 == 'CHEQUE DEPOSIT SLIP ':
x2 = 460 # Cheque Deposit Slip
y2 = 110 # Cheque Deposit Slip
h2 = 140 # Cheque Deposit Slip
w2 = 520 # Cheque Deposit Slip
substring1 = 'CHEQUE DEPOSIT SLIP'
elif substring1 == 'CASH WITHDRAWAL SLIP':
x2 = 230 # Cash Deposit Slip
y2 = 60 # Cash Deposit Slip
h2 = 120 # Cash Deposit Slip
w2 = 290 # Cash Deposit Slip
elif substring1 in 'CASH WITHDRAWAL SLIP':
x2 = 230 # Cash Deposit Slip
y2 = 60 # Cash Deposit Slip
h2 = 130 # Cash Deposit Slip
w2 = 290 # Cash Deposit Slip
substring1 = 'CASH WITHDRAWAL SLIP'
else:
print("Exception1 raised while extracting ticket_name")
print("----------- file moved to exceptn folder 4 -----------")
shutil.move(source + filename, exceptn + filename) # Move unsuccessful image from source to exception
posts_ex = db.ic4_ocr_ex
post_ex_data = {
'ticket_image': filename,
'extractn_date': datetime.now().date().strftime("%Y%d%m"),
'extractn_time': datetime.now().time().strftime("%H:%M:%S"),
'rejectn_reason': "Exception1 raised while extracting ticket_name"
}
result = posts_ex.insert_one(post_ex_data)
print('One rejected: {0}, {1}'.format(result.inserted_id, "Exception1 raised while extracting ticket_name"))
# post_ic4_ocr_ex = ic4_ocr_ex(
# ticket_image = filename
# rejectn_reason = "Exception1 raised while extracting ticket_name"
# )
# post_ic4_ocr_ex.save() # This will perform an insert
# print('One rejected: {0}, {1}'.format(post_ic4_ocr_ex._id, "Exception1 raised while extracting ticket_name"))
continue
global crop_img2
crop_img2 = img2[y2:y2+h2, x2:x2+w2]
# cv2.imshow("Voucher Number Before Morph", crop_img2)
# crop_img2 = cv2.threshold(crop_img2, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1] # convert image to binary format
# morphological operations to remove noise around the characters using erosion and dilation
kernel = np.ones((2, 1), np.uint8)
crop_img2 = cv2.erode(crop_img2, kernel, iterations=1)
crop_img2 = cv2.dilate(crop_img2, kernel, iterations=1)
cv2.imshow("Voucher Number", crop_img2)
except:
print("Exception raised while identifying ticket")
print("----------- file moved to exceptn folder 5 -----------")
shutil.move(source + filename, exceptn + filename) # Move unsuccessful image from source to exception
posts_ex = db.ic4_ocr_ex
post_ex_data = {
'ticket_image': filename,
'extractn_date': datetime.now().date().strftime("%Y%d%m"),
'extractn_time': datetime.now().time().strftime("%H:%M:%S"),
'rejectn_reason': "Exception raised while identifying ticket"
}
result = posts_ex.insert_one(post_ex_data)
print('One rejected: {0}, {1}'.format(result.inserted_id, "Exception raised while identifying ticket"))
# post_ic4_ocr_ex = ic4_ocr_ex(
# ticket_image = filename
# rejectn_reason = "Exception2 raised while extracting ticket_id"
# )
# post_ic4_ocr_ex.save() # This will perform an insert
# print('One rejected: {0}, {1}'.format(post_ic4_ocr_ex._id, "Exception2 raised while extracting ticket_id"))
continue
finally:
cv2.waitKey(0)
# # img = cv2.imread('bitcoin.jpg') # initial code line
# img = cv2.imread('cash-deposit-blank.png')
# text = pytesseract.image_to_string(img) # initial code line
# print(text) # initial code line
# out_below = pytesseract.image_to_string(img) # initial line of code
############################# Moved up ###############################################
# # text1 = pytesseract.image_to_string(crop_img1)
# text1 = pytesseract.image_to_string(crop_img1, lang='eng', config='--oem 3 --psm 6')
# searchlist1 = re.findall(r"[A-Z]{3,20}[ ]?", text1) # first rank
# # searchlist1 = re.findall(r"([A-Z]{3,20}\s?)", text1) # first rank
# # print(text1)
# # print(searchtext1)
# if searchlist1:
# # print("ACCOUNT NAME:", "".join(str(item) for item in searchlist))
# # print("ACCOUNT NAME:", "".join(searchtext))
# # searchstring1 = "".join(str(item) for item in searchlist1)
# searchstring1 = "".join(str(item) for item in searchlist1)
# # creating substring from start of string
# # define length upto which substring required
# substring1 = searchstring1[:20]
# # substring1 = substring1[:-1] # remove linebreak which is the last character of the string
# else:substring1 = ""
# print(substring1)
######################################################################################
# text2 = pytesseract.image_to_string(crop_img2) # use default settings
# text2 = pytesseract.image_to_string(crop_img2, config='digits') # use this if 'C:\Program Files (x86)\Tesseract-OCR\tessdata\configs\digits' is modified to suit requirement
# text2 = pytesseract.image_to_string(crop_img2, lang='eng',config='--psm 6 --oem 3 -c tessedit_char_whitelist=0123456789')
text2 = pytesseract.image_to_string(crop_img2, lang='eng', config='--oem 3 --psm 6')
searchlist2 = re.findall(r"[0-9]{7,9}", text2) # first rank
# searchlist2 = re.findall(r'[0-9]{8}', text2)
# searchlist2 = re.findall(r'[0-9]', text2)
# searchlist2 = re.findall(r'\d{7,9}', text2)
# searchlist2 = re.findall(r'\d', text2)
# print(text2)
# print(searchtext2)
if searchlist2:
# print("ACCOUNT NAME:", "".join(str(item) for item in searchlist))
# print("ACCOUNT NAME:", "".join(searchtext))
# searchstring2 = "".join(str(item) for item in searchlist2)
# # creating substring from start of string
# # define length upto which substring required
# substring2 = searchstring2[:20]
# OR
substring2 = ''.join(searchlist2)
else:
substring2 = ""
print("Exception4 raised while extracting ticket_id")
print("----------- file moved to exceptn folder 6 -----------")
shutil.move(source + filename, exceptn + filename) # Move unsuccessful image from source to exception
posts_ex = db.ic4_ocr_ex
post_ex_data = {
'ticket_image': filename,
'extractn_date': datetime.now().date().strftime("%Y%d%m"),
'extractn_time': datetime.now().time().strftime("%H:%M:%S"),
'rejectn_reason': "Exception4 raised while extracting ticket_id"
}
result = posts_ex.insert_one(post_ex_data)
print('One rejected: {0}, {1}'.format(result.inserted_id, "Exception4 raised while extracting ticket_id"))
# post_ic4_ocr_ex = ic4_ocr_ex(
# ticket_image = filename
# rejectn_reason = "Exception4 raised while extracting ticket_id"
# )
# post_ic4_ocr_ex.save() # This will perform an insert
# print('One rejected: {0}, {1}'.format(post_ic4_ocr_ex._id, "Exception4 raised while extracting ticket_id"))
continue
print(substring2)
print("----------- file moved to destinatn folder 1 -----------")
shutil.move(source + filename, destinatn + filename) # Move successful image from source to destination
# itemlist = [text1,text2]
# itemlist = [substring1,substring2]
itemline = [substring1,",",substring2]
with open("D:\\iC4_Pro_Project\\ic4_pro_ocr\\outfile.txt", "a") as outfile:
# outfile.write(",".join(itemlist))
outfile.write("" .join(itemline))
outfile.write("\n")
posts = db.ic4_ocr
post_data = {
'_id': substring2,
'ticket_image': filename,
'ticket_name': substring1,
'ticket_date': 'null',
'account_no': 'null',
'account_name': 'null',
'amount': 'null',
'amount_word': 'null',
'cheque_no': 'null',
'micr_digit4': 'null',
'micr_digit5': 'null',
'micr_digit6': 'null',
'bank_name': 'null',
'signature': 'null',
'stamp': 'null',
'extractn_date': datetime.now().date().strftime("%Y%d%m"),
'extractn_time': datetime.now().time().strftime("%H:%M:%S"),
'remark': 'record extracted',
'comment': 'required fields extracted',
'rejectn_reason': 'null',
'callover_agent': 'null',
'callover_date': 'null',
'callover_time': 'null'
}
result = posts.insert_one(post_data)
print('One post: {0}, {1}'.format(result.inserted_id, substring1))
# with open('./outfile2.txt', 'w') as filehandle:
# for listitem in itemlist:
# filehandle.write('%s,' % listitem)
# with open('./outfile2.txt', 'w') as filehandle:
# filehandle.writelines("%s" % listitem for listitem in itemlist)
# post_ic4_ocr = ic4_ocr(
# _id = substring2
# ticket_image = filename
# ticket_name = substring1
# ticket_date = 'null'
# account_no = 'null'
# account_name = 'null'
# amount = 'null'
# amount_word = 'null'
# cheque_no = 'null'
# bank_name = 'null'
# signature = 'null'
# stamp = 'null'
# # extractn_date = DateTimeField(default=datetime.datetime.now().date().strftime("%Y%d%m"))
# # extractn_time = DateTimeField(default=datetime.datetime.now().time().strftime("%H:%M:%S"))
# remark = 'record extracted'
# comment = 'required fields extracted'
# rejectn_reason = 'null'
# )
# post_ic4_ocr.save() # This will perform an insert
# print(post_ic4_ocr._id + ', ' + substring1)
# # post_ic4_ocr.title = 'A Better Post Title'
# # post_ic4_ocr.save() # This will perform an atomic edit on "title"
# # print(post_ic4_ocr.title)
# # def extract_digits_and_symbols(image, charCnts, minW=5, minH=15): # initial line of code
# def extract_digits_and_symbols(image, charCnts, minW=5, minH=10): # modified line that enhance detection of micr xter
# # grab the internal Python iterator for the list of character
# # contours, then initialize the character ROI and location
# # lists, respectively
# charIter = charCnts.__iter__()
# rois = []
# locs = []
# # keep looping over the character contours until we reach the end
# # of the list
# while True:
# try:
# # grab the next character contour from the list, compute
# # its bounding box, and initialize the ROI
# c = next(charIter)
# (cX, cY, cW, cH) = cv2.boundingRect(c)
# roi = None
# # check to see if the width and height are sufficiently
# # large, indicating that we have found a digit
# if cW >= minW and cH >= minH:
# # extract the ROI
# roi = image[cY:cY + cH, cX:cX + cW]
# rois.append(roi)
# locs.append((cX, cY, cX + cW, cY + cH))
# # otherwise, we are examining one of the special symbols
# else:
# # MICR symbols include three separate parts, so we
# # need to grab the next two parts from our iterator,
# # followed by initializing the bounding box
# # coordinates for the symbol
# parts = [c, next(charIter), next(charIter)]
# (sXA, sYA, sXB, sYB) = (np.inf, np.inf, -np.inf,
# -np.inf)
# # loop over the parts
# for p in parts:
# # compute the bounding box for the part, then
# # update our bookkeeping variables
# (pX, pY, pW, pH) = cv2.boundingRect(p)
# sXA = min(sXA, pX)
# sYA = min(sYA, pY)
# sXB = max(sXB, pX + pW)
# sYB = max(sYB, pY + pH)
# # extract the ROI
# roi = image[sYA:sYB, sXA:sXB]
# rois.append(roi)
# locs.append((sXA, sYA, sXB, sYB))
# # we have reached the end of the iterator; gracefully break
# # from the loop
# except StopIteration:
# break
# # return a tuple of the ROIs and locations
# return (rois, locs) | 45.663739 | 193 | 0.580783 |
acf92d830ef3730a667c44acf165235025076398 | 843 | py | Python | myapp/migrations/0013_profile.py | sarodemayur55/Hospital_Management_Website | a90e64d2b02482d7ad69a807365bdc0abfca4212 | [
"Apache-2.0"
] | 1 | 2022-02-08T16:37:43.000Z | 2022-02-08T16:37:43.000Z | myapp/migrations/0013_profile.py | sarodemayur55/Hospital_Management_Website | a90e64d2b02482d7ad69a807365bdc0abfca4212 | [
"Apache-2.0"
] | null | null | null | myapp/migrations/0013_profile.py | sarodemayur55/Hospital_Management_Website | a90e64d2b02482d7ad69a807365bdc0abfca4212 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.1.6 on 2021-05-16 12:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 3.1.6 (`makemigrations`); avoid hand-editing the
    # operations — create a new migration instead.

    dependencies = [
        # Depends on whichever user model the project configured (AUTH_USER_MODEL).
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('myapp', '0012_auto_20210516_1706'),
    ]

    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Role flags distinguishing the two account types; both default to False.
                ('is_patient', models.BooleanField(default=False)),
                ('is_doctor', models.BooleanField(default=False)),
                # Exactly one Profile per auth user; deleting the user deletes the profile.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 32.423077 | 121 | 0.639383 |
acf92e54abe43b36fbcf3f40a52a8795955de861 | 1,054 | py | Python | movie_classifier/core/preprocessing/embeddings.py | daniele21/Genre_Detection | c79c62e1c784a3bd89100b791dbe2d717ce2f72e | [
"MIT"
] | null | null | null | movie_classifier/core/preprocessing/embeddings.py | daniele21/Genre_Detection | c79c62e1c784a3bd89100b791dbe2d717ce2f72e | [
"MIT"
] | null | null | null | movie_classifier/core/preprocessing/embeddings.py | daniele21/Genre_Detection | c79c62e1c784a3bd89100b791dbe2d717ce2f72e | [
"MIT"
] | null | null | null | from tqdm import tqdm
from typing import Text
from movie_classifier.constants.config import EMBEDDING_DIM
from movie_classifier.constants.paths import GLOVE_PATH
import numpy as np
from movie_classifier.core.preprocessing.tokenizers import MyTokenizer
def load_pretrained_glove_embeddings(tokenizer: MyTokenizer,
                                     embedding_path: Text = GLOVE_PATH):
    """Build an embedding matrix for *tokenizer*'s vocabulary from GloVe vectors.

    Parses the whitespace-separated GloVe text file at ``embedding_path``
    (``word v1 v2 ... vEMBEDDING_DIM`` per line), then assembles a
    ``(tokenizer.n_words + 1, EMBEDDING_DIM)`` float matrix where row ``i``
    holds the vector of the word mapped to index ``i`` by ``tokenizer.vocab()``.
    Words absent from the GloVe file keep an all-zero row.

    :param tokenizer: fitted tokenizer exposing ``n_words`` and ``vocab()``.
    :param embedding_path: path to the pretrained GloVe text file.
    :return: numpy array of shape ``(tokenizer.n_words + 1, EMBEDDING_DIM)``.
    """
    embeddings_index = {}
    # Context manager guarantees the file handle is closed even if a line fails
    # to parse (the previous open()/close() pair leaked the handle on error).
    with open(embedding_path) as f:
        for line in tqdm(f, desc='> Loading Embeddings'):
            values = line.split()
            word = values[0]
            coefs = np.asarray(values[1:], dtype='float32')
            embeddings_index[word] = coefs

    # +1 row: tokenizer indices presumably start at 1, leaving row 0 for
    # padding — TODO confirm against MyTokenizer's index scheme.
    embedding_matrix = np.zeros((tokenizer.n_words + 1, EMBEDDING_DIM))
    for word, i in tqdm(tokenizer.vocab().items(), total=tokenizer.n_words):
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            # Words not found in the embedding index stay all-zeros.
            embedding_matrix[i] = embedding_vector

    return embedding_matrix
| 34 | 76 | 0.697343 |
acf92e789a60bdc6bd4438ba4bf275b53d23e550 | 25 | py | Python | kontulari/management/commands/__init__.py | Etxea/gestion_eide_web | 8a59be1ddb59a4713cb3346534fd01f643d8f924 | [
"MIT"
] | null | null | null | kontulari/management/commands/__init__.py | Etxea/gestion_eide_web | 8a59be1ddb59a4713cb3346534fd01f643d8f924 | [
"MIT"
] | null | null | null | kontulari/management/commands/__init__.py | Etxea/gestion_eide_web | 8a59be1ddb59a4713cb3346534fd01f643d8f924 | [
"MIT"
] | null | null | null | __author__ = 'patataman'
| 12.5 | 24 | 0.76 |
acf92fdba608898ebf14cc16d1f9c1698ed0cd75 | 112 | py | Python | home/urls.py | thekonungr/wolfsschanze | a203c7c5bf2be17b779dd5e3e472c84fd245c7bf | [
"MIT"
] | null | null | null | home/urls.py | thekonungr/wolfsschanze | a203c7c5bf2be17b779dd5e3e472c84fd245c7bf | [
"MIT"
] | null | null | null | home/urls.py | thekonungr/wolfsschanze | a203c7c5bf2be17b779dd5e3e472c84fd245c7bf | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
path('', views.main_page, name='home'),
] | 16 | 43 | 0.678571 |
acf93038a29925e10960eb926772d8bf2d7a808b | 26,901 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/operations/_virtual_hubs_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 3 | 2020-06-23T02:25:27.000Z | 2021-09-07T18:48:11.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/operations/_virtual_hubs_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 510 | 2019-07-17T16:11:19.000Z | 2021-08-02T08:38:32.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/operations/_virtual_hubs_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 5 | 2019-09-04T12:51:37.000Z | 2020-09-16T07:28:40.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualHubsOperations(object):
    """VirtualHubsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_04_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # Alias so callers can reach the generated request/response model classes
    # through the operations group (e.g. ``client.virtual_hubs.models``).
    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # NOTE: auto-generated by AutoRest — change the code generator or the
        # service specification rather than hand-editing request logic here.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def get(
        self,
        resource_group_name,  # type: str
        virtual_hub_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.VirtualHub"
        """Retrieves the details of a VirtualHub.

        :param resource_group_name: The resource group name of the VirtualHub.
        :type resource_group_name: str
        :param virtual_hub_name: The name of the VirtualHub.
        :type virtual_hub_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VirtualHub, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_04_01.models.VirtualHub
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualHub"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('VirtualHub', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'}  # type: ignore

    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        virtual_hub_name,  # type: str
        virtual_hub_parameters,  # type: "_models.VirtualHub"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.VirtualHub"
        """Issue the initial PUT of the create-or-update long-running operation.

        Sends the request once and deserializes the immediate 200/201 body;
        polling to the terminal state is handled by :meth:`begin_create_or_update`.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualHub"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(virtual_hub_parameters, 'VirtualHub')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # 200 = updated existing hub, 201 = created new hub; both return the resource body.
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualHub', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('VirtualHub', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'}  # type: ignore

    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        virtual_hub_name,  # type: str
        virtual_hub_parameters,  # type: "_models.VirtualHub"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.VirtualHub"]
        """Creates a VirtualHub resource if it doesn't exist else updates the existing VirtualHub.

        :param resource_group_name: The resource group name of the VirtualHub.
        :type resource_group_name: str
        :param virtual_hub_name: The name of the VirtualHub.
        :type virtual_hub_name: str
        :param virtual_hub_parameters: Parameters supplied to create or update VirtualHub.
        :type virtual_hub_parameters: ~azure.mgmt.network.v2020_04_01.models.VirtualHub
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either VirtualHub or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_04_01.models.VirtualHub]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualHub"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Skip the initial PUT when resuming from a saved continuation token.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                virtual_hub_name=virtual_hub_name,
                virtual_hub_parameters=virtual_hub_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('VirtualHub', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
        }

        # Final resource is fetched via the Azure-AsyncOperation header for this operation.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'}  # type: ignore

    def update_tags(
        self,
        resource_group_name,  # type: str
        virtual_hub_name,  # type: str
        virtual_hub_parameters,  # type: "_models.TagsObject"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.VirtualHub"
        """Updates VirtualHub tags.

        :param resource_group_name: The resource group name of the VirtualHub.
        :type resource_group_name: str
        :param virtual_hub_name: The name of the VirtualHub.
        :type virtual_hub_name: str
        :param virtual_hub_parameters: Parameters supplied to update VirtualHub tags.
        :type virtual_hub_parameters: ~azure.mgmt.network.v2020_04_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VirtualHub, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_04_01.models.VirtualHub
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualHub"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.update_tags.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(virtual_hub_parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        # PATCH: tags are updated in place without a long-running operation.
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('VirtualHub', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'}  # type: ignore

    def _delete_initial(
        self,
        resource_group_name,  # type: str
        virtual_hub_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Issue the initial DELETE of the delete long-running operation.

        Accepts 200/202/204 as success; polling to completion is handled by
        :meth:`begin_delete`.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        accept = "application/json"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'}  # type: ignore

    def begin_delete(
        self,
        resource_group_name,  # type: str
        virtual_hub_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes a VirtualHub.

        :param resource_group_name: The resource group name of the VirtualHub.
        :type resource_group_name: str
        :param virtual_hub_name: The name of the VirtualHub.
        :type virtual_hub_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Skip the initial DELETE when resuming from a saved continuation token.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                virtual_hub_name=virtual_hub_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
        }

        # Deletion completion is tracked via the Location header for this operation.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'}  # type: ignore

    def list_by_resource_group(
        self,
        resource_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.ListVirtualHubsResult"]
        """Lists all the VirtualHubs in a resource group.

        :param resource_group_name: The resource group name of the VirtualHub.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ListVirtualHubsResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_04_01.models.ListVirtualHubsResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListVirtualHubsResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link is an absolute URL that already embeds all query parameters.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            deserialized = self._deserialize('ListVirtualHubsResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs'}  # type: ignore

    def list(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.ListVirtualHubsResult"]
        """Lists all the VirtualHubs in a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ListVirtualHubsResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_04_01.models.ListVirtualHubsResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListVirtualHubsResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link is an absolute URL that already embeds all query parameters.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            deserialized = self._deserialize('ListVirtualHubsResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualHubs'}  # type: ignore
| 48.64557 | 191 | 0.661165 |
acf9303d5d678eee1a8534ee359adb0a44f33487 | 2,471 | py | Python | photosifter/display.py | kulikjak/photosifter | 8f99b73ef037dad01102325d62d558f9d3e6c1a0 | [
"MIT"
] | 1 | 2019-06-06T21:12:57.000Z | 2019-06-06T21:12:57.000Z | photosifter/display.py | kulikjak/photosifter | 8f99b73ef037dad01102325d62d558f9d3e6c1a0 | [
"MIT"
] | 1 | 2021-10-12T22:57:05.000Z | 2021-10-12T22:57:05.000Z | photosifter/display.py | kulikjak/photosifter | 8f99b73ef037dad01102325d62d558f9d3e6c1a0 | [
"MIT"
] | null | null | null | import enum
import cv2
import numpy
# Maximum amount of images displayed at the same time
MAXIMUM_DISPLAY_SIZE = 6
class BORDER(enum.Enum):
BLUE = [100, 0, 0]
GREEN = [0, 100, 0]
class DisplayHandler:
# Name of the application window
WINDOW_NAME = "Display Handler"
def __init__(self):
self._enable_text_embeding = True
self._current = None
cv2.namedWindow(self.WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.resizeWindow(self.WINDOW_NAME, 1280, 640)
def __del__(self):
cv2.destroyWindow(self.WINDOW_NAME)
@staticmethod
def _embed_text(image, focus, filename):
cv2.putText(image, f"Focus: {focus:.2f}", (50, 140),
cv2.FONT_HERSHEY_SIMPLEX, 5, (0, 0, 255), thickness=20)
cv2.putText(image, filename, (50, 280),
cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 0, 255), thickness=12)
def toggle_text_embeding(self):
self._enable_text_embeding = not self._enable_text_embeding
def toggle_fullscreen(self):
if not cv2.getWindowProperty(self.WINDOW_NAME, cv2.WND_PROP_FULLSCREEN):
cv2.setWindowProperty(self.WINDOW_NAME, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
else:
cv2.setWindowProperty(self.WINDOW_NAME, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_NORMAL)
def render_border(self, border=None):
if not isinstance(border, BORDER) and border is not None:
raise ValueError("Argument 'border' must be either from BORDER enum or None")
if border is None:
image = self._current
else:
image = cv2.copyMakeBorder(self._current, 60, 60, 0, 0,
borderType=cv2.BORDER_CONSTANT,
value=border.value)
cv2.imshow(self.WINDOW_NAME, image)
cv2.waitKey(1)
def render(self, image_objects):
min_height = min(obj.get().shape[0] for obj in image_objects)
complete = None
for obj in image_objects:
image = obj.get(min_height).copy()
if self._enable_text_embeding:
self._embed_text(image, obj.focus, obj.filename)
if complete is None:
complete = image
else:
complete = numpy.hstack((complete, image))
self._current = complete
cv2.imshow(self.WINDOW_NAME, complete)
cv2.waitKey(1) # needed to display the image
| 30.134146 | 99 | 0.621611 |
acf9315df05b65e861a84896389335035a65c8a1 | 3,940 | py | Python | py/aon/aoncmd_comment.py | aevri/phabricator-tools | ef7501bcaee83e98d168d16f64b3f73e744d3336 | [
"Apache-2.0"
] | 150 | 2015-01-21T15:52:22.000Z | 2021-11-09T05:53:36.000Z | py/aon/aoncmd_comment.py | aevri/phabricator-tools | ef7501bcaee83e98d168d16f64b3f73e744d3336 | [
"Apache-2.0"
] | 72 | 2015-05-08T04:33:08.000Z | 2017-01-27T09:37:36.000Z | py/aon/aoncmd_comment.py | aevri/phabricator-tools | ef7501bcaee83e98d168d16f64b3f73e744d3336 | [
"Apache-2.0"
] | 38 | 2015-01-30T10:33:47.000Z | 2021-11-09T05:53:30.000Z | """create a comment on differential reviews.
usage examples:
comment on revision '1':
$ arcyon comment 1 -m 'hello revision 1, how are you?'
accept revision '1':
$ arcyon comment 1 -m 'looks good' --action accept
comment on revisions 1 and 2, reading the message from 'mymessage':
$ arcyon comment 1 2 --message-file mymessage
"""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# aoncmd_comment
#
# Public Functions:
# getFromfilePrefixChars
# setupParser
# process
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import textwrap
import phlcon_differential
import phlsys_makeconduit
def getFromfilePrefixChars():
return ""
def setupParser(parser):
actions = parser.add_argument_group(
'action arguments',
'use any of ' + textwrap.fill(
str(phlcon_differential.USER_ACTIONS.keys())))
parser.add_argument(
'ids',
type=int,
nargs="*",
default=[],
help="the revisions to comment on (e.g. 1)")
parser.add_argument(
'--ids-file',
metavar='FILE',
type=argparse.FileType('r'),
help="a file to read ids from, use '-' to specify stdin")
parser.add_argument(
'--message', '-m',
metavar="M",
default="",
type=str,
help="the body text of the comment")
parser.add_argument(
'--message-file',
metavar='FILE',
type=argparse.FileType('r'),
help="a file to read the message from, use '-' for stdin")
parser.add_argument(
'--silent',
action='store_true',
help="don't send notification emails for this comment")
parser.add_argument(
'--attach-inlines',
action='store_true',
help="attach pending inline comments")
actions.add_argument(
'--action', '-a',
choices=phlcon_differential.USER_ACTIONS.keys(),
metavar="ACTION",
default='comment',
type=str,
help="perform an action on a review")
phlsys_makeconduit.add_argparse_arguments(parser)
def process(args):
conduit = phlsys_makeconduit.make_conduit(
args.uri, args.user, args.cert, args.act_as_user)
d = {
'message': args.message,
'silent': args.silent,
'action': phlcon_differential.USER_ACTIONS[args.action],
'attach_inlines': args.attach_inlines
}
if args.message_file:
d['message'] += args.message_file.read()
ids = args.ids
if args.ids_file:
ids.extend([int(i) for i in args.ids_file.read().split()])
if not ids:
print("error: you have not specified any revision ids")
sys.exit(1)
for i in ids:
phlcon_differential.create_comment(conduit, i, **d)
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| 29.62406 | 79 | 0.574873 |
acf93177216a2e80104ffc7c38c90d7976a5f0f9 | 7,733 | py | Python | app/core/admin.py | spbso/so_rest | 2ede1ee849fa3e4ba9fc76a64a522b9aaa34e27f | [
"MIT"
] | null | null | null | app/core/admin.py | spbso/so_rest | 2ede1ee849fa3e4ba9fc76a64a522b9aaa34e27f | [
"MIT"
] | 1 | 2022-03-11T14:25:08.000Z | 2022-03-11T14:25:08.000Z | app/core/admin.py | spbso/so_rest | 2ede1ee849fa3e4ba9fc76a64a522b9aaa34e27f | [
"MIT"
] | null | null | null | import logging
from core import models
from core.auth_backend import PasswordlessAuthBackend
from django import forms
from django.contrib import admin
from django.contrib.admin.forms import AdminAuthenticationForm
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.forms import UsernameField
from django.utils.translation import gettext as _
from django_admin_listfilter_dropdown.filters import RelatedDropdownFilter
from django_fsm_log.admin import StateLogInline
from fsm_admin.mixins import FSMTransitionMixin
from reversion_compare.admin import CompareVersionAdmin
authenticate = PasswordlessAuthBackend.authenticate
logger = logging.getLogger(__name__)
class LoginForm(AdminAuthenticationForm):
    """
    Class for authenticating users by vk id
    (passwordless: the password field is removed entirely).
    """

    username = UsernameField(
        label=_("VK id"), widget=forms.TextInput(attrs={"autofocus": True})
    )
    # Explicitly drop the inherited password field.
    password = None

    error_messages = {
        "invalid_login": _("Please enter a correct %(username)s."),
        "inactive": _("This account is inactive."),
    }

    def __init__(self, request=None, *args, **kwargs):
        """
        The 'request' parameter is set for custom auth use by subclasses.
        The form data comes in via the standard 'data' kwarg.
        """
        self.request = request
        self.user_cache = None
        super().__init__(*args, **kwargs)

        # Set the max length and label for the "username" field.
        self.username_field = get_user_model()._meta.get_field(
            get_user_model().USERNAME_FIELD
        )

    def clean(self):
        # Authenticate by vk id alone via PasswordlessAuthBackend.authenticate.
        username = self.cleaned_data.get("username")

        if username is not None:
            self.user_cache = authenticate(self.request, vk_id=username)
            # NOTE(review): logs a bare boolean at ERROR level on every login
            # attempt — looks like leftover debugging; consider removing.
            logger.error(self.user_cache is None)
            if self.user_cache is None:
                raise self.get_invalid_login_error()
            else:
                self.confirm_login_allowed(self.user_cache)

        return self.cleaned_data
class UserAdmin(CompareVersionAdmin, BaseUserAdmin):
    """Admin for the custom user model keyed on VK id (no password fields)."""

    ordering = ["id"]
    list_display = ["vk_id", "name"]
    fieldsets = (
        (None, {"fields": ("vk_id",)}),
        (_("Personal Info"), {"fields": ("name",)}),
        (_("Permissions"), {"fields": ("is_active", "is_staff", "is_superuser")}),
        (_("Important dates"), {"fields": ("last_login",)}),
    )
    # Creation form only asks for the VK id.
    add_fieldsets = ((None, {"classes": ("wide",), "fields": ("vk_id",)}),)
class SeasonAdmin(CompareVersionAdmin, admin.ModelAdmin):
    """Admin for Season records; searchable by the related member's name."""

    ordering = ["id"]
    list_display = [
        "boec",
        "state",
    ]
    search_fields = ("boec__last_name", "boec__first_name")
    autocomplete_fields = ["boec"]
class ShtabAdmin(CompareVersionAdmin, admin.ModelAdmin):
    """Admin for Shtab with revision comparison."""
    pass


class AreaAdmin(CompareVersionAdmin, admin.ModelAdmin):
    """Admin for Area with revision comparison."""
    pass


class BoecAdmin(CompareVersionAdmin, admin.ModelAdmin):
    """Admin for Boec (member); ordered and searchable by name."""
    ordering = ["last_name"]
    search_fields = ("last_name", "first_name")


class BrigadeAdmin(CompareVersionAdmin, FSMTransitionMixin, admin.ModelAdmin):
    """Admin for brigades with FSM state transitions and state-log history."""
    inlines = [StateLogInline]
    ordering = ["title"]
    search_fields = ("title",)
    list_filter = ("area", "shtab", "rso_state", "last_festival_state")
    list_display = ["area", "title"]


class EventAdmin(CompareVersionAdmin, FSMTransitionMixin, admin.ModelAdmin):
    """Admin for events with FSM state transitions and state-log history."""
    inlines = [StateLogInline]


class EventQuotaAdmin(CompareVersionAdmin, admin.ModelAdmin):
    """Admin for event quotas."""
    pass
class ActivePositionFilter(admin.SimpleListFilter):
    """Sidebar filter splitting positions into active (no end date) and former."""

    title = _("Действующий")
    parameter_name = "to_date"

    def lookups(self, request, model_admin):
        # (URL query value, human-readable label) pairs.
        return (("0", _("Действующий")), ("1", _("Не действующий")))

    def queryset(self, request, queryset):
        # "0" -> still active (to_date unset); "1" -> ended positions.
        # Any other value (including no selection) returns None, i.e. no filtering.
        if self.value() == "0":
            return queryset.filter(to_date__isnull=True)
        if self.value() == "1":
            return queryset.filter(to_date__isnull=False)
class PositionAdmin(CompareVersionAdmin, admin.ModelAdmin):
    """Admin for positions; filterable by role, activity and brigade."""
    list_display = ["position", "brigade", "boec"]
    list_filter = ("position", ActivePositionFilter, ("brigade", RelatedDropdownFilter))
    autocomplete_fields = ["brigade", "boec"]


class ParticipantAdmin(CompareVersionAdmin, admin.ModelAdmin):
    """Admin for event participants."""
    list_display = ["boec", "event", "worth"]
    list_filter = ("event", "worth")


class CompetitionAdmin(CompareVersionAdmin, admin.ModelAdmin):
    """Admin for competitions."""
    pass


class CompetitionParticipantAdmin(CompareVersionAdmin, admin.ModelAdmin):
    """Admin for competition participants."""
    list_display = ["title", "competition", "worth"]
    list_filter = ("competition", "worth")


class NominationAdmin(CompareVersionAdmin, admin.ModelAdmin):
    """Admin for competition nominations."""
    list_display = ["title", "competition", "is_rated"]
    list_filter = ("competition",)
class ConferenceAdmin(CompareVersionAdmin, admin.ModelAdmin):
    """Admin for conferences."""
    pass


class TicketAdmin(CompareVersionAdmin, admin.ModelAdmin):
    """Admin for tickets."""
    pass


class TicketScanAdmin(CompareVersionAdmin, admin.ModelAdmin):
    """Admin for ticket scans."""
    pass


class ActivityAdmin(CompareVersionAdmin, admin.ModelAdmin):
    """Admin for activities."""
    pass


class AchievementsAdmin(CompareVersionAdmin, admin.ModelAdmin):
    """Admin for achievements."""
    pass


class WarningAdmin(CompareVersionAdmin, admin.ModelAdmin):
    """Admin for warnings."""
    pass
class VotingAdmin(CompareVersionAdmin, admin.ModelAdmin):
    """Admin for votings; shows the text plus a computed voter count column."""

    list_display = ("text", "voters_count")

    def voters_count(self, obj):
        # Count of related voters (issues one query per listed row).
        return obj.voters.count()
class VoteQuestionAdmin(CompareVersionAdmin, admin.ModelAdmin):
    """Admin for vote questions; shows which votings each question belongs to."""

    list_display = ("text", "voting")
    list_filter = ("rso_state",)

    def voting(self, obj):
        # Join the titles of all votings referencing this question.
        # str.join replaces the original quadratic `+=` concatenation and the
        # pointless `f""` initializer; the trailing space after each title is
        # kept so the displayed text is byte-identical to before.
        return "".join(f"{voting.text} " for voting in obj.votings.all())
class QuesitonsInline(admin.TabularInline):
model = models.VoteQuestion
class VoteAnswerAdmin(CompareVersionAdmin, admin.ModelAdmin):
list_display = ("text", "question")
def question(self, obj):
questions_titles = f""
for question in obj.questions.all():
questions_titles += f"{question.text} "
return f"{questions_titles}"
class VoterAdmin(CompareVersionAdmin, admin.ModelAdmin):
    """Admin for voters."""
    pass


class UserApplyAdmin(CompareVersionAdmin, admin.ModelAdmin):
    """Admin for user applications."""
    autocomplete_fields = ["brigade"]


class SeasonReportAdmin(CompareVersionAdmin, admin.ModelAdmin):
    """Admin for season reports."""
    list_display = ("year", "brigade", "state")
    list_filter = ("year", "brigade", "state")
    autocomplete_fields = ["seasons"]
# Register every model with its corresponding admin class.
admin.site.register(models.User, UserAdmin)
admin.site.register(models.Shtab, ShtabAdmin)
admin.site.register(models.Area, AreaAdmin)
admin.site.register(models.Boec, BoecAdmin)
admin.site.register(models.Brigade, BrigadeAdmin)
admin.site.register(models.Event, EventAdmin)
admin.site.register(models.Season, SeasonAdmin)
admin.site.register(models.Position, PositionAdmin)
admin.site.register(models.Participant, ParticipantAdmin)
admin.site.register(models.Competition, CompetitionAdmin)
admin.site.register(models.CompetitionParticipant, CompetitionParticipantAdmin)
admin.site.register(models.Nomination, NominationAdmin)
admin.site.register(models.Conference, ConferenceAdmin)
admin.site.register(models.Ticket, TicketAdmin)
admin.site.register(models.TicketScan, TicketScanAdmin)
admin.site.register(models.Achievement, AchievementsAdmin)
admin.site.register(models.Activity, ActivityAdmin)
admin.site.register(models.Warning, WarningAdmin)
admin.site.register(models.EventQuota, EventQuotaAdmin)
admin.site.register(models.Voting, VotingAdmin)
admin.site.register(models.VoteQuestion, VoteQuestionAdmin)
admin.site.register(models.VoteAnswer, VoteAnswerAdmin)
admin.site.register(models.Voter, VoterAdmin)
admin.site.register(models.SeasonReport, SeasonReportAdmin)
admin.site.register(models.UserApply, UserApplyAdmin)
acf931ebc8b592a7e4301b082b39e617e53a1474 | 38,522 | py | Python | calfem/vedo_utils.py | CALFEM/calfem-py | 26d4082ca6b907c48ad814733c733ae30a959657 | [
"MIT"
] | null | null | null | calfem/vedo_utils.py | CALFEM/calfem-py | 26d4082ca6b907c48ad814733c733ae30a959657 | [
"MIT"
] | null | null | null | calfem/vedo_utils.py | CALFEM/calfem-py | 26d4082ca6b907c48ad814733c733ae30a959657 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
CALFEM Vedo
Utils for 3D visualization in CALFEM using Vedo (https://vedo.embl.es/)
@author: Andreas Åmand
"""
import numpy as np
import vedo as v
import pyvtk
import vtk
import sys
#import webbrowser
from scipy.io import loadmat
import calfem.core as cfc
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
# Tools, used in this file but can be accessed by a user as well (see exv4.py)
def get_coord_from_edof(edof_row, dof, element_type):
    """
    Routine to get element coordinate row indices based on element type and
    degrees of freedom.

    :param array edof_row: Element topology row [1 x degrees of freedom per element]
    :param array dof: Global degrees of freedom [number of nodes x degrees of freedom per node]
    :param int element_type: Element type [1-6]
    :return: (coord1, coord2) tuple for 2-node element types (1, 2, 5),
             otherwise an integer array of node row indices
             [nodes per element]
    :raises ValueError: if element_type is not in [1-6] (the original
             silently returned None here)
    """
    # Nodes per element by element type:
    #   1, 2, 5 -> 2 nodes; 3, 4 -> 8 nodes; 6 -> 4 nodes.
    if element_type in (1, 2, 5):
        n_nodes = 2
    elif element_type in (3, 4):
        n_nodes = 8
    elif element_type == 6:
        n_nodes = 4
    else:
        raise ValueError(f"Unsupported element type: {element_type}")

    def _node_row(node_dofs):
        # Index of the (first) row in `dof` containing any of this node's dofs.
        # flatnonzero avoids the deprecated int() conversion of a size-1 array.
        return int(np.flatnonzero((node_dofs == dof).any(axis=1))[0])

    # One split per node replaces the original 2/4/8-way copy-pasted blocks.
    indices = [_node_row(part) for part in np.split(edof_row, n_nodes)]

    if n_nodes == 2:
        # Preserve the original 2-tuple return shape for two-node elements.
        return indices[0], indices[1]
    return np.array(indices)
def get_a_from_coord(coord_row_num, num_of_deformations, a, scale=1):
    """
    Routine to get node displacements based on coordinates.

    :param int coord_row_num: Node coordinate row number
    :param int num_of_deformations: Number of degrees of freedom per node
    :param array a: Global displacement vector [1 x total degrees of freedom]
    :param float scale: Scale factor applied to each displacement (default 1);
                        the original left this parameter undocumented
    :return float dx: Scaled nodal displacement in x-direction
    :return float dy: Scaled nodal displacement in y-direction
    :return float dz: Scaled nodal displacement in z-direction
    """
    # The node's dofs start at row * dofs-per-node; compute the base once.
    base = coord_row_num * num_of_deformations
    dx = a[base] * scale
    dy = a[base + 1] * scale
    dz = a[base + 2] * scale
    return dx, dy, dz
def get_node_elements(coord,scale,alpha,dof,bcPrescr=None,bc=None,bc_color='red',fPrescr=None,f=None,f_color='blue6',dofs_per_node=None):
    """
    Routine to get node node actors.

    :param array coord: Nodal coordinates [number of nodes x 3]
    :param int scale: Node actor radius
    :param float alpha: Node actor transparency [0-1]
    :param array dof: Global degrees of freedom [number of nodes x degrees of freedom per node]
    :param array bcPrescr: Degrees of freedom with prescribed boundary conditions [number of prescribed boundary contidions x 1]
    :param array bc: Values for prescribed boundary conditions [number of prescribed boundary contidions x 1]
    :param string bc_color: Color for nodes with prescribed boundary conditions
    :param array fPrescr: Degrees of freedom with applied forces [number of applied forces x 1]
    :param array f: Values for forces [number of applied forces x 1]
    :param string f_color: Color for nodes with applied forces
    :param int dofs_per_node: Degrees of freedom per node [1-6]
    :return list nodes: Node actors
    """
    nnode = np.size(coord, axis = 0)
    ncoord = np.size(coord, axis = 1)
    nodes = []

    # Map each prescribed dof number to its boundary condition value.
    bc_dict = {}
    indx = 0
    if isinstance(bcPrescr, np.ndarray):
        for i in bcPrescr:
            bc_dict[i] = bc[indx]
            indx += 1

    # Map each loaded dof number to its force value.
    f_dict = {}
    indx = 0
    if isinstance(fPrescr, np.ndarray):
        for i in fPrescr:
            f_dict[i] = f[indx]
            indx += 1

    for i in range(nnode):
        #if ncoord == 3:
        dofs = dof[i]
        # Color nodes: BC nodes get bc_color, loaded nodes f_color (BCs win
        # when a node has both), all others black.
        if np.any(np.isin(bcPrescr, dofs, assume_unique=True)) == True:
            color = bc_color
        elif np.any(np.isin(fPrescr, dofs, assume_unique=True)) == True:
            color = f_color
        else:
            color = 'black'
        node = v.Sphere(c=color).scale(1.5*scale).pos([coord[i,0],coord[i,1],coord[i,2]]).alpha(alpha)
        # Build the actor's pick/hover label: node number plus each dof,
        # annotated with its BC or force value where one is prescribed.
        if np.any(np.isin(bcPrescr, dofs, assume_unique=True)) == True:
            node.name = f"Node nr. {i+1}, DoFs & BCs: ["
            for j in range(dofs_per_node):
                #print('j',j)
                node.name += str(dof[i,j])
                if dof[i,j] in bc_dict:
                    node.name += (': ' + str(bc_dict[dof[i,j]]))
                if j == dofs_per_node-1:
                    node.name += ']'
                else:
                    node.name += ', '
        elif np.any(np.isin(fPrescr, dofs, assume_unique=True)) == True:
            node.name = f"Node nr. {i+1}, DoFs & Forces: ["
            for j in range(dofs_per_node):
                node.name += str(dof[i,j])
                if dof[i,j] in f_dict:
                    node.name += (': ' + str(f_dict[dof[i,j]]))
                if j == dofs_per_node-1:
                    node.name += ']'
                else:
                    node.name += ', '
        else:
            node.name = f"Node nr. {i+1}, DoFs: ["
            for j in range(dofs_per_node):
                node.name += str(dof[i,j])
                if j == dofs_per_node-1:
                    node.name += ']'
                else:
                    node.name += ', '
        nodes.append(node)

    return nodes
def vectors(
    points,
    vectors,
    c="k",
    alpha=1,
    shaftWidth=0.05,
    text=None, vmax=None, vmin=None, cmap='jet', values = None):
    """
    Routine for creating vectors.

    :param list points: Mid point for vector [number of vectors x 3]
    :param list vectors: Vector components [number of vectors x 3]
    :param string c: Vector color
    :param float alpha: Vector transparancy [0-1]
    :param float shaftWidth: Vector width
    :param list text: Vector values [number of vectors x 1]
    :param float vmax: Maximum vector value for colormapping
    :param float vmin: Minimum vector value for colormapping
    :param string cmap: Vector colormap
    :param list values: [number of vectors x 1]
    :return list cylinders: Vector actors
    """
    # NOTE: the `vectors` parameter shadows this function's own name inside
    # the body; kept as-is for interface compatibility.
    if isinstance(points, v.Points):
        points = points.points()
    else:
        points = np.array(points)
    # Halve the components so each cylinder extends half a vector length
    # on either side of its mid point.
    vectors = np.array(vectors) / 2

    spts = points - vectors
    epts = points + vectors

    npts = np.size(points,0)

    cylinders = []
    for i in range(npts):
        # Low-resolution (4-sided) cylinder from start to end point.
        cyl = v.Cylinder([spts[i],epts[i]],r=shaftWidth*0.01,res=4,c=c)
        cyl.name = text[i]
        # Color the whole cylinder by its scalar value on the shared scale.
        cyl.cmap(cmap,input_array=values[i],vmin=vmin,vmax=vmax,on='cells')
        cylinders.append(cyl)
    '''
    arrs2d = shapes.Arrows2D(
        spts,
        epts,
        c=c,
        shaftLength=shaftLength,
        shaftWidth=shaftWidth,
        headLength=headLength,
        headWidth=headWidth,
        fill=fill,
        alpha=alpha,
    )
    '''
    #arrs2d.pickable(False)
    #arrs2d.name = "quiver"

    return cylinders
def check_input(edof,coord,dof,element_type,a=None,values=None,nseg=None):
    """
    Routine for checking input to draw_mesh, draw_displaced_mesh & animation.

    Leftover debug print statements from the original implementation have
    been removed; failure messages for invalid `values` sizes are kept.

    :param array edof: Element topology [number of elements x degrees of freedom per element]
    :param array coord: Nodal coordinates [number of nodes x 3]
    :param array dof: Global degrees of freedom [number of nodes x degrees of freedom per node]
    :param int element_type: Element type [1-6]
    :param array a: Global displacement vector [degrees of freedom x 1]
    :param array values: Scalar values [number of elements x 1 | number of elements x nodes per element | number of nodes x 1]
    :param int nseg: Number of beam segments + 1
    :return int number_of_elements: Number of elements
    :return int number_of_degrees_of_freedom_per_element: Degrees of freedom per element
    :return int number_of_coordinates: Number of coordinates
    :return int number_of_dimensions: Number of dimensions for model [1-3]
    :return int number_of_degrees_of_freedom: Number of degrees of freedom
    :return int degrees_of_freedom_per_node: Degrees of freedom per node
    :return int number_of_displacements: Number of displacements (only when `a` is given)
    :return string val: Type of scalar input ['el_values' / 'nodal_values_by_el' / 'nodal_values'] (only when `values` is given)
    """
    # Nodes per element by element type; used to classify `values` below.
    if element_type == 1 or element_type == 2 or element_type == 5:
        number_of_nodes_per_element = 2
    elif element_type == 3 or element_type == 4:
        number_of_nodes_per_element = 8
    elif element_type == 6:
        number_of_nodes_per_element = 4

    number_of_elements = np.size(edof, axis=0)
    number_of_degrees_of_freedom_per_element = np.size(edof, axis=1)
    number_of_coordinates = np.size(coord, axis=0)
    number_of_dimensions = np.size(coord, axis=1)
    number_of_degrees_of_freedom = np.size(dof, axis=0)*np.size(dof, axis=1)
    degrees_of_freedom_per_node = np.size(dof, axis=1)

    if a is not None:
        number_of_displacements = np.size(a, axis=0)

    if values is not None:
        if element_type == 1 or element_type == 2 or element_type == 5:
            if element_type == 1 or element_type == 2:
                # Types 1 & 2 carry exactly one value per element (no segments).
                nseg = 1
            number_of_values = np.size(values, axis=0)
            if number_of_values == number_of_elements*nseg:
                val = 'el_values'
            else:
                print("Invalid number of element-/nodal values, please make sure values correspond to total number of elements or nodes")
                sys.exit()
        else:
            number_of_values = np.size(values, axis=0)*np.size(values, axis=1)
            if number_of_values == number_of_elements:
                val = 'el_values'
            elif number_of_values == number_of_elements*number_of_nodes_per_element:
                val = 'nodal_values_by_el'
            elif number_of_values == number_of_coordinates:
                val = 'nodal_values'
            else:
                print("Invalid number of element-/nodal values, please make sure values correspond to total number of elements or nodes")
                sys.exit()

    # The return tuple grows with whichever optional inputs were supplied.
    if a is None and values is None:
        return number_of_elements, \
            number_of_degrees_of_freedom_per_element, \
            number_of_coordinates, \
            number_of_dimensions, \
            number_of_degrees_of_freedom, \
            degrees_of_freedom_per_node
    elif a is None:
        return number_of_elements, \
            number_of_degrees_of_freedom_per_element, \
            number_of_coordinates, \
            number_of_dimensions, \
            number_of_degrees_of_freedom, \
            degrees_of_freedom_per_node, \
            val
    elif values is None:
        return number_of_elements, \
            number_of_degrees_of_freedom_per_element, \
            number_of_coordinates, \
            number_of_dimensions, \
            number_of_degrees_of_freedom, \
            degrees_of_freedom_per_node, \
            number_of_displacements
    else:
        return number_of_elements, \
            number_of_degrees_of_freedom_per_element, \
            number_of_coordinates, \
            number_of_dimensions, \
            number_of_degrees_of_freedom, \
            degrees_of_freedom_per_node, \
            number_of_displacements, \
            val
""" Från kopia 2
def ugrid_from_edof_ec(edof, ex, ey, ez, ed=None, dofs_per_node=3, ignore_first=True):
coords, topo, node_dofs, node_displ = convert_to_node_topo(edof, ex, ey, ez, ed, dofs_per_node, ignore_first)
npoint = coords.shape[0]
nel = topo.shape[0]
nnd = topo.shape[1]
if nnd == 4:
ct = vtk.VTK_TETRA
elif nnd == 8:
ct = vtk.VTK_HEXAHEDRON
else:
print("Topology not supported.")
celltypes = [ct] * nel
return UGrid([coords, topo, celltypes])
def convert_to_node_topo(edof, ex, ey, ez, ed=None, dofs_per_node=3, ignore_first=True):
Routine to convert dof based topology and element coordinates to node based
topology required for visualisation with VTK and other visualisation frameworks
:param array edof: element topology [nel x (n_dofs_per_node)|(n_dofs_per_node+1)*n_nodes ]
:param array ex: element x coordinates [nel x n_nodes]
:param array ey: element y coordinates [nel x n_nodes]
:param array ez: element z coordinates [nel x n_nodes]
:param array n_dofs_per_node: number of dofs per node. (default = 3)
:param boolean ignore_first: ignore first column of edof. (default = True)
:return array coords: Array of node coordinates. [n_nodes x 3]
:return array topo: Node topology. [nel x n_nodes]
:return array node_dofs: Dofs for each node. [n_nodes x n_dofs_per_node]
node_hash_coords = {}
node_hash_numbers = {}
node_hash_dofs = {}
node_hash_displ = {}
el_hash_dofs = []
nel, cols = edof.shape
if ignore_first:
tot_dofs = cols-1
else:
tot_dofs = cols
n_nodes = int(tot_dofs / dofs_per_node)
# print("n_dofs_per_node =", dofs_per_node)
# print("cols =", tot_dofs)
# print("nel =", nel)
# print("n_nodes =", n_nodes)
if ed is None:
ed = np.zeros((nel,n_nodes*dofs_per_node))
for elx, ely, elz, eed, dofs in zip(ex, ey, ez, ed, edof):
if ignore_first:
el_dofs = dofs[1:]
else:
el_dofs = dofs
# 0 1 2 3 4 5 6 7 8 9 12 11
el_dof = np.zeros((n_nodes, dofs_per_node), dtype=int)
el_hash_topo = []
for i in range(n_nodes):
el_dof[i] = el_dofs[ (i*dofs_per_node):((i+1)*dofs_per_node) ]
node_hash_coords[hash(tuple(el_dof[i]))] = [elx[i], ely[i], elz[i]]
node_hash_numbers[hash(tuple(el_dof[i]))] = -1
node_hash_dofs[hash(tuple(el_dof[i]))] = el_dof[i]
displ_dofs = []
for j in range(dofs_per_node):
displ_dofs.append(eed[i*dofs_per_node+j])
node_hash_displ[hash(tuple(el_dof[i]))] = displ_dofs
#node_hash_displ[hash(tuple(el_dof[i]))] = [eed[i*dofs_per_node], eed[i*dofs_per_node+1], eed[i*dofs_per_node+2]]
el_hash_topo.append(hash(tuple(el_dof[i])))
el_hash_dofs.append(el_hash_topo)
coord_count = 0
coords = []
node_dofs = []
node_displ = []
for node_hash in node_hash_numbers.keys():
node_hash_numbers[node_hash] = coord_count
node_dofs.append(node_hash_dofs[node_hash])
node_displ.append(node_hash_displ[node_hash])
coord_count +=1
coords.append(node_hash_coords[node_hash])
topo = []
for el_hashes in el_hash_dofs:
el_hash_topo = []
for el_hash in el_hashes:
el_hash_topo.append(node_hash_numbers[el_hash])
topo.append(el_hash_topo)
# topo.append([
# node_hash_numbers[el_hashes[0]],
# node_hash_numbers[el_hashes[1]],
# node_hash_numbers[el_hashes[2]],
# node_hash_numbers[el_hashes[3]]
# ]
# )
if ed is None:
return np.asarray(coords), np.asarray(topo), np.asarray(node_dofs)
else:
#print('test')
return np.asarray(coords), np.asarray(topo), np.asarray(node_dofs), np.asarray(node_displ)
"""
### Aktuella
def ugrid_from_edof_ec(edof, ex, ey, ez, ed=None, dofs_per_node=3, ignore_first=True):
    """
    Routine for creating an unstructured grid based on element topology.

    :param array edof: element topology [nel x (n_dofs_per_node)|(n_dofs_per_node+1)*n_nodes ]
    :param array ex: element x coordinates [nel x n_nodes]
    :param array ey: element y coordinates [nel x n_nodes]
    :param array ez: element z coordinates [nel x n_nodes]
    :param array ed: element displacements [nel x n_dofs_per_node*n_nodes]
    :param array n_dofs_per_node: number of dofs per node. (default = 3)
    :param boolean ignore_first: ignore first column of edof. (default = True)
    :return object Ugrid: Unstructured grid
    """
    coords, topo, node_dofs, node_displ = convert_to_node_topo(edof, ex, ey, ez, ed, dofs_per_node, ignore_first)

    npoint = coords.shape[0]
    nel = topo.shape[0]
    nnd = topo.shape[1]

    # Pick the VTK cell type from the node count: 4 -> tetra, 8 -> hexahedron.
    if nnd == 4:
        ct = vtk.VTK_TETRA
    elif nnd == 8:
        ct = vtk.VTK_HEXAHEDRON
    else:
        # NOTE(review): falls through with `ct` undefined, so the line below
        # raises NameError for unsupported topologies — consider raising a
        # descriptive exception here instead.
        print("Topology not supported.")

    celltypes = [ct] * nel

    return UGrid([coords, topo, celltypes])
def convert_to_node_topo(edof, ex, ey, ez, ed=None, es=None, dofs_per_node=3, ignore_first=True):
    """
    Routine to convert dof based topology and element coordinates to node based
    topology required for visualisation with VTK and other visualisation frameworks

    Nodes are identified by the hash of their dof tuple, so nodes shared
    between elements collapse into a single entry (assumes no hash collisions).

    :param array edof: element topology [nel x (n_dofs_per_node)|(n_dofs_per_node+1)*n_nodes ]
    :param array ex: element x coordinates [nel x n_nodes]
    :param array ey: element y coordinates [nel x n_nodes]
    :param array ez: element z coordinates [nel x n_nodes]
    :param array ed: element displacements [nel x n_nodes*dofs_per_node] (zeros when omitted)
    :param array es: element scalar values [nel x n_nodes] (zeros when omitted)
    :param int dofs_per_node: number of dofs per node. (default = 3)
    :param boolean ignore_first: ignore first column of edof. (default = True)
    :return array coords: Array of node coordinates. [n_nodes x 3]
    :return array topo: Node topology. [nel x n_nodes]
    :return array node_dofs: Dofs for each node. [n_nodes x dofs_per_node]
    :return array node_displ: Displacements for each node. [n_nodes x dofs_per_node]
    :return array node_scalars: Element scalar value averaged per node. [n_nodes]
    """
    node_hash_coords = {}
    node_hash_numbers = {}
    node_hash_dofs = {}
    node_hash_displ = {}
    node_hash_scalar = {}
    node_hash_count = {}
    el_hash_dofs = []
    nel, cols = edof.shape
    tot_dofs = cols - 1 if ignore_first else cols
    n_nodes = int(tot_dofs / dofs_per_node)
    # Default to zero displacements / scalars so the bookkeeping below works
    # uniformly whether or not results were supplied.
    if ed is None:
        ed = np.zeros((nel, n_nodes * dofs_per_node))
    if es is None:
        es = np.zeros((nel, n_nodes))
    for elx, ely, elz, eed, ees, dofs in zip(ex, ey, ez, ed, es, edof):
        el_dofs = dofs[1:] if ignore_first else dofs
        el_dof = np.zeros((n_nodes, dofs_per_node), dtype=int)
        el_hash_topo = []
        for i in range(n_nodes):
            el_dof[i] = el_dofs[(i * dofs_per_node):((i + 1) * dofs_per_node)]
            # Hoisted: this hash was recomputed for every dict access before.
            node_key = hash(tuple(el_dof[i]))
            node_hash_coords[node_key] = [elx[i], ely[i], elz[i]]
            node_hash_numbers[node_key] = -1
            node_hash_dofs[node_key] = el_dof[i]
            # Accumulate scalar contributions and a hit count so values of
            # nodes shared by several elements can be averaged afterwards.
            if node_key in node_hash_scalar:
                node_hash_scalar[node_key] += ees[i]
                node_hash_count[node_key] += 1
            else:
                node_hash_scalar[node_key] = ees[i]
                node_hash_count[node_key] = 1
            node_hash_displ[node_key] = [eed[i * dofs_per_node + j] for j in range(dofs_per_node)]
            el_hash_topo.append(node_key)
        el_hash_dofs.append(el_hash_topo)
    # Assign sequential node numbers (dicts preserve insertion order) and
    # collect the per-node output arrays.
    coords = []
    node_dofs = []
    node_displ = []
    node_scalars = []
    for coord_count, node_hash in enumerate(node_hash_numbers.keys()):
        node_hash_numbers[node_hash] = coord_count
        node_dofs.append(node_hash_dofs[node_hash])
        node_displ.append(node_hash_displ[node_hash])
        node_scalars.append(node_hash_scalar[node_hash] / node_hash_count[node_hash])
        coords.append(node_hash_coords[node_hash])
    # Translate the hash based element topology into node numbers.
    topo = []
    for el_hashes in el_hash_dofs:
        topo.append([node_hash_numbers[el_hash] for el_hash in el_hashes])
    return np.asarray(coords), np.asarray(topo), np.asarray(node_dofs), np.asarray(node_displ), np.asarray(node_scalars)
def convert_nodal_values(edof, topo, dof, values):
    """
    Routine for converting nodal values from element to global and interpolating.

    Each global node receives the mean of the element-local values of every
    element that references it.

    :param array edof: element topology by degrees of freedom [nel x (n_dofs_per_node)|(n_dofs_per_node+1)*n_nodes ]
    :param array topo: element topology [nel x nodes per element]
    :param array dof: Global degrees of freedom [number of nodes x degrees of freedom per node]
    :param array values: Element scalar values [nel x nodes per element]
    :return array nodal_value_array: Global scalar values at nodes [number of nodes x 1]
    """
    nel = np.size(edof, axis=0)
    nnode = np.size(dof, axis=0)
    nodal_value_array = np.zeros((nnode, 1))
    print('Number of element values: ', np.size(values, axis=0))
    print('Number of values per element: ', np.size(values, axis=1))
    # Gather, per global node, the element-local value of every element
    # that touches it.  (Replaces the old (el, local index) -> node mapping
    # built via intermediate dicts and an identity `points` list.)
    per_node_values = {node: [] for node in range(nnode)}
    for el in range(nel):
        for local_node, global_node in enumerate(topo[el]):
            per_node_values[global_node].append(values[el][local_node])
    # Average the contributions.  As before, a node referenced by no element
    # yields np.mean([]) == nan (with a runtime warning).
    for node in range(nnode):
        nodal_value_array[node] = np.mean(per_node_values[node])
    return nodal_value_array
"""
def convert_a(coord_old,coord_new,a,ndofs):
ncoord = np.size(coord_old, axis=0)
a_new = np.zeros((ncoord,ndofs))
coord_hash_old = {}
coord_hash_new = {}
for i in range(ncoord):
coord_hash_old[hash(tuple(coord_old[i]))] = i
coord_hash_new[hash(tuple(coord_new[i]))] = i
indexes = []
for node_hash in coord_hash_old.keys():
index = coord_hash_new[node_hash]
indexes.append(index)
node = 0
for index in zip(indexes):
if ndofs == 1:
a_new[index] = a[node]
elif ndofs == 3:
a_new[index,0] = a[node*3]
a_new[index,1] = a[node*3+1]
a_new[index,2] = a[node*3+2]
node += 1
# Returns disp. by node, i.e. a_new = [number of nodes x degrees of freedom per node]
return a_new
"""
def convert_el_values(edof, values):
    """
    Routine for converting element values from element to global.

    Each element value is replicated six times — presumably one copy per
    quadrilateral face of a hexahedral element as built for rendering
    (TODO confirm against the grid construction).

    :param array edof: element topology by degrees of freedom [nel x (n_dofs_per_node)|(n_dofs_per_node+1)*n_nodes ]
    :param array values: Element scalar values [nel]
    :return array el_values: Scalar values replicated per face [nel*6 x 1]
    """
    nel = np.size(edof, axis=0)
    el_values = np.zeros((nel * 6, 1))
    # Replaces six copy-pasted assignments per element (and a large dead,
    # string-commented alternative implementation) with a simple loop.
    for i in range(nel):
        for face in range(6):
            el_values[i * 6 + face, :] = values[i]
    return el_values
# def convert_to_node_topo(edof, ex, ey, ez, n_dofs_per_node=3, ignore_first=False):
# """
# Written by: Jonas Lindemann
# Modified by: Andreas Åmand
# Routine to convert dof based topology and element coordinates to node based
# topology required for visualisation with VTK and other visualisation frameworks
# :param array edof: element topology [nel x (n_dofs_per_node)|(n_dofs_per_node+1)*n_nodes ]
# :param array ex: element x coordinates [nel x n_nodes]
# :param array ey: element y coordinates [nel x n_nodes]
# :param array ez: element z coordinates [nel x n_nodes]
# :param array a: global deformation [ndof]
# :param array n_dofs_per_node: number of dofs per node. (default = 3)
# :param boolean ignore_first: ignore first column of edof. (default = False)
# :return array coords: Array of node coordinates. [n_nodes x 3]
# :return array topo: Node topology. [nel x n_nodes]
# :return array node_dofs: Dofs for each node. [n_nodes x n_dofs_per_node]
# :return array a: global deformation [ndof] (reorderd according to )
# """
# node_hash_coords = {}
# node_hash_numbers = {}
# #a_hash_numbers = {}
# #node_hash_a = {}
# node_hash_dofs = {}
# el_hash_dofs = []
# nel, cols = edof.shape
# if ignore_first:
# tot_dofs = cols-1
# else:
# tot_dofs = cols
# n_nodes = int(tot_dofs / n_dofs_per_node)
# print("cols =", tot_dofs)
# print("nel =", nel)
# print("n_nodes =", n_nodes)
# #node_hash_a[hash(tuple(a))] = a
# #print(node_hash_a)
# #tot_nnodes = int(np.size(a, axis = 0)/3)
# #a_node = np.zeros((tot_nnodes, n_dofs_per_node))
# #print(np.size(a_node, axis = 0),np.size(a_node, axis = 1))
# #for i in range(tot_nnodes):
# # a_node[i,:] = [a[i*3], a[i*3+1], a[i*3+2]]
# #node_hash_a[hash(tuple(a_node[i]))] = a_node[i,:]
# #print(a_node)
# # Loopar igenom element
# for elx, ely, elz, dofs in zip(ex, ey, ez, edof):
# if ignore_first:
# el_dofs = dofs[1:]
# else:
# el_dofs = dofs
# # 0 1 2 3 4 5 6 7 8 9 12 11
# el_dof = np.zeros((n_nodes, n_dofs_per_node), dtype=int)
# #a_upd = np.zeros((n_nodes, n_dofs_per_node), dtype=int)
# el_hash_topo = []
# # Loopar igenom elementets noder
# for i in range(n_nodes):
# el_dof[i] = el_dofs[ (i*n_dofs_per_node):((i+1)*n_dofs_per_node) ]
# node_hash_coords[hash(tuple(el_dof[i]))] = [elx[i], ely[i], elz[i]]
# #node_hash_a[hash(tuple(a_node[i]))] = a
# #node_hash_a[hash(tuple(el_dof[i]))] = a
# #node_hash_coords[hash(tuple(el_dof[i]))] = [elx[i]+a[i*3], ely[i]+a[i*3+1], elz[i]+a[i*3+2]]
# #node_hash_a[hash(tuple(a_upd[i]))] = [ a[i*3], a[i*3+1], a[i*3+2] ]
# node_hash_numbers[hash(tuple(el_dof[i]))] = -1
# #a_hash_numbers[hash(tuple(el_dof[i]))] = -1
# node_hash_dofs[hash(tuple(el_dof[i]))] = el_dof[i]
# el_hash_topo.append(hash(tuple(el_dof[i])))
# el_hash_dofs.append(el_hash_topo)
# coord_count = 0
# """
# #for i in range(tot_nnodes):
# for node_hash in node_hash_numbers.keys():
# node_hash_numbers[node_hash] = coord_count
# #node_hash_numbers[node_hash] = coord_count
# #node[i] = el_dofs[ (i*n_dofs_per_node):((i+1)*n_dofs_per_node) ]
# a_node[i] = node_hash_numbers[node_hash]
# coord_count +=1
# node_hash_a[hash(tuple(node[i]))] = a[i]
# """
# #for i in range
# coord_count = 0
# coords = []
# node_dofs = []
# #a_new = []
# #print(node_hash_numbers.keys())
# #print(len(node_hash_a))
# #print(node_hash_a)
# #print(node_hash_coords)
# #a_node_new = []
# # Skapar global koordinatmartis baserat på hashes
# for node_hash in node_hash_numbers.keys():
# node_hash_numbers[node_hash] = coord_count
# #print(node_hash_numbers[node_hash])
# #node_hash_a[hash(tuple(a))] = a_upd
# node_dofs.append(node_hash_dofs[node_hash])
# coord_count +=1
# coords.append(node_hash_coords[node_hash])
# #a_node_new.append(node_hash_a[node_hash])
# #a_node_new.append(node_hash_coords[node_hash])
# #print(node_hash_numbers.keys())
# #print(node_hash_coords)
# #a_new.append(node_hash_a[node_hash])
# #a_new.append(hash(node_hash_a[node_hash]))
# #a_new.append(hash(tuple(node_hash_a[node_hash])))
# """
# a_count = 0
# for a_hash in a_hash_numbers.keys():
# a_hash_numbers[node_hash] = coord_count
# a_count +=1
# a_node_new.append(node_hash_a[a_hash])
# #a_node_new.append(node_hash_coords[node_hash])
# #print(node_hash_numbers.keys())
# #print(node_hash_coords)
# #a_new.append(node_hash_a[node_hash])
# #a_new.append(hash(node_hash_a[node_hash]))
# #a_new.append(hash(tuple(node_hash_a[node_hash])))
# """
# #for i in range(coord_count)
# # node_hash_a[hash(tuple(el_dof[i]))] = -1
# #for node_hash in node_hash_numbers.keys():
# # a_node.append()
# topo = []
# #a_el = []
# #print(el_hash_dofs)
# #print(node_hash_numbers)
# # Skapar global topologimartis baserat på hashes
# for el_hashes in el_hash_dofs:
# topo.append([
# node_hash_numbers[el_hashes[0]],
# node_hash_numbers[el_hashes[1]],
# node_hash_numbers[el_hashes[2]],
# node_hash_numbers[el_hashes[3]]
# ])
# topo.append([
# node_hash_numbers[el_hashes[4]],
# node_hash_numbers[el_hashes[5]],
# node_hash_numbers[el_hashes[6]],
# node_hash_numbers[el_hashes[7]]
# ])
# topo.append([
# node_hash_numbers[el_hashes[0]],
# node_hash_numbers[el_hashes[3]],
# node_hash_numbers[el_hashes[7]],
# node_hash_numbers[el_hashes[4]]
# ])
# topo.append([
# node_hash_numbers[el_hashes[1]],
# node_hash_numbers[el_hashes[2]],
# node_hash_numbers[el_hashes[6]],
# node_hash_numbers[el_hashes[5]]
# ])
# topo.append([
# node_hash_numbers[el_hashes[0]],
# node_hash_numbers[el_hashes[1]],
# node_hash_numbers[el_hashes[5]],
# node_hash_numbers[el_hashes[4]]
# ])
# topo.append([
# node_hash_numbers[el_hashes[2]],
# node_hash_numbers[el_hashes[3]],
# node_hash_numbers[el_hashes[7]],
# node_hash_numbers[el_hashes[6]]
# ])
# #a_el.append(a[node_hash_numbers[el_hashes[0]]])
# #a_el.append(a[node_hash_numbers[el_hashes[1]]])
# #a_el.append(a[node_hash_numbers[el_hashes[2]]])
# #a_el.append(a[node_hash_numbers[el_hashes[3]]])
# #a_el.append(a[node_hash_numbers[el_hashes[4]]])
# #a_el.append(a[node_hash_numbers[el_hashes[5]]])
# #a_el.append(a[node_hash_numbers[el_hashes[6]]])
# #a_el.append(a[node_hash_numbers[el_hashes[7]]])
# """
# topo.append([
# node_hash_numbers[el_hashes[0]],
# node_hash_numbers[el_hashes[1]],
# node_hash_numbers[el_hashes[2]],
# node_hash_numbers[el_hashes[3]]
# ]
# )
# """
# #print(coords)
# """
# a = a.tolist()
# print(a)
# for i in range(len(coords)):
# coords[i][0] = coords[i][0] + a[i*3]
# coords[i][1] = coords[i][1] + a[i*3+1]
# coords[i][2] = coords[i][2] + a[i*3+2]
# """
# #mesh = v.Mesh([def_coord[coords,:],[[0,1,2,3],[4,5,6,7],[0,3,7,4],[1,2,6,5],[0,1,5,4],[2,3,7,6]]],alpha=alpha).lw(1)
# return coords, topo, node_dofs
"""
def ugrid_from_edof_ec(edof, ex, ey, ez, a, dofs_per_node=3, ignore_first=False):
coords, topo, node_dofs = convert_to_node_topo_upd(edof, ex, ey, ez, dofs_per_node, ignore_first)
npoint = coords.shape[0]
nel = topo.shape[0]
nnd = topo.shape[1]
for i in range(npoint):
#print([a[i*3],a[i*3+1],a[i*3+2]])
coords[i][0] = coords[i][0] + a[i*3]
coords[i][1] = coords[i][1] + a[i*3+1]
coords[i][2] = coords[i][2] + a[i*3+2]
if nnd == 4:
ct = vtk.VTK_TETRA
elif nnd == 8:
ct = vtk.VTK_HEXAHEDRON
else:
print("Topology not supported.")
celltypes = [ct] * nel
return v.UGrid([coords, topo, celltypes])
def convert_to_node_topo_upd(edof, ex, ey, ez, dofs_per_node=3, ignore_first=False):
Routine to convert dof based topology and element coordinates to node based
topology required for visualisation with VTK and other visualisation frameworks
:param array edof: element topology [nel x (n_dofs_per_node)|(n_dofs_per_node+1)*n_nodes ]
:param array ex: element x coordinates [nel x n_nodes]
:param array ey: element y coordinates [nel x n_nodes]
:param array ez: element z coordinates [nel x n_nodes]
:param array n_dofs_per_node: number of dofs per node. (default = 3)
:param boolean ignore_first: ignore first column of edof. (default = True)
:return array coords: Array of node coordinates. [n_nodes x 3]
:return array topo: Node topology. [nel x n_nodes]
:return array node_dofs: Dofs for each node. [n_nodes x n_dofs_per_node]
node_hash_coords = {}
node_hash_numbers = {}
node_hash_dofs = {}
el_hash_dofs = []
nel, cols = edof.shape
if ignore_first:
tot_dofs = cols-1
else:
tot_dofs = cols
n_nodes = int(tot_dofs / dofs_per_node)
print("n_dofs_per_node =", dofs_per_node)
print("cols =", tot_dofs)
print("nel =", nel)
print("n_nodes =", n_nodes)
for elx, ely, elz, dofs in zip(ex, ey, ez, edof):
if ignore_first:
el_dofs = dofs[1:]
else:
el_dofs = dofs
# 0 1 2 3 4 5 6 7 8 9 12 11
el_dof = np.zeros((n_nodes, dofs_per_node), dtype=int)
el_hash_topo = []
for i in range(n_nodes):
el_dof[i] = el_dofs[ (i*dofs_per_node):((i+1)*dofs_per_node) ]
node_hash_coords[hash(tuple(el_dof[i]))] = [elx[i], ely[i], elz[i]]
node_hash_numbers[hash(tuple(el_dof[i]))] = -1
node_hash_dofs[hash(tuple(el_dof[i]))] = el_dof[i]
el_hash_topo.append(hash(tuple(el_dof[i])))
el_hash_dofs.append(el_hash_topo)
coord_count = 0
coords = []
node_dofs = []
for node_hash in node_hash_numbers.keys():
node_hash_numbers[node_hash] = coord_count
node_dofs.append(node_hash_dofs[node_hash])
coord_count +=1
coords.append(node_hash_coords[node_hash])
topo = []
for el_hashes in el_hash_dofs:
el_hash_topo = []
for el_hash in el_hashes:
el_hash_topo.append(node_hash_numbers[el_hash])
topo.append(el_hash_topo)
# topo.append([
# node_hash_numbers[el_hashes[0]],
# node_hash_numbers[el_hashes[1]],
# node_hash_numbers[el_hashes[2]],
# node_hash_numbers[el_hashes[3]]
# ]
# )
return np.asarray(coords), np.asarray(topo), np.asarray(node_dofs)
"""
| 32.562975 | 137 | 0.60433 |
acf934814822c47dcac13ae5bf4ecb6875ffd4c0 | 2,102 | py | Python | data/external/repositories_2to3/132160/kaggle-ndsb-master/buffering.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/132160/kaggle-ndsb-master/buffering.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/132160/kaggle-ndsb-master/buffering.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | 1 | 2019-12-04T08:23:33.000Z | 2019-12-04T08:23:33.000Z | import multiprocessing as mp
import queue
import threading
def buffered_gen_mp(source_gen, buffer_size=2):
    """
    Generator that runs a slow source generator in a separate process.

    buffer_size: the maximal number of items to pre-generate (length of the buffer)

    NOTE(review): the generator argument and every yielded item must be
    picklable for multiprocessing; under the 'spawn' start method a generator
    cannot be pickled at all — presumably this is only used on platforms
    defaulting to 'fork'. Confirm before relying on it elsewhere.
    """
    if buffer_size < 2:
        raise RuntimeError("Minimal buffer size is 2!")
    buffer = mp.Queue(maxsize=buffer_size - 1)
    # the effective buffer size is one less, because the generation process
    # will generate one extra element and block until there is room in the buffer.
    def _buffered_generation_process(source_gen, buffer):
        # Producer: exhaust the source generator, then push a None sentinel.
        # NOTE(review): if source_gen raises, no sentinel is ever queued and
        # the consumer loop below blocks forever — confirm acceptable.
        for data in source_gen:
            buffer.put(data, block=True)
        buffer.put(None)  # sentinel: signal the end of the iterator
        buffer.close()  # unfortunately this does not suffice as a signal: if buffer.get()
        # was called and subsequently the buffer is closed, it will block forever.
    process = mp.Process(target=_buffered_generation_process, args=(source_gen, buffer))
    process.start()
    # Consume until the None sentinel appears.  A None yielded by the source
    # itself would also terminate iteration early — by design of iter(f, sentinel).
    for data in iter(buffer.get, None):
        yield data
def buffered_gen_threaded(source_gen, buffer_size=2):
    """
    Generator that runs a slow source generator in a separate thread. Beware of the GIL!

    buffer_size: the maximal number of items to pre-generate (length of the buffer)
    """
    if buffer_size < 2:
        raise RuntimeError("Minimal buffer size is 2!")
    # The queue holds one item less than buffer_size: the producer thread
    # generates one extra item and then blocks on put() until there is room.
    item_queue = queue.Queue(maxsize=buffer_size - 1)
    def _producer(gen, q):
        for item in gen:
            q.put(item, block=True)
        q.put(None)  # sentinel: signal the end of the iterator
    worker = threading.Thread(target=_producer, args=(source_gen, item_queue))
    worker.daemon = True
    worker.start()
    # Drain the queue until the None sentinel arrives.
    while True:
        item = item_queue.get()
        if item is None:
            break
        yield item
| 38.925926 | 93 | 0.68411 |
acf93569b4eb44a42e272e1ca4db9c6132de7c2c | 4,865 | py | Python | matrix_misp_bot/bot_commands.py | Rafiot/matrix-misp-bot | 338b11683b870bb3e3c9050f47894b72c820b1ec | [
"Apache-2.0"
] | 5 | 2021-01-21T19:07:54.000Z | 2021-07-19T12:20:49.000Z | matrix_misp_bot/bot_commands.py | Rafiot/matrix-misp-bot | 338b11683b870bb3e3c9050f47894b72c820b1ec | [
"Apache-2.0"
] | null | null | null | matrix_misp_bot/bot_commands.py | Rafiot/matrix-misp-bot | 338b11683b870bb3e3c9050f47894b72c820b1ec | [
"Apache-2.0"
] | 2 | 2021-07-04T22:59:19.000Z | 2021-07-19T12:23:44.000Z | from pathlib import Path
from nio import AsyncClient, MatrixRoom, RoomMessageText
from pymisp import PyMISP
from matrix_misp_bot.chat_functions import react_to_event, send_text_to_room
from matrix_misp_bot.config import Config
from matrix_misp_bot.storage import Storage
class Command:
    """A single user-issued bot command, parsed and dispatched to a handler."""
    def __init__(
        self,
        client: AsyncClient,
        store: Storage,
        config: Config,
        command: str,
        room: MatrixRoom,
        event: RoomMessageText,
    ):
        """A command made by a user.
        Args:
            client: The client to communicate to matrix with.
            store: Bot storage.
            config: Bot configuration parameters.
            command: The command and arguments.
            room: The room the command was sent in.
            event: The event describing the command.
        """
        self.client = client
        self.store = store
        self.config = config
        self.command = command
        self.room = room
        self.event = event
        # First token is the command name; the rest are its arguments.
        self.args = self.command.split()[1:]
        # PyMISP client plus allow-lists come from the 'misp' config section.
        self.pymisp = PyMISP(self.config.config_dict.get('misp')['url'],
                             self.config.config_dict.get('misp')['apikey'])
        self.allowed_users = self.config.config_dict.get('misp')['allowed_users']
        self.allowed_servers = self.config.config_dict.get('misp')['allowed_servers']
    async def process(self):
        """Process the command"""
        # Dispatch on command prefix; anything unknown falls through to the
        # catch-all handler.
        if self.command.startswith("misp"):
            await self._misp()
        elif self.command.startswith("echo"):
            await self._echo()
        elif self.command.startswith("react"):
            await self._react()
        elif self.command.startswith("help"):
            await self._show_help()
        else:
            await self._unknown_command()
    async def _misp(self):
        """Handle 'misp search <value>' and 'misp subscribe'.

        Refuses to act unless EVERY user in the room is on the allowed-users
        list or comes from an allowed homeserver.
        """
        for user in self.room.users.keys():
            if user in self.allowed_users:
                continue
            # user ids look like '@name:server'; check the server part.
            if user.split(':', 1)[-1] in self.allowed_servers:
                continue
            response = 'Not allowed.'
            break
        else:
            # for/else: only reached when no disallowed user broke the loop.
            # NOTE(review): 'misp' with no arguments raises IndexError here —
            # confirm callers always pass a subcommand.
            if self.args[0] == 'search':
                attrs = self.pymisp.search(controller='attributes', value=self.args[1], page=1, limit=20, pythonify=True)
                if attrs:
                    response = 'The following events contain this value: \n'
                    for a in attrs:
                        response += f'{self.pymisp.root_url}/events/view/{a.event_id}\n'
                else:
                    response = 'Nothing found.'
            elif self.args[0] == 'subscribe':
                response = "You subscribed to the alerts"
                # Room ids are persisted one-per-line in a 'subscribed' file
                # next to this module; re-written wholesale on each subscribe.
                if not (Path(__file__).parent / 'subscribed').exists():
                    subscribed = []
                else:
                    with open(Path(__file__).parent / 'subscribed') as f_ro:
                        subscribed = [roomid.strip() for roomid in f_ro.readlines()]
                subscribed.append(self.room.room_id)
                with open(Path(__file__).parent / 'subscribed', 'w') as f_w:
                    f_w.writelines(f'{roomid}\n' for roomid in subscribed)
            else:
                response = 'Only "search" and "subscribe" are supported for now.'
        await send_text_to_room(self.client, self.room.room_id, response)
    async def _echo(self):
        """Echo back the command's arguments"""
        response = " ".join(self.args)
        await send_text_to_room(self.client, self.room.room_id, response)
    async def _react(self):
        """Make the bot react to the command message"""
        # React with a start emoji
        reaction = "⭐"
        await react_to_event(
            self.client, self.room.room_id, self.event.event_id, reaction
        )
        # React with some generic text
        reaction = "Some text"
        await react_to_event(
            self.client, self.room.room_id, self.event.event_id, reaction
        )
    async def _show_help(self):
        """Show the help text"""
        if not self.args:
            text = (
                "Hello, I am a bot made with matrix-nio! Use `help commands` to view "
                "available commands."
            )
            await send_text_to_room(self.client, self.room.room_id, text)
            return
        topic = self.args[0]
        if topic == "rules":
            text = "These are the rules!"
        elif topic == "commands":
            text = "Available commands: ..."
        else:
            text = "Unknown help topic!"
        await send_text_to_room(self.client, self.room.room_id, text)
    async def _unknown_command(self):
        """Tell the sender the command was not recognised."""
        await send_text_to_room(
            self.client,
            self.room.room_id,
            f"Unknown command '{self.command}'. Try the 'help' command for more information.",
        )
| 35.253623 | 121 | 0.565468 |
acf9358380bea5bbc9b4cf76ffc03036fe547e08 | 15,192 | py | Python | ec2.py | whojarr/drupal_aws | e9a7e42a554158b6e068d0fa54ab87151a3ae807 | [
"ADSL"
] | null | null | null | ec2.py | whojarr/drupal_aws | e9a7e42a554158b6e068d0fa54ab87151a3ae807 | [
"ADSL"
] | null | null | null | ec2.py | whojarr/drupal_aws | e9a7e42a554158b6e068d0fa54ab87151a3ae807 | [
"ADSL"
] | null | null | null | import datetime
import json
import boto3
class DrupalSecurityGroup(object):
    '''
    class to create a drupal ec2 security group

    Wraps the boto3 EC2 client to create / look up the security group used by
    the Drupal 9 instance (HTTPS, HTTP and SSH open to the world).
    '''
    def __init__(self, security_group_id=None, name="drupal9_security_group", region="ap-southeast-2", vpc_id=None):
        '''
        Constructor

        :param security_group_id: known group id, if any (not required to create).
        :param name: group name and Name tag value.
        :param region: AWS region for the EC2 client.
        :param vpc_id: VPC to create the group in; falls back to the default VPC.
        '''
        self.security_group_id = security_group_id
        self.name = name
        self.region = region
        self.ec2_client = boto3.client("ec2", region_name=self.region)
        self.vpc_id = vpc_id
        if not self.vpc_id:
            self.vpc_id = vpc_default()
    def create(self):
        '''
        Create the security group, tag it, and open inbound 443/80/22.

        NOTE(review): all three ports are open to 0.0.0.0/0, including SSH —
        confirm that is intended.

        :return str: the new group's GroupId.
        '''
        security_group = self.ec2_client.create_security_group(
            Description = self.name,
            GroupName = self.name,
            VpcId = self.vpc_id,
            TagSpecifications=[
                {
                    'ResourceType': 'security-group',
                    'Tags': [
                        {
                            'Key': 'Name',
                            'Value': self.name
                        },
                        {
                            'Key': 'Product',
                            'Value': 'drupal'
                        },
                        {
                            'Key': 'Version',
                            'Value': '9'
                        }
                    ]
                }
            ]
        )
        ''' add ingress rules '''
        security_group_id = security_group['GroupId']
        self.ec2_client.authorize_security_group_ingress(
            GroupId=security_group_id,
            IpPermissions=[
                {
                    'IpProtocol': 'tcp',
                    'FromPort': 443,
                    'ToPort': 443,
                    'IpRanges': [
                        {'CidrIp': '0.0.0.0/0'}
                    ]
                },
                {
                    'IpProtocol': 'tcp',
                    'FromPort': 80,
                    'ToPort': 80,
                    'IpRanges': [
                        {'CidrIp': '0.0.0.0/0'}
                    ]
                },
                {
                    'IpProtocol': 'tcp',
                    'FromPort': 22,
                    'ToPort': 22,
                    'IpRanges': [
                        {'CidrIp': '0.0.0.0/0'}
                    ]
                }
            ]
        )
        return security_group['GroupId']
    def exists(self):
        '''
        look for a security group with the tags Name:drupal9_security_group Product:Drupal Version:9

        Returns the first matching GroupId, or False when none is found.
        '''
        # list security group with tag Product: drupal
        security_groups = security_group_list(product="drupal")
        if len(security_groups['SecurityGroups']) > 0:
            return security_groups['SecurityGroups'][0]['GroupId']
        return False
class DrupalServer(object):
    '''
    class to create a drupal ec2 instance

    Boots an Amazon Linux instance and installs Apache/MariaDB/PHP plus a
    Drupal 9 site via a cloud-init user-data shell script.
    '''
    def __init__(self, instance_id=None, name="drupal9", image_id="ami-04a81599b183d7908", instance_type="t3.micro", region="ap-southeast-2"):
        '''
        Constructor

        :param instance_id: existing instance id, if managing one already.
        :param name: Name tag for instance and volume.
        :param image_id: AMI to launch (hard-coded Amazon Linux image).
        :param instance_type: EC2 instance type.
        :param region: AWS region for the EC2 client.
        '''
        self.instance_id = instance_id
        self.instance_state = None
        self.name = name
        self.image_id = image_id
        self.instance_type = instance_type
        self.region = region
        self.ec2_client = boto3.client("ec2", region_name=self.region)
        # Full provisioning script, passed as user data at launch time.
        self.user_data = self.cloud_init_cmd()
    #"mysql -e \"ALTER USER 'root'@'localhost' IDENTIFIED BY 'CHANGEME';\"",
    #"mysql -e \"DROP DATABASE IF EXISTS test\"",
    #"mysql -e \"FLUSH PRIVILEGES\"",
    def drupal_apache_conf(self):
        '''
        Return the Apache vhost configuration written to
        /etc/httpd/conf.d/drupal.conf by the cloud-init script.
        '''
        conf = """
        <VirtualHost *:80>
        ServerAdmin dhunter@digitalcreation.co.nz
        ServerName drupal9.pauanui.nz
        ServerAlias www.drupal9.pauanui.nz
        DocumentRoot /var/www/drupal/web
        <Directory /var/www/drupal/web>
        Options -Indexes -MultiViews +FollowSymLinks
        AllowOverride All
        Order allow,deny
        allow from all
        </Directory>
        LogLevel warn
        ErrorLog /var/log/httpd/drupal-error.log
        CustomLog /var/log/httpd/drupal-access.log combined
        </VirtualHost>
        ServerSignature Off
        ServerTokens Prod
        """
        return conf
    def cloud_init_cmd(self):
        '''
        Build the user-data shell script: package install, MariaDB setup,
        Apache/SSL config, phpMyAdmin, Composer/Drush, and a scripted
        Drupal 9 site install.

        NOTE(review): DB and admin passwords are hard-coded in this script
        and will be visible in the instance's user data — confirm acceptable.
        '''
        cmds = [
            "#!/bin/bash",
            "yum update -y",
            "amazon-linux-extras install -y php7.3",
            "amazon-linux-extras install -y mariadb10.5",
            "yum install -y httpd mariadb-server git php-cli php-common php-pdo php-mysqlnd php-xml php-gd php-mbstring php-fpm php-opcache php-pecl-memcached php-pecl-apcu mod_ssl httpd-itk",
            "sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm",
            "sudo systemctl enable amazon-ssm-agent",
            "sudo systemctl start amazon-ssm-agent",
            "systemctl start mariadb",
            "systemctl enable mariadb",
            "mysql -e \"CREATE DATABASE IF NOT EXISTS drupal9 CHARACTER SET UTF8 COLLATE utf8_general_ci;\"",
            "mysql -e \"GRANT ALL ON drupal9.* TO 'drupal'@'%' IDENTIFIED BY 'drup4lp4ssw0rd';\"",
            "cd /etc/httpd/conf.d/",
            "rm -f autoindex.conf notrace.conf userdir.conf welcome.conf",
            "echo \"" + self.drupal_apache_conf() + "\" > /etc/httpd/conf.d/drupal.conf",
            "cd ~",
            "systemctl start httpd",
            "systemctl enable httpd",
            "cd /etc/pki/tls/certs",
            "./make-dummy-cert localhost.crt",
            "cd ~",
            "sed -i -e 's/SSLCertificateKeyFile/#SSLCertificateKeyFile/g' /etc/httpd/conf.d/ssl.conf",
            "sed -i -e 's/LoadModule mpm_prefork_module/#LoadModule mpm_prefork_module/g' /etc/httpd/conf.modules.d/00-mpm.conf",
            "sed -i -e 's/#LoadModule mpm_event_module/LoadModule mpm_event_module/g' /etc/httpd/conf.modules.d/00-mpm.conf",
            "systemctl stop httpd",
            "systemctl start httpd",
            "cd /var/www/html/",
            "wget https://files.phpmyadmin.net/phpMyAdmin/5.1.1/phpMyAdmin-5.1.1-all-languages.zip",
            "unzip phpMyAdmin-5.1.1-all-languages.zip",
            "mv phpMyAdmin-5.1.1-all-languages phpMyAdmin",
            "cd ~",
            "usermod -a -G apache ec2-user",
            "chown -R ec2-user:apache /var/www",
            "chmod 2775 /var/www",
            "find /var/www -type d -exec chmod 2775 {} \;",
            "find /var/www -type f -exec chmod 0664 {} \;",
            "systemctl stop mariadb",
            "systemctl start mariadb",
            "export HOME=/root",
            "export DRUSH_PHP=/usr/bin/php",
            "cd /root/",
            "sed -i 's/allow_url_fopen = Off/allow_url_fopen = On/g' /etc/php.ini",
            "curl -sS https://getcomposer.org/installer | php",
            "mv composer.phar /usr/local/bin/composer",
            "ln -s /usr/local/bin/composer /usr/bin/composer",
            "/usr/bin/composer global require drush/drush:10.*",
            "/usr/bin/composer create-project drupal/recommended-project:9.* drupal",
            "sed -i 's/allow_url_fopen = On/allow_url_fopen = Off/g' /etc/php.ini",
            "sed -i 's/expose_php = On/expose_php = Off/g' /etc/php.ini",
            "sed -i 's/memory_limit = 128M/memory_limit = -1/g' /etc/php.ini",
            "cd /root/drupal",
            "/usr/bin/composer require drush/drush:^10",
            "cd /root",
            "mv /root/drupal /var/www/drupal",
            "cd /var/www/drupal",
            "/var/www/drupal/vendor/bin/drush site-install standard --yes --site-name=drupal9 --site-mail=dhunter@digitalcreation.co.nz --account-name=admin --account-pass=l3tm31n --db-url=mysql://drupal:drup4lp4ssw0rd@localhost/drupal9 --db-prefix=drupal_",
            "cd /var/www/drupal/web/sites/default",
            "echo \"\$_SERVER['HTTPS'] = 'On';\" >> settings.php",
            "chown -R ec2-user:apache /var/www/drupal",
            "chmod -R 750 /var/www/drupal",
            "chmod -R 770 /var/www/drupal/web/sites/default/files",
        ]
        result = '\n'.join(cmds)
        return result
    def create(self):
        '''
        Launch the instance (requires the drupal security group to exist;
        exits the process when it does not) and remember its instance id.

        :return str: the launched instance's InstanceId.
        '''
        sg = DrupalSecurityGroup()
        security_group_id = sg.exists()
        if not security_group_id:
            print("Failed to identify valid security group id")
            exit(1)
        print("Using Security Group ID:{}".format(security_group_id))
        # NOTE(review): the IAM instance profile ARN and key pair name are
        # hard-coded to one account — confirm before reuse.
        instances = self.ec2_client.run_instances(
            ImageId=self.image_id,
            MinCount=1,
            MaxCount=1,
            InstanceType=self.instance_type,
            SecurityGroupIds=[security_group_id],
            IamInstanceProfile={
                'Arn': 'arn:aws:iam::687368024180:instance-profile/SSMInstanceProfile'
            },
            KeyName="drupal",
            TagSpecifications=[
                {
                    'ResourceType': 'instance',
                    'Tags': [
                        {
                            'Key': 'Name',
                            'Value': self.name
                        },
                        {
                            'Key': 'Product',
                            'Value': 'drupal'
                        },
                        {
                            'Key': 'Version',
                            'Value': '9'
                        }
                    ]
                },
                {
                    'ResourceType': 'volume',
                    'Tags': [
                        {
                            'Key': 'Name',
                            'Value': self.name
                        },
                        {
                            'Key': 'Product',
                            'Value': 'drupal'
                        },
                        {
                            'Key': 'Version',
                            'Value': '9'
                        }
                    ]
                },
            ],
            UserData=self.user_data
        )
        self.instance_id = instances["Instances"][0]["InstanceId"]
        return self.instance_id
    def exists(self):
        # NOTE(review): stub — always reports existence without checking AWS.
        return True
    def stop(self):
        '''Stop the instance; returns the resulting state name.'''
        response = self.ec2_client.stop_instances(InstanceIds=[self.instance_id])
        self.instance_state = response["StoppingInstances"][0]["CurrentState"]["Name"]
        return self.instance_state
    def terminate(self):
        '''Terminate the instance; returns the resulting state name.'''
        response = self.ec2_client.terminate_instances(InstanceIds=[self.instance_id])
        self.instance_state = response["TerminatingInstances"][0]["CurrentState"]["Name"]
        return self.instance_state
class DrupalServers(object):
    '''
    class to manage drupal instances

    Scopes queries to one region and VPC.
    '''
    def __init__(self, region="ap-southeast-2", vpc_id=None):
        '''
        Constructor

        :param region: AWS region for the EC2 client.
        :param vpc_id: VPC to operate in; defaults to the account's default VPC.
        '''
        self.region = region
        # Bug fix: the previous code only assigned self.vpc_id when an id was
        # passed in; the vpc_default() fallback was computed but never stored,
        # leaving self.vpc_id unset.
        self.vpc_id = vpc_id if vpc_id else vpc_default()
        self.ec2_client = boto3.client("ec2", region_name=self.region)
    def list(self):
        # NOTE(review): despite the name this describes VPCs, not instances —
        # looks like an unfinished stub; behaviour kept as-is, confirm intent.
        instances = self.ec2_client.describe_vpcs()
        print(instances)
def getnametag(tags, key="Name"):
    """
    Return the value of the tag whose Key is *key* (default "Name").

    Generalized with a backward-compatible keyword so other tag keys can be
    read too.  If several tags share the key, the last one wins (matches the
    original scan order); returns "" when the key is absent.

    :param list tags: AWS tag dicts of the form {'Key': ..., 'Value': ...}.
    :param str key: tag key to look up.
    :return str: tag value, or "" when not present.
    """
    name = ""
    for tag in tags:
        if tag['Key'] == key:
            name = tag['Value']
    return name
def getenvironmenttag(tags, key="environment"):
    """
    Return the value of the tag whose Key is *key* (default "environment").

    Generalized with a backward-compatible keyword, mirroring getnametag.
    If several tags share the key, the last one wins; returns "" when absent.

    :param list tags: AWS tag dicts of the form {'Key': ..., 'Value': ...}.
    :param str key: tag key to look up.
    :return str: tag value, or "" when not present.
    """
    name = ""
    for tag in tags:
        if tag['Key'] == key:
            name = tag['Value']
    return name
def instance_list(region='ap-southeast-2', environment=None, instance_state=None, product=None):
    """
    Describe EC2 instances in *region*, optionally filtered by Product tag,
    instance state and environment tag.

    :param str region: AWS region to query.
    :param str environment: required 'environment' tag value, if given.
    :param str instance_state: instance-state-name filter (e.g. 'running'), if given.
    :param str product: required 'Product' tag value, if given.
    :return list: one summary dict per instance (id, type, state, CPU options,
        Name/environment tags, public/private IPs when present, region).
    """
    instance_ec2 = boto3.client('ec2', region_name=region)
    instances = []
    filters = []
    if product:
        filters.append({
            'Name': 'tag:Product',
            'Values': [
                product
            ]
        })
    if instance_state:
        filters.append({
            'Name': 'instance-state-name',
            'Values': [
                instance_state
            ]
        })
    if environment:
        filters.append({
            'Name': 'tag:environment',
            'Values': [
                environment
            ]
        })
    instances_returned = instance_ec2.describe_instances(Filters=filters)
    for reservation in instances_returned['Reservations']:
        for instance in reservation['Instances']:
            # (Removed: a json.dumps of the whole instance that was computed
            # and discarded on every iteration.)
            result = dict()
            result['id'] = instance['InstanceId']
            result['instance_type'] = instance['InstanceType']
            result['state'] = instance['State']['Name']
            result['state_transition_reason'] = instance['StateTransitionReason']
            result['cpu_options_core_count'] = instance['CpuOptions']['CoreCount']
            result['cpu_options_threads_per_core'] = instance['CpuOptions']['ThreadsPerCore']
            # NOTE(review): assumes every instance has a Tags list — an
            # untagged instance would raise KeyError here; confirm fleet-wide.
            result['name'] = getnametag(instance['Tags'])
            result['environment'] = getenvironmenttag(instance['Tags'])
            if 'PublicIpAddress' in instance:
                result['public_ip'] = instance['PublicIpAddress']
            if 'PrivateIpAddress' in instance:
                result['private_ip'] = instance['PrivateIpAddress']
            result['region'] = region
            instances.append(result)
    return instances
def instance_list_filtered(region=None, environment=None, instance_state=None, product=None):
    """Collect instance summaries from one region, or all regions when
    *region* is None; other keyword filters pass straight through."""
    target_regions = [region] if region else regions_list()
    names = []
    for current in target_regions:
        names.extend(instance_list(region=current,
                                   environment=environment,
                                   instance_state=instance_state,
                                   product=product))
    return names
def myconverter(o):
    """``json.dumps(default=...)`` hook: render datetimes as strings.

    Non-datetime values fall through to None, matching the original's
    implicit return.
    """
    return str(o) if isinstance(o, datetime.datetime) else None
def security_group_list(vpc_id=None, product=None):
    """List security groups in *vpc_id* (the default VPC when omitted).

    When *product* is given, additionally filter on the ``Product`` tag.

    Bug fix: the product filter previously hard-coded ``'drupal'`` and
    silently ignored the *product* argument.
    """
    if not vpc_id:
        vpc_id = vpc_default()
    ec2_client = boto3.client('ec2')
    filters = [
        {
            'Name': 'vpc-id',
            'Values': [
                vpc_id
            ]
        }
    ]
    if product:
        filters.append({
            'Name': 'tag:Product',
            'Values': [
                product
            ]
        })
    security_groups = ec2_client.describe_security_groups(
        Filters=filters
    )
    return security_groups
def regions_list():
    """Return the names of all EC2 regions visible to this account."""
    ec2_client = boto3.client('ec2')
    regions_returned = ec2_client.describe_regions().get('Regions', [])
    return [region['RegionName'] for region in regions_returned]
def vpc_default():
    """Return the id of the default VPC in ap-southeast-2, or None.

    An AWS account has at most one default VPC per region, so returning
    on the first match is equivalent to the original last-match scan.
    """
    ec2_client = boto3.client("ec2", region_name="ap-southeast-2")
    for vpc in ec2_client.describe_vpcs()['Vpcs']:
        if vpc["IsDefault"]:
            return vpc['VpcId']
    return None
acf935b7cbfad6e28daf21b3358214e771ead6b5 | 7,349 | py | Python | CSVSplit_generalised_v3.py | Dnshbbu/RulesView-Backend | 09d47e600f9a3e815b5c60241817ce4d98e7ba7b | [
"MIT"
] | null | null | null | CSVSplit_generalised_v3.py | Dnshbbu/RulesView-Backend | 09d47e600f9a3e815b5c60241817ce4d98e7ba7b | [
"MIT"
] | null | null | null | CSVSplit_generalised_v3.py | Dnshbbu/RulesView-Backend | 09d47e600f9a3e815b5c60241817ce4d98e7ba7b | [
"MIT"
] | null | null | null | import csv
import pandas as pd
import os
import time
import logging
import traceback
import configparser
import collections
# Gets or creates a logger
logger = logging.getLogger(__name__)
# # set log level
logger.setLevel(logging.INFO)
#config object to pull the password from conf file
config = configparser.ConfigParser()
config.read('conf/creds.ini')
dirLogFolder = config.get('logs', 'LOGS_FOLDER')
# Create target Directory if don't exist
if not os.path.exists(dirLogFolder):
os.mkdir(dirLogFolder)
print("[*] Directory \'"+dirLogFolder+"\' Created ")
else:
print("[*] Directory \'"+dirLogFolder+"\' already exists")
# define file handler and set formatter
LOG_FILE = config.get('logs', 'LOGS_FOLDER')+'\\sample.log'
file_handler = logging.FileHandler(LOG_FILE)
formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(name)s | %(funcName)s | :%(lineno)s | %(message)s',datefmt='%y-%m-%d %H:%M:%S')
file_handler.setFormatter(formatter)
# add file handler to logger
logger.addHandler(file_handler)
def toSplit(splitfilename, src_select, dst_select, ser_select):
    """Expand a firewall-rules CSV so each row has single-valued
    source/destination/service columns.

    Reads ``uploads\\<splitfilename>``; rows whose first column is 'NA'
    (i.e. was empty) are skipped. For every remaining row, the three
    columns selected by *src_select*, *dst_select* and *ser_select*
    (0-based indexes, may arrive as strings) are split on ';' and one
    output row is written per combination. Output goes to
    ``<OUTPUT_FOLDER>\\Rules_<timestamp>.csv``.

    :returns: the output base filename (without extension), or None when
        an exception occurred (it is logged, matching the original
        behaviour of the swallowing except clause).

    Fixes relative to the original:
    - removed ``Appenddf = pd.read_csv(csvfilename)``, which re-read the
      whole output file inside the innermost of three nested loops and
      was never used (massive dead I/O);
    - the output file is opened once instead of being reopened in append
      mode for every written row;
    - manual while-index loops replaced with for loops.
    """
    try:
        tosplitfile = "uploads\\" + splitfilename
        # dtype=object keeps every cell as a string, exactly as uploaded.
        df = pd.read_csv(tosplitfile, dtype=object, index_col=False)
        print(df)
        # Empty cells become the literal 'NA' (not pandas NaN).
        df.fillna(value='NA', inplace=True)
        headers = list(df)
        timestr = time.strftime("%Y%m%d_%H%M%S")
        dirName = config.get('output', 'OUTPUT_FOLDER')
        fileName = "Rules_" + timestr
        logger.info("Filename: " + fileName)
        # Create the output directory if needed.
        if not os.path.exists(dirName):
            os.mkdir(dirName)
            print("[*] Directory \'"+dirName+"\' Created ")
        else:
            print("[*] Directory \'"+dirName+"\' already exists")
        print("[*] "+fileName+'.csv created in the output folder')
        csvfilename = dirName + '\\' + fileName + '.csv'
        logger.info("csvFilename: " + str(csvfilename))
        src_col = int(src_select)
        dst_col = int(dst_select)
        ser_col = int(ser_select)
        with open(csvfilename, 'w', newline='') as csvfile:
            filewriter = csv.writer(csvfile, delimiter=',')
            filewriter.writerow(headers)
            for _, row in df.iterrows():
                logger.info(row.iloc[0])
                if row.iloc[0] == "NA":
                    # Rows with an empty first column are dropped,
                    # as in the original implementation.
                    continue
                values = list(row)
                # One output row per combination of the ';'-separated
                # source, destination and service entries.
                for newsrc in values[src_col].split(';'):
                    for newdst in values[dst_col].split(';'):
                        for newservice in values[ser_col].split(';'):
                            out_row = list(values)
                            out_row[src_col] = newsrc
                            out_row[dst_col] = newdst
                            out_row[ser_col] = newservice
                            filewriter.writerow(out_row)
        return (fileName)
    except Exception as e:
        logger.exception("%s", e)
def savetoImportDir(fileName):
    """Copy an uploaded CSV into the configured output folder.

    Reads ``uploads\\<fileName>``, fills empty cells with 'NA' (matching
    the splitter's convention) and writes the frame — without the pandas
    index — to ``<OUTPUT_FOLDER>\\<fileName>``.

    :returns: the destination path, or None after logging when an
        exception occurred (same swallow-and-log behaviour as before).

    Fix: removed the large blocks of commented-out dead code.
    """
    try:
        tosavefile = "uploads\\" + fileName
        df = pd.read_csv(tosavefile, index_col=False)
        # Normalize blanks to the literal 'NA' before re-saving.
        df.fillna(value='NA', inplace=True)
        files = config.get('output', 'OUTPUT_FOLDER')
        csvfilename = files + '\\' + fileName
        df.to_csv(csvfilename, index=False)
        logger.info("csvFilename: " + str(csvfilename))
        return (csvfilename)
    except Exception as e:
        logger.exception("%s", e)
acf93607a0f087347c227af47c5a730ac9b6ad9e | 13,736 | py | Python | nodepool/driver/fake/provider.py | grahamhayes/nodepool | 58eb86e049b8bdcb2cdf9c99dad25c47e464e0b5 | [
"Apache-2.0"
] | null | null | null | nodepool/driver/fake/provider.py | grahamhayes/nodepool | 58eb86e049b8bdcb2cdf9c99dad25c47e464e0b5 | [
"Apache-2.0"
] | null | null | null | nodepool/driver/fake/provider.py | grahamhayes/nodepool | 58eb86e049b8bdcb2cdf9c99dad25c47e464e0b5 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2011-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import time
import uuid
import openstack.exceptions
from nodepool import exceptions
from nodepool.driver.openstack.provider import OpenStackProvider
from nodepool.driver.fake.handler import FakeNodeRequestHandler
from openstack.cloud.exc import OpenStackCloudCreateException
class Dummy(object):
    """Generic stand-in for an OpenStack resource (image, server, ...).

    Arbitrary keyword arguments become attributes, and dict-style access
    is supported so tests can treat instances like shade/openstacksdk
    munch objects.
    """
    # Resource-kind labels used as the first constructor argument.
    IMAGE = 'Image'
    INSTANCE = 'Instance'
    FLAVOR = 'Flavor'
    LOCATION = 'Server.Location'
    PORT = 'Port'
    def __init__(self, kind, **kw):
        self.__kind = kind
        self.__kw = kw
        for k, v in kw.items():
            setattr(self, k, v)
        # Failure injection: only resources constructed with should_fail
        # or over_quota attributes raise; everything else hits the
        # AttributeError and proceeds normally.
        try:
            if self.should_fail:
                raise openstack.exceptions.OpenStackCloudException(
                    'This image has SHOULD_FAIL set to True.')
            if self.over_quota:
                raise openstack.exceptions.HttpException(
                    message='Quota exceeded for something', http_status=403)
        except AttributeError:
            pass
    def __repr__(self):
        # e.g. <Instance 140... id=abc name=foo>
        args = []
        for k in self.__kw.keys():
            args.append('%s=%s' % (k, getattr(self, k)))
        args = ' '.join(args)
        return '<%s %s %s>' % (self.__kind, id(self), args)
    def __getitem__(self, key, default=None):
        # NOTE(review): a default on __getitem__ is non-standard; only
        # direct calls (not obj[key]) can pass it.
        return getattr(self, key, default)
    def __setitem__(self, key, value):
        setattr(self, key, value)
    def get(self, key, default=None):
        return getattr(self, key, default)
    def set(self, key, value):
        setattr(self, key, value)
class FakeOpenStackCloud(object):
    """In-memory stand-in for an openstacksdk cloud used by the tests."""
    log = logging.getLogger("nodepool.FakeOpenStackCloud")
    @staticmethod
    def _get_quota():
        # (max_cores, max_instances, max_ram) used by get_compute_limits.
        return 100, 20, 1000000
    def __init__(self, images=None, networks=None):
        """Seed the fake cloud with default images, networks and flavors.

        :param images: optional preloaded image list (defaults to one
            READY fake image).
        :param networks: optional network dicts (defaults to a public,
            a private and an ipv6 network).
        """
        # When True, _finish blocks on each object's event instead of
        # completing after a short sleep.
        self.pause_creates = False
        self._image_list = images
        if self._image_list is None:
            self._image_list = [
                Dummy(
                    Dummy.IMAGE,
                    id='fake-image-id',
                    status='READY',
                    name='Fake Precise',
                    metadata={})
            ]
        if networks is None:
            networks = [dict(id='fake-public-network-uuid',
                             name='fake-public-network-name'),
                        dict(id='fake-private-network-uuid',
                             name='fake-private-network-name'),
                        dict(id='fake-ipv6-network-uuid',
                             name='fake-ipv6-network-name')]
        self.networks = networks
        self._flavor_list = [
            Dummy(Dummy.FLAVOR, id='f1', ram=8192, name='Fake Flavor',
                  vcpus=4),
            Dummy(Dummy.FLAVOR, id='f2', ram=8192, name='Unreal Flavor',
                  vcpus=4),
        ]
        self._azs = ['az1', 'az2']
        self._server_list = []
        self.max_cores, self.max_instances, self.max_ram = FakeOpenStackCloud.\
            _get_quota()
        # Pre-populated DOWN ports for the port-cleanup tests.
        self._down_ports = [
            Dummy(Dummy.PORT, id='1a', status='DOWN',
                  device_owner="compute:nova"),
            Dummy(Dummy.PORT, id='2b', status='DOWN',
                  device_owner=None),
        ]
def _get(self, name_or_id, instance_list):
self.log.debug("Get %s in %s" % (name_or_id, repr(instance_list)))
for instance in instance_list:
if isinstance(name_or_id, dict):
if instance.id == name_or_id['id']:
return instance
elif instance.name == name_or_id or instance.id == name_or_id:
return instance
return None
def get_network(self, name_or_id, filters=None):
for net in self.networks:
if net['id'] == name_or_id or net['name'] == name_or_id:
return net
return self.networks[0]
    def _create(self, instance_list, instance_type=Dummy.INSTANCE,
                done_status='ACTIVE', max_quota=-1, **kw):
        """Create a Dummy resource, append it to *instance_list* and
        finish it asynchronously.

        The object starts in BUILD status; a background thread moves it
        to *done_status* after a short delay (or once its event is set
        when pause_creates is on). NOTE(review): *max_quota* is accepted
        but never referenced — quota is enforced via self.max_instances.
        """
        should_fail = kw.get('SHOULD_FAIL', '').lower() == 'true'
        nics = kw.get('nics', [])
        security_groups = kw.get('security_groups', [])
        addresses = None
        # if keyword 'ipv6-uuid' is found in provider config,
        # ipv6 address will be available in public addr dict.
        for nic in nics:
            if nic['net-id'] != 'fake-ipv6-network-uuid':
                continue
            addresses = dict(
                public=[dict(version=4, addr='fake'),
                        dict(version=6, addr='fake_v6')],
                private=[dict(version=4, addr='fake')]
            )
            public_v6 = 'fake_v6'
            public_v4 = 'fake'
            private_v4 = 'fake'
            host_id = 'fake_host_id'
            interface_ip = 'fake_v6'
            break
        if not addresses:
            # Default: IPv4 only.
            addresses = dict(
                public=[dict(version=4, addr='fake')],
                private=[dict(version=4, addr='fake')]
            )
            public_v6 = ''
            public_v4 = 'fake'
            private_v4 = 'fake'
            host_id = 'fake'
            interface_ip = 'fake'
        # Server creations beyond max_instances get over_quota=True,
        # which makes the Dummy constructor raise an HTTP 403.
        over_quota = False
        if (instance_type == Dummy.INSTANCE and
            self.max_instances > -1 and
                len(instance_list) >= self.max_instances):
            over_quota = True
        az = kw.get('availability_zone')
        if az and az not in self._azs:
            raise openstack.exceptions.BadRequestException(
                message='The requested availability zone is not available',
                http_status=400)
        # NOTE(review): the AZ check reads kw['availability_zone'] but the
        # recorded location zone reads kw['az'] — confirm both spellings
        # are intended by callers.
        s = Dummy(instance_type,
                  id=uuid.uuid4().hex,
                  name=kw['name'],
                  status='BUILD',
                  adminPass='fake',
                  addresses=addresses,
                  public_v4=public_v4,
                  public_v6=public_v6,
                  private_v4=private_v4,
                  host_id=host_id,
                  interface_ip=interface_ip,
                  security_groups=security_groups,
                  location=Dummy(Dummy.LOCATION, zone=kw.get('az')),
                  metadata=kw.get('meta', {}),
                  manager=self,
                  key_name=kw.get('key_name', None),
                  should_fail=should_fail,
                  over_quota=over_quota,
                  event=threading.Event())
        instance_list.append(s)
        t = threading.Thread(target=self._finish,
                             name='FakeProvider create',
                             args=(s, 0.1, done_status))
        t.start()
        return s
def _delete(self, name_or_id, instance_list):
self.log.debug("Delete from %s" % (repr(instance_list),))
instance = None
for maybe in instance_list:
if maybe.name == name_or_id or maybe.id == name_or_id:
instance = maybe
if instance:
instance_list.remove(instance)
self.log.debug("Deleted from %s" % (repr(instance_list),))
    def _finish(self, obj, delay, status):
        """Background completion: move *obj* to *status*.

        Waits on obj.event when pause_creates is set (tests release it
        explicitly), otherwise just sleeps *delay* seconds.
        """
        self.log.debug("Pause creates %s", self.pause_creates)
        if self.pause_creates:
            self.log.debug("Pausing")
            obj.event.wait()
            self.log.debug("Continuing")
        else:
            time.sleep(delay)
        obj.status = status
    def create_image(self, **kwargs):
        # Images finish in READY rather than the default ACTIVE.
        return self._create(
            self._image_list, instance_type=Dummy.IMAGE,
            done_status='READY', **kwargs)
    def get_image(self, name_or_id):
        return self._get(name_or_id, self._image_list)
    def list_images(self):
        return self._image_list
    def delete_image(self, name_or_id):
        # Guard against accidental delete-everything calls.
        if not name_or_id:
            raise Exception('name_or_id is Empty')
        self._delete(name_or_id, self._image_list)
    def create_image_snapshot(self, name, server, **metadata):
        # XXX : validate metadata?
        return self._create(
            self._image_list, instance_type=Dummy.IMAGE,
            name=name, **metadata)
    def list_flavors(self, get_extra=False):
        # get_extra accepted for API compatibility; ignored.
        return self._flavor_list
    def get_openstack_vars(self, server):
        # Stamp the fixed fake connectivity attributes onto *server*.
        server.public_v4 = 'fake'
        server.public_v6 = 'fake'
        server.private_v4 = 'fake'
        server.host_id = 'fake'
        server.interface_ip = 'fake'
        return server
    def create_server(self, **kw):
        return self._create(self._server_list, **kw)
    def get_server(self, name_or_id):
        result = self._get(name_or_id, self._server_list)
        return result
    def _clean_floating_ip(self, server):
        # Simulate releasing the floating IP: only the private address
        # remains reachable.
        server.public_v4 = ''
        server.public_v6 = ''
        server.interface_ip = server.private_v4
        return server
    def wait_for_server(self, server, **kwargs):
        """Poll until the server leaves BUILD; strip the floating IP
        unless auto_ip was requested."""
        while server.status == 'BUILD':
            time.sleep(0.1)
        auto_ip = kwargs.get('auto_ip')
        if not auto_ip:
            server = self._clean_floating_ip(server)
        return server
    def list_servers(self):
        return self._server_list
    def delete_server(self, name_or_id, delete_ips=True):
        # delete_ips accepted for API compatibility; ignored here.
        self._delete(name_or_id, self._server_list)
    def list_availability_zone_names(self):
        # Copy so callers can't mutate the fake cloud's AZ list.
        return self._azs.copy()
    def get_compute_limits(self):
        # Usage is derived from the live server list: 4 cores / 8 GB RAM
        # charged per fake server.
        return Dummy(
            'limits',
            max_total_cores=self.max_cores,
            max_total_instances=self.max_instances,
            max_total_ram_size=self.max_ram,
            total_cores_used=4 * len(self._server_list),
            total_instances_used=len(self._server_list),
            total_ram_used=8192 * len(self._server_list)
        )
    def list_ports(self, filters=None):
        # Only a DOWN-status filter returns anything; that is all the
        # port-cleanup code asks for.
        if filters and filters.get('status') == 'DOWN':
            return self._down_ports
        return []
    def delete_port(self, port_id):
        # Rebuild the list without the deleted port.
        tmp_ports = []
        for port in self._down_ports:
            if port.id != port_id:
                tmp_ports.append(port)
            else:
                self.log.debug("Deleted port ID: %s", port_id)
        self._down_ports = tmp_ports
class FakeUploadFailCloud(FakeOpenStackCloud):
    """Fake cloud whose image uploads fail a configurable number of times.

    With times_to_fail=None every upload fails; otherwise the first
    *times_to_fail* uploads raise BuilderError and later ones succeed.
    """
    log = logging.getLogger("nodepool.FakeUploadFailCloud")
    def __init__(self, times_to_fail=None):
        super(FakeUploadFailCloud, self).__init__()
        self.times_to_fail = times_to_fail
        self.times_failed = 0
    def create_image(self, **kwargs):
        if self.times_to_fail is not None:
            self.times_failed += 1
            if self.times_failed > self.times_to_fail:
                return super(FakeUploadFailCloud, self).create_image(**kwargs)
        raise exceptions.BuilderError("Test fail image upload.")
class FakeLaunchAndDeleteFailCloud(FakeOpenStackCloud):
    """Fake cloud where both server launch and delete fail *times_to_fail*
    times (forever when None), flipping success flags once they pass."""
    log = logging.getLogger("nodepool.FakeLaunchAndDeleteFailCloud")
    def __init__(self, times_to_fail=None):
        super(FakeLaunchAndDeleteFailCloud, self).__init__()
        self.times_to_fail_delete = times_to_fail
        self.times_to_fail_launch = times_to_fail
        self.times_failed_delete = 0
        self.times_failed_launch = 0
        self.launch_success = False
        self.delete_success = False
    def wait_for_server(self, **kwargs):
        # NOTE(review): the launch-failure path raises
        # ServerDeleteException even though this is a launch — looks
        # deliberate for the tests exercising it, but TODO confirm.
        if self.times_to_fail_launch is None:
            raise Exception("Test fail server launch.")
        if self.times_failed_launch < self.times_to_fail_launch:
            self.times_failed_launch += 1
            raise exceptions.ServerDeleteException("Test fail server launch.")
        else:
            self.launch_success = True
            return super(FakeLaunchAndDeleteFailCloud,
                         self).wait_for_server(**kwargs)
    def delete_server(self, *args, **kwargs):
        if self.times_to_fail_delete is None:
            raise exceptions.ServerDeleteException("Test fail server delete.")
        if self.times_failed_delete < self.times_to_fail_delete:
            self.times_failed_delete += 1
            raise exceptions.ServerDeleteException("Test fail server delete.")
        else:
            self.delete_success = True
            return super(FakeLaunchAndDeleteFailCloud,
                         self).delete_server(*args, **kwargs)
class FakeProvider(OpenStackProvider):
    """OpenStackProvider backed by FakeOpenStackCloud, with knobs to
    inject createServer failures."""
    # Cloud factory; subclasses/tests can swap in other fakes.
    fake_cloud = FakeOpenStackCloud
    def __init__(self, provider):
        # Countdown counters: while non-zero, createServer raises once
        # per call and decrements.
        self.createServer_fails = 0
        self.createServer_fails_with_external_id = 0
        self.__client = FakeProvider.fake_cloud()
        super(FakeProvider, self).__init__(provider)
    def _getClient(self):
        return self.__client
    def createServer(self, *args, **kwargs):
        # 'while' acts as an 'if that decrements': each call consumes one
        # pending failure, then raises.
        while self.createServer_fails:
            self.createServer_fails -= 1
            raise Exception("Expected createServer exception")
        while self.createServer_fails_with_external_id:
            self.createServer_fails_with_external_id -= 1
            raise OpenStackCloudCreateException('server', 'fakeid')
        return super(FakeProvider, self).createServer(*args, **kwargs)
    def getRequestHandler(self, poolworker, request):
        return FakeNodeRequestHandler(poolworker, request)
| 35.49354 | 79 | 0.593695 |
acf9361296c727e512780705fc1b8d79c8460cbb | 16,357 | py | Python | lib/galaxy/app.py | Oddant1/galaxy | 5f3a2c4ce494c460a3ca6f297269a5b14f1a5162 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/app.py | Oddant1/galaxy | 5f3a2c4ce494c460a3ca6f297269a5b14f1a5162 | [
"CC-BY-3.0"
] | 1 | 2017-11-15T14:49:37.000Z | 2017-11-15T14:49:37.000Z | lib/galaxy/app.py | xgaia/galaxy | 5f3a2c4ce494c460a3ca6f297269a5b14f1a5162 | [
"CC-BY-3.0"
] | null | null | null | from __future__ import absolute_import
import logging
import signal
import sys
import time
import galaxy.model
import galaxy.model.security
import galaxy.queues
import galaxy.quota
import galaxy.security
from galaxy import config, job_metrics, jobs
from galaxy.config_watchers import ConfigWatchers
from galaxy.containers import build_container_interfaces
from galaxy.managers.collections import DatasetCollectionManager
from galaxy.managers.folders import FolderManager
from galaxy.managers.hdas import HDAManager
from galaxy.managers.histories import HistoryManager
from galaxy.managers.interactivetool import InteractiveToolManager
from galaxy.managers.libraries import LibraryManager
from galaxy.managers.tools import DynamicToolManager
from galaxy.managers.users import UserManager
from galaxy.managers.workflows import WorkflowsManager
from galaxy.model.database_heartbeat import DatabaseHeartbeat
from galaxy.model.tags import GalaxyTagHandler
from galaxy.queue_worker import (
GalaxyQueueWorker,
send_local_control_task,
)
from galaxy.tool_shed.galaxy_install.installed_repository_manager import InstalledRepositoryManager
from galaxy.tool_shed.galaxy_install.update_repository_manager import UpdateRepositoryManager
from galaxy.tool_util.deps.views import DependencyResolversView
from galaxy.tool_util.verify import test_data
from galaxy.tools.cache import (
ToolCache,
ToolShedRepositoryCache
)
from galaxy.tools.data_manager.manager import DataManagers
from galaxy.tools.error_reports import ErrorReports
from galaxy.tools.special_tools import load_lib_tools
from galaxy.tours import ToursRegistry
from galaxy.util import (
ExecutionTimer,
heartbeat,
StructuredExecutionTimer,
)
from galaxy.visualization.data_providers.registry import DataProviderRegistry
from galaxy.visualization.genomes import Genomes
from galaxy.visualization.plugins.registry import VisualizationsRegistry
from galaxy.web import url_for
from galaxy.web.proxy import ProxyManager
from galaxy.web_stack import application_stack_instance
from galaxy.webhooks import WebhooksRegistry
log = logging.getLogger(__name__)
# Module-level singleton; populated with the UniverseApplication instance
# by the process that builds the app.
app = None
class UniverseApplication(config.ConfiguresGalaxyMixin):
    """Encapsulates the state of a Universe application"""
    def __init__(self, **kwargs):
        """Build and wire every Galaxy component in dependency order.

        *kwargs* are forwarded to config.Configuration; the ordering of
        the statements below is load-bearing (many components read state
        set up by earlier ones), so do not reorder casually.
        """
        if not log.handlers:
            # Paste didn't handle it, so we need a temporary basic log
            # configured. The handler added here gets dumped and replaced with
            # an appropriately configured logger in configure_logging below.
            logging.basicConfig(level=logging.DEBUG)
        log.debug("python path is: %s", ", ".join(sys.path))
        self.name = 'galaxy'
        # is_webapp will be set to true when building WSGI app
        self.is_webapp = False
        self.startup_timer = ExecutionTimer()
        self.new_installation = False
        # Read config file and check for errors
        self.config = config.Configuration(**kwargs)
        self.config.check()
        config.configure_logging(self.config)
        self.execution_timer_factory = ExecutionTimerFactory(self.config)
        self.configure_fluent_log()
        # A lot of postfork initialization depends on the server name, ensure it is set immediately after forking before other postfork functions
        self.application_stack = application_stack_instance(app=self)
        self.application_stack.register_postfork_function(self.application_stack.set_postfork_server_name, self)
        self.config.reload_sanitize_allowlist(explicit='sanitize_allowlist_file' in kwargs)
        self.amqp_internal_connection_obj = galaxy.queues.connection_from_config(self.config)
        # queue_worker *can* be initialized with a queue, but here we don't
        # want to and we'll allow postfork to bind and start it.
        self.queue_worker = GalaxyQueueWorker(self)
        self._configure_tool_shed_registry()
        self._configure_object_store(fsmon=True)
        # Setup the database engine and ORM
        config_file = kwargs.get('global_conf', {}).get('__file__', None)
        if config_file:
            log.debug('Using "galaxy.ini" config file: %s', config_file)
        check_migrate_tools = self.config.check_migrate_tools
        self._configure_models(check_migrate_databases=self.config.check_migrate_databases, check_migrate_tools=check_migrate_tools, config_file=config_file)
        # Security helper
        self._configure_security()
        # Tag handler
        self.tag_handler = GalaxyTagHandler(self.model.context)
        # Managers for the core domain objects (histories, datasets,
        # workflows, libraries, ...).
        self.dataset_collections_service = DatasetCollectionManager(self)
        self.history_manager = HistoryManager(self)
        self.hda_manager = HDAManager(self)
        self.workflow_manager = WorkflowsManager(self)
        self.dependency_resolvers_view = DependencyResolversView(self)
        self.test_data_resolver = test_data.TestDataResolver(file_dirs=self.config.tool_test_data_directories)
        self.library_folder_manager = FolderManager()
        self.library_manager = LibraryManager()
        self.dynamic_tool_manager = DynamicToolManager(self)
        # Tool Data Tables
        self._configure_tool_data_tables(from_shed_config=False)
        # Load dbkey / genome build manager
        self._configure_genome_builds(data_table_name="__dbkeys__", load_old_style=True)
        # Genomes
        self.genomes = Genomes(self)
        # Data providers registry.
        self.data_provider_registry = DataProviderRegistry()
        # Initialize job metrics manager, needs to be in place before
        # config so per-destination modifications can be made.
        self.job_metrics = job_metrics.JobMetrics(self.config.job_metrics_config_file, app=self)
        # Initialize error report plugins.
        self.error_reports = ErrorReports(self.config.error_report_file, app=self)
        # Initialize the job management configuration
        self.job_config = jobs.JobConfiguration(self)
        # Setup a Tool Cache
        self.tool_cache = ToolCache()
        self.tool_shed_repository_cache = ToolShedRepositoryCache(self)
        # Watch various config files for immediate reload
        self.watchers = ConfigWatchers(self)
        self._configure_tool_config_files()
        self.installed_repository_manager = InstalledRepositoryManager(self)
        self._configure_datatypes_registry(self.installed_repository_manager)
        galaxy.model.set_datatypes_registry(self.datatypes_registry)
        self._configure_toolbox()
        # Load Data Manager
        self.data_managers = DataManagers(self)
        # Load the update repository manager.
        self.update_repository_manager = UpdateRepositoryManager(self)
        # Load proprietary datatype converters and display applications.
        self.installed_repository_manager.load_proprietary_converters_and_display_applications()
        # Load datatype display applications defined in local datatypes_conf.xml
        self.datatypes_registry.load_display_applications(self)
        # Load datatype converters defined in local datatypes_conf.xml
        self.datatypes_registry.load_datatype_converters(self.toolbox)
        # Load external metadata tool
        self.datatypes_registry.load_external_metadata_tool(self.toolbox)
        # Load history import/export tools.
        load_lib_tools(self.toolbox)
        # visualizations registry: associates resources with visualizations, controls how to render
        self.visualizations_registry = VisualizationsRegistry(
            self,
            directories_setting=self.config.visualization_plugins_directory,
            template_cache_dir=self.config.template_cache_path)
        # Tours registry
        self.tour_registry = ToursRegistry(self.config.tour_config_dir)
        # Webhooks registry
        self.webhooks_registry = WebhooksRegistry(self.config.webhooks_dir)
        # Load security policy.
        self.security_agent = self.model.security_agent
        self.host_security_agent = galaxy.model.security.HostAgent(
            model=self.security_agent.model,
            permitted_actions=self.security_agent.permitted_actions)
        # Load quota management.
        if self.config.enable_quotas:
            self.quota_agent = galaxy.quota.QuotaAgent(self.model)
        else:
            self.quota_agent = galaxy.quota.NoQuotaAgent(self.model)
        # Heartbeat for thread profiling
        self.heartbeat = None
        from galaxy import auth
        self.auth_manager = auth.AuthManager(self)
        self.user_manager = UserManager(self)
        # Start the heartbeat process if configured and available (wait until
        # postfork if using uWSGI)
        if self.config.use_heartbeat:
            if heartbeat.Heartbeat:
                self.heartbeat = heartbeat.Heartbeat(
                    self.config,
                    period=self.config.heartbeat_interval,
                    fname=self.config.heartbeat_log
                )
                self.heartbeat.daemon = True
                self.application_stack.register_postfork_function(self.heartbeat.start)
        self.authnz_manager = None
        if self.config.enable_oidc:
            from galaxy.authnz import managers
            self.authnz_manager = managers.AuthnzManager(self,
                                                         self.config.oidc_config,
                                                         self.config.oidc_backends_config)
        self.sentry_client = None
        if self.config.sentry_dsn:
            def postfork_sentry_client():
                import raven
                self.sentry_client = raven.Client(self.config.sentry_dsn, transport=raven.transport.HTTPTransport)
            self.application_stack.register_postfork_function(postfork_sentry_client)
        # Transfer manager client
        if self.config.get_bool('enable_beta_job_managers', False):
            from galaxy.jobs import transfer_manager
            self.transfer_manager = transfer_manager.TransferManager(self)
        # Start the job manager
        from galaxy.jobs import manager
        self.job_manager = manager.JobManager(self)
        self.application_stack.register_postfork_function(self.job_manager.start)
        self.proxy_manager = ProxyManager(self.config)
        from galaxy.workflow import scheduling_manager
        # Must be initialized after job_config.
        self.workflow_scheduling_manager = scheduling_manager.WorkflowSchedulingManager(self)
        # Must be initialized after any component that might make use of stack messaging is configured. Alternatively if
        # it becomes more commonly needed we could create a prefork function registration method like we do with
        # postfork functions.
        self.application_stack.init_late_prefork()
        self.containers = {}
        if self.config.enable_beta_containers_interface:
            self.containers = build_container_interfaces(
                self.config.containers_config_file,
                containers_conf=self.config.containers_conf
            )
        self.interactivetool_manager = InteractiveToolManager(self)
        # Configure handling of signals
        handlers = {}
        if self.heartbeat:
            handlers[signal.SIGUSR1] = self.heartbeat.dump_signal_handler
        self._configure_signal_handlers(handlers)
        self.database_heartbeat = DatabaseHeartbeat(
            application_stack=self.application_stack
        )
        self.database_heartbeat.add_change_callback(self.watchers.change_state)
        self.application_stack.register_postfork_function(self.database_heartbeat.start)
        # Start web stack message handling
        self.application_stack.register_postfork_function(self.application_stack.start)
        self.application_stack.register_postfork_function(self.queue_worker.bind_and_start)
        # Delay toolbox index until after startup
        self.application_stack.register_postfork_function(lambda: send_local_control_task(self, 'rebuild_toolbox_search_index'))
        # Drop pooled DB connections so forked workers open fresh ones.
        self.model.engine.dispose()
        # Inject url_for for components to more easily optionally depend
        # on url_for.
        self.url_for = url_for
        self.server_starttime = int(time.time())  # used for cachebusting
        log.info("Galaxy app startup finished %s" % self.startup_timer)
def shutdown(self):
log.debug('Shutting down')
exception = None
try:
self.queue_worker.shutdown()
except Exception as e:
exception = exception or e
log.exception("Failed to shutdown control worker cleanly")
try:
self.watchers.shutdown()
except Exception as e:
exception = exception or e
log.exception("Failed to shutdown configuration watchers cleanly")
try:
self.database_heartbeat.shutdown()
except Exception as e:
exception = exception or e
log.exception("Failed to shutdown database heartbeat cleanly")
try:
self.workflow_scheduling_manager.shutdown()
except Exception as e:
exception = exception or e
log.exception("Failed to shutdown workflow scheduling manager cleanly")
try:
self.job_manager.shutdown()
except Exception as e:
exception = exception or e
log.exception("Failed to shutdown job manager cleanly")
try:
self.object_store.shutdown()
except Exception as e:
exception = exception or e
log.exception("Failed to shutdown object store cleanly")
try:
if self.heartbeat:
self.heartbeat.shutdown()
except Exception as e:
exception = exception or e
log.exception("Failed to shutdown heartbeat cleanly")
try:
self.update_repository_manager.shutdown()
except Exception as e:
exception = exception or e
log.exception("Failed to shutdown update repository manager cleanly")
try:
self.model.engine.dispose()
except Exception as e:
exception = exception or e
log.exception("Failed to shutdown SA database engine cleanly")
try:
self.application_stack.shutdown()
except Exception as e:
exception = exception or e
log.exception("Failed to shutdown application stack interface cleanly")
if exception:
raise exception
else:
log.debug('Finished shutting down')
    def configure_fluent_log(self):
        # Optional structured trace logging to a fluentd endpoint;
        # trace_logger stays None unless fluent_log is enabled in config.
        if self.config.fluent_log:
            from galaxy.util.custom_logging.fluent_log import FluentTraceLogger
            self.trace_logger = FluentTraceLogger('galaxy', self.config.fluent_host, self.config.fluent_port)
        else:
            self.trace_logger = None
    @property
    def is_job_handler(self):
        # This process handles jobs when job tracking is database-based and it
        # is configured as a handler.  When jobs are NOT tracked in the
        # database the expression is always truthy (presumably each process
        # then handles its own jobs -- confirm against the job manager).
        return (self.config.track_jobs_in_database and self.job_config.is_handler) or not self.config.track_jobs_in_database
class StatsdStructuredExecutionTimer(StructuredExecutionTimer):
    """Structured execution timer that also reports timings to statsd.

    Rendering the timer via ``to_str`` sends the elapsed time (milliseconds)
    to the supplied Galaxy statsd client under the timer's id before
    delegating to the base class.
    """
    def __init__(self, galaxy_statsd_client, *args, **kwds):
        # Set the client before base-class init; remaining args go unchanged
        # to StructuredExecutionTimer.
        self.galaxy_statsd_client = galaxy_statsd_client
        super(StatsdStructuredExecutionTimer, self).__init__(*args, **kwds)
    def to_str(self, **kwd):
        # Emit the metric (ms) first, then produce the base-class string.
        self.galaxy_statsd_client.timing(self.timer_id, self.elapsed * 1000., kwd)
        return super(StatsdStructuredExecutionTimer, self).to_str(**kwd)
class ExecutionTimerFactory(object):
    """Builds execution timers; statsd-reporting ones when statsd is configured."""

    def __init__(self, config):
        """Read optional ``statsd_*`` settings from *config*.

        When ``statsd_host`` is unset/empty, no statsd client is created and
        plain timers are produced.
        """
        statsd_host = getattr(config, "statsd_host", None)
        if not statsd_host:
            self.galaxy_statsd_client = None
            return
        # Imported lazily so the statsd dependency is only needed when enabled.
        from galaxy.web.framework.middleware.statsd import GalaxyStatsdClient
        statsd_port = getattr(config, 'statsd_port', 8125)
        statsd_prefix = getattr(config, 'statsd_prefix', 'galaxy')
        statsd_influxdb = getattr(config, 'statsd_influxdb', False)
        self.galaxy_statsd_client = GalaxyStatsdClient(
            statsd_host,
            statsd_port,
            statsd_prefix,
            statsd_influxdb,
        )

    def get_timer(self, *args, **kwd):
        """Return a timer; statsd-backed when a client was configured."""
        client = self.galaxy_statsd_client
        if client:
            return StatsdStructuredExecutionTimer(client, *args, **kwd)
        return StructuredExecutionTimer(*args, **kwd)
| 44.088949 | 157 | 0.704102 |
acf936b4b6c94db1eacb1c0669e736d56b2c4f1e | 3,811 | py | Python | setup.py | jonathanmach/pytracking | abb7d74f868f82edae37a86863faecab7ec98ea9 | [
"BSD-3-Clause"
] | null | null | null | setup.py | jonathanmach/pytracking | abb7d74f868f82edae37a86863faecab7ec98ea9 | [
"BSD-3-Clause"
] | null | null | null | setup.py | jonathanmach/pytracking | abb7d74f868f82edae37a86863faecab7ec98ea9 | [
"BSD-3-Clause"
] | null | null | null | from itertools import chain
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
EXTRA_REQUIRES = {
'test': ['tox>=2.3.1', 'pytest>=2.9.2'],
'webhook': ['requests>=2.10.0'],
'html': ['lxml>=3.6.1'],
'crypto': ['cryptography>=1.4'],
'django': ['django-ipware>=1.1.5,<1.2', 'django>=1.7']
}
ALL_REQUIRE = list(chain(*EXTRA_REQUIRES.values()))
EXTRA_REQUIRES["all"] = ALL_REQUIRE
setup(
name='pytracking',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.2.1',
description='Email open and click tracking',
long_description=long_description,
# The project's main homepage.
url='https://github.com/resulto-admin/pytracking',
# Author details
author='Resulto Dev Team',
author_email='dev@resulto.ca',
# Choose your license
license='New BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Communications :: Email',
'Topic :: Software Development :: Libraries :: Python Modules',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
# What does your project relate to?
keywords='email open click tracking',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require=EXTRA_REQUIRES,
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
data_files=[],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
},
)
| 34.645455 | 94 | 0.673052 |
acf936e2d7e1f1b7e9f40d64261167c57abd00e7 | 8,973 | py | Python | pipes/test_pipes.py | thekio/pipes | 882baf5b11a820a90ac51761a41ce5840c14e536 | [
"MIT"
] | null | null | null | pipes/test_pipes.py | thekio/pipes | 882baf5b11a820a90ac51761a41ce5840c14e536 | [
"MIT"
] | null | null | null | pipes/test_pipes.py | thekio/pipes | 882baf5b11a820a90ac51761a41ce5840c14e536 | [
"MIT"
] | null | null | null | import random
import time
from collections.abc import Iterator
from itertools import groupby, chain
from typing import Sized, Any
from pipes import Pipes
class _CountableIterator:
    """Wraps an iterable and counts how many items have been consumed.

    The tests use the count to verify a pipeline made exactly one pass
    over its source.
    """

    def __init__(self, itr: Iterator):
        self._itr = itr
        self._count = 0

    def gen(self) -> Iterator:
        """Yield items from the wrapped iterable, counting each one."""
        for item in self._itr:
            self._count += 1
            yield item

    @property
    def count(self) -> int:
        """Number of items consumed so far."""
        return self._count
# Numeric fixture: the half-open integer range [1, 11).
_KNOWN_RANGE_START = 1
_KNOWN_RANGE_END = 11
_KNOWN_RANGE = range(_KNOWN_RANGE_START, _KNOWN_RANGE_END)
# Record fixture: colored/sized shapes, each carrying a list of item tags.
_KNOWN_COLLECTION = [
    {'color': 'blue', 'shape': 'square', 'size': 1, 'items': ['a', 'b', 'c']},
    {'color': 'red', 'shape': 'square', 'size': 2, 'items': ['d', 'e', 'f']},
    {'color': 'red', 'shape': 'circle', 'size': 2, 'items': ['a', 'a', 'b']},
    {'color': 'green', 'shape': 'circle', 'size': 1, 'items': ['f']},
    {'color': 'blue', 'shape': 'triangle', 'size': 3, 'items': ['c', 'd', 'e', 'a']},
    {'color': 'green', 'shape': 'triangle', 'size': 3, 'items': ['f', 'g']},
]
def _assert_linear_performance(iterator: _CountableIterator, sized: Sized):
    """Assert the pipeline consumed each source element exactly once."""
    expected_count = len(sized)
    assert iterator.count == expected_count
def test_map_apply():
    """lz_map lazily doubles each element; apply materializes the result."""
    src = _CountableIterator(_KNOWN_RANGE)
    want = [i * 2 for i in _KNOWN_RANGE]
    got = (Pipes(src.gen())
           .lz_map(lambda v: v * 2)
           .apply())
    assert got == want
    _assert_linear_performance(src, _KNOWN_RANGE)
def test_multi_map_apply():
    """Chained lz_map stages compose left-to-right in a single pass."""
    src = _CountableIterator(_KNOWN_RANGE)
    want = [1.0 / ((i + 1) * 2) for i in _KNOWN_RANGE]
    got = (Pipes(src.gen())
           .lz_map(lambda v: v + 1)
           .lz_map(lambda v: v * 2)
           .lz_map(lambda v: 1.0 / v)
           .apply())
    assert got == want
    _assert_linear_performance(src, _KNOWN_RANGE)
def test_map_filter_map_apply():
    """A filter between two maps drops odd values without an extra pass."""
    src = _CountableIterator(_KNOWN_RANGE)
    want = [(i * 10) + 1 for i in _KNOWN_RANGE if (i * 10) % 2 == 0]
    got = (Pipes(src.gen())
           .lz_map(lambda v: v * 10)
           .lz_filter(lambda v: v % 2 == 0)
           .lz_map(lambda v: v + 1)
           .apply())
    assert got == want
    _assert_linear_performance(src, _KNOWN_RANGE)
def test_multi_filter_apply():
    """Stacked lz_filter stages act as a logical AND of the predicates."""
    src = _CountableIterator(_KNOWN_RANGE)
    want = [i for i in _KNOWN_RANGE if (i % 3 == 0) and (i % 9 == 0)]
    got = (Pipes(src.gen())
           .lz_filter(lambda v: v % 3 == 0)
           .lz_filter(lambda v: v % 9 == 0)
           .apply())
    assert got == want
    _assert_linear_performance(src, _KNOWN_RANGE)
def test_map_reduce():
    """reduce folds the stream with the given binary op and seed value."""
    src = _CountableIterator(_KNOWN_RANGE)
    want = sum([i for i in _KNOWN_RANGE])
    got = (Pipes(src.gen())
           .reduce(lambda acc, v: acc + v, init_val=0))
    assert got == want
    _assert_linear_performance(src, _KNOWN_RANGE)
def test_map_group_by():
    """group_by buckets elements by key and applies group_fn per bucket."""
    src = _CountableIterator(_KNOWN_COLLECTION)
    want = {
        key: len(list(bucket))
        for key, bucket in groupby([e for e in _KNOWN_COLLECTION], lambda x: x['color'])
    }
    got = (Pipes(src.gen())
           .lz_map(lambda x: x)
           .group_by(lambda x: x['color'], group_fn=lambda bucket: len(list(bucket))))
    assert got == want
    _assert_linear_performance(src, _KNOWN_COLLECTION)
def test_map_flat_map_count():
    """flat_map flattens the nested item lists before filtering/counting."""
    src = _CountableIterator(_KNOWN_COLLECTION)
    flattened = [e.upper() for e in chain.from_iterable(
        [e['items'] for e in _KNOWN_COLLECTION])]
    want = len([x for x in flattened if x == 'A'])
    got = (Pipes(src.gen())
           .lz_map(lambda x: x)
           .flat_map(lambda x: x['items'])
           .lz_map(lambda x: x.upper())
           .lz_filter(lambda x: x == 'A')
           .count())
    assert got == want
    _assert_linear_performance(src, _KNOWN_COLLECTION)
def test_map_uniq_count():
    """uniq removes duplicates while preserving a single pass."""
    src = _CountableIterator(_KNOWN_COLLECTION)
    want = len(list(dict.fromkeys([e['color'] for e in _KNOWN_COLLECTION])))
    got = (Pipes(src.gen())
           .lz_map(lambda x: x['color'])
           .uniq()
           .count())
    assert got == want
    _assert_linear_performance(src, _KNOWN_COLLECTION)
def test_uniq_with_custom_hash():
    """uniq can deduplicate whole records via a caller-provided hash_fn."""
    src = _CountableIterator(_KNOWN_COLLECTION)
    want = len(list(dict.fromkeys([e['color'] for e in _KNOWN_COLLECTION])))
    got = (Pipes(src.gen())
           .uniq(hash_fn=lambda x: hash(x['color']))
           .count())
    assert got == want
    _assert_linear_performance(src, _KNOWN_COLLECTION)
def test_sort_apply():
    """sort orders the mapped values ascending by default."""
    src = _CountableIterator(_KNOWN_COLLECTION)
    want = sorted([e['color'] for e in _KNOWN_COLLECTION])
    got = (Pipes(src.gen())
           .lz_map(lambda x: x['color'])
           .sort()
           .apply())
    assert got == want
    _assert_linear_performance(src, _KNOWN_COLLECTION)
def test_reverse_sort_with_custom_hash_fn():
    """sort accepts a key function and a reverse flag."""
    src = _CountableIterator(_KNOWN_COLLECTION)
    want = sorted([e['color'] for e in _KNOWN_COLLECTION], reverse=True)
    got = (Pipes(src.gen())
           .sort(fn=lambda x: x['color'], reverse=True)
           .lz_map(lambda x: x['color'])
           .apply())
    assert got == want
    _assert_linear_performance(src, _KNOWN_COLLECTION)
def test_peek():
    """lz_peek observes each element without altering the stream."""
    src = _CountableIterator(_KNOWN_COLLECTION)
    want = [e['color'] for e in _KNOWN_COLLECTION]
    seen = []
    (Pipes(src.gen())
     .lz_map(lambda x: x['color'])
     .lz_peek(lambda x: seen.append(x))
     .apply())
    assert seen == want
    _assert_linear_performance(src, _KNOWN_COLLECTION)
def test_stream():
    """stream exposes the pipeline as a plain iterator."""
    src = _CountableIterator(_KNOWN_COLLECTION)
    want = [e['color'] for e in _KNOWN_COLLECTION]
    got = []
    for color in Pipes(src.gen()).lz_map(lambda x: x['color']).stream():
        got.append(color)
    assert got == want
    _assert_linear_performance(src, _KNOWN_COLLECTION)
def test_map_with_exception_handling():
    """When fn raises, error_val_fn supplies the value and error_log_fn is notified."""
    error = 'expected error'
    value = 5
    new_value = value * 100
    src = _CountableIterator(_KNOWN_RANGE)
    want = [new_value if i == value else i * 2 for i in _KNOWN_RANGE]
    def flaky_double(x: Any) -> Any:
        # Fail exactly once, on the sentinel value.
        if x == value:
            raise ValueError(error)
        return x * 2
    captured = {}
    def record_error(exc: Exception, v: Any, nv: Any) -> None:
        captured['error'] = f'{exc}'
        captured['value'] = v
        captured['new_value'] = nv
    got = (Pipes(src.gen())
           .lz_map(fn=flaky_double,
                   error_val_fn=lambda v: v * 100,
                   error_log_fn=record_error)
           .apply())
    assert got == want
    assert captured['error'] == error
    assert captured['value'] == value
    assert captured['new_value'] == new_value
    _assert_linear_performance(src, _KNOWN_RANGE)
def test_end_to_end_parallel_pipeline_using_a_threadpool():
    """A threaded Pipes run must match the serial result and finish faster."""
    src = _CountableIterator(_KNOWN_COLLECTION)
    want = [e['shape'] for e in _KNOWN_COLLECTION]
    def slow_shape(x: Any) -> Any:
        # sleep between 1/5 (200ms) to 1/10 (100ms)
        time.sleep(1.0 / random.randrange(5, 10))
        return x['shape']
    serial_start = time.time()
    serial_result = (Pipes(src.gen())
                     .lz_map(slow_shape)
                     .apply())
    serial_elapsed = time.time() - serial_start
    threaded_start = time.time()
    with Pipes(src.gen(), num_threads=8) as pipes:
        threaded_result = (pipes
                           .lz_map(slow_shape)
                           .apply())
    threaded_elapsed = time.time() - threaded_start
    assert serial_result == want
    assert threaded_result == want
    assert serial_elapsed > threaded_elapsed
acf9373dbbcf565ea858def5fed8302b0983ee9e | 1,565 | py | Python | test/functional/wallet_fallbackfee.py | widecoin-project/widecoin | 143b190a61f95a4b7d40c5da484cdde8f0c5ac3f | [
"MIT"
] | 8 | 2021-04-17T16:11:50.000Z | 2021-06-23T05:30:39.000Z | test/functional/wallet_fallbackfee.py | widecoin-project/widecoin | 143b190a61f95a4b7d40c5da484cdde8f0c5ac3f | [
"MIT"
] | 1 | 2021-04-18T11:57:59.000Z | 2021-04-18T11:57:59.000Z | test/functional/wallet_fallbackfee.py | widecoin-project/widecoin | 143b190a61f95a4b7d40c5da484cdde8f0c5ac3f | [
"MIT"
] | 7 | 2021-04-17T16:04:12.000Z | 2021-06-10T00:54:53.000Z | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Widecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet replace-by-fee capabilities in conjunction with the fallbackfee."""
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import WidecoinTestFramework
from test_framework.util import assert_raises_rpc_error
class WalletRBFTest(WidecoinTestFramework):
    """Exercise sending with and without a usable fallback fee."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        node = self.nodes[0]
        # Mature a coinbase so the wallet has spendable funds.
        node.generate(COINBASE_MATURITY + 1)
        # With the default regtest fallback fee, sending without fee
        # estimations must succeed.
        node.sendtoaddress(node.getnewaddress(), 1)
        # With the fallback fee disabled, every operation needing a fee
        # estimate must fail.
        self.restart_node(0, extra_args=["-fallbackfee=0"])
        assert_raises_rpc_error(-6, "Fee estimation failed", lambda: self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1))
        assert_raises_rpc_error(-4, "Fee estimation failed", lambda: self.nodes[0].fundrawtransaction(self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1})))
        assert_raises_rpc_error(-6, "Fee estimation failed", lambda: self.nodes[0].sendmany("", {self.nodes[0].getnewaddress(): 1}))


if __name__ == '__main__':
    WalletRBFTest().main()
| 47.424242 | 178 | 0.738658 |
acf937d178baf3437939cb16ca39228819fd3b0e | 293 | py | Python | tests/test_tensorable_mixin.py | mmcdermott/ML_mixins | b494effe5c0feebb22517190faa85d7cdc418a3f | [
"MIT"
] | null | null | null | tests/test_tensorable_mixin.py | mmcdermott/ML_mixins | b494effe5c0feebb22517190faa85d7cdc418a3f | [
"MIT"
] | null | null | null | tests/test_tensorable_mixin.py | mmcdermott/ML_mixins | b494effe5c0feebb22517190faa85d7cdc418a3f | [
"MIT"
] | null | null | null | import sys
sys.path.append('..')
import unittest
from mixins.mixins import *
try:
    import torch
    # The test case is only defined when torch is importable, since
    # TensorableMixin presumably depends on torch -- confirm in mixins.mixins.
    class TestTensorableMixin(unittest.TestCase):
        def test_constructs(self):
            # Smoke test: constructing the mixin must not raise.
            T = TensorableMixin()
except ImportError: pass  # torch missing: deliberately skip the torch-dependent tests
if __name__ == '__main__': unittest.main()
| 15.421053 | 49 | 0.686007 |
acf9380bf712975316f87f62cc13dc6dc779b8e7 | 2,076 | py | Python | homeassistant/components/plum_lightpad/config_flow.py | SAABoholic/core | 25b093e69e9939c131f4dc83566a9571929803df | [
"Apache-2.0"
] | 4 | 2020-07-29T17:47:10.000Z | 2020-09-16T13:39:13.000Z | homeassistant/components/plum_lightpad/config_flow.py | SAABoholic/core | 25b093e69e9939c131f4dc83566a9571929803df | [
"Apache-2.0"
] | 38 | 2020-07-23T07:14:17.000Z | 2022-03-31T06:01:46.000Z | homeassistant/components/plum_lightpad/config_flow.py | SAABoholic/core | 25b093e69e9939c131f4dc83566a9571929803df | [
"Apache-2.0"
] | 3 | 2016-10-03T20:14:06.000Z | 2019-04-19T15:56:56.000Z | """Config flow for Plum Lightpad."""
import logging
from typing import Any, Dict, Optional
from aiohttp import ContentTypeError
from requests.exceptions import ConnectTimeout, HTTPError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers import ConfigType
from .const import DOMAIN # pylint: disable=unused-import
from .utils import load_plum
_LOGGER = logging.getLogger(__name__)
class PlumLightpadConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Config flow for Plum Lightpad integration."""

    VERSION = 1

    def _show_form(self, errors=None):
        """Render the credentials form, optionally with error markers."""
        credential_schema = vol.Schema(
            {
                vol.Required(CONF_USERNAME): str,
                vol.Required(CONF_PASSWORD): str,
            }
        )
        return self.async_show_form(
            step_id="user", data_schema=credential_schema, errors=errors or {},
        )

    async def async_step_user(
        self, user_input: Optional[ConfigType] = None
    ) -> Dict[str, Any]:
        """Handle a flow started by the user (or forwarded from import)."""
        if not user_input:
            # First visit (or empty submission): ask for credentials.
            return self._show_form()
        username = user_input[CONF_USERNAME]
        password = user_input[CONF_PASSWORD]
        try:
            # Log in to the Plum cloud purely to validate the credentials;
            # the loaded object itself is discarded.
            await load_plum(username, password, self.hass)
        except (ContentTypeError, ConnectTimeout, HTTPError) as err:
            _LOGGER.error("Unable to connect/authenticate to Plum cloud: %s", str(err))
            return self._show_form({"base": "cannot_connect"})
        # One config entry per cloud account, keyed on the username.
        await self.async_set_unique_id(username)
        self._abort_if_unique_id_configured()
        return self.async_create_entry(
            title=username, data={CONF_USERNAME: username, CONF_PASSWORD: password}
        )

    async def async_step_import(
        self, import_config: Optional[ConfigType]
    ) -> Dict[str, Any]:
        """Import a config entry from configuration.yaml."""
        return await self.async_step_user(import_config)
acf9382f0d406b56db8270d774c36293c28958db | 3,322 | py | Python | RecoMET/Configuration/python/RecoMET_EventContent_cff.py | neu-physics/cmssw | a0f9c15465d8cc160b6fd2fa92475ad0f268f46f | [
"Apache-2.0"
] | 2 | 2020-01-21T11:23:39.000Z | 2020-01-21T11:23:42.000Z | RecoMET/Configuration/python/RecoMET_EventContent_cff.py | neu-physics/cmssw | a0f9c15465d8cc160b6fd2fa92475ad0f268f46f | [
"Apache-2.0"
] | 8 | 2020-03-20T23:18:36.000Z | 2020-05-27T11:00:06.000Z | RecoMET/Configuration/python/RecoMET_EventContent_cff.py | neu-physics/cmssw | a0f9c15465d8cc160b6fd2fa92475ad0f268f46f | [
"Apache-2.0"
] | 3 | 2019-03-09T13:06:43.000Z | 2020-07-03T00:47:30.000Z | import FWCore.ParameterSet.Config as cms
##_____________________________________________________________ AOD content __||
RecoMETAOD = cms.PSet(
outputCommands = cms.untracked.vstring('keep recoCaloMETs_caloMet_*_*',
'keep recoCaloMETs_caloMetBE_*_*',
'keep recoCaloMETs_caloMetBEFO_*_*',
'keep recoCaloMETs_caloMetM_*_*',
'keep recoPFMETs_pfMet_*_*',
'keep recoPFMETs_pfChMet_*_*',
'keep recoPFMETs_pfMetEI_*_*',
'keep recoMuonMETCorrectionDataedmValueMap_muonMETValueMapProducer_*_*',
# 'drop recoHcalNoiseRBXs_*_*_*',
'keep HcalNoiseSummary_hcalnoise_*_*',
#'keep *GlobalHaloData_*_*_*',
'keep recoGlobalHaloData_GlobalHaloData_*_*',
'keep recoCSCHaloData_CSCHaloData_*_*',
'keep recoBeamHaloSummary_BeamHaloSummary_*_*'
)
)
RecoGenMETAOD = cms.PSet(
outputCommands = cms.untracked.vstring('keep recoGenMETs_*_*_*')
)
RecoHcalNoiseAOD = cms.PSet(
outputCommands = cms.untracked.vstring( # 'drop recoHcalNoiseRBXs_hcalnoise_*_*',
'keep HcalNoiseSummary_hcalnoise_*_*'
)
)
##____________________________________________________________ RECO content __||
RecoMETRECO = cms.PSet(
outputCommands = cms.untracked.vstring('keep recoHcalNoiseRBXs_hcalnoise_*_*',
#'keep *HaloData_*_*_*',
'keep recoEcalHaloData_EcalHaloData_*_*',
'keep recoHcalHaloData_HcalHaloData_*_*'
)
)
RecoMETRECO.outputCommands.extend(RecoMETAOD.outputCommands)
RecoGenMETRECO = cms.PSet(
outputCommands = cms.untracked.vstring()
)
RecoGenMETRECO.outputCommands.extend(RecoGenMETAOD.outputCommands)
RecoHcalNoiseRECO = cms.PSet(
outputCommands = cms.untracked.vstring('keep recoHcalNoiseRBXs_hcalnoise_*_*')
)
RecoHcalNoiseRECO.outputCommands.extend(RecoHcalNoiseAOD.outputCommands)
##______________________________________________________ Full Event content __||
RecoMETFEVT = cms.PSet(
outputCommands = cms.untracked.vstring('keep *HaloData_*_*_*',
'keep *BeamHaloSummary_BeamHaloSummary_*_*'
)
)
RecoMETFEVT.outputCommands.extend(RecoMETRECO.outputCommands)
RecoGenMETFEVT = cms.PSet(
outputCommands = cms.untracked.vstring()
)
RecoGenMETFEVT.outputCommands.extend(RecoGenMETRECO.outputCommands)
RecoHcalNoiseFEVT = cms.PSet(
outputCommands = cms.untracked.vstring()
)
RecoHcalNoiseFEVT.outputCommands.extend(RecoHcalNoiseRECO.outputCommands)
##____________________________________________________________________________||
| 47.457143 | 115 | 0.573149 |
acf9393043d213b0feb7c4e86b8c69cbab48b407 | 1,407 | py | Python | lib/spack/spack/test/views.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 3 | 2021-09-29T02:14:40.000Z | 2022-01-27T20:50:36.000Z | lib/spack/spack/test/views.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 6 | 2022-01-08T08:41:11.000Z | 2022-03-14T19:28:07.000Z | lib/spack/spack/test/views.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack.directory_layout import DirectoryLayout
from spack.filesystem_view import YamlFilesystemView
from spack.spec import Spec
def test_global_activation(install_mockery, mock_fetch):
"""This test ensures that views which are maintained inside of an extendee
package's prefix are maintained as expected and are compatible with
global activations prior to #7152.
"""
spec = Spec('extension1').concretized()
pkg = spec.package
pkg.do_install()
pkg.do_activate()
extendee_spec = spec['extendee']
extendee_pkg = spec['extendee'].package
view = extendee_pkg.view()
assert pkg.is_activated(view)
expected_path = os.path.join(
extendee_spec.prefix, '.spack', 'extensions.yaml')
assert (view.extensions_layout.extension_file_path(extendee_spec) ==
expected_path)
def test_remove_extensions_ordered(install_mockery, mock_fetch, tmpdir):
view_dir = str(tmpdir.join('view'))
layout = DirectoryLayout(view_dir)
view = YamlFilesystemView(view_dir, layout)
e2 = Spec('extension2').concretized()
e2.package.do_install()
view.add_specs(e2)
e1 = e2['extension1']
view.remove_specs(e1, e2)
| 31.977273 | 78 | 0.729922 |
acf93a4aa704eb7cd11477f858f526fc01a885ac | 363 | py | Python | code/CIRC05.py | WCRSyyc/ardx | 09b932f69c21f4c257641a086cec8669ee121e63 | [
"CC-BY-4.0"
] | 1 | 2017-05-02T10:15:27.000Z | 2017-05-02T10:15:27.000Z | code/CIRC05p-code.txt | WCRSyyc/ardx | 09b932f69c21f4c257641a086cec8669ee121e63 | [
"CC-BY-4.0"
] | null | null | null | code/CIRC05p-code.txt | WCRSyyc/ardx | 09b932f69c21f4c257641a086cec8669ee121e63 | [
"CC-BY-4.0"
] | null | null | null | # CIRC05 Shift register
import board
import adafruit_74hc595
import busio
import digitalio
import time
spi = busio.SPI(board.SCK, MOSI=board.MOSI)
latch_pin = digitalio.DigitalInOut(board.D5)
sr = adafruit_74hc595.ShiftRegister74HC595(spi, latch_pin)
pin1 = sr.get_pin(1)
while True:
pin1.value = True
time.sleep(1)
pin1.value = False
time.sleep(1)
| 16.5 | 58 | 0.763085 |
acf93a7ad5ec52d7fcd3758588f799c797d47b79 | 46,615 | py | Python | src/genie/libs/parser/nxos/tests/ShowIpOspfDatabaseOpaqueAreaDetail/cli/equal/golden_output_1_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 204 | 2018-06-27T00:55:27.000Z | 2022-03-06T21:12:18.000Z | src/genie/libs/parser/nxos/tests/ShowIpOspfDatabaseOpaqueAreaDetail/cli/equal/golden_output_1_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 468 | 2018-06-19T00:33:18.000Z | 2022-03-31T23:23:35.000Z | src/genie/libs/parser/nxos/tests/ShowIpOspfDatabaseOpaqueAreaDetail/cli/equal/golden_output_1_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 309 | 2019-01-16T20:21:07.000Z | 2022-03-30T12:56:41.000Z |
expected_output = {
'vrf':
{'default':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.0':
{'database':
{'lsa_types':
{10:
{'lsa_type': 10,
'lsas':
{'10.1.0.0 10.4.1.1':
{'adv_router': '10.4.1.1',
'lsa_id': '10.1.0.0',
'ospfv2':
{'body':
{'opaque': {}},
'header':
{'adv_router': '10.4.1.1',
'age': 385,
'checksum': '0x54d3',
'fragment_number': 0,
'length': 28,
'lsa_id': '10.1.0.0',
'mpls_te_router_id': '10.4.1.1',
'num_links': 0,
'opaque_id': 0,
'opaque_type': 1,
'option': '0x20',
'option_desc': 'No TOS-capability, DC',
'seq_num': '0x80000003',
'type': 10}}},
'10.1.0.0 10.100.2.2':
{'adv_router': '10.100.2.2',
'lsa_id': '10.1.0.0',
'ospfv2':
{'body':
{'opaque': {}},
'header':
{'adv_router': '10.100.2.2',
'age': 1612,
'checksum': '0x1c22',
'fragment_number': 0,
'length': 28,
'lsa_id': '10.1.0.0',
'mpls_te_router_id': '10.100.2.2',
'num_links': 0,
'opaque_id': 0,
'opaque_type': 1,
'option': '0x2',
'option_desc': 'No TOS-capability, No DC',
'seq_num': '0x80000003',
'type': 10}}},
'10.1.0.0 10.36.3.3':
{'adv_router': '10.36.3.3',
'lsa_id': '10.1.0.0',
'ospfv2':
{'body':
{'opaque': {}},
'header':
{'adv_router': '10.36.3.3',
'age': 113,
'checksum': '0x5cbb',
'fragment_number': 0,
'length': 28,
'lsa_id': '10.1.0.0',
'mpls_te_router_id': '10.36.3.3',
'num_links': 0,
'opaque_id': 0,
'opaque_type': 1,
'option': '0x20',
'option_desc': 'No TOS-capability, DC',
'seq_num': '0x80000003',
'type': 10}}},
'10.1.0.1 10.4.1.1':
{'adv_router': '10.4.1.1',
'lsa_id': '10.1.0.1',
'ospfv2':
{'body':
{'opaque':
{'link_tlvs':
{1:
{'admin_group': '0x0',
'link_id': '10.1.4.4',
'link_name': 'broadcast network',
'link_type': 2,
'local_if_ipv4_addrs':
{'10.1.4.1': {}},
'max_bandwidth': 125000000,
'max_reservable_bandwidth': 93750000,
'remote_if_ipv4_addrs':
{'0.0.0.0': {}},
'te_metric': 1,
'unknown_tlvs':
{1:
{'length': 4,
'type': 32770,
'value': '00 00 00 01'}},
'unreserved_bandwidths':
{'0 93750000':
{'priority': 0,
'unreserved_bandwidth': 93750000},
'1 93750000':
{'priority': 1,
'unreserved_bandwidth': 93750000},
'2 93750000':
{'priority': 2,
'unreserved_bandwidth': 93750000},
'3 93750000':
{'priority': 3,
'unreserved_bandwidth': 93750000},
'4 93750000':
{'priority': 4,
'unreserved_bandwidth': 93750000},
'5 93750000':
{'priority': 5,
'unreserved_bandwidth': 93750000},
'6 93750000':
{'priority': 6,
'unreserved_bandwidth': 93750000},
'7 93750000':
{'priority': 7,
'unreserved_bandwidth': 93750000}}}}}},
'header':
{'adv_router': '10.4.1.1',
'age': 385,
'checksum': '0x6387',
'fragment_number': 1,
'length': 124,
'lsa_id': '10.1.0.1',
'num_links': 1,
'opaque_id': 1,
'opaque_type': 1,
'option': '0x20',
'option_desc': 'No TOS-capability, DC',
'seq_num': '0x80000003',
'type': 10}}},
'10.1.0.2 10.4.1.1':
{'adv_router': '10.4.1.1',
'lsa_id': '10.1.0.2',
'ospfv2':
{'body':
{'opaque':
{'link_tlvs':
{1:
{'admin_group': '0x0',
'link_id': '10.1.2.1',
'link_name': 'broadcast network',
'link_type': 2,
'local_if_ipv4_addrs': {'10.1.2.1': {}},
'max_bandwidth': 125000000,
'max_reservable_bandwidth': 93750000,
'remote_if_ipv4_addrs':
{'0.0.0.0': {}},
'te_metric': 1,
'unknown_tlvs':
{1:
{'length': 4,
'type': 32770,
'value': '00 00 00 01'}},
'unreserved_bandwidths':
{'0 93750000':
{'priority': 0,
'unreserved_bandwidth': 93750000},
'1 93750000':
{'priority': 1,
'unreserved_bandwidth': 93750000},
'2 93750000':
{'priority': 2,
'unreserved_bandwidth': 93750000},
'3 93750000':
{'priority': 3,
'unreserved_bandwidth': 93750000},
'4 93750000':
{'priority': 4,
'unreserved_bandwidth': 93750000},
'5 93750000':
{'priority': 5,
'unreserved_bandwidth': 93750000},
'6 93750000':
{'priority': 6,
'unreserved_bandwidth': 93750000},
'7 93750000':
{'priority': 7,
'unreserved_bandwidth': 93750000}}}}}},
'header':
{'adv_router': '10.4.1.1',
'age': 385,
'checksum': '0xb23e',
'fragment_number': 2,
'length': 124,
'lsa_id': '10.1.0.2',
'num_links': 1,
'opaque_id': 2,
'opaque_type': 1,
'option': '0x20',
'option_desc': 'No TOS-capability, DC',
'seq_num': '0x80000003',
'type': 10}}},
'10.1.0.37 10.100.2.2':
{'adv_router': '10.100.2.2',
'lsa_id': '10.1.0.37',
'ospfv2':
{'body':
{'opaque':
{'link_tlvs':
{1:
{'admin_group': '0x0',
'link_id': '10.2.3.3',
'link_name': 'broadcast network',
'link_type': 2,
'local_if_ipv4_addrs':
{'10.2.3.2': {}},
'max_bandwidth': 125000000,
'max_reservable_bandwidth': 93750000,
'remote_if_ipv4_addrs':
{'0.0.0.0': {}},
'te_metric': 1,
'unreserved_bandwidths':
{'0 93750000':
{'priority': 0,
'unreserved_bandwidth': 93750000},
'1 93750000':
{'priority': 1,
'unreserved_bandwidth': 93750000},
'2 93750000':
{'priority': 2,
'unreserved_bandwidth': 93750000},
'3 93750000':
{'priority': 3,
'unreserved_bandwidth': 93750000},
'4 93750000':
{'priority': 4,
'unreserved_bandwidth': 93750000},
'5 93750000':
{'priority': 5,
'unreserved_bandwidth': 93750000},
'6 93750000':
{'priority': 6,
'unreserved_bandwidth': 93750000},
'7 93750000':
{'priority': 7,
'unreserved_bandwidth': 93750000}}}}}},
'header':
{'adv_router': '10.100.2.2',
'age': 1202,
'checksum': '0xe492',
'fragment_number': 37,
'length': 116,
'lsa_id': '10.1.0.37',
'num_links': 1,
'opaque_id': 37,
'opaque_type': 1,
'option': '0x2',
'option_desc': 'No TOS-capability, No DC',
'seq_num': '0x80000004',
'type': 10}}},
'10.1.0.38 10.100.2.2':
{'adv_router': '10.100.2.2',
'lsa_id': '10.1.0.38',
'ospfv2':
{'body':
{'opaque':
{'link_tlvs':
{1:
{'admin_group': '0x0',
'link_id': '10.2.4.4',
'link_name': 'broadcast network',
'link_type': 2,
'local_if_ipv4_addrs':
{'10.2.4.2': {}},
'max_bandwidth': 125000000,
'max_reservable_bandwidth': 93750000,
'remote_if_ipv4_addrs':
{'0.0.0.0': {}},
'te_metric': 1,
'unreserved_bandwidths':
{'0 93750000':
{'priority': 0,
'unreserved_bandwidth': 93750000},
'1 93750000':
{'priority': 1,
'unreserved_bandwidth': 93750000},
'2 93750000':
{'priority': 2,
'unreserved_bandwidth': 93750000},
'3 93750000':
{'priority': 3,
'unreserved_bandwidth': 93750000},
'4 93750000':
{'priority': 4,
'unreserved_bandwidth': 93750000},
'5 93750000':
{'priority': 5,
'unreserved_bandwidth': 93750000},
'6 93750000':
{'priority': 6,
'unreserved_bandwidth': 93750000},
'7 93750000':
{'priority': 7,
'unreserved_bandwidth': 93750000}}}}}},
'header':
{'adv_router': '10.100.2.2',
'age': 1191,
'checksum': '0x2350',
'fragment_number': 38,
'length': 116,
'lsa_id': '10.1.0.38',
'num_links': 1,
'opaque_id': 38,
'opaque_type': 1,
'option': '0x2',
'option_desc': 'No TOS-capability, No DC',
'seq_num': '0x80000004',
'type': 10}}},
'10.1.0.39 10.100.2.2':
{'adv_router': '10.100.2.2',
'lsa_id': '10.1.0.39',
'ospfv2':
{'body':
{'opaque':
{'link_tlvs':
{1:
{'admin_group': '0x0',
'link_id': '10.1.2.1',
'link_name': 'broadcast network',
'link_type': 2,
'local_if_ipv4_addrs':
{'10.1.2.2': {}},
'max_bandwidth': 125000000,
'max_reservable_bandwidth': 93750000,
'remote_if_ipv4_addrs':
{'0.0.0.0': {}},
'te_metric': 1,
'unreserved_bandwidths':
{'0 93750000':
{'priority': 0,
'unreserved_bandwidth': 93750000},
'1 93750000':
{'priority': 1,
'unreserved_bandwidth': 93750000},
'2 93750000':
{'priority': 2,
'unreserved_bandwidth': 93750000},
'3 93750000':
{'priority': 3,
'unreserved_bandwidth': 93750000},
'4 93750000':
{'priority': 4,
'unreserved_bandwidth': 93750000},
'5 93750000':
{'priority': 5,
'unreserved_bandwidth': 93750000},
'6 93750000':
{'priority': 6,
'unreserved_bandwidth': 93750000},
'7 93750000':
{'priority': 7,
'unreserved_bandwidth': 93750000}}}}}},
'header':
{'adv_router': '10.100.2.2',
'age': 1191,
'checksum': '0x4239',
'fragment_number': 39,
'length': 116,
'lsa_id': '10.1.0.39',
'num_links': 1,
'opaque_id': 39,
'opaque_type': 1,
'option': '0x2',
'option_desc': 'No TOS-capability, No DC',
'seq_num': '0x80000004',
'type': 10}}},
'10.1.0.4 10.36.3.3':
{'adv_router': '10.36.3.3',
'lsa_id': '10.1.0.4',
'ospfv2':
{'body':
{'opaque':
{'link_tlvs':
{1:
{'admin_group': '0x0',
'link_id': '10.3.4.4',
'link_name': 'broadcast network',
'link_type': 2,
'local_if_ipv4_addrs':
{'10.3.4.3': {}},
'max_bandwidth': 125000000,
'max_reservable_bandwidth': 93750000,
'remote_if_ipv4_addrs':
{'0.0.0.0': {}},
'te_metric': 1,
'unknown_tlvs':
{1:
{'length': 4,
'type': 32770,
'value': '00 00 00 01'},
2: {'length': 32,
'type': 32771,
'value': '00 00 00 00 00 0 0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00'}},
'unreserved_bandwidths':
{'0 93750000':
{'priority': 0,
'unreserved_bandwidth': 93750000},
'1 93750000':
{'priority': 1,
'unreserved_bandwidth': 93750000},
'2 93750000':
{'priority': 2,
'unreserved_bandwidth': 93750000},
'3 93750000':
{'priority': 3,
'unreserved_bandwidth': 93750000},
'4 93750000':
{'priority': 4,
'unreserved_bandwidth': 93750000},
'5 93750000':
{'priority': 5,
'unreserved_bandwidth': 93750000},
'6 93750000':
{'priority': 6,
'unreserved_bandwidth': 93750000},
'7 93750000':
{'priority': 7,
'unreserved_bandwidth': 93750000}}}}}},
'header':
{'adv_router': '10.36.3.3',
'age': 113,
'checksum': '0x8f5e',
'fragment_number': 4,
'length': 160,
'lsa_id': '10.1.0.4',
'num_links': 1,
'opaque_id': 4,
'opaque_type': 1,
'option': '0x20',
'option_desc': 'No TOS-capability, DC',
'seq_num': '0x80000003',
'type': 10}}},
'10.1.0.6 10.36.3.3':
{'adv_router': '10.36.3.3',
'lsa_id': '10.1.0.6',
'ospfv2':
{'body':
{'opaque':
{'link_tlvs':
{1:
{'admin_group': '0x0',
'link_id': '10.2.3.3',
'link_name': 'broadcast network',
'link_type': 2,
'local_if_ipv4_addrs':
{'10.2.3.3': {}},
'max_bandwidth': 125000000,
'max_reservable_bandwidth': 93750000,
'remote_if_ipv4_addrs':
{'0.0.0.0': {}},
'te_metric': 1,
'unknown_tlvs':
{1:
{'length': 4,
'type': 32770,
'value': '00 00 00 01'},
2: {'length': 32,
'type': 32771,
'value': '00 00 00 00 00 0 0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00'}},
'unreserved_bandwidths':
{'0 93750000':
{'priority': 0,
'unreserved_bandwidth': 93750000},
'1 93750000':
{'priority': 1,
'unreserved_bandwidth': 93750000},
'2 93750000':
{'priority': 2,
'unreserved_bandwidth': 93750000},
'3 93750000':
{'priority': 3,
'unreserved_bandwidth': 93750000},
'4 93750000':
{'priority': 4,
'unreserved_bandwidth': 93750000},
'5 93750000':
{'priority': 5,
'unreserved_bandwidth': 93750000},
'6 93750000':
{'priority': 6,
'unreserved_bandwidth': 93750000},
'7 93750000':
{'priority': 7,
'unreserved_bandwidth': 93750000}}}}}},
'header':
{'adv_router': '10.36.3.3',
'age': 113,
'checksum': '0x03ed',
'fragment_number': 6,
'length': 160,
'lsa_id': '10.1.0.6',
'num_links': 1,
'opaque_id': 6,
'opaque_type': 1,
'option': '0x20',
'option_desc': 'No TOS-capability, DC',
'seq_num': '0x80000003',
'type': 10}}}}}}}}}}}}}}}}
| 91.223092 | 194 | 0.14549 |
acf93b25b4fe6760b9d4d463256331cbf195383f | 16,051 | py | Python | env/Lib/site-packages/jupyter_server/pytest_plugin.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 1 | 2022-03-17T12:56:14.000Z | 2022-03-17T12:56:14.000Z | env/Lib/site-packages/jupyter_server/pytest_plugin.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | null | null | null | env/Lib/site-packages/jupyter_server/pytest_plugin.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 1 | 2022-03-28T09:19:34.000Z | 2022-03-28T09:19:34.000Z | # Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import io
import json
import logging
import os
import shutil
import sys
import urllib.parse
from binascii import hexlify
import jupyter_core.paths
import nbformat
import pytest
import tornado
from tornado.escape import url_escape
from traitlets.config import Config
from jupyter_server.extension import serverextension
from jupyter_server.serverapp import ServerApp
from jupyter_server.services.contents.filemanager import FileContentsManager
from jupyter_server.services.contents.largefilemanager import LargeFileManager
from jupyter_server.utils import url_path_join
# List of dependencies needed for this plugin.
pytest_plugins = [
    "pytest_tornasync",
    # Once the chunk below moves to Jupyter Core, we'll uncomment
    # this plugin and use the fixtures directly from Jupyter Core.
    # "jupyter_core.pytest_plugin"
]
import asyncio
# NOTE(review): presumably the default Proactor loop on Windows is incompatible
# with the Tornado-based server used here, hence forcing the selector loop — confirm.
if os.name == "nt" and sys.version_info >= (3, 7):
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
# ============ Move to Jupyter Core =============
def mkdir(tmp_path, *parts):
    """Create (if needed) and return the directory ``tmp_path/parts[0]/.../parts[-1]``.

    Parameters
    ----------
    tmp_path : pathlib.Path
        Base temporary directory.
    *parts : str
        Path components joined onto ``tmp_path``.

    Returns
    -------
    pathlib.Path
        The (now existing) joined directory path.
    """
    path = tmp_path.joinpath(*parts)
    # exist_ok=True avoids the TOCTOU race of the previous exists()-then-mkdir()
    # pattern and is a no-op when the directory already exists.
    path.mkdir(parents=True, exist_ok=True)
    return path
@pytest.fixture
def jp_home_dir(tmp_path):
    """Provides a temporary HOME directory (exported as ``HOME`` by ``jp_environ``)."""
    return mkdir(tmp_path, "home")
@pytest.fixture
def jp_data_dir(tmp_path):
    """Provides a temporary Jupyter data directory (``JUPYTER_DATA_DIR`` via ``jp_environ``)."""
    return mkdir(tmp_path, "data")
@pytest.fixture
def jp_config_dir(tmp_path):
    """Provides a temporary Jupyter config directory (``JUPYTER_CONFIG_DIR`` via ``jp_environ``)."""
    return mkdir(tmp_path, "config")
@pytest.fixture
def jp_runtime_dir(tmp_path):
    """Provides a temporary Jupyter runtime directory (``JUPYTER_RUNTIME_DIR`` via ``jp_environ``)."""
    return mkdir(tmp_path, "runtime")
@pytest.fixture
def jp_system_jupyter_path(tmp_path):
    """Provides a temporary system Jupyter path (patched into ``jupyter_core.paths.SYSTEM_JUPYTER_PATH``)."""
    return mkdir(tmp_path, "share", "jupyter")
@pytest.fixture
def jp_env_jupyter_path(tmp_path):
    """Provides a temporary env Jupyter path (patched into ``jupyter_core.paths.ENV_JUPYTER_PATH``)."""
    return mkdir(tmp_path, "env", "share", "jupyter")
@pytest.fixture
def jp_system_config_path(tmp_path):
    """Provides a temporary system config path (patched into ``jupyter_core.paths.SYSTEM_CONFIG_PATH``)."""
    return mkdir(tmp_path, "etc", "jupyter")
@pytest.fixture
def jp_env_config_path(tmp_path):
    """Provides a temporary env config path (patched into ``jupyter_core.paths.ENV_CONFIG_PATH``)."""
    return mkdir(tmp_path, "env", "etc", "jupyter")
@pytest.fixture
def jp_environ(
    monkeypatch,
    tmp_path,
    jp_home_dir,
    jp_data_dir,
    jp_config_dir,
    jp_runtime_dir,
    jp_system_jupyter_path,
    jp_system_config_path,
    jp_env_jupyter_path,
    jp_env_config_path,
):
    """Configures a temporary, isolated Jupyter environment.

    Redirects the Jupyter-specific environment variables (HOME, PYTHONPATH,
    JUPYTER_CONFIG_DIR, JUPYTER_DATA_DIR, JUPYTER_RUNTIME_DIR) and the
    ``jupyter_core.paths`` module-level path lists at the per-test temporary
    directories so tests never touch the real user/system configuration.
    """
    monkeypatch.setenv("HOME", str(jp_home_dir))
    monkeypatch.setenv("PYTHONPATH", os.pathsep.join(sys.path))
    # monkeypatch.setenv("JUPYTER_NO_CONFIG", "1")
    monkeypatch.setenv("JUPYTER_CONFIG_DIR", str(jp_config_dir))
    monkeypatch.setenv("JUPYTER_DATA_DIR", str(jp_data_dir))
    monkeypatch.setenv("JUPYTER_RUNTIME_DIR", str(jp_runtime_dir))
    # Patch the module-level path lists directly; these are read at lookup
    # time by jupyter_core, so setattr (not setenv) is required here.
    monkeypatch.setattr(jupyter_core.paths, "SYSTEM_JUPYTER_PATH", [str(jp_system_jupyter_path)])
    monkeypatch.setattr(jupyter_core.paths, "ENV_JUPYTER_PATH", [str(jp_env_jupyter_path)])
    monkeypatch.setattr(jupyter_core.paths, "SYSTEM_CONFIG_PATH", [str(jp_system_config_path)])
    monkeypatch.setattr(jupyter_core.paths, "ENV_CONFIG_PATH", [str(jp_env_config_path)])
# ================= End: Move to Jupyter core ================
@pytest.fixture
def jp_server_config():
"""Allows tests to setup their specific configuration values. """
return {}
@pytest.fixture
def jp_root_dir(tmp_path):
"""Provides a temporary Jupyter root directory value."""
return mkdir(tmp_path, "root_dir")
@pytest.fixture
def jp_template_dir(tmp_path):
"""Provides a temporary Jupyter templates directory value."""
return mkdir(tmp_path, "templates")
@pytest.fixture
def jp_argv():
"""Allows tests to setup specific argv values. """
return []
@pytest.fixture
def jp_extension_environ(jp_env_config_path, monkeypatch):
"""Monkeypatch a Jupyter Extension's config path into each test's environment variable"""
monkeypatch.setattr(serverextension, "ENV_CONFIG_PATH", [str(jp_env_config_path)])
@pytest.fixture
def jp_http_port(http_server_port):
"""Returns the port value from the http_server_port fixture. """
return http_server_port[-1]
@pytest.fixture
def jp_nbconvert_templates(jp_data_dir):
    """Copy the installed nbconvert templates into the temporary data directory."""
    # Resolve the real nbconvert template directory *before* the jp_environ
    # fixture monkeypatches the Jupyter path variables.
    candidates = jupyter_core.paths.jupyter_path("nbconvert", "templates")
    source = next((p for p in candidates if os.path.exists(p)), None)
    # Mirror the templates into the temporary data dir, if any were found.
    if source:
        shutil.copytree(source, str(jp_data_dir / "nbconvert" / "templates"))
@pytest.fixture
def jp_logging_stream():
    """StringIO stream intended to be used by the core
    Jupyter ServerApp logger's default StreamHandler. This
    helps avoid collision with stdout which is hijacked
    by Pytest.
    """
    logging_stream = io.StringIO()
    yield logging_stream
    # After the test, surface anything the server logged so it shows up
    # in pytest's captured output.
    output = logging_stream.getvalue()
    if output:
        print(output)
    # NOTE: the original ended with `return output`; a return value after
    # `yield` in a generator fixture is never consumed by pytest, so it
    # has been removed as dead code.
@pytest.fixture(scope="function")
def jp_configurable_serverapp(
    jp_nbconvert_templates, # this fixture must precede jp_environ
    jp_environ,
    jp_server_config,
    jp_argv,
    jp_http_port,
    jp_base_url,
    tmp_path,
    jp_root_dir,
    io_loop,
    jp_logging_stream,
):
    """Starts a Jupyter Server instance based on
    the provided configuration values.
    The fixture is a factory; it can be called like
    a function inside a unit test. Here's a basic
    example of how to use this fixture:
    .. code-block:: python
        def my_test(jp_configurable_serverapp):
            app = jp_configurable_serverapp(...)
            ...
    """
    # ServerApp is a singleton; clear any instance left over from a prior test.
    ServerApp.clear_instance()
    def _configurable_serverapp(
        config=jp_server_config,
        base_url=jp_base_url,
        argv=jp_argv,
        environ=jp_environ,
        http_port=jp_http_port,
        tmp_path=tmp_path,
        root_dir=jp_root_dir,
        **kwargs
    ):
        c = Config(config)
        # In-memory signature DB keeps notebook trust state out of the temp dirs.
        c.NotebookNotary.db_file = ":memory:"
        # Random 8-hex-char auth token for this server instance.
        token = hexlify(os.urandom(4)).decode("ascii")
        app = ServerApp.instance(
            # Set the log level to debug for testing purposes
            log_level="DEBUG",
            port=http_port,
            port_retries=0,
            open_browser=False,
            root_dir=str(root_dir),
            base_url=base_url,
            config=c,
            allow_root=True,
            token=token,
            **kwargs
        )
        # Disable signal handling; it is not usable inside pytest workers.
        app.init_signal = lambda: None
        app.log.propagate = True
        app.log.handlers = []
        # Initialize app without httpserver
        app.initialize(argv=argv, new_httpserver=False)
        # Reroute all logging StreamHandlers away from stdin/stdout since pytest hijacks
        # these streams and closes them at unfortunate times.
        stream_handlers = [h for h in app.log.handlers if isinstance(h, logging.StreamHandler)]
        for handler in stream_handlers:
            handler.setStream(jp_logging_stream)
        # NOTE(review): handlers are cleared again right after being rerouted —
        # presumably intentional to silence the app logger entirely; confirm.
        app.log.propagate = True
        app.log.handlers = []
        # Start app without ioloop
        app.start_app()
        return app
    return _configurable_serverapp
@pytest.fixture
def jp_ensure_app_fixture(request):
    """Ensures that the 'app' fixture used by pytest-tornasync
    is set to `jp_web_app`, the Tornado Web Application returned
    by the ServerApp in Jupyter Server, provided by the jp_web_app
    fixture in this module.
    Note, this hardcodes the `app_fixture` option from
    pytest-tornasync to `jp_web_app`. If this value is configured
    to something other than the default, it will raise an exception.
    """
    app_option = request.config.getoption("app_fixture")
    # Only the default ("app") or our own fixture name are acceptable.
    if app_option not in ["app", "jp_web_app"]:
        raise Exception(
            "jp_serverapp requires the `app-fixture` option "
            "to be set to 'jp_web_app`. Try rerunning the "
            "current tests with the option `--app-fixture "
            "jp_web_app`."
        )
    elif app_option == "app":
        # Manually set the app_fixture to `jp_web_app` if it's
        # not set already.
        request.config.option.app_fixture = "jp_web_app"
@pytest.fixture(scope="function")
def jp_serverapp(jp_ensure_app_fixture, jp_server_config, jp_argv, jp_configurable_serverapp):
    """Starts a Jupyter Server instance based on the established configuration values.

    Teardown removes the server-info and browser-open files the app wrote.
    """
    app = jp_configurable_serverapp(config=jp_server_config, argv=jp_argv)
    yield app
    app.remove_server_info_file()
    app.remove_browser_open_files()
@pytest.fixture
def jp_web_app(jp_serverapp):
    """The Tornado web application; needed as the `app` fixture by pytest-tornasync."""
    return jp_serverapp.web_app
@pytest.fixture
def jp_auth_header(jp_serverapp):
    """Returns an Authorization header carrying the running server's token."""
    return {"Authorization": "token {token}".format(token=jp_serverapp.token)}
@pytest.fixture
def jp_base_url():
    """Returns the base url to use for the test (contains an escaped '@' on purpose)."""
    return "/a%40b/"
@pytest.fixture
def jp_fetch(jp_serverapp, http_server_client, jp_auth_header, jp_base_url):
    """Sends an (asynchronous) HTTP request to a test server.
    The fixture is a factory; it can be called like
    a function inside a unit test. Here's a basic
    example of how use this fixture:
    .. code-block:: python
        async def my_test(jp_fetch):
            response = await jp_fetch("api", "spec.yaml")
            ...
    """
    def client_fetch(*parts, headers=None, params=None, **kwargs):
        if not headers:
            headers = {}
        if not params:
            params = {}
        # Handle URL strings: escape path segments, then prefix the base url.
        path_url = url_escape(url_path_join(*parts), plus=False)
        base_path_url = url_path_join(jp_base_url, path_url)
        params_url = urllib.parse.urlencode(params)
        url = base_path_url + "?" + params_url
        # Add auth keys to header
        headers.update(jp_auth_header)
        # Make request.
        return http_server_client.fetch(url, headers=headers, request_timeout=20, **kwargs)
    return client_fetch
@pytest.fixture
def jp_ws_fetch(jp_serverapp, http_server_client, jp_auth_header, jp_http_port, jp_base_url):
    """Sends a websocket request to a test server.
    The fixture is a factory; it can be called like
    a function inside a unit test. Here's a basic
    example of how use this fixture:
    .. code-block:: python
        async def my_test(jp_fetch, jp_ws_fetch):
            # Start a kernel
            r = await jp_fetch(
                'api', 'kernels',
                method='POST',
                body=json.dumps({
                    'name': "python3"
                })
            )
            kid = json.loads(r.body.decode())['id']
            # Open a websocket connection.
            ws = await jp_ws_fetch(
                'api', 'kernels', kid, 'channels'
            )
            ...
    """
    def client_fetch(*parts, headers=None, params=None, **kwargs):
        if not headers:
            headers = {}
        if not params:
            params = {}
        # Handle URL strings: build a ws:// URL against the test server's port.
        path_url = url_escape(url_path_join(*parts), plus=False)
        base_path_url = url_path_join(jp_base_url, path_url)
        urlparts = urllib.parse.urlparse("ws://localhost:{}".format(jp_http_port))
        urlparts = urlparts._replace(path=base_path_url, query=urllib.parse.urlencode(params))
        url = urlparts.geturl()
        # Add auth keys to header
        headers.update(jp_auth_header)
        # Make request.
        req = tornado.httpclient.HTTPRequest(url, headers=headers, connect_timeout=120)
        return tornado.websocket.websocket_connect(req)
    return client_fetch
# Sample resource-file content used by the jp_kernelspecs fixture below.
some_resource = u"The very model of a modern major general"
# Minimal kernelspec: a kernel that simply cats its connection file.
sample_kernel_json = {
    "argv": ["cat", "{connection_file}"],
    "display_name": "Test kernel",
}
@pytest.fixture
def jp_kernelspecs(jp_data_dir):
    """Configures some sample kernelspecs in the Jupyter data directory.

    Creates "sample", "sample 2" and a deliberately broken "bad" kernelspec
    (whose argv points at a non-existent executable).
    """
    spec_names = ["sample", "sample 2", "bad"]
    for name in spec_names:
        sample_kernel_dir = jp_data_dir.joinpath("kernels", name)
        sample_kernel_dir.mkdir(parents=True)
        # Create kernel json file
        sample_kernel_file = sample_kernel_dir.joinpath("kernel.json")
        kernel_json = sample_kernel_json.copy()
        if name == "bad":
            kernel_json["argv"] = ["non_existent_path"]
        sample_kernel_file.write_text(json.dumps(kernel_json))
        # Create resources text
        sample_kernel_resources = sample_kernel_dir.joinpath("resource.txt")
        sample_kernel_resources.write_text(some_resource)
@pytest.fixture(params=[True, False])
def jp_contents_manager(request, tmp_path):
    """Returns a FileContentsManager; parametrized over use_atomic_writing True/False."""
    return FileContentsManager(root_dir=str(tmp_path), use_atomic_writing=request.param)
@pytest.fixture
def jp_large_contents_manager(tmp_path):
    """Returns a LargeFileManager instance rooted at the temp directory."""
    return LargeFileManager(root_dir=str(tmp_path))
@pytest.fixture
def jp_create_notebook(jp_root_dir):
    """Factory fixture: returns a function that writes an empty v4 notebook
    at the given path (relative to the server's root directory)."""
    def inner(nbpath):
        nbpath = jp_root_dir.joinpath(nbpath)
        # Check that the notebook has the correct file extension.
        if nbpath.suffix != ".ipynb":
            raise Exception("File extension for notebook must be .ipynb")
        # If the notebook path has a parent directory, make sure it's created.
        parent = nbpath.parent
        parent.mkdir(parents=True, exist_ok=True)
        # Create a notebook string and write to file.
        nb = nbformat.v4.new_notebook()
        nbtext = nbformat.writes(nb, version=4)
        nbpath.write_text(nbtext)
    return inner
@pytest.fixture(autouse=True)
def jp_server_cleanup():
    """Autouse teardown: clear the ServerApp singleton after every test."""
    yield
    ServerApp.clear_instance()
@pytest.fixture
def jp_cleanup_subprocesses(jp_serverapp):
    """Clean up subprocesses started by a Jupyter Server, i.e. kernels and terminal.

    Returns an async callable tests await during teardown. Kernels are first
    asked to shut down gracefully (15s), then forcibly (15s) before giving up.
    Errors are printed rather than raised so teardown never fails the test.
    """
    async def _():
        terminal_cleanup = jp_serverapp.web_app.settings["terminal_manager"].terminate_all
        kernel_cleanup = jp_serverapp.kernel_manager.shutdown_all
        async def kernel_cleanup_steps():
            # Try a graceful shutdown with a timeout
            try:
                await asyncio.wait_for(kernel_cleanup(), timeout=15.0)
            except asyncio.TimeoutError:
                # Now force a shutdown
                try:
                    await asyncio.wait_for(kernel_cleanup(now=True), timeout=15.0)
                except asyncio.TimeoutError:
                    print(Exception("Kernel never shutdown!"))
            except Exception as e:
                print(e)
        # BUGFIX: the original branched on iscoroutinefunction(terminal_cleanup)
        # but both branches were the identical `await terminal_cleanup()`, so a
        # synchronous cleanup ran and then spuriously printed a TypeError from
        # awaiting its (non-awaitable) return value. Call once and await only
        # if a coroutine actually came back.
        try:
            result = terminal_cleanup()
            if asyncio.iscoroutine(result):
                await result
        except Exception as e:
            print(e)
        if asyncio.iscoroutinefunction(kernel_cleanup):
            await kernel_cleanup_steps()
        else:
            try:
                kernel_cleanup()
            except Exception as e:
                print(e)
    return _
| 31.349609 | 97 | 0.674475 |
acf93b69d0bba25193542152f2a34dd897c6cf2b | 225 | py | Python | src/modules/polynomial/chebyshev.py | ychnlgy/TIMIT-diarization | 1fbf410cbb643de60201d2d351f1654273885674 | [
"MIT"
] | 1 | 2021-08-19T14:28:45.000Z | 2021-08-19T14:28:45.000Z | src/modules/polynomial/chebyshev.py | ychnlgy/TIMIT-diarization | 1fbf410cbb643de60201d2d351f1654273885674 | [
"MIT"
] | null | null | null | src/modules/polynomial/chebyshev.py | ychnlgy/TIMIT-diarization | 1fbf410cbb643de60201d2d351f1654273885674 | [
"MIT"
] | 1 | 2022-03-11T07:20:06.000Z | 2022-03-11T07:20:06.000Z | import torch, math
def get_nodes(n, a, b):
    """Return the n Chebyshev nodes of the interval [a, b] as a 1-D float tensor."""
    ks = torch.arange(1, n + 1).float()
    return chebyshev_node(ks, n, a, b)
# === PRIVATE ===
def chebyshev_node(k, n, a, b):
    """Map 1-based node index/indices k to Chebyshev nodes on [a, b]."""
    midpoint = 0.5 * (a + b)
    half_width = 0.5 * (b - a)
    angles = (2 * k - 1) * math.pi / (2 * n)
    return midpoint + half_width * torch.cos(angles)
| 22.5 | 64 | 0.591111 |
acf93b9d3cd50af75d10a0d16f50339f27022dce | 20,931 | py | Python | stonesoup/models/measurement/tests/test_models.py | mgomesborges/Stone-Soup | 39c7f02ce11e10c9b3c612ad359f6d8bca495266 | [
"MIT"
] | 1 | 2019-12-26T14:55:03.000Z | 2019-12-26T14:55:03.000Z | stonesoup/models/measurement/tests/test_models.py | mgomesborges/Stone-Soup | 39c7f02ce11e10c9b3c612ad359f6d8bca495266 | [
"MIT"
] | null | null | null | stonesoup/models/measurement/tests/test_models.py | mgomesborges/Stone-Soup | 39c7f02ce11e10c9b3c612ad359f6d8bca495266 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
import pytest
from pytest import approx
from scipy.stats import multivariate_normal
from ..nonlinear import (
CartesianToElevationBearingRange, CartesianToBearingRange,
CartesianToElevationBearing, Cartesian2DToBearing, CartesianToBearingRangeRate,
CartesianToElevationBearingRangeRate)
from ...base import ReversibleModel
from ....functions import jacobian as compute_jac
from ....functions import pol2cart
from ....functions import rotz, rotx, roty, cart2sphere
from ....types.angle import Bearing, Elevation
from ....types.array import StateVector, StateVectors
from ....types.state import State, CovarianceMatrix
def h1d(state_vector, pos_map, translation_offset, rotation_offset):
    """Reference measurement function: 2D Cartesian position -> bearing only.

    ``pos_map`` is accepted for signature parity with the other helpers but
    unused here: the x/y components are taken from indices 0 and 1 directly.
    """
    # Sensor-relative position, padded with z=0 for the 3D rotation below.
    xyz = [[state_vector[0, 0] - translation_offset[0, 0]],
           [state_vector[1, 0] - translation_offset[1, 0]],
           [0]]
    # Get rotation matrix (inverse of the sensor orientation offsets).
    theta_x, theta_y, theta_z = - rotation_offset[:, 0]
    rotation_matrix = rotz(theta_z) @ roty(theta_y) @ rotx(theta_x)
    xyz_rot = rotation_matrix @ xyz
    _, phi, _ = cart2sphere(*xyz_rot)
    return StateVector([Bearing(phi)])
def h2d(state_vector, pos_map, translation_offset, rotation_offset):
    """Reference measurement function: 2D Cartesian position -> (bearing, range).

    ``pos_map`` is accepted for signature parity with the other helpers but
    unused here: the x/y components are taken from indices 0 and 1 directly.
    """
    # Sensor-relative position, padded with z=0 for the 3D rotation below.
    xyz = [[state_vector[0, 0] - translation_offset[0, 0]],
           [state_vector[1, 0] - translation_offset[1, 0]],
           [0]]
    # Get rotation matrix (inverse of the sensor orientation offsets).
    theta_x, theta_y, theta_z = - rotation_offset[:, 0]
    rotation_matrix = rotz(theta_z) @ roty(theta_y) @ rotx(theta_x)
    xyz_rot = rotation_matrix @ xyz
    rho, phi, _ = cart2sphere(*xyz_rot)
    return StateVector([Bearing(phi), rho])
def h3d(state_vector, pos_map, translation_offset, rotation_offset):
    """Reference measurement function: 3D position -> (elevation, bearing, range)."""
    # Target position relative to the sensor.
    relative_xyz = state_vector[pos_map, :] - translation_offset
    # Rotate into the sensor frame (inverse of the sensor orientation offsets).
    roll, pitch, yaw = - rotation_offset[:, 0]
    rotated = rotz(yaw) @ roty(pitch) @ rotx(roll) @ relative_xyz
    rng, bearing, elevation = cart2sphere(*rotated)
    return StateVector([Elevation(elevation), Bearing(bearing), rng])
def hbearing(state_vector, pos_map, translation_offset, rotation_offset):
    """Reference measurement function: 3D position -> (elevation, bearing)."""
    # Target position relative to the sensor.
    xyz = state_vector[pos_map, :] - translation_offset
    # Get rotation matrix (inverse of the sensor orientation offsets).
    theta_x, theta_y, theta_z = - rotation_offset[:, 0]
    rotation_matrix = rotz(theta_z) @ roty(theta_y) @ rotx(theta_x)
    xyz_rot = rotation_matrix @ xyz
    _, phi, theta = cart2sphere(*xyz_rot)
    return StateVector([Elevation(theta), Bearing(phi)])
@pytest.mark.parametrize(
    "h, ModelClass, state_vec, R , mapping,\
    translation_offset, rotation_offset",
    [
        ( # 1D meas, 2D state
            h1d,
            Cartesian2DToBearing,
            StateVector([[0], [1]]),
            CovarianceMatrix([[0.015]]),
            np.array([0, 1]),
            StateVector([[1], [-1]]),
            StateVector([[0], [0], [1]])
        ),
        ( # 1D meas, 2D state
            h1d,
            Cartesian2DToBearing,
            StateVector([[0], [1]]),
            CovarianceMatrix([[0.015]]),
            np.array([0, 1]),
            None,
            None
        ),
        ( # 2D meas, 2D state
            h2d,
            CartesianToBearingRange,
            StateVector([[0], [1]]),
            CovarianceMatrix([[0.015, 0],
                              [0, 0.1]]),
            np.array([0, 1]),
            StateVector([[1], [-1]]),
            StateVector([[0], [0], [1]])
        ),
        ( # 2D meas, 2D state
            h2d,
            CartesianToBearingRange,
            StateVector([[0], [1]]),
            CovarianceMatrix([[0.015, 0],
                              [0, 0.1]]),
            np.array([0, 1]),
            None,
            None
        ),
        ( # 3D meas, 3D state
            h3d,
            CartesianToElevationBearingRange,
            StateVector([[1], [2], [2]]),
            CovarianceMatrix([[0.05, 0, 0],
                              [0, 0.015, 0],
                              [0, 0, 0.1]]),
            np.array([0, 1, 2]),
            StateVector([[0], [0], [0]]),
            StateVector([[.2], [3], [-1]])
        ),
        ( # 3D meas, 3D state
            h3d,
            CartesianToElevationBearingRange,
            StateVector([[1], [2], [2]]),
            CovarianceMatrix([[0.05, 0, 0],
                              [0, 0.015, 0],
                              [0, 0, 0.1]]),
            np.array([0, 1, 2]),
            None,
            None
        ),
        ( # 2D meas, 3D state
            hbearing,
            CartesianToElevationBearing,
            StateVector([[1], [2], [3]]),
            np.array([[0.05, 0],
                      [0, 0.015]]),
            np.array([0, 1, 2]),
            StateVector([[0], [0], [0]]),
            StateVector([[-3], [0], [np.pi/3]])
        ),
        ( # 2D meas, 3D state
            hbearing,
            CartesianToElevationBearing,
            StateVector([[1], [2], [3]]),
            np.array([[0.05, 0],
                      [0, 0.015]]),
            np.array([0, 1, 2]),
            None,
            None
        )
    ],
    # BUGFIX: the second range/bearing/elevation case was duplicated as
    # "RangeBearingElevation1"; renamed to "RangeBearingElevation2" so each
    # parametrized case has a distinct, selectable id.
    ids=["Bearing1", "Bearing2",
         "BearingElevation1", "BearingElevation2",
         "RangeBearingElevation1", "RangeBearingElevation2",
         "BearingsOnly1", "BearingsOnly2"]
)
def test_models(h, ModelClass, state_vec, R,
                mapping, translation_offset, rotation_offset):
    """ Test for the CartesianToBearingRange, CartesianToElevationBearingRange,
    and CartesianToElevationBearing Measurement Models """
    ndim_state = state_vec.size
    state = State(state_vec)
    # Check default translation_offset, rotation_offset and velocity is applied
    model_test = ModelClass(ndim_state=ndim_state,
                            mapping=mapping,
                            noise_covar=R)
    assert len(model_test.translation_offset) == ndim_state
    assert len(model_test.rotation_offset) == 3
    # Create a measurement model object
    model = ModelClass(ndim_state=ndim_state,
                       mapping=mapping,
                       noise_covar=R,
                       translation_offset=translation_offset,
                       rotation_offset=rotation_offset)
    # Project a state through the model
    # (without noise)
    meas_pred_wo_noise = model.function(state)
    eval_m = h(state_vec, mapping, model.translation_offset, model.rotation_offset)
    assert np.array_equal(meas_pred_wo_noise, eval_m)
    # Ensure ```lg.transfer_function()``` returns H
    def fun(x):
        return model.function(x)
    H = compute_jac(fun, state)
    assert np.array_equal(H, model.jacobian(state))
    # Check Jacobian has proper dimensions
    assert H.shape == (model.ndim_meas, ndim_state)
    # Ensure inverse function returns original
    if isinstance(model, ReversibleModel):
        J = model.inverse_function(State(meas_pred_wo_noise))
        assert np.allclose(J, state_vec)
    # Ensure ```lg.covar()``` returns R
    assert np.array_equal(R, model.covar())
    # Ensure model creates noise
    rvs = model.rvs()
    assert rvs.shape == (model.ndim_meas, 1)
    assert isinstance(rvs, StateVector)
    rvs = model.rvs(10)
    assert rvs.shape == (model.ndim_meas, 10)
    assert isinstance(rvs, StateVectors)
    assert not isinstance(rvs, StateVector)
    # Project a state through the model
    # (without noise)
    meas_pred_wo_noise = model.function(state)
    assert np.array_equal(meas_pred_wo_noise, h(
        state_vec, mapping, model.translation_offset, model.rotation_offset))
    # Evaluate the likelihood of the predicted measurement, given the state
    # (without noise)
    prob = model.pdf(State(meas_pred_wo_noise), state)
    assert approx(prob) == multivariate_normal.pdf(
        (meas_pred_wo_noise
         - np.array(h(state_vec, mapping, model.translation_offset, model.rotation_offset))
         ).ravel(),
        cov=R)
    # Propagate a state vector through the model
    # (with internal noise)
    meas_pred_w_inoise = model.function(state, noise=True)
    assert not np.array_equal(
        meas_pred_w_inoise, h(state_vec,
                              mapping,
                              model.translation_offset,
                              model.rotation_offset))
    # Evaluate the likelihood of the predicted state, given the prior
    # (with noise)
    prob = model.pdf(State(meas_pred_w_inoise), state)
    assert approx(prob) == multivariate_normal.pdf(
        (meas_pred_w_inoise
         - np.array(h(state_vec, mapping, model.translation_offset, model.rotation_offset))
         ).ravel(),
        cov=R)
    # Propagate a state vector through the model
    # (with external noise)
    noise = model.rvs()
    meas_pred_w_enoise = model.function(state,
                                        noise=noise)
    assert np.array_equal(meas_pred_w_enoise, h(
        state_vec, mapping, model.translation_offset, model.rotation_offset)+noise)
    # Evaluate the likelihood of the predicted state, given the prior
    # (with noise)
    prob = model.pdf(State(meas_pred_w_enoise), state)
    assert approx(prob) == multivariate_normal.pdf(
        (meas_pred_w_enoise
         - h(state_vec, model.mapping, model.translation_offset, model.rotation_offset)
         ).ravel(),
        cov=R)
def test_angle_pdf():
    """Check the bearing/range pdf is invariant to rotating the whole geometry,
    i.e. angle differences are evaluated on the circle, not as raw floats."""
    model = CartesianToBearingRange(ndim_state=2,
                                    mapping=(0, 1),
                                    noise_covar=np.diag([np.radians(10), 2]))
    # Around 0 degrees
    measurement = State(StateVector([[Bearing(np.radians(1.))], [10.]]))
    x, y = pol2cart(10, np.radians(-1))
    state = State(StateVector([[x], [y]]))
    reference_probability = model.pdf(measurement, state)
    # Check same result around 90 degrees
    measurement.state_vector[0, 0] += np.radians(90)
    x, y = pol2cart(10, np.radians(89))
    state = State(StateVector([[x], [y]]))
    assert approx(reference_probability) == model.pdf(measurement, state)
    # Check same result around 180 degrees
    measurement.state_vector[0, 0] += np.radians(90)
    x, y = pol2cart(10, np.radians(179))
    state = State(StateVector([[x], [y]]))
    assert approx(reference_probability) == model.pdf(measurement, state)
def h2d_rr(state_vector, pos_map, vel_map, translation_offset, rotation_offset, velocity):
    """Reference measurement function: 2D position+velocity ->
    (bearing, range, range rate), with z padded to 0."""
    # Target position relative to the sensor, padded with z=0.
    xyz = np.array([[state_vector[pos_map[0], 0] - translation_offset[0, 0]],
                    [state_vector[pos_map[1], 0] - translation_offset[1, 0]],
                    [0]])
    # Get rotation matrix (inverse of the sensor orientation offsets).
    theta_x, theta_y, theta_z = - rotation_offset[:, 0]
    rotation_matrix = rotz(theta_z) @ roty(theta_y) @ rotx(theta_x)
    xyz_rot = rotation_matrix @ xyz
    rho, phi, _ = cart2sphere(*xyz_rot)
    # Calculate range rate extension
    # Determine the net velocity component in the engagement
    xyz_vel = np.array([[state_vector[vel_map[0], 0] - velocity[0, 0]],
                        [state_vector[vel_map[1], 0] - velocity[1, 0]],
                        [0]])
    # Use polar to calculate range rate (radial component of relative velocity)
    rr = np.dot(xyz[:, 0], xyz_vel[:, 0]) / np.linalg.norm(xyz)
    return StateVector([Bearing(phi), rho, rr])
def h3d_rr(state_vector, pos_map, vel_map, translation_offset, rotation_offset, velocity):
    """Reference measurement function: 3D position+velocity ->
    (elevation, bearing, range, range rate)."""
    # Target position relative to the sensor.
    rel_pos = state_vector[pos_map, :] - translation_offset
    # Rotate into the sensor frame (inverse of the sensor orientation offsets).
    roll, pitch, yaw = - rotation_offset[:, 0]
    rel_pos_rot = rotz(yaw) @ roty(pitch) @ rotx(roll) @ rel_pos
    rng, bearing, elevation = cart2sphere(*rel_pos_rot)
    # Target velocity relative to the (possibly moving) sensor.
    rel_vel = np.array([[state_vector[vel_map[0], 0] - velocity[0, 0]],
                        [state_vector[vel_map[1], 0] - velocity[1, 0]],
                        [state_vector[vel_map[2], 0] - velocity[2, 0]]])
    # Range rate is the radial component of the relative velocity.
    range_rate = np.dot(rel_pos[:, 0], rel_vel[:, 0]) / np.linalg.norm(rel_pos)
    return StateVector([Elevation(elevation), Bearing(bearing), rng, range_rate])
@pytest.mark.parametrize(
"h, modelclass, state_vec, ndim_state, pos_mapping, vel_mapping,\
noise_covar, position, orientation",
[
( # 3D meas, 6D state
h2d_rr, # h
CartesianToBearingRangeRate, # ModelClass
StateVector([[200.], [10.], [0.], [0.], [0.], [0.]]), # state_vec
6, # ndim_state
np.array([0, 2, 4]), # pos_mapping
np.array([1, 3, 5]), # vel_mapping
CovarianceMatrix([[0.05, 0, 0],
[0, 0.015, 0],
[0, 0, 10]]), # noise_covar
StateVector([[1], [-1], [0]]), # position (translation offset)
StateVector([[0], [0], [1]]) # orientation (rotation offset)
),
( # 3D meas, 6D state
h2d_rr, # h
CartesianToBearingRangeRate, # ModelClass
StateVector([[200.], [10.], [0.], [0.], [0.], [0.]]), # state_vec
6, # ndim_state
np.array([0, 2, 4]), # pos_mapping
np.array([1, 3, 5]), # vel_mapping
CovarianceMatrix([[0.05, 0, 0],
[0, 0.015, 0],
[0, 0, 10]]), # noise_covar
None, # position (translation offset)
None # orientation (rotation offset)
),
( # 4D meas, 6D state
h3d_rr, # h
CartesianToElevationBearingRangeRate, # ModelClass
StateVector([[200.], [10.], [0.], [0.], [0.], [0.]]), # state_vec
6, # ndim_state
np.array([0, 2, 4]), # pos_mapping
np.array([1, 3, 5]), # vel_mapping
CovarianceMatrix([[0.05, 0, 0, 0],
[0, 0.05, 0, 0],
[0, 0, 0.015, 0],
[0, 0, 0, 10]]), # noise_covar
StateVector([[100], [0], [0]]), # position (translation offset)
StateVector([[0], [0], [0]]) # orientation (rotation offset)
),
( # 4D meas, 6D state
h3d_rr, # h
CartesianToElevationBearingRangeRate, # ModelClass
StateVector([[200.], [10.], [0.], [0.], [0.], [0.]]), # state_vec
6, # ndim_state
np.array([0, 2, 4]), # pos_mapping
np.array([1, 3, 5]), # vel_mapping
CovarianceMatrix([[0.05, 0, 0, 0],
[0, 0.05, 0, 0],
[0, 0, 0.015, 0],
[0, 0, 0, 10]]), # noise_covar
None, # position (translation offset)
None # orientation (rotation offset)
)
],
ids=["rrRB_1", "rrRB_2", "rrRBE_1", "rrRBE_2"]
)
def test_rangeratemodels(h, modelclass, state_vec, ndim_state, pos_mapping, vel_mapping,
                         noise_covar, position, orientation):
    """Test for the CartesianToBearingRangeRate and
    CartesianToElevationBearingRangeRate measurement models.

    Checks default offsets, the noiseless projection against the evaluation
    helper ``h``, the Jacobian, the inverse function (when reversible), the
    noise covariance, noise generation, and the measurement likelihood with
    internal and external noise.
    """
    state = State(state_vec)

    # Check default translation_offset, rotation_offset and velocity are applied
    model_test = modelclass(ndim_state=ndim_state,
                            mapping=pos_mapping,
                            velocity_mapping=vel_mapping,
                            noise_covar=noise_covar)

    assert len(model_test.translation_offset) == 3
    assert len(model_test.rotation_offset) == 3
    assert len(model_test.velocity) == 3

    # Create a measurement model object with explicit offsets
    model = modelclass(ndim_state=ndim_state,
                       mapping=pos_mapping,
                       velocity_mapping=vel_mapping,
                       noise_covar=noise_covar,
                       translation_offset=position,
                       rotation_offset=orientation)

    # Reference measurement from the evaluation helper. The model attributes
    # do not change below, so compute it once and reuse it (the original test
    # recomputed this identical expression five times).
    eval_m = h(state_vec,
               model.mapping,
               model.velocity_mapping,
               model.translation_offset,
               model.rotation_offset,
               model.velocity)

    # Project a state through the model (without noise)
    meas_pred_wo_noise = model.function(state)
    assert np.array_equal(meas_pred_wo_noise, eval_m)

    # Ensure ```lg.transfer_function()``` returns H
    def fun(x):
        return model.function(x)

    H = compute_jac(fun, state)
    assert np.array_equal(H, model.jacobian(state))

    # Check Jacobian has proper dimensions
    assert H.shape == (model.ndim_meas, ndim_state)

    # Ensure inverse function returns original
    if isinstance(model, ReversibleModel):
        J = model.inverse_function(State(meas_pred_wo_noise))
        assert np.allclose(J, state_vec)

    # Ensure ```lg.covar()``` returns R
    assert np.array_equal(noise_covar, model.covar())

    # Ensure model creates noise
    rvs = model.rvs()
    assert rvs.shape == (model.ndim_meas, 1)
    assert isinstance(rvs, StateVector)
    rvs = model.rvs(10)
    assert rvs.shape == (model.ndim_meas, 10)
    assert isinstance(rvs, StateVectors)
    # StateVector is subclass of Matrix, so need to check explicitly.
    assert not isinstance(rvs, StateVector)

    # Evaluate the likelihood of the predicted measurement, given the state
    # (without noise)
    prob = model.pdf(State(meas_pred_wo_noise), state)
    assert approx(prob) == multivariate_normal.pdf(
        (meas_pred_wo_noise - eval_m).ravel(),
        cov=noise_covar)

    # Propagate a state vector through the model (with internal noise)
    meas_pred_w_inoise = model.function(state, noise=True)
    assert not np.array_equal(meas_pred_w_inoise, eval_m)

    # Evaluate the likelihood of the predicted state, given the prior
    # (with noise)
    prob = model.pdf(State(meas_pred_w_inoise), state)
    assert approx(prob) == multivariate_normal.pdf(
        (meas_pred_w_inoise - eval_m).ravel(),
        cov=noise_covar)

    # Propagate a state vector through the model (with external noise)
    noise = model.rvs()
    meas_pred_w_enoise = model.function(state,
                                        noise=noise)
    assert np.array_equal(meas_pred_w_enoise, eval_m + noise)

    # Evaluate the likelihood of the predicted state, given the prior
    # (with noise)
    prob = model.pdf(State(meas_pred_w_enoise), state)
    assert approx(prob) == multivariate_normal.pdf(
        (meas_pred_w_enoise - eval_m).ravel(),
        cov=noise_covar)
def test_inverse_function():
    """Check that ``inverse_function`` recovers the Cartesian state from an
    elevation/bearing/range/range-rate measurement (zero noise covariance)."""
    measure_model = CartesianToElevationBearingRangeRate(
        ndim_state=6,
        mapping=np.array([0, 2, 4]),
        velocity_mapping=np.array([1, 3, 5]),
        noise_covar=np.array([[0, 0, 0, 0],
                              [0, 0, 0, 0],
                              [0, 0, 0, 0],
                              [0, 0, 0, 0]]))

    measured_state = State(StateVector([np.pi / 18, np.pi / 18, 10e3, 100.0]))
    recovered = measure_model.inverse_function(measured_state)

    # Expected Cartesian components, interleaved position/velocity:
    # (x, vx, y, vy, z, vz).
    expected = (9698.46, 96.98, 1710.1, 17.10, 1736.48, 17.36)
    for idx, value in enumerate(expected):
        assert approx(recovered[idx], 0.02) == value
| 37.046018 | 91 | 0.571019 |
acf93be9cdb4c4eb1f105de461f36c0e40aaa839 | 157 | py | Python | faq/urls/admin.py | Sejong-Creative-Semester2021/OJ-BE | cecc511b771f1979ba7a556abdae1cbefa8e17bd | [
"MIT"
] | null | null | null | faq/urls/admin.py | Sejong-Creative-Semester2021/OJ-BE | cecc511b771f1979ba7a556abdae1cbefa8e17bd | [
"MIT"
] | null | null | null | faq/urls/admin.py | Sejong-Creative-Semester2021/OJ-BE | cecc511b771f1979ba7a556abdae1cbefa8e17bd | [
"MIT"
] | null | null | null | from django.conf.urls import url
from ..views.admin import FAQAdminAPI

# Routes "faq" (trailing slash optional) to the admin FAQ API view.
urlpatterns = [
    url(r"^faq/?$", FAQAdminAPI.as_view(), name="faq_admin_api"),
]
| 19.625 | 65 | 0.707006 |
acf93ccbfd86296012e6ae041153912b697f58eb | 3,582 | py | Python | bindings/python/ensmallen/datasets/string/pseudomonasstutzeridsm10701.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z | bindings/python/ensmallen/datasets/string/pseudomonasstutzeridsm10701.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/pseudomonasstutzeridsm10701.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph Pseudomonas stutzeri DSM10701.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def PseudomonasStutzeriDsm10701(
    directed: bool = False,
    preprocess: bool = True,
    load_nodes: bool = True,
    verbose: int = 2,
    cache: bool = True,
    cache_path: str = "graphs/string",
    version: str = "links.v11.5",
    **additional_graph_kwargs: Dict
) -> Graph:
    """Retrieve the Pseudomonas stutzeri DSM10701 graph from STRING.

    Parameters
    -------------------
    directed: bool = False
        Whether to load the graph as directed or undirected.
    preprocess: bool = True
        Whether to preprocess the graph for optimal load time and memory.
    load_nodes: bool = True
        Whether to load the node vocabulary or treat the nodes as a
        numeric range.
    verbose: int = 2
        Whether to show loading bars during retrieval and building.
    cache: bool = True
        Whether to download and preprocess the files only once.
    cache_path: str = "graphs/string"
        Where to store the downloaded graphs.
    version: str = "links.v11.5"
        The version of the graph to retrieve. Available versions are
        "homology.v11.0", "homology.v11.5", "physical.links.v11.0",
        "physical.links.v11.5", "links.v11.0" and "links.v11.5".
    additional_graph_kwargs: Dict
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of the Pseudomonas stutzeri DSM10701 graph.

    References
    ---------------------
    Please cite the following if you use the data:

    Szklarczyk et al., "STRING v11: protein--protein association networks
    with increased coverage, supporting functional discovery in genome-wide
    experimental datasets", Nucleic Acids Research, 47(D1), D607--D613, 2019.
    """
    # Build the lazy retriever first, then invoke it so the download and
    # parsing only happen at call time.
    retriever = AutomaticallyRetrievedGraph(
        graph_name="PseudomonasStutzeriDsm10701",
        repository="string",
        version=version,
        directed=directed,
        preprocess=preprocess,
        load_nodes=load_nodes,
        verbose=verbose,
        cache=cache,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )
    return retriever()
| 33.166667 | 223 | 0.679788 |
acf93ce8ab77751b09f405e388e0aee62097103c | 24,350 | py | Python | mmdet/models/backbones/resnet.py | lvgu597/mm_frequency | bcc2b030140ff1fe6bd27c193c1fbd72f930e923 | [
"Apache-2.0"
] | null | null | null | mmdet/models/backbones/resnet.py | lvgu597/mm_frequency | bcc2b030140ff1fe6bd27c193c1fbd72f930e923 | [
"Apache-2.0"
] | null | null | null | mmdet/models/backbones/resnet.py | lvgu597/mm_frequency | bcc2b030140ff1fe6bd27c193c1fbd72f930e923 | [
"Apache-2.0"
] | null | null | null | import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import (build_conv_layer, build_norm_layer, build_plugin_layer,
constant_init, kaiming_init)
from mmcv.runner import load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.utils import get_root_logger
from ..builder import BACKBONES
from ..utils import ResLayer
from .cbam import CBAM
class BasicBlock(nn.Module):
    """Two-convolution residual block used by ResNet-18/34.

    Computes ``relu(norm2(conv2(relu(norm1(conv1(x))))) + identity)``, where
    ``identity`` is the input itself or ``downsample(x)`` when given.
    """

    expansion = 1

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 plugins=None):
        super(BasicBlock, self).__init__()
        # DCN and plugins are only supported by ``Bottleneck``.
        assert dcn is None, 'Not implemented yet.'
        assert plugins is None, 'Not implemented yet.'

        # ``build_norm_layer`` returns (name, module); the names are kept so
        # the ``norm1``/``norm2`` properties can look the modules up.
        self.norm1_name, bn_first = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, bn_second = build_norm_layer(norm_cfg, planes, postfix=2)

        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            3,
            stride=stride,
            padding=dilation,
            dilation=dilation,
            bias=False)
        self.add_module(self.norm1_name, bn_first)
        self.conv2 = build_conv_layer(
            conv_cfg, planes, planes, 3, padding=1, bias=False)
        self.add_module(self.norm2_name, bn_second)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.with_cp = with_cp

    @property
    def norm1(self):
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: normalization layer after the second convolution layer"""
        return getattr(self, self.norm2_name)

    def forward(self, x):
        """Forward pass, optionally under gradient checkpointing."""

        def _residual(feat):
            shortcut = feat if self.downsample is None else self.downsample(feat)
            out = self.relu(self.norm1(self.conv1(feat)))
            out = self.norm2(self.conv2(out))
            return out + shortcut

        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_residual, x)
        else:
            out = _residual(x)

        return self.relu(out)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 residual bottleneck block used by ResNet-50/101/152.

    Optionally uses deformable convolution (``dcn``) for the 3x3 conv and
    supports plugin modules inserted after conv1/conv2/conv3.
    """

    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 plugins=None):
        """Bottleneck block for ResNet.

        If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
        it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__()
        assert style in ['pytorch', 'caffe']
        assert dcn is None or isinstance(dcn, dict)
        assert plugins is None or isinstance(plugins, list)
        if plugins is not None:
            allowed_position = ['after_conv1', 'after_conv2', 'after_conv3']
            assert all(p['position'] in allowed_position for p in plugins)

        self.inplanes = inplanes
        self.planes = planes
        self.stride = stride
        self.dilation = dilation
        self.style = style
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.dcn = dcn
        self.with_dcn = dcn is not None
        self.plugins = plugins
        self.with_plugins = plugins is not None

        if self.with_plugins:
            # collect plugins for conv1/conv2/conv3
            self.after_conv1_plugins = [
                plugin['cfg'] for plugin in plugins
                if plugin['position'] == 'after_conv1'
            ]
            self.after_conv2_plugins = [
                plugin['cfg'] for plugin in plugins
                if plugin['position'] == 'after_conv2'
            ]
            self.after_conv3_plugins = [
                plugin['cfg'] for plugin in plugins
                if plugin['position'] == 'after_conv3'
            ]

        if self.style == 'pytorch':
            self.conv1_stride = 1
            self.conv2_stride = stride
        else:
            self.conv1_stride = stride
            self.conv2_stride = 1

        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            norm_cfg, planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        if self.with_dcn:
            # ``pop`` consumes the flag so it is not forwarded to the conv
            # builder. NOTE(review): this mutates the ``dcn`` dict, which is
            # shared across the blocks of a stage — later blocks will not see
            # ``fallback_on_stride``; confirm this is intended.
            fallback_on_stride = dcn.pop('fallback_on_stride', False)
        if not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(
                conv_cfg,
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                bias=False)
        else:
            # assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(
                dcn,
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                bias=False)

        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            conv_cfg,
            planes,
            planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

        if self.with_plugins:
            self.after_conv1_plugin_names = self.make_block_plugins(
                planes, self.after_conv1_plugins)
            self.after_conv2_plugin_names = self.make_block_plugins(
                planes, self.after_conv2_plugins)
            self.after_conv3_plugin_names = self.make_block_plugins(
                planes * self.expansion, self.after_conv3_plugins)

    def make_block_plugins(self, in_channels, plugins):
        """make plugins for block.

        Args:
            in_channels (int): Input channels of plugin.
            plugins (list[dict]): List of plugins cfg to build.

        Returns:
            list[str]: List of the names of plugin.
        """
        assert isinstance(plugins, list)
        plugin_names = []
        for plugin in plugins:
            plugin = plugin.copy()
            name, layer = build_plugin_layer(
                plugin,
                in_channels=in_channels,
                postfix=plugin.pop('postfix', ''))
            assert not hasattr(self, name), f'duplicate plugin {name}'
            self.add_module(name, layer)
            plugin_names.append(name)
        return plugin_names

    def forward_plugin(self, x, plugin_names):
        """Apply the named plugins to ``x`` in order, chaining their outputs.

        Bug fix: each plugin now receives the previous plugin's output
        (``out``). Previously every plugin was applied to ``x`` directly, so
        when several plugins shared a position only the last one's output
        survived. With a single plugin per position behavior is unchanged.
        """
        out = x
        for name in plugin_names:
            out = getattr(self, name)(out)
        return out

    @property
    def norm1(self):
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: normalization layer after the second convolution layer"""
        return getattr(self, self.norm2_name)

    @property
    def norm3(self):
        """nn.Module: normalization layer after the third convolution layer"""
        return getattr(self, self.norm3_name)

    def forward(self, x):
        """Forward function."""

        def _inner_forward(x):
            identity = x

            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)

            out = self.conv2(out)
            out = self.norm2(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)

            out = self.conv3(out)
            out = self.norm3(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)

            if self.downsample is not None:
                identity = self.downsample(x)

            out += identity

            return out

        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        out = self.relu(out)

        return out
@BACKBONES.register_module()
class ResNet(nn.Module):
    """ResNet backbone.

    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        stem_channels (int | None): Number of stem channels. If not specified,
            it will be the same as `base_channels`. Default: None.
        base_channels (int): Number of base channels of res layer. Default: 64.
        in_channels (int): Number of input image channels. Default: 3.
        num_stages (int): Resnet stages. Default: 4.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
            layer is the 3x3 conv layer, otherwise the stride-two layer is
            the first 1x1 conv layer.
        deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottleneck.
        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
            -1 means not freezing any parameters.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        plugins (list[dict]): List of plugins for stages, each dict contains:

            - cfg (dict, required): Cfg dict to build plugin.
            - position (str, required): Position inside block to insert
              plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.
            - stages (tuple[bool], optional): Stages to apply plugin, length
              should be same as 'num_stages'.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        zero_init_residual (bool): Whether to use zero init for last norm layer
            in resblocks to let them behave as identity.

    Example:
        >>> from mmdet.models import ResNet
        >>> import torch
        >>> self = ResNet(depth=18)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 64, 8, 8)
        (1, 128, 4, 4)
        (1, 256, 2, 2)
        (1, 512, 1, 1)
    """

    # Mapping from depth to (block class, number of blocks per stage).
    arch_settings = {
        18: (BasicBlock, (2, 2, 2, 2)),
        34: (BasicBlock, (3, 4, 6, 3)),
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self,
                 depth,
                 in_channels=3,
                 stem_channels=None,
                 base_channels=64,
                 num_stages=4,
                 strides=(1, 2, 2, 2),
                 dilations=(1, 1, 1, 1),
                 out_indices=(0, 1, 2, 3),
                 style='pytorch',
                 deep_stem=False,
                 avg_down=False,
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 norm_eval=True,
                 dcn=None,
                 stage_with_dcn=(False, False, False, False),
                 plugins=None,
                 with_cp=False,
                 zero_init_residual=True):
        super(ResNet, self).__init__()
        if depth not in self.arch_settings:
            raise KeyError(f'invalid depth {depth} for resnet')
        self.depth = depth
        if stem_channels is None:
            stem_channels = base_channels
        self.stem_channels = stem_channels
        self.base_channels = base_channels
        self.num_stages = num_stages
        assert num_stages >= 1 and num_stages <= 4
        self.strides = strides
        self.dilations = dilations
        assert len(strides) == len(dilations) == num_stages
        self.out_indices = out_indices
        assert max(out_indices) < num_stages
        self.style = style
        self.deep_stem = deep_stem
        self.avg_down = avg_down
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.with_cp = with_cp
        self.norm_eval = norm_eval
        self.dcn = dcn
        self.stage_with_dcn = stage_with_dcn
        if dcn is not None:
            assert len(stage_with_dcn) == num_stages
        self.plugins = plugins
        self.zero_init_residual = zero_init_residual
        self.block, stage_blocks = self.arch_settings[depth]
        self.stage_blocks = stage_blocks[:num_stages]
        self.inplanes = stem_channels

        self._make_stem_layer(in_channels, stem_channels)

        # Build one residual stage per entry in ``stage_blocks``; each stage
        # is registered as ``layer{i+1}`` and its name remembered so
        # ``forward`` can iterate them in order.
        self.res_layers = []
        for i, num_blocks in enumerate(self.stage_blocks):
            stride = strides[i]
            dilation = dilations[i]
            # Per-stage DCN/plugins: only applied where the corresponding
            # flag in ``stage_with_dcn`` / the plugin's ``stages`` says so.
            dcn = self.dcn if self.stage_with_dcn[i] else None
            if plugins is not None:
                stage_plugins = self.make_stage_plugins(plugins, i)
            else:
                stage_plugins = None
            # Channel width doubles at every stage: 64, 128, 256, 512.
            planes = base_channels * 2**i
            res_layer = self.make_res_layer(
                block=self.block,
                inplanes=self.inplanes,
                planes=planes,
                num_blocks=num_blocks,
                stride=stride,
                dilation=dilation,
                style=self.style,
                avg_down=self.avg_down,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                dcn=dcn,
                plugins=stage_plugins)
            self.inplanes = planes * self.block.expansion
            layer_name = f'layer{i + 1}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)

        self._freeze_stages()

        # Number of channels of the last stage's output.
        self.feat_dim = self.block.expansion * base_channels * 2**(
            len(self.stage_blocks) - 1)

    def make_stage_plugins(self, plugins, stage_idx):
        """Make plugins for ResNet ``stage_idx`` th stage.

        Currently we support to insert ``context_block``,
        ``empirical_attention_block``, ``nonlocal_block`` into the backbone
        like ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of
        Bottleneck.

        An example of plugins format could be:

        Examples:
            >>> plugins=[
            ...     dict(cfg=dict(type='xxx', arg1='xxx'),
            ...          stages=(False, True, True, True),
            ...          position='after_conv2'),
            ...     dict(cfg=dict(type='yyy'),
            ...          stages=(True, True, True, True),
            ...          position='after_conv3'),
            ...     dict(cfg=dict(type='zzz', postfix='1'),
            ...          stages=(True, True, True, True),
            ...          position='after_conv3'),
            ...     dict(cfg=dict(type='zzz', postfix='2'),
            ...          stages=(True, True, True, True),
            ...          position='after_conv3')
            ... ]
            >>> self = ResNet(depth=18)
            >>> stage_plugins = self.make_stage_plugins(plugins, 0)
            >>> assert len(stage_plugins) == 3

        Suppose ``stage_idx=0``, the structure of blocks in the stage would be:

        .. code-block:: none

            conv1-> conv2->conv3->yyy->zzz1->zzz2

        Suppose 'stage_idx=1', the structure of blocks in the stage would be:

        .. code-block:: none

            conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2

        If stages is missing, the plugin would be applied to all stages.

        Args:
            plugins (list[dict]): List of plugins cfg to build. The postfix is
                required if multiple same type plugins are inserted.
            stage_idx (int): Index of stage to build

        Returns:
            list[dict]: Plugins for current stage
        """
        stage_plugins = []
        for plugin in plugins:
            plugin = plugin.copy()
            stages = plugin.pop('stages', None)
            assert stages is None or len(stages) == self.num_stages
            # whether to insert plugin into current stage
            if stages is None or stages[stage_idx]:
                stage_plugins.append(plugin)

        return stage_plugins

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer``."""
        return ResLayer(**kwargs)

    @property
    def norm1(self):
        """nn.Module: the normalization layer named "norm1" """
        return getattr(self, self.norm1_name)

    def _make_stem_layer(self, in_channels, stem_channels):
        # Build the input stem: either three stacked 3x3 convs (``deep_stem``,
        # ResNetV1d style) or the classic single 7x7 stride-2 conv.
        if self.deep_stem:
            self.stem = nn.Sequential(
                build_conv_layer(
                    self.conv_cfg,
                    in_channels,
                    stem_channels // 2,
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    bias=False),
                build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
                nn.ReLU(inplace=True),
                build_conv_layer(
                    self.conv_cfg,
                    stem_channels // 2,
                    stem_channels // 2,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=False),
                build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
                nn.ReLU(inplace=True),
                build_conv_layer(
                    self.conv_cfg,
                    stem_channels // 2,
                    stem_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=False),
                build_norm_layer(self.norm_cfg, stem_channels)[1],
                nn.ReLU(inplace=True))
        else:
            self.conv1 = build_conv_layer(
                self.conv_cfg,
                in_channels,
                stem_channels,
                kernel_size=7,
                stride=2,
                padding=3,
                bias=False)
            self.norm1_name, norm1 = build_norm_layer(
                self.norm_cfg, stem_channels, postfix=1)
            self.add_module(self.norm1_name, norm1)
            self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def _freeze_stages(self):
        # Freeze the stem (stage 0) and stages 1..frozen_stages: switch them
        # to eval mode and stop their gradients.
        if self.frozen_stages >= 0:
            if self.deep_stem:
                self.stem.eval()
                for param in self.stem.parameters():
                    param.requires_grad = False
            else:
                self.norm1.eval()
                for m in [self.conv1, self.norm1]:
                    for param in m.parameters():
                        param.requires_grad = False

        for i in range(1, self.frozen_stages + 1):
            m = getattr(self, f'layer{i}')
            m.eval()
            for param in m.parameters():
                param.requires_grad = False

    def init_weights(self, pretrained=None):
        """Initialize the weights in backbone.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            # Default init: Kaiming for convs, constant 1 for norm layers.
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)

            if self.dcn is not None:
                # Deformable conv offsets start at zero so DCN initially
                # behaves like a regular convolution.
                for m in self.modules():
                    if isinstance(m, Bottleneck) and hasattr(
                            m.conv2, 'conv_offset'):
                        constant_init(m.conv2.conv_offset, 0)

            if self.zero_init_residual:
                # Zero the last norm layer of each block so the residual
                # branch starts as identity.
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Forward function."""
        if self.deep_stem:
            x = self.stem(x)
        else:
            x = self.conv1(x)
            x = self.norm1(x)
            x = self.relu(x)
        x = self.maxpool(x)
        outs = []
        # Collect the feature maps of the stages listed in ``out_indices``.
        for i, layer_name in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x)
            if i in self.out_indices:
                outs.append(x)
        return tuple(outs)

    def train(self, mode=True):
        """Convert the model into training mode while keep normalization layer
        freezed."""
        super(ResNet, self).train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                # trick: eval have effect on BatchNorm only
                if isinstance(m, _BatchNorm):
                    m.eval()
@BACKBONES.register_module()
class ResNetV1d(ResNet):
    r"""ResNetV1d variant described in `Bag of Tricks
    <https://arxiv.org/pdf/1812.01187.pdf>`_.

    Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in
    the input stem with three 3x3 convs. And in the downsampling block, a 2x2
    avg_pool with stride 2 is added before conv, whose stride is changed to 1.
    """

    def __init__(self, **kwargs):
        # Force the deep (3x3 conv) stem and avg-pool downsampling; all other
        # options are forwarded to ``ResNet`` unchanged.
        super(ResNetV1d, self).__init__(
            deep_stem=True, avg_down=True, **kwargs)
@BACKBONES.register_module()
class ResNetCBam(ResNet):
    """ResNet backbone with a CBAM attention module after every stage.

    A ``CBAM`` block is applied to the output of each residual stage before
    it is (optionally) collected as an output feature map.
    """

    def __init__(self, **kwargs):
        super(ResNetCBam, self).__init__(**kwargs)
        # One CBAM per stage, sized to that stage's output channels
        # (64/128/256/512 times the block expansion). Registered as
        # ``bam1``..``bam4`` so state-dict keys match the original layout.
        for stage, width in enumerate((64, 128, 256, 512), start=1):
            self.add_module(f'bam{stage}', CBAM(width * self.block.expansion))

    def forward(self, x):
        """Forward function with per-stage CBAM attention."""
        if self.deep_stem:
            x = self.stem(x)
        else:
            x = self.relu(self.norm1(self.conv1(x)))
        x = self.maxpool(x)

        outs = []
        for stage, layer_name in enumerate(self.res_layers):
            x = getattr(self, layer_name)(x)
            # CBAM: Convolutional Block Attention Module
            x = getattr(self, f'bam{stage + 1}')(x)
            if stage in self.out_indices:
                outs.append(x)
        return tuple(outs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.